#!/usr/bin/python """ Created on Tue Oct 25 15:19:00 2016 @author: noel """ import os import sys import em.tools.CHARMM_Parser as CP import em.describe.utilities as ut import em.tools.Super_Structures as SS import Bio.PDB.PDBParser as PDBParser import pandas as pd import em.tools.input_output as IO import optparse, pkg_resources def main(): usage = "usage: %prog [options] arg" d = "This program reads a CSV file that has been generated by Super_Structure.\ The file corresponds to a Super Structure of a Protein. \ Multiple residues can be added at the time, No terminal will be added..\n \ This program can only add residues or terminals that are in the parameter file." opt_parser = optparse.OptionParser(usage,description=d) opt_parser.add_option("--apn", type="str",help="Enter Instruction for where to append residues in hard '\"'\n \ quotes. Place: Amino Acid Number, Entity ID, Chain ID and \n \ the direction to add residues separated by comas. Add. The \ direction to add residues is either Ndir or Cdir. This means \ that if a residue is added in residue 10, it could be toward \ the N or C terminal. This is important so that the program \ knows if the new residue is placed before or after the residue.\ Example \"1,1,A,Ndir\" or \"20,2,A,Cdir\". \n \ Chain ID, amino acid or terminal name are not case sensitive \ and do not need to go in quotes.\n") opt_parser.add_option("-r","--res", type="str",help="Enter list of amino acids to be added in hard quotes.'\"'\n\ Example: \"ALA,VAL,ASP,ASN,GLU\".") opt_parser.add_option("--inp", type="str",help="Path to CSV file for adding residue.") opt_parser.add_option("--out", type="str",help="Path and name to CSV and PDB outputs with added residues.") #opt_parser.add_option("--pep", type="str",help="Path to peptide file.") options, args = opt_parser.parse_args() if not os.path.exists(options.inp): print "Error: File path Super Structure CSV file does not exist." print("Type -h or --help for description and options.") sys.exit(1) ########################## Init Setup ##################################### # Comment out the next four lines to test in Spyder. 
directory, filename = os.path.split(options.inp) params = CP.read_charmm_FF() insulin = SS.Super_Structure(params, options.inp,'add_linker') parse_list = options.apn.split(',') if options.res.find(',') == -1: aa_add = [i for i in options.res] aa_add = [ut.utilities.residueDict1_1[i] for i in aa_add] else: aa_add = options.res.split(',') parser2 = PDBParser() pep_file_path = pkg_resources.resource_filename('em', 'params/' + 'peptides.pdb') # pep_file_path= /home/noel/.cache/Python-Eggs/Entropy_Maxima-0.1.0-py2.7.egg-tmp/em/params/peptides.pdb # pep_file = parser2.get_structure('Peptides',pep_file_path) # Uncomment the next four lines to test #file_path = '/home/noel/Projects/Protein_design/EntropyMaxima/examples/Linker_minimization/2hiu.csv' #insulin = SS.Super_Structure(params, file_path,'add_linker') #parse_list = "1,1,A,Ndir".split(',') #aa_add = "ALA".split(',') ############################################### insulin.build_pep_and_anchers(pep_file) ############### Begin processing parse_list and aa_add #################### message = '' print(parse_list,len(parse_list)) if len(parse_list) == 4 and len(aa_add) > 0: aaid_add = int(parse_list[0]) ent_id_add = int(parse_list[1]) chain_add = str(parse_list[2]).upper() term_dir = str(parse_list[3]) # So far this only works with natural aminoacids and ACE and CTER if term_dir in ['Ndir','Cdir']: message += 'Adding residues '+str(aa_add)+' in th '+term_dir+' at amino acid '+str(aaid_add)+', '+'entity ' message += str(ent_id_add)+' and direction '+term_dir+'.' print(message) # TODO: counting atoms do not seem necessary. Consider deleting. #count_atoms_added = 0 #for i in aa_add: # for j in insulin.params.AA[i].atoms: # for k in j: # count_atoms_added += 1 #count_aa_added = len(aa_add) ################################################################### # So we now create the link dataframe and follow the prosses in # Super_Structures to populate its fields. 
link = pd.DataFrame() aa = [] aaid = [] entity_id = [] chain_id = [] atmtyp1 = [] atmtyp2 = [] charg = [] component = [] snum = 1 for res in aa_add: chrm = res pdbx = res if chrm in insulin.params.AA: comp = 1 for k in insulin.params.AA[chrm].atoms: for l in k: aa.append(pdbx) aaid.append(snum) entity_id.append(ent_id_add) chain_id.append(chain_add) atmtyp1.append(insulin.corrections(chrm,l)) atmtyp2.append(insulin.params.AA[chrm].atom_type[insulin.corrections(chrm,l)]) charg.append(insulin.params.AA[chrm].atom_chrg[insulin.corrections(chrm,l)]) if comp == 1: component.append('AMINO') else: if l in ['C','O']: component.append('CARBO') else: component.append(('SIDE'+str(comp))) comp += 1 snum += 1 else: print('Warning: Amino Acid identifier',chrm,' is not found in parameters.') sys.exit(1) link['aa'] = pd.Series(aa) link['aaid'] = pd.Series(aaid) link['ent_id'] = pd.Series(entity_id) link['chain'] = pd.Series(chain_id) link['atmtyp1'] = pd.Series(atmtyp1) link['atmtyp2'] = pd.Series(atmtyp2) link['component'] = pd.Series(component) link['charg'] = pd.Series(charg) ########################################################################### # Add atomtyp, masses and atmNumber to each atom type mass = [] atmNum = [] atmtyp3 = [] epsilon = [] rmin_half = [] atminfo = [] aainfo = [] for i in link['atmtyp2']: atmNum.append(params.am.MASS[i][0]) mass.append(params.am.MASS[i][1]) atmtyp3.append(params.am.MASS[i][2]) epsilon.append(params.NONBONDED[i][1]) rmin_half.append(params.NONBONDED[i][2]) atminfo.append(True) aainfo.append(False) link['epsilon'] = pd.Series(epsilon) link['rmin_half'] = pd.Series(rmin_half) link['atmtyp3'] = pd.Series(atmtyp3) link['mass'] = pd.Series(mass) link['atmNum'] = pd.Series(atmNum) ########################################################################### # DF Type correction. link['aaid'] = link['aaid'].apply(int) link['ent_id'] = link['ent_id'].apply(int) link['mass'] = link['mass'].apply(float) link['epsilon'] = link['epsilon'].apply(float) link['rmin_half'] = link['rmin_half'].apply(float) link['atmNum'] = link['atmNum'].apply(int) # We now fill out the number of columns in the DataFrame with nan for i in insulin.Full_Structure.columns: if i not in list(link.columns): if i[0:6] == 'aainfo': link[i] = pd.Series(aainfo) elif i[0:7] == 'atminfo': link[i] = pd.Series(atminfo) else: link[i] = pd.Series([float('nan') for j in range(len(link))]) if term_dir == 'Ndir': beg_insert = min(insulin.Full_Structure.index[(insulin.Full_Structure.aaid == aaid_add) &\ (insulin.Full_Structure.ent_id == ent_id_add) &\ (insulin.Full_Structure.chain == chain_add)]) end_insert = beg_insert + link.shape[0] elif term_dir == 'Cdir': print('WARNING: The code has not been design and tested for insertions in the CTER.') print('Exiting the program without finishing.') sys.exit(1) else: print('ERROR: wrong terminal to insert link. Ndir and Cdir are the only choices. 
Exiting now.') sys.exit(1) joint_df = pd.DataFrame(columns=link.columns) count = 0 insert = True # When links are added , aaid needs to be fixed to reflect added residues aaid_offset = 0 for i in insulin.Full_Structure.index: if (i >= beg_insert) and (i < end_insert): if insert: for j in link.index: joint_df.loc[count] = link.loc[j] joint_df.loc[count,'aaid'] = joint_df.loc[count,'aaid'] + aaid_offset current_aaid = link.loc[j,'aaid'] count += 1 insert = False aaid_offset = aaid_offset + current_aaid joint_df.loc[count] = insulin.Full_Structure.loc[i] # So that only residues after the added link get increased in the given ent_id and chain # Any other entity or chain in the molecules is not fixed. if (joint_df.loc[count,'ent_id'] == ent_id_add) & (joint_df.loc[count,'chain'] == chain_add): joint_df.loc[count,'aaid'] = joint_df.loc[count,'aaid'] + aaid_offset count += 1 # After adding residues, it all gets copied back to original dataframe. for i in joint_df.index: insulin.Full_Structure.loc[i] = joint_df.loc[i] # The way to get number of models is very specific to the way this program # stores data in DataFrame. Be careful if the data frame column structure changes. # TODO: missing atom coordinates are added manually. It needs to be automated more. num_models = len(range(((insulin.Full_Structure.shape[1]-20)/5)))+1 for i in range(1,num_models+1): for j in range(len(aa_add),0,-1): insulin.fit_coordinates(term_dir,j,ent_id_add,chain_add,str(i),aa_add[j-1]) # NOTE: insulin.models are not in the Super Structure Class, but it is added here. # This works, but it does not seem the best way to do it. should models be a field of super # structures and be initialized there? insulin.models = [str(i) for i in range(1,num_models+1)] ################ Write to outputs #################### file_name = os.path.basename(options.out).split('.')[0] dir_path = os.path.dirname(options.out) insulin.write_csv(os.path.dirname(options.out),file_name) IO.write_pdb(insulin,dir_path,file_name,'all') else: print("ERROR: only two directions to add residues, Ndir and Cdir.") print(" The entries are not case sensitive.") else: message += 'The number of entries in the instruction field, followed by -a or --apn, is not right.\n' message += 'Type -h or --help for instructions\n' print(message) if __name__ == '__main__': main()
Just one day after their shocking pregnancy announcement, Blac Chyna and Rob Kardashian were no-shows at their planned club appearance. In Touch has learned that the expecting parents, who broke their pregnancy news on Friday, were a no-call, no-show at The Pool After Dark in Atlantic City, New Jersey on Saturday, May 7. The couple was expected to appear at the club's ninth anniversary bash, but sources confirm to In Touch that they did not show up and gave no notice. Since announcing the pregnancy — and reports suggest the Kardashian family was responsible for the leak — the couple has made no further statements. Blac was spotted driving to a local Los Angeles Burger King on Friday, but neither has spoken out since their bombshell. Now, it looks like the two are taking a step back from the spotlight.
import json import numpy from datetime import datetime from datetime import timedelta from next.utils import utils from next.dashboard.AppDashboard import AppDashboard class PoolBasedBinaryClassificationDashboard(AppDashboard): def __init__(self,db,ell): AppDashboard.__init__(self,db,ell) def get_app_supported_stats(self): """ Returns a list of dictionaries describing the identifier (stat_id) and necessary params inputs to be used when calling getStats Expected output (list of dicts, each with fields): (string) stat_id : the identiifer of the statistic (string) description : docstring of describing outputs (list of string) necessary_params : list where each string describes the type of param input like 'alg_label' or 'task' """ stat_list = self.get_supported_stats() stat = {} stat['stat_id'] = 'test_error_multiline_plot' stat['description'] = self.test_error_multiline_plot.__doc__ stat['necessary_params'] = ['alg_label'] stat_list.append(stat) return stat_list def test_error_multiline_plot(self,app_id,exp_uid): """ Description: Returns multiline plot where there is a one-to-one mapping lines to algorithms and each line indicates the error on the validation set with respect to number of reported answers Expected input: None Expected output (in dict): mpld3 plot object """ # get list of algorithms associated with project alg_list,didSucceed,message = self.db.get(app_id+':experiments',exp_uid,'alg_list') x_min = numpy.float('inf') x_max = -numpy.float('inf') y_min = numpy.float('inf') y_max = -numpy.float('inf') list_of_alg_dicts = [] for algorithm in alg_list: alg_id = algorithm['alg_id'] alg_uid = algorithm['alg_uid'] alg_label = algorithm['alg_label'] list_of_log_dict,didSucceed,message = self.ell.get_logs_with_filter(app_id+':ALG-EVALUATION',{'alg_uid':alg_uid}) list_of_log_dict = sorted(list_of_log_dict, key=lambda item: utils.str2datetime(item['timestamp']) ) x = [] y = [] for item in list_of_log_dict: num_reported_answers = item['num_reported_answers'] err = item['error'] x.append(num_reported_answers) y.append(err) alg_dict = {} alg_dict['legend_label'] = alg_label alg_dict['x'] = x alg_dict['y'] = y try: x_min = min(x_min,min(x)) x_max = max(x_max,max(x)) y_min = min(y_min,min(y)) y_max = max(y_max,max(y)) except: pass list_of_alg_dicts.append(alg_dict) import matplotlib.pyplot as plt import mpld3 fig, ax = plt.subplots(subplot_kw=dict(axisbg='#EEEEEE')) for alg_dict in list_of_alg_dicts: ax.plot(alg_dict['x'],alg_dict['y'],label=alg_dict['legend_label']) ax.set_xlabel('Number of answered triplets') ax.set_ylabel('Error on hold-out set') ax.set_xlim([x_min,x_max]) ax.set_ylim([y_min,y_max]) ax.grid(color='white', linestyle='solid') ax.set_title('Triplet Test Error', size=14) legend = ax.legend(loc=2,ncol=3,mode="expand") for label in legend.get_texts(): label.set_fontsize('small') plot_dict = mpld3.fig_to_dict(fig) plt.close() return plot_dict
Herman Tumurcuoglu is an internet pioneer, e-commerce consultant, conference speaker and lecturer. He has been in the web search industry since 1996 and has over 20 years of experience in Internet marketing. In 1996, he launched one of the web's first commercial metasearch engines, Mamma.com. His consulting career has been marked by success in bringing prominence on Google to companies such as Countrywide Financial and Ice.com. In recent years, Mr. Tumurcuoglu has used his expertise to help suppress the prominence of negative reviews and articles at the top of Google search results. He has been a sessional lecturer at several universities and colleges and is the co-founder of the ORM agency Searchreputation.net.
# Copyright (C) 2005 Jeremy S. Sanders # Email: Jeremy Sanders <jeremy@jeremysanders.net> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ############################################################################### from __future__ import division from .. import qtall as qt4 from . import utilfuncs import os.path import textwrap # where images are stored imagedir = os.path.join(utilfuncs.resourceDirectory, 'icons') _pixmapcache = {} def getPixmap(pixmap): """Return a cached QPixmap for the filename in the icons directory.""" if pixmap not in _pixmapcache: _pixmapcache[pixmap] = qt4.QPixmap(os.path.join(imagedir, pixmap)) return _pixmapcache[pixmap] def pixmapExists(pixmap): """Does the pixmap exist?""" return (pixmap in _pixmapcache or os.path.exists(os.path.join(imagedir, pixmap))) _iconcache = {} def getIcon(icon): """Return a cached QIconSet for the filename in the icons directory.""" if icon not in _iconcache: svg = os.path.join(imagedir, icon+'.svg') if os.path.exists(svg): filename = svg else: filename = os.path.join(imagedir, icon+'.png') _iconcache[icon] = qt4.QIcon(filename) return _iconcache[icon] def makeAction(parent, descr, menutext, slot, icon=None, key=None, checkable=False): """A quick way to set up an QAction object.""" a = qt4.QAction(parent) a.setText(menutext) a.setStatusTip(descr) a.setToolTip(textwrap.fill(descr, 25)) if slot: a.triggered.connect(slot) if icon: a.setIcon(getIcon(icon)) if key: a.setShortcut( qt4.QKeySequence(key) ) if checkable: a.setCheckable(True) return a def addToolbarActions(toolbar, actions, which): """Add actions listed in "which" from dict "actions" to toolbar "toolbar". """ for w in which: toolbar.addAction(actions[w]) def constructMenus(rootobject, menuout, menutree, actions): """Add menus to the output dict from the tree, listing actions from actions. rootobject: QMenu or QMenuBar to add menus to menuout: dict to store menus menutree: tree structure to create menus from actions: dict of actions to assign to menu items """ for menuid, menutext, actlist in menutree: # make a new menu if necessary if menuid not in menuout: menu = rootobject.addMenu(menutext) menuout[menuid] = menu else: menu = menuout[menuid] # add actions to the menu for action in actlist: if utilfuncs.isiternostr(action): # recurse for submenus constructMenus(menu, menuout, [action], actions) elif action == '': # blank means separator menu.addSeparator() else: # normal action menu.addAction(actions[action]) def populateMenuToolbars(items, toolbar, menus): """Construct the menus and toolbar from the list of items. 
toolbar is a QToolbar object menus is a dict of menus to add to Items are tuples consisting of: (actioname, status bar text, menu text, menu id, slot, icon filename, add to toolbar (bool), shortcut text) Returns a dict of actions """ actions = {} parent = toolbar.parent() for item in items: if len(item) == 1: if menus is not None: menus[item[0]].addSeparator() continue menuid, descr, menutext, menu, slot, icon, addtool, key = item # create action action = qt4.QAction(parent) action.setText(menutext) action.setStatusTip(descr) action.setToolTip(descr) # set shortcut if set if key: action.setShortcut( qt4.QKeySequence(key) ) # load icon if set if icon: action.setIcon(getIcon(icon)) if callable(slot): # connect the action to the slot action.triggered.connect(slot) # add to menu if menus is not None: menus[menu].addAction(action) elif slot is not None: if menus is not None: submenu = menus[menu].addMenu(menutext) menus["%s.%s" % (menu ,menuid)] = submenu populateMenuToolbars(slot, toolbar, menus) else: if menus is not None: menus[menu].addAction(action) # add to toolbar if addtool and toolbar is not None: toolbar.addAction(action) # save for later actions[menuid] = action return actions
Added on March 20, 2019 by Eme. The two-week residency at Qtheatre at the Joan Sutherland Performing Arts Centre was one of my favourite projects to have been a part of. Thank you to kapatid Jules Orcullo for your incredible storytelling in theatre and for sharing the stories that run through our interconnected lived experiences with Kapwa. Twenty One Sixty Five is still in its developing stages. Please get in contact if you would like to be involved in sharing stories together over food.
# "Copyright (c) 2000-2003 The Regents of the University of California. # All rights reserved. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose, without fee, and without written agreement # is hereby granted, provided that the above copyright notice, the following # two paragraphs and the author appear in all copies of this software. # # IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR # DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT # OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY # OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY # AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS # ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO # PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS." # # @author Kamin Whitehouse # from jpype import jimport, JInt from pytos.util.JavaInheritor import JavaInheritor import pytos.Comm as Comm from copy import * drip = jimport.net.tinyos.drip def getDripObject(app, motecom=None, channel=None) : """This function returns the drip object stored in app that is connected to optional motecom with a optional channel. If motecome and channel are specified but there is no drip object with these specs, it creates one""" drips = [] for conn in app.connections : if isinstance(conn, Drip) : # if motecom == None or conn.motecom == motecom : #we need this funtion in java # if channel == None or drip.channel == channel : #we need this funtion in java drips.append( conn ) if len(drips) == 0 and motecom != None and channel != None : drip = Drip(app, channel, app.motecom) app.connections.append(drip) drips.append(drip) return drips class Drip( JavaInheritor ) : """The Drip object inherits from the Drip.java object. It overrides the constructor, and the send and sendwakeup commands to handle python TosMsg objects. usage: drip = Drip(app, Channel, 'sf@localhost:9001') drip = Drip(app, Channel, moteif) drip.send(myTosMsg) drip.sendWakeup(myTosMsg) ... 
(plus all other functions inherited from the java object) For interface-compatbility with comm, you can also send a dest address, which is ignored: drip.send(addr, myTosMsg) """ def __init__( self , app, channel, moteIF ) : self.app = app if type(moteIF) == str : moteIF = Comm.openMoteIF(moteIF, app) dripObj = drip.Drip(channel, moteIF) JavaInheritor.__init__(self, (dripObj,) ) def send( self , msg, *comm ) : #For interface-compatbility with comm, you can also send a dest address, which is ignored: if type(msg) == int and len(comm) > 0: msg = comm[0] migMsg = msg.createMigMsg() self.migMsgSend(migMsg, msg.size) def sendWakeup( self , msg, *comm ) : migMsg = msg.createMigMsg() self.migMsgSendWakeup(migMsg, msg.size) def migMsgSend( self , msg, size, *comm ) : self._javaParents[0].send(msg, JInt(size)) def migMsgSendWakeup( self , msg, size, *comm ) : self._javaParents[0].sendWakeup(msg, size) def register( self , msg , callback, *comm ) : comm = Comm.getCommObject(self.app) comm.register(self.app.msgs.DripMsg, DripMsgPeeler(self.app, msg, callback)) def unregister( self , msg , callback , *comm ) : comm = Comm.getCommObject(self.app) comm.unregister(self.app.msgs.DripMsg, DripMsgPeeler(self.app, msg, callback)) def getCommObject(self, motecom) : """This function returns the comm object stored in app. If there is none, it creates one""" for conn in self.app.connections : if isinstance(conn, Comm.Comm) : if motecom not in conn._connected : conn.connect(motecom) return conn comm = Comm.Comm() comm.connect(self.motecom) self.app.connections.append(comm) return comm class DripMsgPeeler( Comm.MessageListener ) : """This is a wrapper callback object that peels the Drip headers out of a DripMsg mig object and creates a python TosMsg with the remaining data """ def __init__(self, app, msg, callback) : self.app = app self.msg = msg Comm.MessageListener.__init__(self, callback ) self._firstHashFunction = self._hashFunction self._hashFunction = self._combinedHash def _combinedHash(self): return self._firstHashFunction() + self.msg.amType #this will have to change def messageReceived( self , addr , dripMsg ) : if dripMsg.metadata.id == self.msg.amType : try: msg = deepcopy(self.msg) bytes = dripMsg.data.getBytes() msg.setBytes( bytes ) msg.parentMsg = dripMsg self.callback( addr, msg ) except Exception, inst: print inst raise
Download the White Tulip live wallpaper for iPhone. You will certainly enjoy its fascinating looks. At PHONEKY, you can download live wallpapers for Android and iOS mobile devices free of charge. The beautiful look of this live wallpaper will keep you captivated for a very long time. At PHONEKY, you will find many other live wallpapers and animations of different genres, from Nature and Sports to Cars and Funny iPhone Live Wallpapers. You can download Live Wallpapers to your iPhone via the PHONEKY iOS app. To see the Top 10 live wallpapers for your iPhone, just sort live wallpapers by popularity.
#!/usr/bin/python3 # TODO: type check inputs # TODO: generate an easily reviewable section of transitions import csv import datetime import os import shutil import subprocess import sys import decimal from optparse import OptionParser class Song: def __init__(self, num=None, file=None, start_time=None, fadein_length=None, end_time=None, fadeout_length=None, prev_talkover=None, title='', artist=''): self.num = num self.file = file self.start_time = start_time self.fadein_length = fadein_length self.end_time = end_time self.fadeout_length = fadeout_length self.prev_talkover = prev_talkover self.title = title self.artist = artist def main(argv): global DEBUG DEBUG = False global VERBOSE VERBOSE = False global pipeoutput pipeoutput = subprocess.DEVNULL usage = """usage: %prog [options]\n column headers for playlist.csv are\n file, start_time, fadein_length, end_time, fadeout_length, title, artist, prev_talkover\n of which only file is required""" parser = OptionParser(usage=usage) parser.add_option("-a", "--album", dest="album", metavar="Album Name", help="If --id3tag is selected, use this for album title") parser.add_option("-b", "--begin", type="int", dest="begin", metavar="STARTING_TRACK_NUM", help="Process only from this track in cuesheet") parser.add_option("-d", "--debug", action="store_true", dest="DEBUG", help="execute extra, debugging instructions and reports") parser.add_option("-e", "--end", type="int", dest="end", metavar="ENDING_TRACK_NUM", help="Process only to this track in cuesheet") parser.add_option("-i", "--id3tag", action="store_true", dest="id3tag", help="Add ID3 information. Uses title if present in\ cue sheet, or else file name.") parser.add_option("-m", "--mp3", action="store_true", dest="output_mp3", help="Output MP3 files instead of wav") parser.add_option("-o", "--output_dir", dest="output_dir", default="output", metavar="RELATIVE_PATH", help="Location of output directory relative to working directory.") parser.add_option("-p", "--playlist", dest="playlist_path", metavar="PATH", default="playlist.csv", help="Read playlist csv from PATH.") parser.add_option("-r", "--version_tag", dest="version", metavar="VERSION_STRING", help="Add this version number string to the ID3 comment tag.") parser.add_option("-s", "--source_dir", dest="source_dir", metavar="PATH", help="Path prepended to each filename in playlist.") parser.add_option("-v", "--verbose", action="store_true", dest="VERBOSE", help="Show extra messages during execution.") (options, args) = parser.parse_args() if options.DEBUG: DEBUG = True pipeoutput = None if options.VERBOSE: VERBOSE = True working_path = os.getcwd() temp_path = subprocess.run('mktemp -d', shell=True, check=True, stdout=subprocess.PIPE)\ .stdout.decode("utf-8").strip() output_dir = os.path.join(working_path, options.output_dir) output_dir_clear_wildcard = os.path.join(output_dir, '*') # TODO: should probably require a flag to do this without warning if os.path.isdir(output_dir): subprocess.call('rm {0}'.format(output_dir_clear_wildcard), shell=True) if options.source_dir: if os.path.isdir(options.source_dir): source_dir = os.path.join(working_path, options.source_dir) else: print('Output directory {0} is not a directory'.format(source_dir)) sys.exit(2) else: source_dir = '' try: id3tag = options.id3tag except AttributeError: id3tag = None try: album = options.album except AttributeError: album = None try: version_tag = options.version_tag except AttributeError: version_tag = None if not os.path.isfile(options.playlist_path): print('Missing 
playlist file "{0}"'.format(options.playlist_path)) sys.exit(2) playlist = load_playlist(options.playlist_path, source_dir, options.begin, options.end) convert_and_renumber(playlist, temp_path) normalize(temp_path) fade_and_crop(playlist, temp_path) if options.output_mp3: convert_to_mp3(playlist, temp_path, album, id3tag, version_tag) move_to_output(temp_path, output_dir) if os.path.isdir(temp_path): shutil.rmtree(temp_path) log('Done') def load_playlist(playlist_file, source_dir, begin, end): playlist = [] with open(playlist_file, 'rt') as f: reader = csv.DictReader(f) counter = 0 for line in reader: try: file = line['file'] except KeyError: file = '' if not file or file[0] == '#': continue else: file = os.path.join(source_dir, file) if not os.path.isfile(file): print('Could not find file {0}'.format(file)) continue try: start_time = line['start_time'] except KeyError: start_time = '' try: fadein_length = line['fadein_length'] except KeyError: fadein_length = '' try: end_time = line['end_time'] except KeyError: end_time = '' try: fadeout_length = line['fadeout_length'] except KeyError: fadeout_length = '' try: title = line['title'] except KeyError: title = '' try: artist = line['artist'] except KeyError: artist = '' try: prev_talkover = decimal.Decimal(line['prev_talkover']) except (KeyError, TypeError, decimal.InvalidOperation): prev_talkover = '' playlist.append(Song(counter, file, start_time, fadein_length, end_time, fadeout_length, prev_talkover, title, artist)) counter += 1 if begin or end: if not begin: begin = 0 if not end: end = len(playlist) playlist = playlist[begin:end] return playlist def convert_and_renumber(playlist, temp_path): for song in playlist: from_path = song.file to_path = num_concat(temp_path, song.num, 'wav') log('Converting {0}'.format(song.file)) subprocess.call('ffmpeg -i "{0}" -c:a pcm_s16le -vn "{1}"'. format(from_path, to_path), shell=True, stdout=pipeoutput, stderr=pipeoutput) def fade_and_crop(playlist, temp_path): # sox trim: Any number of positions may be given; ... The effect # then alternates between copying and discarding audio at each # position. If a position is preceded by an equals or minus sign, # it is interpreted relative to the beginning or the end of the # audio, respectively. ... Otherwise, it is considered an offset # from the last position, or from the start of audio for the first # parameter. Using a value of 0 for the first position parameter # allows copying from the beginning of the audio. # sox fade: fade [type] fade-in-length [stop-time # [fade-out-length]] Apply a fade effect to the beginning, end, or # both of the audio. An optional type can be specified to select # the shape of the fade curve: ... h for half a sine wave, l for # logarithmic .... A fade-in starts from the first sample and # ramps the signal level from 0 to full volume over fade-in-length # seconds. Specify 0 seconds if no fade-in is wanted. For # fade-outs, the audio will be truncated at stop-time and the # signal level will be ramped from full volume down to 0 starting # at fade-out-length seconds before the stop-time. If # fade-out-length is not specified, it defaults to the same value # as fade-in-length. No fade-out is performed if stop-time is not # specified. 
for song in playlist: from_path = num_concat(temp_path, song.num, 'wav') sox_path = num_concat(temp_path, song.num, 'sox.wav') log('Fading and cropping {0}'.format(song.file)) trim_command = '' if song.start_time or song.end_time: if song.start_time: start_time = song.start_time else: start_time = '0' if song.fadein_length: fadein_length = song.fadein_length fadein_command = ' fade l {0}'.format(fadein_length) else: fadein_command = '' if song.end_time: end_time = song.end_time trim_command = ' trim {0} ={1}'.format(start_time, end_time) else: fadeout_length = '' trim_command = ' trim {0}'.format(start_time) if song.fadeout_length: fadeout_length = song.fadeout_length fadeout_command = ' fade h 0 {0} {1}'.\ format(end_time, fadeout_length) else: fadeout_command = '' # fadeout, trim, and fadein commands are ordered so that # all of the timing inputs can remain relative to the # original file start time. sox_command = 'sox "{0}" "{1}" {2} {3} {4}'.format(from_path, sox_path, fadeout_command, trim_command, fadein_command) if DEBUG: print(sox_command) subprocess.call(sox_command, shell=True, stdout=pipeoutput, stderr=pipeoutput) subprocess.call('mv {0} {1}'.format(sox_path, from_path), shell=True, stdout=pipeoutput, stderr=pipeoutput) # TODO: use python function for this? if song.prev_talkover: if song.num == 0: continue # can't fade the first song with a previous song prev_num = song.num - 1 first_file = num_concat(temp_path, prev_num, 'wav') second_file = num_concat(temp_path, song.num, 'wav') fadeout_file = os.path.join(temp_path, 'fadeout.wav') fadein_file = os.path.join(temp_path, 'fadein.wav') crossfade_file = os.path.join(temp_path, 'crossfade.wav') crossfadeout_file = os.path.join(temp_path, 'crossfadeout.wav') crossfadein_file = os.path.join(temp_path, 'crossfadein.wav') first_inter_file = os.path.join(temp_path, 'first_inter.wav') second_inter_file = os.path.join(temp_path, 'second_inter.wav') first_final_file = os.path.join(temp_path, 'first_final.wav') second_final_file = os.path.join(temp_path, 'second_final.wav') first_length = subprocess.call('sox "{0}" -n stat | grep Length | cut -d : -f 2 | cut -f 1'.format(first_file), shell=True, stdout=pipeoutput, stderr=pipeoutput) trim_length = first_length - song.prev_talkover crossfade_split_length = song.prev_talkover / decimal.Decimal('2') subprocess.call('sox {0} -r 44100 {1} trim {2}'.format(first_file, fadeout_file, trim_length), shell=True, stdout=pipeoutput, stderr=pipeoutput) subprocess.call('sox {0} -r 44100 {1} trim 0 {2}'.format(second_file, fadein_file, song.prev_talkover), shell=True, stdout=pipeoutput, stderr=pipeoutput) subprocess.call('sox -V -m -v 1.0 {0} -v 1.0 {1} {2}'.format(fadeout_file, fadein_file, crossfade_file), shell=True, stdout=pipeoutput, stderr=pipeoutput) subprocess.call('sox {0} {1} trim 0 {2}'.format(crossfade_file, crossfadeout_file, crossfade_split_length), shell=True, stdout=pipeoutput, stderr=pipeoutput) subprocess.call('sox {0} {1} trim {2}'.format(crossfade_file, crossfadein_file, crossfade_split_length), shell=True, stdout=pipeoutput, stderr=pipeoutput) subprocess.call('sox {0} -r 44100 {1} trim 0 {2}'.format(first_file, first_inter_file, trim_length), shell=True, stdout=pipeoutput, stderr=pipeoutput) subprocess.call('sox {0} -r 44100 {1} trim {2}'.format(second_file, second_inter_file, song.prev_talkover), shell=True, stdout=pipeoutput, stderr=pipeoutput) subprocess.call('sox -V {0} {1} {2}'.format(first_inter_file, crossfadeout_file, first_final_file), shell=True, stdout=pipeoutput, 
stderr=pipeoutput) subprocess.call('sox -V {0} {1} {2}'.format(crossfadein_file, second_inter_file, second_final_file), shell=True, stdout=pipeoutput, stderr=pipeoutput) subprocess.call('mv {0} {1}'.format(first_final_file, first_file), shell=True, stdout=pipeoutput, stderr=pipeoutput) subprocess.call('mv {0} {1}'.format(second_final_file, second_file), shell=True, stdout=pipeoutput, stderr=pipeoutput) try: os.remove(fadeout_file) os.remove(fadein_file) os.remove(crossfade_file) os.remove(crossfadeout_file) os.remove(crossfadein_file) os.remove(first_inter_file) os.remove(second_inter_file) except FileNotFoundError: pass def log(message): if VERBOSE: print('{0}: {1}'. format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), message)) def num_concat(temp_path, num, suffix): num_pad = '{:0>2}'.format(num) return os.path.join(temp_path, '{0}.{1}'.format(num_pad, suffix)) def normalize(temp_path): orig_path = os.getcwd() os.chdir(temp_path) log('Normalizing') subprocess.call('normalize-audio -a -8dB *wav', shell=True, stdout=pipeoutput, stderr=pipeoutput) os.chdir(orig_path) def convert_to_mp3(playlist, temp_path, album, id3tag, version): track_count = len(playlist) for i, song in enumerate(playlist): from_path = num_concat(temp_path, song.num, 'wav') output_path = num_concat(temp_path, song.num, 'mp3') log('Converting to mp3'.format(song)) subprocess.call('lame -vbr-new {0} {1}'.format(from_path, output_path), shell=True, stdout=pipeoutput, stderr=pipeoutput) if id3tag: if version: version_arg = '--comment="version {0}"'.format(version) else: version_arg = '' num_pad = '{:0>2}'.format(i + 1) if song.title: track_title = song.title else: track_title = 'Track {0}'.format(num_pad) subprocess.call('eyeD3 --album="{0}" --title="{1}" --artist="{2}" --track="{3}" --track-total="{4}" {5} "{6}"'.format(album, track_title, song.artist, num_pad, track_count, version_arg, output_path), # noqa shell=True, stdout=pipeoutput, stderr=pipeoutput) os.remove(from_path) def move_to_output(temp_path, output_path): from_path = os.path.join(temp_path, '*') if not os.path.isdir(output_path): os.mkdir(output_path) subprocess.call('mv {0} {1}'.format(from_path, output_path), shell=True, stdout=pipeoutput, stderr=pipeoutput) if __name__ == "__main__": main(sys.argv[1:])
Each Swing look and feel has a long list of User Interface Defaults (UI defaults) used to initialize Java components with default fonts, colors, icons, borders, and more. You can get and set these defaults to tune your application's overall appearance. This article shows how to use these defaults, surveys the principal look and feels for Java on Windows, Mac OS X, and Linux, and compiles the names and data types of their many UI defaults.

Zebra stripes are subtle alternating stripes painted behind list items in a graphical user interface (GUI). They improve the readability of wide and long lists, but the JList class in Java's Swing doesn't support them. This tip shows how to extend JList to add zebra background stripes.

Table zebra stripes are alternating subtle background stripes painted behind the table's rows in a graphical user interface (GUI). They improve the readability of long rows in wide tables, but the JTable class in Java's Swing doesn't support them. This tip shows how to extend JTable to add zebra background stripes.

Zebra stripes in a graphical user interface (GUI) are subtle background stripes painted behind the rows of a hierarchical list, or tree. They improve the readability of wide tree rows, but the JTree class in Java's Swing doesn't support them. This tip shows how to extend JTree to add zebra background stripes.

The starting point for building a link checker, web spider, or web page analyzer is, of course, to get the web page from the web server. Java's java.net package includes classes to manage URLs and to open web server connections. This tip shows how to use them to get a text, image, audio, or data file from a web server.

Performance optimization requires that you measure the time to perform a task, then try algorithm and coding changes to make the task faster. Prior to Java 5, the only way to time a task was to measure wall clock time. Unfortunately, this gives inaccurate results when there is other activity on the system (and there always is). Java 5 introduced the java.lang.management package and methods to report CPU and user time per thread. These times are not affected by other system activity, making them just what we need for benchmarking. This article shows how to use the java.lang.management package to benchmark your application (a minimal sketch appears after these summaries).

Java's threads are essential for building complex applications, but thread control is split across several classes in different packages added at different times in the JDK's history. This tip shows how to connect these classes together to find threads and thread groups, and get thread information.

Java has several ways to parse integers from strings. Performance differences between these methods can be significant when parsing a large number of integers. Doing your own integer parsing can provide an important speed boost. This tip looks at five ways to parse integers, compares their features, and benchmarks them to see which method is the fastest.

Java has several classes for reading files, with and without buffering, random access, thread safety, and memory mapping. Some of these are much faster than the others. This article benchmarks 13 ways to read bytes from a file and shows which ways are the fastest.
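The per-thread timing described in the java.lang.management summary above comes down to a few calls on ThreadMXBean. The following is a minimal sketch rather than the article's own benchmark harness; the class name and the throwaway square-root loop are placeholders for whatever task you actually want to time.

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

public class CpuTimeBenchmark {
    public static void main(String[] args) {
        ThreadMXBean bean = ManagementFactory.getThreadMXBean();
        if (!bean.isCurrentThreadCpuTimeSupported()) {
            System.err.println("Per-thread CPU time is not supported on this JVM.");
            return;
        }
        long startCpu = bean.getCurrentThreadCpuTime();    // CPU time in nanoseconds
        long startUser = bean.getCurrentThreadUserTime();  // user-mode time in nanoseconds

        // Placeholder workload; swap in the task you actually want to benchmark.
        double sum = 0.0;
        for (int i = 1; i < 10000000; i++) {
            sum += Math.sqrt(i);
        }

        long cpuNanos = bean.getCurrentThreadCpuTime() - startCpu;
        long userNanos = bean.getCurrentThreadUserTime() - startUser;
        System.out.printf("sum=%.1f  cpu=%.1f ms  user=%.1f ms%n",
                sum, cpuNanos / 1e6, userNanos / 1e6);
    }
}

Because the reported figures count only this thread's CPU and user time, they stay close to the true cost of the loop even on a loaded machine, which is exactly the property the article is after.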
User interface themes for Mac OS X, Windows, and Linux define the OS-wide look of window frames, buttons, scrollbars, and the like. Preference settings enable users to tune color schemes to taste. For Java applications, changing the theme also sets colors for pre-defined java.awt.SystemColor objects that indicate the color of buttons, text, and more. In principle, applications may use these colors to customize their user interfaces to match the current theme. In practice, there are quite a few problems with doing this. This article begins a series that discusses Java's SystemColors and their use and problems on different OSes. Series articles also provide color swatches, RGB/HSV values, and downloadable color lists for SystemColors for each of the standard themes on the Mac, Windows, and Linux. The color swatches provide a resource for comparing theme colors and creating custom Java components that match the themes.
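As a small, hypothetical illustration of the SystemColor objects just mentioned, the sketch below prints the RGB values currently backing a handful of them; running it under different OS themes shows the values track the theme. It is an inspection helper invented for this summary, not part of the article series.

import java.awt.Color;
import java.awt.SystemColor;

public class SystemColorDump {
    public static void main(String[] args) {
        // A few of the pre-defined desktop colors; each one tracks the current OS theme.
        SystemColor[] samples = {
            SystemColor.control, SystemColor.controlText,
            SystemColor.window, SystemColor.windowText,
            SystemColor.text, SystemColor.textText
        };
        String[] names = {
            "control", "controlText", "window", "windowText", "text", "textText"
        };
        for (int i = 0; i < samples.length; i++) {
            // getRGB() reports the concrete color currently backing the symbolic SystemColor.
            Color c = new Color(samples[i].getRGB());
            System.out.printf("%-12s #%02X%02X%02X%n",
                    names[i], c.getRed(), c.getGreen(), c.getBlue());
        }
    }
}

Whether Swing components actually honor these values depends on the installed look and feel, which draws most colors from its own UI defaults table; that mismatch is one flavor of the practical problems the series goes on to discuss.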
import os import json import unittest from mock import Mock from dmcontent.content_loader import ContentLoader from werkzeug.datastructures import MultiDict from app.presenters.search_presenters import filters_for_lot, set_filter_states content_loader = ContentLoader('tests/fixtures/content') content_loader.load_manifest('g6', 'data', 'manifest') questions_builder = content_loader.get_builder('g6', 'manifest') def _get_fixture_data(): test_root = os.path.abspath( os.path.join(os.path.dirname(__file__), "..") ) fixture_path = os.path.join( test_root, 'fixtures', 'search_results_fixture.json' ) with open(fixture_path) as fixture_file: return json.load(fixture_file) def _get_fixture_multiple_pages_data(): test_root = os.path.abspath( os.path.join(os.path.dirname(__file__), "..") ) fixture_path = os.path.join( test_root, 'fixtures', 'search_results_multiple_pages_fixture.json' ) with open(fixture_path) as fixture_file: return json.load(fixture_file) class TestSearchFilters(unittest.TestCase): def _get_filter_group_by_label(self, lot, label): filter_groups = filters_for_lot(lot, questions_builder) for filter_group in filter_groups: if filter_group['label'] == label: return filter_group def _get_request_for_params(self, params): return Mock(args=MultiDict(params)) def test_get_filter_groups_from_questions_with_radio_filters(self): radios_filter_group = self._get_filter_group_by_label( 'saas', 'Radios example' ) self.assertEqual({ 'label': 'Radios example', 'filters': [ { 'label': 'Option 1', 'name': 'radiosExample', 'id': 'radiosExample-option-1', 'value': 'option 1', }, { 'label': 'Option 2', 'name': 'radiosExample', 'id': 'radiosExample-option-2', 'value': 'option 2', } ] }, radios_filter_group) def test_get_filter_groups_from_questions_with_checkbox_filters(self): checkboxes_filter_group = self._get_filter_group_by_label( 'saas', 'Checkboxes example' ) self.assertEqual({ 'label': 'Checkboxes example', 'filters': [ { 'label': 'Option 1', 'name': 'checkboxesExample', 'id': 'checkboxesExample-option-1', 'value': 'option 1', }, { 'label': 'Option 2', 'name': 'checkboxesExample', 'id': 'checkboxesExample-option-2', 'value': 'option 2', } ] }, checkboxes_filter_group) def test_get_filter_groups_from_questions_with_boolean_filters(self): booleans_filter_group = self._get_filter_group_by_label( 'saas', 'Booleans example' ) self.assertEqual({ 'label': 'Booleans example', 'filters': [ { 'label': 'Option 1', 'name': 'booleanExample1', 'id': 'booleanExample1', 'value': 'true', }, { 'label': 'Option 2', 'name': 'booleanExample2', 'id': 'booleanExample2', 'value': 'true', } ] }, booleans_filter_group) def test_request_filters_are_set(self): search_filters = filters_for_lot('saas', questions_builder) request = self._get_request_for_params({ 'q': 'email', 'booleanExample1': 'true' }) set_filter_states(search_filters, request) self.assertEqual(search_filters[0]['filters'][0]['name'], 'booleanExample1') self.assertEqual(search_filters[0]['filters'][0]['checked'], True) self.assertEqual(search_filters[0]['filters'][1]['name'], 'booleanExample2') self.assertEqual(search_filters[0]['filters'][1]['checked'], False) def test_filter_groups_have_correct_default_state(self): request = self._get_request_for_params({ 'q': 'email', 'lot': 'paas' }) search_filters = filters_for_lot('paas', questions_builder) set_filter_states(search_filters, request) self.assertEqual( search_filters[0], { 'label': 'Booleans example', 'filters': [ { 'checked': False, 'label': 'Option 1', 'name': 'booleanExample1', 'id': 'booleanExample1', 
'value': 'true', }, { 'checked': False, 'label': 'Option 2', 'name': 'booleanExample2', 'id': 'booleanExample2', 'value': 'true', } ] } ) def test_filter_groups_have_correct_state_when_changed(self): request = self._get_request_for_params({ 'q': 'email', 'lot': 'paas', 'booleanExample1': 'true' }) search_filters = filters_for_lot('paas', questions_builder) set_filter_states(search_filters, request) self.assertEqual( search_filters[0], { 'label': 'Booleans example', 'filters': [ { 'checked': True, 'label': 'Option 1', 'name': 'booleanExample1', 'id': 'booleanExample1', 'value': 'true', }, { 'checked': False, 'label': 'Option 2', 'name': 'booleanExample2', 'id': 'booleanExample2', 'value': 'true', } ] } ) def test_no_lot_is_the_same_as_all(self): all_filters = self._get_filter_group_by_label( 'all', 'Radios example' ) no_lot_filters = self._get_filter_group_by_label( None, 'Radios example' ) self.assertTrue(all_filters) self.assertEqual(all_filters, no_lot_filters) def test_instance_has_correct_filter_groups_for_paas(self): search_filters = filters_for_lot('paas', questions_builder) filter_group_labels = [ group['label'] for group in search_filters ] self.assertTrue('Booleans example' in filter_group_labels) self.assertTrue('Checkboxes example' in filter_group_labels) self.assertTrue('Radios example' in filter_group_labels) def test_instance_has_correct_filter_groups_for_iaas(self): search_filters = filters_for_lot('iaas', questions_builder) filter_group_labels = [ group['label'] for group in search_filters ] self.assertFalse('Booleans example' in filter_group_labels) self.assertTrue('Checkboxes example' in filter_group_labels) self.assertTrue('Radios example' in filter_group_labels)
In 2017, Theresa K. Nabors was a General Attorney at the U.S. Tax Court in Washington, District of Columbia. Theresa K. Nabors is a GS-14 under the General Schedule payscale. Theresa K. Nabors's 2017 pay is 14% lower than the average General Attorney across all agencies and 41% higher than the average pay of a GS employee at the U.S. Tax Court.
"""Certificates API This is a Python API for generating certificates asynchronously. Other Django apps should use the API functions defined in this module rather than importing Django models directly. """ import logging from django.conf import settings from django.urls import reverse from django.db.models import Q from opaque_keys.edx.django.models import CourseKeyField from opaque_keys.edx.keys import CourseKey from branding import api as branding_api from lms.djangoapps.certificates.models import ( CertificateGenerationConfiguration, CertificateGenerationCourseSetting, CertificateInvalidation, CertificateStatuses, CertificateTemplate, CertificateTemplateAsset, ExampleCertificateSet, GeneratedCertificate, certificate_status_for_student ) from lms.djangoapps.certificates.queue import XQueueCertInterface from eventtracking import tracker from openedx.core.djangoapps.content.course_overviews.models import CourseOverview from util.organizations_helpers import get_course_organization_id from xmodule.modulestore.django import modulestore log = logging.getLogger("edx.certificate") MODES = GeneratedCertificate.MODES def is_passing_status(cert_status): """ Given the status of a certificate, return a boolean indicating whether the student passed the course. This just proxies to the classmethod defined in models.py """ return CertificateStatuses.is_passing_status(cert_status) def format_certificate_for_user(username, cert): """ Helper function to serialize an user certificate. Arguments: username (unicode): The identifier of the user. cert (GeneratedCertificate): a user certificate Returns: dict """ try: return { "username": username, "course_key": cert.course_id, "type": cert.mode, "status": cert.status, "grade": cert.grade, "created": cert.created_date, "modified": cert.modified_date, "is_passing": is_passing_status(cert.status), "is_pdf_certificate": bool(cert.download_url), "download_url": ( cert.download_url or get_certificate_url(cert.user.id, cert.course_id, user_certificate=cert) if cert.status == CertificateStatuses.downloadable else None ), } except CourseOverview.DoesNotExist: return None def get_certificates_for_user(username): """ Retrieve certificate information for a particular user. Arguments: username (unicode): The identifier of the user. Returns: list Example Usage: >>> get_certificates_for_user("bob") [ { "username": "bob", "course_key": CourseLocator('edX', 'DemoX', 'Demo_Course', None, None), "type": "verified", "status": "downloadable", "download_url": "http://www.example.com/cert.pdf", "grade": "0.98", "created": 2015-07-31T00:00:00Z, "modified": 2015-07-31T00:00:00Z } ] """ certs = [] # Checks if certificates are not None before adding them to list for cert in GeneratedCertificate.eligible_certificates.filter(user__username=username).order_by("course_id"): formatted_cert = format_certificate_for_user(username, cert) if formatted_cert: certs.append(formatted_cert) return certs def get_certificate_for_user(username, course_key): """ Retrieve certificate information for a particular user for a specific course. Arguments: username (unicode): The identifier of the user. course_key (CourseKey): A Course Key. 
Returns: dict """ try: cert = GeneratedCertificate.eligible_certificates.get( user__username=username, course_id=course_key ) except GeneratedCertificate.DoesNotExist: return None return format_certificate_for_user(username, cert) def get_recently_modified_certificates(course_keys=None, start_date=None, end_date=None): """ Returns a QuerySet of GeneratedCertificate objects filtered by the input parameters and ordered by modified_date. """ cert_filter_args = {} if course_keys: cert_filter_args['course_id__in'] = course_keys if start_date: cert_filter_args['modified_date__gte'] = start_date if end_date: cert_filter_args['modified_date__lte'] = end_date return GeneratedCertificate.objects.filter(**cert_filter_args).order_by('modified_date') # pylint: disable=no-member def generate_user_certificates(student, course_key, course=None, insecure=False, generation_mode='batch', forced_grade=None): """ It will add the add-cert request into the xqueue. A new record will be created to track the certificate generation task. If an error occurs while adding the certificate to the queue, the task will have status 'error'. It also emits `edx.certificate.created` event for analytics. Args: student (User) course_key (CourseKey) Keyword Arguments: course (Course): Optionally provide the course object; if not provided it will be loaded. insecure - (Boolean) generation_mode - who has requested certificate generation. Its value should `batch` in case of django command and `self` if student initiated the request. forced_grade - a string indicating to replace grade parameter. if present grading will be skipped. """ xqueue = XQueueCertInterface() if insecure: xqueue.use_https = False if not course: course = modulestore().get_course(course_key, depth=0) generate_pdf = not has_html_certificates_enabled(course) cert = xqueue.add_cert( student, course_key, course=course, generate_pdf=generate_pdf, forced_grade=forced_grade ) # If cert_status is not present in certificate valid_statuses (for example unverified) then # add_cert returns None and raises AttributeError while accesing cert attributes. if cert is None: return if CertificateStatuses.is_passing_status(cert.status): emit_certificate_event('created', student, course_key, course, { 'user_id': student.id, 'course_id': unicode(course_key), 'certificate_id': cert.verify_uuid, 'enrollment_mode': cert.mode, 'generation_mode': generation_mode }) return cert.status def regenerate_user_certificates(student, course_key, course=None, forced_grade=None, template_file=None, insecure=False): """ It will add the regen-cert request into the xqueue. A new record will be created to track the certificate generation task. If an error occurs while adding the certificate to the queue, the task will have status 'error'. Args: student (User) course_key (CourseKey) Keyword Arguments: course (Course): Optionally provide the course object; if not provided it will be loaded. 
grade_value - The grade string, such as "Distinction" template_file - The template file used to render this certificate insecure - (Boolean) """ xqueue = XQueueCertInterface() if insecure: xqueue.use_https = False if not course: course = modulestore().get_course(course_key, depth=0) generate_pdf = not has_html_certificates_enabled(course) log.info( u"Started regenerating certificates for user %s in course %s with generate_pdf status: %s", student.username, unicode(course_key), generate_pdf ) return xqueue.regen_cert( student, course_key, course=course, forced_grade=forced_grade, template_file=template_file, generate_pdf=generate_pdf ) def certificate_downloadable_status(student, course_key): """ Check the student existing certificates against a given course. if status is not generating and not downloadable or error then user can view the generate button. Args: student (user object): logged-in user course_key (CourseKey): ID associated with the course Returns: Dict containing student passed status also download url, uuid for cert if available """ current_status = certificate_status_for_student(student, course_key) # If the certificate status is an error user should view that status is "generating". # On the back-end, need to monitor those errors and re-submit the task. response_data = { 'is_downloadable': False, 'is_generating': True if current_status['status'] in [CertificateStatuses.generating, CertificateStatuses.error] else False, 'is_unverified': True if current_status['status'] == CertificateStatuses.unverified else False, 'download_url': None, 'uuid': None, } may_view_certificate = CourseOverview.get_from_id(course_key).may_certify() if current_status['status'] == CertificateStatuses.downloadable and may_view_certificate: response_data['is_downloadable'] = True response_data['download_url'] = current_status['download_url'] or get_certificate_url(student.id, course_key) response_data['uuid'] = current_status['uuid'] return response_data def set_cert_generation_enabled(course_key, is_enabled): """Enable or disable self-generated certificates for a course. There are two "switches" that control whether self-generated certificates are enabled for a course: 1) Whether the self-generated certificates feature is enabled. 2) Whether self-generated certificates have been enabled for this particular course. The second flag should be enabled *only* when someone has successfully generated example certificates for the course. This helps avoid configuration errors (for example, not having a template configured for the course installed on the workers). The UI for the instructor dashboard enforces this constraint. Arguments: course_key (CourseKey): The course identifier. Keyword Arguments: is_enabled (boolean): If provided, enable/disable self-generated certificates for this course. """ CertificateGenerationCourseSetting.set_self_generatation_enabled_for_course(course_key, is_enabled) cert_event_type = 'enabled' if is_enabled else 'disabled' event_name = '.'.join(['edx', 'certificate', 'generation', cert_event_type]) tracker.emit(event_name, { 'course_id': unicode(course_key), }) if is_enabled: log.info(u"Enabled self-generated certificates for course '%s'.", unicode(course_key)) else: log.info(u"Disabled self-generated certificates for course '%s'.", unicode(course_key)) def is_certificate_invalid(student, course_key): """Check that whether the student in the course has been invalidated for receiving certificates. 
Arguments: student (user object): logged-in user course_key (CourseKey): The course identifier. Returns: Boolean denoting whether the student in the course is invalidated to receive certificates """ is_invalid = False certificate = GeneratedCertificate.certificate_for_student(student, course_key) if certificate is not None: is_invalid = CertificateInvalidation.has_certificate_invalidation(student, course_key) return is_invalid def cert_generation_enabled(course_key): """Check whether certificate generation is enabled for a course. There are two "switches" that control whether self-generated certificates are enabled for a course: 1) Whether the self-generated certificates feature is enabled. 2) Whether self-generated certificates have been enabled for this particular course. Certificates are enabled for a course only when both switches are set to True. Arguments: course_key (CourseKey): The course identifier. Returns: boolean: Whether self-generated certificates are enabled for the course. """ return ( CertificateGenerationConfiguration.current().enabled and CertificateGenerationCourseSetting.is_self_generation_enabled_for_course(course_key) ) def generate_example_certificates(course_key): """Generate example certificates for a course. Example certificates are used to validate that certificates are configured correctly for the course. Staff members can view the example certificates before enabling the self-generated certificates button for students. Several example certificates may be generated for a course. For example, if a course offers both verified and honor certificates, examples of both types of certificate will be generated. If an error occurs while starting the certificate generation job, the errors will be recorded in the database and can be retrieved using `example_certificate_status()`. Arguments: course_key (CourseKey): The course identifier. Returns: None """ xqueue = XQueueCertInterface() for cert in ExampleCertificateSet.create_example_set(course_key): xqueue.add_example_cert(cert) def example_certificates_status(course_key): """Check the status of example certificates for a course. This will check the *latest* example certificate task. This is generally what we care about in terms of enabling/disabling self-generated certificates for a course. Arguments: course_key (CourseKey): The course identifier. Returns: list Example Usage: >>> from lms.djangoapps.certificates import api as certs_api >>> certs_api.example_certificate_status(course_key) [ { 'description': 'honor', 'status': 'success', 'download_url': 'http://www.example.com/abcd/honor_cert.pdf' }, { 'description': 'verified', 'status': 'error', 'error_reason': 'No template found!' 
} ] """ return ExampleCertificateSet.latest_status(course_key) def _safe_course_key(course_key): if not isinstance(course_key, CourseKey): return CourseKey.from_string(course_key) return course_key def _course_from_key(course_key): return CourseOverview.get_from_id(_safe_course_key(course_key)) def _certificate_html_url(user_id, course_id, uuid): if uuid: return reverse('certificates:render_cert_by_uuid', kwargs={'certificate_uuid': uuid}) elif user_id and course_id: kwargs = {"user_id": str(user_id), "course_id": unicode(course_id)} return reverse('certificates:html_view', kwargs=kwargs) return '' def _certificate_download_url(user_id, course_id, user_certificate=None): if not user_certificate: try: user_certificate = GeneratedCertificate.eligible_certificates.get( user=user_id, course_id=_safe_course_key(course_id) ) except GeneratedCertificate.DoesNotExist: log.critical( u'Unable to lookup certificate\n' u'user id: %d\n' u'course: %s', user_id, unicode(course_id) ) if user_certificate: return user_certificate.download_url return '' def has_html_certificates_enabled(course): if not settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False): return False return course.cert_html_view_enabled def get_certificate_url(user_id=None, course_id=None, uuid=None, user_certificate=None): url = '' course = _course_from_key(course_id) if not course: return url if has_html_certificates_enabled(course): url = _certificate_html_url(user_id, course_id, uuid) else: url = _certificate_download_url(user_id, course_id, user_certificate=user_certificate) return url def get_active_web_certificate(course, is_preview_mode=None): """ Retrieves the active web certificate configuration for the specified course """ certificates = getattr(course, 'certificates', {}) configurations = certificates.get('certificates', []) for config in configurations: if config.get('is_active') or is_preview_mode: return config return None def get_certificate_template(course_key, mode, language): """ Retrieves the custom certificate template based on course_key, mode, and language. 
""" template = None # fetch organization of the course org_id = get_course_organization_id(course_key) # only consider active templates active_templates = CertificateTemplate.objects.filter(is_active=True) if org_id and mode: # get template by org, mode, and key org_mode_and_key_templates = active_templates.filter( organization_id=org_id, mode=mode, course_key=course_key ) template = get_language_specific_template_or_default(language, org_mode_and_key_templates) # since no template matched that course_key, only consider templates with empty course_key empty_course_key_templates = active_templates.filter(course_key=CourseKeyField.Empty) if not template and org_id and mode: # get template by org and mode org_and_mode_templates = empty_course_key_templates.filter( organization_id=org_id, mode=mode ) template = get_language_specific_template_or_default(language, org_and_mode_templates) if not template and org_id: # get template by only org org_templates = empty_course_key_templates.filter( organization_id=org_id, mode=None ) template = get_language_specific_template_or_default(language, org_templates) if not template and mode: # get template by only mode mode_templates = empty_course_key_templates.filter( organization_id=None, mode=mode ) template = get_language_specific_template_or_default(language, mode_templates) return template if template else None def get_language_specific_template_or_default(language, templates): """ Returns templates that match passed in language. Returns default templates If no language matches, or language passed is None """ two_letter_language = _get_two_letter_language_code(language) language_or_default_templates = list(templates.filter(Q(language=two_letter_language) | Q(language=None) | Q(language=''))) language_specific_template = get_language_specific_template(two_letter_language, language_or_default_templates) if language_specific_template: return language_specific_template else: return get_all_languages_or_default_template(language_or_default_templates) def get_language_specific_template(language, templates): for template in templates: if template.language == language: return template return None def get_all_languages_or_default_template(templates): for template in templates: if template.language == '': return template return templates[0] if templates else None def _get_two_letter_language_code(language_code): """ Shortens language to only first two characters (e.g. es-419 becomes es) This is needed because Catalog returns locale language which is not always a 2 letter code. """ if language_code is None: return None elif language_code == '': return '' else: return language_code[:2] def emit_certificate_event(event_name, user, course_id, course=None, event_data=None): """ Emits certificate event. """ event_name = '.'.join(['edx', 'certificate', event_name]) if course is None: course = modulestore().get_course(course_id, depth=0) context = { 'org_id': course.org, 'course_id': unicode(course_id) } data = { 'user_id': user.id, 'course_id': unicode(course_id), 'certificate_url': get_certificate_url(user.id, course_id) } event_data = event_data or {} event_data.update(data) with tracker.get_tracker().context(event_name, context): tracker.emit(event_name, event_data) def get_asset_url_by_slug(asset_slug): """ Returns certificate template asset url for given asset_slug. 
""" asset_url = '' try: template_asset = CertificateTemplateAsset.objects.get(asset_slug=asset_slug) asset_url = template_asset.asset.url except CertificateTemplateAsset.DoesNotExist: pass return asset_url def get_certificate_header_context(is_secure=True): """ Return data to be used in Certificate Header, data returned should be customized according to the site configuration. """ data = dict( logo_src=branding_api.get_logo_url(is_secure), logo_url=branding_api.get_base_url(is_secure), ) return data def get_certificate_footer_context(): """ Return data to be used in Certificate Footer, data returned should be customized according to the site configuration. """ data = dict() # get Terms of Service and Honor Code page url terms_of_service_and_honor_code = branding_api.get_tos_and_honor_code_url() if terms_of_service_and_honor_code != branding_api.EMPTY_URL: data.update({'company_tos_url': terms_of_service_and_honor_code}) # get Privacy Policy page url privacy_policy = branding_api.get_privacy_url() if privacy_policy != branding_api.EMPTY_URL: data.update({'company_privacy_url': privacy_policy}) # get About page url about = branding_api.get_about_url() if about != branding_api.EMPTY_URL: data.update({'company_about_url': about}) return data
The computer has no power. Step #1: Check behind the student PC to ensure the power cable is present and plugged in. a. Check behind the student PC and look for the PC power cable and the power brick attached to it. Ensure the power brick is receiving power from the wall outlet. b. Check the wall outlet and ensure that the power cable is seated in the socket. Step #2: Attempt to power on the PC. If the computer still has no power, please send a response email to the ticket and a technician will take a further look at the issue. Note: Once the computer is receiving power, the LED on the power button will light up and the computer will start up, then shut down again.
import DB_manager3, sys from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_ResultsManager(QtGui.QWidget): def __init__(self, database, tableName): QtGui.QWidget.__init__(self) self.dbu = DB_manager3.DatabaseTool(database, tableName) self.setupUi(self) self.UpdateTree() self.UpdateCombo() def setupUi(self, ResultsManager): ResultsManager.setObjectName(_fromUtf8("ResultsManager")) ResultsManager.setFixedSize(742, 229) self.labelhome = QtGui.QLabel(ResultsManager) self.labelhome.setGeometry(QtCore.QRect(530, 10, 92, 17)) self.labelhome.setObjectName(_fromUtf8("labelhome")) self.labelmessage = QtGui.QLabel(ResultsManager) self.labelmessage.setGeometry(QtCore.QRect(540, 90, 151, 20)) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.labelmessage.setFont(font) self.labelmessage.setStyleSheet(_fromUtf8("color: rgb(255, 0, 0);")) self.labelmessage.setText(_fromUtf8("")) self.labelmessage.setObjectName(_fromUtf8("labelmessage")) self.homename = QtGui.QComboBox(ResultsManager) self.homename.setGeometry(QtCore.QRect(520, 30, 91, 27)) self.homename.setObjectName(_fromUtf8("homename")) self.deletebutton = QtGui.QPushButton(ResultsManager) self.deletebutton.setGeometry(QtCore.QRect(480, 150, 261, 27)) self.deletebutton.setObjectName(_fromUtf8("deletebutton")) self.addbutton = QtGui.QPushButton(ResultsManager) self.addbutton.setGeometry(QtCore.QRect(480, 110, 261, 27)) self.addbutton.setObjectName(_fromUtf8("addbutton")) self.visitorname = QtGui.QComboBox(ResultsManager) self.visitorname.setGeometry(QtCore.QRect(650, 30, 91, 27)) self.visitorname.setObjectName(_fromUtf8("visitorname")) self.labelteam_2 = QtGui.QLabel(ResultsManager) self.labelteam_2.setGeometry(QtCore.QRect(620, 40, 47, 17)) self.labelteam_2.setObjectName(_fromUtf8("labelteam_2")) self.labelvisitor = QtGui.QLabel(ResultsManager) self.labelvisitor.setGeometry(QtCore.QRect(660, 10, 92, 17)) self.labelvisitor.setObjectName(_fromUtf8("labelvisitor")) self.labelresult = QtGui.QLabel(ResultsManager) self.labelresult.setGeometry(QtCore.QRect(500, 190, 201, 31)) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.labelresult.setFont(font) self.labelresult.setStyleSheet(_fromUtf8("color: rgb(0, 170, 0);")) self.labelresult.setText(_fromUtf8("")) self.labelresult.setObjectName(_fromUtf8("labelresult")) self.treeWidget = QtGui.QTreeWidget(ResultsManager) self.treeWidget.setGeometry(QtCore.QRect(10, 10, 451, 211)) self.treeWidget.setObjectName(_fromUtf8("treeWidget")) self.treeWidget.headerItem().setText(0, _fromUtf8("1")) self.homescore = QtGui.QLineEdit(ResultsManager) self.homescore.setGeometry(QtCore.QRect(520, 60, 91, 27)) self.homescore.setObjectName(_fromUtf8("homescore")) self.visitorscore = QtGui.QLineEdit(ResultsManager) self.visitorscore.setGeometry(QtCore.QRect(650, 60, 91, 27)) self.visitorscore.setObjectName(_fromUtf8("visitorscore")) self.label = QtGui.QLabel(ResultsManager) self.label.setGeometry(QtCore.QRect(470, 30, 66, 21)) self.label.setObjectName(_fromUtf8("label")) self.label_2 = QtGui.QLabel(ResultsManager) self.label_2.setGeometry(QtCore.QRect(470, 60, 66, 21)) 
self.label_2.setObjectName(_fromUtf8("label_2")) self.labelteam_3 = QtGui.QLabel(ResultsManager) self.labelteam_3.setGeometry(QtCore.QRect(620, 60, 47, 17)) font = QtGui.QFont() font.setPointSize(16) font.setBold(True) font.setWeight(75) self.labelteam_3.setFont(font) self.labelteam_3.setObjectName(_fromUtf8("labelteam_3")) self.retranslateUi(ResultsManager) QtCore.QMetaObject.connectSlotsByName(ResultsManager) def retranslateUi(self, ResultsManager): ResultsManager.setWindowTitle(_translate("ResultsManager", "ResultsManager", None)) self.labelhome.setText(_translate("ResultsManager", "Home", None)) self.deletebutton.setText(_translate("ResultsManager", "Delete", None)) self.addbutton.setText(_translate("ResultsManager", "Add", None)) self.labelteam_2.setText(_translate("ResultsManager", " Vs", None)) self.labelvisitor.setText(_translate("ResultsManager", "Visitor", None)) self.label.setText(_translate("ResultsManager", "Teams", None)) self.label_2.setText(_translate("ResultsManager", "Scores", None)) self.labelteam_3.setText(_translate("ResultsManager", " :", None)) @QtCore.pyqtSignature("on_addbutton_clicked()") def addbuttonf(self): if((self.homename.currentText() != 'Choose Team') and (self.visitorname.currentText() != 'Choose Team') and (self.homename.currentText() != self.visitorname.currentText())): Home = self.homename.currentText() Visitor = self.visitorname.currentText() Hscore = self.homescore.text() Vscore = self.visitorscore.text() self.dbu.AddEntryToTable(Home,Hscore,Vscore,Visitor) self.UpdateTree() self.labelresult.setText(_translate("ResultsManager", "Done!", None)) self.labelresult.setStyleSheet('QLabel#label {color: green}') else : self.labelmessage.setText(_translate("ResultsManager", "Fill The Form!", None)) self.labelmessage.setStyleSheet('QLabel#label {color: red}') @QtCore.pyqtSignature("on_deletebutton_clicked()") def delbutton(self): idl = self.treeWidget.currentItem() idd = idl.text(4) self.dbu.DelEntry(str(idd)) self.UpdateTree() def UpdateTree(self): col = self.dbu.GetColumns() table = self.dbu.GetTable() for c in range(len(col)): self.treeWidget.headerItem().setText(c, col[c][0]) self.treeWidget.clear() for item in range(len(table)): QtGui.QTreeWidgetItem(self.treeWidget) for value in range(len(table[item])): self.treeWidget.topLevelItem(item).setText(value, str(table[item][value])) def UpdateCombo(self): c = [] f = open('teams.txt','r') line = f.readlines() for i in range(len(line)): x = line[i] c.append(x[:len(x)-1]) f.close() self.homename.clear() self.visitorname.clear() self.homename.addItems(c) self.visitorname.addItems(c)
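The class above only defines the widget; a separate launcher is needed to show it. A minimal sketch, assuming a SQLite file and table name compatible with DB_manager3 (both names below are placeholders):

if __name__ == "__main__":
    # Placeholder database path and table name; adjust to the real schema.
    app = QtGui.QApplication(sys.argv)
    window = Ui_ResultsManager("league.db", "results")
    window.show()
    sys.exit(app.exec_())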
something. It is better to show numbers here to be able to judge. Subject: Registering an ISO is very slow. 1- Could you please guide me on how to increase the speed of registering an ISO template? 2- How can I see the percentage of the ISO that has been uploaded to the SSVM in CloudStack 4.5?
import os, sys import cPickle as pickle import glob # import itertools import matplotlib.pyplot as plt import matplotlib.cm as mpl_cm import numpy as np #from mpl_toolkits.basemap import Basemap #import matplotlib.animation as animation import iris import iris.coords as coords import iris.quickplot as qplt import iris.plot as iplt import iris.coord_categorisation import cartopy.crs as ccrs import cartopy.io.img_tiles as cimgt import matplotlib.ticker as mticker from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER import datetime def main(): lon_low= 60 lon_high = 105 lat_low = -10 lat_high = 30 first_of_year = datetime.date(2011, 01, 01) first_ordinal = first_of_year.toordinal() j=1 #pickle_name = 'pickle_daily_mean_*.p' pickle_name = 'pickle_model_mean_collapsed_*.p' flist = glob.glob ('/home/pwille/python_scripts/*/%s' % pickle_name) plt.figure(figsize=(30, 15)) #plt.gcf().subplots_adjust(hspace=0.05, wspace=0.05, top=0.95, bottom=0.05, left=0.075, right=0.925) plt.gcf().subplots_adjust(top=0.5) plt.suptitle('Mean sea level pressure of model runs (average of entire model run)') for i in flist: fname = str(i) experiment_id = fname.split('/')[4] if not os.path.exists('/home/pwille/python_scripts/pngs/%s' % (experiment_id)): os.makedirs('/home/pwille/python_scripts/pngs/%s' % (experiment_id)) #daily_mean = pickle.load( open( "/home/pwille/python_scripts/%s/pickle_daily_mean_%s.p" % (experiment_id, experiment_id), "rb" ) ) model_mean = pickle.load( open( "%s" % (fname), "rb" ) ) #print model_mean for sub_cube in model_mean.slices(['grid_latitude', 'grid_longitude']): #Get date in iso format for title, if needed #day=sub_cube_daily.coord('dayyear') #day_number = day.points[0] #day_number_ordinal=first_ordinal-1 + day_number #date_print = datetime.date.fromordinal(day_number_ordinal) #date_iso = str(date_print.isoformat()) sub_cube.units = 'hPa' sub_cube /= 100 # Load a Cynthia Brewer palette. brewer_cmap = mpl_cm.get_cmap('Spectral') #contour = qplt.contour(sub_cube_daily, brewer_cmap.N, cmap=brewer_cmap) clevs = np.arange(996,1016) sub_cube.coord('grid_latitude').guess_bounds() sub_cube.coord('grid_longitude').guess_bounds() print j plt.subplot(2, 4, j, projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low,lat_high)) #plt.subplot(4, 2, j, projection=ccrs.PlateCarree()) j+=1 contour = iplt.contour(sub_cube, clevs, colors='k',linewidths=0.5) #iplt.contourf(sub_cube, 16, cmap=brewer_cmap) #plt.title('Daily Mean Sea Level Pressure: %s model run. 
%s' % (experiment_id, date_iso), fontsize=12) plt.title('%s' % (experiment_id), fontsize=8) dx, dy = 10, 10 plt.clabel(contour, fmt='%d', inline=1, fontsize=8) plt.gca().coastlines(resolution='110m', color='gray') plt.gca().stock_img() gl = plt.gca().gridlines(draw_labels=True,linewidth=1, color='gray', alpha=0.5, linestyle='--') gl.xlabels_top = False gl.ylabels_right = False #gl.xlines = False gl.xlocator = mticker.FixedLocator(range(60,105+dx,dx)) gl.ylocator = mticker.FixedLocator(range(-10,30+dy,dx)) gl.xformatter = LONGITUDE_FORMATTER gl.yformatter = LATITUDE_FORMATTER gl.xlabel_style = {'size': 8, 'color': 'gray'} #gl.xlabel_style = {'color': 'red', 'weight': 'bold'} gl.ylabel_style = {'size': 8, 'color': 'gray'} #gl.xlabel_style = {'color': 'red', 'weight': 'bold'} #plt.savefig('/home/pwille/python_scripts/pngs/%s/msl_model_mean_%s.png' % (experiment_id, experiment_id)) # plt.subplots_adjust(top=0.9, bottom=0.1, hspace=0.2) plt.tight_layout() plt.subplots_adjust(top=0.9, wspace=0.2, hspace=0.2) #plt.show() plt.savefig('/home/pwille/python_scripts/pngs/msl_model_mean_ensemble.png') plt.close() #print sub_cube #print fname #print experiment_id if __name__ == '__main__': main()
Dr. Paula Rochon – vice-president of research, Women’s College Hospital and professor at the University of Toronto’s Institute of Health Policy, Management and Evaluation and the faculty of medicine – was elected as a fellow of the Canadian Academy of Health Sciences. The fellowship recognizes Dr. Rochon’s contribution of pharmaco-epidemiologic research to examine patterns of adverse drug events and to explore the impacts of drug therapies commonly prescribed to older adults. In September, Women’s Health Matters spoke with Dr. Janice Du Mont, a scientist in the Violence and Health Research Program at Women’s College Research Institute, about abusive relationships, and the different types of abuse that occur within intimate relationships. Dr. Paula Rochon spoke with CBC Radio about Women’s Xchange and broadening the reach of healthcare. Click here to listen to the interview. People have raised their voice with Avril Lavigne and Women’s College Hospital this summer. Click here to read more about the campaign. WCH board member Mina Mawani was profiled on the cover of South Asian Generation Next. Click here to read the article. The Association of Volunteers is hosting its most significant fundraising event of the year! The 2nd Annual Theatre Night VENUS IN FUR will take place on Tuesday, Oct. 1. Let your family, friends and colleagues know and join us for this entertaining evening filled with laughter and fun. Tickets are on sale now at the WCH information desk. Click here for more information. We hope to see you and your friends there!
"""actofgoods URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.9/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin from . import views app_name = 'administration' urlpatterns = [ url(r'^administration/$', views.administration, name='administration'), url(r'^administration/requests/$', views.requests, name='requests'), url(r'^administration/categories/$', views.categories, name='categories'), url(r'^administration/needs/$', views.needs, name='needs'), url(r'^administration/information/$', views.informations, name='information'), url(r'^administration/users/$', views.users, name='users'), url(r'^administration/groups/$', views.groups, name='groups'), url(r'^administration/faq_administration/$', views.faq_administration, name='faq_administration'), url(r'^administration/users/(?P<pk>\d+)/delete/$', views.user_delete, name='user_delete'), url(r'^administration/groups/(?P<pk>\d+)/delete/$', views.group_delete, name='group_delete'), url(r'^administration/needs/(?P<pk>\d+)/delete/$', views.need_delete, name='need_delete'), url(r'^administration/information/(?P<pk>\d+)/delete/$', views.information_delete, name='information_delete'), url(r'^administration/users/(?P<pk>\d+)/make_admin/$', views.make_admin, name='make_admin'), url(r'^administration/categories/(?P<pk>\d+)/delete/$', views.categories_delete, name='categories_delete'), url(r'^administration/information/(?P<pk>\d+)/$', views.information_admin, name='information_admin'), url(r'^administration/information/comment/delete/$', views.comment_delete, name='comment_delete'), url(r'^administration/information/comment/(?P<pk>\d+)/delete/$', views.comment_delete, name='comment_delete'), url(r'^administration/information/(?P<pki>\d+)/(?P<pkc>\d+)/$', views.information_reported_comment_admin, name='information_reported_comment_admin'), ]
# # VASP.py # # Interface to VASP (https://www.vasp.at) # # Copyright (c) 2014 Terumasa Tadano # # This file is distributed under the terms of the MIT license. # Please see the file 'LICENCE.txt' in the root directory # or http://opensource.org/licenses/mit-license.php for information. # from __future__ import print_function import numpy as np try: try: # cElementTree on Python 2.5+ import xml.etree.cElementTree as etree except ImportError: # ElementTree on Python 2.5+ import xml.etree.ElementTree as etree except ImportError: try: # cElementTree import cElementTree as etree except ImportError: # ElementTree import elementtree.ElementTree as etree def read_POSCAR(file_in): file_pos = open(file_in, 'r') file_pos.readline() a = float(file_pos.readline().rstrip()) lavec = np.zeros((3, 3)) for i in range(3): arr = file_pos.readline().rstrip().split() if len(arr) != 3: print("Could not read POSCAR properly") exit(1) for j in range(3): lavec[i, j] = a * float(arr[j]) lavec = lavec.transpose() invlavec = np.linalg.inv(lavec) elements = file_pos.readline().rstrip().split() if elements[0].isdigit(): nat_elem = [int(tmp) for tmp in elements] elements = [] else: nat_elem = [int(tmp) for tmp in file_pos.readline().rstrip().split()] nat = np.sum(nat_elem) basis = file_pos.readline().rstrip() x = np.zeros((nat, 3)) for i in range(nat): arr = file_pos.readline().rstrip().split() for j in range(3): x[i][j] = float(arr[j]) if basis == "Direct" or basis == "direct" or basis == "D" or basis == "d": xf = x else: xf = np.dot(x, invlavec) file_pos.close() return lavec, invlavec, elements, nat_elem, xf def write_POSCAR(prefix, counter, header, nzerofills, lavec, elems, nat, disp, coord): filename = prefix + str(counter).zfill(nzerofills) + ".POSCAR" f = open(filename, 'w') f.write("%s\n" % header) f.write("%s\n" % "1.0") for i in range(3): f.write("%20.15f %20.15f %20.15f\n" % (lavec[0][i], lavec[1][i], lavec[2][i])) for i in range(len(elems)): f.write("%s " % elems[i]) if len(elems) > 0: f.write("\n") for i in range(len(nat)): f.write("%d " % nat[i]) f.write("\n") f.write("Direct\n") for i in range(len(disp)): for j in range(3): f.write("%20.15f" % (coord[i][j] + disp[i][j])) f.write("\n") f.close() def get_coordinate_VASP(xml_file, nat): x = [] try: xml = etree.parse(xml_file) root = xml.getroot() for elems in root.findall('calculation/structure/varray'): str_coord = [elems2.text for elems2 in elems.findall('v')] n = len(str_coord) for i in range(n): x.extend([t for t in str_coord[i].split()]) return np.array(x, dtype=np.float) except: print("Error in reading atomic positions from the XML file: %s" % xml_file) def print_displacements_VASP(xml_files, lavec, nat, x0, conversion_factor, file_offset): x0 = np.round(x0, 8) lavec_transpose = lavec.transpose() vec_refold = np.vectorize(refold) if file_offset is None: disp_offset = np.zeros((nat, 3)) else: x0_offset = get_coordinate_VASP(file_offset, nat) try: x0_offset = np.reshape(x0_offset, (nat, 3)) except: print("File %s contains too many position entries" % file_offset) disp_offset = x0_offset - x0 for search_target in xml_files: x = get_coordinate_VASP(search_target, nat) ndata = len(x) // (3 * nat) x = np.reshape(x, (ndata, nat, 3)) for idata in range(ndata): disp = x[idata, :, :] - x0 - disp_offset disp = np.dot(vec_refold(disp), lavec_transpose) disp *= conversion_factor for i in range(nat): print("%15.7F %15.7F %15.7F" % (disp[i, 0], disp[i, 1], disp[i, 2])) def get_atomicforces_VASP(xml_file): f = [] try: xml = etree.parse(xml_file) root = xml.getroot() for 
elems in root.findall('calculation/varray'): if elems.get('name') == "forces": str_force = [elems2.text for elems2 in elems.findall('v')] for i in range(len(str_force)): f.extend([t for t in str_force[i].split()]) return np.array(f, dtype=np.float) except: print("Error in reading atomic forces from the XML file: %s" % xml_file) def print_atomicforces_VASP(xml_files, nat, conversion_factor, file_offset): if file_offset is None: force_offset = np.zeros((nat, 3)) else: data0 = get_atomicforces_VASP(file_offset) try: force_offset = np.reshape(data0, (nat, 3)) except: print("File %s contains too many force entries" % file_offset) for search_target in xml_files: data = get_atomicforces_VASP(search_target) ndata = len(data) // (3 * nat) data = np.reshape(data, (ndata, nat, 3)) for idata in range(ndata): f = data[idata, :, :] - force_offset f *= conversion_factor for i in range(nat): print("%15.8E %15.8E %15.8E" % (f[i][0], f[i][1], f[i][2])) def get_coordinate_and_force_VASP(xml_file, nat): x = [] f = [] try: xml = etree.parse(xml_file) root = xml.getroot() for elems in root.findall('calculation/structure/varray'): str_coord = [elems2.text for elems2 in elems.findall('v')] n = len(str_coord) for i in range(n): x.extend([t for t in str_coord[i].split()]) for elems in root.findall('calculation/varray'): if elems.get('name') == "forces": str_force = [elems2.text for elems2 in elems.findall('v')] for i in range(len(str_force)): f.extend([t for t in str_force[i].split()]) return np. array(x, dtype=np.float), np.array(f, dtype=np.float) except: print("Error in reading atomic positions and forces from the XML file: %s" % xml_file) def print_displacements_and_forces_VASP(xml_files, lavec, nat, x0, conversion_factor_disp, conversion_factor_force, conversion_factor_energy, file_offset, filter_emin, filter_emax): x0 = np.round(x0, 8) lavec_transpose = lavec.transpose() vec_refold = np.vectorize(refold) if file_offset is None: disp_offset = np.zeros((nat, 3)) force_offset = np.zeros((nat, 3)) epot_offset = 0 else: x0_offset, force_offset = get_coordinate_and_force_VASP(file_offset, nat) epot_offset, _ = get_energies_VASP(file_offset) epot_offset = np.array(epot_offset, dtype='float') try: x0_offset = np.reshape(x0_offset, (nat, 3)) except: print("File %s contains too many position entries" % file_offset) try: force_offset = np.reshape(force_offset, (nat, 3)) except: print("File %s contains too many force entries" % file_offset) disp_offset = x0_offset - x0 if len(epot_offset) > 1: print("File %s contains too many energy entries" % file_offset) for search_target in xml_files: x, force = get_coordinate_and_force_VASP(search_target, nat) epot, ekin = get_energies_VASP(search_target) ndata = len(x) // (3 * nat) ndata2 = len(force) // (3 * nat) if ndata != ndata2: print("The numbers of displacement and force entries are different.") exit(1) ndata_energy = len(epot) if ndata_energy != ndata: print("The numbers of displacement and energy entries are different.") exit(1) epot = np.array(epot, dtype='float') epot -= epot_offset x = np.reshape(x, (ndata, nat, 3)) force = np.reshape(force, (ndata, nat, 3)) for idata in range(ndata): disp = x[idata, :, :] - x0 - disp_offset disp = np.dot(vec_refold(disp), lavec_transpose) f = force[idata, :, :] - force_offset disp *= conversion_factor_disp f *= conversion_factor_force if filter_emin is not None: if filter_emin > epot[idata]: continue if filter_emax is not None: if filter_emax < epot[idata]: continue print("# Filename: %s, Snapshot: %d, E_pot (eV): %s" % (search_target, 
idata + 1, epot[idata])) for i in range(nat): print("%15.7F %15.7F %15.7F %20.8E %15.8E %15.8E" % (disp[i, 0], disp[i, 1], disp[i, 2], f[i][0], f[i][1], f[i][2])) def get_energies_VASP(xml_file): etot_array = [] ekin_array = [] try: xml = etree.parse(xml_file) root = xml.getroot() for elems in root.findall('calculation/energy'): etot = 'N/A' ekin = 'N/A' for elems2 in elems.findall('i'): if elems2.get('name') == "e_fr_energy": etot = elems2.text if elems2.get('name') == "kinetic": ekin = elems2.text etot_array.append(etot) ekin_array.append(ekin) return etot_array, ekin_array except: print("Error in reading energies from the XML file: %s" % xml_file) def print_energies_VASP(xml_files, conversion_factor, file_offset): print("# Etot, Ekin") etot_offset = 0.0 ekin_offset = 0.0 if file_offset: etot, ekin = get_energies_VASP(file_offset) if len(etot) > 1 or len(ekin) > 1: print("File %s contains too many energy entries" % file_offset) exit(1) if etot[0] != 'N/A': etot_offset = float(etot[0]) if ekin[0] != 'N/A': ekin_offset = float(ekin[0]) for search_target in xml_files: etot, ekin = get_energies_VASP(search_target) for i in range(len(etot)): if etot[i] != 'N/A': val_etot = float(etot[i]) - etot_offset print("%15.8E" % (val_etot * conversion_factor), end=' ') else: print("%s" % etot[i], end=' ') if ekin[i] != 'N/A': val_ekin = float(ekin[i]) - ekin_offset print("%15.8E" % (val_ekin * conversion_factor)) else: print("%s" % ekin[i]) def get_unit_conversion_factor(str_unit): Bohr_radius = 0.52917721067 Rydberg_to_eV = 13.60569253 disp_conv_factor = 1.0 energy_conv_factor = 1.0 force_conv_factor = 1.0 if str_unit == "ev": disp_conv_factor = 1.0 energy_conv_factor = 1.0 elif str_unit == "rydberg": disp_conv_factor = 1.0 / Bohr_radius energy_conv_factor = 1.0 / Rydberg_to_eV elif str_unit == "hartree": disp_conv_factor = 1.0 / Bohr_radius energy_conv_factor = 0.5 / Rydberg_to_eV else: print("This cannot happen.") exit(1) force_conv_factor = energy_conv_factor / disp_conv_factor return disp_conv_factor, force_conv_factor, energy_conv_factor def parse(SPOSCAR_init, xml_files, xml_file_offset, str_unit, print_disp, print_force, print_energy, filter_emin, filter_emax): aa, _, elems, nats, x_frac0 = read_POSCAR(SPOSCAR_init) scale_disp, scale_force, scale_energy = get_unit_conversion_factor(str_unit) if print_disp == True and print_force == True: print_displacements_and_forces_VASP(xml_files, aa, np.sum(nats), x_frac0, scale_disp, scale_force, scale_energy, xml_file_offset, filter_emin, filter_emax) elif print_disp == True: print_displacements_VASP(xml_files, aa, np.sum(nats), x_frac0, scale_disp, xml_file_offset) elif print_force == True: print_atomicforces_VASP(xml_files, np.sum(nats), scale_force, xml_file_offset) elif print_energy == True: print_energies_VASP(xml_files, scale_energy, xml_file_offset) def refold(x): if x >= 0.5: return x - 1.0 elif x < -0.5: return x + 1.0 else: return x
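The module is normally driven through `parse()`. A minimal sketch of calling it directly, assuming a supercell SPOSCAR file and two vasprun.xml files (all file names are placeholders), printing displacements and forces in Rydberg atomic units with no offset run and no energy filter:

# Placeholder file names; arguments follow the parse() signature above.
parse("SPOSCAR",                           # SPOSCAR_init
      ["vasprun01.xml", "vasprun02.xml"],  # xml_files
      None,                                # xml_file_offset
      "rydberg",                           # str_unit
      True,                                # print_disp
      True,                                # print_force
      False,                               # print_energy
      None,                                # filter_emin
      None)                                # filter_emax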
Cozy Chicks: In an instant ... gone. In an instant ... gone. Ossie -- on top of the bathroom mirror. I had planned to talk about my garden. (Such as it is.) But then real life reared its ugly head. We never know when someone we love will be instantly snatched from our lives. Eleven years ago this week, my cousin lost his middle child to a car accident. It happened so fast. One day here ... one day gone. It's a devastating loss his family has never recovered from. Something similar happened to my sister-in-law just yesterday. No, she didn't lose a child. She never had children. But she has cats. Or she had cats. She went away on vacation for a few days leaving her beloved cats in the care of the cat sitter she's trusted and used for more than a decade. On Wednesday the cat sitter said her boy was fit as a fiddle. Less than 24 hours later my sister in law came home to find her boy dead in the middle of her living room. There was blood around his mouth, but no other sign of trauma. Ossie was a bad boy. He was constantly in trouble. He bugged his housemate Mollie. He broke things. He climbed places where he shouldn't have gone. He should have been named Mischief, because that was what he was always into, and my sister in law loved him more fiercely than she ever loved a pet. I know exactly how that feels. I have had many cats over the years, and I've loved them all. But once in a lifetime there seems to be a pet that demands more love from you than any other. For me, that's my tiny son, Fred. Like Ossie, Fred is constantly in trouble. He should have been a greyhound. He loves to run. And even more, he loves to chase things. Or rather, he loves to chase the other cats. He talks. OMG, does he talk. But mostly at night...when the rest of us are trying to sleep. He has kitty OCD. That means sometimes he just runs and runs and runs and the worst thing you can do is pet a cat when it's suffering from that. Except ... if I pet him when he's hyper, he calms right down. Not for anybody else, but I seem to have the magic touch. Rest in peace, Ossie, and know that you were deeply loved. I'm so sorry Ossie passed away. I know your sister-in-law's heart is broken. I have five dogs, two of whom are 13 years old. I can't even think of losing them. I'm sending comforting vibes to you & your sister-in-law. I hope Ossie's wonderful,quirky antics are great memories for everyone who loved Ossie. I have been where your sil is right now, and it is a horrible place. My last cat was with me from the day she was born until she passed away at almost 20 yrs. She was my family, and it hurts like losing a family member, although not everyone understands that. My sympathies are with her as she grieves her loss. Our Golden mutt died suddenly...I didn't know they can appear fine fine but get a tumor that can rupture and they bleed out before you know what's happening. I wish no one would ever have to feel that. I always tell bereaved fur parents to be extra kind to themselves during the grieving.Our pets would never want to cause us pain so I believe it brings them peace if they see we are gentle with ourselves. Thank you all for your kind words. I'm always sad when a friend or relative looses a pet but for some reason--even though I never met Ossie--I feel like I've lost a pet because my SIL kept us up to date on all his antics and sent us many pictures. My heart goes out to your SIL. Last year I lost a cat that I had a special relationship with. I still miss him. 
His passing made way (without my seeking her) for another kitty that was unhappy in the home she had. Glad I was able to rescue her, but I still miss Little One. Rita, I'm so sorry you lost your cat last year, but happy that you could give another cat a good forever home. So sorry for the loss of Ossie. Please convey my thoughts to your sister in law. I have lost pets and every one is painful. My heart goes out to your sister-in-law.
# # Copyright (c) 2015 ThoughtWorks, Inc. # # Pixelated is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Pixelated is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Pixelated. If not, see <http://www.gnu.org/licenses/>. import os import subprocess import couchdb import shutil LEAP_HOME_FOLDER = os.environ.get('LEAP_HOME_FOLDER', '/var/lib/pixelated/.leap/') def detect_hostname(): return os.environ.get('TESTHOST') or subprocess.check_output(['hostname', '-d']).strip() hostname = detect_hostname() user_agent_address = 'https://%s' % hostname def url_home(port=None): if port is not None: return '%s:%d' % (user_agent_address, port) else: return user_agent_address def login_url(): return url_home(port=8083) + '/login' def logout_url(): return url_home(port=8083) + '/logout' def signup_url(): return url_home() + '/signup' def leap_login_url(): return url_home() + '/login' def _netrc_couch_credentials(): with open('/etc/couchdb/couchdb.netrc', 'r') as netrc: netrc_line = netrc.readline().strip().split(' ') credentials = {} for index in xrange(0, len(netrc_line), 2): credentials[netrc_line[index]] = netrc_line[index+1] return credentials def _delete_identity(server, username): email = '%s@%s' % (username, detect_hostname()) filter_by_user_id = '''function(doc) { if (doc['address']=='%s') { emit(doc, null);} }''' % email identities = server['identities'] user_identities = identities.query(filter_by_user_id) for ident in user_identities: doc = identities.get(ident['id']) identities.delete(doc) def _delete_data(server, user_id): user_db = 'user-%s' % user_id if user_db in server: del server[user_db] def delete_soledad_server_db(user_id, username): couch_credentials = _netrc_couch_credentials() server = couchdb.Server("http://%(login)s:%(password)s@%(machine)s:5984" % couch_credentials) _delete_identity(server, username) _delete_data(server, user_id) def delete_soledad_client_db(user_id): soledad_folder = LEAP_HOME_FOLDER + user_id if os.path.exists(soledad_folder): shutil.rmtree(soledad_folder)
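These helpers are intended for test setup and teardown. A minimal sketch of a teardown step, assuming a throwaway test account whose soledad uuid and username are known (both values below are placeholders):

# Placeholder credentials for a disposable functional-test account.
test_uuid = "0123456789abcdef"
test_username = "functional_test_user"

# Remove the server-side CouchDB data and the local soledad folder.
delete_soledad_server_db(test_uuid, test_username)
delete_soledad_client_db(test_uuid)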
From time to time we also offer special workshops. For more information, please just contact us! We will send you the full list of our services and the current workshop dates.
import re
import subprocess

def findProcess(processId):
    # List all processes and grep for the given id/name.
    ps = subprocess.Popen("ps -ef | grep " + processId, shell=True, stdout=subprocess.PIPE)
    output = ps.stdout.read()
    ps.stdout.close()
    ps.wait()
    return output

def isProcessRunning(processId):
    output = findProcess(processId)
    # A match means the process was found in the ps listing.
    if re.search(processId, output) is None:
        return False
    else:
        return True

def check_process(process):
    import re
    import subprocess
    returnprocess = False
    s = subprocess.Popen(["ps", "ax"], stdout=subprocess.PIPE)
    for x in s.stdout:
        if re.search(process, x):
            returnprocess = True
    if returnprocess == False:
        print 'no process executing'
    if returnprocess == True:
        print 'process executing'

mluebke code is not 100% correct; kill() can also raise EPERM (access denied) in which case that obviously means a process exists. This is supposed to work: (edited as per Jason R. Coombs comments)

import errno
import os
import sys

def pid_exists(pid):
    """Check whether pid exists in the current process table. UNIX only. """
    if pid < 0:
        return False
    if pid == 0:
        # According to "man 2 kill" PID 0 refers to every process
        # in the process group of the calling process.
        # On certain systems 0 is a valid PID but we have no way
        # to know that in a portable fashion.
        raise ValueError('invalid PID 0')
    try:
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        elif err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        else:
            # According to "man 2 kill" possible error values are
            # (EINVAL, EPERM, ESRCH)
            raise
    else:
        return True
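A quick sanity check of `pid_exists`, assuming a UNIX system (the function is UNIX-only, as its docstring says): the current interpreter's PID must exist, while an implausibly large PID almost certainly does not.

import os

print pid_exists(os.getpid())   # True: our own process always exists
print pid_exists(2 ** 22 + 1)   # very likely False on a typical system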
Painted Buntings are said to be timid, shy birds that are rarely seen at feeders, so we were lucky to have this pair as daily guests during the summer and early fall of 2006. The female returned in May 2007 but the male has not shown up yet. They are definitely quick birds, so it is very difficult to take clear photos. Photo captions: Male. Rear of male. Painted Bunting confronting a White-crowned Sparrow.
# -*- coding: utf-8 -*- # Based on: # http://docs.opencv.org/trunk/d7/d8b/tutorial_py_lucas_kanade.html # https://github.com/opencv/opencv/blob/master/samples/python/opt_flow.py # # Outputs image where direction responds to hue, length by brightness # 0° Blue, 60° Magenta, 120° Red, 180° Yellow, 240° Green, 300° Cyan import argparse import cv2 import locale import os from glob import glob import numpy as np from pprint import pprint import sys try: locale.setlocale(locale.LC_ALL, 'en_US') except locale.Error: locale.setlocale(locale.LC_ALL, 'english-us') # input parser = argparse.ArgumentParser() parser.add_argument('-in', dest="INPUT_DIR", default="frames/*.png", help="Path to frames directory") parser.add_argument('-out', dest="OUTPUT_DIR", default="frames_flow/", help="Path to output directory") # init input args = parser.parse_args() # if not os.path.exists(args.OUTPUT_DIR): # os.makedirs(args.OUTPUT_DIR) def drawHsv(flow): h, w = flow.shape[:2] mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1]) hsv = np.zeros((h, w, 3), np.uint8) hsv[...,0] = ang*180/np.pi/2 hsv[...,1] = 255 hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX) bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR) return bgr frames = glob(args.INPUT_DIR) frameCount = len(frames) print "Found %s frames" % locale.format("%d", frameCount, grouping=True) frames.sort() prvs = None for i, f in enumerate(frames): im = cv2.imread(f) if prvs is None: prvs = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY) continue nxt = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY) flow = cv2.calcOpticalFlowFarneback(prvs,nxt, None, 0.5, 3, 15, 3, 5, 1.2, 0) bgr = drawHsv(flow) cv2.imshow('frame',bgr) cv2.waitKey(30) prvs = nxt cv2.destroyAllWindows()
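The script above only displays each visualisation; the `-out` directory is parsed but never used. The helper below is not part of the original and only sketches one way to persist each `bgr` frame from inside the loop, reusing the input frame's file name.

def save_flow_frame(output_dir, source_path, bgr_frame):
    # Write one flow visualisation into output_dir, e.g. frames_flow/0001.png.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    out_path = os.path.join(output_dir, os.path.basename(source_path))
    cv2.imwrite(out_path, bgr_frame)

# Inside the frame loop, after computing `bgr`:
#   save_flow_frame(args.OUTPUT_DIR, f, bgr)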
A 70-year-old man presented with a painless papule on the nail bed of his right third finger, which had been present for 2 years. The patient did not have a history of nail trauma, nor did he have psoriasis or other inflammatory dermatosis. On physical examination there was a 4 × 4 mm erythematous to yellowish subungual keratotic papule with longitudinal splitting of the nail plate and ulceration on the nail bed of the right third finger (Figure 1A). The lesion was tender and firm in texture. Dermoscopy showed keratotic papules with a few dotted vessels at the peripheral area.
#!/usr/bin/env python import rospy from roscpp_initializer import roscpp_initializer from aikidopy import SkeletonMarker, InteractiveMarkerViewer from cozmopy import Cozmo import argparse if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--mesh_dir', dest='mesh_dir', required = True, help='The path to the directory containing Cozmos meshes') args = parser.parse_args() # rospy.init_node does not initialize roscpp and if roscpp is not initialized # instanciating ros::NodeHandle will lead to a fatal error. # roscpp_initializer initializes roscpp and ros::NodeHandle in the background roscpp_initializer.roscpp_init("load_cozmo", []) rospy.init_node("load_cozmo") rate = rospy.Rate(10) topicName = "dart_markers" baseFrameName = "map" if not rospy.is_shutdown(): cozmo = Cozmo(args.mesh_dir) skeleton = cozmo.getCozmoSkeleton(); print("Starting viewer. Please subscribe to the {} InteractiveMarker" " topic in Rviz \n".format(topicName)) viewer = InteractiveMarkerViewer(topicName, baseFrameName) cozmo_marker = viewer.addSkeleton(skeleton) viewer.setAutoUpdate(True); input_str = "" input_val = 0 while input_val != -1.0: input_str = raw_input("\nEnter forklift position (0-0.86 radians, -1 to quit): ") try: input_val = float(input_str) print input_val except ValueError as verr: print('Please enter a valid float value\n') continue if input_val == -1.0: break elif (input_val > 0.86 or input_val < 0): print('This value exceeds the joint limits, please enter valid value\n') continue cozmo.setForkliftPosition(input_val);
In the opening session of the National Genealogical Society 2014 annual conference, Jordan Jones announced the participation and support of the NGS in the Genealogists’ Declaration of Rights. The declaration goes on to list the things our representatives can and should do, such as preserving our freedom to access the public records of our government. Attendees were invited to sign the declaration. I hope the entire declaration will soon be available online. Perhaps the Records Preservation and Access Committee, one of the declaration’s sponsors, will place it on its website (http://www.fgs.org/rpac/).
# -*- coding: utf-8 -*- import os import sys from setuptools import setup # temporarily redirect config directory to prevent matplotlib importing # testing that for writeable directory which results in sandbox error in # certain easy_install versions os.environ["MPLCONFIGDIR"] = "." pkg_name = "hspfbintoolbox" version = open("VERSION").readline().strip() if sys.argv[-1] == "publish": os.system("cleanpy .") os.system("python setup.py sdist") os.system("twine upload dist/{pkg_name}-{version}.tar.gz".format(**locals())) sys.exit() README = open("./README.rst").read() install_requires = [ # List your project dependencies here. # For more details, see: # http://packages.python.org/distribute/setuptools.html#declaring-dependencies "tstoolbox >= 103", ] extras_require = { "dev": [ "black", "cleanpy", "twine", "pytest", "coverage", "flake8", "pytest-cov", "pytest-mpl", "pre-commit", ] } setup( name=pkg_name, version=version, description=( "Reads Hydrological Simulation Program - " "FORTRAN binary output files." ), long_description=README, classifiers=[ # Get strings from # http://pypi.python.org/pypi?%3Aaction=list_classifiers "Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "Intended Audience :: End Users/Desktop", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Scientific/Engineering", "Topic :: Software Development :: Libraries :: Python Modules", ], keywords="hspf binary hydrologic simulation model", author="Tim Cera, P.E.", author_email="tim@cerazone.net", url="http://timcera.bitbucket.io/{pkg_name}/docsrc/index.html".format(**locals()), packages=[pkg_name], include_package_data=True, zip_safe=False, install_requires=install_requires, entry_points={ "console_scripts": ["{pkg_name}={pkg_name}.{pkg_name}:main".format(**locals())] }, extras_require=extras_require, python_requires=">=3.7.1", )
The Micromax Bharat 4 mobile was launched on 15 September 2017 with a 5-inch touchscreen and a 1280x720-pixel display resolution. Its release date for sale is expected to be October 2017. The Micromax Bharat 4 runs the Android operating system (OS) and is powered by 1 GB of RAM and 16 GB of internal memory (ROM). The storage capacity can be expanded up to 32 GB through a microSD card. It has a non-removable battery with 2500 mAh capacity and a quad-core processor. The Micromax Bharat 4 smartphone has a 5 MP rear (primary) camera and a 5-megapixel front-facing selfie camera with autofocus, flash and HD recording. Its dimensions are 155 mm x 72 mm x 8.2 mm. The Micromax Bharat 4 price in India is INR 4999 (expected) as per the latest update on 24 April 2019, and it may vary according to storage and colors. Information on when to buy the Micromax Bharat 4 online at the lowest price of Rs. 4999 from shopping websites such as Flipkart, Amazon, eBay, Paytm, Snapdeal, Shopclues, BigC, Poorvika, Lotmobiles, Sangeetha, etc., will be updated soon. Also get information on Micromax Bharat 4 cash on delivery (COD), EMI and exchange offers, bank cashback discounts on net banking, debit/credit cards or e-wallets, and more. Available prices are valid for the Micromax Bharat 4 in Indian cities including Bangalore, Ahmedabad, Hyderabad, Chennai, Mumbai, Delhi-NCR, Patna, Bhubaneswar and Pune.
#!/usr/bin/env python2 # # Copyright 2016 Philipp Winter <phw@nymity.ch> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Determine query quality of DNS resolvers in given pcap file. The code filters DNS queries for `PREFIX.tor.nymity.ch' and determines which resolvers are poorly configured. """ import re import sys import time import logging as log import datetime import cymruwhois import scapy.all as scapy # IP addresses of machines that were involved in this experiment. DNS_SERVER_ADDR = "198.83.85.34" DNS_QUERY_ADDR = "193.11.166.194" TARGET_DOMAIN = "tor.nymity.ch" log.basicConfig(level=log.getLevelName("INFO"), format="%(asctime)s [%(levelname)s]: %(message)s") # Maps exit relay fingerprints to DNS queries. has_lowercase = re.compile("[a-z]") has_uppercase = re.compile("[A-Z]") fingerprint_pattern = re.compile("^[a-fA-F0-9]{40,}$") def has_0x20_encoding(query): """ Return `True' if query uses 0x20 encoding. Note that there's a slim chance of false negatives here because a resolver could produce an all-lowercase or all-uppercase query despite using 0x20 encoding. """ return has_lowercase.search(query) and has_uppercase.search(query) def analyse_queries(exit_queries, whois): """ Iterate over queries and determine their quality. """ has_0x20 = 0 has_rand_port = 0 lacks_0x20 = set() lacks_rand = set() for exit_fpr, info in exit_queries.iteritems(): query, src_port, src_addr = info if has_0x20_encoding(query): has_0x20 += 1 else: lacks_0x20.add((exit_fpr, src_addr)) if src_port != 53: has_rand_port += 1 else: lacks_rand.add((exit_fpr, src_addr)) exit_queries_len = len(exit_queries) has_0x20_pct = float(has_0x20) / exit_queries_len * 100 has_rand_port_pct = float(has_rand_port) / exit_queries_len * 100 log.info("Extracted queries from %d resolvers." % exit_queries_len) log.info("%d out of %d resolvers (%.2f%%) use 0x20 encoding." % (has_0x20, exit_queries_len, has_0x20_pct)) log.info("%d out of %d resolvers (%.2f%%) use random source port." % (has_rand_port, exit_queries_len, has_rand_port_pct)) print "%d,%d,%d" % (exit_queries_len, has_0x20, has_rand_port) # Print resolvers that are poorly configured. addrs = [] for record, info in zip(whois.lookupmany([addr for _, addr in lacks_0x20]), lacks_0x20): exit_fpr, rslv_addr = info log.warning("%s %15s (%30s) lacks 0x20." % (exit_fpr[:8], rslv_addr, record.owner[:30])) addrs.append(rslv_addr) print ",".join(addrs) addrs = [] for record, info in zip(whois.lookupmany([addr for _, addr in lacks_rand]), lacks_rand): exit_fpr, rslv_addr = info log.warning("%s %15s (%30s) lacks random source port." % (exit_fpr[:8], rslv_addr, record.owner[:30])) addrs.append(rslv_addr) print ",".join(addrs) def matches_fingerprint(dns_label): """ Return `True' if given dns_label appears to be a fingerprint. """ return fingerprint_pattern.match(dns_label) def parse_file(pcap_file): """ Parse pcap file and return dictionary mapping exit fingerprint to its info. 
""" exit_queries = dict() try: packets = scapy.rdpcap(pcap_file) except Exception as err: log.critical("Error while reading pcap: %s" % err) sys.exit(3) for packet in packets: if not packet.haslayer(scapy.IP): continue src_addr = packet[scapy.IP].src if src_addr == DNS_QUERY_ADDR or src_addr == DNS_SERVER_ADDR: continue if not packet.haslayer(scapy.DNSQR): continue query = packet[scapy.DNSQR].qname if TARGET_DOMAIN not in query.lower(): continue # Extract fingerprint and add dictionary entry. dns_labels = query.split(".") if not matches_fingerprint(dns_labels[0]): continue exit_fpr = dns_labels[0].lower() exit_queries[exit_fpr] = (query, packet[scapy.UDP].sport, src_addr) if len(packets) >= 2: first, last = packets[0].time, packets[-1].time log.info("Trace duration: %s" % str(datetime.timedelta(seconds=last-first))) return exit_queries if __name__ == "__main__": if len(sys.argv) != 2: log.critical("Usage: %s PCAP_FILE" % sys.argv[0]) sys.exit(1) pcap_file = sys.argv[1] before = time.time() exit_queries = parse_file(pcap_file) log.info("Parsed file in %ss." % str(time.time() - before)) if len(exit_queries) == 0: log.critical("Could not extract any queries from pcap.") sys.exit(2) analyse_queries(exit_queries, cymruwhois.Client()) sys.exit(0)
When it comes to going on a diet, most people think they have to heavily reduce food, causing them to be starving, grumpy and miserable during the process. While this is the general consensus, luckily, it doesn’t have to be this way with my advanced hunger-fighting techniques. Check out this article by Rudy Mawer, MSc, CISSN for the reasons why!
import pandas as pd import matplotlib.pyplot as plt import matplotlib matplotlib.style.use('ggplot') # Look Pretty def showandtell(title=None): if title != None: plt.savefig(title + ".png", bbox_inches='tight', dpi=300) plt.show() # exit() # # INFO: This dataset has call records for 10 users tracked over the course of 3 years. # Your job is to find out where the users likely live and work at! # # TODO: Load up the dataset and take a peek at its head # Convert the date using pd.to_datetime, and the time using pd.to_timedelta # df = pd.read_csv('Datasets/CDR.csv') df['CallDate'] = pd.to_datetime( df['CallDate'] ) df['CallTime'] = pd.to_timedelta( df['CallTime']) # # TODO: Get a distinct list of "In" phone numbers (users) and store the values in a # regular python list. # Hint: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.tolist.html # import numpy as np u = np.unique(df['In']) unique_nums = u.tolist() # # TODO: Create a slice called user1 that filters to only include dataset records where the # "In" feature (user phone number) is equal to the first number on your unique list above; # that is, the very first number in the dataset # user1 = df[df['In'] == unique_nums[0]] # INFO: Plot all the call locations user1.plot.scatter(x='TowerLon', y='TowerLat', c='gray', marker='o', alpha=0.1, title='Call Locations') # showandtell() # Comment this line out when you're ready to proceed # # INFO: The locations map above should be too "busy" to really wrap your head around. This # is where domain expertise comes into play. Your intuition tells you that people are likely # to behave differently on weekends: # # On Weekdays: # 1. People probably don't go into work # 2. They probably sleep in late on Saturday # 3. They probably run a bunch of random errands, since they couldn't during the week # 4. They should be home, at least during the very late hours, e.g. 1-4 AM # # On Weekdays: # 1. People probably are at work during normal working hours # 2. They probably are at home in the early morning and during the late night # 3. They probably spend time commuting between work and home everyday # # TODO: Add more filters to the user1 slice you created. Add bitwise logic so that you're # only examining records that came in on weekends (sat/sun). # user1 = user1[(user1['DOW'] == 'Sat') | (user1['DOW'] == 'Sun')] # # TODO: Further filter it down for calls that are came in either before 6AM OR after 10pm (22:00:00). # You can use < and > to compare the string times, just make sure you code them as military time # strings, eg: "06:00:00", "22:00:00": https://en.wikipedia.org/wiki/24-hour_clock # # You might also want to review the Data Manipulation section for this. Once you have your filtered # slice, print out its length: # user1a = user1[('06:00:00' > user1['CallTime']) | (user1['CallTime'] > '22:00:00')] # # INFO: Visualize the dataframe with a scatter plot as a sanity check. Since you're familiar # with maps, you know well that your X-Coordinate should be Longitude, and your Y coordinate # should be the tower Latitude. Check the dataset headers for proper column feature names. 
# https://en.wikipedia.org/wiki/Geographic_coordinate_system#Geographic_latitude_and_longitude # # At this point, you don't yet know exactly where the user is located just based off the cell # phone tower position data; but considering the below are for Calls that arrived in the twilight # hours of weekends, it's likely that wherever they are bunched up is probably near where the # caller's residence: fig = plt.figure() ax = fig.add_subplot(111) ax.scatter(user1a.TowerLon, user1a.TowerLat, c='g', marker='o', alpha=0.2) # user1.plot.scatter(user1.TowerLon, user1.TowerLat, c='gray', alpha=0.1, title='Weekend Twilight Calls') # showandtell() # TODO: Comment this line out when you're ready to proceed # # TODO: Run K-Means with a K=1. There really should only be a single area of concentration. If you # notice multiple areas that are "hot" (multiple areas the usr spends a lot of time at that are FAR # apart from one another), then increase K=2, with the goal being that one of the centroids will # sweep up the annoying outliers; and the other will zero in on the user's approximate home location. # Or rather the location of the cell tower closest to their home..... # # Be sure to only feed in Lat and Lon coordinates to the KMeans algo, since none of the other # data is suitable for your purposes. Since both Lat and Lon are (approximately) on the same scale, # no feature scaling is required. Print out the centroid locations and add them onto your scatter # plot. Use a distinguishable marker and color. # # Hint: Make sure you graph the CORRECT coordinates. This is part of your domain expertise. # from sklearn.cluster import KMeans user1b = user1a[['TowerLon', 'TowerLat']] model = KMeans(n_clusters = 7) model.fit(user1b) # # INFO: Print and plot the centroids... centroids = model.cluster_centers_ ax.scatter(centroids[:,0], centroids[:,1], marker='x', c='blue', alpha=0.5, linewidths=3, s=169) print('centroids:', centroids) # showandtell() # TODO: Comment this line out when you're ready to proceed # # TODO: Repeat the above steps for all 10 individuals, being sure to record their approximate home # locations. You might want to use a for-loop, unless you enjoy typing. # for index,item in enumerate(unique_nums): user = df[df['In'] == unique_nums[index]] user = user[('06:00:00' > user['CallTime']) | (user['CallTime'] > '22:00:00')] user = user[(user['DOW'] == 'Sat') | (user['DOW'] == 'Sun')] user = user[['TowerLon', 'TowerLat']] model = KMeans(n_clusters = 7) model.fit(user) centroids = model.cluster_centers_ ax.scatter(centroids[:,0], centroids[:,1], marker='x', c='blue', alpha=0.5, linewidths=3, s=169) print(item, centroids)
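To turn the seven centroids per user into a single "likely home" guess, one option is to keep the centroid of the most populated cluster. A sketch, assuming the fitted `model` from the loop above; `labels_` and `cluster_centers_` are standard attributes of a fitted scikit-learn KMeans.

import numpy as np

# The cluster holding the most weekend twilight calls is the best
# candidate for the tower nearest the user's home.
counts = np.bincount(model.labels_)
home_guess = model.cluster_centers_[np.argmax(counts)]
print('Most likely home tower (lon, lat):', home_guess)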
Monday: Other reviews (nostalgia, books, other music); Friday: Articles.
John ADAMS (b. 1947) American Berserk - Liviu Neagu-Gruber, Axel Hess (violins), Jens Brockman (viola), Michael Hablitzel (cello), Holger Groschopp, Majella Stockhausen (piano) rec. 2017 CRAGG FOUNDATION CF003 SACD [77:08] [SB] An enterprising collection of Adams’s chamber and piano works.
Ludwig van BEETHOVEN (1770-1827) Violin sonatas Nos 7 & 9 Johannes BRAHMS (1833-1897) Scherzo - Max Rostal (violin) Franz Osborn (piano) rec. 1949/52 FORGOTTEN RECORDS FR1394 [69:58] [SG] A welcome return to the catalogue.
RECORDING OF THE MONTH Pavel CHESNOKOV (1877-1944) Teach Me Thy Statutes - PaTRAM Institute Male Ch/Vladimir Gorbik rec. 2016 REFERENCE RECORDINGS FR-727 SACD [67:14] [SSi] An exceptional disc that any fan of choral music, and the Russian choral tradition in particular, would love.
Vincent PERSICHETTI (1915-1987) Solo Piano Works Vol. 1 - Myron Silberstein (piano) rec. 2015 CENTAUR RECORDS CRC3632 [71:35] [RB] Delightful that this music is in such sympathetic hands.
Franz SCHUBERT (1797-1828) Symphony No. 9 Luciano BERIO (1925-2003) Rendering - Solistes Européens Luxembourg/Christoph König RUBICON RCD1025 [78:31] [LW] Eloquent Berio and spirited Schubert.
Dmitri SHOSTAKOVICH (1906-1975) Piano Quintet, String Quartet No.3 - Piotr Anderszewski (piano), Belcea Qt rec. 2017 ALPHA CLASSICS 360 [69:13] [BW] Well worth considering if the coupling appeals.
rec. 2016 CPO 555 085-2 [61:22] [JV] This is a delightful disc, which no Telemann lover, nor any lover of wind music, should miss.
RECORDING OF THE MONTH In Sorrow’s Footsteps Works by Jackson, Palestrina, Allegri & Macmillan - The Marian Consort/Rory McCleery rec. 2018 DELPHIAN DCD34215 [63:19] [JQ] The Marian Consort’s splendid celebration of ten years of music-making.
Ghiselin DANCKERTS (1510-1567) Missa de Beata Virgine - Cantar Lontano/Marco Mencobani rec. 2009 PAN CLASSICS PC10327 [51:05] [RHa] An uneven recording fails to dent my enthusiasm for the quasi-improvisatory ecstasies of this peculiar work.
Christoph Willibald GLUCK (1714-1787) Orfeo ed Euridice - Philippe Jaroussky, Amanda Forsythe, I Barocchisti, Diego Fasolis rec. 2016/17 ERATO 9029566023 [77:38] [SSi] A very well performed recording of Gluck’s most famous opera, but the drastic cuts made in this 1774 version make it very difficult for me to recommend it.
Henry LAWES (1602-1645) Complete Music for Solo Lyra Viol - Richard Boothby (lyra viol) rec. 2015 HARMONIA MUNDI HMU907625 [59.37] [EML] Quite intense and introspective to listen to all the way through, but very rewarding.
Wolfgang Amadeus MOZART (1756-1791) Flute Concertos Nos. 1 and 2; Josef MYSLIVEČEK (1737-1781) Flute Concerto - Ana de la Vega (flute), English CO rec. 2016 PENTATONE PTC5186723 SACD [62:58] [BW] Very fine performances of the Mozart made even more attractive by the addition of the Mysliveček.
Edmund RUBBRA (1901-1986) Symphonies No. 2 & 4 - BBC SO/Sir Adrian Boult, Edmund Rubbra rec. 1942/54 SOMM CÉLESTE SOMMCD0179 [72:33] [JQ] An important historical addition to the Rubbra discography.
Wilhelm STENHAMMAR (1871-1927) Symphony No. 2; Music for ‘A Dream Play’ - Antwerp SO/Christian Lindberg rec. 2017 BIS BIS-2329 SACD [58:23] [DM] A rare misstep for Lindberg/BIS.
Richard STRAUSS (1864-1949) Der Rosenkavalier - Renée Fleming (soprano) Elina Garanca (mezzo) Günther Groissböck (bass) Metropolitan Opera O & Ch/Sebastian Weigle rec. 2017 DECCA Blu-ray 0743945 [227 mins] [RJF] The two main singers here inhabit their roles superbly.
Rêverie Sergey PROKOFIEV (1891-1953) Symphony No. 1 - Philharmonia O/Thomas Schippers, Pro Arte O/Sir Eugene Goossens rec. 1956-58 FIRST HAND RECORDS FHR16 [64:24] [RB] Performances of Russian music that will more than hold the attention.
Kalevi AHO (b. 1949) Wind Quintets - BPO Wind Quintet rec. 2014 BIS BIS-2176 SACD [54:42] Timpani Concerto, Piano Concerto No.1 - Ari-Pekka Mäenpää (timpani), Sonja Fräki (piano), Turku PO/Erkki Lasonpalo; Eva Ollikainen rec. 2017 BIS BIS-2306 SACD [60:17] Soprano Saxophone Concerto, Piano and Wind Quintet, Solo I - Anders Paulsson (soprano saxophone), Jaakko Kuusisto (violin), Lapland CO/John Storgårds rec. 2015/17 BIS BIS-2216 SACD [58:41] [DM] Music that’s finely crafted and unfailingly accessible; the timpani concerto is a real treat.
Sir Granville BANTOCK (1868-1946) Bantock Rediscovered - Maria Marchant (piano) rec. 2017 SOMM RECORDINGS SOMMCD0183 [73:43] [IL] Another CD of great interest to all admirers of Bantock.
Ludwig van BEETHOVEN (1770-1827) The Complete String Quartets Vol. 1 - Cuarteto Casals rec. 2015/2017 HARMONIA MUNDI HMM902400.02 [181:22] [SSi] This is a wonderful beginning to what promises to be an insightful cycle of the Beethoven string quartets.
Ludwig van BEETHOVEN (1770-1827) & Dmitri SHOSTAKOVICH (1906-1975) Symphonies No. 1 - Dresdner Philharmonie/Michael Sanderling rec. 2017 SONY CLASSICAL 88985492782 [60.27] [MC] These performances of these first symphonies of Beethoven and Shostakovich make an exhilarating listening experience.
Peter CROSSLEY-HOLLAND (1916-2001) Symphony Eugene GOOSSENS (1893-1962) Variations on ‘Cadet Rousselle’ John IRELAND (1879-1962) Orchestral works - Royal Scottish Ntl O/Martin Yates rec. 2008 DUTTON EPOCH CDLX7215 [72:03] [RB] A disc of world premieres most eloquently done.
Heino ELLER (1887-1970) Complete Piano Music Vol. 6 - Sten Lassmann (piano) rec. 2014 TOCCATA CLASSICS TOCC0475 [64:53] [PRB] Estonian pianist Sten Lassmann's superb playing shows great empathy with the work of his compatriot.
Joseph RHEINBERGER (1839-1901) Violin Sonatas - Thomas Schrott (violin) Piero Barbareschi (piano) rec. 2016 BRILLIANT CLASSICS 95635 [45:21] [BSt] This disc’s short measure turns out to be a virtue of sorts.
Georg Philipp TELEMANN (1681-1767) Fantasias for solo violin - Kinga Augustyn (violin) rec. 2015 CENTAUR CRC3607 [71:37] [DBi] Kinga Augustyn's performance of Telemann's lovely pieces makes for splendid listening.
Kalevi AHO (b. 1949) Solo I, Violin sonata, In Memoriam Per Henrik Nordgren Einojuhani RAUTAVAARA (1928-2016) Variétude Per Henrik NORDGREN (1944-2008) Violin sonata - Renate Eggebrecht (violin) rec. 2017 TROUBADISC TRO-CD01452 [59:39] [SSi] A must for all devotees of modern Finnish music.
Anton BRUCKNER (1824-1896) Symphony No. 3 - Munich PO/Valery Gergiev rec. 2017 MÜNCHNER PHILHARMONIKER 9305211251 [55.29] [MC] An outstanding recording that I find both satisfying and compelling.
Antonín DVOŘÁK (1841-1907) Piano Quintets, Bagatelles - Busch Trio, Maria Milstein (violin) Miguel da Silva (viola) rec. 2017 ALPHA CLASSICS 403 [80:41] [PRB] Well worth considering, if not perhaps quite the best around.
Enjott SCHNEIDER (b. 1950) Magic of Irreality - Siberian State SO Krasnoyarsk/Vladimir Lande rec. 2012 WERGO WER51182 [59:35] [DC] Massively entertaining, or corny as…?
Antonio VIVALDI (1678-1741) String concertos III; Viola d'amore concertos - Alessandro Tampieri (viola d’amore); Accademia Bizantina/Ottavio Dantone rec. 2017 NAÏVE OP30570 [66:21 + 47:57] [BW] I may prefer other recordings in places, but overall, I’m very happy to add this to my sizeable Desert Island Vivaldi assemblage.
Claudio Abbado - The Last Concert Works by Berlioz & Mendelssohn - Berlin Philharmonic rec. 2013 BERLINER PHILHARMONIKER RECORDINGS BPHR160089 SACD [40:11 + 55:46] [MG] Late Abbado here offers live performances emotionally more engaged and dramatic than those of illustrious contemporaries, repackaged at an attractive price.
Jiří Bělohlávek (conductor) Recollection rec. 1980-2006 SUPRAPHON SU4250-2 [8 CDs: 584 mins] [JW] A fair and thoughtful summation of Bělohlávek's studio legacy that demonstrates the high level of interpretative control and consistency he displayed throughout his career.
Tossy Spivakovsky (violin) Live Performances rec. 1943-69 DOREMI DHR-8025-8 [4 CDs: 300:57] [SG] A valuable addition to the Spivakovsky discography.
RECORDING OF THE MONTH Johann Sebastian BACH (1685-1750) Dialogkantaten - Sophie Karthäuser (soprano) Michael Volle (bass) RIAS Kammerchor, Akademie für Alte Musik Berlin/Raphael Alpermann rec. 2017 HARMONIA MUNDI HMM902368 [64:38] [SSi] One of the best Bach cantata discs I have heard for some time.
Mauro GIULIANI (1781-1829) Opere solistiche per voce e chitarra - Rossana Bertini (soprano) Davide Ficco (guitar) rec. 2013 TACTUS TC780703 [69:16] [JV] Not only guitar aficionados but also lovers of early 19th-century Italian opera will enjoy this disc.
Ildebrando PIZZETTI (1880-1968) Songs - Hanna Hipp (mezzo soprano), Emma Abbate (piano) rec. 2017 RESONUS RES10209 [55:20] [JW] This latest Resonus disc has certainly fulfilled its brief.
Camille SAINT-SAËNS (1835-1921) Music for Piano Duo and Duet, Volume 2 - Martin Jones and Adrian Farmer (pianos) rec. 2015/16 NIMBUS NI5941 [68:47] [JWe] I hope this duo go on to record more volumes of these works.
Heinrich SCHÜTZ (1585-1672) Symphoniæ sacræ II - Dorothee Mields (soprano) Instrumental Ens/Hans-Christoph Rademann rec. 2017 CARUS 83.274 [68:36 + 65:10] [BW] The eighteenth recording in this Carus series maintains the very high standards established to date.
Kurt Masur (conductor) Works by Beethoven, Haydn, Dessau & Mahler - Annelies Burmeister (contralto) Berlin Staatskapelle rec. 1967 ORCHESTRAL CONCERT CDs CD13/2011 [68.36] [DBi] More memorable performances restored from the archive, including a rare piece by Dessau.
The Organ of St Bavo, Haarlem Joseph Nolan (organ) rec. 2017 SIGNUM CLASSICS SIGCD546 [70:08] [DC] A uniquely special venue and a fine recital.
Romance de Guerre Works by Elgar, Fairchild & Gaubert - Ambroise Aubrun (violin), Steven Vanhauwaert (piano) rec. 2017 HORTUS 726 [71:15] [JQ] An excellent disc of violin sonatas from the era of the Great War.
#!/usr/bin/env python2 ''' Processes asm.js code to make it run in an emterpreter. Currently this requires the asm.js code to have been built with -s FINALIZE_ASM_JS=0 ''' import os, sys, re, json import asm_module, shared, shutil # params INNERTERPRETER_LAST_OPCODE = 0 # 'CONDD' EMT_STACK_MAX = 1024*1024 LOG_CODE = os.environ.get('EMCC_LOG_EMTERPRETER_CODE') ZERO = False ASYNC = False ASSERTIONS = False PROFILING = False SWAPPABLE = False FROUND = False ADVISE = False MEMORY_SAFE = False def handle_arg(arg): global ZERO, ASYNC, ASSERTIONS, PROFILING, FROUND, ADVISE, MEMORY_SAFE if '=' in arg: l, r = arg.split('=') if l == 'ZERO': ZERO = int(r) elif l == 'ASYNC': ASYNC = int(r) elif l == 'ASSERTIONS': ASSERTIONS = int(r) elif l == 'PROFILING': PROFILING = int(r) elif l == 'FROUND': FROUND = int(r) elif l == 'ADVISE': ADVISE = int(r) elif l == 'MEMORY_SAFE': MEMORY_SAFE = int(r) return False return True DEBUG = os.environ.get('EMCC_DEBUG') config = shared.Configuration() temp_files = config.get_temp_files() if DEBUG: print >> sys.stderr, 'running emterpretify on', sys.argv if FROUND: shared.Settings.PRECISE_F32 = 1 sys.argv = filter(handle_arg, sys.argv) # consts BLACKLIST = set(['_malloc', '_free', '_memcpy', '_memmove', '_memset', 'copyTempDouble', 'copyTempFloat', '_strlen', 'stackAlloc', 'setThrew', 'stackRestore', 'setTempRet0', 'getTempRet0', 'stackSave', 'runPostSets', '_emscripten_autodebug_double', '_emscripten_autodebug_float', '_emscripten_autodebug_i8', '_emscripten_autodebug_i16', '_emscripten_autodebug_i32', '_emscripten_autodebug_i64', '_strncpy', '_strcpy', '_strcat', '_saveSetjmp', '_testSetjmp', '_emscripten_replace_memory', '_bitshift64Shl', '_bitshift64Ashr', '_bitshift64Lshr', 'setAsyncState', 'emtStackSave']) WHITELIST = [] YIELDLIST = ['stackSave', 'stackRestore', 'stackAlloc', 'setThrew', '_memset'] # functions which are ok to run while doing a sleep_with_yield. SYNC_FUNCS = set(['_emscripten_sleep', '_emscripten_sleep_with_yield', '_emscripten_wget_data', '_emscripten_idb_load', '_emscripten_idb_store', '_emscripten_idb_delete']) OPCODES = [ # l, lx, ly etc - one of 256 locals 'SET', # [lx, ly, 0] lx = ly (int or float, not double) 'SETVI', # [l, vl, vh] l = v (16-bit signed int) 'SETVIB', # [l, 0, 0] [..v..] 
l = 32-bit int in next 32-bit instruction 'ADD', # [lx, ly, lz] lx = ly + lz (32-bit int) 'SUB', # [lx, ly, lz] lx = ly - lz (32-bit int) 'MUL', # [lx, ly, lz] lx = ly * lz (32-bit int) 'SDIV', # [lx, ly, lz] lx = ly / lz (32-bit signed int) 'UDIV', # [lx, ly, lz] lx = ly / lz (32-bit unsigned int) 'SMOD', # [lx, ly, lz] lx = ly % lz (32-bit signed int) 'UMOD', # [lx, ly, lz] lx = ly % lz (32-bit unsigned int) 'NEG', # [lx, ly, 0] lx = -ly (int) 'BNOT', # [lx, ly, 0] ly = ~ly (int) 'LNOT', # [lx, ly, 0] ly = !ly (int) 'EQ', # [lx, ly, lz] lx = ly == lz (32-bit int) 'NE', # [lx, ly, lz] lx = ly != lz (32-bit int) 'SLT', # [lx, ly, lz] lx = ly < lz (32-bit signed) 'ULT', # [lx, ly, lz] lx = ly < lz (32-bit unsigned) 'SLE', # [lx, ly, lz] lx = ly <= lz (32-bit signed) 'ULE', # [lx, ly, lz] lx = ly <= lz (32-bit unsigned) 'AND', # [lx, ly, lz] lx = ly & lz 'OR', # [lx, ly, lz] lx = ly | lz 'XOR', # [lx, ly, lz] lx = ly ^ lz 'SHL', # [lx, ly, lz] lx = ly << lz 'ASHR', # [lx, ly, lz] lx = ly >> lz 'LSHR', # [lx, ly, lz] lx = ly >>> lz 'ADDV', # [lx, ly, v] lx = ly + v (32-bit int, v is 8-bit signed) 'SUBV', 'MULV', 'SDIVV', 'UDIVV', # (v is 8-bit unsigned) 'SMODV', 'UMODV', # (v is 8-bit unsigned) 'EQV', 'NEV', 'SLTV', 'ULTV', # (v is 8-bit unsigned) 'SLEV', 'ULEV', # (v is 8-bit unsigned) 'ANDV', 'ORV', 'XORV', 'SHLV', # (v is 8-bit unsigned) 'ASHRV', # (v is 8-bit unsigned) 'LSHRV', # (v is 8-bit unsigned) 'LNOTBRF', # [cond] [absolute-target] cond+branch 'EQBRF', 'NEBRF', 'SLTBRF', 'ULTBRF', 'SLEBRF', 'ULEBRF', 'LNOTBRT', 'EQBRT', 'NEBRT', 'SLTBRT', 'ULTBRT', 'SLEBRT', 'ULEBRT', 'SETD', # [lx, ly, 0] lx = ly (double) 'SETVD', # [lx, vl, vh] lx = ly (16 bit signed int, converted into double) 'SETVDI', # [lx, 0, 0] [..v..] lx = v (32 bit signed int, converted into double) 'SETVDF', # [lx, 0, 0] [..v..] lx = v (32 bit float, converted into double) 'SETVDD', # [lx, 0, 0][.v.][.v.] 
lx = v (64 bit double) 'ADDD', # [lx, ly, lz] lx = ly + lz (double) 'SUBD', # [lx, ly, lz] lx = ly - lz (double) 'MULD', # [lx, ly, lz] lx = ly * lz (double) 'DIVD', # [lx, ly, lz] lx = ly / lz (double) 'MODD', # [lx, ly, lz] lx = ly % lz (double) 'NEGD', # [lx, ly, 0] lx = -ly (double) 'EQD', # [lx, ly, lz] lx = ly == lz (double) 'NED', # [lx, ly, lz] lx = ly != lz (double) 'LTD', # [lx, ly, lz] lx = ly < lz (signed) 'LED', # [lx, ly, lz] lx = ly < lz (double) 'GTD', # [lx, ly, lz] lx = ly <= lz (double) 'GED', # [lx, ly, lz] lx = ly <= lz (double) 'D2I', # [lx, ly, 0] lx = ~~ly (double-to-int) 'SI2D', # [lx, ly, 0] lx = +ly (signed int-to-double) 'UI2D', # [lx, ly, 0] lx = +ly (unsigned int-to-double) 'LOAD8', # [lx, ly, 0] lx = HEAP8[ly >> 0] 'LOADU8', # [lx, ly, 0] lx = HEAPU8[ly >> 0] 'LOAD16', # [lx, ly, 0] lx = HEAP16[ly >> 1] 'LOADU16', # [lx, ly, 0] lx = HEAPU16[ly >> 1] 'LOAD32', # [lx, ly, 0] lx = HEAP32[ly >> 2] - no need for unsigned version, this is set to a register anyhow 'STORE8', # [lx, ly, 0] HEAP8[lx >> 2] = ly 'STORE16', # [lx, ly, 0] HEAP16[lx >> 2] = ly 'STORE32', # [lx, ly, 0] HEAP32[lx >> 2] = ly 'LOADF64', # [lx, ly, 0] lx = HEAPF64[ly >> 3] 'STOREF64', # [lx, ly, 0] HEAPF64[lx >> 3] = ly 'LOADF32', # [lx, ly, 0] lx = HEAPF32[ly >> 3] 'STOREF32', # [lx, ly, 0] HEAPF32[lx >> 3] = ly 'LOAD8A', # [lx, ly, lz] load-add and store-add instructions, whose pointer input is a signed addition: lx = load(ly + lz), store(lx + ly) = lz 'LOADU8A', 'LOAD16A', 'LOADU16A', 'LOAD32A', 'STORE8A', 'STORE16A', 'STORE32A', 'LOADF64A', 'STOREF64A', 'LOADF32A', 'STOREF32A', 'LOAD8AV', # [lx, ly, lz] load-add and store-add instructions, whose pointer input is a signed addition: lx = load(ly + lz), store(lx + ly) = lz, where the second add op is 8-bit signed 'LOADU8AV', 'LOAD16AV', 'LOADU16AV', 'LOAD32AV', 'STORE8AV', 'STORE16AV', 'STORE32AV', 'LOADF64AV', 'STOREF64AV', 'LOADF32AV', 'STOREF32AV', 'STORE8C', 'STORE16C', 'STORE32C', 'STOREF64C', 'STOREF32C', 'BR', # [0, tl, th] jump t instructions (multiple of 4) 'BRT', # [cond, tl, th] if cond, jump t instructions (multiple of 4) 'BRF', # [cond, tl, th] if !cond, jump t instructions (multiple of 4) 'BRA', # [0, 0, 0] [addr] jump to addr 'BRTA', # [cond, 0, 0] [addr] if cond, jump to addr 'BRFA', # [cond, 0, 0] [addr] if !cond, jump to addr 'COND', # [out, cond, x] [y] out = cond ? x : y, int 'CONDD', # [out, cond, x] [y] out = cond ? x : y, double 'GETTDP', # [l, 0, 0] l = tempDoublePtr 'GETTR0', # [l, 0, 0] l = tempRet0 'SETTR0', # [l, 0, 0] tempRet0 = l 'GETGLBI', # [l, vl, vh] get global value, int, indexed by v 'GETGLBD', # [l, vl, vh] get global value, double, indexed by v 'SETGLBI', # [vl, vh, l] set global value, int, indexed by v (v = l) 'SETGLBD', # [vl, vh, l] set global value, double, indexed by v (v = l) 'INTCALL', # [lx, 0, 0] [target] [params] (lx = ) target(params..) # Internal, emterpreter-to-emterpreter call. 'EXTCALL', # [lx, targetl, targeth] [params...] (lx = ) target(params..) lx's existence and type depend on the target's actual callsig; # this instruction can take multiple 32-bit instruction chunks # if target is a function table, then the first param is the index of the register holding the function pointer 'GETST', # [l, 0, 0] l = STACKTOP 'SETST', # [l, 0, 0] STACKTOP = l 'SWITCH', # [lx, ly, lz] switch (lx) { .. }. 
followed by a jump table for values in range [ly..ly+lz), after which is the default (which might be empty) 'RET', # [l, 0, 0] return l (depending on which emterpreter_x we are in, has the right type) 'FUNC', # [num params, total locals (low 8 bits), total locals (high 8 bits)] [which emterpreter (0 = normal, 1 = zero), 0, 0, 0] function with n locals (each taking 64 bits), of which the first are params # this is read in the emterpreter prelude, and also in intcalls # slow locals support - copying from/to slow locals 'FSLOW', # [lx, lyl, lyh] lx = ly (int or float, not double; ly = lyl,lyh 'FSLOWD', # [lx, lyl, lyh] lx = ly (double) 'TSLOW', # [lxl, lxh, ly] lx = ly (int or float, not double; lx = lxl,lxh 'TSLOWD', # [lxl, lxh, ly] lx = ly (double; lx = lxl,lxh) ] if FROUND: OPCODES.append( 'FROUND', # [lx, ly] lx = Math.fround(ly), rounds doubles to floats ) def randomize_opcodes(): global OPCODES import random random.shuffle(opcodes) print OPCODES #randomize_opcodes() assert len(OPCODES) == len(set(OPCODES)) # no dupe names assert len(OPCODES) < 256 ROPCODES = {} for i in range(len(OPCODES)): ROPCODES[OPCODES[i]] = i GLOBAL_BASE = 256*8 # utils settings = { 'PRECISE_F32': 0 } # TODO def bytify(x): assert x >= 0 and x < (1 << 32) return [x & 255, (x >> 8) & 255, (x >> 16) & 255, (x >> 24) & 255] def next_power_of_two(x): if x == 0: return 0 ret = 1 while ret < x: ret <<= 1 return ret def get_access(l, s='i', base='sp', offset=None): if offset is not None: offset = '+ ' + str(offset) + ' ' else: offset = '' if s == 'i': return 'HEAP32[' + str(base) + ' + (' + l + ' << 3) ' + offset + '>> 2]' elif s == 'd' or s == 'f': return 'HEAPF64[' + str(base) + ' + (' + l + ' << 3) ' + offset + '>> 3]' else: assert 0 def get_coerced_access(l, s='i', unsigned=False, base='sp', offset=None): if s == 'i': if not unsigned: return get_access(l, s, base, offset) + '|0' else: return get_access(l, s, base, offset) + '>>>0' elif s == 'd' or s == 'f': return '+' + get_access(l, s, base, offset) else: assert 0 def make_assign(left, right, temp): # safely assign, taking into account memory safety if not MEMORY_SAFE: return left + ' = ' + right + ';' return temp + ' = ' + right + '; ' + left + ' = ' + temp + ';' CASES = {} CASES[ROPCODES['SET']] = get_access('lx') + ' = ' + get_coerced_access('ly') + ';' CASES[ROPCODES['GETST']] = get_access('lx') + ' = STACKTOP;' CASES[ROPCODES['SETST']] = 'STACKTOP = ' + get_coerced_access('lx') + ';' CASES[ROPCODES['SETVI']] = get_access('lx') + ' = inst >> 16;' CASES[ROPCODES['SETVIB']] = 'pc = pc + 4 | 0; ' + get_access('lx') + ' = HEAP32[pc >> 2] | 0;' CASES[ROPCODES['ADD']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') + (' + get_coerced_access('lz') + ') | 0;' CASES[ROPCODES['SUB']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') - (' + get_coerced_access('lz') + ') | 0;' CASES[ROPCODES['MUL']] = make_assign(get_access('lx'), 'Math_imul(' + get_coerced_access('ly') + ', ' + get_coerced_access('lz') + ') | 0', 'ly') CASES[ROPCODES['SDIV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') / (' + get_coerced_access('lz') + ') | 0;' CASES[ROPCODES['UDIV']] = get_access('lx') + ' = (' + get_coerced_access('ly', unsigned=True) + ') / (' + get_coerced_access('lz', unsigned=True) + ') >>> 0;' CASES[ROPCODES['SMOD']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') % (' + get_coerced_access('lz') + ') | 0;' CASES[ROPCODES['UMOD']] = get_access('lx') + ' = (' + get_coerced_access('ly', unsigned=True) + ') % (' + 
get_coerced_access('lz', unsigned=True) + ') >>> 0;' CASES[ROPCODES['NEG']] = get_access('lx') + ' = -(' + get_coerced_access('ly') + ');' CASES[ROPCODES['BNOT']] = get_access('lx') + ' = ~(' + get_coerced_access('ly') + ');' CASES[ROPCODES['LNOT']] = get_access('lx') + ' = !(' + get_coerced_access('ly') + ');' CASES[ROPCODES['EQ']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') == (' + get_coerced_access('lz') + ') | 0;' CASES[ROPCODES['NE']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') != (' + get_coerced_access('lz') + ') | 0;' CASES[ROPCODES['SLT']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') < (' + get_coerced_access('lz') + ') | 0;' CASES[ROPCODES['ULT']] = get_access('lx') + ' = (' + get_coerced_access('ly', unsigned=True) + ') < (' + get_coerced_access('lz', unsigned=True) + ') | 0;' CASES[ROPCODES['SLE']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') <= (' + get_coerced_access('lz') + ') | 0;' CASES[ROPCODES['ULE']] = get_access('lx') + ' = (' + get_coerced_access('ly', unsigned=True) + ') <= (' + get_coerced_access('lz', unsigned=True) + ') | 0;' CASES[ROPCODES['AND']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') & (' + get_coerced_access('lz') + ');' CASES[ROPCODES['OR']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') | (' + get_coerced_access('lz') + ');' CASES[ROPCODES['XOR']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') ^ (' + get_coerced_access('lz') + ');' CASES[ROPCODES['SHL']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') << (' + get_coerced_access('lz') + ');' CASES[ROPCODES['ASHR']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') >> (' + get_coerced_access('lz') + ');' CASES[ROPCODES['LSHR']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') >>> (' + get_coerced_access('lz') + ');' CASES[ROPCODES['ADDV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') + (inst >> 24) | 0;' CASES[ROPCODES['SUBV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') - (inst >> 24) | 0;' CASES[ROPCODES['MULV']] = make_assign(get_access('lx'), 'Math_imul(' + get_coerced_access('ly') + ', inst >> 24) | 0', 'ly') CASES[ROPCODES['SDIVV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') / (inst >> 24) | 0;' CASES[ROPCODES['UDIVV']] = get_access('lx') + ' = (' + get_coerced_access('ly', unsigned=True) + ') / (lz >>> 0) >>> 0;' CASES[ROPCODES['SMODV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') % (inst >> 24) | 0;' CASES[ROPCODES['UMODV']] = get_access('lx') + ' = (' + get_coerced_access('ly', unsigned=True) + ') % (lz >>> 0) >>> 0;' CASES[ROPCODES['EQV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') == (inst >> 24) | 0;' CASES[ROPCODES['NEV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') != (inst >> 24) | 0;' CASES[ROPCODES['SLTV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') < (inst >> 24) | 0;' CASES[ROPCODES['ULTV']] = get_access('lx') + ' = (' + get_coerced_access('ly', unsigned=True) + ') < (lz >>> 0) | 0;' CASES[ROPCODES['SLEV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') <= (inst >> 24) | 0;' CASES[ROPCODES['ULEV']] = get_access('lx') + ' = (' + get_coerced_access('ly', unsigned=True) + ') <= (lz >>> 0) | 0;' CASES[ROPCODES['ANDV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') & (inst >> 24);' CASES[ROPCODES['ORV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') | (inst >> 24);' CASES[ROPCODES['XORV']] = 
get_access('lx') + ' = (' + get_coerced_access('ly') + ') ^ (inst >> 24);' CASES[ROPCODES['SHLV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') << lz;' CASES[ROPCODES['ASHRV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') >> lz;' CASES[ROPCODES['LSHRV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') >>> lz;' CASES[ROPCODES['LNOTBRF']] = 'if (' + get_coerced_access('ly') + ') { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; } else { pc = pc + 4 | 0; }' CASES[ROPCODES['EQBRF']] = 'if ((' + get_coerced_access('ly') + ') == (' + get_coerced_access('lz') + ')) { pc = pc + 4 | 0; } else { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }' CASES[ROPCODES['NEBRF']] = 'if ((' + get_coerced_access('ly') + ') != (' + get_coerced_access('lz') + ')) { pc = pc + 4 | 0; } else { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }' CASES[ROPCODES['SLTBRF']] = 'if ((' + get_coerced_access('ly') + ') < (' + get_coerced_access('lz') + ')) { pc = pc + 4 | 0; } else { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }' CASES[ROPCODES['ULTBRF']] = 'if ((' + get_coerced_access('ly', unsigned=True) + ') < (' + get_coerced_access('lz', unsigned=True) + ')) { pc = pc + 4 | 0; } else { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }' CASES[ROPCODES['SLEBRF']] = 'if ((' + get_coerced_access('ly') + ') <= (' + get_coerced_access('lz') + ')) { pc = pc + 4 | 0; } else { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }' CASES[ROPCODES['ULEBRF']] = 'if ((' + get_coerced_access('ly', unsigned=True) + ') <= (' + get_coerced_access('lz', unsigned=True) + ')) { pc = pc + 4 | 0; } else { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }' CASES[ROPCODES['LNOTBRT']] = 'if (' + get_coerced_access('ly') + ') { pc = pc + 4 | 0; } else { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }' CASES[ROPCODES['EQBRT']] = 'if ((' + get_coerced_access('ly') + ') == (' + get_coerced_access('lz') + ')) { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; } else { pc = pc + 4 | 0; }' CASES[ROPCODES['NEBRT']] = 'if ((' + get_coerced_access('ly') + ') != (' + get_coerced_access('lz') + ')) { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; } else { pc = pc + 4 | 0; }' CASES[ROPCODES['SLTBRT']] = 'if ((' + get_coerced_access('ly') + ') < (' + get_coerced_access('lz') + ')) { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; } else { pc = pc + 4 | 0; }' CASES[ROPCODES['ULTBRT']] = 'if ((' + get_coerced_access('ly', unsigned=True) + ') < (' + get_coerced_access('lz', unsigned=True) + ')) { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; } else { pc = pc + 4 | 0; }' CASES[ROPCODES['SLEBRT']] = 'if ((' + get_coerced_access('ly') + ') <= (' + get_coerced_access('lz') + ')) { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; } else { pc = pc + 4 | 0; }' CASES[ROPCODES['ULEBRT']] = 'if ((' + get_coerced_access('ly', unsigned=True) + ') <= (' + get_coerced_access('lz', unsigned=True) + ')) { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; } else { pc = pc + 4 | 0; }' CASES[ROPCODES['SETD']] = get_access('lx', s='d') + ' = ' + get_coerced_access('ly', s='d') + ';' CASES[ROPCODES['SETVD']] = get_access('lx', s='d') + ' = +(inst >> 16);' CASES[ROPCODES['SETVDI']] = 'pc = pc + 4 | 0; ' + get_access('lx', s='d') + ' = +(HEAP32[pc >> 2] | 0);' CASES[ROPCODES['SETVDF']] = 'pc = pc + 4 | 0; ' + get_access('lx', s='d') + ' = +HEAPF32[pc >> 2];' CASES[ROPCODES['SETVDD']] = 'HEAP32[tempDoublePtr >> 2] = HEAP32[pc + 4 >> 2]; 
HEAP32[tempDoublePtr + 4 >> 2] = HEAP32[pc + 8 >> 2]; pc = pc + 8 | 0; ' + get_access('lx', s='d') + ' = +HEAPF64[tempDoublePtr >> 3];' CASES[ROPCODES['ADDD']] = get_access('lx', s='d') + ' = (' + get_coerced_access('ly', s='d') + ') + (' + get_coerced_access('lz', s='d') + ');' CASES[ROPCODES['SUBD']] = get_access('lx', s='d') + ' = (' + get_coerced_access('ly', s='d') + ') - (' + get_coerced_access('lz', s='d') + ');' CASES[ROPCODES['MULD']] = get_access('lx', s='d') + ' = (' + get_coerced_access('ly', s='d') + ') * (' + get_coerced_access('lz', s='d') + ');' CASES[ROPCODES['DIVD']] = get_access('lx', s='d') + ' = (' + get_coerced_access('ly', s='d') + ') / (' + get_coerced_access('lz', s='d') + ');' CASES[ROPCODES['MODD']] = get_access('lx', s='d') + ' = (' + get_coerced_access('ly', s='d') + ') % (' + get_coerced_access('lz', s='d') + ');' CASES[ROPCODES['NEGD']] = get_access('lx', s='d') + ' = -(' + get_coerced_access('ly', s='d') + ');' CASES[ROPCODES['EQD']] = get_access('lx') + ' = (' + get_coerced_access('ly', s='d') + ') == (' + get_coerced_access('lz', s='d') + ') | 0;' CASES[ROPCODES['NED']] = get_access('lx') + ' = (' + get_coerced_access('ly', s='d') + ') != (' + get_coerced_access('lz', s='d') + ') | 0;' CASES[ROPCODES['LTD']] = get_access('lx') + ' = (' + get_coerced_access('ly', s='d') + ') < (' + get_coerced_access('lz', s='d') + ') | 0;' CASES[ROPCODES['LED']] = get_access('lx') + ' = (' + get_coerced_access('ly', s='d') + ') <= (' + get_coerced_access('lz', s='d') + ') | 0;' CASES[ROPCODES['GTD']] = get_access('lx') + ' = (' + get_coerced_access('ly', s='d') + ') > (' + get_coerced_access('lz', s='d') + ') | 0;' CASES[ROPCODES['GED']] = get_access('lx') + ' = (' + get_coerced_access('ly', s='d') + ') >= (' + get_coerced_access('lz', s='d') + ') | 0;' CASES[ROPCODES['D2I']] = get_access('lx') + ' = ~~(' + get_coerced_access('ly', s='d') + ');' CASES[ROPCODES['SI2D']] = get_access('lx', s='d') + ' = +(' + get_coerced_access('ly') + ');' CASES[ROPCODES['UI2D']] = get_access('lx', s='d') + ' = +(' + get_coerced_access('ly', unsigned=True) + ');' CASES[ROPCODES['LOAD8']] = get_access('lx') + ' = ' + 'HEAP8[' + get_access('ly') + ' >> 0];' CASES[ROPCODES['LOADU8']] = get_access('lx') + ' = ' + 'HEAPU8[' + get_access('ly') + ' >> 0];' CASES[ROPCODES['LOAD16']] = get_access('lx') + ' = ' + 'HEAP16[' + get_access('ly') + ' >> 1];' CASES[ROPCODES['LOADU16']] = get_access('lx') + ' = ' + 'HEAPU16[' + get_access('ly') + ' >> 1];' CASES[ROPCODES['LOAD32']] = get_access('lx') + ' = ' + 'HEAP32[' + get_access('ly') + ' >> 2];' CASES[ROPCODES['STORE8']] = 'HEAP8[' + get_access('lx') + ' >> 0] = ' + get_coerced_access('ly') + ';' CASES[ROPCODES['STORE16']] = 'HEAP16[' + get_access('lx') + ' >> 1] = ' + get_coerced_access('ly') + ';' CASES[ROPCODES['STORE32']] = 'HEAP32[' + get_access('lx') + ' >> 2] = ' + get_coerced_access('ly') + ';' CASES[ROPCODES['LOADF64']] = get_access('lx', s='d') + ' = ' + '+HEAPF64[' + get_access('ly') + ' >> 3];' CASES[ROPCODES['STOREF64']] = 'HEAPF64[' + get_access('lx') + ' >> 3] = ' + get_coerced_access('ly', s='d') + ';' CASES[ROPCODES['LOADF32']] = get_access('lx', s='d') + ' = ' + '+HEAPF32[' + get_access('ly') + ' >> 2];' CASES[ROPCODES['STOREF32']] = 'HEAPF32[' + get_access('lx') + ' >> 2] = ' + get_coerced_access('ly', s='d') + ';' CASES[ROPCODES['LOAD8A']] = get_access('lx') + ' = ' + 'HEAP8[(' + get_coerced_access('ly') + ') + (' + get_coerced_access('lz') + ') >> 0];' CASES[ROPCODES['LOADU8A']] = get_access('lx') + ' = ' + 'HEAPU8[(' + 
get_coerced_access('ly') + ') + (' + get_coerced_access('lz') + ') >> 0];' CASES[ROPCODES['LOAD16A']] = get_access('lx') + ' = ' + 'HEAP16[(' + get_coerced_access('ly') + ') + (' + get_coerced_access('lz') + ') >> 1];' CASES[ROPCODES['LOADU16A']] = get_access('lx') + ' = ' + 'HEAPU16[(' + get_coerced_access('ly') + ') + (' + get_coerced_access('lz') + ') >> 1];' CASES[ROPCODES['LOAD32A']] = get_access('lx') + ' = ' + 'HEAP32[(' + get_coerced_access('ly') + ') + (' + get_coerced_access('lz') + ') >> 2];' CASES[ROPCODES['STORE8A']] = 'HEAP8[(' + get_coerced_access('lx') + ') + (' + get_coerced_access('ly') + ') >> 0] = ' + get_coerced_access('lz') + ';' CASES[ROPCODES['STORE16A']] = 'HEAP16[(' + get_coerced_access('lx') + ') + (' + get_coerced_access('ly') + ') >> 1] = ' + get_coerced_access('lz') + ';' CASES[ROPCODES['STORE32A']] = 'HEAP32[(' + get_coerced_access('lx') + ') + (' + get_coerced_access('ly') + ') >> 2] = ' + get_coerced_access('lz') + ';' CASES[ROPCODES['LOADF64A']] = get_access('lx', s='d') + ' = ' + '+HEAPF64[(' + get_coerced_access('ly') + ') + (' + get_coerced_access('lz') + ') >> 3];' CASES[ROPCODES['STOREF64A']] = 'HEAPF64[(' + get_coerced_access('lx') + ') + (' + get_coerced_access('ly') + ') >> 3] = ' + get_coerced_access('lz', s='d') + ';' CASES[ROPCODES['LOADF32A']] = get_access('lx', s='d') + ' = ' + '+HEAPF32[(' + get_coerced_access('ly') + ') + (' + get_coerced_access('lz') + ') >> 2];' CASES[ROPCODES['STOREF32A']] = 'HEAPF32[(' + get_coerced_access('lx') + ') + (' + get_coerced_access('ly') + ') >> 2] = ' + get_coerced_access('lz', s='d') + ';' CASES[ROPCODES['LOAD8AV']] = get_access('lx') + ' = ' + 'HEAP8[(' + get_coerced_access('ly') + ') + (inst >> 24) >> 0];' CASES[ROPCODES['LOADU8AV']] = get_access('lx') + ' = ' + 'HEAPU8[(' + get_coerced_access('ly') + ') + (inst >> 24) >> 0];' CASES[ROPCODES['LOAD16AV']] = get_access('lx') + ' = ' + 'HEAP16[(' + get_coerced_access('ly') + ') + (inst >> 24) >> 1];' CASES[ROPCODES['LOADU16AV']] = get_access('lx') + ' = ' + 'HEAPU16[(' + get_coerced_access('ly') + ') + (inst >> 24) >> 1];' CASES[ROPCODES['LOAD32AV']] = get_access('lx') + ' = ' + 'HEAP32[(' + get_coerced_access('ly') + ') + (inst >> 24) >> 2];' CASES[ROPCODES['STORE8AV']] = 'HEAP8[(' + get_coerced_access('lx') + ') + (ly << 24 >> 24) >> 0] = ' + get_coerced_access('lz') + ';' CASES[ROPCODES['STORE16AV']] = 'HEAP16[(' + get_coerced_access('lx') + ') + (ly << 24 >> 24) >> 1] = ' + get_coerced_access('lz') + ';' CASES[ROPCODES['STORE32AV']] = 'HEAP32[(' + get_coerced_access('lx') + ') + (ly << 24 >> 24) >> 2] = ' + get_coerced_access('lz') + ';' CASES[ROPCODES['LOADF64AV']] = get_access('lx', s='d') + ' = ' + '+HEAPF64[(' + get_coerced_access('ly') + ') + (inst >> 24) >> 3];' CASES[ROPCODES['STOREF64AV']] = 'HEAPF64[(' + get_coerced_access('lx') + ') + (ly << 24 >> 24) >> 3] = ' + get_coerced_access('lz', s='d') + ';' CASES[ROPCODES['LOADF32AV']] = get_access('lx', s='d') + ' = ' + '+HEAPF32[(' + get_coerced_access('ly') + ') + (inst >> 24) >> 2];' CASES[ROPCODES['STOREF32AV']] = 'HEAPF32[(' + get_coerced_access('lx') + ') + (ly << 24 >> 24) >> 2] = ' + get_coerced_access('lz', s='d') + ';' CASES[ROPCODES['STORE8C']] = 'HEAP8[' + get_access('lx') + ' >> 0] = HEAP8[' + get_access('ly') + ' >> 0] | 0;' CASES[ROPCODES['STORE16C']] = 'HEAP16[' + get_access('lx') + ' >> 1] = HEAP16[' + get_access('ly') + ' >> 1] | 0;' CASES[ROPCODES['STORE32C']] = 'HEAP32[' + get_access('lx') + ' >> 2] = HEAP32[' + get_access('ly') + ' >> 2] | 0;' CASES[ROPCODES['STOREF32C']] = 
'HEAPF32[' + get_access('lx') + ' >> 2] = +HEAPF32[' + get_access('ly') + ' >> 2];' CASES[ROPCODES['STOREF64C']] = 'HEAPF64[' + get_access('lx') + ' >> 3] = +HEAPF64[' + get_access('ly') + ' >> 3];' CASES[ROPCODES['BR']] = 'pc = pc + ((inst >> 16) << 2) | 0; PROCEED_WITHOUT_PC_BUMP;' CASES[ROPCODES['BRT']] = 'if (' + get_coerced_access('lx') + ') { pc = pc + ((inst >> 16) << 2) | 0; PROCEED_WITHOUT_PC_BUMP; }' CASES[ROPCODES['BRF']] = 'if (!(' + get_coerced_access('lx') + ')) { pc = pc + ((inst >> 16) << 2) | 0; PROCEED_WITHOUT_PC_BUMP; }' CASES[ROPCODES['BRA']] = 'pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP;' CASES[ROPCODES['BRTA']] = 'pc = pc + 4 | 0; if (' + get_coerced_access('lx') + ') { pc = HEAP32[pc >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }' CASES[ROPCODES['BRFA']] = 'pc = pc + 4 | 0; if (!(' + get_coerced_access('lx') + ')) { pc = HEAP32[pc >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }' CASES[ROPCODES['COND']] = 'pc = pc + 4 | 0; ' + get_access('lx') + ' = (' + get_coerced_access('ly') + ') ? (' + get_coerced_access('lz') + ') : (' + get_coerced_access('(HEAPU8[pc >> 0] | 0)') + ');' CASES[ROPCODES['CONDD']] = 'pc = pc + 4 | 0; ' + get_access('lx', s='d') + ' = (' + get_coerced_access('ly') + ') ? (' + get_coerced_access('lz', s='d') + ') : (' + get_coerced_access('(HEAPU8[pc >> 0] | 0)', s='d') + ');' CASES[ROPCODES['GETTDP']] = get_access('lx') + ' = tempDoublePtr;' #CASES[ROPCODES['GETPC']] = get_access('lx') + ' = pc;' CASES[ROPCODES['GETTR0']] = get_access('lx') + ' = tempRet0;' CASES[ROPCODES['SETTR0']] = 'tempRet0 = ' + get_coerced_access('lx') + ';' if FROUND: CASES[ROPCODES['FROUND']] = get_access('lx', s='d') + ' = Math_fround(' + get_coerced_access('ly', s='d') + ');' # stacktop handling: if allowing async, the very bottom will contain the function being executed, # for stack trace reconstruction. We store [pc of function, curr pc] # where curr pc is the current position in that function, when asyncing # The effective sp, where locals reside, is 8 above that. 
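#
# An explanatory note added for clarity (a reading of the code below, not in the original
# source): with ASYNC enabled the bottom of each emterpreter frame is laid out as
#   EMTSTACKTOP + 0 : pc of the function being executed (written in the emterpret prelude)
#   EMTSTACKTOP + 4 : current pc within that function (written by handle_async_pre_call)
#   EMTSTACKTOP + 8 : sp, where the 8-byte local slots begin
# which is why push_stacktop()/pop_stacktop() offset sp by 8 only when ASYNC is set.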
def push_stacktop(zero): return (' sp = EMTSTACKTOP;' if not ASYNC else ' sp = EMTSTACKTOP + 8 | 0;') if not zero else '' def pop_stacktop(zero): return '//Module.print("exit");\n' + ((' EMTSTACKTOP = sp; ' if not ASYNC else 'EMTSTACKTOP = sp - 8 | 0; ') if not zero else '') def handle_async_pre_call(): return 'HEAP32[sp - 4 >> 2] = pc;' if ASYNC else '' def handle_async_post_call(): assert not ZERO return 'if ((asyncState|0) == 1) { ' + pop_stacktop(zero=False) + ' return }\n' if ASYNC else '' # save pc and exit immediately if currently saving state CASES[ROPCODES['INTCALL']] = ''' lz = HEAPU8[(HEAP32[pc + 4 >> 2] | 0) + 1 | 0] | 0; // FUNC inst, see definition above; we read params here ly = 0; assert(((EMTSTACKTOP + 8|0) <= (EMT_STACK_MAX|0))|0); // for return value %s %s while ((ly|0) < (lz|0)) { %s = %s; %s = %s; ly = ly + 1 | 0; } %s %s emterpret(HEAP32[pc + 4 >> 2] | 0); %s %s %s = HEAP32[EMTSTACKTOP >> 2] | 0; %s = HEAP32[EMTSTACKTOP + 4 >> 2] | 0; pc = pc + (((4 + lz + 3) >> 2) << 2) | 0; ''' % ( 'if ((HEAPU8[(HEAP32[pc + 4 >> 2] | 0) + 4 | 0] | 0) == 0) {' if ZERO else '', 'if ((asyncState|0) != 2) {' if ASYNC else '', get_access('ly', base='EMTSTACKTOP', offset=8 if ASYNC else 0), get_coerced_access('HEAPU8[pc + 8 + ly >> 0]'), get_access('ly', base='EMTSTACKTOP', offset=12 if ASYNC else 4), get_coerced_access('HEAPU8[pc + 8 + ly >> 0]', offset=4), '}' if ASYNC else '', handle_async_pre_call(), handle_async_post_call(), ('''} else { while ((ly|0) < (lz|0)) { %s = %s; %s = %s; ly = ly + 1 | 0; } emterpret_z(HEAP32[pc + 4 >> 2] | 0); }''' % ( get_access('ly', base=0), get_coerced_access('HEAPU8[pc + 8 + ly >> 0]'), get_access('ly', base=0, offset=4), get_coerced_access('HEAPU8[pc + 8 + ly >> 0]', offset=4), )) if ZERO else '', get_access('lx'), get_access('lx', offset=4), ) CASES[ROPCODES['SWITCH']] = ''' lz = ''' + get_coerced_access('lz') + '''; lx = ((''' + get_coerced_access('lx') + ''') - (''' + get_coerced_access('ly') + ''')) >>> 0; // lx is now relative to the base if ((lx >>> 0) >= (lz >>> 0)) { // is the adjusted value too big? 
pc = (pc + (lz << 2)) | 0; // jump to right after the table, where the default is PROCEED_WITH_PC_BUMP; // also increment the pc normally, to skip the switch itself } pc = HEAP32[pc + 4 + (lx << 2) >> 2] | 0; // load from the jump table which is right after this instruction, and set pc PROCEED_WITHOUT_PC_BUMP;''' CASES[ROPCODES['FSLOW']] = get_access('lx') + ' = ' + get_coerced_access('inst >>> 16') + ';' CASES[ROPCODES['FSLOWD']] = get_access('lx', s='d') + ' = ' + get_coerced_access('inst >>> 16', s='d') + ';' CASES[ROPCODES['TSLOW']] = get_access('inst >>> 16') + ' = ' + get_coerced_access('lx') + ';' CASES[ROPCODES['TSLOWD']] = get_access('inst >>> 16', s='d') + ' = ' + get_coerced_access('lx', s='d') + ';' opcode_used = {} for opcode in OPCODES: opcode_used[opcode] = False def is_function_table(name): return name.startswith('FUNCTION_TABLE_') def is_dyn_call(func): return func.startswith('dynCall_') def make_emterpreter(zero=False): # return is specialized per interpreter CASES[ROPCODES['RET']] = pop_stacktop(zero) CASES[ROPCODES['RET']] += 'HEAP32[EMTSTACKTOP >> 2] = ' + get_coerced_access('lx') + '; HEAP32[EMTSTACKTOP + 4 >> 2] = ' + get_coerced_access('lx', offset=4) + '; return;' # call is custom generated using information of actual call patterns, and which emterpreter this is def make_target_call(i): name = global_func_names[i] sig = global_func_sigs[i] function_pointer_call = is_function_table(name) # our local registers are never true floats, and we just do fround calls to ensure correctness, not caring # about performance. but when coercing to outside of the emterpreter, we need to know the true sig, # and must use frounds true_sig = sig if function_pointer_call: true_sig = name.split('_')[-1] elif name in actual_sigs: true_sig = actual_sigs[name] def fix_coercion(value, s): if s == 'f': value = 'Math_fround(' + value + ')' return value ret = name if function_pointer_call: ret += '[' + get_access('HEAPU8[pc+4>>0]') + ' & %d]' % (next_power_of_two(asm.tables[name].count(',')+1)-1) ret += '(' + ', '.join([fix_coercion(get_coerced_access('HEAPU8[pc+%d>>0]' % (i+4+int(function_pointer_call)), s=sig[i+1]), true_sig[i+1]) for i in range(len(sig)-1)]) + ')' if sig[0] != 'v': ret = shared.JS.make_coercion(fix_coercion(ret, true_sig[0]), sig[0]) if not ASYNC: ret = make_assign(get_access('lx', sig[0]), ret, 'ly' if sig[0] == 'i' else 'ld') else: # we cannot save the return value immediately! if we are saving the stack, it is meaningless, and would corrupt a local stack variable if sig[0] == 'i': ret = 'lz = ' + ret else: assert sig[0] == 'd' ret = 'ld = ' + ret elif name in actual_sigs and actual_sigs[name][0] != 'v': ret = shared.JS.make_coercion(ret, actual_sigs[name][0]) # return value ignored, but need a coercion if ASYNC: # check if we are asyncing, and if not, it is ok to save the return value ret = handle_async_pre_call() + ret + '; ' + handle_async_post_call() if sig[0] != 'v': ret += ' else ' + get_access('lx', sig[0]) + ' = '; if sig[0] == 'i': ret += 'lz' else: assert sig[0] == 'd' ret += 'ld ' ret += ';' extra = len(sig) - 1 + int(function_pointer_call) # [opcode, lx, target, sig], take the usual 4. 
params are extra if extra > 0: ret += '; pc = pc + %d | 0' % (4*((extra+3)>>2)) return ' ' + ret + '; PROCEED_WITH_PC_BUMP;' CASES[ROPCODES['EXTCALL']] = 'switch ((inst>>>16)|0) {\n' + \ '\n'.join([' case %d: {\n%s\n }' % (i, make_target_call(i)) for i in range(global_func_id)]) + \ '\n default: assert(0);' + \ '\n }' if ROPCODES['GETGLBI'] not in CASES: def make_load(i, t): name = rglobal_vars[i] return ' ' + get_access('lx', t) + ' = ' + name + '; PROCEED_WITH_PC_BUMP;' def make_getglb(suffix, t): CASES[ROPCODES['GETGLB' + suffix]] = 'switch (ly|0) {\n' + \ '\n'.join([' case %d: {\n%s\n }' % (i, make_load(i, t)) for i in range(global_var_id) if global_var_types[rglobal_vars[i]] == t]) + \ '\n default: assert(0);' + \ '\n }' make_getglb('I', 'i') make_getglb('D', 'd') def make_store(i, t): name = rglobal_vars[i] return ' ' + name + ' = ' + get_coerced_access('lz', t) + '; PROCEED_WITH_PC_BUMP;' def make_setglb(suffix, t): CASES[ROPCODES['SETGLB' + suffix]] = 'switch ((inst >> 8)&255) {\n' + \ '\n'.join([' case %d: {\n%s\n }' % (i, make_store(i, t)) for i in range(global_var_id) if global_var_types[rglobal_vars[i]] == t]) + \ '\n default: assert(0);' + \ '\n }' make_setglb('I', 'i') make_setglb('D', 'd') def fix_case(case): # we increment pc at the top of the loop. to avoid a pc bump, we decrement it first; this is rare, most opcodes just continue; this avoids any code at the end of the loop return case.replace('PROCEED_WITH_PC_BUMP', 'continue').replace('PROCEED_WITHOUT_PC_BUMP', 'pc = pc - 4 | 0; continue').replace('continue; continue;', 'continue;') def process(code): if not ASSERTIONS: code = code.replace(' assert(', ' //assert(') if zero: code = code.replace('sp + ', '') return code main_loop_prefix = r''' //if (first) first = false; else print('last lx (' + lx + '): ' + [''' + get_coerced_access('lx') + ',' + get_coerced_access('lx', s='d') + ''']); pc = pc + 4 | 0; inst = HEAP32[pc>>2]|0; lx = (inst >> 8) & 255; ly = (inst >> 16) & 255; lz = inst >>> 24; //Module.print([pc, inst&255, %s[inst&255], lx, ly, lz, HEAPU8[pc + 4],HEAPU8[pc + 5],HEAPU8[pc + 6],HEAPU8[pc + 7]].join(', ')); ''' % (json.dumps(OPCODES)) if not INNERTERPRETER_LAST_OPCODE: main_loop = main_loop_prefix + r''' switch (inst&255) { %s default: assert(0); } ''' % ('\n'.join([fix_case(' case %d: %s break;' % (k, CASES[k])) for k in sorted(CASES.keys()) if opcode_used[OPCODES[k]]])) else: # emit an inner interpreter (innerterpreter) loop, of trivial opcodes that hopefully the JS engine will implement with no spills assert OPCODES[-1] == 'FUNC' # we don't need to emit that one main_loop = r''' innerterpreter: while (1) { %s switch (inst&255) { %s %s default: break innerterpreter; } } switch (inst&255) { %s default: assert(0); } ''' % ( ' ' + '\n '.join(main_loop_prefix.split('\n')), '\n'.join([fix_case(' case %d: %s break;' % (ROPCODES[k], CASES[ROPCODES[k]])) for k in OPCODES[:-1][:ROPCODES[INNERTERPRETER_LAST_OPCODE]+1]]), '\n'.join([fix_case(' case %d:' % (ROPCODES[k])) for k in OPCODES[:-1][ROPCODES[INNERTERPRETER_LAST_OPCODE]+1:]]), '\n'.join([fix_case(' case %d: %s break;' % (ROPCODES[k], CASES[ROPCODES[k]])) for k in OPCODES[:-1][ROPCODES[INNERTERPRETER_LAST_OPCODE]+1:]]) ) return process(r''' function emterpret%s(pc) { //Module.print('emterpret: ' + pc + ',' + EMTSTACKTOP); pc = pc | 0; var %sinst = 0, lx = 0, ly = 0, lz = 0; %s %s %s assert(((HEAPU8[pc>>0]>>>0) == %d)|0); lx = HEAPU16[pc + 2 >> 1] | 0; // num locals %s %s //print('enter func ' + [pc, HEAPU8[pc + 0],HEAPU8[pc + 1],HEAPU8[pc + 2],HEAPU8[pc + 
3],HEAPU8[pc + 4],HEAPU8[pc + 5],HEAPU8[pc + 6],HEAPU8[pc + 7]].join(', ')); //var first = true; pc = pc + 4 | 0; while (1) { %s } assert(0); }''' % ( '' if not zero else '_z', 'sp = 0, ' if not zero else '', '' if not ASYNC and not MEMORY_SAFE else 'var ld = +0;', '' if not ASYNC else 'HEAP32[EMTSTACKTOP>>2] = pc;\n', push_stacktop(zero), ROPCODES['FUNC'], (''' EMTSTACKTOP = EMTSTACKTOP + (lx ''' + (' + 1 ' if ASYNC else '') + '''<< 3) | 0; assert(((EMTSTACKTOP|0) <= (EMT_STACK_MAX|0))|0);\n''' + (' if ((asyncState|0) != 2) {' if ASYNC else '')) if not zero else '', ' } else { pc = (HEAP32[sp - 4 >> 2] | 0) - 8 | 0; }' if ASYNC else '', main_loop, )) # main if __name__ == '__main__': infile = sys.argv[1] outfile = sys.argv[2] force_memfile = sys.argv[3] if len(sys.argv) >= 4 else None original_yieldlist = YIELDLIST extra_blacklist = [] if len(sys.argv) >= 5: temp = sys.argv[4] if temp[0] == '"': # response file assert temp[1] == '@' temp = open(temp[2:-1]).read() extra_blacklist = json.loads(temp) if len(sys.argv) >= 6: temp = sys.argv[5] if temp[0] == '"': # response file assert temp[1] == '@' temp = open(temp[2:-1]).read() WHITELIST = json.loads(temp) if len(sys.argv) >= 7: temp = sys.argv[6] if temp[0] == '"': # response file assert temp[1] == '@' temp = open(temp[2:-1]).read() YIELDLIST = YIELDLIST + json.loads(temp) if len(sys.argv) >= 8: SWAPPABLE = int(sys.argv[7]) if ADVISE: # Advise the user on which functions should likely be emterpreted temp = temp_files.get('.js').name shared.Building.js_optimizer(infile, ['dumpCallGraph'], output_filename=temp, just_concat=True) asm = asm_module.AsmModule(temp) lines = asm.funcs_js.split('\n') can_call = {} for i in range(len(lines)): line = lines[i] if line.startswith('// REACHABLE '): curr = json.loads(line[len('// REACHABLE '):]) func = curr[0] targets = curr[2] can_call[func] = set(targets) # function tables too - treat a function all as a function that can call anything in it, which is effectively what it is for name, funcs in asm.tables.iteritems(): can_call[name] = set(funcs[1:-1].split(',')) #print can_call # Note: We ignore calls in from outside the asm module, so you could do emterpreted => outside => emterpreted, and we would # miss the first one there. But this is acceptable to do, because we can't save such a stack anyhow, due to the outside! #print 'can call', can_call, '\n!!!\n', asm.tables, '!' reachable_from = {} for func, targets in can_call.iteritems(): for target in targets: if target not in reachable_from: reachable_from[target] = set() reachable_from[target].add(func) #print 'reachable from', reachable_from # find all functions that can reach the sync funcs, which are those that can be on the stack during an async save/load, and hence must all be emterpreted to_check = list(SYNC_FUNCS) advised = set() while len(to_check) > 0: curr = to_check.pop() if curr in reachable_from: for reacher in reachable_from[curr]: if reacher not in advised: if not is_dyn_call(reacher) and not is_function_table(reacher): advised.add(str(reacher)) to_check.append(reacher) print "Suggested list of functions to run in the emterpreter:" print " -s EMTERPRETIFY_WHITELIST='" + str(sorted(list(advised))).replace("'", '"') + "'" print "(%d%% out of %d functions)" % (int((100.0*len(advised))/len(can_call)), len(can_call)) if len(YIELDLIST) > len(original_yieldlist): # advise on the yield list as well. 
Anything a yield function can reach, likely needs to also be a yield function YIELD_IGNORE = set(['abort']) to_check = list(YIELDLIST) advised = set([str(f) for f in YIELDLIST]) while len(to_check) > 0: curr = to_check.pop() if curr not in can_call: continue for next in can_call[curr]: if next not in advised: advised.add(str(next)) to_check.append(next) advised = [next for next in advised if not is_dyn_call(next) and not is_function_table(next) and not next in original_yieldlist and next not in SYNC_FUNCS and next not in YIELD_IGNORE and next[0] == '_'] print print "Suggested list of yield functions for the emterpreter:" print " -s EMTERPRETIFY_YIELDLIST='" + str(sorted(list(advised))).replace("'", '"') + "'" print "(%d%% out of %d functions)" % (int((100.0*len(advised))/len(can_call)), len(can_call)) sys.exit(0) BLACKLIST = set(list(BLACKLIST) + extra_blacklist) if DEBUG or SWAPPABLE: orig = infile + '.orig.js' shared.logging.debug('saving original (non-emterpreted) code to ' + orig) shutil.copyfile(infile, orig) # final global functions asm = asm_module.AsmModule(infile) # process blacklist for func in extra_blacklist: assert func in asm.funcs, 'requested blacklist of %s but it does not exist' % func ## debugging #import hashlib #def hash(s): # hash_object = hashlib.sha256(s) # return int(hash_object.hexdigest(), 16) #if len(WHITELIST) == 0 and len(extra_blacklist) == 0: # WHITELIST = set([func for func in asm.funcs if func[0] == '_' and hash(func) % 3 == 1]) # print >> sys.stderr, 'manual whitelist', len(WHITELIST), '/', len(asm.funcs) ## if len(WHITELIST) > 0: # we are using a whitelist: fill the blacklist with everything not whitelisted BLACKLIST = set([func for func in asm.funcs if func not in WHITELIST]) # decide which functions will be emterpreted, and find which are externally reachable (from outside other emterpreted code; those will need trampolines) emterpreted_funcs = set([func for func in asm.funcs if func not in BLACKLIST and not is_dyn_call(func)]) tabled_funcs = asm.get_table_funcs() exported_funcs = [func.split(':')[0] for func in asm.exports] temp = temp_files.get('.js').name # infile + '.tmp.js' # find emterpreted functions reachable by non-emterpreted ones, we will force a trampoline for them later shared.Building.js_optimizer(infile, ['findReachable'], extra_info={ 'blacklist': list(emterpreted_funcs) }, output_filename=temp, just_concat=True) asm = asm_module.AsmModule(temp) lines = asm.funcs_js.split('\n') reachable_funcs = set([]) for i in range(len(lines)): line = lines[i] if line.startswith('// REACHABLE '): curr = json.loads(line[len('// REACHABLE '):]) reachable_funcs = set(list(reachable_funcs) + curr) external_emterpreted_funcs = filter(lambda func: func in tabled_funcs or func in exported_funcs or func in reachable_funcs, emterpreted_funcs) # process functions, generating bytecode shared.Building.js_optimizer(infile, ['emterpretify'], extra_info={ 'emterpretedFuncs': list(emterpreted_funcs), 'externalEmterpretedFuncs': list(external_emterpreted_funcs), 'opcodes': OPCODES, 'ropcodes': ROPCODES, 'ASYNC': ASYNC, 'PROFILING': PROFILING, 'ASSERTIONS': ASSERTIONS, 'yieldFuncs': YIELDLIST }, output_filename=temp, just_concat=True) # load the module and modify it asm = asm_module.AsmModule(temp) # find memfile. 
can be x.js.mem or x.html.mem in_mem_file = infile + '.mem' in_mem_file_base = os.path.basename(in_mem_file) out_mem_file = outfile + '.mem' out_mem_file_base = os.path.basename(out_mem_file) if in_mem_file_base not in asm.pre_js: in_mem_file = (infile + '.mem').replace('.js.mem', '.html.mem') in_mem_file_base = os.path.basename(in_mem_file) out_mem_file = (outfile + '.mem').replace('.js.mem', '.html.mem') out_mem_file_base = os.path.basename(out_mem_file) assert in_mem_file_base in asm.pre_js, 'we assume a mem init file for now (looked for %s)' % in_mem_file if not force_memfile: asm.pre_js = asm.pre_js.replace(in_mem_file_base, out_mem_file_base) assert os.path.exists(in_mem_file), 'need to find mem file at %s' % in_mem_file else: out_mem_file = force_memfile out_mem_file_base = os.path.basename(out_mem_file) mem_init = map(ord, open(in_mem_file, 'rb').read()) zero_space = asm.staticbump - len(mem_init) assert zero_space >= 0 # can be positive, if we add a bump of zeros assert ('GLOBAL_BASE: %d,' % GLOBAL_BASE) in asm.pre_js, 'we assume a specific global base, and that we can write to all memory below it' # calculate where code will start while len(mem_init) % 8 != 0: mem_init.append(0) asm.staticbump += 1 code_start = len(mem_init) + GLOBAL_BASE # parse out bytecode and add to mem init file all_code = [] funcs = {} lines = asm.funcs_js.split('\n') asm.funcs_js = None func = None # first pass, collect and process bytecode global_funcs = {} # 'name|sig' -> id global_func_names = {} # id -> name global_func_sigs = {} # id -> sig, one name can have multiple sigs global_func_id = 0 global_vars = {} rglobal_vars = {} global_var_types = {} global_var_id = 0 def note_global(target, j, code): global global_var_id imp = asm.imports[target] ty = asm.get_import_type(imp) assert ty in ['i', 'd'], target if code[j] == 'GETGLBI' and ty == 'd': # the js optimizer doesn't know all types, we must fix it up here assert '.0' in imp or '+' in imp, imp code[j] = 'GETGLBD' ty = 'd' if target not in global_vars: global_vars[target] = global_var_id rglobal_vars[global_var_id] = target global_var_id += 1 global_var_types[target] = ty else: assert global_var_types[target] == ty call_sigs = {} # signatures appearing for each call target def process_code(func, code, absolute_targets): global global_func_id absolute_start = code_start + len(all_code) # true absolute starting point of this function #print 'processing code', func, absolute_start for i in range(len(code)/4): j = i*4 if code[j] == 'EXTCALL': # fix CALL instructions' targets and signatures target = code[j+2] sig = code[j+3] if target not in call_sigs: call_sigs[target] = [] sigs = call_sigs[target] if sig not in sigs: sigs.append(sig) fullname = target + '|' + sig if fullname not in global_funcs: global_funcs[fullname] = global_func_id global_func_names[global_func_id] = target global_func_sigs[global_func_id] = sig global_func_id += 1 code[j+2] = global_funcs[fullname] & 255 code[j+3] = global_funcs[fullname] >> 8 if sig[0] == 'v': if code[j+1] == -1: # dummy value for assignment XXX we should not have assignments on void calls code[j+1] = 0 # clear it else: assert code[j+1] >= 0 # there should be a real target here elif code[j] in ['GETGLBI', 'GETGLBD']: # fix global-accessing instructions' targets target = code[j+2] note_global(target, j, code) code[j+2] = global_vars[target] elif code[j] in ['SETGLBI', 'SETGLBD']: # fix global-accessing instructions' targets target = code[j+1] note_global(target, j, code) code[j+1] = global_vars[target] elif code[j] 
== 'absolute-value': # put the 32-bit absolute value of an abolute target here absolute_value = absolute_start + absolute_targets[unicode(code[j+1])] #print ' fixing absolute value', code[j+1], absolute_targets[unicode(code[j+1])], absolute_value assert absolute_value < (1 << 31) assert absolute_value % 4 == 0 value = bytify(absolute_value) for k in range(4): code[j + k] = value[k] actual_sigs = {} for i in range(len(lines)): line = lines[i] if line.startswith('function ') and '}' not in line: assert not func elif line.startswith('// EMTERPRET_INFO '): try: func, curr, absolute_targets = json.loads(line[len('// EMTERPRET_INFO '):]) except Exception, e: print >> sys.stderr, 'failed to parse code from', line raise e assert len(curr) % 4 == 0, len(curr) funcs[func] = len(all_code) # no operation here should change the length if LOG_CODE: print >> sys.stderr, 'raw bytecode for %s:' % func, curr, 'insts:', len(curr)/4 process_code(func, curr, absolute_targets) #print >> sys.stderr, 'processed bytecode for %s:' % func, curr all_code += curr func = None lines[i] = '' elif line.startswith('// return type: ['): name, sig = line.split('[')[1].split(']')[0].split(',') actual_sigs[name] = sig lines[i] = '' if global_func_id >= 65536: msg = 'Too many extcall-able global functions (%d) for emterpreter bytecode' % global_func_id if PROFILING: msg += '\nDue to --profiling or --profiling-funcs being on, all emterpreter calls are extcalls. Building without those flags might avoid this problem.' raise Exception(msg) assert global_var_id < 256, [global_vars, global_var_id] def post_process_code(code): for i in range(len(code)/4): j = i*4 if code[j] == 'absolute-funcaddr': # put the 32-bit absolute value of an abolute function here absolute_value = code_start + funcs[code[j+1]] #print ' fixing absolute value', code[j+1], absolute_targets[unicode(code[j+1])], absolute_value assert absolute_value < (1 << 31) assert absolute_value % 4 == 0 value = bytify(absolute_value) for k in range(4): code[j + k] = value[k] # finalize instruction string names to opcodes for i in range(len(code)/4): j = i*4 if type(code[j]) in (str, unicode): opcode_used[code[j]] = True code[j] = ROPCODES[code[j]] # sanity checks for i in range(len(code)): v = code[i] assert type(v) == int and v >= 0 and v < 256, [i, v, 'in', code[i-5:i+5], ROPCODES] post_process_code(all_code) # create new mem init mem_init = mem_init + all_code asm.staticbump += len(all_code) while len(mem_init) % 8 != 0: mem_init.append(0) asm.staticbump += 1 stack_start = len(mem_init) asm.staticbump += EMT_STACK_MAX while asm.staticbump % 8 != 0: asm.staticbump += 1 open(out_mem_file, 'wb').write(''.join(map(chr, mem_init))) # second pass, finalize trampolines for i in range(len(lines)): line = lines[i] if line.startswith('function ') and '}' not in line: assert not func func = line.split(' ')[1].split('(')[0] elif line.startswith('}'): assert func func = None elif func and func in funcs: call = '(EMTERPRETER_' + func + ')' if call in line: lines[i] = lines[i].replace(call, '(%s)' % (funcs[func] + code_start)) # finalize funcs JS (first line has the marker, add emterpreters right after that) asm.funcs_js = '\n'.join([lines[0], make_emterpreter(), make_emterpreter(zero=True) if ZERO else '', '\n'.join(filter(lambda line: len(line) > 0, lines[1:]))]) + '\n' lines = None # set up emterpreter stack top asm.set_pre_js(js='var EMTSTACKTOP = STATIC_BASE + %s, EMT_STACK_MAX = EMTSTACKTOP + %d;' % (stack_start, EMT_STACK_MAX)) # send EMT vars into asm asm.pre_js += 
"Module.asmLibraryArg['EMTSTACKTOP'] = EMTSTACKTOP; Module.asmLibraryArg['EMT_STACK_MAX'] = EMT_STACK_MAX;\n" extra_vars = 'var EMTSTACKTOP = env.EMTSTACKTOP|0;\nvar EMT_STACK_MAX = env.EMT_STACK_MAX|0;\n' first_func = asm.imports_js.find('function ') if first_func < 0: asm.imports_js += extra_vars else: # imports contains a function (not a true asm function, hidden from opt passes) that we must not be before asm.imports_js = asm.imports_js[:first_func] + '\n' + extra_vars + '\n' + asm.imports_js[first_func:] asm.write(outfile) temp_files.clean()
Zinnia Group prides itself on staying abreast of international trends as well as analysing changes in the makeup of the Australian community and emerging attitudes and behaviour. Zinnia Group's vision for dedicated contemporary interment sites is a reflection of significant, well-documented demographic changes along with a nuanced understanding of the way contemporary Australians view death and dying. The Australian community is ageing swiftly, placing significant pressure on a range of resources and services. The most obvious of these is cemeteries. The number of deaths in Australia has been climbing steadily over the last decade and, naturally, is expected to increase significantly as the number of aged Australians grows. This presents two significant challenges for the Australian funeral sector: firstly, how to provide sufficient burial sites to meet the increased need, and secondly, how to offer affordable burial options as the best sites become scarcer. It is expected that Sydney's increased number of deaths will account for all the existing burial spaces in the metropolitan area by 2035. There is also a growing trend towards cremation in Australia, with only a portion of cremations memorialised where the cremation takes place. This represents a significant opportunity for private cemetery operators to provide viable, attractive interment alternatives. In a sector as sensitive as death care, the basic laws of supply and demand still prevail. Compounding the effect of Australia's ageing population is a significant shortage of burial space, especially in New South Wales. As Sydney's cemeteries approach capacity, significant price increases are expected, reflecting the rarity of the burial spaces and the desirability of their unique locations. The number of Australians aged 85 and over is expected to more than quadruple, from 400,000 in 2010 to 1.8 million in 2050. Between 2005 and 2013 there was an average 89% increase in prices across six of Sydney's major cemeteries, representing an average of 11.1% growth per annum. Over 245,000 additional burial sites will be required in Sydney by 2020. Based on the current rate of deaths, the eight Crown Land cemeteries in Sydney will reach capacity as early as 2035. "The landscape architect's insights and exemplary landscape approach to the originally limited brief led to an expansion of the client's role for the site, including an educational program for this landscape. The restored woodland landscape provides an inspirational setting for the visitor and as a whole transforms the cemetery model and the memorial experience." Innovation in the death care industry continues to excite strong public interest. Zinnia Group follows public discussion and debate in the press, looking to identify the concerns that are most important to the broader community in order to make the sites we offer as relevant and engaging as possible.
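For readers checking the price-growth figure quoted above, a short calculation shows that 11.1% per annum corresponds to a simple average of the 89% rise spread over the eight years from 2005 to 2013; the equivalent compound annual rate would be closer to 8%:

# Quick check of the cemetery price-growth arithmetic.
total_increase = 0.89                                       # 89% rise between 2005 and 2013
years = 8                                                   # eight annual steps

simple_average = total_increase / years                     # ~0.111 -> 11.1% per annum
compound_rate = (1 + total_increase) ** (1.0 / years) - 1   # ~0.083 -> 8.3% per annum
print('simple average: %.1f%%' % (simple_average * 100))
print('compound rate:  %.1f%%' % (compound_rate * 100))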
from beatbox import SoapFaultError from beatbox.python_client import _prepareSObjects from types import DictType, StringTypes, IntType, ListType, TupleType import beatbox import datetime import sfconfig import unittest class TestUtils(unittest.TestCase): def setUp(self): self.svc = svc = beatbox.PythonClient() svc.login(sfconfig.USERNAME, sfconfig.PASSWORD) self._todelete = list() def tearDown(self): svc = self.svc ids = self._todelete if ids: while len(ids) > 200: svc.delete(ids[:200]) ids = ids[200:] if ids: svc.delete(ids) def testDescribeGlobal(self): svc = self.svc res = svc.describeGlobal() self.assertEqual(type(res), DictType) self.failUnless(type(res['encoding']) in StringTypes) self.assertEqual(type(res['maxBatchSize']), IntType) self.assertEqual(type(res['types']), ListType) self.failUnless(len(res['sobjects']) > 0) # BBB for API < 17.0 self.failUnless(len(res['types']) > 0) def testDescribeSObjects(self): svc = self.svc globalres = svc.describeGlobal() types = globalres['types'][:100] res = svc.describeSObjects(types[0]) self.assertEqual(type(res), ListType) self.assertEqual(len(res), 1) res = svc.describeSObjects(types) self.assertEqual(len(types), len(res)) def testCreate(self): svc = self.svc data = dict( type='Contact', LastName='Doe', FirstName='John', Phone='123-456-7890', Email='john@doe.com', Birthdate=datetime.date(1970, 1, 4) ) res = svc.create([data]) self.failUnless(type(res) in (ListType, TupleType)) self.failUnless(len(res) == 1) self.failUnless(res[0]['success']) id = res[0]['id'] self._todelete.append(id) contacts = svc.retrieve( 'LastName, FirstName, Phone, Email, Birthdate', 'Contact', [id]) self.assertEqual(len(contacts), 1) contact = contacts[0] for k in ['LastName', 'FirstName', 'Phone', 'Email', 'Birthdate']: self.assertEqual( data[k], contact[k]) def testSetIntegerField(self): # Passes when you feed it floats, even if salesforce field is defined # for 0 decimal places. Lack of data validation in SF? svc = self.svc testField = 'Favorite_Integer__c' data = dict( type='Contact', LastName='Doe', FirstName='John', Favorite_Integer__c=-25 ) res = svc.create([data]) self.failUnless(type(res) in (ListType, TupleType)) self.failUnless(len(res) == 1) self.failUnless(res[0]['success']) id = res[0]['id'] self._todelete.append(id) contacts = svc.retrieve('LastName, FirstName, Favorite_Integer__c', 'Contact', [id]) self.assertEqual(len(contacts), 1) contact = contacts[0] self.assertEqual(data[testField], contact[testField]) def testSetFloatField(self): # this fails when you have a large amount (I didn't test the #) of decimal places. 
svc = self.svc testField = 'Favorite_Float__c' data = dict( type='Contact', LastName='Doe', FirstName='John', Favorite_Float__c=-1.999888777 ) res = svc.create([data]) self.failUnless(type(res) in (ListType, TupleType)) self.failUnless(len(res) == 1) self.failUnless(res[0]['success']) id = res[0]['id'] self._todelete.append(id) contacts = svc.retrieve('LastName, FirstName, Favorite_Float__c', 'Contact', [id]) self.assertEqual(len(contacts), 1) contact = contacts[0] self.assertEqual(data[testField], contact[testField]) def testCreatePickListMultiple(self): svc = self.svc data = dict( type='Contact', LastName='Doe', FirstName='John', Phone='123-456-7890', Email='john@doe.com', Birthdate=datetime.date(1970, 1, 4), Favorite_Fruit__c=["Apple", "Orange", "Pear"] ) res = svc.create([data]) self.failUnless(type(res) in (ListType, TupleType)) self.failUnless(len(res) == 1) self.failUnless(res[0]['success']) id = res[0]['id'] self._todelete.append(id) contacts = svc.retrieve('LastName, FirstName, Phone, Email, Birthdate, \ Favorite_Fruit__c', 'Contact', [id]) self.assertEqual(len(contacts), 1) contact = contacts[0] for k in ['LastName', 'FirstName', 'Phone', 'Email', 'Birthdate', 'Favorite_Fruit__c']: self.assertEqual( data[k], contact[k]) def testFailedCreate(self): svc = self.svc data = dict( type='Contact', LastName='Doe', FirstName='John', Phone='123-456-7890', Email='john@doe.com', Birthdate='foo' ) self.assertRaises(SoapFaultError, svc.create, data) def testRetrieve(self): svc = self.svc data = dict( type='Contact', LastName='Doe', FirstName='John', Phone='123-456-7890', Email='john@doe.com', Birthdate=datetime.date(1970, 1, 4) ) res = svc.create([data]) id = res[0]['id'] self._todelete.append(id) typedesc = svc.describeSObjects('Contact')[0] fieldnames = list() fields = typedesc.fields.values() fieldnames = [f.name for f in fields if f.type not in ('address',)] fieldnames = ', '.join(fieldnames) contacts = svc.retrieve(fieldnames, 'Contact', [id]) self.assertEqual(len(contacts), 1) def testRetrieveDeleted(self): svc = self.svc data = dict( type='Contact', LastName='Doe', FirstName='John', Phone='123-456-7890', Email='john@doe.com', Birthdate=datetime.date(1970, 1, 4) ) res = svc.create(data) id = res[0]['id'] svc.delete(id) typedesc = svc.describeSObjects('Contact')[0] fieldnames = list() fields = typedesc.fields.values() fieldnames = [f.name for f in fields] fieldnames = ', '.join(fieldnames) contacts = svc.retrieve(fieldnames, 'Contact', [id]) self.assertEqual(len(contacts), 0) def testDelete(self): svc = self.svc data = dict( type='Contact', LastName='Doe', FirstName='John', Phone='123-456-7890', Email='john@doe.com', Birthdate=datetime.date(1970, 1, 4) ) res = svc.create([data]) id = res[0]['id'] res = svc.delete([id]) self.failUnless(res[0]['success']) contacts = svc.retrieve('LastName', 'Contact', [id]) self.assertEqual(len(contacts), 0) def testUpdate(self): svc = self.svc originaldate = datetime.date(1970, 1, 4) newdate = datetime.date(1970, 1, 5) lastname = 'Doe' data = dict( type='Contact', LastName=lastname, FirstName='John', Phone='123-456-7890', Email='john@doe.com', Birthdate=originaldate ) res = svc.create([data]) id = res[0]['id'] self._todelete.append(id) contacts = svc.retrieve('LastName, Birthdate', 'Contact', [id]) self.assertEqual(contacts[0]['Birthdate'], originaldate) self.assertEqual(contacts[0]['LastName'], lastname) data = dict( type='Contact', Id=id, Birthdate=newdate) svc.update(data) contacts = svc.retrieve('LastName, Birthdate', 'Contact', [id]) 
self.assertEqual(contacts[0]['Birthdate'], newdate) self.assertEqual(contacts[0]['LastName'], lastname) def testShrinkMultiPicklist(self): svc = self.svc originalList = ["Pear", "Apple"] newList = ["Pear"] lastname = 'Doe' data = dict( type='Contact', LastName=lastname, FirstName='John', Phone='123-456-7890', Email='john@doe.com', Favorite_Fruit__c=originalList ) res = svc.create([data]) id = res[0]['id'] self._todelete.append(id) contacts = svc.retrieve('LastName, Favorite_Fruit__c', 'Contact', [id]) self.assertEqual(len(contacts[0]['Favorite_Fruit__c']), 2) data = dict( type='Contact', Id=id, Favorite_Fruit__c=newList) svc.update(data) contacts = svc.retrieve('LastName, Favorite_Fruit__c', 'Contact', [id]) self.assertEqual(len(contacts[0]['Favorite_Fruit__c']), 1) def testGrowMultiPicklist(self): svc = self.svc originalList = ["Pear", "Apple"] newList = ["Pear", "Apple", "Orange"] lastname = 'Doe' data = dict( type='Contact', LastName=lastname, FirstName='John', Phone='123-456-7890', Email='john@doe.com', Favorite_Fruit__c=originalList ) res = svc.create([data]) id = res[0]['id'] self._todelete.append(id) contacts = svc.retrieve('LastName, Favorite_Fruit__c', 'Contact', [id]) self.assertEqual(len(contacts[0]['Favorite_Fruit__c']), 2) data = dict( type='Contact', Id=id, Favorite_Fruit__c=newList) svc.update(data) contacts = svc.retrieve('LastName, Favorite_Fruit__c', 'Contact', [id]) self.assertEqual(len(contacts[0]['Favorite_Fruit__c']), 3) def testUpdateDeleted(self): svc = self.svc originaldate = datetime.date(1970, 1, 4) newdate = datetime.date(1970, 1, 5) lastname = 'Doe' data = dict( type='Contact', LastName=lastname, FirstName='John', Phone='123-456-7890', Email='john@doe.com', Birthdate=originaldate ) res = svc.create(data) id = res[0]['id'] svc.delete(id) contacts = svc.retrieve('LastName, Birthdate', 'Contact', [id]) self.assertEqual(len(contacts), 0) data = dict( type='Contact', Id=id, Birthdate=newdate) res = svc.update(data) self.failUnless(not res[0]['success']) self.failUnless(len(res[0]['errors']) > 0) def testQuery(self): svc = self.svc data = dict( type='Contact', LastName='Doe', FirstName='John', Phone='123-456-7890', Email='john@doe.com', Birthdate=datetime.date(1970, 1, 4) ) res = svc.create([data]) self._todelete.append(res[0]['id']) data2 = dict( type='Contact', LastName='Doe', FirstName='Jane', Phone='123-456-7890', Email='jane@doe.com', Birthdate=datetime.date(1972, 10, 15) ) res = svc.create([data2]) janeid = res[0]['id'] self._todelete.append(janeid) res = svc.query("SELECT LastName, FirstName, Phone, Email, Birthdate " "FROM Contact WHERE LastName = 'Doe'") self.assertEqual(res['size'], 2) res = svc.query("SELECT Id, LastName, FirstName, Phone, Email, Birthdate " "FROM Contact WHERE LastName = 'Doe' and FirstName = 'Jane'") self.assertEqual(res['size'], 1) self.assertEqual(res['records'][0]['Id'], janeid) def testBackwardsCompatibleQuery(self): svc = self.svc data = dict( type='Contact', LastName='Doe', FirstName='John', Phone='123-456-7890', Email='john@doe.com', Birthdate=datetime.date(1970, 1, 4) ) res = svc.create([data]) self._todelete.append(res[0]['id']) data2 = dict( type='Contact', LastName='Doe', FirstName='Jane', Phone='123-456-7890', Email='jane@doe.com', Birthdate=datetime.date(1972, 10, 15) ) res = svc.create([data2]) janeid = res[0]['id'] self._todelete.append(janeid) # conditional expression as positional arg res = svc.query( 'LastName, FirstName, Phone, Email, Birthdate', 'Contact', "LastName = 'Doe'") self.assertEqual(res['size'], 2) # 
conditional expression as *empty* positional arg res = svc.query('LastName', 'Contact', '') self.failUnless(res['size'] > 0) # conditional expression as kwarg res = svc.query( 'Id, LastName, FirstName, Phone, Email, Birthdate', 'Contact', conditionalExpression="LastName = 'Doe' and FirstName = 'Jane'") self.assertEqual(res['size'], 1) self.assertEqual(res['records'][0]['Id'], janeid) def testTypeDescriptionsCache(self): # patch describeSObjects to make a record when it is called calls = [] standard_describeSObjects = beatbox.PythonClient.describeSObjects def patched_describeSObjects(self, sObjectTypes): calls.append(sObjectTypes) return standard_describeSObjects(self, sObjectTypes) beatbox.PythonClient.describeSObjects = patched_describeSObjects # turn the cache on self.svc.cacheTypeDescriptions = True # should get called the first time self.svc.query('SELECT Id FROM Contact') self.assertEqual(calls, [['Contact']]) # but not the second time self.svc.query('SELECT Id FROM Contact') self.assertEqual(calls, [['Contact']]) # if we flush the cache, it should get called again self.svc.flushTypeDescriptionsCache() self.svc.query('SELECT Id FROM Contact') self.assertEqual(calls, [['Contact'], ['Contact']]) # clean up self.svc.cacheTypeDescriptions = False def testChildToParentMultiQuery(self): svc = self.svc account_data = dict( type='Account', Name='ChildTestAccount', AccountNumber='987654321', Site='www.testsite.com', ) account = svc.create([account_data]) self._todelete.append(account[0]['id']) contact_data = dict( type='Contact', LastName='TestLastName', FirstName='TestFirstName', Phone='123-456-7890', AccountID=account[0]['id'], Email='testfirstname@testlastname.com', Birthdate=datetime.date(1965, 1, 5) ) contact = svc.create([contact_data]) self._todelete.append(contact[0]['id']) query_res = svc.query( "Id, LastName, FirstName, Account.Site, Account.AccountNumber", "Contact", "Phone='123-456-7890'" ) self.assertEqual(query_res.size, 1) rr = query_res.records[0] self.assertEqual(rr.type, 'Contact') map(self.assertEqual, [rr.Id, rr.LastName, rr.FirstName, rr.Account.Site, rr.Account.AccountNumber], [contact[0]['id'], contact_data['LastName'], contact_data['FirstName'], account_data['Site'], account_data['AccountNumber']]) def testChildToParentMultiQuery2(self): svc = self.svc paccount_data = dict( type='Account', Name='ParentTestAccount', AccountNumber='123456789', Site='www.testsite.com', ) paccount = svc.create([paccount_data]) self._todelete.append(paccount[0]['id']) caccount_data = dict( type='Account', Name='ChildTestAccount', AccountNumber='987654321', Site='www.testsite.com', ParentID=paccount[0]['id'] ) caccount = svc.create([caccount_data]) self._todelete.append(caccount[0]['id']) contact_data = dict( type='Contact', LastName='TestLastName', FirstName='TestFirstName', Phone='123-456-7890', AccountID=caccount[0]['id'], Email='testfirstname@testlastname.com', Birthdate=datetime.date(1965, 1, 5) ) contact = svc.create([contact_data]) self._todelete.append(contact[0]['id']) query_res = svc.query( "Id, LastName, FirstName, Account.Site, Account.Parent.AccountNumber", "Contact", "Account.AccountNumber='987654321'" ) rr = query_res.records[0] self.assertEqual(query_res.size, 1) self.assertEqual(rr.type, 'Contact') map(self.assertEqual, [rr.Id, rr.LastName, rr.FirstName, rr.Account.Site, rr.Account.Parent.AccountNumber], [contact[0]['id'], contact_data['LastName'], contact_data['FirstName'], caccount_data['Site'], paccount_data['AccountNumber']]) def testParentToChildMultiQuery(self): svc = 
self.svc caccount_data = dict( type='Account', Name='ChildTestAccount', AccountNumber='987654321', Site='www.testsite.com', ) caccount = svc.create([caccount_data]) self._todelete.append(caccount[0]['id']) contact_data = dict( type='Contact', LastName='TestLastName', FirstName='TestFirstName', Phone='123-456-7890', AccountID=caccount[0]['id'], Email='testfirstname@testlastname.com', Birthdate=datetime.date(1965, 1, 5) ) contact = svc.create([contact_data]) self._todelete.append(contact[0]['id']) contact_data2 = dict( type='Contact', LastName='TestLastName2', FirstName='TestFirstName2', Phone='123-456-7890', AccountID=caccount[0]['id'], Email='testfirstname2@testlastname2.com', Birthdate=datetime.date(1965, 1, 5) ) contact2 = svc.create([contact_data2]) self._todelete.append(contact2[0]['id']) query_res = svc.query( "Id, Name, (select FirstName from Contacts)", "Account", "AccountNumber='987654321'" ) rr = query_res.records[0] self.assertEqual(query_res.size, 1) self.assertEqual(rr.type, 'Account') map(self.assertEqual, [rr.Id, rr.Name], [caccount[0]['id'], caccount_data['Name']]) def testParentToChildMultiQuery2(self): svc = self.svc caccount_data = dict( type='Account', Name='ChildTestAccount', AccountNumber='987654321', Site='www.testsite.com', ) caccount = svc.create([caccount_data]) self._todelete.append(caccount[0]['id']) contact_data = dict( type='Contact', LastName='TestLastName', FirstName='TestFirstName', Phone='123-456-7890', AccountID=caccount[0]['id'], Email='testfirstname@testlastname.com', Birthdate=datetime.date(1965, 1, 5) ) contact = svc.create([contact_data]) self._todelete.append(contact[0]['id']) contact_data2 = dict( type='Contact', LastName='TestLastName2', FirstName='TestFirstName2', Phone='123-456-7890', AccountID=caccount[0]['id'], Email='testfirstname2@testlastname2.com', Birthdate=datetime.date(1965, 1, 5) ) contact2 = svc.create([contact_data2]) self._todelete.append(contact2[0]['id']) query_res = svc.query( "Id, Name, (select FirstName, Account.Site from Contacts), (select Name from Assets)", "Account", "AccountNumber='987654321'" ) rr = query_res.records[0] self.assertEqual(query_res.size, 1) self.assertEqual(rr.type, 'Account') map(self.assertEqual, [rr.Id, rr.Name], [caccount[0]['id'], caccount_data['Name']]) result = 0 for name in [contact_data2['FirstName'], contact_data['FirstName']]: if name in [rr.Contacts.records[i].FirstName for i in range(len(rr.Contacts.records))]: result += 1 self.assertEqual(result, rr.Contacts.size) def testMultiQueryCount(self): svc = self.svc contact_data = dict( type='Contact', LastName='TestLastName', FirstName='TestFirstName', Phone='123-456-7890', Email='testfirstname@testlastname.com', Birthdate=datetime.date(1965, 1, 5) ) contact = svc.create([contact_data]) self._todelete.append(contact[0]['id']) contact_data2 = dict( type='Contact', LastName='TestLastName2', FirstName='TestFirstName2', Phone='123-456-7890', Email='testfirstname2@testlastname2.com', Birthdate=datetime.date(1965, 1, 5) ) contact2 = svc.create([contact_data2]) self._todelete.append(contact2[0]['id']) query_res = svc.query("count()", "Contact", "Phone='123-456-7890'") self.assertEqual(query_res.size, 2) def testAggregateQuery(self): svc = self.svc contact_data = dict( type='Contact', LastName='TestLastName', FirstName='TestFirstName', Phone='123-456-7890', Email='testfirstname@testlastname.com', Birthdate=datetime.date(1900, 1, 5) ) contact = svc.create([contact_data]) self._todelete.append(contact[0]['id']) res = svc.query("SELECT MAX(CreatedDate) FROM 
Contact GROUP BY LastName") # the aggregate result is in the 'expr0' attribute of the result self.failUnless(hasattr(res[0], 'expr0')) # (unfortunately no field type info is returned as part of the # AggregateResult object, so we can't automatically marshall to the # correct Python type) def testQueryDoesNotExist(self): res = self.svc.query( 'LastName, FirstName, Phone, Email, Birthdate', 'Contact', "LastName = 'Doe'") self.assertEqual(res['size'], 0) def testQueryMore(self): svc = self.svc svc.batchSize = 100 data = list() for x in range(250): data.append(dict( type='Contact', LastName='Doe', FirstName='John', Phone='123-456-7890', Email='john@doe.com', Birthdate=datetime.date(1970, 1, 4) )) res = svc.create(data[:200]) ids = [x['id'] for x in res] self._todelete.extend(ids) res = svc.create(data[200:]) ids = [x['id'] for x in res] self._todelete.extend(ids) res = svc.query( 'LastName, FirstName, Phone, Email, Birthdate', 'Contact', "LastName = 'Doe'") self.failUnless(not res['done']) self.assertEqual(len(res['records']), 200) res = svc.queryMore(res['queryLocator']) self.failUnless(res['done']) self.assertEqual(len(res['records']), 50) def testSearch(self): res = self.svc.search("FIND {barr} in ALL FIELDS RETURNING Contact(Id, Birthdate)") self.assertEqual(len(res), 1) self.assertEqual(res[0].type, 'Contact') self.assertEqual(type(res[0].Birthdate), datetime.date) res = self.svc.search("FIND {khgkshgsuhalsf} in ALL FIELDS RETURNING Contact(Id)") self.assertEqual(len(res), 0) def testGetDeleted(self): svc = self.svc startdate = datetime.datetime.utcnow() enddate = startdate + datetime.timedelta(seconds=61) data = dict( type='Contact', LastName='Doe', FirstName='John', Phone='123-456-7890', Email='john@doe.com', Birthdate=datetime.date(1970, 1, 4) ) res = svc.create(data) id = res[0]['id'] svc.delete(id) res = svc.getDeleted('Contact', startdate, enddate) self.failUnless(len(res) != 0) ids = [r['id'] for r in res] self.failUnless(id in ids) def testGetUpdated(self): svc = self.svc startdate = datetime.datetime.utcnow() enddate = startdate + datetime.timedelta(seconds=61) data = dict( type='Contact', LastName='Doe', FirstName='John', Phone='123-456-7890', Email='john@doe.com', Birthdate=datetime.date(1970, 1, 4) ) res = svc.create(data) id = res[0]['id'] self._todelete.append(id) data = dict( type='Contact', Id=id, FirstName='Jane') svc.update(data) res = svc.getUpdated('Contact', startdate, enddate) self.failUnless(id in res) def testGetUserInfo(self): svc = self.svc userinfo = svc.getUserInfo() self.failUnless('accessibilityMode' in userinfo) self.failUnless('currencySymbol' in userinfo) self.failUnless('organizationId' in userinfo) self.failUnless('organizationMultiCurrency' in userinfo) self.failUnless('organizationName' in userinfo) self.failUnless('userDefaultCurrencyIsoCode' in userinfo) self.failUnless('userEmail' in userinfo) self.failUnless('userFullName' in userinfo) self.failUnless('userId' in userinfo) self.failUnless('userLanguage' in userinfo) self.failUnless('userLocale' in userinfo) self.failUnless('userTimeZone' in userinfo) self.failUnless('userUiSkin' in userinfo) def testDescribeTabs(self): tabinfo = self.svc.describeTabs() for info in tabinfo: self.failUnless('label' in info) self.failUnless('logoUrl' in info) self.failUnless('selected' in info) self.failUnless('tabs' in info) for tab in info['tabs']: self.failUnless('custom' in tab) self.failUnless('label' in tab) self.failUnless('sObjectName' in tab) self.failUnless('url' in tab) def testDescribeLayout(self): svc = 
self.svc self.assertRaises( NotImplementedError, svc.describeLayout, 'Contact') def testSetMultiPicklistToEmpty(self): svc = self.svc originalList = ["Pear", "Apple"] newList = [] lastname = 'Doe' data = dict( type='Contact', LastName=lastname, FirstName='John', Favorite_Fruit__c=originalList ) res = svc.create([data]) id = res[0]['id'] self._todelete.append(id) contacts = svc.retrieve('LastName, Favorite_Fruit__c', 'Contact', [id]) self.assertEqual(len(contacts[0]['Favorite_Fruit__c']), 2) data = dict( type='Contact', Id=id, Favorite_Fruit__c=newList) svc.update(data) contacts = svc.retrieve('LastName, Favorite_Fruit__c', 'Contact', [id]) self.failUnless(isinstance(contacts[0]['Favorite_Fruit__c'], list)) self.assertEqual(len(contacts[0]['Favorite_Fruit__c']), 0) def testAddToEmptyMultiPicklist(self): svc = self.svc originalList = [] newList = ["Pear", "Apple"] lastname = 'Doe' data = dict( type='Contact', LastName=lastname, FirstName='John', Favorite_Fruit__c=originalList ) res = svc.create([data]) id = res[0]['id'] self._todelete.append(id) contacts = svc.retrieve('LastName, Favorite_Fruit__c', 'Contact', [id]) self.failUnless(isinstance(contacts[0]['Favorite_Fruit__c'], list)) self.assertEqual(len(contacts[0]['Favorite_Fruit__c']), 0) data = dict( type='Contact', Id=id, Favorite_Fruit__c=newList) svc.update(data) contacts = svc.retrieve('LastName, Favorite_Fruit__c', 'Contact', [id]) self.failUnless(isinstance(contacts[0]['Favorite_Fruit__c'], list)) self.assertEqual(len(contacts[0]['Favorite_Fruit__c']), 2) def testIsNillableField(self): svc = self.svc res = svc.describeSObjects('Contact') self.assertFalse(res[0].fields['LastName'].nillable) self.assertTrue(res[0].fields['FirstName'].nillable) self.assertTrue(res[0].fields['Favorite_Fruit__c'].nillable) def testUpsert(self): svc = self.svc data = dict( type='Contact', LastName='Doe', FirstName='John', Phone='123-456-7890', Email='john@doe.com', Birthdate=datetime.date(1970, 1, 4) ) res = svc.upsert('Email', [data]) self.failUnless(type(res) in (ListType, TupleType)) self.failUnless(len(res) == 1) self.failUnless(res[0]['success']) id = res[0]['id'] self._todelete.append(id) contacts = svc.retrieve( 'LastName, FirstName, Phone, Email, Birthdate', 'Contact', [id]) self.assertEqual(len(contacts), 1) contact = contacts[0] for k in ['LastName', 'FirstName', 'Phone', 'Email', 'Birthdate']: self.assertEqual( data[k], contact[k]) def testPrepareSObjectsWithNone(self): obj = { 'val': None, } prepped_obj = _prepareSObjects([obj]) self.assertEqual(prepped_obj, [{ 'val': [], 'fieldsToNull': ['val'], }]) def testRetrieveTextWithNewlines(self): data = dict( type='Contact', LastName='Doe', FirstName='John', Description="This is a\nmultiline description.", ) res = self.svc.create([data]) self.failUnless(type(res) in (ListType, TupleType)) self.failUnless(len(res) == 1) self.failUnless(res[0]['success']) id = res[0]['id'] self._todelete.append(id) contacts = self.svc.retrieve('Description', 'Contact', [id]) self.assertEqual(len(contacts), 1) contact = contacts[0] self.assertEqual(data['Description'], contact['Description']) def test_suite(): return unittest.TestSuite(( unittest.makeSuite(TestUtils), )) if __name__ == '__main__': unittest.main(defaultTest='test_suite')
On 8 November, gain insight into how the institutions deal with day-to-day business situations not regulated by arbitration rules. Representatives of the four arbitration institutions will share practical experience of the details of day-to-day case administration not foreseen in the rules, and of how they deal with them. Situations not regulated by the rules frequently occur at all arbitral institutions, and the principles applied by the institutions in these situations are of high relevance both for parties and for arbitrators. The seminar is free of charge; registration can be made online on a "first come, first served" basis by 31 October 2018.
from __future__ import print_function, division
import sys
sys.dont_write_bytecode = True
from ok import *
import random
r = random.random
isa = isinstance

"""
# Compartmental Modeling
## Diapers

  q   +-----+  r   +-----+
 ---->|  C  |----->|  D  |--> s
  ^   +-----+      +--+--+
  |                   |
  +-------------------+

C = stock of clean diapers
D = stock of dirty diapers
q = inflow of clean diapers
r = flow of clean diapers to dirty diapers
s = out-flow of dirty diapers
"""

class o:
  """Emulate Javascript's uber simple objects.
  Note my convention: I use "`i`" not "`this`."""
  def has(i)              : return i.__dict__
  def __init__(i, **d)    : i.has().update(d)
  def __setitem__(i, k, v): i.has()[k] = v
  def __getitem__(i, k)   : return i.has()[k]
  def __repr__(i)         : return 'o' + str(i.has())
  def copy(i):
    j = o()
    for k in i.has():
      j[k] = i[k]
    return j
  def asList(i, keys=[]):
    keys = keys or list(i.has().keys())
    return [i[k] for k in keys]

class Has:
  """One named quantity in the model, clipped to the range [lo, hi]."""
  def __init__(i, init, lo=0, hi=100):
    i.init, i.lo, i.hi = init, lo, hi
  def restrain(i, x):
    return max(i.lo, min(i.hi, x))
  def rank(i):
    "Stocks are reported first, then auxiliaries, then flows."
    if isa(i, Flow) : return 3
    if isa(i, Stock): return 1
    if isa(i, Aux)  : return 2
  def __repr__(i):
    return str(dict(what=i.__class__.__name__,
                    name=i.name, init=i.init,
                    lo=i.lo, hi=i.hi))

class Flow(Has) : pass
class Stock(Has): pass
class Aux(Has)  : pass

F, S, A = Flow, Stock, Aux

class Model:
  def about(i):
    tmp = i.have()
    for k, v in tmp.has().items():
      v.name = k
    return tmp
  def run(i, dt=1, tmax=100):
    t, u, keep = 0, o(), []
    about = i.about()
    keys = sorted(about.has().keys(), key=lambda z: about[z].rank())
    for k, a in about.has().items():
      u[k] = a.init
    keep = [["t"] + keys,
            [0] + u.asList(keys)]
    while t < tmax:
      v = u.copy()
      i.step(dt, t, u, v)              # the model updates v from the old state u
      for k in about.has():
        v[k] = about[k].restrain(v[k])
      t += dt
      u = v                            # advance the state
      keep += [[t] + u.asList(keys)]
    return keep

class Diapers(Model):
  def have(i):
    return o(C=S(20), D=S(0),
             q=F(0),  r=F(8), s=F(0))
  def step(i, dt, t, u, v):
    def saturday(x): return int(x) % 7 == 6
    v.C += dt * (u.q - u.r)
    v.D += dt * (u.r - u.s)
    v.q  = 70  if saturday(t) else 0
    v.s  = u.D if saturday(t) else 0
    if t == 27:  # special case (the day i forget)
      v.s = 0

@ok
def _diapers1():
  print(Diapers().about())
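A minimal usage sketch of the model above (the column order shown in the comment is indicative only; it comes from sorting the quantities by Has.rank):

if __name__ == '__main__':
  table = Diapers().run(dt=1, tmax=70)   # ten simulated weeks at one-day steps
  print(table[0])                        # header, e.g. ['t', 'C', 'D', 'q', 'r', 's']
  for row in table[1:8]:                 # the first simulated week of stocks and flows
    print(row)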
CentricsMI is our web-service-based order routing platform. Using well-documented and widely used APIs, retailers can submit photo product orders to be distributed to our network of manufacturers. The service also provides two-way communication between manufacturers and retailers, so customers can get automated updates on order and shipping status. CentricsMI forms the basis of our extensive product fulfillment network and is leveraged by leading companies such as Kodak Alaris, Walmart, Fujifilm, District Photo and dozens of other companies that make or sell photo products.
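As an illustration only of how such an order-routing web service is typically driven (the host, endpoints, payload fields and identifiers below are hypothetical and are not taken from the CentricsMI documentation; the third-party requests library is used for brevity):

import requests

BASE = 'https://api.example-order-router.test/v1'           # placeholder host

order = {                                                   # illustrative payload
    'retailer_id': 'RETAILER-123',
    'product': 'photo-book-8x8',
    'quantity': 1,
    'ship_to': {'name': 'J. Doe', 'postcode': '12345'},
}

# Submit the order; the service routes it to a manufacturer in the network.
resp = requests.post(BASE + '/orders', json=order, timeout=30)
resp.raise_for_status()
order_id = resp.json()['order_id']

# Two-way status flow: poll (or receive callbacks) for manufacturing/shipping updates.
status = requests.get(BASE + '/orders/%s/status' % order_id, timeout=30).json()
print(status.get('state'), status.get('tracking_number'))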
# -*- coding: utf-8 -*-
import os


class HostFile:
    # Detour entries live between two copies of this marker line, so the rest
    # of the hosts file is never touched.
    delimiter = "##### DO NOT EDIT BETWEEN THESE LINES - DETOURD #####\n"

    def __init__(self, path):
        self.path = path

    def canWrite(self):
        try:
            f = open(self.path, 'r+')
            f.close()
            return True
        except IOError:
            return False

    def getDetours(self):
        with open(self.path, 'r') as handle:
            detours = []
            opened = False
            for line in handle:
                if line == self.delimiter:
                    if opened:
                        break
                    else:
                        opened = True
                else:
                    if opened:
                        try:
                            detours.append(self.parseDetour(line))
                        except IndexError:
                            pass  # skip malformed lines inside the block
            return detours

    def writeDetours(self, detours):
        with open(self.path, 'r+') as handle:
            buffer = ''
            opened = False
            for line in handle:
                if line == self.delimiter:
                    opened = not opened
                    continue
                if opened:
                    continue
                buffer += line
            handle.seek(0)
            handle.truncate()
            handle.write(self.delimiter)
            for detour in detours:
                handle.write("%s\t%s\n" % (detour['ip'], detour['host'].lower()))
            handle.write(self.delimiter)
            handle.write(buffer)

    def parseDetour(self, line):
        split = line.strip().split()
        return {'ip': split[0], 'host': split[1]}

    def findDetour(self, host):
        for detour in self.getDetours():
            if detour['host'] == host.lower():
                return detour
        return None
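A minimal usage sketch for the HostFile class above (the path and host entry are illustrative; editing the real hosts file normally requires elevated privileges):

hosts = HostFile('/etc/hosts')                     # illustrative path
if hosts.canWrite():
    detours = hosts.getDetours()                   # entries currently inside the marker block
    if hosts.findDetour('example.dev') is None:
        detours.append({'ip': '127.0.0.1', 'host': 'example.dev'})
    hosts.writeDetours(detours)                    # rewrites the block, leaves the rest of the file intact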
In collaboration, Josh Baer, Demisch Danant and Friedman Benda Gallery present In Their Own Words, an exhibition of photography, sculpture, furniture and architecture created within, and often in reaction to, various socio-political moments of the 20th century. Captioned by the artists' own words, the creators' original impetus and intent are highlighted. By pairing commentary alongside art and design objects, In Their Own Words elicits recognition of the artist's initial concept and examines the current understanding of those same ideas. Furthermore, by exhibiting artworks and design objects together, In Their Own Words dissolves the long-held division between the fields and provokes a debate on the cultural significance and authority currently assigned to each. In Their Own Words will include quotations and works from some of the most influential creative voices of the past century. Each art and design object has been selected because of its socio-political character. Some works were created as part of a greater objective to improve societal conditions, while others document the social circumstances surrounding their creation. Works will be included by Studio Alchimia, Atelier A, Ron Arad, Hans Bellmer, Jurgen Bey, Alighiero Boetti, Lee Bontecou, Marcel Breuer, Charles Eames, Pentagon Group, Jeff Koons, Jackson Pollock, Jean Prouvé, Gerrit Rietveld, Richard Serra, Ettore Sottsass, Tato, Rosemarie Trockel, Lawrence Weiner, Kehinde Wiley, and Frank Lloyd Wright.
#!/usr/bin/env python3 """Module for CISCO-CDP-MIB.""" from collections import defaultdict from switchmap.snmp.base_query import Query def get_query(): """Return this module's Query class.""" return CiscoCdpQuery def init_query(snmp_object): """Return initialize and return this module's Query class.""" return CiscoCdpQuery(snmp_object) class CiscoCdpQuery(Query): """Class interacts with CISCO-CDP-MIB. Args: None Returns: None Key Methods: supported: Queries the device to determine whether the MIB is supported using a known OID defined in the MIB. Returns True if the device returns a response to the OID, False if not. layer1: Returns all needed layer 1 MIB information from the device. Keyed by OID's MIB name (primary key), ifIndex (secondary key) """ def __init__(self, snmp_object): """Function for intializing the class. Args: snmp_object: SNMP Interact class object from snmp_manager.py Returns: None """ # Define query object self.snmp_object = snmp_object # Get one OID entry in MIB (cdpCacheDeviceId) test_oid = '.1.3.6.1.4.1.9.9.23.1.2.1.1.6' super().__init__(snmp_object, test_oid, tags=['layer1']) def layer1(self): """Get layer 1 data from device. Args: None Returns: final: Final results """ # Initialize key variables final = defaultdict(lambda: defaultdict(dict)) # Get interface cdpCacheDeviceId data values = self.cdpcachedeviceid() for key, value in values.items(): final[key]['cdpCacheDeviceId'] = value # Get interface cdpCachePlatform data values = self.cdpcacheplatform() for key, value in values.items(): final[key]['cdpCachePlatform'] = value # Get interface cdpCacheDevicePort data values = self.cdpcachedeviceport() if values is not None: for key, value in values.items(): final[key]['cdpCacheDevicePort'] = value # Return return final def cdpcachedeviceid(self, oidonly=False): """Return dict of CISCO-CDP-MIB cdpCacheDeviceId for each port. Args: oidonly: Return OID's value, not results, if True Returns: data_dict: Dict of cdpCacheDeviceId using ifIndex as key """ # Initialize key variables data_dict = defaultdict(dict) # OID to process oid = '.1.3.6.1.4.1.9.9.23.1.2.1.1.6' # Return OID value. Used for unittests if oidonly is True: return oid # Process results results = self.snmp_object.swalk(oid, normalized=False) for key, value in results.items(): ifindex = _ifindex(key) data_dict[ifindex] = str(bytes(value), encoding='utf-8') # Return the interface descriptions return data_dict def cdpcacheplatform(self, oidonly=False): """Return dict of CISCO-CDP-MIB cdpCachePlatform for each port. Args: oidonly: Return OID's value, not results, if True Returns: data_dict: Dict of cdpCachePlatform using ifIndex as key """ # Initialize key variables data_dict = defaultdict(dict) # OID to process oid = '.1.3.6.1.4.1.9.9.23.1.2.1.1.8' # Return OID value. Used for unittests if oidonly is True: return oid # Process results results = self.snmp_object.swalk(oid, normalized=False) for key, value in results.items(): ifindex = _ifindex(key) data_dict[ifindex] = str(bytes(value), encoding='utf-8') # Return the interface descriptions return data_dict def cdpcachedeviceport(self, oidonly=False): """Return dict of CISCO-CDP-MIB cdpCacheDevicePort for each port. Args: oidonly: Return OID's value, not results, if True Returns: data_dict: Dict of cdpCacheDevicePort using ifIndex as key """ # Initialize key variables data_dict = defaultdict(dict) # OID to process oid = '.1.3.6.1.4.1.9.9.23.1.2.1.1.7' # Return OID value. 
Used for unittests if oidonly is True: return oid # Process results results = self.snmp_object.swalk(oid, normalized=False) for key, value in results.items(): ifindex = _ifindex(key) data_dict[ifindex] = str(bytes(value), encoding='utf-8') # Return the interface descriptions return data_dict def _ifindex(oid): """Return the ifindex from a CDP OID. Args: oid: OID Returns: ifindex: value of the ifindex """ # Initialize key variables nodes = oid.split('.') ifindex = int(nodes[-2]) # Return return ifindex
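A minimal usage sketch for the CDP query class above (snmp_object is assumed to be an already-initialised interaction object from snmp_manager, which lives outside this module; supported() comes from the base Query class described in the class docstring):

query = init_query(snmp_object)
if query.supported():                         # device answers the CDP test OID
    for ifindex, data in query.layer1().items():
        # _ifindex() took the second-to-last OID node as the port index,
        # e.g. '.1.3.6.1.4.1.9.9.23.1.2.1.1.6.24.7' -> 24
        print(ifindex,
              data.get('cdpCacheDeviceId'),
              data.get('cdpCachePlatform'),
              data.get('cdpCacheDevicePort'))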
Zack Cozart had three hits, and closer Cody Allen notched an adventurous four-out save to help the Los Angeles Angels to a 6-5 victory against the host Chicago Cubs on Saturday afternoon. Justin Bour and Brian Goodwin added two hits apiece for the Angels, who evened the three-game series at 1-1. Javier Baez doubled three times for the Cubs, and Jason Heyward had two hits. The first five Angels reached base in the second inning, paving the way for a three-run rally. David Fletcher and Cozart contributed run-scoring singles before pitcher Chris Stratton drove in a run on a double play. A catcher's interference call against Cubs catcher Contreras preceded Cozart's hit. Baez smacked RBI doubles in the third and fifth innings to help the Cubs rally from their early three-run deficit. Cubs right-hander Kyle Hendricks (0-3) retired the Angels in order in the top half of both innings, limiting further damage. Los Angeles struck immediately when Cubs manager Joe Maddon went to his bullpen in the sixth. With two outs, pinch hitter Albert Pujols drew an RBI walk against Brad Brach, prompting another pitching change. Randy Rosario walked Kole Calhoun, the first hitter he faced, to force home another run, giving the Angels a 5-2 advantage. Peter Bourjos added a run-scoring sacrifice fly in the eighth inning to pad the lead to 6-2. Chicago loaded the bases with two out in the bottom of the eighth. Representing the tying run, pinch hitter Mark Zagunis hit a two-run single against Hansel Robles to bring the Cubs within two. Allen fanned Daniel Descalso to end the threat before navigating the wild drama of the ninth, earning his fourth save in as many chances this season. Stratton allowed two runs on five hits in 4 2/3 innings, walking four and striking out five. He fell one out short of qualifying for the win. Ty Buttrey (1-0) earned the win in relief, retiring Heyward on a groundout to end the fifth. Hendricks gave up three runs (two earned) on six hits in five innings, with one walk and two strikeouts.
#!/usr/bin/env python # -*- coding: utf-8 -*- ################################### # Initialization and startup file # ################################### def packageMissing(name): raise ImportError('Dependency \''+name+'\' has not been found. Please refer to the installation manual.') import time, os, webbrowser, collections try: from bottle import Bottle except ImportError: packageMissing("Bottle") serverUsed = "" try: if os.name.lower() == "nt": import cherrypy serverUsed = "cherrypy" cherrypy.response.timeout = 14400000 cherrypy.config.update({'response.timeout': 14400000}) cherrypy.engine.timeout_monitor.unsubscribe() else: raise ImportError("Not Using Windows") except ImportError: try: # Fallback for MacOS, Unix.. or for Windows not having installed cherrypy import paste serverUsed = "paste" except ImportError: packageMissing("paste") from Controllers.HomeController import homebottle, homesetclassifiercollection from Models.ClassifierCollection import ClassifierCollection from Models.ClassificationModules.nndescriptiononly import nndescriptiononly from Models.ClassificationModules.lrdescriptiononly import lrdescriptiononly from Models.ClassificationModules.nnreadmeonly import nnreadmeonly from Models.ClassificationModules.lrreadmeonly import lrreadmeonly from Models.ClassificationModules.readmeonlyrandomforest import readmeonlyrandomforest from Models.ClassificationModules.multinomialnbreadmeonly import multinomialnbreadmeonly from Models.ClassificationModules.multinomialnbdescriptiononly import multinomialnbdescriptiononly from Models.ClassificationModules.bernoullinbreadmeonly import bernoullinbreadmeonly from Models.ClassificationModules.bernoullinbdescriptiononly import bernoullinbdescriptiononly from Models.ClassificationModules.nnmetaonly import nnmetaonly from Models.ClassificationModules.metaonlyrandomforest import metaonlyrandomforest from Models.ClassificationModules.metaonlysvc import metaonlysvc from Models.ClassificationModules.metaonlyadaboost import metaonlyadaboost from Models.ClassificationModules.reponamelstm import reponamelstm from Models.ClassificationModules.readmelstm import readmelstm from Models.ClassificationModules.nnall import nnall from Models.ClassificationModules.knnreadmeonly import knnreadmeonly from Models.ClassificationModules.svcfilenamesonly import filenamesonlysvc from Models.ClassificationModules.lrstacking import lrstacking from Models.ClassificationModules.svmall import svmall from Models.ClassificationModules.rfall import allrandomforest from Models.ClassificationModules.gbrtmetaonly import gbrtmetaonly from Models.ClassificationModules.gbrtreadmeonly import gbrtreadmeonly from Models.ClassificationModules.gbrtfilesandfolders import gbrtfilesandfolders from Models.ClassificationModules.gbrtdescriptionmeta import gbrtdescriptionmeta from Models.ClassificationModules.svmreadmemeta import svmreadmemeta from Models.ClassificationModules.allbernoullinb import allbernoullinb from Models.ClassificationModules.allmultinomialnb import allmultinomialnb from Models.ClassificationModules.averageensemble import averageensemble from Models.ClassificationModules.nnstacking import nnstacking from Models.ClassificationModules.lrstackingmeta import lrstackingmeta from Models.ClassificationModules.foldernameslstm import foldernameslstm from Models.ClassificationModules.descriptionfoldersreponamelstm import descriptionfoldersreponamelstm from Models.ClassificationModules.descriptionlstm import descriptionlstm from 
Models.ClassificationModules.descriptionreponamelstm import descriptionreponamelstm import Models.DatabaseCommunication as DC print("Starting application..") rootApp = Bottle() # Initialize ClassifierCollection classifiercollection = ClassifierCollection() print 'Getting DB Data to be able to create vectorizers for classifiers that need it' descriptionCorpus, readmeCorpus, filenameCorpus, filetypeCorpus, foldernameCorpus = DC.getCorpi() #Initialize Classifiers print 'Creating and adding Classifiers to Classifier Collection:' # First load all classifiers which don't need other classifiers as parameter loadedClassifiers = [] # Keep track, which classifiers have be loaded or such attempt has been made classifiers = {} classifiers['metaonlyrandomforest'] = metaonlyrandomforest() classifiers['metaonlysvc'] = metaonlysvc() classifiers['gbrtdescriptionmeta'] = gbrtdescriptionmeta(descriptionCorpus) classifiers['svmreadmemeta'] = svmreadmemeta(readmeCorpus) #classifiers['descriptionlstm'] = descriptionlstm() # Remove all commented classifiers? #classifiers['descriptionfoldersreponamelstm'] = descriptionfoldersreponamelstm() classifiers['reponamelstm'] = reponamelstm() #classifiers['readmelstm'] = readmelstm() #classifiers['descriptionreponamelstm'] = descriptionreponamelstm() for classifier in classifiers: loaded_classifier = classifiers[classifier].loadClassificationModuleSavePoint(filename="lastused") if loaded_classifier is not None: classifiers[classifier] = loaded_classifier loadedClassifiers.append(classifier) # Now all classifiers should have been loaded from last savepoint, if available # Use these loaded classifiers by giving them to specific ensemble-Models classifiers['nnall'] = nnall(readmeCorpus + descriptionCorpus, filetypeCorpus, filenameCorpus, foldernameCorpus) classifiers['svmall'] = svmall(readmeCorpus + descriptionCorpus, filetypeCorpus, filenameCorpus, foldernameCorpus) classifiers['allrandomforest'] = allrandomforest(readmeCorpus + descriptionCorpus, filetypeCorpus, filenameCorpus, foldernameCorpus) for classifier in classifiers: if classifier not in loadedClassifiers: loaded_classifier = classifiers[classifier].loadClassificationModuleSavePoint(filename="lastused") if loaded_classifier is not None: classifiers[classifier] = loaded_classifier loadedClassifiers.append(classifier) #classifiers['lrstacking'] = lrstacking([classifiers['nnall'], classifiers['metaonlyrandomforest'], classifiers['svmall'], classifiers['metaonlysvc'], classifiers['allrandomforest'], classifiers['reponamelstm'], classifiers['gbrtdescriptionmeta'], classifiers['svmreadmemeta']]) #classifiers['averageensemble'] = averageensemble([classifiers['nnall'], classifiers['metaonlyrandomforest'], classifiers['svmall'], classifiers['metaonlysvc'], classifiers['allrandomforest'], classifiers['reponamelstm'], classifiers['gbrtdescriptionmeta'], classifiers['svmreadmemeta']]) classifiers['nnstacking'] = nnstacking([classifiers['nnall'], classifiers['metaonlyrandomforest'], classifiers['svmall'], classifiers['metaonlysvc'], classifiers['allrandomforest'], classifiers['reponamelstm'], classifiers['gbrtdescriptionmeta'], classifiers['svmreadmemeta']]) #classifiers['lrstackingmeta'] = lrstackingmeta([classifiers['nnall'], classifiers['metaonlyrandomforest'], classifiers['svmall'], classifiers['metaonlysvc'], classifiers['allrandomforest'], classifiers['reponamelstm'], classifiers['gbrtdescriptionmeta'], classifiers['svmreadmemeta']]) # Finally load all meta-models such as lrstacking for classifier in classifiers: if 
classifier not in loadedClassifiers: loaded_classifier = classifiers[classifier].loadClassificationModuleSavePoint(filename="lastused") if loaded_classifier is not None: classifiers[classifier] = loaded_classifier # Order the classifiers for the final submission orderedClassifiers = collections.OrderedDict() order = ['nnstacking', 'gbrtdescriptionmeta', 'svmall', 'nnall', 'svmreadmemeta', 'allrandomforest', 'metaonlyrandomforest', 'metaonlysvc', 'reponamelstm'] for classifiername in order: orderedClassifiers[classifiername] = classifiers[classifiername] # Load classifiers into collection for c in orderedClassifiers: classifiercollection.addClassificationModule(classifiers[c]) # Pass ClassifierCollection to Controller homesetclassifiercollection(classifiercollection) # Wait a bit so website doesnt get called before it's ready time.sleep(3) print 'Done. Starting Bottle...' #Start Bottle if __name__ == '__main__': webbrowser.open("http://localhost:8080/") rootApp.merge(homebottle) rootApp.run(server=serverUsed, debug=True)
At our service desk, when a device is reported as slow, broken, or malfunctioning, or the user wishes to have it replaced for any other reason, we first determine the age of the device. If the device is outside the standard hardware lifecycle, it will be replaced, because the maintenance and TCO (Total Cost of Ownership) of devices older than the standard lifecycle exceed the replacement cost. If it's within the lifecycle, it will either be repaired, or we'll evaluate whether the user actually needs a more capable machine to carry out their role. In very general, cumulative terms, the TCO of a device increases over time; when the annual TCO exceeds the cost of a new device, it is overdue to be replaced. The main drivers of that rising cost are loss of productivity for the employee using the device, added complexity from maintaining an older (less uniform) fleet, and security concerns due to older devices. This is standard across the IT industry, although many science/tech firms may have dramatically shorter lifecycles due to the higher workloads that devices are expected to handle. The above lifecycle means that we will replace 33% of our laptops each year, 25% of our desktops, 20% of our monitors, and so on. This is the staggered approach; some firms employ the forklift approach, which means replacing (e.g.) the entire laptop fleet once every three years. This hits cash flow harder and can be more disruptive during the change, but has the advantage of delivering a perfectly uniform fleet of hardware each time. Many contact-centre-style businesses employ this approach. Get rid of tuition fees: all university-level education should be free. Those people crying out for the good old days when fewer people went to university have got completely the wrong end of the stick. 100 years ago, the same could be said for high/secondary school – why do we need our working classes to be able to read and write, do reasonably complex maths, or understand any scientific principles at all? We live in an age where (almost) everything we do, everything we work with, play with, consume and produce is linked inextricably to very complex scientific products and concepts. Some of the people arguing here went to school before the structure of DNA was discovered, for heaven's sake. School children now learn about the structure and principles of DNA, particle physics, climate modelling, computing science, software development, and other material that didn't exist 30 years ago. It's simply not the case that there's an "ideal" percentage of the population that should have a university education. As society and technology progress, there is simply more to know and more to understand. This has been the case since the dawn of human civilisation and will continue to be the case until civilisation ceases to be. As a society, we owe it to ourselves to aim to provide a university (and higher, if possible) education to every person who desires it and is able to pursue it. The progress and survival of the human race to some degree rely upon us getting this right, not penny-pinching and making people pay for the "privilege" of developing their (and as a result, society's) skillset and knowledge.
Just as we reap the benefits of all children going to school up to the age of sixteen, the benefits of nearly everyone in society having a higher-level education wouldn't take long to be realised: from the development of life-enhancing and life-preserving technologies to the faster development of alternative energy sources and the mitigation of climate change. There is also such a thing as knowledge for knowledge's sake. A more educated society is a fairer, more equal, and (hopefully) happier society. Put simply, higher education benefits all of us, not just the person being educated.
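Returning to the hardware-lifecycle discussion above, the staggered percentages follow directly from the assumed lifecycles (one third of a three-year laptop fleet each year, one quarter of a four-year desktop fleet, one fifth of a five-year monitor fleet), and the replace-versus-repair decision is a comparison of annual TCO against the purchase price. A small sketch with illustrative fleet sizes and costs only:

# Illustrative numbers; real fleet sizes, TCO curves and prices will differ.
fleet = {                   # device: (units in fleet, lifecycle in years)
    'laptop':  (300, 3),    # 1/3 replaced per year -> 33%
    'desktop': (200, 4),    # 1/4 replaced per year -> 25%
    'monitor': (500, 5),    # 1/5 replaced per year -> 20%
}

for device, (units, lifecycle) in fleet.items():
    per_year = units / float(lifecycle)               # staggered approach
    print('%s: replace ~%d units (%.0f%%) each year'
          % (device, per_year, 100.0 / lifecycle))

# Replace-versus-repair rule for a single reported device:
annual_tco = 950.0          # hypothetical yearly cost of keeping the old unit
new_price = 800.0           # hypothetical price of a replacement
print('replace' if annual_tco > new_price else 'repair or reassess')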
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for Oracle's ZFSSA REST API. """ import mock from oslo_log import log from manila import exception from manila.share.drivers.zfssa import restclient from manila.share.drivers.zfssa import zfssarest from manila import test from manila.tests import fake_zfssa LOG = log.getLogger(__name__) class ZFSSAApiTestCase(test.TestCase): """Tests ZFSSAApi.""" @mock.patch.object(zfssarest, 'factory_restclient') def setUp(self, _restclient): super(ZFSSAApiTestCase, self).setUp() self.host = 'fakehost' self.user = 'fakeuser' self.url = None self.pool = 'fakepool' self.project = 'fakeproject' self.share = 'fakeshare' self.snap = 'fakesnapshot' _restclient.return_value = fake_zfssa.FakeRestClient() self._zfssa = zfssarest.ZFSSAApi() self._zfssa.set_host('fakehost') def _create_response(self, status): response = fake_zfssa.FakeResponse(status) return response def test_enable_service(self): self.mock_object(self._zfssa.rclient, 'put') self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.ACCEPTED) self._zfssa.enable_service('nfs') self.assertEqual(1, self._zfssa.rclient.put.call_count) self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.OK) self.assertRaises(exception.ShareBackendException, self._zfssa.enable_service, 'nfs') def test_verify_avail_space(self): self.mock_object(self._zfssa, 'verify_project') self.mock_object(self._zfssa, 'get_project_stats') self._zfssa.get_project_stats.return_value = 2000 self._zfssa.verify_avail_space(self.pool, self.project, self.share, 1000) self.assertEqual(1, self._zfssa.verify_project.call_count) self.assertEqual(1, self._zfssa.get_project_stats.call_count) self._zfssa.verify_project.assert_called_with(self.pool, self.project) self._zfssa.get_project_stats.assert_called_with(self.pool, self.project) self._zfssa.get_project_stats.return_value = 900 self.assertRaises(exception.ShareBackendException, self._zfssa.verify_avail_space, self.pool, self.project, self.share, 1000) def test_create_project(self): self.mock_object(self._zfssa, 'verify_pool') self.mock_object(self._zfssa.rclient, 'get') self.mock_object(self._zfssa.rclient, 'post') arg = { 'name': self.project, 'sharesmb': 'off', 'sharenfs': 'off', 'mountpoint': 'fakemnpt', } self._zfssa.rclient.get.return_value = self._create_response( restclient.Status.NOT_FOUND) self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.CREATED) self._zfssa.create_project(self.pool, self.project, arg) self.assertEqual(1, self._zfssa.rclient.get.call_count) self.assertEqual(1, self._zfssa.rclient.post.call_count) self.assertEqual(1, self._zfssa.verify_pool.call_count) self._zfssa.verify_pool.assert_called_with(self.pool) self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.NOT_FOUND) self.assertRaises(exception.ShareBackendException, self._zfssa.create_project, self.pool, self.project, arg) def 
test_create_share(self): self.mock_object(self._zfssa, 'verify_avail_space') self.mock_object(self._zfssa.rclient, 'get') self.mock_object(self._zfssa.rclient, 'post') self._zfssa.rclient.get.return_value = self._create_response( restclient.Status.NOT_FOUND) self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.CREATED) arg = { "name": self.share, "quota": 1, } self._zfssa.create_share(self.pool, self.project, arg) self.assertEqual(1, self._zfssa.rclient.get.call_count) self.assertEqual(1, self._zfssa.rclient.post.call_count) self.assertEqual(1, self._zfssa.verify_avail_space.call_count) self._zfssa.verify_avail_space.assert_called_with(self.pool, self.project, arg, arg['quota']) self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.NOT_FOUND) self.assertRaises(exception.ShareBackendException, self._zfssa.create_share, self.pool, self.project, arg) self._zfssa.rclient.get.return_value = self._create_response( restclient.Status.OK) self.assertRaises(exception.ShareBackendException, self._zfssa.create_share, self.pool, self.project, arg) def test_modify_share(self): self.mock_object(self._zfssa.rclient, 'put') self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.ACCEPTED) arg = {"name": "dummyname"} svc = self._zfssa.share_path % (self.pool, self.project, self.share) self._zfssa.modify_share(self.pool, self.project, self.share, arg) self.assertEqual(1, self._zfssa.rclient.put.call_count) self._zfssa.rclient.put.assert_called_with(svc, arg) self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.BAD_REQUEST) self.assertRaises(exception.ShareBackendException, self._zfssa.modify_share, self.pool, self.project, self.share, arg) def test_delete_share(self): self.mock_object(self._zfssa.rclient, 'delete') self._zfssa.rclient.delete.return_value = self._create_response( restclient.Status.NO_CONTENT) svc = self._zfssa.share_path % (self.pool, self.project, self.share) self._zfssa.delete_share(self.pool, self.project, self.share) self.assertEqual(1, self._zfssa.rclient.delete.call_count) self._zfssa.rclient.delete.assert_called_with(svc) def test_create_snapshot(self): self.mock_object(self._zfssa.rclient, 'post') self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.CREATED) arg = {"name": self.snap} svc = self._zfssa.snapshots_path % (self.pool, self.project, self.share) self._zfssa.create_snapshot(self.pool, self.project, self.share, self.snap) self.assertEqual(1, self._zfssa.rclient.post.call_count) self._zfssa.rclient.post.assert_called_with(svc, arg) self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.BAD_REQUEST) self.assertRaises(exception.ShareBackendException, self._zfssa.create_snapshot, self.pool, self.project, self.share, self.snap) def test_delete_snapshot(self): self.mock_object(self._zfssa.rclient, 'delete') self._zfssa.rclient.delete.return_value = self._create_response( restclient.Status.NO_CONTENT) svc = self._zfssa.snapshot_path % (self.pool, self.project, self.share, self.snap) self._zfssa.delete_snapshot(self.pool, self.project, self.share, self.snap) self.assertEqual(1, self._zfssa.rclient.delete.call_count) self._zfssa.rclient.delete.assert_called_with(svc) self._zfssa.rclient.delete.return_value = self._create_response( restclient.Status.BAD_REQUEST) self.assertRaises(exception.ShareBackendException, self._zfssa.delete_snapshot, self.pool, self.project, self.share, self.snap) def test_clone_snapshot(self): 
self.mock_object(self._zfssa, 'verify_avail_space') self.mock_object(self._zfssa.rclient, 'put') self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.CREATED) snapshot = { "id": self.snap, "share_id": self.share, } clone = { "id": "cloneid", "size": 1, } arg = { "name": "dummyname", "quota": 1, } self._zfssa.clone_snapshot(self.pool, self.project, snapshot, clone, arg) self.assertEqual(1, self._zfssa.rclient.put.call_count) self.assertEqual(1, self._zfssa.verify_avail_space.call_count) self._zfssa.verify_avail_space.assert_called_with(self.pool, self.project, clone['id'], clone['size']) self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.NOT_FOUND) self.assertRaises(exception.ShareBackendException, self._zfssa.clone_snapshot, self.pool, self.project, snapshot, clone, arg) def _create_entry(self, sharenfs, ip): if sharenfs == 'off': sharenfs = 'sec=sys' entry = (',rw=@%s' % ip) if '/' not in ip: entry = entry + '/32' arg = {'sharenfs': sharenfs + entry} return arg def test_allow_access_nfs(self): self.mock_object(self._zfssa, 'get_share') self.mock_object(self._zfssa, 'modify_share') details = {"sharenfs": "off"} access = { "access_type": "nonip", "access_to": "foo", } # invalid access type self.assertRaises(exception.InvalidShareAccess, self._zfssa.allow_access_nfs, self.pool, self.project, self.share, access) # valid entry access.update({"access_type": "ip"}) arg = self._create_entry("off", access['access_to']) self._zfssa.get_share.return_value = details self._zfssa.allow_access_nfs(self.pool, self.project, self.share, access) self.assertEqual(1, self._zfssa.get_share.call_count) self.assertEqual(1, self._zfssa.modify_share.call_count) self._zfssa.get_share.assert_called_with(self.pool, self.project, self.share) self._zfssa.modify_share.assert_called_with(self.pool, self.project, self.share, arg) # add another entry access.update({"access_to": "10.0.0.1/24"}) arg = self._create_entry("off", access['access_to']) self._zfssa.allow_access_nfs(self.pool, self.project, self.share, access) self.assertEqual(2, self._zfssa.modify_share.call_count) self._zfssa.modify_share.assert_called_with(self.pool, self.project, self.share, arg) # verify modify_share is not called if sharenfs='on' details = {"sharenfs": "on"} self._zfssa.get_share.return_value = details self._zfssa.allow_access_nfs(self.pool, self.project, self.share, access) self.assertEqual(2, self._zfssa.modify_share.call_count) # verify modify_share is not called if ip is already in the list access.update({"access_to": "10.0.0.1/24"}) details = self._create_entry("off", access['access_to']) self._zfssa.get_share.return_value = details self._zfssa.allow_access_nfs(self.pool, self.project, self.share, access) self.assertEqual(2, self._zfssa.modify_share.call_count) def test_deny_access_nfs(self): self.mock_object(self._zfssa, 'get_share') self.mock_object(self._zfssa, 'modify_share') data1 = self._create_entry("off", "10.0.0.1") access = { "access_type": "nonip", "access_to": "foo", } # invalid access_type self.assertRaises(exception.InvalidShareAccess, self._zfssa.deny_access_nfs, self.pool, self.project, self.share, access) # valid entry access.update({"access_type": "ip"}) self._zfssa.get_share.return_value = data1 self._zfssa.deny_access_nfs(self.pool, self.project, self.share, access) self.assertEqual(1, self._zfssa.get_share.call_count) self.assertEqual(0, self._zfssa.modify_share.call_count) self._zfssa.get_share.assert_called_with(self.pool, self.project, self.share) # another 
valid entry data1 = self._create_entry(data1['sharenfs'], '10.0.0.2/24') data2 = self._create_entry(data1['sharenfs'], access['access_to']) self._zfssa.get_share.return_value = data2 self._zfssa.deny_access_nfs(self.pool, self.project, self.share, access) self.assertEqual(2, self._zfssa.get_share.call_count) self.assertEqual(1, self._zfssa.modify_share.call_count) self._zfssa.get_share.assert_called_with(self.pool, self.project, self.share) self._zfssa.modify_share.assert_called_with(self.pool, self.project, self.share, data1)
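The tests above follow one stub-and-assert pattern throughout: replace a REST client method with a mock, program its return status, call the driver method, then assert on call counts and arguments. A minimal sketch of that pattern, written with the plain mock library instead of manila's test.TestCase helpers; the URL string and status code are illustrative placeholders, not the driver's real API.

# Minimal sketch (not manila code) of the stub-and-assert pattern used above.
import mock

rclient = mock.Mock()
rclient.put.return_value = mock.Mock(status=202)  # stand-in for Status.ACCEPTED

# the code under test would issue this REST call internally; here we call it directly
response = rclient.put('/fake/service/nfs/enable')

rclient.put.assert_called_once_with('/fake/service/nfs/enable')
assert response.status == 202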
Book design, typesetting, graphics, and illustration. I am a designer and publishing consultant with seven years of in-house art direction and design experience. From covers and interiors to social media graphics and catalogs, my experience spans the full publishing cycle, and I specialize in working with clients who may need a range of design services. Whether you are a publisher looking for a technically skilled typesetter or a writer looking for someone to help bring your project to life, I have a solution for you.
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file

from waflib import Task, Errors
from waflib.TaskGen import taskgen_method, before_method

@taskgen_method
def add_dbus_file(self, filename, prefix, mode):
    if not hasattr(self, 'dbus_lst'):
        self.dbus_lst = []
    if not 'process_dbus' in self.meths:
        self.meths.append('process_dbus')
    self.dbus_lst.append([filename, prefix, mode])

@before_method('apply_core')
def process_dbus(self):
    for filename, prefix, mode in getattr(self, 'dbus_lst', []):
        node = self.path.find_resource(filename)
        if not node:
            raise Errors.WafError('file not found ' + filename)
        tsk = self.create_task('dbus_binding_tool', node, node.change_ext('.h'))
        tsk.env.DBUS_BINDING_TOOL_PREFIX = prefix
        tsk.env.DBUS_BINDING_TOOL_MODE = mode

class dbus_binding_tool(Task.Task):
    color = 'BLUE'
    ext_out = ['.h']
    run_str = '${DBUS_BINDING_TOOL} --prefix=${DBUS_BINDING_TOOL_PREFIX} --mode=${DBUS_BINDING_TOOL_MODE} --output=${TGT} ${SRC}'
    shell = True

def configure(conf):
    dbus_binding_tool = conf.find_program('dbus-binding-tool', var='DBUS_BINDING_TOOL')
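For context, a hedged sketch of how a project's wscript might consume this tool. The source file, XML file, prefix and target names are placeholders, and it assumes the tool is loaded under the name 'dbus' so that add_dbus_file is attached to task generators.

# Hypothetical wscript fragment; file names, prefix and target are placeholders.
def options(opt):
    opt.load('compiler_c')

def configure(conf):
    conf.load('compiler_c')
    conf.load('dbus')   # runs find_program('dbus-binding-tool') via configure() above

def build(bld):
    tg = bld.program(source='main.c', target='hello')
    # schedules dbus_binding_tool to generate dbus-service.h before compilation
    tg.add_dbus_file('dbus-service.xml', 'hello_service_', 'glib-server')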
The Tyne Theatre & Opera House is very excited to announce that the West End Operatic Society is bringing the Broadway smash hit and winner of eight Tony Awards, Hairspray, to the stage from Monday 13th to Saturday 18th April 2015! Hairspray’s toe-tapping 60s pop score makes it impossible to leave the theatre without a smile. It’s packed with humour and tells a story of love, respect and acceptance. In Baltimore, the 50s are out and change is in the air. Loveable plus-size heroine Tracy Turnblad has a passion for dancing and wins a spot on the local TV dance program, The Corny Collins Show. Overnight she finds herself transformed from outsider to teen celebrity, singing and dancing all the while! Can the larger-than-life adolescent manage to vanquish the show’s reigning princess, integrate the cast and find true love without messing up her hair?
from pytest import raises import sqlalchemy as sa from sqlalchemy.dialects import postgresql from sqlalchemy_utils.types import TSVectorType from sqlalchemy_utils.expressions import ( explain, explain_analyze, tsvector_match, tsvector_concat, to_tsquery, plainto_tsquery ) from tests import TestCase class ExpressionTestCase(TestCase): dns = 'postgres://postgres@localhost/sqlalchemy_utils_test' def create_models(self): class Article(self.Base): __tablename__ = 'article' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.Unicode(255)) content = sa.Column(sa.UnicodeText) search_vector = sa.Column(TSVectorType) search_vector2 = sa.Column(TSVectorType) self.Article = Article def assert_startswith(self, query, query_part): assert str( query.compile(dialect=postgresql.dialect()) ).startswith(query_part) # Check that query executes properly self.session.execute(query) class TestExplain(ExpressionTestCase): def test_render_explain(self): self.assert_startswith( explain(self.session.query(self.Article)), 'EXPLAIN SELECT' ) def test_render_explain_with_analyze(self): self.assert_startswith( explain(self.session.query(self.Article), analyze=True), 'EXPLAIN (ANALYZE true) SELECT' ) def test_with_string_as_stmt_param(self): self.assert_startswith( explain('SELECT 1 FROM article'), 'EXPLAIN SELECT' ) def test_format(self): self.assert_startswith( explain('SELECT 1 FROM article', format='json'), 'EXPLAIN (FORMAT json) SELECT' ) def test_timing(self): self.assert_startswith( explain('SELECT 1 FROM article', analyze=True, timing=False), 'EXPLAIN (ANALYZE true, TIMING false) SELECT' ) def test_verbose(self): self.assert_startswith( explain('SELECT 1 FROM article', verbose=True), 'EXPLAIN (VERBOSE true) SELECT' ) def test_buffers(self): self.assert_startswith( explain('SELECT 1 FROM article', analyze=True, buffers=True), 'EXPLAIN (ANALYZE true, BUFFERS true) SELECT' ) def test_costs(self): self.assert_startswith( explain('SELECT 1 FROM article', costs=False), 'EXPLAIN (COSTS false) SELECT' ) class TestExplainAnalyze(ExpressionTestCase): def test_render_explain_analyze(self): assert str( explain_analyze(self.session.query(self.Article)) .compile( dialect=postgresql.dialect() ) ).startswith('EXPLAIN (ANALYZE true) SELECT') class TestMatchTSVector(ExpressionTestCase): def test_raises_exception_if_less_than_2_parameters_given(self): with raises(Exception): str( tsvector_match( self.Article.search_vector, ) ) def test_supports_postgres(self): assert str(tsvector_match( self.Article.search_vector, to_tsquery('something'), )) == '(article.search_vector) @@ to_tsquery(:to_tsquery_1)' class TestToTSQuery(ExpressionTestCase): def test_requires_atleast_one_parameter(self): with raises(Exception): str(to_tsquery()) def test_supports_postgres(self): assert str(to_tsquery('something')) == 'to_tsquery(:to_tsquery_1)' class TestPlainToTSQuery(ExpressionTestCase): def test_requires_atleast_one_parameter(self): with raises(Exception): str(plainto_tsquery()) def test_supports_postgres(self): assert str(plainto_tsquery('s')) == ( 'plainto_tsquery(:plainto_tsquery_1)' ) class TestConcatTSVector(ExpressionTestCase): def test_concatenate_search_vectors(self): assert str(tsvector_match( tsvector_concat( self.Article.search_vector, self.Article.search_vector2 ), to_tsquery('finnish', 'something'), )) == ( '(article.search_vector || article.search_vector2) ' '@@ to_tsquery(:to_tsquery_1, :to_tsquery_2)' )
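Judging from the SQL these tests assert on, the expressions compose like ordinary SQLAlchemy clause elements. A hedged usage sketch, meant to sit inside a test method so that self.session and Article come from the fixture above; the search term is a placeholder.

# Hypothetical full-text query built from the expressions exercised above;
# it should render roughly as:
#   (article.search_vector || article.search_vector2) @@ to_tsquery(:to_tsquery_1)
matching_articles = (
    self.session.query(Article)
    .filter(
        tsvector_match(
            tsvector_concat(Article.search_vector, Article.search_vector2),
            to_tsquery('something'),
        )
    )
    .all()
)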
Why a Professional Auctioneer? | Why Fladeboe? Believing it is our job to “inspire audiences to give,” Fladeboe Auctions works closely with each client in the months leading up to the event to ensure the most successful auction possible. We understand the stress involved in planning a fundraising event and our goal is to make your auction run smoothly and efficiently while raising the maximum dollars possible! A professional fundraising auctioneer knows how to conduct a fundraising auction. A fundraising auction is different from any other kind of auction. The goal is not just to sell items, but to also get the audience excited about the mission of the organization. The auctioneer will remind the audience why it is important to bid high and bid often—“inspiring” them to give. Live auctions and direct giving moments raise significant dollars. Chances are, a large portion of the revenues raised at the event will come from the live auction and direct giving moment (fund‐a‐need). You trust professionals for catering and entertainment – you should do the same for the fundraising. If the audience is having fun, chances are you will have a successful auction. A professional fundraising auctioneer sees him/herself as an auctioneer and an entertainer. He/she knows how to keep the pace lively and interact with the audience in a fun and comfortable manner. An experienced fundraising auctioneer can guide you toward what works and what doesn’t. He/she will be able to tell you why a dinner party for ten with your board president as host and chef will create more enthusiasm (and bidding) than a fur coat or riding lawn mower. He/she will know how much similar live auction items have been selling for at other events and can help you to set attainable (and even exceed‐able) bidding goals for the audience. Professional fundraising auctioneers will be with you every step of the way. A professional fundraising auctioneer won’t just show up the night of the event to conduct the auction. He/she will take time to learn about your organization, your audience and your goals. You'll receive expert and experienced advice throughout the planning process.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu

""" Flash a clean, single FAT partition MBR onto the specified SD-card """

import os
import sys
import argparse
import multiprocessing

import data
from backend.util import flash_image_with_etcher
from util import CLILogger


def wipe_card(logger, sd_card):
    logger.step("Wiping `{}` SD-card by flashing empty/clean MBR".format(sd_card))
    retcode = multiprocessing.Value("i", -1)
    flash_image_with_etcher(
        os.path.join(data.data_dir, "mbr.img"), sd_card, retcode, True
    )
    if retcode.value == 0:
        logger.succ("SD-card `{}` wiped successfully".format(sd_card))
    else:
        logger.err("Unable to wipe SD-card at `{}`".format(sd_card))
    return retcode.value


def main():
    logger = CLILogger()
    parser = argparse.ArgumentParser(description="SD-card Wiping Tool")
    parser.add_argument("--sdcard", help="Device path for the SD-card", required=True)
    # defaults to help
    args = parser.parse_args(["--help"] if len(sys.argv) < 2 else None)
    sys.exit(wipe_card(logger, sd_card=args.sdcard))
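The interesting bit above is the return-code handshake: the flashing helper reports its result through a shared multiprocessing.Value rather than a plain return value. A minimal, self-contained sketch of that idea; fake_flash is a stand-in, not the real flash_image_with_etcher, and the device path is a placeholder.

# Sketch of the shared return-code pattern used by wipe_card above.
import multiprocessing

def fake_flash(image_path, device, retcode, wipe):
    # stand-in for flash_image_with_etcher: pretend the flash succeeded
    retcode.value = 0

retcode = multiprocessing.Value("i", -1)   # stays -1 until the worker reports back
fake_flash("mbr.img", "/dev/sdX", retcode, True)
print("wipe ok" if retcode.value == 0 else "wipe failed")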
Currently we have 255 properties for sale in Cancelada, Estepona. The most affordable apartment requires an investment of €128,000 and offers 72 m² of living space. For the priciest 5-bedroom villa, a potential buyer will need to pay €3,900,000. To get more information about any of the properties for sale in Cancelada, contact the listing agency by phone or via the enquiry form provided on the property details page.
#!/usr/bin/env python # Author: csiu # Created: 2015-02-17 import argparse import sys import os from utils import random_string, get_value_from_keycolonvalue_list usage = """Essentially running: bedtools intersect -a <features.gff> -b <mirna_prox.gff> -s -f 1 -r -wao """ def bedtools_intersect(gff_a, gff_b, gff_out): ## unify cmd = 'bedtools intersect -a '+gff_a+' -b '+gff_b+' -s -f 1 -r -wao >'+gff_out print cmd os.system(cmd) return def gff_unify_features(gff_a, gff_b, fname, dfvalue, f_out, retainSourceFeature=False): ## unify f_out_tmp = f_out+'.tmp' bedtools_intersect(gff_a, gff_b, f_out_tmp) ## parse with open(f_out, 'w') as out: with open(f_out_tmp) as f: for l in f: l = l.strip().split('\t') chrom = l[0] start = l[3] stop = l[4] count = l[5] strand = l[6] features = l[7] info_a = l[8] _chrom = l[9] if chrom == _chrom: ## yes overlap of features w/ mirna_proximity x_b = l[14] info_b = l[17] mirbase_id = get_value_from_keycolonvalue_list('mirbase_id', info_b.split(';')) else: x_b = dfvalue info_b = '' mirbase_id = '.' features = '%s;%s:%s' % (features, fname, x_b) new_info = info_a + '@' + info_b if retainSourceFeature: newline = '\t'.join([chrom, l[1], l[2], start, stop, count, strand, features, new_info]) else: newline = '\t'.join([chrom, 'putative_tss', mirbase_id, start, stop, count, strand, features, new_info]) out.write(newline + '\n') os.system('rm '+f_out_tmp) return def _verify_mirbaseID(gff_infile, gff_outfile): with open(gff_outfile, 'w') as out: with open(gff_infile) as f: for l in f: info = l.strip().split('\t')[8].split('@') _x = info[-2].split(';') _y = info[-1].split(';') _x = get_value_from_keycolonvalue_list('mirbase_id', _x) _y = get_value_from_keycolonvalue_list('mirbase_id', _y) if _x == _y or _x == '' or _y == '': out.write(l) return def main(gff_a, gff_b, fname, dfvalue, f_out, retainSourceFeature=False): tmpfile = f_out + '.tmp' gff_unify_features(gff_a, gff_b, fname, dfvalue, tmpfile, retainSourceFeature) _verify_mirbaseID(tmpfile, f_out) os.remove(tmpfile) return f_out if __name__ == '__main__': parser = argparse.ArgumentParser(description=usage, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-a', dest='gff_a', required=True, help='''path to <features>.gff (this should contain "cpg, cons, and tata" in the info column e.g. column 8)''') parser.add_argument('-b', dest='gff_b', required=True, help='''path to <mirna_proximity>.gff (this should contain the feature you want in add to <GFF_A> in column 6 )''') parser.add_argument('-f', dest='fname', required=True, help='''name of feature''') parser.add_argument('-d', dest='dfvalue', default='na', help='''default feature value''') parser.add_argument('-o', dest='outfile', default='all_features.gff', help='specify outfile; default = "all_features.gff"') ##get at the arguments args = parser.parse_args() ## do something.. main(args.gff_a, args.gff_b, args.fname, args.dfvalue, args.outfile)
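A hedged sketch of driving this module programmatically instead of through argparse; the GFF file names are placeholders, and the script's own filename in the CLI comment is an assumption.

# Hypothetical programmatic use of main() defined above; paths are placeholders.
# Roughly equivalent CLI call (script name assumed):
#   python unify_features.py -a features.gff -b mirna_proximity.gff \
#       -f mirna_prox -d na -o all_features.gff
out_path = main(
    gff_a="features.gff",          # GFF with cpg/cons/tata in the info column
    gff_b="mirna_proximity.gff",   # GFF whose column-6 value is merged in
    fname="mirna_prox",            # feature name appended as "<fname>:<value>"
    dfvalue="na",                  # value used when there is no overlap
    f_out="all_features.gff",
)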
The agent "printed for J. Bew, No. 28, in Pater-Noster Row; J. Almon, Piccadilly; and G. Woodfall, Charing Cross" represents an entity (e.g. a person or organization) associated with resources found in the University of Manitoba Libraries.
""" Weapon The default weapon object, file is called Arms so that it doesn't conflict with a tutorial weapon object. """ from evennia import DefaultObject from evennia import default_cmds, CmdSet, utils from commands.default_cmdsets import ChargenCmdset, ShopCmdset, BankCmdset, MerchantCmdset from world import english_utils, npc_rules # , npc_rules from random import randint import time from evennia import TICKER_HANDLER as tickerhandler class Combat_Mob(DefaultObject): # This mob will not attack people but it will defend itself from attack. """ """ def at_object_creation(self): # Inherit the object properties. super(Combat_Mob, self).at_object_creation() self.aliases.add([]) #self.name = "a ruddy bronze broadsword" # not sure if I need this self.db.live_name = "a giant rat" self.db.defeated_name = "the mangled remains of some large vermin" # must not have 'rat' in it or it can't be targetted! self.db.alive = True self.db.desc = "" self.db.health = 35 self.db.max_health = 35 # NPC damage, (player level * strength) * weapon damage ratio. # So a level 1 would do 10 damage a hit, then 20, then 30, up to 1,000 per hit at level 100. self.db.damage_amount = 15 self.db.ticker_speed = 3 # how often it attempts to attack or move/attack if a target is not found. This will only fire so many times before they 'forget'. self.db.counter_attack_chance = False # integer chance this npc will trigger a counter-attack. Defaults as false. self.db.respawn_speed = 600 # SHOULD BE A MULTIPLE OF 100 self.db.tries = 3 # how long it will spend trying to find its attacker before shutting down. self.db.exp_level = 10 # this is the relative level of the creature self.db.exp_multiplier = 4 # If you're under the level, subtract player level from NPC level and multiply by the multiplier. self.db.exp_max_level = 20 # At this level you won't gain any experience from killing this NPC. self.db.home_location = "#2" # This should be set! # So normally any kill is worth 1% exp. # But if your level is under the npc's level, you get a bonus # The bonus is level difference * multiplier. # This multiplier equation could similarly be used when attacking people below your current level, so you might # level up multiple times from killing a high-level person. self.db.offended_by = [] self.db.lootable = False #can be LOOTed for silver. self.db.looted_yet = False self.db.silver_amount = 0 self.db.skinnable = True #can be SKINNED for a pelt or skin item. self.db.skinned_yet = False self.db.pelt_name = "a giant rat pelt" self.db.attack_message_1 = "A giant rat hurls itself bodily into " self.db.attack_message_2 = "A giant rat claws and bites at " self.db.attack_message_3 = "With a resounding crunching sound, a giant rat bites into " def npc_active_ticks(self, *args, **kwargs): "Ticks after the NPC has been attacked." targets = False # Any targets in the room? # This should probably go below. 
if self.db.tries_left > 0: for i in self.location.contents: if i in self.db.offended_by: targets = True npc_rules.attack(self, i) self.db.tries_left = 3 return if not targets: for k, v in self.location.db.trails.iteritems(): target_name = str(self.db.offended_by[0]) if k == target_name: destination = self.search(v) self.move_to(destination) for i in self.location.contents: if i in self.db.offended_by: targets = True npc_rules.attack(self, i) self.db.tries_left = 3 break break self.db.tries_left = self.db.tries_left - 1 if self.db.tries_left < 0: self.db.offended_by = [] self.db.tries_left = self.db.tries tickerhandler.remove(self.db.ticker_speed, self.npc_active_ticks) return def npc_revive_ticks(self, *args, **kwargs): "ticked when " self.db.alive = True self.name = self.db.live_name self.db.health = self.db.max_health self.db.looted_yet = False self.db.skinned_yet = False destination = self.search(self.db.home_location, global_search=True) self.move_to(destination) tickerhandler.remove(self.db.respawn_speed, self.npc_revive_ticks) return class Combat_Merchant_Mob(DefaultObject): # This mob will not attack people but it will defend itself from attack. """ """ def at_object_creation(self): # Inherit the object properties. super(Combat_Merchant_Mob, self).at_object_creation() self.cmdset.add(MerchantCmdset, permanent=True) self.aliases.add([]) #self.name = "a ruddy bronze broadsword" # not sure if I need this self.db.live_name = "a giant rat" self.db.defeated_name = "the mangled remains of some large vermin" # must not have 'rat' in it or it can't be targetted! self.db.alive = True self.db.desc = "" self.db.trade_item = "pelts" self.db.health = 135 self.db.max_health = 135 # NPC damage, (player level * strength) * weapon damage ratio. # So a level 1 would do 10 damage a hit, then 20, then 30, up to 1,000 per hit at level 100. self.db.damage_amount = 30 self.db.ticker_speed = 3 # how often it attempts to attack or move/attack if a target is not found. This will only fire so many times before they 'forget'. self.db.counter_attack_chance = False # integer chance this npc will trigger a counter-attack. Defaults as false. self.db.respawn_speed = 600 # SHOULD BE A MULTIPLE OF 100 self.db.tries = 3 # how long it will spend trying to find its attacker before shutting down. self.db.exp_level = 10 # this is the relative level of the creature self.db.exp_multiplier = 4 # If you're under the level, subtract player level from NPC level and multiply by the multiplier. self.db.exp_max_level = 20 # At this level you won't gain any experience from killing this NPC. self.db.home_location = "#2" # This should be set! # So normally any kill is worth 1% exp. # But if your level is under the npc's level, you get a bonus # The bonus is level difference * multiplier. # This multiplier equation could similarly be used when attacking people below your current level, so you might # level up multiple times from killing a high-level person. self.db.offended_by = [] self.db.lootable = True #can be LOOTed for silver. self.db.looted_yet = False self.db.silver_amount = 10 self.db.skinnable = False #can be SKINNED for a pelt or skin item. self.db.skinned_yet = False self.db.pelt_name = "a giant rat pelt" self.db.attack_message_1 = "A giant rat hurls itself bodily into " self.db.attack_message_2 = "A giant rat claws and bites at " self.db.attack_message_3 = "With a resounding crunching sound, a giant rat bites into " def npc_active_ticks(self, *args, **kwargs): "Ticks after the NPC has been attacked." 
targets = False # Any targets in the room? # This should probably go below. if self.db.tries_left > 0: for i in self.location.contents: if i in self.db.offended_by: targets = True npc_rules.attack(self, i) self.db.tries_left = 3 return if not targets: for k, v in self.location.db.trails.iteritems(): target_name = str(self.db.offended_by[0]) if k == target_name: destination = self.search(v) self.move_to(destination) for i in self.location.contents: if i in self.db.offended_by: targets = True npc_rules.attack(self, i) self.db.tries_left = 3 break break self.db.tries_left = self.db.tries_left - 1 if self.db.tries_left < 0: self.db.offended_by = [] self.db.tries_left = self.db.tries tickerhandler.remove(self.db.ticker_speed, self.npc_active_ticks) return def npc_revive_ticks(self, *args, **kwargs): "ticked when " self.db.alive = True self.name = self.db.live_name self.db.health = self.db.max_health self.db.looted_yet = False self.db.skinned_yet = False destination = self.search(self.db.home_location, global_search=True) self.move_to(destination) tickerhandler.remove(self.db.respawn_speed, self.npc_revive_ticks) return
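A hedged sketch of spawning one of these NPCs in-game, assuming the module is importable at a dotted path such as typeclasses.arms; the path, key and room are placeholders.

# Hypothetical spawn of the combat mob defined above; typeclass path is an assumption.
from evennia import create_object, search_object

room = search_object("Limbo")[0]          # any existing room object
rat = create_object(
    "typeclasses.arms.Combat_Mob",        # placeholder dotted path to the class above
    key="a giant rat",
    location=room,
)
rat.db.home_location = "#2"               # dbref string of its respawn room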
Do You Want Healthier Hair and Nails? Over the last few months, I have noticed that my hair is shinier and my nails are stronger. This is an unexpected benefit of eating flax seeds. When I was younger, I had fairly healthy nails, but as I got older, I started to notice that they were breaking and splitting more easily. I had to keep them cut short to keep them as healthy as possible. After a few months of eating flax seeds, I noticed some improvement, and so I did a little research.

The article “10 Flax Seed Benefits and Nutrition Facts” by Dr. Axe provided much-needed information. As it turns out, the alpha-linolenic acid (ALA) fats in flax seeds benefit our skin, hair, and nails by reducing dryness and flakiness. Two other advantages that I wasn’t aware of are that the consumption of flax seeds can improve rosacea, which I have, and dry eye. It seems like there are only upsides to eating flax seeds every day. “If you want healthier skin, hair, and nails, then consider adding 2 tbsp of flax seeds to your smoothie or 1 tbsp of flax seed oil to your daily routine.”

My favorite way of eating ground flax seeds is to add one tablespoon to my oatmeal every morning. The nutty flavor adds a great taste, and the texture is fine in oatmeal. I have also tried adding ground flax seeds to yogurt. I like it that way, especially when I add in nuts too. Just make sure to store your ground flax seeds in the freezer to keep them as fresh as possible. In the article mentioned above, Dr. Axe suggests some more ways to add ground flax seeds to your diet. He also shares information about how to bake with them. “There are many great ways to add these super seeds into your diet, including adding them to homemade muffins, breads, and cookies.”

Question: Do you eat ground flax seeds? Have you noticed an improvement in your hair or nails? I would love to hear your comments.

Hi, I’m Robbin Folsom, and I’m all about being as healthy as possible. I’m a health and wellness coach for active people, and I want to help others lose weight and feel their best. After years of slowly gaining a pound or two a year, I finally decided it was time to reverse this trend. I lost 10% of my body weight by eating delicious, whole, healthy foods. I feel better than I have in years, and I want to help you improve your health one bite at a time! Receive my posts in your inbox.
from __future__ import absolute_import, print_function, division from pony.py23compat import int_types, basestring, imap, iteritems import json from operator import attrgetter from collections import defaultdict from datetime import date, datetime from decimal import Decimal from pony.orm.core import Attribute, Set, Entity, EntityMeta, TransactionError, db_session, flush # PermissionError, get_current_user, get_current_user_groups # can_view, can_edit, can_delete from pony.utils import throw, cut_traceback __all__ = 'basic_converter', 'get_schema_dict', 'get_schema_json', 'to_json', 'save_changes' def basic_converter(x): if isinstance(x, (datetime, date, Decimal)): return str(x) if isinstance(x, dict): return dict(x) if isinstance(x, Entity): pkval = x._get_raw_pkval_() return pkval[0] if len(pkval) == 1 else pkval try: iter(x) except: raise TypeError(x) return list(x) def get_schema_dict(db): result = [] for entity in sorted(db.entities.values(), key=attrgetter('_id_')): # if not can_view(entity): continue attrs = [] for attr in entity._new_attrs_: d = dict(name = attr.name, type = attr.py_type.__name__, kind = attr.__class__.__name__) if attr.auto: d['auto'] = True if attr.reverse: # if not can_view(attr.reverse.entity): continue d['reverse'] = attr.reverse.name if attr.lazy: d['lazy'] = True if attr.nullable: d['nullable'] = True if attr.default and issubclass(type(attr.default), (int_types, basestring)): d['defaultValue'] = attr.default attrs.append(d) d = dict(name=entity.__name__, newAttrs=attrs, pkAttrs=[ attr.name for attr in entity._pk_attrs_ ]) if entity._all_bases_: d['bases'] = [ base.__name__ for base in entity._all_bases_ ] if entity._simple_keys_: d['simpleKeys'] = [ attr.name for attr in entity._simple_keys_ ] if entity._composite_keys_: d['compositeKeys'] = [ [ attr.name for attr in attrs ] for attrs in entity._composite_keys_ ] result.append(d) return result def get_schema_json(db): return json.dumps(get_schema_dict(db), default=basic_converter) @cut_traceback def to_json(database, data, include=(), exclude=(), converter=None, with_schema=True): for attrs, param_name in ((include, 'include'), (exclude, 'exclude')): for attr in attrs: if not isinstance(attr, Attribute): throw(TypeError, "Each item of '%s' list should be attribute. 
Got: %s" % (param_name, attr)) include, exclude = set(include), set(exclude) if converter is None: converter = basic_converter # def user_has_no_rights_to_see(obj, attr=None): # user_groups = get_current_user_groups() # throw(PermissionError, 'The current user %s which belongs to groups %s ' # 'has no rights to see the object %s on the frontend' # % (get_current_user(), sorted(user_groups), obj)) object_set = set() caches = set() def obj_converter(obj): if not isinstance(obj, Entity): return converter(obj) caches.add(obj._session_cache_) if len(caches) > 1: throw(TransactionError, 'An attempt to serialize objects belonging to different transactions') # if not can_view(obj): # user_has_no_rights_to_see(obj) object_set.add(obj) pkval = obj._get_raw_pkval_() if len(pkval) == 1: pkval = pkval[0] return { 'class': obj.__class__.__name__, 'pk': pkval } data_json = json.dumps(data, default=obj_converter) objects = {} if caches: cache = caches.pop() if cache.database is not database: throw(TransactionError, 'An object does not belong to specified database') object_list = list(object_set) objects = {} for obj in object_list: if obj in cache.seeds[obj._pk_attrs_]: obj._load_() entity = obj.__class__ # if not can_view(obj): # user_has_no_rights_to_see(obj) d = objects.setdefault(entity.__name__, {}) for val in obj._get_raw_pkval_(): d = d.setdefault(val, {}) assert not d, d for attr in obj._attrs_: if attr in exclude: continue if attr in include: pass # if attr not in entity_perms.can_read: user_has_no_rights_to_see(obj, attr) elif attr.is_collection: continue elif attr.lazy: continue # elif attr not in entity_perms.can_read: continue if attr.is_collection: if not isinstance(attr, Set): throw(NotImplementedError) value = [] for item in attr.__get__(obj): if item not in object_set: object_set.add(item) object_list.append(item) pkval = item._get_raw_pkval_() value.append(pkval[0] if len(pkval) == 1 else pkval) value.sort() else: value = attr.__get__(obj) if value is not None and attr.is_relation: if attr in include and value not in object_set: object_set.add(value) object_list.append(value) pkval = value._get_raw_pkval_() value = pkval[0] if len(pkval) == 1 else pkval d[attr.name] = value objects_json = json.dumps(objects, default=converter) if not with_schema: return '{"data": %s, "objects": %s}' % (data_json, objects_json) schema_json = get_schema_json(database) return '{"data": %s, "objects": %s, "schema": %s}' % (data_json, objects_json, schema_json) @cut_traceback @db_session def save_changes(db, changes, observer=None): changes = json.loads(changes) import pprint; pprint.pprint(changes) objmap = {} for diff in changes['objects']: if diff['_status_'] == 'c': continue pk = diff['_pk_'] pk = (pk,) if type(pk) is not list else tuple(pk) entity_name = diff['class'] entity = db.entities[entity_name] obj = entity._get_by_raw_pkval_(pk, from_db=False) oid = diff['_id_'] objmap[oid] = obj def id2obj(attr, val): return objmap[val] if attr.reverse and val is not None else val # def user_has_no_rights_to(operation, obj): # user_groups = get_current_user_groups() # throw(PermissionError, 'The current user %s which belongs to groups %s ' # 'has no rights to %s the object %s on the frontend' # % (get_current_user(), sorted(user_groups), operation, obj)) for diff in changes['objects']: entity_name = diff['class'] entity = db.entities[entity_name] dbvals = {} newvals = {} for name, val in diff.items(): if name not in ('class', '_pk_', '_id_', '_status_'): attr = entity._adict_[name] if not attr.is_collection: if 
type(val) is dict: if 'old' in val: dbvals[attr] = attr.validate(id2obj(attr, val['old'])) if 'new' in val: newvals[attr.name] = attr.validate(id2obj(attr, val['new'])) else: newvals[attr.name] = attr.validate(id2obj(attr, val)) oid = diff['_id_'] status = diff['_status_'] if status == 'c': assert not dbvals obj = entity(**newvals) if observer: flush() # in order to get obj.id observer('create', obj, newvals) objmap[oid] = obj # if not can_edit(obj): user_has_no_rights_to('create', obj) else: obj = objmap[oid] if status == 'd': # if not can_delete(obj): user_has_no_rights_to('delete', obj) if observer: observer('delete', obj) obj.delete() elif status == 'u': # if not can_edit(obj): user_has_no_rights_to('update', obj) if newvals: assert dbvals if observer: oldvals = dict((attr.name, val) for attr, val in iteritems(dbvals)) observer('update', obj, newvals, oldvals) obj._db_set_(dbvals) # dbvals can be modified here for attr in dbvals: attr.__get__(obj) obj.set(**newvals) else: assert not dbvals objmap[oid] = obj flush() for diff in changes['objects']: if diff['_status_'] == 'd': continue obj = objmap[diff['_id_']] entity = obj.__class__ for name, val in diff.items(): if name not in ('class', '_pk_', '_id_', '_status_'): attr = entity._adict_[name] if attr.is_collection and attr.reverse.is_collection and attr < attr.reverse: removed = [ objmap[oid] for oid in val.get('removed', ()) ] added = [ objmap[oid] for oid in val.get('added', ()) ] collection = attr.__get__(obj) if removed: observer('remove', obj, {name: removed}) collection.remove(removed) if added: observer('add', obj, {name: added}) collection.add(added) flush() def deserialize(x): t = type(x) if t is list: return list(imap(deserialize, x)) if t is dict: if '_id_' not in x: return dict((key, deserialize(val)) for key, val in iteritems(x)) obj = objmap.get(x['_id_']) if obj is None: entity_name = x['class'] entity = db.entities[entity_name] pk = x['_pk_'] obj = entity[pk] return obj return x return deserialize(changes['data'])
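A hedged sketch of calling to_json with a throwaway in-memory database; the entity, the sample data, and the assumption that this serialization module can be imported alongside it are all placeholders.

# Hypothetical use of to_json defined above.
from pony.orm import Database, Required, db_session

db = Database('sqlite', ':memory:')

class Person(db.Entity):
    name = Required(str)

db.generate_mapping(create_tables=True)

with db_session:
    Person(name='Alice')
    # serializes the selected objects together with the schema of their entities
    payload = to_json(db, Person.select()[:])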
Cash Loan Against Car Capital Park - 063 195 3725 | Pawn Cars and still drive it! You can now get fast cash loans against your car at Cash Loan Against Car Capital Park today! Make sure that you have all the required documents and that your car is in good condition, as this will affect the amount of money you can get for it. Get in touch with us if you have everything that’s required for a same-day loan against your car and we will ensure you get your cash fast! Cash Loan Against Car Capital Park: professional and fast loans against your car! Cash Loan Against Car Capital Park strives to provide you with fast loan services because we know that money problems can be a stressful ordeal to go through, and that is why we make it our mission to help you get the cash you need as fast as possible. Pawn Car and Drive It Capital Park is a one-of-a-kind loan company that seeks to protect you and your car when you use it to secure the cash you need. Cash Loan Against Car Capital Park aims to help you get the cash you need fast! At Cash Loan Against Car Capital Park our customers enjoy the fastest loan services in the city because we have put in place simple ways to process your loan application and give you same-day feedback! Cash Loan Against Car Capital Park is who you call for fast cash, guaranteed! At Cash Loan Against Car Capital Park our loan experts are available for free consultations during normal working hours, and they are happy to advise you on the best ways of taking a loan against your car. Give us a call today and let us get you the cash you need fast!
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.python.client.session.Session.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import threading import time import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.core.lib.core import error_codes_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.framework import common_shapes from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_util from tensorflow.python.framework import test_util from tensorflow.python.framework import versions from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.platform import googletest from tensorflow.python.util import compat # NOTE(mrry): Dummy shape registration for ops used in the tests, since they # don't have C++ op registrations on which to attach C++ shape fns. ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape) class SessionTest(test_util.TensorFlowTestCase): def testUseExistingGraph(self): with ops.Graph().as_default() as g, ops.device('/cpu:0'): a = constant_op.constant(6.0, shape=[1, 1]) b = constant_op.constant(7.0, shape=[1, 1]) c = math_ops.matmul(a, b, name='matmul') with session.Session(graph=g): result = c.eval() self.assertAllEqual(result, [[42.0]]) def testUseDefaultGraph(self): with ops.Graph().as_default(), ops.device('/cpu:0'): a = constant_op.constant(6.0, shape=[1, 1]) b = constant_op.constant(7.0, shape=[1, 1]) c = math_ops.matmul(a, b, name='matmul') with session.Session(): result = c.eval() self.assertAllEqual(result, [[42.0]]) def testCreate(self): with session.Session(): inp = constant_op.constant(10.0, shape=[2, 3], name='W1') copy = array_ops.identity(inp) # Test with feed. # TODO(mrry): Investigate why order='F' didn't work. arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C') copy_val = copy.eval({'W1:0': arr}) self.assertAllEqual(arr, copy_val) # Test without feed. copy_val = copy.eval() self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]], dtype=np.float32), copy_val) def testManyCPUs(self): # TODO(keveman): Implement ListDevices and test for the number of # devices returned by ListDevices. 
with session.Session( config=config_pb2.ConfigProto(device_count={'CPU': 2})): inp = constant_op.constant(10.0, name='W1') self.assertAllEqual(inp.eval(), 10.0) def testPerSessionThreads(self): # TODO(keveman): Implement ListDevices and test for the number of # devices returned by ListDevices. with session.Session( config=config_pb2.ConfigProto(use_per_session_threads=True)): inp = constant_op.constant(10.0, name='W1') self.assertAllEqual(inp.eval(), 10.0) def testSessionInterOpThreadPool(self): config = config_pb2.ConfigProto() pool = config.session_inter_op_thread_pool.add() with session.Session(config=config) as s: inp = constant_op.constant(10.0, name='W1') results = s.run([inp]) self.assertAllEqual([10.0], results) pool = config.session_inter_op_thread_pool.add() pool.num_threads = 1 with session.Session(config=config) as s: inp = constant_op.constant(20.0, name='W2') results = s.run([inp]) self.assertAllEqual([20.0], results) def testErrorsReported(self): with session.Session() as s: constant_op.constant(10.0, name='W1') with self.assertRaises(ValueError): s.run('foo:0') def testErrorPayload(self): with session.Session(): a = array_ops.placeholder(dtypes.float32) with self.assertRaisesOpError(lambda e: e.op == a.op): a.eval() def testErrorCodeWithNoNodeDef(self): with session.Session() as s: a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) def exc_predicate(e): return (e.op is None and e.node_def is None and e.error_code == error_codes_pb2.INVALID_ARGUMENT) with self.assertRaisesOpError(exc_predicate): # Run with a bogus handle. s.partial_run('foo', r1, feed_dict={a: 1, b: 2}) def testOpConstructionErrorPayload(self): with session.Session(): failing_op = ops.get_default_graph().create_op( 'ConstructionFails', [], [], name='f') def exc_predicate(e): return (e.op == failing_op and e.error_code == error_codes_pb2.INVALID_ARGUMENT) with self.assertRaisesOpError(exc_predicate): failing_op.run() def testErrorBasedOn(self): with session.Session() as sess: a = constant_op.constant(0.0, shape=[2, 3]) # NOTE(mrry): The original_op is nonsense, but used here to test that the # errors are reported correctly. # pylint: disable=protected-access with sess.graph._original_op(a.op): b = array_ops.identity(a, name='id') with sess.graph._original_op(b.op): c = array_ops.placeholder(dtypes.float32) # pylint: enable=protected-access def exc_predicate(e): return (e.op == c.op and e.op._original_op == b.op and e.op._original_op._original_op == a.op) with self.assertRaisesOpError(exc_predicate): c.eval() def testFetchNone(self): with session.Session() as s: a = constant_op.constant(1.0) with self.assertRaises(TypeError): s.run(None) with self.assertRaises(TypeError): s.run([None]) with self.assertRaises(TypeError): s.run({'b': None}) with self.assertRaises(TypeError): s.run({'a': a, 'b': None}) def testFetchSingleton(self): with session.Session() as sess: a = constant_op.constant(42.0) res = sess.run(a) self.assertEqual(42.0, res) res = sess.run(a.op) # An op, not a tensor. self.assertEqual(None, res) def testFetchSingletonByName(self): with session.Session() as sess: a = constant_op.constant(42.0) res = sess.run(a.name) self.assertEqual(42.0, res) res = sess.run(a.op) # An op, not a tensor. self.assertEqual(None, res) def testFetchList(self): with session.Session() as sess: a = constant_op.constant(42.0) b = control_flow_ops.no_op() # An op, not a tensor. 
c = constant_op.constant(44.0) v = variables.Variable([54.0]) assign = v.assign([63.0]) res = sess.run([a, b, c, a.name, assign.op]) self.assertTrue(isinstance(res, list)) self.assertEqual(42.0, res[0]) self.assertEqual(None, res[1]) self.assertEqual(44.0, res[2]) self.assertEqual(42.0, res[3]) self.assertEqual(None, res[4]) self.assertEqual(63.0, sess.run(v)) def testFetchTuple(self): with session.Session() as sess: a = constant_op.constant(42.0) b = control_flow_ops.no_op() # An op, not a tensor. c = constant_op.constant(44.0) res = sess.run((a, b, c, a.name)) self.assertTrue(isinstance(res, tuple)) self.assertEqual(42.0, res[0]) self.assertEqual(None, res[1]) self.assertEqual(44.0, res[2]) self.assertEqual(42.0, res[3]) def testFetchNamedTuple(self): # pylint: disable=invalid-name ABC = collections.namedtuple('ABC', ['a', 'b', 'c']) # pylint: enable=invalid-name with session.Session() as sess: a = constant_op.constant(42.0) b = control_flow_ops.no_op() # An op, not a tensor. c = constant_op.constant(44.0) res = sess.run(ABC(a, b, c)) self.assertTrue(isinstance(res, ABC)) self.assertEqual(42.0, res.a) self.assertEqual(None, res.b) self.assertEqual(44.0, res.c) def testFetchDict(self): with session.Session() as sess: a = constant_op.constant(42.0) b = control_flow_ops.no_op() # An op, not a tensor. c = constant_op.constant(44.0) res = sess.run({'a': a, 'b': b, 'c': c}) self.assertTrue(isinstance(res, dict)) self.assertEqual(42.0, res['a']) self.assertEqual(None, res['b']) self.assertEqual(44.0, res['c']) def testFetchOrderedDict(self): with session.Session() as sess: a = constant_op.constant(42.0) b = control_flow_ops.no_op() # An op, not a tensor. c = constant_op.constant(44.0) res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)])) self.assertTrue(isinstance(res, collections.OrderedDict)) self.assertEqual([3, 2, 1], list(res.keys())) self.assertEqual(42.0, res[3]) self.assertEqual(None, res[2]) self.assertEqual(44.0, res[1]) def testFetchNestingEmptyOneLevel(self): with session.Session() as sess: a_val = 11.0 a = constant_op.constant(a_val) res = sess.run([[], tuple(), {}]) self.assertTrue(isinstance(res, list)) self.assertEquals(3, len(res)) self.assertTrue(isinstance(res[0], list)) self.assertEqual(0, len(res[0])) self.assertTrue(isinstance(res[1], tuple)) self.assertEqual(0, len(res[1])) self.assertTrue(isinstance(res[2], dict)) self.assertEqual(0, len(res[2])) res = sess.run([[], tuple(), {}, a]) self.assertTrue(isinstance(res, list)) self.assertEquals(4, len(res)) self.assertTrue(isinstance(res[0], list)) self.assertEqual(0, len(res[0])) self.assertTrue(isinstance(res[1], tuple)) self.assertEqual(0, len(res[1])) self.assertTrue(isinstance(res[2], dict)) self.assertEqual(0, len(res[2])) self.assertEqual(a_val, res[3]) def testFetchNestingOneLevel(self): with session.Session() as sess: # pylint: disable=invalid-name ABC = collections.namedtuple('ABC', ['a', 'b', 'c']) DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g']) # pylint: enable=invalid-name a_val = 42.0 b_val = None c_val = 44.0 a = constant_op.constant(a_val) b = control_flow_ops.no_op() # An op, not a tensor. 
c = constant_op.constant(c_val) # List of lists, tuples, namedtuple, and dict res = sess.run([[a, b, c], (a, b, c), ABC(a=a, b=b, c=c), {'a': a.name, 'c': c, 'b': b}]) self.assertTrue(isinstance(res, list)) self.assertEqual(4, len(res)) self.assertTrue(isinstance(res[0], list)) self.assertEqual(3, len(res[0])) self.assertEqual(a_val, res[0][0]) self.assertEqual(b_val, res[0][1]) self.assertEqual(c_val, res[0][2]) self.assertTrue(isinstance(res[1], tuple)) self.assertEqual(3, len(res[1])) self.assertEqual(a_val, res[1][0]) self.assertEqual(b_val, res[1][1]) self.assertEqual(c_val, res[1][2]) self.assertTrue(isinstance(res[2], ABC)) self.assertEqual(a_val, res[2].a) self.assertEqual(b_val, res[2].b) self.assertEqual(c_val, res[2].c) self.assertTrue(isinstance(res[3], dict)) self.assertEqual(3, len(res[3])) self.assertEqual(a_val, res[3]['a']) self.assertEqual(b_val, res[3]['b']) self.assertEqual(c_val, res[3]['c']) # Tuple of lists, tuples, namedtuple, and dict res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c), {'a': a, 'c': c, 'b': b})) self.assertTrue(isinstance(res, tuple)) self.assertEqual(4, len(res)) self.assertTrue(isinstance(res[0], list)) self.assertEqual(3, len(res[0])) self.assertEqual(a_val, res[0][0]) self.assertEqual(b_val, res[0][1]) self.assertEqual(c_val, res[0][2]) self.assertTrue(isinstance(res[1], tuple)) self.assertEqual(3, len(res[1])) self.assertEqual(a_val, res[1][0]) self.assertEqual(b_val, res[1][1]) self.assertEqual(c_val, res[1][2]) self.assertTrue(isinstance(res[2], ABC)) self.assertEqual(a_val, res[2].a) self.assertEqual(b_val, res[2].b) self.assertEqual(c_val, res[2].c) self.assertTrue(isinstance(res[3], dict)) self.assertEqual(3, len(res[3])) self.assertEqual(a_val, res[3]['a']) self.assertEqual(b_val, res[3]['b']) self.assertEqual(c_val, res[3]['c']) # Namedtuple of lists, tuples, namedtuples, and dict res = sess.run(DEFG(d=[a, b, c], e=(a, b, c), f=ABC(a=a.name, b=b, c=c), g={'a': a, 'c': c, 'b': b})) self.assertTrue(isinstance(res, DEFG)) self.assertTrue(isinstance(res.d, list)) self.assertEqual(3, len(res.d)) self.assertEqual(a_val, res.d[0]) self.assertEqual(b_val, res.d[1]) self.assertEqual(c_val, res.d[2]) self.assertTrue(isinstance(res.e, tuple)) self.assertEqual(3, len(res.e)) self.assertEqual(a_val, res.e[0]) self.assertEqual(b_val, res.e[1]) self.assertEqual(c_val, res.e[2]) self.assertTrue(isinstance(res.f, ABC)) self.assertEqual(a_val, res.f.a) self.assertEqual(b_val, res.f.b) self.assertEqual(c_val, res.f.c) self.assertTrue(isinstance(res.g, dict)) self.assertEqual(3, len(res.g)) self.assertEqual(a_val, res.g['a']) self.assertEqual(b_val, res.g['b']) self.assertEqual(c_val, res.g['c']) # Dict of lists, tuples, namedtuples, and dict res = sess.run({'d': [a, b, c], 'e': (a, b, c), 'f': ABC(a=a, b=b, c=c), 'g': {'a': a.name, 'c': c, 'b': b}}) self.assertTrue(isinstance(res, dict)) self.assertEqual(4, len(res)) self.assertTrue(isinstance(res['d'], list)) self.assertEqual(3, len(res['d'])) self.assertEqual(a_val, res['d'][0]) self.assertEqual(b_val, res['d'][1]) self.assertEqual(c_val, res['d'][2]) self.assertTrue(isinstance(res['e'], tuple)) self.assertEqual(3, len(res['e'])) self.assertEqual(a_val, res['e'][0]) self.assertEqual(b_val, res['e'][1]) self.assertEqual(c_val, res['e'][2]) self.assertTrue(isinstance(res['f'], ABC)) self.assertEqual(a_val, res['f'].a) self.assertEqual(b_val, res['f'].b) self.assertEqual(c_val, res['f'].c) self.assertTrue(isinstance(res['g'], dict)) self.assertEqual(3, len(res['g'])) self.assertEqual(a_val, 
res['g']['a']) self.assertEqual(b_val, res['g']['b']) self.assertEqual(c_val, res['g']['c']) def testFetchTensorObject(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) results_with_list = s.run([c]) self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0]) results_with_single = s.run(c) self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single) results_with_get = c.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get) a_val, b_val = s.run([a, b]) # Test multiple fetches. self.assertAllEqual([[1.0, 1.0]], a_val) self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val) results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]}) self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0]) self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], results_with_dict['b']) self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0]) self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1]) # Test nested structures results_with_nested_list = s.run([[[a, b], b], a, [a, b]]) self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0]) self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], results_with_nested_list[0][0][1]) self.assertAllEqual(results_with_nested_list[0][0][0], results_with_nested_list[1]) self.assertAllEqual(results_with_nested_list[1], results_with_nested_list[2][0]) self.assertAllEqual(results_with_nested_list[0][0][1], results_with_nested_list[0][1]) self.assertAllEqual(results_with_nested_list[0][1], results_with_nested_list[2][1]) def testFetchScalar(self): with session.Session() as s: for scalar in np.int32, np.int64, np.float16, np.float32, np.float64: x = scalar(7) y = scalar(8) tf_x = constant_op.constant(x, shape=[]) tf_y = constant_op.constant(y) tf_xy = math_ops.add(tf_x, tf_y) # Single fetch xy = s.run(tf_xy) self.assertEqual(scalar, type(xy)) self.assertEqual(x + y, xy) # List fetch xy, = s.run([tf_xy]) self.assertEqual(scalar, type(xy)) self.assertEqual(x + y, xy) # Dict fetch xy = s.run({'xy': tf_xy})['xy'] self.assertEqual(scalar, type(xy)) self.assertEqual(x + y, xy) # Nested list fetch xy = s.run([[[tf_xy]], tf_xy, [tf_xy]]) self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]]) self.assertEqual(scalar, type(xy[0][0][0])) self.assertEqual(scalar, type(xy[1])) self.assertEqual(scalar, type(xy[2][0])) def testFetchOperationObject(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) v = variables.Variable(a, name='testFetchOperationObject_v') s.run(v.initializer) v_val = s.run(v) self.assertAllEqual([[1.0, 1.0]], v_val) def testFetchSparseTensor(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = sparse_tensor.SparseTensor( constant_op.constant(indices), constant_op.constant(values), constant_op.constant(shape)) # Single fetch, use as tuple sp_out = s.run(sp) indices_out, values_out, shape_out = sp_out self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Single fetch, use as SparseTensorValue sp_out = s.run(sp) self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.dense_shape, shape) # Tuple fetch, use as tuple indices_out, values_out, shape_out = s.run(sp) self.assertAllEqual(indices_out, indices) 
self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # List fetch, use as tuple (indices_out, values_out, shape_out), = s.run([sp]) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # List fetch, use as SparseTensorValue sp_out, = s.run([sp]) self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.dense_shape, shape) # Dict fetch (single value), use as tuple indices_out, values_out, shape_out = s.run({'sp': sp})['sp'] self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Dict fetch (list value), use as tuple (indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp'] self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Dict fetch, use as SparseTensorValue sp_out = s.run({'sp': sp})['sp'] self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.dense_shape, shape) # Nested list fetch use as tuple sp_out = s.run([[[sp]], sp]) indices_out, values_out, shape_out = sp_out[0][0][0] self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) indices_out, values_out, shape_out = sp_out[1] self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Nested list fetch, use as SparseTensorValue sp_out = s.run([[[sp]], sp]) self.assertAllEqual(sp_out[0][0][0].indices, indices) self.assertAllEqual(sp_out[0][0][0].values, values) self.assertAllEqual(sp_out[0][0][0].dense_shape, shape) self.assertAllEqual(sp_out[1].indices, indices) self.assertAllEqual(sp_out[1].values, values) self.assertAllEqual(sp_out[1].dense_shape, shape) def testFeedSparseTensor(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = sparse_tensor.SparseTensor( array_ops.placeholder(dtype=np.int64, shape=(2, 3)), array_ops.placeholder(dtype=np.float32, shape=(2,)), array_ops.placeholder(dtype=np.int64, shape=(3,)),) sp_indices = array_ops.identity(sp.indices) sp_values = array_ops.identity(sp.values) sp_shape = array_ops.identity(sp.dense_shape) sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape) # Feed with tuple indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)}) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with tuple, fetch sp directly sp_out = s.run(sp, {sp: (indices, values, shape)}) self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.dense_shape, shape) # Feed with SparseTensorValue indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], {sp: sparse_tensor.SparseTensorValue(indices, values, shape)}) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue, fetch SparseTensorValue sp2_out = s.run( sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)}) self.assertAllEqual(sp2_out.indices, indices) self.assertAllEqual(sp2_out.values, values) self.assertAllEqual(sp2_out.dense_shape, 
shape) # Feed SparseTensorValue and fetch sp directly. sp_out = s.run( sp, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)}) self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.dense_shape, shape) def testFeedSparsePlaceholder(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1') sp_indices = array_ops.identity(sp.indices) sp_values = array_ops.identity(sp.values) sp_shape = array_ops.identity(sp.dense_shape) sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape) # Feed with tuple indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)}) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], {sp: sparse_tensor.SparseTensorValue(indices, values, shape)}) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue, fetch SparseTensorValue sp2_out = s.run( sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)}) self.assertAllEqual(sp2_out.indices, indices) self.assertAllEqual(sp2_out.values, values) self.assertAllEqual(sp2_out.dense_shape, shape) def testFeedSparsePlaceholderPartialShape(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = array_ops.sparse_placeholder( shape=[None, 9, 2], dtype=np.float32, name='placeholder1') sp_indices = array_ops.identity(sp.indices) sp_values = array_ops.identity(sp.values) sp_shape = array_ops.identity(sp.dense_shape) sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape) # Feed with tuple indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)}) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], {sp: sparse_tensor.SparseTensorValue(indices, values, shape)}) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue, fetch SparseTensorValue sp2_out = s.run( sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)}) self.assertAllEqual(sp2_out.indices, indices) self.assertAllEqual(sp2_out.values, values) self.assertAllEqual(sp2_out.dense_shape, shape) def testFeedSparsePlaceholderConstantShape(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = array_ops.sparse_placeholder(dtype=np.float32, shape=shape, name='placeholder1') self.assertAllEqual(sp.dense_shape.eval(session=s), shape) self.assertAllEqual(tensor_util.constant_value(sp.dense_shape), shape) sp_indices = array_ops.identity(sp.indices) sp_values = array_ops.identity(sp.values) sp_shape = array_ops.identity(sp.dense_shape) # Feed with tuple indices_out, values_out, 
shape_out = s.run( [sp_indices, sp_values, sp_shape], {sp: (indices, values)}) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) def testFetchIndexedSlices(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) dense_shape = np.array([7, 9, 2]).astype(np.int64) ind = ops.IndexedSlices( constant_op.constant(values), constant_op.constant(indices), constant_op.constant(dense_shape)) # Single fetch, use as tuple ind_out = s.run(ind) values_out, indices_out, dense_shape_out = ind_out self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # Single fetch, use as IndexedSlicesValue ind_out = s.run(ind) self.assertAllEqual(ind_out.values, values) self.assertAllEqual(ind_out.indices, indices) self.assertAllEqual(ind_out.dense_shape, dense_shape) # Tuple fetch, use as tuple values_out, indices_out, dense_shape_out = s.run(ind) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # List fetch, use as tuple (values_out, indices_out, dense_shape_out), = s.run([ind]) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # List fetch, use as IndexedSlicesValue ind_out, = s.run([ind]) self.assertAllEqual(ind_out.values, values) self.assertAllEqual(ind_out.indices, indices) self.assertAllEqual(ind_out.dense_shape, dense_shape) def testFeedIndexedSlices(self): with session.Session() as s: values = np.array([1.0, 2.0]).astype(np.float32) indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) dense_shape = np.array([7, 9, 2]).astype(np.int64) ind = ops.IndexedSlices( array_ops.placeholder(dtype=np.float32, shape=(2,)), array_ops.placeholder(dtype=np.int64, shape=(2, 3)), array_ops.placeholder(dtype=np.int64, shape=(3,)),) ind_values = array_ops.identity(ind.values) ind_indices = array_ops.identity(ind.indices) ind_dense_shape = array_ops.identity(ind.dense_shape) ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape) # Feed with tuple values_out, indices_out, dense_shape_out = s.run( [ind_values, ind_indices, ind_dense_shape], {ind: (values, indices, dense_shape)}) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # Feed with IndexedSlicesValue values_out, indices_out, dense_shape_out = s.run( [ind_values, ind_indices, ind_dense_shape], {ind: ops.IndexedSlicesValue(values, indices, dense_shape)}) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # Feed with IndexedSlicesValue, fetch IndexedSlicesValue ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices, dense_shape)}) self.assertAllEqual(ind2_out.values, values) self.assertAllEqual(ind2_out.indices, indices) self.assertAllEqual(ind2_out.dense_shape, dense_shape) def testFetchIndexedSlicesWithoutDenseShape(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) dense_shape = None ind = ops.IndexedSlices( constant_op.constant(values), constant_op.constant(indices), None) # Single fetch, use as tuple ind_out = s.run(ind) values_out, indices_out, dense_shape_out = ind_out 
self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # Single fetch, use as IndexedSlicesValue ind_out = s.run(ind) self.assertAllEqual(ind_out.values, values) self.assertAllEqual(ind_out.indices, indices) self.assertAllEqual(ind_out.dense_shape, dense_shape) # Tuple fetch, use as tuple values_out, indices_out, dense_shape_out = s.run(ind) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # List fetch, use as tuple (values_out, indices_out, dense_shape_out), = s.run([ind]) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # List fetch, use as IndexedSlicesValue ind_out, = s.run([ind]) self.assertAllEqual(ind_out.values, values) self.assertAllEqual(ind_out.indices, indices) self.assertAllEqual(ind_out.dense_shape, dense_shape) def testFeedIndexedSlicesWithoutDenseShape(self): with session.Session() as s: values = np.array([1.0, 2.0]).astype(np.float32) indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) dense_shape = None ind = ops.IndexedSlices( array_ops.placeholder(dtype=np.float32, shape=(2,)), array_ops.placeholder(dtype=np.int64, shape=(2, 3)), None) ind_values = array_ops.identity(ind.values) ind_indices = array_ops.identity(ind.indices) ind2 = ops.IndexedSlices(ind_values, ind_indices) # Feed with tuple values_out, indices_out = s.run( [ind_values, ind_indices], {ind: (values, indices)}) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) # Feed with IndexedSlicesValue values_out, indices_out = s.run( [ind_values, ind_indices], {ind: ops.IndexedSlicesValue(values, indices, dense_shape)}) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) # Feed with IndexedSlicesValue, fetch IndexedSlicesValue ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices, dense_shape)}) self.assertAllEqual(ind2_out.values, values) self.assertAllEqual(ind2_out.indices, indices) self.assertAllEqual(ind2_out.dense_shape, dense_shape) def testExtendWithStatelessOperations(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) c_val = s.run(c) self.assertAllEqual([[4.0, 4.0, 4.0]], c_val) d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1]) e = math_ops.matmul(c, d) # Extend will happen here. e_val = s.run(e) self.assertAllEqual([[24.0]], e_val) def testExtendWithStatefulOperations(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) v = variables.Variable(c, name='testExtendWithStatefulOperations_v') v.initializer.run() v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) d = constant_op.constant(3.0, shape=[2, 3]) e = math_ops.matmul(a, d) assign_e_to_v = state_ops.assign(v, e) # Extend will happen here. e_val = e.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], e_val) v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) s.run(assign_e_to_v) v_val = v.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], v_val) def testExtendWithGroupBy(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) p = variables.Variable(a, name='testExtendWithGroupBy_p') a_val = a.eval() # Force an Extend after this op. 
self.assertAllEqual([[1.0, 1.0]], a_val) b = constant_op.constant(2.0, shape=[1, 2]) q = variables.Variable(b, name='testExtendWithGroupBy_q') # Extend will happen here. init = control_flow_ops.group(p.initializer, q.initializer) s.run(init) p_val, q_val = s.run([p, q]) self.assertAllEqual([[1.0, 1.0]], p_val) self.assertAllEqual([[2.0, 2.0]], q_val) def testTensorGetMethod(self): with session.Session(): a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) c_val = c.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], c_val) fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]}) self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val) def testOperationRunMethod(self): with session.Session(): a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[1, 2], name='b') v = variables.Variable(a, a.dtype) assign_a_to_v = state_ops.assign(v, a) assign_a_to_v.eval() v_val = v.eval() self.assertAllEqual([[1.0, 1.0]], v_val) assign_b_to_v = state_ops.assign(v, b) assign_b_to_v.eval() v_val = v.eval() self.assertAllEqual([[2.0, 2.0]], v_val) assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]}) v_val = v.eval() self.assertAllEqual([[3.0, 3.0]], v_val) def testDefaultGraph(self): with session.Session() as s: self.assertEqual(ops.get_default_graph(), s.graph) a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) self.assertEqual(ops.get_default_graph(), a.graph) self.assertEqual(ops.get_default_graph(), b.graph) c = math_ops.matmul(a, b) v = variables.Variable(c, name='testDefaultGraph_v') v.initializer.run() v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) d = constant_op.constant(3.0, shape=[2, 3]) e = math_ops.matmul(a, d) assign_e_to_v = state_ops.assign(v, e) e_val = e.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], e_val) v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) s.run(assign_e_to_v) v_val = v.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], v_val) self.assertEqual(ops.get_default_graph(), s.graph) def _testDefaultGraphInThread(self, constructed_event, continue_event, i): with session.Session() as s: self.assertEqual(ops.get_default_graph(), s.graph) a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) v = variables.Variable(c, name='var_%d' % i) # Block here until all threads have constructed their graph. constructed_event.set() continue_event.wait() assign_c_to_v = state_ops.assign(v, c) v.initializer.run() assign_c_to_v.eval() v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) d = constant_op.constant(3.0, shape=[2, 3]) e = math_ops.matmul(a, d) assign_e_to_v = state_ops.assign(v, e) e_val = e.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], e_val) v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) s.run(assign_e_to_v) v_val = v.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], v_val) self.assertEqual(ops.get_default_graph(), s.graph) def testDefaultGraphWithThreads(self): # Fork ten threads that use their thread-local default graph. 
threads = [] constructed_events = [threading.Event() for _ in range(10)] continue_event = threading.Event() for i, constructed_event in enumerate(constructed_events): t = self.checkedThread(target=self._testDefaultGraphInThread, args=(constructed_event, continue_event, i)) threads.append(t) for t in threads: t.start() for constructed_event in constructed_events: constructed_event.wait() continue_event.set() for t in threads: t.join() def testParallelRun(self): with session.Session() as sess: c = constant_op.constant(5.0) ev = threading.Event() def run_step(): ev.wait() val = c.eval(session=sess) self.assertEqual(val, 5.0) threads = [self.checkedThread(target=run_step) for _ in range(100)] for t in threads: t.start() ev.set() for t in threads: t.join() def testRunFeedDict(self): with session.Session() as s: x = array_ops.zeros([2]) y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)}) self.assertAllEqual(y, 2 * np.ones(2)) y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)}) self.assertAllEqual(y, 2 * np.ones(2)) y = s.run(2 * x, feed_dict={x: [1, 1]}) assert (y == 2 * np.ones(2)).all() # Test nested tuple keys z = (((array_ops.zeros([2]),),), array_ops.zeros([2]), (array_ops.zeros([2]),)) result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2] values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),)) result_value = s.run(result, feed_dict={z: values}) self.assertAllEqual(result_value[0], 2 * np.ones(2)) self.assertAllEqual(result_value[1], 2 * np.array([2, 2])) self.assertAllEqual(result_value[2], 2 * np.array([3, 3])) def testGraphDef(self): with session.Session() as sess: self.assertProtoEquals( 'versions { producer: %d min_consumer: %d }' % ( versions.GRAPH_DEF_VERSION, versions.GRAPH_DEF_VERSION_MIN_CONSUMER), sess.graph_def) c = constant_op.constant(5.0, name='c') self.assertEquals(len(sess.graph_def.node), 1) d = constant_op.constant(6.0, name='d') self.assertEquals(len(sess.graph_def.node), 2) self.assertAllEqual(c.eval(), 5.0) self.assertAllEqual(d.eval(), 6.0) e = constant_op.constant(7.0, name='e') self.assertEquals(len(sess.graph_def.node), 3) self.assertAllEqual(e.eval(), 7.0) def testUseAfterClose(self): with session.Session() as sess: c = constant_op.constant(5.0) self.assertAllEqual(sess.run(c), 5.0) with self.assertRaisesWithPredicateMatch( RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)): sess.run(c) def testUseAfterCloseConcurrent(self): with session.Session() as sess: c = constant_op.constant(5.0) self.assertAllEqual(sess.run(c), 5.0) def update_thread(): with self.assertRaisesWithPredicateMatch( RuntimeError, lambda e: 'Attempted to use a closed Session.' 
in str(e)): while True: sess.run(c) t = threading.Thread(target=update_thread) t.start() time.sleep(0.1) sess.close() t.join() def testUseEmptyGraph(self): with session.Session() as sess: with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'): sess.run([]) with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'): sess.run(()) with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'): sess.run({}) def testNotEntered(self): # pylint: disable=protected-access self.assertEqual(ops._default_session_stack.get_default(), None) # pylint: enable=protected-access with ops.device('/cpu:0'): sess = session.Session() c_1 = constant_op.constant(5.0) with sess.graph.as_default(): c_2 = constant_op.constant(5.0) self.assertEqual(c_1.graph, c_2.graph) self.assertEqual(sess.run(c_2), 5.0) with self.assertRaisesWithPredicateMatch( ValueError, lambda e: 'No default session is registered.' in str(e)): c_2.eval() def testInteractive(self): with ops.device('/cpu:0'): sess = session.InteractiveSession() a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval()) d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1]) e = math_ops.matmul(c, d) self.assertAllEqual([[24.0]], e.eval()) sess.close() def testInteractivePlacePrunedGraph(self): sess = session.InteractiveSession() # Build a graph that has a bad op in it (no kernel). # # This test currently does not link in any GPU kernels, # which is why placing this is invalid. If at some point # GPU kernels are added to this test, some other different # op / device combo should be chosen. with ops.device('/gpu:0'): a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(1.0, shape=[1, 2]) # Only run the valid op, this should work. b.eval() with self.assertRaises(errors.InvalidArgumentError): a.eval() sess.close() def testDefaultSessionPlacePrunedGraph(self): sess = session.Session() # Build a graph that has a bad op in it (no kernel). # # This test currently does not link in any GPU kernels, # which is why placing this is invalid. If at some point # GPU kernels are added to this test, some other different # op / device combo should be chosen. with ops.device('/gpu:0'): _ = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(1.0, shape=[1, 2]) with self.assertRaises(errors.InvalidArgumentError): # Even though we don't run the bad op, we place the entire # graph, which should fail with a non-interactive session. 
sess.run(b) sess.close() def testSharedGraph(self): with ops.Graph().as_default() as g, ops.device('/cpu:0'): a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) with session.Session(graph=g) as sess1: with session.Session(graph=g) as sess2: self.assertAllEqual(sess1.run(c), sess2.run(c)) def testDuplicatedInputs(self): with session.Session() as sess: a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[1, 3]) a_val, b_val, a2_val = sess.run([a, b, a]) self.assertAllEqual(a_val, [[1.0, 1.0]]) self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]]) self.assertAllEqual(a2_val, [[1.0, 1.0]]) def testFeedAndFetch(self): with session.Session() as sess: for dtype in [dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.uint8, dtypes.int16, dtypes.int8, dtypes.int64, dtypes.bool, dtypes.complex64, dtypes.complex128]: for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]: np_dtype = dtype.as_numpy_dtype feed_t = array_ops.placeholder(dtype=dtype, shape=shape) out_t = array_ops.identity(feed_t) np_array = np.random.randint(-10, 10, shape) if dtype == dtypes.bool: np_array = np_array > 0 elif dtype == dtypes.complex64: np_array = np.sqrt(np_array.astype(np_dtype)) elif dtype == dtypes.complex64: np_array = np.sqrt(np_array.astype(np_dtype)) else: np_array = np_array.astype(np_dtype) self.assertAllEqual(np_array, sess.run(out_t, feed_dict={feed_t: np_array})) # Check that we can also get the feed back. self.assertAllEqual(np_array, sess.run(feed_t, feed_dict={feed_t: np_array})) # Also check that we can get both back. out_v, feed_v = sess.run([out_t, feed_t], feed_dict={feed_t: np_array}) self.assertAllEqual(np_array, out_v) self.assertAllEqual(np_array, feed_v) def testFeedError(self): with session.Session() as sess: feed_t = array_ops.placeholder(dtype=dtypes.float32) out_t = array_ops.identity(feed_t) feed_val = constant_op.constant(5.0) with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'): sess.run(out_t, feed_dict={feed_t: feed_val}) with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'): out_t.eval(feed_dict={feed_t: feed_val}) with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'): out_t.op.run(feed_dict={feed_t: feed_val}) def testFeedPrecisionLossError(self): with session.Session() as sess: largest_int64 = np.iinfo(np.int64).max feed_int_implicit_int32 = constant_op.constant(1) feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32) out_t = constant_op.constant(1.0) with self.assertRaisesRegexp(TypeError, 'is not compatible with Tensor type'): sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64}) with self.assertRaisesRegexp(TypeError, 'is not compatible with Tensor type'): sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64}) def testStringFetch(self): with session.Session(): for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]: size = 1 for s in shape: size *= s c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)], dtype=np.object).reshape(shape) if size > 0 else [] c = constant_op.constant(c_list) self.assertAllEqual(c.eval(), c_list) def testStringFeed(self): with session.Session() as sess: for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]: size = 1 for s in shape: size *= s c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)], dtype=np.object).reshape(shape) feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape) c = array_ops.identity(feed_t) 
self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list) self.assertAllEqual(sess.run(feed_t, feed_dict={feed_t: c_list}), c_list) c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list}) self.assertAllEqual(c_v, c_list) self.assertAllEqual(feed_v, c_list) def testStringFeedWithNullCharacters(self): with session.Session(): c_list = [b'\n\x01\x00', b'\n\x00\x01'] feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2]) c = array_ops.identity(feed_t) out = c.eval(feed_dict={feed_t: c_list}) self.assertEqual(c_list[0], out[0]) self.assertEqual(c_list[1], out[1]) def testStringFeedWithUnicode(self): with session.Session(): c_list = [u'\n\x01\x00', u'\n\x00\x01', u'\u26a3 unicode', u'\U0001f60e deal with it'] feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)]) c = array_ops.identity(feed_t) out = c.eval(feed_dict={feed_t: c_list}) for i in range(len(c_list)): self.assertEqual(c_list[i], out[i].decode('utf-8')) out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)}) for i in range(len(c_list)): self.assertEqual(c_list[i], out[i].decode('utf-8')) def testInvalidTargetFails(self): with self.assertRaisesRegexp( errors.NotFoundError, 'No session factory registered for the given session options'): session.Session('INVALID_TARGET') def testFetchByNameDifferentStringTypes(self): with session.Session() as sess: c = constant_op.constant(42.0, name='c') d = constant_op.constant(43.0, name=u'd') e = constant_op.constant(44.0, name=b'e') f = constant_op.constant(45.0, name=r'f') self.assertTrue(isinstance(c.name, six.text_type)) self.assertTrue(isinstance(d.name, six.text_type)) self.assertTrue(isinstance(e.name, six.text_type)) self.assertTrue(isinstance(f.name, six.text_type)) self.assertEqual(42.0, sess.run('c:0')) self.assertEqual(42.0, sess.run(u'c:0')) self.assertEqual(42.0, sess.run(b'c:0')) self.assertEqual(42.0, sess.run(r'c:0')) self.assertEqual(43.0, sess.run('d:0')) self.assertEqual(43.0, sess.run(u'd:0')) self.assertEqual(43.0, sess.run(b'd:0')) self.assertEqual(43.0, sess.run(r'd:0')) self.assertEqual(44.0, sess.run('e:0')) self.assertEqual(44.0, sess.run(u'e:0')) self.assertEqual(44.0, sess.run(b'e:0')) self.assertEqual(44.0, sess.run(r'e:0')) self.assertEqual(45.0, sess.run('f:0')) self.assertEqual(45.0, sess.run(u'f:0')) self.assertEqual(45.0, sess.run(b'f:0')) self.assertEqual(45.0, sess.run(r'f:0')) def testIncorrectGraph(self): with ops.Graph().as_default() as g_1: c_1 = constant_op.constant(1.0, name='c') with ops.Graph().as_default() as g_2: c_2 = constant_op.constant(2.0, name='c') self.assertEqual('c', c_1.op.name) self.assertEqual('c', c_2.op.name) with session.Session(graph=g_1) as sess_1: self.assertEqual(1.0, sess_1.run(c_1)) with self.assertRaises(ValueError): sess_1.run(c_2) with self.assertRaises(ValueError): sess_1.run(c_2.op) with session.Session(graph=g_2) as sess_2: with self.assertRaises(ValueError): sess_2.run(c_1) with self.assertRaises(ValueError): sess_2.run(c_1.op) self.assertEqual(2.0, sess_2.run(c_2)) def testPartialRun(self): with session.Session() as sess: a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) c = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) r2 = math_ops.mul(r1, c) h = sess.partial_run_setup([r1, r2], [a, b, c]) res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2}) self.assertEqual(3, res) temp = res * 17 res = sess.partial_run(h, r2, feed_dict={c: temp}) self.assertEqual(153, res) # Call again on the same graph. 
h2 = sess.partial_run_setup([r1, r2], [a, b, c]) res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2}) self.assertEqual(3, res) temp = res * 18 res = sess.partial_run(h2, r2, feed_dict={c: temp}) self.assertEqual(162, res) def testPartialRunIncomplete(self): with session.Session() as sess: a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) c = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) r2 = math_ops.mul(r1, c) h = sess.partial_run_setup([r1, r2], [a, b, c]) res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2}) self.assertEqual(3, res) def testConcurrentPartialRun(self): with session.Session() as sess: a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) c = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) r2 = math_ops.mul(r1, c) h1 = sess.partial_run_setup([r1], [a, b, c]) h2 = sess.partial_run_setup([r1, r2], [a, b, c]) res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2}) self.assertEqual(3, res) temp = res * 19 res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9}) self.assertEqual(66, res) res = sess.partial_run(h2, r2, feed_dict={c: 7}) self.assertEqual(462, res) def testManyPartialRun(self): with session.Session() as sess: steps = 200 inputs = [] outputs = [] a = constant_op.constant(2.0, dtypes.float32) for i in xrange(steps): inputs.append(array_ops.placeholder(dtypes.float32, shape=[])) a = math_ops.mul(a, inputs[i]) outputs.append(a) h = sess.partial_run_setup(outputs, inputs) for i in xrange(steps): res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0}) self.assertEqual(2.0, res) feed_dict = {} for i in xrange(steps): feed_dict[inputs[i]] = 1.0 res = sess.run(outputs, feed_dict) self.assertEqual(steps, len(res)) self.assertEqual(2.0, res[-1]) def testRunAndPartialRun(self): with session.Session() as sess: a = constant_op.constant(2.0, dtypes.float32) b = a * 2 c = b * 3 r1 = sess.run([b, c]) h = sess.partial_run_setup([b, c], []) r2 = sess.partial_run(h, [b, c]) self.assertEqual(r1, r2) def testFeedDictKeyException(self): with session.Session() as sess: a = constant_op.constant(1.0, dtypes.float32, name='a') with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'): sess.run(a, feed_dict={'a': [2.0]}) def testPerStepTrace(self): run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() with ops.device('/cpu:0'): with session.Session() as sess: sess.run(constant_op.constant(1.0)) self.assertTrue(not run_metadata.HasField('step_stats')) sess.run(constant_op.constant(1.0), run_metadata=run_metadata) self.assertTrue(not run_metadata.HasField('step_stats')) sess.run(constant_op.constant(1.0), options=run_options, run_metadata=run_metadata) self.assertTrue(run_metadata.HasField('step_stats')) self.assertEquals(len(run_metadata.step_stats.dev_stats), 1) def testRunOptionsRunMetadata(self): run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() with ops.device('/cpu:0'): with session.Session() as sess: # all combinations are valid sess.run(constant_op.constant(1.0), options=None, run_metadata=None) sess.run(constant_op.constant(1.0), options=None, run_metadata=run_metadata) self.assertTrue(not run_metadata.HasField('step_stats')) sess.run(constant_op.constant(1.0), options=run_options, run_metadata=None) self.assertTrue(not run_metadata.HasField('step_stats')) 
sess.run(constant_op.constant(1.0), options=run_options, run_metadata=run_metadata) self.assertTrue(run_metadata.HasField('step_stats')) self.assertEquals(len(run_metadata.step_stats.dev_stats), 1) def testFeedShapeCompatibility(self): with session.Session() as sess: some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0]) new_shape = constant_op.constant([2, 2]) reshaped_tensor = array_ops.reshape(some_tensor, new_shape) with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'): sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]}) with self.assertRaisesRegexp(ValueError, 'may not be fed'): sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]}) def testInferShapesFalse(self): with ops.Graph().as_default(), ops.device('/cpu:0'): a = constant_op.constant([[1, 2]]) sess = session.Session() self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr) # Avoid lint error regarding 'unused' var a. self.assertTrue(a == a) def testInferShapesTrue(self): config = config_pb2.ConfigProto( graph_options=config_pb2.GraphOptions(infer_shapes=True)) with ops.Graph().as_default(), ops.device('/cpu:0'): a = constant_op.constant([[1, 2]]) sess = session.Session(config=config) self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr) # Avoid lint error regarding 'unused' var a. self.assertTrue(a == a) def testBuildCostModel(self): run_options = config_pb2.RunOptions() config = config_pb2.ConfigProto( allow_soft_placement=True, graph_options=config_pb2.GraphOptions(build_cost_model=100)) with session.Session(config=config) as sess: with ops.device('/gpu:0'): a = array_ops.placeholder(dtypes.float32, shape=[]) b = math_ops.add(a, a) c = array_ops.identity(b) d = math_ops.mul(c, c) for step in xrange(120): run_metadata = config_pb2.RunMetadata() sess.run(d, feed_dict={a: 1.0}, options=run_options, run_metadata=run_metadata) if step == 99: self.assertTrue(run_metadata.HasField('cost_graph')) else: self.assertFalse(run_metadata.HasField('cost_graph')) def testNonInteractiveSessionNesting(self): sess1 = session.Session() sess1_controller = sess1.as_default() sess1_controller.__enter__() sess2 = session.Session() sess2_controller = sess2.as_default() sess2_controller.__enter__() with self.assertRaisesRegexp(AssertionError, 'Nesting violated'): sess1_controller.__exit__(None, None, None) ops._default_session_stack.reset() def testInteractiveSessionNesting(self): sess1 = session.InteractiveSession() sess2 = session.InteractiveSession() del sess1 del sess2 def testAsDefault(self): c = constant_op.constant(37) sess = session.Session() with sess.as_default(): self.assertEqual(37, c.eval()) # Ensure that the session remains valid even when it is not captured. with session.Session().as_default(): self.assertEqual(37, c.eval()) def testReentry(self): sess = session.Session() with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'): with sess: with sess: pass def testInvalidArgument(self): with self.assertRaisesRegexp(TypeError, 'target must be a string'): session.Session(37) with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'): session.Session(config=37) with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'): session.Session(graph=37) def testTimeoutWithShortOperations(self): num_epochs = 5 q = data_flow_ops.FIFOQueue( capacity=50, dtypes=[dtypes.int32], shapes=[()]) enqueue_op = q.enqueue_many(constant_op.constant([1, 2])) # Use a 10-second timeout, which should be longer than any # non-blocking enqueue_many op. 
config = config_pb2.ConfigProto(operation_timeout_in_ms=10000) with session.Session(config=config) as sess: for _ in range(num_epochs): sess.run(enqueue_op) self.assertEqual(sess.run(q.size()), num_epochs * 2) def testRegisterFetchAndFeedConversionFunctions(self): class SquaredTensor(object): def __init__(self, tensor): self.sq = math_ops.square(tensor) fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0]) feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)] feed_fn2 = lambda feed: [feed.sq] session.register_session_run_conversion_functions(SquaredTensor, fetch_fn, feed_fn1, feed_fn2) with self.assertRaises(ValueError): session.register_session_run_conversion_functions(SquaredTensor, fetch_fn, feed_fn1, feed_fn2) with self.test_session() as sess: np1 = np.array([1.0, 1.5, 2.0, 2.5]) np2 = np.array([3.0, 3.5, 4.0, 4.5]) squared_tensor = SquaredTensor(np2) squared_eval = sess.run(squared_tensor) self.assertAllClose(np2 * np2, squared_eval) squared_eval = sess.run(squared_tensor, feed_dict={ squared_tensor : np1 * np1}) self.assertAllClose(np1 * np1, squared_eval) partial_run = sess.partial_run_setup([squared_tensor], []) squared_eval = sess.partial_run(partial_run, squared_tensor) self.assertAllClose(np2 * np2, squared_eval) if __name__ == '__main__': googletest.main()
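The sparse feed and fetch behaviour exercised by the tests above can be reproduced in a few lines of ordinary user code. What follows is a minimal standalone sketch, assuming the public TensorFlow 1.x graph/session API (tf.sparse_placeholder, tf.SparseTensorValue, tf.Session) rather than the internal modules the test file imports; the graph and the fed values are illustrative only.

import numpy as np
import tensorflow as tf  # assumes a TensorFlow 1.x installation (graph/session API)

# A sparse placeholder can be fed either an (indices, values, dense_shape) tuple
# or a SparseTensorValue; fetching the placeholder itself returns a SparseTensorValue.
sp = tf.sparse_placeholder(dtype=tf.float32, name='sp')
total = tf.sparse_reduce_sum(sp)

feed_value = tf.SparseTensorValue(
    indices=np.array([[0, 0], [1, 2]], dtype=np.int64),
    values=np.array([1.0, 2.0], dtype=np.float32),
    dense_shape=np.array([3, 4], dtype=np.int64))

with tf.Session() as sess:
    print(sess.run(total, feed_dict={sp: feed_value}))           # 3.0
    print(sess.run(sp, feed_dict={sp: feed_value}).dense_shape)  # [3 4]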
Hours in Accounting: To be approved to sit for the examination, candidates need 24 semester hours of upper-division or graduate-level accounting courses, including at least one course in each of the following subject areas: financial accounting, auditing, taxation, and management accounting; plus 24 semester hours in business-related courses. Experience Requirements: 12 months (2,000 hours of actual work experience) of private, governmental, academic, or public accounting work acceptable to the board. As of July 1, 2015, Montana became a 1-tier state.
from helper import *
import optional

section("Check optional requirements")
requirements_failed = []


def check_optional_requirement(requirement):
    # A requirement is satisfied either by installing its pip packages or by
    # finding all of its executables on the PATH.
    if 'pip' in requirement:
        return bool(shell("pip install -U {}".format(" ".join(requirement['pip'])),
                          True).success())
    elif 'executable' in requirement:
        return all(executable_exists(executable)
                   for executable in requirement['executable'])
    return False


for requirement in optional.OPTIONAL_REQUIREMENTS:
    if check_optional_requirement(requirement):
        printlog("* Success: {}".format(requirement['name']))
    else:
        printlog("* Fail: {}".format(requirement['name']))
        text = "{} - {}".format(requirement['name'], requirement['description'])
        requirements_failed.append((text, requirement))

section("Install *optional* non-python requirements")
requirements_failed.append(("Install nothing", 'exit'))

while True:
    requirement = user_input(requirements_failed)
    print('')
    print('')
    if requirement == 'exit':
        break
    printlog(requirement['name'])
    printlog(requirement['instruction'])
    if 'package_guess' in requirement:
        package = optional.get_guess(requirement['package_guess'])
        if package is not False:
            package_manager = optional.get_guess(optional.PackageManager)
            cmd = "{} {}".format(package_manager, package)
            print("\nOur guess how to install:\n>{}".format(cmd))
    print('')
    input('continue ')
    print('')
    print('')
    print('')
    if check_optional_requirement(requirement):
        printlog('Success!')
        # Drop the entry for this requirement from the menu of failed items
        # (the original `requirements_failed -= requirement` is not valid on a list).
        requirements_failed = [entry for entry in requirements_failed
                               if entry[1] is not requirement]
    else:
        printlog('Sorry, but it looks like this did not work...')
        print('')
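The checker above depends on two modules that are not shown here: helper (which presumably provides section, shell, printlog, executable_exists and user_input) and optional. Below is a hedged sketch of the shape optional.OPTIONAL_REQUIREMENTS would need for those checks to succeed; the field names are inferred from how the script reads them, and the concrete entries are invented for illustration.

# optional.py -- illustrative sketch only; field names inferred from the checker above.
PackageManager = ['apt-get install', 'brew install', 'yum install']  # assumed guess list

OPTIONAL_REQUIREMENTS = [
    {
        'name': 'lxml',
        'description': 'faster XML parsing',
        'instruction': 'Install the libxml2/libxslt headers, then re-run this check.',
        'pip': ['lxml'],                # verified via `pip install -U lxml`
    },
    {
        'name': 'graphviz',
        'description': 'diagram rendering',
        'instruction': 'Install graphviz with your system package manager.',
        'executable': ['dot'],          # verified via executable_exists('dot')
        'package_guess': 'graphviz',    # handed to optional.get_guess(...)
    },
]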
Welcome to the first and only Tarkan Fan Club on the web. Joining this club is completely free. Please click here to fill out the simple form to become a member of the club.
from random import random from random import choice from math import floor import names ''' Names is required to run this script. download from https://pypi.python.org/pypi/names/. ''' elder_population = 0 married_population = 0 single_population = 0 runtime = 0 born_this_year = 0 living = [] the_dead = [] ''' A bunch of global functions to store statistical and simulation data. ''' class Person(object): ''' This creates a class called person. ''' def __init__(self, name, gender, value, dad = "None", mom = "None"): ''' Everything a person needs to be a person is contained here. ''' self.value = value self.name = name self.gender = gender self.age = 0 self.status = "alive" self.single = True self.love_value = 0 self.love = "" """ flagged for removal^ """ self.dad = dad self.mom = mom self.fertility = round(random() * 10) self.want_child = round(random() * 5) self.children = 0 self.child_name = [] self.friends = {} self.mood = 0 self.health = 0 self.personality = round(random()*10) self.antisocial = round(random()*10) self.shallow = round(random()*20)-10 self.charisma = round(random()*20)-10 self.love_status = "None" self.child_status = [] self.hunger = 0 def __str__(self): ''' This displays all attributes of the person. ''' if self.single: return "My name is %s and I am %s and I am %s.\n and I want to have %s children.\n" % (self.name, self.gender, self.age, self.want_child) elif self.children == 0: return "My name is %s and I am %s and I am %s.\n I am married to %s and I want to have %s children.\n" % (self.name, self.gender, self.age, self.love, self.want_child) else: return "My name is %s and I am %s and I am %s.\n I am married to %s and I have %s children.\n" % (self.name, self.gender, self.age, self.love, self.children) def get_age(self): ''' This is a method to get the age ''' return "%s is %s years old" % (self.name, self.age) def get_family(self): ''' This method gets the family members ''' if (self.dad == "none"): if not self.single: if self.children > 0: return "%s is married to %s.\n%s has %s kids named %s." % (self.name, self.love, self.name, self.children,self.child_name) else: return "%s is married to %s.\n%s has no kids" % (self.name, self.love, self.name) else: return "%s has no family!" % self.name elif not self.single: if self.children > 0: return "%s parents are %s and %s\n%s is married to %s.\n%s has %s kids named %s." % (self.name, self.dad, self.mom, self.name, self.love, self.name, self.children,self.child_name) else: return "%s parents are %s and %s\n%s is married to %s.\n%s has no kids" % (self.name, self.dad, self.mom, self.name, self.love, self.name) else: return "%s parents are %s and %s" % (self.name, self.dad, self.mom) return "%s is not married" % self.name def get_friends(self): ''' This method lists the friends the person has ''' for key in self.friends: print key, self.friends[key] ''' functions that are called by elapse_time() ''' def elapse_time(years=100): ''' This moves time forward in the simulation. ''' global runtime global born_this_year global single_population elder_population = 0 single_population = 0 born_this_year = 0 print "running simulation for %s years..." % years print "Would you like to monitor the population? 
(y/n)" response = raw_input() for i in range(years): t = 0 while t < len(living)-1: if living [t].status != "dead": time(living [t]) if (living [t].love == "" and living[t].single == False): print living [t].name print living [t].value print living [t].love_value print living [t] print living [t].get_family() print len(living) print("something") wait = input("PRESS ENTER TO CONTINUE.") break t += 1 runtime += 1 if response == "y": print "Population is %s in the year %s" % (len(living), runtime) sim_stats.present_stats(years) class Statistics(object): """ statistical data & methods stored here. """ def __init__(self): self.counter = 0 self.name = "" def most_popular(self): for t in living: if t.single: single_population += 1 if len(t.friends) > counter: self.name = t.name def present_stats(self,years): print "\nSimulation ran for %s years this time. Total runtime %s" % (str(years), runtime) print "Population is %s" % len(living) print "\nOut of %s people, %s made it to their 80s" % (len(living), elder_population) print "%s babies we born in %s years" % (born_this_year, years) print "Out of %s people, %s married and have changed their sirnames" % (len(living), married_population) print "Out of %s people, %s never married" % (len(living), single_population) print "%s have died since the beginning of this simulation." % len(the_dead) print "%s has the most friends" % self.name def get_info(self): ''' A function that searches the person list for a match. ''' if type(name) == str and len(living)>0: for i in living: if living [i].name == name: return living [i].__str__() else: return "Invalid entry. Please enter a string." def who_is(self, s): ''' Lists people's names based on parameters ''' if (s == living or s == the_dead): for i in s: print s [i].name def count_people(self, s): ''' Lists people with parameters ''' if (s == the_dead or s == living): return len(s) else: return totalPop def who_is_married(self, s = "all"): ''' A function that lists married people. 
''' if s != "all": for i in s: if not s [i].single: print s [i].name else: for i in living: if not living [i].single: print living [i].name for i in the_dead: if not the_dead [i].single: print the_dead [i].name def who_is_has_children(self, s="all",t = True): ''' Lists who has children ''' if t: if s != "all": for i in s: if s [i].children > 0: return s [i].name else: for i in living: if s [i].children > 0: return s [i].name for i in the_dead: if s [i].children > 0: return s [i].name else: if s != "all": for i in s: if s [i].children < 1: return s [i].name else: for i in living: if s [i].children < 1: return s [i].name for i in the_dead: if s [i].children < 1: return s [i].name def count_has_children(self, s = "all",t = True): ''' counts parents ''' counter = 0 if (t and s!= "all"): for i in s: if s [i].children > 0: counter += 1 return counter elif (not t and s!= "all"): for i in s: if s [i].children < 1: counter += 1 return counter elif t: for i in living: if s [i].children > 0: counter += 1 for i in the_dead: if s [i].children > 0: counter += 1 return counter else: for i in living: if s [i].children < 1: counter += 1 for i in the_dead: if s [i].children < 1: counter += 1 return counter def count_married(self, s=0): ''' counts married ''' if (s == the_dead or s == living): counter = 0 for i in living: if not i.single: counter += 1 return counter else: return married_population def time(your): ''' This simulates a year of living for the person and his likelihood of dying that year ''' global living global elder_population global born_this_year your.age += 1 if your.age > 79: elder_population += 1 """if round(random() * 100)/100 + float(your.age) / 800 > 1: #This is the mortality algorithm. mortality(your) """ if round(random() * 100)/100 + float(your.age) / 800 > 1: your.status = "dead" the_dead.append(living[your.value]) if your.love_status != "none": if (not your.single and your.love_status): living[your.love_value].love_status = False living[your.love_value].love_value = len(the_dead)-1 elif not your.single and your.love_status: the_dead[your.love_value].love_status = False the_dead[your.love_value].love_value = len(the_dead)-1 number = len(living)-1-your.value for i in range(number): if not living[i+(len(living)-number)].single: if living[i+(len(living)-number)].love_status: if living[i+(len(living)-number)].love_value > living[i+(len(living)-number)].value: living[i+(len(living)-number)].love_value -= 1 living[i+(len(living)-number)].value -= 1 del living[your.value] else: make_friends(your) #Every year entities meet new people if your.single: get_love(your) make_friends(your) #And have a chance to find love. 
if not your.single and your.love_status: born_this_year += repro(your, living[your.love_value]) def make_friends(your): ''' allows people to gain friends ''' randomFactor = int(round(((your.age/100)+random())*10)) for i in range(randomFactor): their = living[int(round(random()*(len(living)-1)))] found = False for j in your.friends: if j == their.name or j == your.name: found = True break if found != True: test_of_friendship(your, their) def test_of_friendship(your,their): ''' The initial test of friendship between strangers ''' friendship_constant = 5 personality_score = (your.personality + their.personality) - (your.antisocial + their.antisocial) attraction = (your.charisma + their.shallow + your.shallow + their.charisma) totalScore = personality_score + attraction*random() if totalScore > (your.antisocial + their.antisocial)*random(): your.friends [their.name] = their.charisma + their.personality their.friends [your.name] = your.charisma + your.personality #print str(your.name) +" has made a friend with "+str(their.name) else: pass #print str(your.name) +" failed to make friends." def get_love(your): ''' This function searches for a couple ''' if (your.age > 18 and your.single): global married_population for i in range(5): y = int(round(random() * len(living)) - 1) if (your.gender != living [y].gender and living [y].age > 18 and living [y].status == "alive" and living [y].single): #print "%s courts %s" % (your.name, living [y].name) if (round(random() * your.age) / 40) > 0: your.single = False living [y].single = False your.love_status = True living [y].love_status = True if your.gender == "female": your.name = changeName(your.name, living [y].name) married_population += 2 living [y].love_value = your.value your.love_value = y your.love = living [y].name living [y].love = your.name break else: living [y].name = changeName(living [y].name, your.name) your.love = living [y].name living [y].love = your.name married_population += 2 living [y].love_value = your.value your.love_value = y break def changeName (hers, his): """ This changes the wife's surname """ oldName = hers newName = "" for i in range(len(hers)): if hers [i] == " ": newName = hers [:i] break for i in range (len(his)): if his[i] == " ": newName = newName + his [i:] break return newName def repro(his, hers): """ This function tests if a couple will have a child. """ global born_this_year fertilityrate = ((his.fertility+hers.fertility) * (1 - ((his.age+hers.age) / 100))) / 2 if (his.children < (round((his.want_child + hers.want_child) / 2)) and random()*fertilityrate > 1): his.children += 1 hers.children += 1 gender = choice(["male", "female"]) child_name = changeName (str(names.get_first_name(gender))+" ",his.name) living.append(Person(str(child_name), gender, len(living),his.name,hers.name)) his.child_name.append(child_name) hers.child_name.append(child_name) return 1 else: return 0 """ Simulation setup and restart functions below """ """ Information gathering functions below """ ''' The menu activates on startup. ''' def main_menu(): answer = "" while answer != "0": ''' This is the main menu where the simulation is controlled from. ''' print "\nWhat would you like to do?" print "1. Start Simulation\n2. Elapse Simulation\n3. Population Information\n4. Quick Start\n5. Restart Simulation\n0. Quit" answer = raw_input() if answer == "1" or answer == "5": print "\nhow large of a population would you like to simulate? 100 should be the max." answer = raw_input() if type(answer) != str or answer == "": print "\nApologies. 
You entered an invalid input.\n \n" else: restart(int(answer)) elif answer == "2": print "\nhow long do you wish to elapse? no more than 300." answer = raw_input() if type(answer) != str: print "\nApologies. You entered an invalid input.\n \n" else: elapse_time(int(answer)) elif answer == "4": restart(20) elapse_time(200) elif answer == "3": """ This is where the crap starts. Statistics galore! God help me. """ while answer != "0": print "\n1. Count alive\n2. Count dead\n3. Count married\n4. Name search \n5. List Alive \n6. List dead\n7. List Married\n8. List Married and Alive\n9. List Married and Dead\n10. Count Has Children and Alive\n0. Return" answer = raw_input() if answer == "1": """ Count alive """ print sim_stats.count_people(living) elif answer == "2": """ Count dead """ print sim_stats.count_people(the_dead) elif answer == "3": print sim_stats.count_married() elif answer == "4": print "\nPlease enter his or her name." answer = raw_input() if type(answer) != str or answer == "": print "\nApologies. You entered an invalid input.\n \n" else: search_value = "nothing" gender = "" for i in living: if answer == living [i].name: search_value = i break if search_value != "nothing": print "found %s! What do you want to do?" % gender while answer != "0": print "\n1. About %s\n2. Family\n3. Age\n4. Friends\n0. Return" % gender answer = raw_input() if type(answer) != str: print "\nApologies. You entered an invalid input.\n \n" elif answer == "1": print "searching..." print living[search_value].__str__() elif answer == "2": print living[search_value].get_family() elif answer == "3": print living[search_value].get_age() elif answer == "4": print living[search_value].get_friends() elif answer == "0": pass else: print "\nCould you repeat that? \n \n" answer = 1 else: print "Didn't find answer." elif answer == "5": print sim_stats.who_is(living) elif answer == "6": sim_stats.who_is(the_dead) elif answer == "7": sim_stats.who_is_married() elif answer == "8": sim_stats.who_is_married(living) elif answer == "9": sim_stats.who_is_married(the_dead) elif answer == "10": print sim_stats.count_has_children(living) answer = 1 print "\nreturning to main menu" else: print "\nCould you repeat that? \n \n" def sim_setup(p): ''' This starts the simulation by preparing the first group of people. ''' print "\nJust a moment...\n\n" for i in range(p): living.append(i) gender = choice(["male", "female"]) living[i] = Person (str(nahttps://github.com/jackellice2/Population-Simulator/new/master#fullscreen_blob_contentsmes.get_full_name(gender)), gender, i) print "%s people successfully created!\n" % len(living) def restart(p): ''' Restarts the simulation. ''' global living global the_dead global runtime global elder_population global married_population global single_population runtime = 0 elder_population = 0 married_population = 0 single_population = 0 del living[:] del the_dead[:] sim_setup(p) sim_stats = Statistics() main_menu() print "\nGood Bye!"
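One detail of the simulator above that is easy to miss: the mortality check in time() declares a person dead in a given year when round(random() * 100)/100 + age/800 exceeds 1, which makes the per-year probability of death roughly age/800. Here is a small standalone sketch (mine, not part of the simulator) that tabulates the survival curve this rule implies.

# Tabulate the chance of still being alive at each age under the simulator's
# mortality rule: die this year with probability ~ age/800.
def survival_curve(max_age=100):
    alive = 1.0
    curve = []
    for age in range(1, max_age + 1):
        alive *= 1.0 - min(age / 800.0, 1.0)   # probability of surviving this year
        curve.append((age, alive))
    return curve

for age, p in survival_curve():
    if age % 20 == 0:
        print("P(alive at age %3d) = %.3f" % (age, p))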
ESRO Seafood imports deep-frozen and fresh fish and seafood from all over the world. We purchase only the best fish and seafood. We do so on a large scale, purchasing exclusive varieties directly at the source and exporting directly to customers in and outside Europe. We offer a complete product range for the retail, food service and wholesale industries. ESRO Seafood stands for unrivaled quality. For honest, sustainable products, largely marketed under our own Blue World ESRO label. From Canadian Black Cod to Chilean mussel meat, from Japanese Hamachi fillet to Sashimi Tuna from Vietnam: ESRO Seafood offers you the best from the seven world seas. Blue World guarantees you get the very best quality. We purchase our fish and seafood directly from the source. Whether on a boat or in a factory, all selected products are immediately provided with our guaranteed quality label. This means they are handled with the utmost care, are sea frozen and subject to intensive control with regard to hygiene, processing, packaging and logistics. Sustainability is paramount. We work exclusively with EU-certified suppliers. These are all local companies that we visit several times a year to ensure that they meet the high ESRO standards. We also check that products are processed in accordance with the specific wishes of our customers: at the production site in the country of origin. Our wild fish is MSC-certified, whereas our farmed fish is ASC-certified. ESRO Seafood supplies an unprecedentedly wide range of special fish and seafood. We have huge purchasing power and serve a vast range of specialized clients throughout Europe and beyond. We guarantee product availability. Most products are kept in stock in our cold storage. This also enables us to steer clear of sudden price fluctuations as much as possible. ESRO Seafood delivers precisely what you want, in the way that you want. Deviating units, a foreign language on your packaging or a mix of products: almost everything is possible. Our clients rely on our support and assistance in finding the best solutions. ESRO Seafood was established in 2009 as part of the ESRO Food Group. From its head office in the Netherlands, the company serves customers throughout Europe and beyond. Far-reaching specialization, customer-oriented flexibility and uncompromising adherence to the highest quality standards are the foundation of our success. They are also the basis of our long-term partnerships with clients looking for the very best quality at a fair price.
# Copyright (c) 2013 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import importlib import re import unittest try: import unittest.mock as mock except ImportError: import mock from cloudbaseinit.tests import testutils class ExtendVolumesPluginTests(unittest.TestCase): def setUp(self): self._ctypes_mock = mock.MagicMock() self._comtypes_mock = mock.MagicMock() self._module_patcher = mock.patch.dict( 'sys.modules', {'comtypes': self._comtypes_mock, 'ctypes': self._ctypes_mock}) self._module_patcher.start() extendvolumes = importlib.import_module('cloudbaseinit.plugins.' 'windows.extendvolumes') self._extend_volumes = extendvolumes.ExtendVolumesPlugin() def tearDown(self): self._module_patcher.stop() @mock.patch('cloudbaseinit.plugins.windows.extendvolumes' '.ExtendVolumesPlugin._get_volume_index') @mock.patch('cloudbaseinit.plugins.windows.extendvolumes' '.ExtendVolumesPlugin._extend_volume') @mock.patch('cloudbaseinit.utils.windows.vds.IVdsVolume') def test_extend_volumes(self, _vds_mock, mock_extend_volume, mock_get_volume_index): mock_pack = mock.MagicMock() mock_volume_idxs = mock.MagicMock() mock_enum = mock.MagicMock() mock_unk = mock.MagicMock() mock_c = mock.MagicMock() mock_volume = mock.MagicMock() mock_properties = mock.MagicMock() mock_pack.QueryVolumes.return_value = mock_enum mock_enum.Next.side_effect = [(mock_unk, mock_c), (None, None)] mock_unk.QueryInterface.return_value = mock_volume mock_volume.GetProperties.return_value = mock_properties self._ctypes_mock.wstring_at.return_value = 'fake name' mock_get_volume_index.return_value = mock_volume_idxs self._extend_volumes._extend_volumes(mock_pack, [mock_volume_idxs]) mock_pack.QueryVolumes.assert_called_once_with() mock_enum.Next.assert_called_with(1) mock_unk.QueryInterface.assert_called_once_with(_vds_mock) mock_volume.GetProperties.assert_called_once_with() self._ctypes_mock.wstring_at.assert_called_with( mock_properties.pwszName) mock_get_volume_index.assert_called_once_with('fake name') mock_extend_volume.assert_called_once_with(mock_pack, mock_volume, mock_properties) self._ctypes_mock.windll.ole32.CoTaskMemFree.assert_called_once_with( mock_properties.pwszName) def test_get_volume_index(self): mock_value = mock.MagicMock() re.match = mock.MagicMock(return_value=mock_value) mock_value.group.return_value = '9999' response = self._extend_volumes._get_volume_index('$2') mock_value.group.assert_called_once_with(1) self.assertTrue(response == 9999) @mock.patch('cloudbaseinit.plugins.windows.extendvolumes' '.ExtendVolumesPlugin._get_volume_extents_to_resize') @mock.patch('cloudbaseinit.utils.windows.vds.VDS_INPUT_DISK') def test_extend_volume(self, mock_VDS_INPUT_DISK, mock_get_volume_extents_to_resize): mock_disk = mock.MagicMock() mock_pack = mock.MagicMock() mock_volume = mock.MagicMock() mock_properties = mock.MagicMock() mock_volume_extent = mock.MagicMock() mock_async = mock.MagicMock() mock_get_volume_extents_to_resize.return_value = [(mock_volume_extent, 9999)] mock_VDS_INPUT_DISK.return_value = mock_disk 
mock_volume.Extend.return_value = mock_async self._extend_volumes._extend_volume(mock_pack, mock_volume, mock_properties) mock_get_volume_extents_to_resize.assert_called_once_with( mock_pack, mock_properties.id) self._ctypes_mock.wstring_at.assert_called_with( mock_properties.pwszName) mock_volume.Extend.assert_called_once_with( mock_VDS_INPUT_DISK.__mul__()(), 1) mock_async.Wait.assert_called_once_with() @mock.patch('cloudbaseinit.utils.windows.vds.IVdsDisk') @mock.patch('cloudbaseinit.utils.windows.vds.VDS_DISK_EXTENT') def test_get_volume_extents_to_resize(self, mock_VDS_DISK_EXTENT, mock_IVdsDisk): mock_pack = mock.MagicMock() mock_extents_p = mock.MagicMock() mock_unk = mock.MagicMock() mock_c = mock.MagicMock() mock_disk = mock.MagicMock() mock_enum = mock.MagicMock() fake_volume_id = '$1' mock_array = mock.MagicMock() mock_array.volumeId = fake_volume_id mock_pack.QueryDisks.return_value = mock_enum mock_enum.Next.side_effect = [(mock_unk, mock_c), (None, None)] mock_unk.QueryInterface.return_value = mock_disk mock_disk.QueryExtents.return_value = (mock_extents_p, 1) mock_VDS_DISK_EXTENT.__mul__().from_address.return_value = [mock_array] response = self._extend_volumes._get_volume_extents_to_resize( mock_pack, fake_volume_id) mock_pack.QueryDisks.assert_called_once_with() mock_enum.Next.assert_called_with(1) mock_unk.QueryInterface.assert_called_once_with(mock_IVdsDisk) self._ctypes_mock.addressof.assert_called_with(mock_extents_p.contents) mock_VDS_DISK_EXTENT.__mul__().from_address.assert_called_with( self._ctypes_mock.addressof(mock_extents_p.contents)) self._ctypes_mock.pointer.assert_called_once_with( mock_VDS_DISK_EXTENT()) self.assertEqual([], response) self._ctypes_mock.windll.ole32.CoTaskMemFree.assert_called_with( mock_extents_p) @mock.patch('cloudbaseinit.utils.windows.vds.' 'VDS_QUERY_SOFTWARE_PROVIDERS') @mock.patch('cloudbaseinit.utils.windows.vds.IVdsSwProvider') def test_query_providers(self, mock_IVdsSwProvider, mock_VDS_QUERY_SOFTWARE_PROVIDERS): mock_svc = mock.MagicMock() mock_enum = mock.MagicMock() mock_unk = mock.MagicMock() mock_c = mock.MagicMock() mock_svc.QueryProviders.return_value = mock_enum mock_enum.Next.side_effect = [(mock_unk, mock_c), (None, None)] mock_unk.QueryInterface.return_value = 'fake providers' response = self._extend_volumes._query_providers(mock_svc) mock_svc.QueryProviders.assert_called_once_with( mock_VDS_QUERY_SOFTWARE_PROVIDERS) mock_enum.Next.assert_called_with(1) mock_unk.QueryInterface.assert_called_once_with(mock_IVdsSwProvider) self.assertEqual(['fake providers'], response) @mock.patch('cloudbaseinit.utils.windows.vds.IVdsPack') def test_query_packs(self, mock_IVdsPack): mock_provider = mock.MagicMock() mock_enum = mock.MagicMock() mock_unk = mock.MagicMock() mock_c = mock.MagicMock() mock_provider.QueryPacks.return_value = mock_enum mock_enum.Next.side_effect = [(mock_unk, mock_c), (None, None)] mock_unk.QueryInterface.return_value = 'fake packs' response = self._extend_volumes._query_packs(mock_provider) mock_provider.QueryPacks.assert_called_once_with() mock_enum.Next.assert_called_with(1) mock_unk.QueryInterface.assert_called_once_with(mock_IVdsPack) self.assertEqual(['fake packs'], response) def test_get_volumes_to_extend(self): with testutils.ConfPatcher('volumes_to_extend', '1'): response = self._extend_volumes._get_volumes_to_extend() self.assertEqual([1], response) @mock.patch('cloudbaseinit.utils.windows.vds.load_vds_service') @mock.patch('cloudbaseinit.plugins.windows.extendvolumes.' 
'ExtendVolumesPlugin._query_providers') @mock.patch('cloudbaseinit.plugins.windows.extendvolumes.' 'ExtendVolumesPlugin._query_packs') @mock.patch('cloudbaseinit.plugins.windows.extendvolumes.' 'ExtendVolumesPlugin._extend_volumes') def test_execute(self, mock_extend_volumes, mock_query_packs, mock_query_providers, mock_load_vds_service): mock_svc = mock.MagicMock() fake_providers = ['fake providers'] fake_packs = ['fake packs'] mock_service = mock.MagicMock() fake_data = 'fake data' mock_load_vds_service.return_value = mock_svc mock_query_providers.return_value = fake_providers mock_query_packs.return_value = fake_packs with testutils.ConfPatcher('volumes_to_extend', '1'): self._extend_volumes.execute(mock_service, fake_data) mock_query_providers.assert_called_once_with(mock_svc) mock_query_packs.assert_called_once_with('fake providers') mock_extend_volumes.assert_called_with('fake packs', [1])
This monthly Global Integrator features highlights on developments in traditional medicine and alternative and integrative health during a 1-month period. Here are 32 selections from India, Gambia, Vietnam, indigenous Canada, South Africa, Nigeria, Ethiopia, and elsewhere for the month of January 2015. A professor speaking to the Nigerian Academy of Sciences makes the case for why Nigeria must develop medicine from natural sources. In late-January meetings with Nguyen Hoang Son, deputy director of the Traditional Medicine Administration in Vietnam, traditional healers signed an oath to not use rhino horn medicinally. Leading British herbal agencies are now among the 140 organizations and businesses endorsing the Botanical Adulterants Program led by 3 US not-for-profit entities. In South Africa, the Herbal Drugs Research Unit at Tshwane University of Technology will begin looking into the healing properties of the country’s indigenous plants. They estimate that there are 200,000 traditional healers in the country. This widely linked article from Indian Country media in the United States speaks to scientific findings that support traditional healing agents used by indigenous peoples there. The Ethiopian Ministry of Culture and Tourism, in collaboration with Jimma University, organized a conference on “Ethiopian Traditional Medicine: Underutilized/Threatened Heritage and its Prospects” January 16-17, 2015. See photo of participants. Kirkland & Ellis is acting for Hong Kong’s China Traditional Chinese Medicine in a proposed $1.3-billion purchase of the largest manufacturer of concentrated traditional Chinese medicine granules in China. The major British indexed medical journal The Lancet included this thoughtful article urging collaborative engagement between traditional and complementary systems of medicine and conventional biomedicine for mental illness. Plans for a major study on Ayurveda’s potential led by author Deepak Chopra, MD, received attention from multiple media. The cleverly titled “T(ha)ime Machine: Global Trends Turn Back to Thai Wisdom,” hosted by the Office of Knowledge Management and Development at Royal Paragon Hall, sought to draw attention to the nation’s traditional medicine traditions. The crossover issues of healthcare integration and colonialism are the subject of this posting on the indigenous Canadian child who died recently after her parents chose to forego Western methods. British Member of Parliament David Tredinnick is keeping up his call for the government to explore contributions of complementary medicine practitioners as a partial solution to the medical crisis there. Also in Gambia, before the end of 2015 the government, in collaboration with its Traditional Healers Association (TRAHAS) and the West African Health Organization, will host a World Summit of Natural Medicine Practitioners in Banjul. A delegation of Russian military doctors visited Chinese doctors to explore integration of these methods in care of their soldiers. Russian Defense Minister Sergey Shoygu had previously expressed interest in establishing a Center of Traditional Chinese Medicine in Moscow. In the city of Saurimo, Angola, a 2-day meeting was held to promote natural products “used in treating endemic diseases.” The event was promoted by the Provincial Directorate of Culture in Lunda Sul, and the provincial governor, Cândida Narciso, pictured, attended.
In Karachi, conservative Pakistani president Mamnoon Hussain urged “practitioners as well as proponents of alternative medicine to focus towards scientific research as well standardisation of education and training in homoeopathy” and traditional practices. Insurance payments for complementary and alternative medicine practices are under assault following a leaked government report. A new goods and services tax in Malaysia may end up shutting down up to a third (1800) of the 6000 “Chinese medicine halls” in that country due to challenges in meeting computerization and other requirements. The central government of India has announced that it will financially support a new Ayurvedic clinical research center on 50 acres in Kerala to help “scientifically validate traditional systems of healing.” The center will include a strong biotech unit. The government’s goal is to expand the already robust market for Ayurvedic medicine 5-fold by 2020. Apparently, the Indian complex will also include India’s first center for the interdisciplinary study of medicine, the Amrita Institute for Integrated Medicine and Research Center. A “reverse pharmacology process” is being explored in Palau for use of medicinal plants for diabetes and other nonpharmacological diseases following publication of a major study. An excellent overview of the Indian government’s plans with its AYUSH department and the globalization of Ayurveda is here. Recent action in India has stimulated this reflection on the AYUSH department and the global movement for “integrative medicine” with the Arizona Center for Integrative Medicine (United States) referenced. This article in Forbes speaks to the efforts in China to make a demarcation between those traditional medicines that can be added to food and those that cannot. This is in a country where food is definitely viewed as medicine. One of Shanghai’s 157 “intangible cultural traditions” was celebrated in the Chinese Global Times: the TCM practice of orthopedics.
#!/usr/bin/env python # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # try: import paramiko from boto.manage.cmdshell import SSHClient except ImportError: paramiko = None SSHClient = None from tests.compat import mock, unittest class TestSSHTimeout(unittest.TestCase): @unittest.skipIf(not paramiko, 'Paramiko missing') def test_timeout(self): client_tmp = paramiko.SSHClient def client_mock(): client = client_tmp() client.connect = mock.Mock(name='connect') return client paramiko.SSHClient = client_mock paramiko.RSAKey.from_private_key_file = mock.Mock() server = mock.Mock() test = SSHClient(server) self.assertEqual(test._ssh_client.connect.call_args[1]['timeout'], None) test2 = SSHClient(server, timeout=30) self.assertEqual(test2._ssh_client.connect.call_args[1]['timeout'], 30)
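# --- Hedged usage sketch (not part of the test above) ----------------------
# The behaviour the test verifies is that the `timeout` keyword given to
# boto's SSHClient is forwarded to paramiko's connect(). The helper below is
# an illustrative sketch of that: `server` stands for an already configured
# boto.manage Server object (hypothetical here), and run() is assumed to
# return a (status, stdout, stderr) tuple as in boto's cmdshell module.
def check_uptime(server):
    ssh = SSHClient(server, timeout=30)         # connect() receives timeout=30
    status, stdout, stderr = ssh.run('uptime')  # execute a command over SSH
    ssh.close()
    return status, stdout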
#!/usr/bin/env python # coding: utf-8 import sys import time import threading import StringIO from PyQt4 import QtGui, QtCore from src.util.app_util import save_cache, load_cache class MainWindow(QtGui.QMainWindow): """ プロジェクトをグラフィカルに操作するUIウィンドウ """ LABEL_TEXT_MODEL_PATH = "Model File (.off or .obj) Path" LABEL_TEXT_GRID_PATH = "Grid Path" LABEL_TEXT_CLA_PATH = ".cla File Path" LABEL_TEXT_SAVE_PATH = "Save Path" LABEL_TEXT_N_DIV_PATH = "N-Division" LABEL_TEXT_GRID_SCALE_PATH = "Grid Scale" LABEL_TEXT_GROUP = "Settings" BUTTON_TEXT_FILE_DIALOG = "..." BUTTON_TEXT_CREATE = "create" DIALOG_TITLE_FILE = "open file" DIALOG_TITLE_FOLDER = "choice folder" FILE_DIALOG_INIT_PATH = "../res" CACHE_PATH = "../.cache" KEY_MODEL_PATH = "model_path" KEY_GRID_PATH = "grid_path" KEY_CLA_PATH = "cla_path" KEY_SAVE_PATH = "save_path" KEY_N_DIV = "n_div" KEY_GRID_SCALE = "grid_scale" class SygnalHost(QtCore.QObject): """ 単一シグナルを持つオブジェクト """ sygnal = QtCore.pyqtSignal() def __init__(self, title, x, y, width, height, create_button_click_handler): """ :type title: str :param title: ウィンドウタイトル :type x: int :param x: ウィンドウのx座標 :type y: int :param y: ウィンドウのy座標 :type width: int :param width: ウィンドウの幅 :type height: int :param height: ウィンドウの高さ """ super(MainWindow, self).__init__() self.setGeometry(x, y, width, height) self.setWindowTitle(title) self.parent_widget = QtGui.QWidget() self.create_button_click_handler = create_button_click_handler ### result layout ### self.te_result = QtGui.QTextEdit(self) vl_result = QtGui.QVBoxLayout() vl_result.addWidget(self.te_result) ### path input layout ### # text box self.tb_model_path = self.get_cached_line_edit(MainWindow.CACHE_PATH, MainWindow.KEY_MODEL_PATH) self.tb_grid_path = self.get_cached_line_edit(MainWindow.CACHE_PATH, MainWindow.KEY_GRID_PATH) self.tb_cla_path = self.get_cached_line_edit(MainWindow.CACHE_PATH, MainWindow.KEY_CLA_PATH) self.tb_save_path = self.get_cached_line_edit(MainWindow.CACHE_PATH, MainWindow.KEY_SAVE_PATH) self.tb_n_div = self.get_cached_line_edit(MainWindow.CACHE_PATH, MainWindow.KEY_N_DIV) self.tb_grid_scale = self.get_cached_line_edit(MainWindow.CACHE_PATH, MainWindow.KEY_GRID_SCALE) # button btn_fd_model_path = self.get_file_dialog_button(self.tb_model_path, MainWindow.KEY_MODEL_PATH, False) btn_fd_grid_path = self.get_file_dialog_button(self.tb_grid_path, MainWindow.KEY_GRID_PATH, True) btn_fd_cla_path = self.get_file_dialog_button(self.tb_cla_path, MainWindow.KEY_CLA_PATH, True) btn_fd_save_path = self.get_file_dialog_button(self.tb_save_path, MainWindow.KEY_SAVE_PATH, False) # path layout row hl_model_path = self.get_file_path_layout(self.tb_model_path, btn_fd_model_path) hl_grid_path = self.get_file_path_layout(self.tb_grid_path, btn_fd_grid_path) hl_cla_path = self.get_file_path_layout(self.tb_cla_path, btn_fd_cla_path) hl_save_path = self.get_file_path_layout(self.tb_save_path, btn_fd_save_path) # path layout vl_path = QtGui.QVBoxLayout() vl_path.addWidget(QtGui.QLabel(MainWindow.LABEL_TEXT_MODEL_PATH)) vl_path.addLayout(hl_model_path) vl_path.addWidget(QtGui.QLabel(MainWindow.LABEL_TEXT_GRID_PATH)) vl_path.addLayout(hl_grid_path) vl_path.addWidget(QtGui.QLabel(MainWindow.LABEL_TEXT_CLA_PATH)) vl_path.addLayout(hl_cla_path) vl_path.addWidget(QtGui.QLabel(MainWindow.LABEL_TEXT_SAVE_PATH)) vl_path.addLayout(hl_save_path) vl_path.addWidget(QtGui.QLabel(MainWindow.LABEL_TEXT_N_DIV_PATH)) vl_path.addWidget(self.tb_n_div) vl_path.addWidget(QtGui.QLabel(MainWindow.LABEL_TEXT_GRID_SCALE_PATH)) vl_path.addWidget(self.tb_grid_scale) gw_path = 
QtGui.QGroupBox(MainWindow.LABEL_TEXT_GROUP) gw_path.setLayout(vl_path) vl_path_group = QtGui.QVBoxLayout() vl_path_group.addWidget(gw_path) ### create button layout ### self.btn_create = QtGui.QPushButton(self) self.btn_create.setText(MainWindow.BUTTON_TEXT_CREATE) vl_button = QtGui.QVBoxLayout() vl_button.addWidget(self.btn_create) self.connect(self.btn_create, QtCore.SIGNAL('clicked()'), self.on_create_button_clicked) # combine path input layout and create button layout. vl_path_button = QtGui.QVBoxLayout() vl_path_button.addLayout(vl_path_group) vl_path_button.addLayout(vl_button) # inflate parent_layout = QtGui.QHBoxLayout() parent_layout.addLayout(vl_result) parent_layout.addLayout(vl_path_button) # set layout self.parent_widget.setLayout(parent_layout) self.setCentralWidget(self.parent_widget) self.show() # start std-output to tb_result. self.__show_stdout_as_result() def get_cached_line_edit(self, cache_path, cache_key): """ QLineEditを返す QLineEditには前回最後に入力した内容が入る :type cache_path: str :param cache_path: キャッシュ保存パス :type cache_key: str :param cache_key: キャッシュされたデータのキー :type: QtGui.QLineEdit :return: キャッシュされたパスを読み込んだQLineEditオブジェクト """ line_edit = QtGui.QLineEdit(self) cache = load_cache(cache_path, cache_key) if cache is not None: line_edit.setText(cache) return line_edit def get_file_dialog_button(self, line_edit, cache_key, is_file): """ ファイルダイアログを開くQPushButtonを返す ファイルダイアログで読み込まれたパスはキャッシュされる :type line_edit: QtGui.QLineEdit :param line_edit: パスを入力するQLineEditオブジェクト :type cache_key: str :param cache_key: キャッシュされたデータのキー :type is_file: bool :param is_file: ファイルパス読み込みかどうか(Falseの場合ディレクトリパス読み込み) :rtype: QtGui.QPushButton :return: ファイルダイアログを開くQPushButton """ button = QtGui.QPushButton(self) button.setText(MainWindow.BUTTON_TEXT_FILE_DIALOG) def handler(): if is_file: f_dialog = QtGui.QFileDialog.getOpenFileName title = MainWindow.DIALOG_TITLE_FILE else: f_dialog = QtGui.QFileDialog.getExistingDirectory title = MainWindow.DIALOG_TITLE_FOLDER path = f_dialog(self, title, MainWindow.FILE_DIALOG_INIT_PATH) line_edit.setText(path) save_cache(MainWindow.CACHE_PATH, cache_key, path) self.connect(button, QtCore.SIGNAL('clicked()'), handler) return button def get_file_path_layout(self, line_edit, button): """ ファイルパスを入力するQLineEditと ファイルダイアログを開くQPushButtonを統合したレイアウトを返す :type line_edit: QtGui.QLineEdit :param line_edit: QLineEditオブジェクト :type button: QtGui.QPushButton :param button: QPushButtonオブジェクト """ hl_model_path = QtGui.QHBoxLayout() hl_model_path.addWidget(line_edit) hl_model_path.addWidget(button) return hl_model_path def set_on_create_button_click_handler(self, handler): """ createボタンが押された時のサブハンドラを設定 :type handler: func(**kwarg) :param handler: サブハンドラ関数 """ self.create_button_click_handler = handler def on_create_button_clicked(self): """ createボタンが押された時のメインハンドラ """ try: kwarg = { MainWindow.KEY_MODEL_PATH: str(self.tb_model_path.text()), MainWindow.KEY_GRID_PATH: str(self.tb_grid_path.text()), MainWindow.KEY_CLA_PATH: str(self.tb_cla_path.text()), MainWindow.KEY_SAVE_PATH: str(self.tb_save_path.text()), MainWindow.KEY_N_DIV: int(str(self.tb_n_div.text())), MainWindow.KEY_GRID_SCALE: float( str(self.tb_grid_scale.text()))} except (ValueError, TypeError), e: if "n_div" in e.message: QtGui.QMessageBox.critical(self, "", "N-Division should be a number.") elif MainWindow.KEY_GRID_SCALE in e.message: QtGui.QMessageBox.critical(self, "", "Grid Scale should be a number.") else: QtGui.QMessageBox.critical(self, "", "Check paths in text boxes.") # サブハンドラが非Noneの場合、GUI上の入力値を渡して呼ぶ if 
self.create_button_click_handler is not None: self.create_button_click_handler(kwarg) def __show_stdout(self): """ 標準出力を文字列として逐次取得し、GUI上に表示する """ stdout_as_string_io = sys.stdout stderr_as_string_io = sys.stderr stdout_as_string_io.seek(0) stderr_as_string_io.seek(0) text_out = stdout_as_string_io.read() text_err = stderr_as_string_io.read() self.te_result.setText( self.te_result.toPlainText() + text_out + text_err) cursor = self.te_result.textCursor() cursor.movePosition(QtGui.QTextCursor.End) self.te_result.setTextCursor(cursor) stdout_as_string_io.close() stderr_as_string_io.close() sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ sys.stdout.write(text_out) sys.stderr.write(text_err) sys.stdout = StringIO.StringIO() sys.stderr = StringIO.StringIO() def __show_stdout_as_result(self, duration=1): """ 標準出力を文字列として逐次取得する """ # pyqt signal to show standard output on te_result. signal_stdout = MainWindow.SygnalHost() # connect signal_stdout.sygnal.connect(self.__show_stdout) def emit_signal(): # change sys.stdout to StringIO. sys.stdout = StringIO.StringIO() sys.stderr = StringIO.StringIO() while True: signal_stdout.sygnal.emit() time.sleep(duration) threading.Thread(target=emit_signal).start() def main(title, x, y, width, height, create_button_click_handler): """ GUI Main関数 :type init: func(QtGui.QMainWindow) :param init: 初期化関数 :type title: str :param title: ウィンドウタイトル :type x: int :param x: ウィンドウのx座標 :type y: int :param y: ウィンドウのy座標 :type width: int :param width: ウィンドウの幅 :type height: int :param height: ウィンドウの高さ """ app = QtGui.QApplication(sys.argv) window = MainWindow(title, x, y, width, height, create_button_click_handler) sys.exit(app.exec_())
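# --- Hedged launch sketch (illustrative only) -------------------------------
# The window title and geometry below are arbitrary example values, not taken
# from the project; the handler simply echoes the dict assembled in
# MainWindow.on_create_button_clicked().
def _example_create_handler(kwarg):
    print(kwarg[MainWindow.KEY_MODEL_PATH])
    print(kwarg[MainWindow.KEY_N_DIV])
    print(kwarg[MainWindow.KEY_GRID_SCALE])

if __name__ == '__main__':
    main(u"Model Creator", 100, 100, 900, 600, _example_create_handler)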
Adelaide has had pop-up shops, pop-up restaurants, and now we have our own city-wide installation of pop-up fountains, courtesy of SA Water. Hundreds of them in fact, activating our streets with flying rubble and cool, refreshing water every year. And the best part is, you never know when you'll get to see one! Maybe one will appear at your bus stop in the morning and prevent you from getting to work! Maybe it'll happen outside your house overnight and flood your master bedroom! Maybe you'll be walking down the street one day and a pop-up fountain will surprise you by shooting you up into the air with large slabs of pavement! It's all so exciting! Much better than boring old regular fountains, which don't even damage property or threaten to kill you. Take that, Rome. Surely with five burst water mains in the last nine days and dozens since January, we're at the stage where we can officially consider "The Burst Water Main State" for our license plates? Imagine the slogans SA Tourism could come up with: "Adelaide: You won't stop gushing over us!" or "SA: We're fit to bursting with top attractions!". Meanwhile Adelaide's sister city in New Zealand is Christchurch, but I reckon there's a fresh case for changing it to Rotorua. The way we're going, we'll have more geysers than they do by the end of the financial year. Yesterday The Advertiser revealed SA Water had cherry-picked this single positive statistic from an independent report, ignoring the other parts that showed it was the worst major utility in the nation for fixing faults and the second-worst for litres lost per customer due to pipe failure. Colour me unsurprised. Because specious reasoning though it may be, when a person encounters three burst water mains in the space of two days, it's difficult to convince them that there isn't a problem. On Wednesday I had to attend a meeting at Adelaide Town Hall, and was greeted by crews cleaning up a burst water main next door, on Pirie St. Three hours later I went to a cafe in Hyde Park, where crowds of people were trooping in and out of the toilets. "Sorry about that, we're the only ones in the neighbourhood with a flushing loo today," the owner told me, pointing out the SA Water crews working on a burst a block down the road. The next morning I awoke to a text message from my dad: "Have you got running water? Burst water main near your place". I'm beginning to think it's me. Last week I even had to reschedule a meeting with a colleague because a water main had burst outside his front door in the night, flooding not only his floors but his ceiling as the geyser rained down on his house, smashing through the roof. The poor sod is now holed up in a serviced apartment for god knows how long while repairs are done. Perhaps an invisible main has burst quietly under my house; that would explain why my bill for a household of two people with virtually no garden is costing me almost $2000 a year. You have to hand it to SA Water: not only are our water bills some of the highest in the nation, but now so is our actual water - anywhere from six to 20 metres. First published in The Advertiser on May 14, 2016. CLICK HERE to view the original article.
# Miro - an RSS based video player application # Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 # Participatory Culture Foundation # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # # In addition, as a special exception, the copyright holders give # permission to link the code of portions of this program with the OpenSSL # library. # # You must obey the GNU General Public License in all respects for all of # the code used other than OpenSSL. If you modify file(s) with this # exception, you may extend this exception to your version of the file(s), # but you are not obligated to do so. If you do not wish to do so, delete # this exception statement from your version. If you delete this exception # statement from all source files in the program, then also delete it here. """miro.frontends.widgets.gtk.controls -- Contains the ControlBox and CustomControl classes. These handle the custom buttons/sliders used during playback. """ from __future__ import division import math import gtk import gobject from miro.frontends.widgets.gtk import wrappermap from miro.frontends.widgets.gtk.base import Widget from miro.frontends.widgets.gtk.simple import Label, Image from miro.frontends.widgets.gtk.drawing import (CustomDrawingMixin, Drawable, ImageSurface) from miro.plat.frontends.widgets import timer from miro.frontends.widgets import widgetconst class CustomControlMixin(CustomDrawingMixin): def do_expose_event(self, event): CustomDrawingMixin.do_expose_event(self, event) if self.is_focus(): style = self.get_style() style.paint_focus(self.window, self.state, event.area, self, None, self.allocation.x, self.allocation.y, self.allocation.width, self.allocation.height) class CustomButtonWidget(CustomControlMixin, gtk.Button): def draw(self, wrapper, context): if self.is_active(): wrapper.state = 'pressed' elif self.state == gtk.STATE_PRELIGHT: wrapper.state = 'hover' else: wrapper.state = 'normal' wrapper.draw(context, wrapper.layout_manager) self.set_focus_on_click(False) def is_active(self): return self.state == gtk.STATE_ACTIVE class ContinuousCustomButtonWidget(CustomButtonWidget): def is_active(self): return (self.state == gtk.STATE_ACTIVE or wrappermap.wrapper(self).button_down) class DragableCustomButtonWidget(CustomButtonWidget): def __init__(self): CustomButtonWidget.__init__(self) self.button_press_x = None self.set_events(self.get_events() | gtk.gdk.POINTER_MOTION_MASK) def do_button_press_event(self, event): self.button_press_x = event.x self.last_drag_event = None gtk.Button.do_button_press_event(self, event) def do_button_release_event(self, event): self.button_press_x = None gtk.Button.do_button_release_event(self, event) def do_motion_notify_event(self, event): DRAG_THRESHOLD = 15 if self.button_press_x is None: # button not down return if (self.last_drag_event != 'right' and event.x > self.button_press_x + DRAG_THRESHOLD): 
wrappermap.wrapper(self).emit('dragged-right') self.last_drag_event = 'right' elif (self.last_drag_event != 'left' and event.x < self.button_press_x - DRAG_THRESHOLD): wrappermap.wrapper(self).emit('dragged-left') self.last_drag_event = 'left' def do_clicked(self): # only emit clicked if we didn't emit dragged-left or dragged-right if self.last_drag_event is None: wrappermap.wrapper(self).emit('clicked') class _DragInfo(object): """Info about the start of a drag. Attributes: - button: button that started the drag - start_pos: position of the slider - click_pos: position of the click Note that start_pos and click_pos will be different if the user clicks inside the slider. """ def __init__(self, button, start_pos, click_pos): self.button = button self.start_pos = start_pos self.click_pos = click_pos class CustomScaleMixin(CustomControlMixin): def __init__(self): CustomControlMixin.__init__(self) self.drag_info = None self.min = self.max = 0.0 def get_range(self): return self.min, self.max def set_range(self, min, max): self.min = float(min) self.max = float(max) gtk.Range.set_range(self, min, max) def is_continuous(self): return wrappermap.wrapper(self).is_continuous() def is_horizontal(self): # this comes from a mixin pass def gtk_scale_class(self): if self.is_horizontal(): return gtk.HScale else: return gtk.VScale def get_slider_pos(self, value=None): if value is None: value = self.get_value() if self.is_horizontal(): size = self.allocation.width else: size = self.allocation.height ratio = (float(value) - self.min) / (self.max - self.min) start_pos = self.slider_size() / 2.0 return start_pos + ratio * (size - self.slider_size()) def slider_size(self): return wrappermap.wrapper(self).slider_size() def _event_pos(self, event): """Get the position of an event. If we are horizontal, this will be the x coordinate. If we are vertical, the y. 
""" if self.is_horizontal(): return event.x else: return event.y def do_button_press_event(self, event): if self.drag_info is not None: return current_pos = self.get_slider_pos() event_pos = self._event_pos(event) pos_difference = abs(current_pos - event_pos) # only move the slider if the click was outside its boundaries # (#18840) if pos_difference > self.slider_size() / 2.0: self.move_slider(event_pos) current_pos = event_pos self.drag_info = _DragInfo(event.button, current_pos, event_pos) self.grab_focus() wrappermap.wrapper(self).emit('pressed') def do_motion_notify_event(self, event): if self.drag_info is not None: event_pos = self._event_pos(event) delta = event_pos - self.drag_info.click_pos self.move_slider(self.drag_info.start_pos + delta) def move_slider(self, new_pos): """Move the slider so that it's centered on new_pos.""" if self.is_horizontal(): size = self.allocation.width else: size = self.allocation.height slider_size = self.slider_size() new_pos -= slider_size / 2 size -= slider_size ratio = max(0, min(1, float(new_pos) / size)) self.set_value(ratio * (self.max - self.min)) wrappermap.wrapper(self).emit('moved', self.get_value()) if self.is_continuous(): wrappermap.wrapper(self).emit('changed', self.get_value()) def handle_drag_out_of_bounds(self): if not self.is_continuous(): self.set_value(self.start_value) def do_button_release_event(self, event): if self.drag_info is None or event.button != self.drag_info.button: return self.drag_info = None if (self.is_continuous and (0 <= event.x < self.allocation.width) and (0 <= event.y < self.allocation.height)): wrappermap.wrapper(self).emit('changed', self.get_value()) wrappermap.wrapper(self).emit('released') def do_scroll_event(self, event): wrapper = wrappermap.wrapper(self) if self.is_horizontal(): if event.direction == gtk.gdk.SCROLL_UP: event.direction = gtk.gdk.SCROLL_DOWN elif event.direction == gtk.gdk.SCROLL_DOWN: event.direction = gtk.gdk.SCROLL_UP if (wrapper._scroll_step is not None and event.direction in (gtk.gdk.SCROLL_UP, gtk.gdk.SCROLL_DOWN)): # handle the scroll ourself if event.direction == gtk.gdk.SCROLL_DOWN: delta = wrapper._scroll_step else: delta = -wrapper._scroll_step self.set_value(self.get_value() + delta) else: # let GTK handle the scroll self.gtk_scale_class().do_scroll_event(self, event) # Treat mouse scrolls as if the user clicked on the new position wrapper.emit('pressed') wrapper.emit('changed', self.get_value()) wrapper.emit('released') def do_move_slider(self, scroll): if self.is_horizontal(): if scroll == gtk.SCROLL_STEP_UP: scroll = gtk.SCROLL_STEP_DOWN elif scroll == gtk.SCROLL_STEP_DOWN: scroll = gtk.SCROLL_STEP_UP elif scroll == gtk.SCROLL_PAGE_UP: scroll = gtk.SCROLL_PAGE_DOWN elif scroll == gtk.SCROLL_PAGE_DOWN: scroll = gtk.SCROLL_PAGE_UP elif scroll == gtk.SCROLL_START: scroll = gtk.SCROLL_END elif scroll == gtk.SCROLL_END: scroll = gtk.SCROLL_START return self.gtk_scale_class().do_move_slider(self, scroll) class CustomHScaleWidget(CustomScaleMixin, gtk.HScale): def __init__(self): CustomScaleMixin.__init__(self) gtk.HScale.__init__(self) def is_horizontal(self): return True class CustomVScaleWidget(CustomScaleMixin, gtk.VScale): def __init__(self): CustomScaleMixin.__init__(self) gtk.VScale.__init__(self) def is_horizontal(self): return False gobject.type_register(CustomButtonWidget) gobject.type_register(ContinuousCustomButtonWidget) gobject.type_register(DragableCustomButtonWidget) gobject.type_register(CustomHScaleWidget) gobject.type_register(CustomVScaleWidget) class 
CustomControlBase(Drawable, Widget): def __init__(self): Widget.__init__(self) Drawable.__init__(self) self._gtk_cursor = None self._entry_handlers = None def _connect_enter_notify_handlers(self): if self._entry_handlers is None: self._entry_handlers = [ self.wrapped_widget_connect('enter-notify-event', self.on_enter_notify), self.wrapped_widget_connect('leave-notify-event', self.on_leave_notify), self.wrapped_widget_connect('button-release-event', self.on_click) ] def _disconnect_enter_notify_handlers(self): if self._entry_handlers is not None: for handle in self._entry_handlers: self._widget.disconnect(handle) self._entry_handlers = None def set_cursor(self, cursor): if cursor == widgetconst.CURSOR_NORMAL: self._gtk_cursor = None self._disconnect_enter_notify_handlers() elif cursor == widgetconst.CURSOR_POINTING_HAND: self._gtk_cursor = gtk.gdk.Cursor(gtk.gdk.HAND2) self._connect_enter_notify_handlers() else: raise ValueError("Unknown cursor: %s" % cursor) def on_enter_notify(self, widget, event): self._widget.window.set_cursor(self._gtk_cursor) def on_leave_notify(self, widget, event): if self._widget.window: self._widget.window.set_cursor(None) def on_click(self, widget, event): self.emit('clicked') return True class CustomButton(CustomControlBase): def __init__(self): """Create a new CustomButton. active_image will be displayed while the button is pressed. The image must have the same size. """ CustomControlBase.__init__(self) self.set_widget(CustomButtonWidget()) self.create_signal('clicked') self.forward_signal('clicked') class ContinuousCustomButton(CustomControlBase): def __init__(self): CustomControlBase.__init__(self) self.set_widget(ContinuousCustomButtonWidget()) self.button_down = False self.button_held = False self.timeout = None self.create_signal('clicked') self.create_signal('held-down') self.create_signal('released') self.wrapped_widget_connect('pressed', self.on_pressed) self.wrapped_widget_connect('released', self.on_released) self.wrapped_widget_connect('clicked', self.on_clicked) self.initial_delay = 0.6 self.repeat_delay = 0.3 def set_delays(self, initial_delay, repeat_delay): self.initial_delay = initial_delay self.repeat_delay = repeat_delay def on_pressed(self, widget): if self.timeout: timer.cancel(self.timeout) self.button_down = True self.button_held = False self.timeout = timer.add(self.initial_delay, self.on_button_hold) def on_button_hold(self): self.button_held = True self.emit('held-down') self.timeout = timer.add(self.repeat_delay, self.on_button_hold) def on_released(self, widget): if self.timeout: timer.cancel(self.timeout) self.timeout = None self.button_down = self.button_held = False self.queue_redraw() self.emit('released') def on_clicked(self, widget): if self.timeout: timer.cancel(self.timeout) if not self.button_held: self.emit('clicked') class DragableCustomButton(CustomControlBase): def __init__(self): CustomControlBase.__init__(self) self.set_widget(DragableCustomButtonWidget()) self.create_signal('clicked') self.create_signal('dragged-left') self.create_signal('dragged-right') class CustomSlider(CustomControlBase): def __init__(self): CustomControlBase.__init__(self) self.create_signal('pressed') self.create_signal('released') self.create_signal('changed') self.create_signal('moved') self._scroll_step = None if self.is_horizontal(): self.set_widget(CustomHScaleWidget()) else: self.set_widget(CustomVScaleWidget()) self.wrapped_widget_connect('move-slider', self.on_slider_move) def on_slider_move(self, widget, scrolltype): self.emit('changed', 
widget.get_value()) self.emit('moved', widget.get_value()) def get_value(self): return self._widget.get_value() def set_value(self, value): self._widget.set_value(value) def get_range(self): return self._widget.get_range() def get_slider_pos(self, value=None): """Get the position for the slider for our current value. This will return position that the slider should be centered on to display the value. It will be the x coordinate if is_horizontal() is True and the y coordinate otherwise. This method takes into acount the size of the slider when calculating the position. The slider position will start at (slider_size / 2) and will end (slider_size / 2) px before the end of the widget. :param value: value to get the position for. Defaults to the current value """ return self._widget.get_slider_pos(value) def set_range(self, min_value, max_value): self._widget.set_range(min_value, max_value) # set_digits controls the precision of the scale by limiting changes # to a certain number of digits. If the range is [0, 1], this code # will give us 4 digits of precision, which seems reasonable. range = max_value - min_value self._widget.set_digits(int(round(math.log10(10000.0 / range)))) def set_increments(self, small_step, big_step, scroll_step=None): """Set the increments to scroll. :param small_step: scroll amount for up/down :param big_step: scroll amount for page up/page down. :param scroll_step: scroll amount for mouse wheel, or None to make this 2 times the small step """ self._widget.set_increments(small_step, big_step) self._scroll_step = scroll_step def to_miro_volume(value): """Convert from 0 to 1.0 to 0.0 to MAX_VOLUME. """ if value == 0: return 0.0 return value * widgetconst.MAX_VOLUME def to_gtk_volume(value): """Convert from 0.0 to MAX_VOLUME to 0 to 1.0. """ if value > 0.0: value = (value / widgetconst.MAX_VOLUME) return value if hasattr(gtk.VolumeButton, "get_popup"): # FIXME - Miro on Windows has an old version of gtk (2.16) and # doesn't have the get_popup method. Once we upgrade and # fix that, we can take out the hasattr check. class VolumeMuter(Label): """Empty space that has a clicked signal so it can be dropped in place of the VolumeMuter. """ def __init__(self): Label.__init__(self) self.create_signal("clicked") class VolumeSlider(Widget): """VolumeSlider that uses the gtk.VolumeButton(). """ def __init__(self): Widget.__init__(self) self.set_widget(gtk.VolumeButton()) self.wrapped_widget_connect('value-changed', self.on_value_changed) self._widget.get_popup().connect("hide", self.on_hide) self.create_signal('changed') self.create_signal('released') def on_value_changed(self, *args): value = self.get_value() self.emit('changed', value) def on_hide(self, *args): self.emit('released') def get_value(self): value = self._widget.get_property('value') return to_miro_volume(value) def set_value(self, value): value = to_gtk_volume(value) self._widget.set_property('value', value) class ClickableImageButton(CustomButton): """Image that can send clicked events. If max_width and/or max_height are specified, resizes the image proportionally such that all constraints are met. 
""" def __init__(self, image_path, max_width=None, max_height=None): CustomButton.__init__(self) self.max_width = max_width self.max_height = max_height self.image = None self._width, self._height = None, None if image_path: self.set_path(image_path) self.set_cursor(widgetconst.CURSOR_POINTING_HAND) def set_path(self, path): image = Image(path) if self.max_width: image = image.resize_for_space(self.max_width, self.max_height) self.image = ImageSurface(image) self._width, self._height = image.width, image.height def size_request(self, layout): w = self._width h = self._height if not w: w = self.max_width if not h: h = self.max_height return w, h def draw(self, context, layout): if self.image: self.image.draw(context, 0, 0, self._width, self._height) w = self._width h = self._height if not w: w = self.max_width if not h: h = self.max_height w = min(context.width, w) h = min(context.height, h) context.rectangle(0, 0, w, h) context.set_color((0, 0, 0)) # black context.set_line_width(1) context.stroke()
We'll Meet Again is aimed at reuniting long-lost friends and family from the Dorset area. If you have managed to track down anyone using this service, or know of any other success stories relating to it, we would love to hear them. Use the form to submit your own message and read current submissions below. PHIL MATTHEWS has tracked down his old friends Bryn Davies, Tommy Jensen and Paul Elms. BRYN DAVIES has tracked down his old friend Phil Matthews, via our ex-pats section. The men worked together when Bryn contracted the carpet fitting from the Woolco store in Castle Lane. Ian says: “Many funny memories from incidents while working in people’s homes and from our social life. The days of: The Outlook, Maison Royale, Buccaneer Bar etc.” IAN MCKINNON has e-mailed us to say he has made contact with his long-lost friend Jan and has spoken to her on the phone for the first time in 43 years. JOAN HUNTER (nee MARTIN) has made contact with her best friend after 35 years apart - thanks to We’ll Meet Again. Her friend is now living in Australia and Joan is in Canada. LYNN HANGER has been reunited with her cousin Annette St John, thanks to We’ll Meet Again readers. Lynn said: “I had been trying to trace her myself for many years. Many thanks to everyone who contacted me.” TONY O'HARA and old friend YVONNE MARSH are back in touch for the first time in 38 years, thanks to the Daily Echo and We’ll Meet Again. Tony says: “We are now conversing, with both our partners’ knowledge and understanding, and gradually going through a detailed life history since we last saw each other. This has meant so much to me. I have spent many hours on the Internet hoping that somehow I could find her; alas, with her new married name I stood little chance. That was until her friend saw the article and took the time to tell Yvonne. My thanks to that friend.” STEVE PITTMAN says ‘thank you’ to the people who cared enough to make We'll Meet Again possible: “I have managed to get in touch with many of the school pals. Words cannot express the satisfaction of re-kindling the memories of some 60 to 65 years of time, long past. Through clicks of the mouse it shows that we should never give up - it is possible. Places and faces once more become visible, even though dimmed somewhat with the passing of time. Keep up the good work!” MARIAN FERN (nee SCOTT) has heard from two people she knew many years ago: Janet, a friend from junior school days (about 50 years ago), and a workmate from the early 60s. “Wonderful to hear from them after all these years. What a marvelous thing the Internet is. I only wish I had not resisted learning how to use a computer for so long. If I could give anyone of 55+ years advice, it would be to grab this technology with both hands, and learn how to use it to the fullest.” TRACEY HACKMAN managed to track down her cousin Peter, who lives in Victoria, Australia. His side of the family emigrated in 1966. The Hackmans are a big Weymouth family. Norwegian KRISTINE BREKKE managed to track down James Hanslip of Bournemouth thanks to We’ll Meet Again. MAUREEN CAPPER and her friend MURIEL JELLET, now living in Canada, are back in touch after many years, thanks to We’ll Meet Again. Maureen has struck lucky a second time with Searchline. She emailed us to say she had heard from her old friend Terry: “I am in Lincolnshire for a few months looking after an old friend who is ill, and I do miss my home in Poole, so thank you, Echo, for bringing home a little bit closer to me.”
WE’LL MEET AGAIN has helped DIANE AND ROY PHILLIPS get in touch with two different friends from their motorbike days in the 60s. They are emailing each other on a regular basis and are very grateful for the help. BRENDA DAVIDSON emailed us to say: “What a wonderful site this is! My husband and I have found our old friends Edwin and Joan Simpson. Their son, who no longer lives in the Bournemouth/Poole area, was putting an ad in the Bournemouth Echo to sell a car when his wife decided to browse through the lost friends section, and lo and behold, there was my ad for their family. The son contacted us through e-mail and it was such a joyous occasion. We have spoken on the phone, and plans are in the making for visiting each other. It has been more than 40 years since we saw each other. Thank you for such a great service.” We'll Meet Again has played a part in helping to improve the safety of American children in Houston, Texas. Brenda Davidson of Houston used the service to try and track down some old friends in Dorset and as a result met Alec Smith of Bournemouth. Mr Smith helps out at the Streetwise Centre, a street scene simulation that helps children to become aware of the dangers lurking in their community, and he showed Mrs Davidson around the East Howe centre. She was very impressed and is hopeful a similar project can be set up in Texas. DIANE PHILLIPS emailed us to say she managed to get in touch with old neighbours, who emigrated to Australia 30 years ago. And I. J. PALMER has found a cousin in Canada. They lost contact 63 years ago.
#-*- coding: utf-8 -*-

from sklearn.cluster import KMeans as sk_KMeans

from .clustering import Clustering


class KMeans(Clustering):
    """Thin wrapper around sklearn.cluster.KMeans."""

    def __init__(self, data, n_clusters=8, init='k-means++', n_init=10,
                 max_iter=300, tol=0.0001, precompute_distances='auto',
                 verbose=0, random_state=None, copy_x=True, n_jobs=1,
                 algorithm='auto'):
        super(KMeans, self).__init__()
        self.data = data
        self.n_clusters = n_clusters
        self.init = init
        self.n_init = n_init
        self.max_iter = max_iter
        self.tol = tol
        self.precompute_distances = precompute_distances
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs
        self.algorithm = algorithm

    def execute(self):
        """Builds the clustering model."""
        self.model = sk_KMeans(n_clusters=self.n_clusters,
                               init=self.init,
                               n_init=self.n_init,
                               max_iter=self.max_iter,
                               tol=self.tol,
                               precompute_distances=self.precompute_distances,
                               verbose=self.verbose,
                               random_state=self.random_state,
                               copy_x=self.copy_x,
                               n_jobs=self.n_jobs,
                               algorithm=self.algorithm).fit(self.data)
        self.clusters = super().make_clusters(self.data, self.model.labels_)

    @property
    def labels_(self):
        """Returns the cluster labels of the dataset elements."""
        return self.model.labels_

    @property
    def clusters_(self):
        """Returns a dictionary keyed by cluster index."""
        return self.clusters

    @property
    def model_(self):
        """Returns the fitted clustering model."""
        # Returning self.model_ here would recurse forever; return the
        # underlying sklearn estimator instead.
        return self.model
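# --- Minimal usage sketch (illustrative only) --------------------------------
# The points below are made up; the Clustering base class in .clustering is
# not shown here, so make_clusters() is simply assumed to build the dict that
# clusters_ returns. Run with the package context (python -m ...) because of
# the relative import above.
if __name__ == '__main__':
    import numpy as np

    sample = np.array([[1.0, 2.0], [1.5, 1.8], [5.0, 8.0], [8.0, 8.0]])
    km = KMeans(sample, n_clusters=2, random_state=0)
    km.execute()            # fits sklearn's KMeans and groups the rows
    print(km.labels_)       # per-row cluster labels, e.g. [0 0 1 1]
    print(km.clusters_)     # dict keyed by cluster index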
Looking for something more extreme than the typical face mask? We've literally got you covered with this highly detailed, three-dimensional samurai warrior skeletal face mask! Terrorize your buddies at your next airsoft match with this soldier skeletal face mask. It is made of super durable ABS composite material with a distressed matte iron finish. The attention to detail is incredible, and it will convey the seriousness of your game! Fully adjustable; one size.
# -*- coding: utf-8 -*- # © 2014-2015 ACSONE SA/NV (<http://acsone.eu>) # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html). import xlwt from openerp.report import report_sxw from openerp.addons.report_xls.report_xls import report_xls import logging _logger = logging.getLogger(__name__) class MisBuilderXlsParser(report_sxw.rml_parse): def __init__(self, cr, uid, name, context): super(MisBuilderXlsParser, self).__init__( cr, uid, name, context=context) self.context = context class MisBuilderXls(report_xls): def __init__(self, name, table, rml=False, parser=False, header=True, store=False): super(MisBuilderXls, self).__init__( name, table, rml, parser, header, store) # Cell Styles _xs = self.xls_styles # header rh_cell_format = _xs['bold'] + _xs['fill'] + \ _xs['borders_all'] + _xs['right'] self.rh_cell_style = xlwt.easyxf(rh_cell_format) self.rh_cell_style_date = xlwt.easyxf( rh_cell_format, num_format_str=report_xls.date_format) # lines self.mis_rh_cell_style = xlwt.easyxf( _xs['borders_all'] + _xs['bold'] + _xs['fill']) def generate_xls_report(self, _p, _xs, data, objects, wb): report_name = objects[0].name ws = wb.add_sheet(report_name[:31]) ws.panes_frozen = True ws.remove_splits = True ws.portrait = 0 # Landscape ws.fit_width_to_pages = 1 row_pos = 0 # set print header/footer ws.header_str = self.xls_headers['standard'] ws.footer_str = self.xls_footers['standard'] # Title c_specs = [ ('report_name', 1, 0, 'text', report_name), ] row_data = self.xls_row_template(c_specs, ['report_name']) row_pos = self.xls_write_row( ws, row_pos, row_data, row_style=xlwt.easyxf(_xs['xls_title'])) row_pos += 1 # get the computed result of the report data = self.pool.get('mis.report.instance').compute( self.cr, self.uid, objects[0].id, self.context) # Column headers header_name_list = [''] col_specs_template = {'': {'header': [1, 30, 'text', ''], 'header_date': [1, 1, 'text', '']}} for col in data['header'][0]['cols']: col_specs_template[col['name']] = {'header': [1, 30, 'text', col['name']], 'header_date': [1, 1, 'text', col['date']]} header_name_list.append(col['name']) c_specs = map( lambda x: self.render(x, col_specs_template, 'header'), header_name_list) row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs]) row_pos = self.xls_write_row( ws, row_pos, row_data, row_style=self.rh_cell_style, set_column_size=True) c_specs = map(lambda x: self.render( x, col_specs_template, 'header_date'), header_name_list) row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs]) row_pos = self.xls_write_row( ws, row_pos, row_data, row_style=self.rh_cell_style_date) ws.set_horz_split_pos(row_pos) ws.set_vert_split_pos(1) for line in data['content']: col = 0 ws.write(row_pos, col, line['kpi_name'], self.mis_rh_cell_style) for value in line['cols']: col += 1 num_format_str = '#' if value.get('dp'): num_format_str += '.' num_format_str += '0' * int(value['dp']) if value.get('prefix'): num_format_str = '"%s"' % value['prefix'] + num_format_str if value.get('suffix'): num_format_str += ' "%s"' % value['suffix'] kpi_cell_style = xlwt.easyxf( _xs['borders_all'] + _xs['right'], num_format_str=num_format_str) if value.get('val'): val = value['val'] if value.get('is_percentage'): val = val / 0.01 ws.write(row_pos, col, val, kpi_cell_style) else: ws.write(row_pos, col, value['val_r'], kpi_cell_style) row_pos += 1 MisBuilderXls('report.mis.report.instance.xls', 'mis.report.instance', parser=MisBuilderXlsParser)
I installed a Lexx slip-on exhaust on my 2007 CRF250X a couple of weeks ago, and yesterday I had to unbolt it to lift up the subframe. The muffler slid right off. I'm wondering if I didn't apply enough silicone. Either that, or I should have allowed the silicone to cure for a day or so before starting the engine. However, the last step in the install instructions is to start the engine, bring it up to temperature, and check for leaks. I'm thinking maybe this compromised the bond. Thoughts? Are there aftermarket headers available that clamp onto the muffler so I don't have to worry about this every time I lift the subframe?
#!/usr/bin/env python """ Tool for compiling Android toolchain ==================================== This tool intend to replace all the previous tools/ in shell script. """ from __future__ import print_function import sys from sys import stdout from os.path import join, dirname, realpath, exists, isdir, basename from os import listdir, unlink, makedirs, environ, chdir, getcwd, walk, uname import os import zipfile import tarfile import importlib import io import json import glob import shutil import fnmatch import re from functools import wraps from datetime import datetime from distutils.spawn import find_executable try: from urllib.request import FancyURLopener except ImportError: from urllib import FancyURLopener import argparse from appdirs import user_data_dir curdir = dirname(__file__) sys.path.insert(0, join(curdir, "tools", "external")) import sh import logging import contextlib import imp from colorama import Style, Fore logger = logging.getLogger('p4a') # logger.setLevel(logging.DEBUG) if not hasattr(logger, 'touched'): # Necessary as importlib reloads # this, which would add a second # handler and reset the level logger.setLevel(logging.INFO) logger.touched = True ch = logging.StreamHandler(stdout) formatter = logging.Formatter('{}[%(levelname)s]{}: %(message)s'.format( Style.BRIGHT, Style.RESET_ALL)) ch.setFormatter(formatter) logger.addHandler(ch) # logger.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) info = logger.info debug = logger.debug warning = logger.warning IS_PY3 = sys.version_info[0] >= 3 def info_main(*args): logger.info(''.join([Style.BRIGHT, Fore.GREEN] + list(args) + [Style.RESET_ALL, Fore.RESET])) def shprint(command, *args, **kwargs): kwargs["_iter"] = True kwargs["_out_bufsize"] = 1 kwargs["_err_to_out"] = True if len(logger.handlers) > 1: logger.removeHandler(logger.handlers[1]) command_path = str(command).split('/') command_string = command_path[-1] # if len(command_path) > 1: # command_string = '.../' + command_string print('args are', args) string = ' '.join(['running', Style.DIM, command_string] + list(args)) # If logging is not in DEBUG mode, trim the command if necessary if logger.level > logging.DEBUG: short_string = string if len(string) > 100: short_string = string[:100] + '... (and {} more)'.format(len(string) - 100) logger.info(short_string + Style.RESET_ALL) else: logger.debug(string + Style.RESET_ALL) output = command(*args, **kwargs) need_closing_newline = False for line in output: if logger.level > logging.DEBUG: string = '\r' + 'working ... ' + line[:100].replace('\n', '').rstrip() + ' ...' if len(string) < 20: continue if len(string) < 120: string = string + ' '*(120 - len(string)) sys.stdout.write(string) sys.stdout.flush() need_closing_newline = True else: logger.debug(''.join([Style.DIM, '\t', line.rstrip()])) if logger.level > logging.DEBUG and need_closing_newline: print() return output # shprint(sh.ls, '-lah') # exit(1) def require_prebuilt_dist(func): '''Decorator for ToolchainCL methods. If present, the method will automatically make sure a dist has been built before continuing or, if no dists are present or can be obtained, will raise an error. 
''' @wraps(func) def wrapper_func(self, args): ctx = self.ctx ctx.prepare_build_environment() dist = self._dist if dist.needs_build: info('No dist exists that meets your requirements, so one will ' 'be built.') args = build_dist_from_args(ctx, dist, args) func(self, args) return wrapper_func def get_directory(filename): if filename.endswith('.tar.gz'): return basename(filename[:-7]) elif filename.endswith('.tgz'): return basename(filename[:-4]) elif filename.endswith('.tar.bz2'): return basename(filename[:-8]) elif filename.endswith('.tbz2'): return basename(filename[:-5]) elif filename.endswith('.zip'): return basename(filename[:-4]) info('Unknown file extension for {}'.format(filename)) exit(1) def which(program, path_env): import os def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in path_env.split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None @contextlib.contextmanager def current_directory(new_dir): cur_dir = getcwd() logger.info(''.join((Fore.CYAN, '-> directory context ', new_dir, Fore.RESET))) chdir(new_dir) yield logger.info(''.join((Fore.CYAN, '<- directory context ', cur_dir, Fore.RESET))) chdir(cur_dir) def cache_execution(f): def _cache_execution(self, *args, **kwargs): state = self.ctx.state key = "{}.{}".format(self.name, f.__name__) force = kwargs.pop("force", False) if args: for arg in args: key += ".{}".format(arg) key_time = "{}.at".format(key) if key in state and not force: print("# (ignored) {} {}".format(f.__name__.capitalize(), self.name)) return print("{} {}".format(f.__name__.capitalize(), self.name)) f(self, *args, **kwargs) state[key] = True state[key_time] = str(datetime.utcnow()) return _cache_execution class ChromeDownloader(FancyURLopener): version = ( 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 ' '(KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36') urlretrieve = ChromeDownloader().retrieve class JsonStore(object): """Replacement of shelve using json, needed for support python 2 and 3. 
""" def __init__(self, filename): super(JsonStore, self).__init__() self.filename = filename self.data = {} if exists(filename): try: with io.open(filename, encoding='utf-8') as fd: self.data = json.load(fd) except ValueError: print("Unable to read the state.db, content will be replaced.") def __getitem__(self, key): return self.data[key] def __setitem__(self, key, value): self.data[key] = value self.sync() def __delitem__(self, key): del self.data[key] self.sync() def __contains__(self, item): return item in self.data def get(self, item, default=None): return self.data.get(item, default) def keys(self): return self.data.keys() def remove_all(self, prefix): for key in self.data.keys()[:]: if not key.startswith(prefix): continue del self.data[key] self.sync() def sync(self): # http://stackoverflow.com/questions/12309269/write-json-data-to-file-in-python/14870531#14870531 if IS_PY3: with open(self.filename, 'w') as fd: json.dump(self.data, fd, ensure_ascii=False) else: with io.open(self.filename, 'w', encoding='utf-8') as fd: fd.write(unicode(json.dumps(self.data, ensure_ascii=False))) class Arch(object): def __init__(self, ctx): super(Arch, self).__init__() self.ctx = ctx def __str__(self): return self.arch @property def include_dirs(self): return [ "{}/{}".format( self.ctx.include_dir, d.format(arch=self)) for d in self.ctx.include_dirs] def get_env(self): include_dirs = [ "-I{}/{}".format( self.ctx.include_dir, d.format(arch=self)) for d in self.ctx.include_dirs] env = {} env["CFLAGS"] = " ".join([ "-DANDROID", "-mandroid", "-fomit-frame-pointer", "--sysroot", self.ctx.ndk_platform]) env["CXXFLAGS"] = env["CFLAGS"] env["LDFLAGS"] = " ".join(['-lm']) py_platform = sys.platform if py_platform in ['linux2', 'linux3']: py_platform = 'linux' if self.ctx.ndk_ver == 'r5b': toolchain_prefix = 'arm-eabi' toolchain_version = '4.4.0' elif self.ctx.ndk_ver[:2] in ('r7', 'r8'): toolchain_prefix = 'arm-linux-androideabi' toolchain_version = '4.4.3' elif self.ctx.ndk_ver[:2] == 'r9': toolchain_prefix = 'arm-linux-androideabi' toolchain_version = '4.9' elif self.ctx.ndk_ver[:3] == 'r10': toolchain_prefix = 'arm-linux-androideabi' toolchain_version = '4.9' else: warning('Error: NDK not supported by these tools?') exit(1) env['TOOLCHAIN_PREFIX'] = toolchain_prefix env['TOOLCHAIN_VERSION'] = toolchain_version cc = find_executable('{toolchain_prefix}-gcc'.format( toolchain_prefix=toolchain_prefix), path=environ['PATH']) if cc is None: warning('Couldn\'t find executable for CC. Exiting.') exit(1) env['CC'] = '{toolchain_prefix}-gcc {cflags}'.format( toolchain_prefix=toolchain_prefix, cflags=env['CFLAGS']) env['CXX'] = '{toolchain_prefix}-g++ {cxxflags}'.format( toolchain_prefix=toolchain_prefix, cxxflags=env['CXXFLAGS']) # AND: Not sure if these are still important env['AR'] = '{}-ar'.format(toolchain_prefix) env['RANLIB'] = '{}-ranlib'.format(toolchain_prefix) env['LD'] = '{}-ld'.format(toolchain_prefix) env['STRIP'] = '{}-strip --strip-unneeded'.format(toolchain_prefix) env['MAKE'] = 'make -j5' env['READELF'] = '{}-readelf'.format(toolchain_prefix) hostpython_recipe = Recipe.get_recipe('hostpython2', self.ctx) # AND: This hardcodes python version 2.7, needs fixing # AND: This also hardcodes armeabi, which isn't even correct, don't forget to fix! env['BUILDLIB_PATH'] = join(hostpython_recipe.get_build_dir('armeabi'), 'build', 'lib.linux-{}-2.7'.format(uname()[-1])) env['PATH'] = environ['PATH'] # AND: This stuff is set elsewhere in distribute.sh. Does that matter? 
        env['ARCH'] = self.arch

        # env['LIBLINK_PATH'] = join(self.ctx.build_dir, 'other_builds', 'objects')
        # ensure_dir(env['LIBLINK_PATH'])  # AND: This should be elsewhere

        return env


class ArchAndroid(Arch):
    arch = "armeabi"

# class ArchSimulator(Arch):
#     sdk = "iphonesimulator"
#     arch = "i386"
#     triple = "i386-apple-darwin11"
#     version_min = "-miphoneos-version-min=6.0.0"
#     sysroot = sh.xcrun("--sdk", "iphonesimulator", "--show-sdk-path").strip()


# class Arch64Simulator(Arch):
#     sdk = "iphonesimulator"
#     arch = "x86_64"
#     triple = "x86_64-apple-darwin13"
#     version_min = "-miphoneos-version-min=7.0"
#     sysroot = sh.xcrun("--sdk", "iphonesimulator", "--show-sdk-path").strip()


# class ArchIOS(Arch):
#     sdk = "iphoneos"
#     arch = "armv7"
#     triple = "arm-apple-darwin11"
#     version_min = "-miphoneos-version-min=6.0.0"
#     sysroot = sh.xcrun("--sdk", "iphoneos", "--show-sdk-path").strip()


# class Arch64IOS(Arch):
#     sdk = "iphoneos"
#     arch = "arm64"
#     triple = "aarch64-apple-darwin13"
#     version_min = "-miphoneos-version-min=7.0"
#     sysroot = sh.xcrun("--sdk", "iphoneos", "--show-sdk-path").strip()


class Graph(object):
    # Taken from python-for-android/depsort
    def __init__(self):
        # `graph`: dict that maps each package to a set of its dependencies.
        self.graph = {}

    def add(self, dependent, dependency):
        """Add a dependency relationship to the graph"""
        self.graph.setdefault(dependent, set())
        self.graph.setdefault(dependency, set())
        if dependent != dependency:
            self.graph[dependent].add(dependency)

    def add_optional(self, dependent, dependency):
        """Add an optional (ordering only) dependency relationship to the graph

        Only call this after all mandatory requirements are added
        """
        if dependent in self.graph and dependency in self.graph:
            self.add(dependent, dependency)

    def find_order(self):
        """Do a topological sort on a dependency graph

        :Parameters:
        :Returns:
            iterator, sorted items from first to last
        """
        graph = dict((k, set(v)) for k, v in self.graph.items())
        while graph:
            # Find all items without a parent
            leftmost = [l for l, s in graph.items() if not s]
            if not leftmost:
                raise ValueError('Dependency cycle detected! %s' % graph)
            # If there is more than one, sort them for predictable order
            leftmost.sort()
            for result in leftmost:
                # Yield and remove them from the graph
                yield result
                graph.pop(result)
                for bset in graph.values():
                    bset.discard(result)


class Context(object):
    '''A build context.
If anything will be built, an instance this class will be instantiated and used to hold all the build state.''' env = environ.copy() root_dir = None # the filepath of toolchain.py storage_dir = None # the root dir where builds and dists will be stored build_dir = None # in which bootstraps are copied for building and recipes are built dist_dir = None # the Android project folder where everything ends up libs_dir = None javaclass_dir = None ccache = None # whether to use ccache cython = None # the cython interpreter name sdk_dir = None # the directory of the android sdk ndk_dir = None # the directory of the android ndk ndk_platform = None # the ndk platform directory ndk_ver = None # the ndk version, defaults to r9 android_api = None # the android api target, defaults to 14 dist_name = None bootstrap = None bootstrap_build_dir = None recipe_build_order = None # Will hold the list of all built recipes @property def packages_path(self): '''Where packages are downloaded before being unpacked''' return join(self.storage_dir, 'packages') @property def templates_dir(self): return join(self.root_dir, 'templates') def setup_dirs(self): '''Calculates all the storage and build dirs, and makes sure the directories exist where necessary.''' self.root_dir = realpath(dirname(__file__)) # AND: TODO: Allow the user to set the build_dir self.storage_dir = user_data_dir('python-for-android') # self.storage_dir = self.root_dir self.build_dir = join(self.storage_dir, 'build') self.libs_dir = join(self.build_dir, 'libs') self.dist_dir = join(self.storage_dir, 'dists') self.javaclass_dir = join(self.build_dir, 'java') ensure_dir(self.storage_dir) ensure_dir(self.build_dir) ensure_dir(self.libs_dir) ensure_dir(self.dist_dir) ensure_dir(self.javaclass_dir) @property def android_api(self): if self._android_api is None: raise ValueError('Tried to access android_api but it has not ' 'been set - this should not happen, something ' 'went wrong!') return self._android_api @android_api.setter def android_api(self, value): self._android_api = value @property def ndk_ver(self): if self._ndk_ver is None: raise ValueError('Tried to access android_api but it has not ' 'been set - this should not happen, something ' 'went wrong!') return self._ndk_ver @ndk_ver.setter def ndk_ver(self, value): self._ndk_ver = value @property def sdk_dir(self): if self._sdk_dir is None: raise ValueError('Tried to access android_api but it has not ' 'been set - this should not happen, something ' 'went wrong!') return self._sdk_dir @sdk_dir.setter def sdk_dir(self, value): self._sdk_dir = value @property def ndk_dir(self): if self._ndk_dir is None: raise ValueError('Tried to access android_api but it has not ' 'been set - this should not happen, something ' 'went wrong!') return self._ndk_dir @ndk_dir.setter def ndk_dir(self, value): self._ndk_dir = value def prepare_build_environment(self): '''Checks that build dependencies exist and sets internal variables for the Android SDK etc. ..warning:: This *must* be called before trying any build stuff ''' if self._build_env_prepared: return ok = True # AND: We should check for ndk-build and ant? 
self.android_api = environ.get('ANDROIDAPI', '14') self.ndk_ver = environ.get('ANDROIDNDKVER', 'r9') self.sdk_dir = environ.get('ANDROIDSDK', None) if self.sdk_dir is None: ok = False self.ndk_dir = environ.get('ANDROIDNDK', None) if self.ndk_dir is None: ok = False else: self.ndk_platform = join( self.ndk_dir, 'platforms', 'android-{}'.format(self.android_api), 'arch-arm') print('ndk platform', self.ndk_platform) if not exists(self.ndk_platform): warning('ndk_platform doesn\'t exist') ok = False virtualenv = None if virtualenv is None: virtualenv = sh.which('virtualenv2') if virtualenv is None: virtualenv = sh.which('virtualenv-2.7') if virtualenv is None: virtualenv = sh.which('virtualenv') if virtualenv is None: raise IOError('Couldn\'t find a virtualenv executable, ' 'you must install this to use p4a.') self.virtualenv = virtualenv info('Found virtualenv at {}'.format(virtualenv)) # path to some tools self.ccache = sh.which("ccache") if not self.ccache: info("ccache is missing, the build will not be optimized in the future.") for cython_fn in ("cython2", "cython-2.7", "cython"): cython = sh.which(cython_fn) if cython: self.cython = cython break if not self.cython: ok = False warning("Missing requirement: cython is not installed") # Modify the path so that sh finds modules appropriately py_platform = sys.platform if py_platform in ['linux2', 'linux3']: py_platform = 'linux' if self.ndk_ver == 'r5b': toolchain_prefix = 'arm-eabi' toolchain_version = '4.4.0' elif self.ndk_ver[:2] in ('r7', 'r8'): toolchain_prefix = 'arm-linux-androideabi' toolchain_version = '4.4.3' elif self.ndk_ver[:2] == 'r9': toolchain_prefix = 'arm-linux-androideabi' toolchain_version = '4.9' elif self.ndk_ver[:3] == 'r10': toolchain_prefix = 'arm-linux-androideabi' toolchain_version = '4.9' else: warning('Error: NDK not supported by these tools?') exit(1) environ['PATH'] = ('{ndk_dir}/toolchains/{toolchain_prefix}-{toolchain_version}/' 'prebuilt/{py_platform}-x86/bin/:{ndk_dir}/toolchains/' '{toolchain_prefix}-{toolchain_version}/prebuilt/' '{py_platform}-x86_64/bin/:{ndk_dir}:{sdk_dir}/' 'tools:{path}').format( sdk_dir=self.sdk_dir, ndk_dir=self.ndk_dir, toolchain_prefix=toolchain_prefix, toolchain_version=toolchain_version, py_platform=py_platform, path=environ.get('PATH')) # AND: Are these necessary? Where to check for and and ndk-build? 
# check the basic tools for tool in ("pkg-config", "autoconf", "automake", "libtool", "tar", "bzip2", "unzip", "make", "gcc", "g++"): if not sh.which(tool): warning("Missing requirement: {} is not installed".format( tool)) if not ok: sys.exit(1) def __init__(self): super(Context, self).__init__() self.include_dirs = [] self._build_env_prepared = False # root of the toolchain self.setup_dirs() # AND: Currently only the Android architecture is supported self.archs = ( ArchAndroid(self), ) ensure_dir(join(self.build_dir, 'bootstrap_builds')) ensure_dir(join(self.build_dir, 'other_builds')) # where everything else is built # # remove the most obvious flags that can break the compilation self.env.pop("LDFLAGS", None) self.env.pop("ARCHFLAGS", None) self.env.pop("CFLAGS", None) # set the state self.state = JsonStore(join(self.dist_dir, "state.db")) def prepare_bootstrap(self, bs): bs.ctx = self self.bootstrap = bs self.bootstrap.prepare_build_dir() self.bootstrap_build_dir = self.bootstrap.build_dir def prepare_dist(self, name): self.dist_name = name self.bootstrap.prepare_dist_dir(self.dist_name) def get_site_packages_dir(self, arch=None): '''Returns the location of site-packages in the python-install build dir. ''' # AND: This *must* be replaced with something more general in # order to support multiple python versions and/or multiple # archs. return join(self.build_dir, 'python-install', 'lib', 'python2.7', 'site-packages') def get_libs_dir(self, arch): '''The libs dir for a given arch.''' ensure_dir(join(self.libs_dir, arch)) # AND: See warning: warning('Ensuring libs dir in get_libs_dir, should fix this ' 'to ensure elsewhere') return join(self.libs_dir, arch) class Distribution(object): '''State container for information about a distribution (i.e. an Android project). This is separate from a Bootstrap because the Bootstrap is concerned with building and populating the dist directory, whereas the dist itself could also come from e.g. a binary download. ''' ctx = None name = None # A name identifying the dist. May not be None. needs_build = False # Whether the dist needs compiling url = None dist_dir = None # Where the dist dir ultimately is. Should not be None. recipes = [] description = '' # A long description def __init__(self, ctx): self.ctx = ctx def __str__(self): return '<Distribution: name {} with recipes ({})>'.format( # self.name, ', '.join([recipe.name for recipe in self.recipes])) self.name, ', '.join(self.recipes)) def __repr__(self): return str(self) @classmethod def get_distribution(cls, ctx, name=None, recipes=[], allow_download=True, force_build=False, allow_build=True, extra_dist_dirs=[], require_perfect_match=False): '''Takes information about the distribution, and decides what kind of distribution it will be. If parameters conflict (e.g. a dist with that name already exists, but doesn't have the right set of recipes), an error is thrown. Parameters ---------- name : str The name of the distribution. If a dist with this name already ' exists, it will be used. recipes : list The recipes that the distribution must contain. allow_download : bool Whether binary dists may be downloaded. allow_build : bool Whether the distribution may be built from scratch if necessary. This is always False on e.g. Windows. force_download: bool If True, only downloaded dists are considered. force_build : bool If True, the dist is forced to be built locally. extra_dist_dirs : list Any extra directories in which to search for dists. 
require_perfect_match : bool If True, will only match distributions with precisely the correct set of recipes. ''' # AND: This whole function is a bit hacky, it needs checking # properly to make sure it follows logically correct # possibilities existing_dists = Distribution.get_distributions(ctx) needs_build = True # whether the dist needs building, will be returned possible_dists = existing_dists # 0) Check if a dist with that name already exists if name is not None and name: possible_dists = [d for d in possible_dists if d.name == name] # 1) Check if any existing dists meet the requirements _possible_dists = [] for dist in possible_dists: for recipe in recipes: if recipe not in dist.recipes: break else: _possible_dists.append(dist) possible_dists = _possible_dists if possible_dists: info('Of the existing distributions, the following meet ' 'the given requirements:') for dist in possible_dists: info('\tname {}: recipes ({})'.format(dist.name, ', '.join(dist.recipes))) else: info('No existsing dists meet the given requirements!') # If any dist has perfect recipes, return it for dist in possible_dists: if force_build: continue if (set(dist.recipes) == set(recipes) or (set(recipes).issubset(set(dist.recipes)) and not require_perfect_match)): info('{} has compatible recipes, using this one'.format(dist.name)) return dist assert len(possible_dists) < 2 if not name and possible_dists: info('Asked for dist with name {} with recipes ({}), but a dist ' 'with this name already exists and has incompatible recipes ' '({})'.format(name, ', '.join(recipes), ', '.join(possible_dists[0].recipes))) info('No compatible dist found, so exiting.') exit(1) # # 2) Check if any downloadable dists meet the requirements # online_dists = [('testsdl2', ['hostpython2', 'sdl2_image', # 'sdl2_mixer', 'sdl2_ttf', # 'python2', 'sdl2', # 'pyjniussdl2', 'kivysdl2'], # 'https://github.com/inclement/sdl2-example-dist/archive/master.zip'), # ] # _possible_dists = [] # for dist_name, dist_recipes, dist_url in online_dists: # for recipe in recipes: # if recipe not in dist_recipes: # break # else: # dist = Distribution(ctx) # dist.name = dist_name # dist.url = dist_url # _possible_dists.append(dist) # # if _possible_dists # If we got this far, we need to build a new dist dist = Distribution(ctx) dist.needs_build = True if not name: filen = 'unnamed_dist_{}' i = 1 while exists(join(ctx.dist_dir, filen.format(i))): i += 1 name = filen.format(i) dist.name = name dist.dist_dir = join(ctx.dist_dir, dist.name) dist.recipes = recipes return dist @classmethod def get_distributions(cls, ctx, extra_dist_dirs=[]): '''Returns all the distributions found locally.''' if extra_dist_dirs: warning('extra_dist_dirs argument to get_distributions is not yet implemented') exit(1) dist_dir = ctx.dist_dir folders = glob.glob(join(dist_dir, '*')) for dir in extra_dist_dirs: folders.extend(glob.glob(join(dir, '*'))) dists = [] for folder in folders: if exists(join(folder, 'dist_info.json')): with open(join(folder, 'dist_info.json')) as fileh: dist_info = json.load(fileh) dist = cls(ctx) dist.name = folder.split('/')[-1] # AND: also equal # to # dist_info['dist_name']...which # one should we # use? dist.dist_dir = folder dist.needs_build = False dist.recipes = dist_info['recipes'] dists.append(dist) return dists def save_info(self): ''' Save information about the distribution in its dist_dir. 
''' with current_directory(self.dist_dir): info('Saving distribution info') with open('dist_info.json', 'w') as fileh: json.dump({'dist_name': self.name, 'recipes': self.ctx.recipe_build_order}, fileh) def load_info(self): with current_directory(self.dist_dir): filen = 'dist_info.json' if not exists(filen): return None with open('dist_info.json', 'r') as fileh: dist_info = json.load(fileh) return dist_info class Bootstrap(object): '''An Android project template, containing recipe stuff for compilation and templated fields for APK info. ''' name = '' jni_subdir = '/jni' ctx = None build_dir = None dist_dir = None dist_name = None distribution = None recipe_depends = [] # Other things a Bootstrap might need to track (maybe separately): # ndk_main.c # whitelist.txt # blacklist.txt @property def dist_dir(self): if self.distribution is None: warning('Tried to access {}.dist_dir, but {}.distribution ' 'is None'.format(self, self)) exit(1) return self.distribution.dist_dir @property def jni_dir(self): return self.name + self.jni_subdir def get_build_dir(self): return join(self.ctx.build_dir, 'bootstrap_builds', self.name) def get_dist_dir(self, name): return join(self.ctx.dist_dir, name) @property def name(self): modname = self.__class__.__module__ return modname.split(".", 2)[-1] def prepare_build_dir(self): '''Ensure that a build dir exists for the recipe. This same single dir will be used for building all different archs.''' self.build_dir = self.get_build_dir() shprint(sh.cp, '-r', join(self.bootstrap_dir, 'build'), # join(self.ctx.root_dir, # 'bootstrap_templates', # self.name), self.build_dir) def prepare_dist_dir(self, name): # self.dist_dir = self.get_dist_dir(name) ensure_dir(self.dist_dir) def run_distribute(self): # print('Default bootstrap being used doesn\'t know how to distribute...failing.') # exit(1) with current_directory(self.dist_dir): info('Saving distribution info') with open('dist_info.json', 'w') as fileh: json.dump({'dist_name': self.ctx.dist_name, 'bootstrap': self.ctx.bootstrap.name, 'recipes': self.ctx.recipe_build_order}, fileh) # AND: This method must be replaced by manual dir setting, in # order to allow for user dirs # def get_bootstrap_dir(self): # return(dirname(__file__)) @classmethod def list_bootstraps(cls): forbidden_dirs = ('__pycache__', ) bootstraps_dir = join(dirname(__file__), 'bootstraps') for name in listdir(bootstraps_dir): if name in forbidden_dirs: continue filen = join(bootstraps_dir, name) if isdir(filen): yield name @classmethod def get_bootstrap(cls, name, ctx): '''Returns an instance of a bootstrap with the given name. This is the only way you should access a bootstrap class, as it sets the bootstrap directory correctly. ''' # AND: This method will need to check user dirs, and access # bootstraps in a slightly different way if not hasattr(cls, 'bootstraps'): cls.bootstraps = {} if name in cls.bootstraps: return cls.bootstraps[name] mod = importlib.import_module('pythonforandroid.bootstraps.{}'.format(name)) if len(logger.handlers) > 1: logger.removeHandler(logger.handlers[1]) bootstrap = mod.bootstrap bootstrap.bootstrap_dir = join(ctx.root_dir, 'bootstraps', name) bootstrap.ctx = ctx return bootstrap class Recipe(object): url = None '''The address from which the recipe may be downloaded. This is not essential, it may be omitted if the source is available some other way, such as via the :class:`IncludedFilesBehaviour` mixin. 
If the url includes the version, you may (and probably should) replace this with ``{version}``, which will automatically be replaced by the :attr:`version` string during download. .. note:: Methods marked (internal) are used internally and you probably don't need to call them, but they are available if you want. ''' version = None '''A string giving the version of the software the recipe describes, e.g. ``2.0.3`` or ``master``.''' md5sum = None '''The md5sum of the source from the :attr:`url`. Non-essential, but you should try to include this, it is used to check that the download finished correctly. ''' depends = [] '''A list containing the names of any recipes that this recipe depends on. ''' conflicts = [] # AND: Not currently used '''A list containing the names of any recipes that are known to be incompatible with this one.''' # patches = [] # '''Filepaths (relative to the recipe script) for any pathches that are # to be applied. By default, these are applied in prebuild_arch, so # if you override this but want to use patches then don't forget to # call super(). # name = None # name for the recipe dir archs = ['armeabi'] # will android use this? @property def versioned_url(self): '''A property returning the url of the recipe with ``{version}`` replaced by the :attr:`url`. If accessing the url, you should use this property, *not* access the url directly.''' if self.url is None: return None return self.url.format(version=self.version) def download_file(self, url, filename, cwd=None): """ (internal) Download an ``url`` to a ``filename``. """ if not url: return def report_hook(index, blksize, size): if size <= 0: progression = '{0} bytes'.format(index * blksize) else: progression = '{0:.2f}%'.format( index * blksize * 100. / float(size)) stdout.write('- Download {}\r'.format(progression)) stdout.flush() if cwd: filename = join(cwd, filename) if exists(filename): unlink(filename) info('Downloading {} from {}'.format(self.name, url)) urlretrieve(url, filename, report_hook) return filename def extract_file(self, filename, cwd): """ (internal) Extract the `filename` into the directory `cwd`. """ if not filename: return info("Extract {} into {}".format(filename, cwd)) if filename.endswith(".tgz") or filename.endswith(".tar.gz"): shprint(sh.tar, "-C", cwd, "-xvzf", filename) elif filename.endswith(".tbz2") or filename.endswith(".tar.bz2"): shprint(sh.tar, "-C", cwd, "-xvjf", filename) elif filename.endswith(".zip"): zf = zipfile.ZipFile(filename) zf.extractall(path=cwd) zf.close() else: warning("Error: cannot extract, unrecognized extension for {}".format( filename)) raise Exception() # def get_archive_rootdir(self, filename): # if filename.endswith(".tgz") or filename.endswith(".tar.gz") or \ # filename.endswith(".tbz2") or filename.endswith(".tar.bz2"): # archive = tarfile.open(filename) # root = archive.next().path.split("/") # return root[0] # elif filename.endswith(".zip"): # with zipfile.ZipFile(filename) as zf: # return dirname(zf.namelist()[0]) # else: # print("Error: cannot detect root directory") # print("Unrecognized extension for {}".format(filename)) # raise Exception() def apply_patch(self, filename): """ Apply a patch from the current recipe directory into the current build directory. 
""" info("Applying patch {}".format(filename)) filename = join(self.recipe_dir, filename) # AND: get_build_dir shouldn't need to hardcode armeabi sh.patch("-t", "-d", self.get_build_dir('armeabi'), "-p1", "-i", filename) def copy_file(self, filename, dest): info("Copy {} to {}".format(filename, dest)) filename = join(self.recipe_dir, filename) dest = join(self.build_dir, dest) shutil.copy(filename, dest) def append_file(self, filename, dest): info("Append {} to {}".format(filename, dest)) filename = join(self.recipe_dir, filename) dest = join(self.build_dir, dest) with open(filename, "rb") as fd: data = fd.read() with open(dest, "ab") as fd: fd.write(data) # def has_marker(self, marker): # """ # Return True if the current build directory has the marker set # """ # return exists(join(self.build_dir, ".{}".format(marker))) # def set_marker(self, marker): # """ # Set a marker info the current build directory # """ # with open(join(self.build_dir, ".{}".format(marker)), "w") as fd: # fd.write("ok") # def delete_marker(self, marker): # """ # Delete a specific marker # """ # try: # unlink(join(self.build_dir, ".{}".format(marker))) # except: # pass @property def name(self): modname = self.__class__.__module__ return modname.split(".", 2)[-1] # @property # def archive_fn(self): # bfn = basename(self.url.format(version=self.version)) # fn = "{}/{}-{}".format( # self.ctx.cache_dir, # self.name, bfn) # return fn @property def filtered_archs(self): '''Return archs of self.ctx that are valid build archs for the Recipe.''' result = [] for arch in self.ctx.archs: if not self.archs or (arch.arch in self.archs): result.append(arch) return result def get_build_container_dir(self, arch): '''Given the arch name, returns the directory where it will be built.''' return join(self.ctx.build_dir, 'other_builds', self.name, arch) def get_build_dir(self, arch): '''Given the arch name, returns the directory where the downloaded/copied package will be built.''' # if self.url is not None: # return join(self.get_build_container_dir(arch), # get_directory(self.versioned_url)) return join(self.get_build_container_dir(arch), self.name) def get_recipe_dir(self): # AND: Redundant, an equivalent property is already set by get_recipe return join(self.ctx.root_dir, 'recipes', self.name) # Public Recipe API to be subclassed if needed def ensure_build_container_dir(self): info_main('Preparing build dir for {}'.format(self.name)) build_dir = self.get_build_container_dir('armeabi') ensure_dir(build_dir) def download_if_necessary(self): info_main('Downloading {}'.format(self.name)) user_dir = environ.get('P4A_{}_DIR'.format(self.name.lower())) if user_dir is not None: info('P4A_{}_DIR is set, skipping download for {}'.format( self.name, self.name)) return self.download() def download(self): if self.url is None: info('Skipping {} download as no URL is set'.format(self.name)) return url = self.versioned_url shprint(sh.mkdir, '-p', join(self.ctx.packages_path, self.name)) with current_directory(join(self.ctx.packages_path, self.name)): filename = shprint(sh.basename, url).stdout[:-1].decode('utf-8') do_download = True marker_filename = '.mark-{}'.format(filename) if exists(filename): if not exists(marker_filename): shprint(sh.rm, filename) elif self.md5sum: current_md5 = shprint(sh.md5sum, filename) print('downloaded md5: {}'.format(current_md5)) print('expected md5: {}'.format(self.md5sum)) print('md5 not handled yet, exiting') exit(1) else: do_download = False info('{} download already cached, skipping'.format(self.name)) # Should 
check headers here! warning('Should check headers here! Skipping for now.') # If we got this far, we will download if do_download: print('Downloading {} from {}'.format(self.name, url)) shprint(sh.rm, '-f', marker_filename) self.download_file(url, filename) shprint(sh.touch, marker_filename) if self.md5sum is not None: print('downloaded md5: {}'.format(current_md5)) print('expected md5: {}'.format(self.md5sum)) print('md5 not handled yet, exiting') exit(1) def unpack(self, arch): info_main('Unpacking {} for {}'.format(self.name, arch)) build_dir = self.get_build_container_dir(arch) user_dir = environ.get('P4A_{}_DIR'.format(self.name.lower())) if user_dir is not None: info('P4A_{}_DIR exists, symlinking instead'.format( self.name.lower())) # AND: Currently there's something wrong if I use ln, fix this warning('Using git clone instead of symlink...fix this!') if exists(self.get_build_dir(arch)): return shprint(sh.rm, '-rf', build_dir) shprint(sh.mkdir, '-p', build_dir) shprint(sh.rmdir, build_dir) ensure_dir(build_dir) # shprint(sh.ln, '-s', user_dir, join(build_dir, get_directory(self.versioned_url))) shprint(sh.git, 'clone', user_dir, self.get_build_dir('armeabi')) return if self.url is None: info('Skipping {} unpack as no URL is set'.format(self.name)) return filename = shprint(sh.basename, self.versioned_url).stdout[:-1].decode('utf-8') # AND: TODO: Use tito's better unpacking method with current_directory(build_dir): directory_name = self.get_build_dir(arch) # AND: Could use tito's get_archive_rootdir here if not exists(directory_name) or not isdir(directory_name): extraction_filename = join(self.ctx.packages_path, self.name, filename) if (extraction_filename.endswith('.tar.gz') or extraction_filename.endswith('.tgz')): sh.tar('xzf', extraction_filename) root_directory = shprint( sh.tar, 'tzf', extraction_filename).stdout.decode( 'utf-8').split('\n')[0].strip('/') if root_directory != directory_name: shprint(sh.mv, root_directory, directory_name) elif (extraction_filename.endswith('.tar.bz2') or extraction_filename.endswith('.tbz2')): info('Extracting {} at {}'.format(extraction_filename, filename)) sh.tar('xjf', extraction_filename) root_directory = sh.tar('tjf', extraction_filename).stdout.decode( 'utf-8').split('\n')[0].strip('/') if root_directory != directory_name: shprint(sh.mv, root_directory, directory_name) elif extraction_filename.endswith('.zip'): sh.unzip(extraction_filename) import zipfile fileh = zipfile.ZipFile(extraction_filename, 'r') root_directory = fileh.filelist[0].filename.strip('/') if root_directory != directory_name: shprint(sh.mv, root_directory, directory_name) else: raise Exception('Could not extract {} download, it must be .zip, ' '.tar.gz or .tar.bz2') else: info('{} is already unpacked, skipping'.format(self.name)) def get_recipe_env(self, arch=None): """Return the env specialized for the recipe """ if arch is None: arch = self.filtered_archs[0] return arch.get_env() # @property # def archive_root(self): # key = "{}.archive_root".format(self.name) # value = self.ctx.state.get(key) # if not key: # value = self.get_archive_rootdir(self.archive_fn) # self.ctx.state[key] = value # return value # def execute(self): # if self.custom_dir: # self.ctx.state.remove_all(self.name) # self.download() # self.extract() # self.build_all() # AND: Will need to change how this works # @property # def custom_dir(self): # """Check if there is a variable name to specify a custom version / # directory to use instead of the current url. 
# """ # d = environ.get("P4A_{}_DIR".format(self.name.lower())) # if not d: # return # if not exists(d): # return # return d # def prebuild(self): # self.prebuild_arch(self.ctx.archs[0]) # AND: Need to change # # this to support # # multiple archs # def build(self): # self.build_arch(self.ctx.archs[0]) # Same here! # def postbuild(self): # self.postbuild_arch(self.ctx.archs[0]) def prebuild_arch(self, arch): prebuild = "prebuild_{}".format(arch.arch) if hasattr(self, prebuild): getattr(self, prebuild)() else: print('{} has no {}, skipping'.format(self.name, prebuild)) def should_build(self): '''Should perform any necessary test and return True only if it needs building again. ''' return True def build_arch(self, arch): build = "build_{}".format(arch.arch) if hasattr(self, build): getattr(self, build)() def postbuild_arch(self, arch): postbuild = "postbuild_{}".format(arch.arch) if hasattr(self, postbuild): getattr(self, postbuild)() def prepare_build_dir(self, arch): '''Copies the recipe data into a build dir for the given arch. By default, this unpacks a downloaded recipe. You should override it (or use a Recipe subclass with different behaviour) if you want to do something else. ''' self.unpack(arch) @classmethod def list_recipes(cls): forbidden_dirs = ('__pycache__', ) recipes_dir = join(dirname(__file__), "recipes") for name in listdir(recipes_dir): if name in forbidden_dirs: continue fn = join(recipes_dir, name) if isdir(fn): yield name @classmethod def get_recipe(cls, name, ctx): if not hasattr(cls, "recipes"): cls.recipes = {} if name in cls.recipes: return cls.recipes[name] mod = importlib.import_module("pythonforandroid.recipes.{}".format(name)) if len(logger.handlers) > 1: logger.removeHandler(logger.handlers[1]) recipe = mod.recipe recipe.recipe_dir = join(ctx.root_dir, "recipes", name) recipe.ctx = ctx return recipe class IncludedFilesBehaviour(object): '''Recipe mixin class that will automatically unpack files included in the recipe directory.''' src_filename = None def prepare_build_dir(self, arch): if self.src_filename is None: print('IncludedFilesBehaviour failed: no src_filename specified') exit(1) shprint(sh.cp, '-a', join(self.get_recipe_dir(), self.src_filename), self.get_build_dir(arch)) class NDKRecipe(Recipe): '''A recipe class for recipes built in an Android project jni dir with an Android.mk. These are not cached separatly, but built in the bootstrap's own building directory. In the future they should probably also copy their contents from a standalone set of ndk recipes, but for now the bootstraps include all their recipe code. 
''' dir_name = None # The name of the recipe build folder in the jni dir def get_build_container_dir(self, arch): return self.get_jni_dir() def get_build_dir(self, arch): if self.dir_name is None: raise ValueError('{} recipe doesn\'t define a dir_name, but ' 'this is necessary'.format(self.name)) return join(self.get_build_container_dir(arch), self.dir_name) def get_jni_dir(self): return join(self.ctx.bootstrap.build_dir, 'jni') # def download_if_necessary(self): # info_main('Downloading {}'.format(self.name)) # info('{} is an NDK recipe, it is alread included in the ' # 'bootstrap (for now), so skipping'.format(self.name)) # # Do nothing; in the future an NDKRecipe can copy its # # contents to the bootstrap build dir, but for now each # # bootstrap already includes available recipes (as was # # already the case in p4a) # def prepare_build_dir(self, arch): # info_main('Unpacking {} for {}'.format(self.name, arch)) # info('{} is included in the bootstrap, unpacking currently ' # 'unnecessary, so skipping'.format(self.name)) class PythonRecipe(Recipe): site_packages_name = None # The name of the module in # site_packages (i.e. as a python # module) def should_build(self): # AND: This should be different for each arch and use some # kind of data store to know what has been built in a given # python env print('name is', self.site_packages_name, type(self)) name = self.site_packages_name if name is None: name = self.name if exists(join(self.ctx.get_site_packages_dir(), name)): info('Python package already exists in site-packages') return False print('site packages', self.ctx.get_site_packages_dir()) info('{} apparently isn\'t already in site-packages'.format(name)) return True def build_arch(self, arch): '''Install the Python module by calling setup.py install with the target Python dir.''' super(PythonRecipe, self).build_arch(arch) self.install_python_package() # @cache_execution # def install(self): # self.install_python_package() # self.reduce_python_package() def install_python_package(self, name=None, env=None, is_dir=True): '''Automate the installation of a Python package (or a cython package where the cython components are pre-built).''' arch = self.filtered_archs[0] if name is None: name = self.name if env is None: env = self.get_recipe_env(arch) info('Installing {} into site-packages'.format(self.name)) with current_directory(self.get_build_dir(arch.arch)): hostpython = sh.Command(self.ctx.hostpython) shprint(hostpython, 'setup.py', 'install', '-O2', _env=env) # def install_python_package(self, name=None, env=None, is_dir=True): # """Automate the installation of a Python package into the target # site-packages. # It will works with the first filtered_archs, and the name of the recipe. # """ # arch = self.filtered_archs[0] # if name is None: # name = self.name # if env is None: # env = self.get_recipe_env(arch) # print("Install {} into the site-packages".format(name)) # build_dir = self.get_build_dir(arch.arch) # chdir(build_dir) # hostpython = sh.Command(self.ctx.hostpython) # iosbuild = join(build_dir, "iosbuild") # shprint(hostpython, "setup.py", "install", "-O2", # "--prefix", iosbuild, # _env=env) # dest_dir = join(self.ctx.site_packages_dir, name) # if is_dir: # if exists(dest_dir): # shutil.rmtree(dest_dir) # func = shutil.copytree # else: # func = shutil.copy # func( # join(iosbuild, "lib", # self.ctx.python_ver_dir, "site-packages", name), # dest_dir) # def reduce_python_package(self): # """Feel free to remove things you don't want in the final # site-packages. 
# """ # pass class CompiledComponentsPythonRecipe(PythonRecipe): pre_build_ext = False def build_arch(self, arch): '''Build any cython components, then install the Python module by calling setup.py install with the target Python dir. ''' Recipe.build_arch(self, arch) # AND: Having to directly call the # method like this is nasty...could # use tito's method of having an # install method that always runs # after everything else but isn't # used by a normal recipe. self.build_compiled_components(arch) self.install_python_package() def build_compiled_components(self, arch): info('Building compiled components in {}'.format(self.name)) env = self.get_recipe_env(arch) with current_directory(self.get_build_dir(arch.arch)): hostpython = sh.Command(self.ctx.hostpython) shprint(hostpython, 'setup.py', 'build_ext', '-v') build_dir = glob.glob('build/lib.*')[0] shprint(sh.find, build_dir, '-name', '"*.o"', '-exec', env['STRIP'], '{}', ';', _env=env) class CythonRecipe(PythonRecipe): pre_build_ext = False cythonize = True def build_arch(self, arch): '''Build any cython components, then install the Python module by calling setup.py install with the target Python dir. ''' Recipe.build_arch(self, arch) # AND: Having to directly call the # method like this is nasty...could # use tito's method of having an # install method that always runs # after everything else but isn't # used by a normal recipe. self.build_cython_components(arch) self.install_python_package() def build_cython_components(self, arch): # AND: Should we use tito's cythonize methods? How do they work? info('Cythonizing anything necessary in {}'.format(self.name)) env = self.get_recipe_env(arch) with current_directory(self.get_build_dir(arch.arch)): hostpython = sh.Command(self.ctx.hostpython) info('Trying first build of {} to get cython files: this is ' 'expected to fail'.format(self.name)) try: shprint(hostpython, 'setup.py', 'build_ext', _env=env) except sh.ErrorReturnCode_1: print() info('{} first build failed (as expected)'.format(self.name)) info('Running cython where appropriate') shprint(sh.find, self.get_build_dir('armeabi'), '-iname', '*.pyx', '-exec', self.ctx.cython, '{}', ';', _env=env) info('ran cython') shprint(hostpython, 'setup.py', 'build_ext', '-v', _env=env) print('stripping') build_lib = glob.glob('./build/lib*') shprint(sh.find, build_lib[0], '-name', '*.o', '-exec', env['STRIP'], '{}', ';', _env=env) print('stripped!?') # exit(1) # def cythonize_file(self, filename): # if filename.startswith(self.build_dir): # filename = filename[len(self.build_dir) + 1:] # print("Cythonize {}".format(filename)) # cmd = sh.Command(join(self.ctx.root_dir, "tools", "cythonize.py")) # shprint(cmd, filename) # def cythonize_build(self): # if not self.cythonize: # return # root_dir = self.build_dir # for root, dirnames, filenames in walk(root_dir): # for filename in fnmatch.filter(filenames, "*.pyx"): # self.cythonize_file(join(root, filename)) # def biglink(self): # dirs = [] # for root, dirnames, filenames in walk(self.build_dir): # if fnmatch.filter(filenames, "*.so.libs"): # dirs.append(root) # cmd = sh.Command(join(self.ctx.root_dir, "tools", "biglink")) # shprint(cmd, join(self.build_dir, "lib{}.a".format(self.name)), *dirs) def get_recipe_env(self, arch): env = super(CythonRecipe, self).get_recipe_env(arch) env['LDFLAGS'] = env['LDFLAGS'] + ' -L{}'.format( self.ctx.get_libs_dir(arch.arch)) env['LDSHARED'] = join(self.ctx.root_dir, 'tools', 'liblink') env['LIBLINK'] = 'NOTNONE' env['NDKPLATFORM'] = self.ctx.ndk_platform # Every recipe 
uses its own liblink path, object files are collected and biglinked later liblink_path = join(self.get_build_container_dir(arch.arch), 'objects_{}'.format(self.name)) env['LIBLINK_PATH'] = liblink_path ensure_dir(liblink_path) return env def build_recipes(names, ctx): # Put recipes in correct build order graph = Graph() recipe_to_load = set(names) bs = ctx.bootstrap if bs.recipe_depends: info('Bootstrap requires recipes {}'.format(bs.recipe_depends)) recipe_to_load = recipe_to_load.union(set(bs.recipe_depends)) recipe_to_load = list(recipe_to_load) recipe_loaded = [] python_modules = [] while recipe_to_load: name = recipe_to_load.pop(0) if name in recipe_loaded: continue try: recipe = Recipe.get_recipe(name, ctx) except ImportError: info('No recipe named {}; will attempt to install with pip'.format(name)) python_modules.append(name) continue graph.add(name, name) info('Loaded recipe {} (depends on {})'.format(name, recipe.depends)) for depend in recipe.depends: graph.add(name, depend) recipe_to_load += recipe.depends recipe_loaded.append(name) build_order = list(graph.find_order()) info("Recipe build order is {}".format(build_order)) ctx.recipe_build_order = build_order recipes = [Recipe.get_recipe(name, ctx) for name in build_order] # download is arch independent info_main('# Downloading recipes ') for recipe in recipes: recipe.download_if_necessary() for arch in ctx.archs: info_main('# Building all recipes for arch {}'.format(arch.arch)) info_main('# Unpacking recipes') for recipe in recipes: ensure_dir(recipe.get_build_container_dir(arch.arch)) recipe.prepare_build_dir(arch.arch) info_main('# Prebuilding recipes') # 2) prebuild packages for recipe in recipes: info_main('Prebuilding {} for {}'.format(recipe.name, arch.arch)) recipe.prebuild_arch(arch) # 3) build packages info_main('# Building recipes') for recipe in recipes: info_main('Building {} for {}'.format(recipe.name, arch.arch)) if recipe.should_build(): recipe.build_arch(arch) else: info('{} said it is already built, skipping'.format(recipe.name)) # 4) biglink everything # AND: Should make this optional (could use info_main('# Biglinking object files') biglink(ctx, arch) # 5) postbuild packages info_main('# Postbuilding recipes') for recipe in recipes: info_main('Postbuilding {} for {}'.format(recipe.name, arch.arch)) recipe.postbuild_arch(arch) info_main('# Installing pure Python modules') run_pymodules_install(ctx, python_modules) return def run_pymodules_install(ctx, modules): if not modules: info('There are no Python modules to install, skipping') return info('The requirements ({}) don\'t have recipes, attempting to install ' 'them with pip'.format(', '.join(modules))) info('If this fails, it may mean that the module has compiled ' 'components and needs a recipe.') venv = sh.Command(ctx.virtualenv) with current_directory(join(ctx.build_dir)): shprint(venv, '--python=python2.7', 'venv') info('Creating a requirements.txt file for the Python modules') with open('requirements.txt', 'w') as fileh: for module in modules: fileh.write('{}\n'.format(module)) info('Installing Python modules with pip') # AND: This doesn't work yet shprint(sh.bash, '-c', '''"source venv/bin/activate && env CC=/bin/false CXX=/bin/false pip install --target '{}' -r requirements.txt"'''.format(ctx.get_site_packages_dir())) def biglink(ctx, arch): # First, collate object files from each recipe info('Collating object files from each recipe') obj_dir = join(ctx.bootstrap.build_dir, 'collated_objects') ensure_dir(obj_dir) recipes = [Recipe.get_recipe(name, 
ctx) for name in ctx.recipe_build_order] for recipe in recipes: recipe_obj_dir = join(recipe.get_build_container_dir(arch.arch), 'objects_{}'.format(recipe.name)) if not exists(recipe_obj_dir): info('{} recipe has no biglinkable files dir, skipping'.format(recipe.name)) continue files = glob.glob(join(recipe_obj_dir, '*')) if not len(files): info('{} recipe has no biglinkable files, skipping'.format(recipe.name)) info('{} recipe has object files, copying'.format(recipe.name)) files.append(obj_dir) shprint(sh.cp, '-r', *files) # AND: Shouldn't hardcode ArchAndroid! In reality need separate # build dirs for each arch arch = ArchAndroid(ctx) env = ArchAndroid(ctx).get_env() env['LDFLAGS'] = env['LDFLAGS'] + ' -L{}'.format( join(ctx.bootstrap.build_dir, 'obj', 'local', 'armeabi')) if not len(glob.glob(join(obj_dir, '*'))): info('There seem to be no libraries to biglink, skipping.') return info('Biglinking') # bl = sh.Command(join(ctx.root_dir, 'tools', 'biglink')) print('ldflags are', env['LDFLAGS']) # shprint(bl, join(ctx.libs_dir, 'libpymodules.so'), # env['LIBLINK_PATH'], _env=env) biglink_function( join(ctx.libs_dir, 'libpymodules.so'), obj_dir.split(' '), # env['LIBLINK_PATH'].split(' '), # AND: This line should be obselete now extra_link_dirs=[join(ctx.bootstrap.build_dir, 'obj', 'local', 'armeabi')], env=env) def biglink_function(soname, objs_paths, extra_link_dirs=[], env=None): print('objs_paths are', objs_paths) sofiles = [] for directory in objs_paths: for fn in os.listdir(directory): fn = os.path.join(directory, fn) if not fn.endswith(".so.o"): continue if not os.path.exists(fn[:-2] + ".libs"): continue sofiles.append(fn[:-2]) # The raw argument list. args = [ ] for fn in sofiles: afn = fn + ".o" libsfn = fn + ".libs" args.append(afn) with open(libsfn) as fd: data = fd.read() args.extend(data.split(" ")) unique_args = [ ] while args: a = args.pop() if a in ('-L', ): continue if a not in unique_args: unique_args.insert(0, a) for dir in extra_link_dirs: link = '-L{}'.format(dir) if link not in unique_args: unique_args.append(link) # print('Biglink create %s library' % soname) # print('Biglink arguments:') # for arg in unique_args: # print(' %s' % arg) cc_name = env['CC'] cc = sh.Command(cc_name.split()[0]) cc = cc.bake(*cc_name.split()[1:]) shprint(cc, '-shared', '-O3', '-o', soname, *unique_args, _env=env) # args = os.environ['CC'].split() + \ # ['-shared', '-O3', '-o', soname] + \ # unique_args # sys.exit(subprocess.call(args)) def ensure_dir(filename): if not exists(filename): makedirs(filename) def dist_from_args(ctx, dist_args): '''Parses out any distribution-related arguments, and uses them to obtain a Distribution class instance for the build. 
''' return Distribution.get_distribution( ctx, name=dist_args.dist_name, recipes=split_argument_list(dist_args.requirements), allow_download=dist_args.allow_download, allow_build=dist_args.allow_build, extra_dist_dirs=split_argument_list(dist_args.extra_dist_dirs), require_perfect_match=dist_args.require_perfect_match) def build_dist_from_args(ctx, dist, args_list): '''Parses out any bootstrap related arguments, and uses them to build a dist.''' parser = argparse.ArgumentParser( description='Create a newAndroid project') parser.add_argument('--bootstrap', help=('The name of the bootstrap type, \'pygame\' ' 'or \'sdl2\''), default='sdl2') args, unknown = parser.parse_known_args(args_list) bs = Bootstrap.get_bootstrap(args.bootstrap, ctx) info_main('# Creating dist with with {} bootstrap'.format(bs.name)) bs.distribution = dist info('Dist will have name {} and recipes ({})'.format( dist.name, ', '.join(dist.recipes))) ctx.dist_name = bs.distribution.name ctx.prepare_bootstrap(bs) ctx.prepare_dist(ctx.dist_name) recipes = dist.recipes build_recipes(recipes, ctx) ctx.bootstrap.run_distribute() info_main('# Your distribution was created successfully, exiting.') info('Dist can be found at (for now) {}'.format(join(ctx.dist_dir, ctx.dist_name))) return unknown def split_argument_list(l): if not len(l): return [] return re.split(r'[ ,]*', l) # def create_dist class ToolchainCL(object): def __init__(self): self._ctx = None parser = argparse.ArgumentParser( description="Tool for managing the iOS / Python toolchain", usage="""toolchain <command> [<args>] Currently available commands: create Build an android project with all recipes Available commands: Not yet confirmed Planned commands: recipes distributions build_dist symlink_dist copy_dist clean_all status clean_builds clean_download_cache clean_dists """) parser.add_argument("command", help="Command to run") parser.add_argument('--debug', dest='debug', action='store_true', help='Display debug output and all build info') # Options for specifying the Distribution parser.add_argument( '--dist_name', help='The name of the distribution to use or create', default='') parser.add_argument( '--requirements', help='Dependencies of your app, should be recipe names or Python modules', default='') parser.add_argument( '--allow_download', help='Allow binary dist download.', default=False, type=bool) parser.add_argument( '--allow_build', help='Allow compilation of a new distribution.', default=True, type=bool) parser.add_argument( '--force_build', help='Force compilation of a new distribution.', default=False, type=bool) parser.add_argument( '--extra_dist_dirs', help='Directories in which to look for distributions', default='') parser.add_argument( '--require_perfect_match', help=('Whether the dist recipes must ' 'perfectly match those requested.'), type=bool, default=False) args, unknown = parser.parse_known_args(sys.argv[1:]) self.dist_args = args if args.debug: logger.setLevel(logging.DEBUG) # import ipdb # ipdb.set_trace() # AND: Fail nicely if the args aren't handled yet if args.extra_dist_dirs: warning('Received --extra_dist_dirs but this arg currently is not ' 'handled, exiting.') exit(1) if args.allow_download: warning('Received --allow_download but this arg currently is not ' 'handled, exiting.') exit(1) # if args.allow_build: # warning('Received --allow_build but this arg currently is not ' # 'handled, exiting.') # exit(1) if not hasattr(self, args.command): print('Unrecognized command') parser.print_help() exit(1) getattr(self, args.command)(unknown) 
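    # A hedged sketch (comments only, not part of the original file) of how this
    # two-stage parsing is typically driven from the command line. The dist name
    # and requirements below are made-up example values:
    #
    #   python toolchain.py create --dist_name=testproject --requirements=python2,kivy
    #   python toolchain.py apk --dist_name=testproject
    #
    # parse_known_args() consumes the distribution-level options here, and the
    # remaining unknown arguments are forwarded to the selected command method
    # (e.g. create or apk) by the getattr(self, args.command)(unknown) call above.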
# def build(self): # parser = argparse.ArgumentParser( # description="Build the toolchain") # parser.add_argument("recipe", nargs="+", help="Recipe to compile") # parser.add_argument("--arch", help="Restrict compilation to this arch") # args = parser.parse_args(sys.argv[2:]) # ctx = Context() # # if args.arch: # # archs = args.arch.split() # # ctx.archs = [arch for arch in ctx.archs if arch.arch in archs] # # print("Architectures restricted to: {}".format(archs)) # build_recipes(args.recipe, ctx) @property def ctx(self): if self._ctx is None: self._ctx = Context() return self._ctx def recipes(self, args): parser = argparse.ArgumentParser( description="List all the available recipes") parser.add_argument( "--compact", action="store_true", help="Produce a compact list suitable for scripting") parser.add_argument( '--color', type=bool, default=True, help='Whether the output should be coloured') args = parser.parse_args(args) if args.compact: print(" ".join(list(Recipe.list_recipes()))) else: ctx = self.ctx for name in Recipe.list_recipes(): recipe = Recipe.get_recipe(name, ctx) version = str(recipe.version) if args.color: print('{Fore.BLUE}{Style.BRIGHT}{recipe.name:<12} ' '{Style.RESET_ALL}{Fore.LIGHTBLUE_EX}' '{version:<8}{Style.RESET_ALL}'.format( recipe=recipe, Fore=Fore, Style=Style, version=version)) print(' {Fore.GREEN}depends: {recipe.depends}' '{Fore.RESET}'.format(recipe=recipe, Fore=Fore)) if recipe.conflicts: print(' {Fore.RED}conflicts: {recipe.conflicts}' '{Fore.RESET}'.format(recipe=recipe, Fore=Fore)) else: print("{recipe.name:<12} {recipe.version:<8}".format( recipe=recipe)) print(' depends: {recipe.depends}'.format(recipe=recipe)) print(' conflicts: {recipe.conflicts}'.format(recipe=recipe)) def bootstraps(self, args): '''List all the bootstraps available to build with.''' print(list(Bootstrap.list_bootstraps())) def clean_all(self, args): '''Delete all build components; the package cache, package builds, bootstrap builds and distributions.''' parser = argparse.ArgumentParser( description="Clean the build cache, downloads and dists") args = parser.parse_args(args) ctx = Context() if exists(ctx.build_dir): shutil.rmtree(ctx.build_dir) if exists(ctx.dist_dir): shutil.rmtree(ctx.dist_dir) if exists(ctx.packages_path): shutil.rmtree(ctx.packages_path) def clean_dists(self, args): '''Delete all compiled distributions in the internal distribution directory.''' parser = argparse.ArgumentParser( description="Delete any distributions that have been built.") args = parser.parse_args(args) ctx = Context() if exists(ctx.dist_dir): shutil.rmtree(ctx.dist_dir) def clean_builds(self, args): '''Delete all build caches for each recipe. This does *not* delete the package download cache or the final distributions. ''' parser = argparse.ArgumentParser( description="Delete all build files (but not download caches)") args = parser.parse_args(args) ctx = Context() if exists(ctx.dist_dir): shutil.rmtree(ctx.dist_dir) if exists(ctx.build_dir): shutil.rmtree(ctx.build_dir) def clean_download_cache(self, args): ''' Deletes any downloaded recipe packages. This does *not* delete the build caches or final distributions. 
''' parser = argparse.ArgumentParser( description="Delete all download caches") args = parser.parse_args(args) ctx = Context() if exists(ctx.packages_path): shutil.rmtree(ctx.packages_path) # def status(self, args): # parser = argparse.ArgumentParser( # description="Give a status of the build") # args = parser.parse_args(args) # ctx = Context() # # AND: TODO # print('This isn\'t implemented yet, but should list all currently existing ' # 'distributions, the modules they include, and all the build caches.') # exit(1) @require_prebuilt_dist def export_dist(self, args): '''Copies a created dist to an output dir. This makes it easy to navigate to the dist to investigate it or call build.py, though you do not in general need to do this and can use the apk command instead. ''' parser = argparse.ArgumentParser( description='Copy a created dist to a given directory') parser.add_argument('--output', help=('The output dir to copy to'), required=True) args = parser.parse_args(args) ctx = self.ctx dist = dist_from_args(ctx, self.dist_args) if dist.needs_build: info('You asked to export a dist, but there is no dist with suitable ' 'recipes available. For now, you must create one first with ' 'the create argument.') exit(1) shprint(sh.cp, '-r', dist.dist_dir, args.output) @require_prebuilt_dist def symlink_dist(self, args): '''Symlinks a created dist to an output dir. This makes it easy to navigate to the dist to investigate it or call build.py, though you do not in general need to do this and can use the apk command instead. ''' parser = argparse.ArgumentParser( description='Symlink a created dist to a given directory') parser.add_argument('--output', help=('The output dir to copy to'), required=True) args = parser.parse_args(args) ctx = self.ctx dist = dist_from_args(ctx, self.dist_args) if dist.needs_build: info('You asked to symlink a dist, but there is no dist with suitable ' 'recipes available. For now, you must create one first with ' 'the create argument.') exit(1) shprint(sh.ln, '-s', dist.dist_dir, args.output) # def _get_dist(self): # ctx = self.ctx # dist = dist_from_args(ctx, self.dist_args) @property def _dist(self): ctx = self.ctx dist = dist_from_args(ctx, self.dist_args) return dist @require_prebuilt_dist def apk(self, args): '''Create an APK using the given distribution.''' # AND: Need to add a parser here for any extra options # parser = argparse.ArgumentParser( # description='Build an APK') # args = parser.parse_args(args) ctx = self.ctx dist = self._dist # dist = dist_from_args(ctx, self.dist_args) # if dist.needs_build: # info('No dist exists that meets your requirements, so one will ' # 'be built.') # args = build_dist_from_args(ctx, dist, args) build = imp.load_source('build', join(dist.dist_dir, 'build.py')) with current_directory(dist.dist_dir): build.parse_args(args) shprint(sh.ant, 'debug') # AND: This is very crude, needs improving. Also only works # for debug for now. info_main('# Copying APK to current directory') apks = glob.glob(join(dist.dist_dir, 'bin', '*-*-debug.apk')) if len(apks) == 0: raise ValueError('Couldn\'t find the built APK') if len(apks) > 1: info('More than one built APK found...guessing you ' 'just built {}'.format(apks[-1])) shprint(sh.cp, apks[-1], './') @require_prebuilt_dist def create(self, args): '''Create a distribution directory if it doesn't already exist, run any recipes if necessary, and build the apk. 
''' pass # The decorator does this for us # ctx = self.ctx # dist = dist_from_args(ctx, self.dist_args) # if not dist.needs_build: # info('You asked to create a distribution, but a dist with this name ' # 'already exists. If you don\'t want to use ' # 'it, you must delete it and rebuild, or create your ' # 'new dist with a different name.') # exit(1) # info('Ready to create dist {}, contains recipes {}'.format( # dist.name, ', '.join(dist.recipes))) # build_dist_from_args(ctx, dist, args) def print_context_info(self, args): '''Prints some debug information about which system paths python-for-android will internally use for package building, along with information about where the Android SDK and NDK will be called from.''' ctx = Context() for attribute in ('root_dir', 'build_dir', 'dist_dir', 'libs_dir', 'ccache', 'cython', 'sdk_dir', 'ndk_dir', 'ndk_platform', 'ndk_ver', 'android_api'): print('{} is {}'.format(attribute, getattr(ctx, attribute))) def dists(self, args): '''The same as :meth:`distributions`.''' self.distributions(args) def distributions(self, args): '''Lists all distributions currently available (i.e. that have already been built).''' ctx = Context() dists = Distribution.get_distributions(ctx) infos = [] for dist in dists: infos.append('{Fore.GREEN}{Style.BRIGHT}{name}{Style.RESET_ALL}: ' 'includes recipes ({Fore.GREEN}{recipes}' '{Style.RESET_ALL})'.format( name=dist.name, recipes=', '.join(dist.recipes), Fore=Fore, Style=Style)) print('{Style.BRIGHT}Distributions stored internally are:' '{Style.RESET_ALL}'.format(Style=Style, Fore=Fore)) for line in infos: print('\t' + line) if __name__ == "__main__": ToolchainCL()
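A minimal usage sketch of the dependency ordering that build_recipes() performs with the Graph class defined above. The import path and the recipe names are illustrative assumptions rather than values taken from the file; the point is only that find_order() always yields a recipe's dependencies before the recipe itself.

# Hedged example, assuming the module above is importable as
# pythonforandroid.toolchain (path assumed for illustration).
from pythonforandroid.toolchain import Graph

graph = Graph()
graph.add('kivy', 'python2')             # kivy depends on python2
graph.add('kivy', 'sdl2')                # ... and on sdl2
graph.add('sdl2', 'sdl2_image')
graph.add('python2', 'hostpython2')
graph.add('hostpython2', 'hostpython2')  # leaf recipe, no further dependencies

# Dependencies are always yielded before the recipes that need them:
print(list(graph.find_order()))
# -> ['hostpython2', 'sdl2_image', 'python2', 'sdl2', 'kivy']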
I saw a card similar to this on Pinterest (designed by Jolene Harms) and knew we had to make it at our Simple, Beautiful Card Class earlier this month. I featured a few of my favorite retiring stamp sets at our Card Class, and this is one of those sets that I'm REALLY sad to see go! I just love how you can make it look like you watercolored the flowers and leaves with a couple of stamps!! I even tricked a few people at class (they thought we were using our Aquapainter or Blender Pens at first!). If you become a subscriber to my Youtube channel, you'll never miss out on a thing (I promise)!! So, what did you think of this card?? Do you love the stamp set now as much as I do?? I'd love to hear what you thought of this card!! Make sure to use Host Code: 2KQSARS4 when you order! Make sure to order the Penned & Painted Stamp Set before it retires on May 31 (it's just a few days away!!). at the Gathering Inkspiration Stamp Studio (209 E Superior St in Alma, MI) on Wednesday, June 14 anytime from 4-9pm or Friday, June 16 from 1-3pm. We make 4 cards for $15 or the class is FREE with an order! And… mark your calendars going forward…. This class meets the 2nd Wednesday of every month anytime from 4-9pm and the following Friday from 1-3pm. So, if June doesn’t work for you, maybe another month will! Just RSVP to me at amy.frillici@yahoo.com for the next class or to answer any questions you might have.
# -*- coding: utf-8 -*- """Exposes the caffe solvers.""" # pylint: disable=E1101, F0401, C0103, R0913, R0914, W0212, E1121, E0611, W0406 # pylint: disable=duplicate-code, too-many-lines from __future__ import print_function from . import monitoring as _monitoring from . import parallel as _parallel # CAREFUL! This must be imported pre any caffe-related import! from .tools import pbufToPyEnum as _pbufToPyEnum import time as _time import logging as _logging import hashlib import copy from tempfile import NamedTemporaryFile as _NamedTemporaryFile import numpy as _np import google.protobuf.text_format as _gprototext import caffe as _caffe import caffe.proto.caffe_pb2 as _caffe_pb2 #: Describes the type of the solver used. All solver types supported by caffe #: are available. SolverType = _pbufToPyEnum(_caffe_pb2.SolverParameter.SolverType) #: Describes the Phase used. All solver types supported by caffe #: are available. _Phase = _pbufToPyEnum(_caffe_pb2.Phase) _HAS_ITER_SIZE = hasattr(_caffe_pb2.SolverParameter, 'iter_size') try: _ADAM_SOLVER_CLASS = _caffe.AdamSolver _ADAM_SOLVER_ENUM = SolverType.ADAM except AttributeError: # pragma: no cover _ADAM_SOLVER_CLASS = None _ADAM_SOLVER_ENUM = None try: _ADADELTA_SOLVER_CLASS = _caffe.AdaDeltaSolver _ADADELTA_SOLVER_ENUM = SolverType.ADADELTA except AttributeError: # pragma: no cover _ADADELTA_SOLVER_CLASS = None _ADADELTA_SOLVER_ENUM = None try: _ADAGRAD_SOLVER_CLASS = _caffe.AdaGradSolver _ADAGRAD_SOLVER_ENUM = SolverType.ADAGRAD except AttributeError: # pragma: no cover _ADAGRAD_SOLVER_CLASS = None _ADAGRAD_SOLVER_ENUM = None try: _RMSPROP_SOLVER_CLASS = _caffe.RMSPropSolver _RMSPROP_SOLVER_ENUM = SolverType.RMSPROP except AttributeError: # pragma: no cover _RMSPROP_SOLVER_CLASS = None _RMSPROP_SOLVER_ENUM = None _LOGGER = _logging.getLogger(__name__) # pylint: disable=too-many-instance-attributes class Solver(object): """Describes the Solver concept.""" _solver_types = {} _caffe_solver_type = None _solver_type = None def __init__(self, **kwargs): r""" Constructor. :param iter_size: int>0. The number of batches the gradient is accumulated over (not available in older caffe versions). :param lr_policy: string in ['fixed', 'step', ...] The policy to use to adjust the learning rate during fitting. Taken from ``solver.cpp``: * fixed: always return base_lr. * step: return base_lr \* gamma ^ (floor(iter / step)) * exp: return base_lr \* gamma ^ iter * inv: return base_lr \* (1 + gamma \* iter) ^ (- power) * multistep: similar to step but it allows non uniform steps defined by stepvalue * poly: the effective learning rate follows a polynomial decay, to be zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) * sigmoid: the effective learning rate follows a sigmod decay return base_lr ( 1/(1 + exp(-gamma \* (iter - stepsize)))) :param base_lr: float or None. The base learning rate to use. :param gamma: float or None. :param power: float or None. :param weight_decay: float or None. Use weight decay to reduce the weights at each step. :param regularization_type: string in ['L1', 'L2']. Specifies how the ``weight_decay`` is applied. :param step_stepsize: float or None. The stepsize for the step policy. :param stepvalue: list(int) or None. The stepvalue parameter for the multistep policy. :param clip_gradients: float or None. Clips the gradients to the specified value. :param random_seed: int>0 or None. If specified, seeds the solver for reproducible results. Otherwise, it uses a time dependent seed. :param snapshot_prefix: string or None. 
If the ``Checkpointer`` monitor is used, this prefix is used to create the snapshots. :param debug_info: bool. If set to ``True``, gives additional output in the logs. """ self._net = None self._parameter_hash = None self._parameter_dict = dict() self.update_parameters(**kwargs) # some default internal parameters self._parameter_dict['snapshot_after_train'] = False self._parameter_dict['solver_type'] = self._caffe_solver_type # every solver can append its on assertions or overwrite the given ones self._asserts = [] if _HAS_ITER_SIZE: self._asserts.append(self.Assert_iter_size) self._asserts.append(self.Assert_regularization_types) self._asserts.append(self.Assert_policy) self._solver = None self._print_warning = False self._train_net_dummy = None self._test_net_dummy = None self._parallel_train_filler = None self._parallel_test_filler = None self._parallel_batch_res_train = None self._parallel_batch_res_test = None def restore(self, filename, net=None): """Restore the solverstate from a file.""" if self._net is None: assert net is not None, ('you must specify a net on which the ' 'restored solver will be used!') if net is not None: # The method self._Update_net must not be used here, since it # is allowed to use a new net. self._net = net self._Update_solver() self._solver.restore(filename) @classmethod def Get_required_arguments(cls): """The minimum number of required parameters.""" return ['base_lr'] @classmethod def Get_optional_arguments(cls): """ Get the optional parameters. Optional parameters and some of which are None not all combinations are possible, this is enforced by various asserts when calling Get_parameter_dict(). """ ret_dict = {'debug_info': False, 'weight_decay': None, 'lr_policy': 'fixed', 'regularization_type': 'L2', 'power': None, 'gamma': None, 'snapshot_prefix': None, 'stepsize': None, 'stepvalue': None, 'clip_gradients': None, 'random_seed': None, 'net': None} if _HAS_ITER_SIZE: ret_dict['iter_size'] = 1 return ret_dict def fit(self, # pylint: disable=too-many-statements, too-many-branches iterations, X=None, X_val=None, input_processing_flags=None, test_iterations=0, test_interval=0, test_initialization=False, train_callbacks=None, test_callbacks=None, net=None, read_input_batch_size_from_blob_name=None, use_fit_phase_for_validation=False, allow_test_phase_for_train=False, shuffle=False): r""" fit the network to specific data. Use monitors from the module :py:mod:`barrista.monitoring` as callbacks to monitor the state of the net and create checkpoints. This method offers the following kwargs to monitors (* indicates, that the values are only available at test time, - indicates, that the value is not necessarily available): * max_iter, * iter, * batch_size, * net, * testnet\[only if there is a test phase, i.e., X_val is set] * solver, * callback_signal\[is automatically set by the fit function], * X\-[only if provided by the user], * X_val\-[only if provided by the user], * [the following fields are only set if the corresponding loss/accuracy layer exists for the train and/or test phase. It can also be set by providing a custom ResultExtractor] * loss\-, * test_loss\*, * accuracy\-, * test_accuracy\*-, :param iterations: int. The number of training iterations to do. This is the plain number of iterations, completely disregarding the batch size, i.e., for ``iterations`` being 10 and ``batch_size`` being 10, just one batch is forward propagated. :param X: dict of numpy.ndarray or None. If specified, is used as input data. 
It is used sequentially, so shuffle it pre, if required. The keys of the dict have to have a corresponding layer name in the net. :param X_val: dict of numpy.ndarray or None. If specified and ``test_interval>0``, it is used as input data. It is used sequentially, so shuffle it pre, if required. The keys of the dict have to have a corresponding layer name in the net. :param input_processing_flags: dict(string, string) or None. See ``CyclingDataMonitor.__init__`` for the ``input_processing_flags`` parameter. In short, if you specify your sample via list, you may specify for each blob, whether they should be padded 'p', or resized 'r' to match the network input size. If they fit perfectly, you may specify 'n' or omit the parameter and use ``None``. :param test_iterations: int. The number of test iterations to determine the validation score, if ``test_interval>0``. :param test_interval: int. The number of iterations between runs on the validation set. Is specified in plain iterations, disregarding batch size. Hence, it must be a multiple of the batch size. :param test_initialization: bool. Whether to do a run on the validation set pre the training is started to get an initial score. :param train_callbacks: list(barrista.monitoring.Monitor). List of callback callables. Will be called pre and post training batch is processed. This list will be processed sequentially, meaning that monitors in the sequence can provide information for later monitors as done with ResultExtractor. :param test_callbacks: list(callable). List of callback callables. Will be called for pre and post testing and pre and post each batch of testing processed. This list will be processed sequentially, meaning that monitors in the sequence can provide information for later monitors as done with ResultExtractor. :param read_input_batch_size_from_blob_name: string. The name of the layer to take the input batch size from (as the first dimension of its first blob). Must be specified if the network does not have explicit inputs (e.g., when trained from an LMDB). :param use_fit_phase_for_validation: bool. If set to True, do not change the phase of the net for running a validation step during training. This can be helpful to reduce memory consumption. This ignores the TEST phase of the net completely, but it's not necessary to use it if the data is provided by the Python layers. :param allow_test_phase_for_train: bool. If set to True, allow using a network in its TEST phase to be trained. May make sense in exotic settings, but should prevent bugs. If not set to True, an AssertionError is raised in this scenario. Why is this so important? The ``DropoutLayer`` and ``PoolLayer`` (in the case of stochastic pooling) are sensitive to this parameter and results are very different for the two settings. :param shuffle: bool. If set to True, shuffle the training data every epoch. The test data is not shuffled. Default: False. """ if net is not None: from barrista import net as _net assert isinstance(net, _net.Net), ( 'net must be an instance of barrista.net.Net') self._Update_net(net) assert self._net is not None, ( 'neither the solver was initialized with a net nor', 'the fit function was called with one') assert self._net._mode == _Phase.TRAIN or allow_test_phase_for_train, ( 'The network must be in TRAIN phase for fitting! If you really ' 'want to, you can override this requirement by setting ' 'the optional parameter `allow_test_phase_for_train` to True.' 
) train_callbacks = self._Assert_callbacks(self._net, train_callbacks, 'train') testnet = self._Init_testnet(test_interval, use_fit_phase_for_validation) if testnet is not None: test_callbacks = self._Assert_callbacks(testnet, test_callbacks, 'test') else: test_callbacks = [] batch_size, test_iterations = self._Get_batch_size( self._net, testnet, test_interval, test_iterations, X_val, read_input_batch_size_from_blob_name) self._Assert_iterations( batch_size, iterations, test_interval, test_iterations, self._parameter_dict.get('stepvalue') ) if self._parameter_dict.get('stepvalue') is not None: self._parameter_dict['stepvalue'] = [ val / batch_size for val in self._parameter_dict['stepvalue']] self._Init_cycling_monitor(X, X_val, input_processing_flags, batch_size, test_interval, train_callbacks, test_callbacks, shuffle) run_pre = True iteration = 0 cbparams = dict() cbparams['max_iter'] = iterations cbparams['batch_size'] = batch_size cbparams['iter'] = 0 cbparams['net'] = self._net cbparams['testnet'] = testnet cbparams['solver'] = self cbparams['X'] = X cbparams['X_val'] = X_val cbparams['test_iterations'] = test_iterations cbparams['test_interval'] = test_interval cbparams['train_callbacks'] = train_callbacks cbparams['test_callbacks'] = test_callbacks cbparams['callback_signal'] = 'initialize_train' for cb in train_callbacks: cb(cbparams) if test_interval > 0: cbparams['callback_signal'] = 'initialize_test' for cb in test_callbacks: cb(cbparams) try: _parallel.init_prebatch( self, self._net, train_callbacks, True) if test_interval > 0: _parallel.init_prebatch( self, testnet, test_callbacks, False) while iteration <= iterations: cbparams['iter'] = iteration # Check whether to test the net. if (( # pylint: disable=too-many-boolean-expressions test_interval > 0 and iteration % test_interval == 0 and iteration > 0 ) or ( iteration == 0 and test_initialization ) or ( test_interval > 0 and iteration + batch_size > iterations ) ): ############################################################### # testing loop ############################################################### test_iter = 0 run_pre = True # Pretest gets called if necessary in `run_prebatch`. while test_iter < test_iterations: cbparams['callback_signal'] = 'pre_test_batch' _parallel.run_prebatch( self, test_callbacks, cbparams, False, cbparams['iter'], run_pre) # pylint: disable=W0212 testnet._forward(0, len(testnet.layers) - 1) cbparams['callback_signal'] = 'post_test_batch' for cb in test_callbacks: cb(cbparams) test_iter += batch_size run_pre = False cbparams['callback_signal'] = 'post_test' for cb in test_callbacks: cb(cbparams) run_pre = True if iteration == iterations: break ################################################################### # training loop ################################################################### # `pre_fit` gets called if necessary in `run_prebatch`. 
PRETRBATCH_BEGINPOINT = _time.time() cbparams['callback_signal'] = 'pre_train_batch' _parallel.run_prebatch( self, train_callbacks, cbparams, True, cbparams['iter'] + batch_size, run_pre) run_pre = False PRETRBATCH_DURATION = _time.time() - PRETRBATCH_BEGINPOINT _LOGGER.debug("Pre-batch preparation time: %03.3fs.", PRETRBATCH_DURATION) TRBATCH_BEGINPOINT = _time.time() self.step(1) TRBATCH_DURATION = _time.time() - TRBATCH_BEGINPOINT _LOGGER.debug("Batch processing time: %03.3fs.", TRBATCH_DURATION) POSTTRBATCH_BEGINPOINT = _time.time() cbparams['callback_signal'] = 'post_train_batch' for cb in train_callbacks: cb(cbparams) POSTTRBATCH_DURATION = _time.time() - POSTTRBATCH_BEGINPOINT _LOGGER.debug("Post-batch processing time: %03.3fs.", POSTTRBATCH_DURATION) iteration += batch_size finally: for cb in set(train_callbacks + test_callbacks): if not isinstance(cb, _monitoring.ParallelMonitor): try: cb.finalize(cbparams) except Exception as ex: # pylint: disable=broad-except _LOGGER.fatal(str(ex)) continue _parallel.finalize_prebatch(self, cbparams) if self._parameter_dict.get('stepvalue') is not None: self._parameter_dict['stepvalue'] = [ val * batch_size for val in self._parameter_dict['stepvalue']] def step(self, number_of_batches): """Run ``number_of_batches`` solver steps.""" tmp_hash = self.Get_parameter_hash(self.Get_parameter_dict()) if self._parameter_hash != tmp_hash: if self._print_warning: # pragma: no cover _LOGGER.warn('WARNING: ---------------------------------------------') _LOGGER.warn('you are re-initializing a new solver which will delete') _LOGGER.warn('the weight history of the solver.') _LOGGER.warn('Only use this option if you know what you are doing!') self._print_warning = False self._Update_solver() return self._solver.step(number_of_batches) def Get_parameter_dict(self): """Get the solver describing parameters in a dictionary.""" # work our stack of assertions followed by a weak copy of the dict for Tmp_assert in self._asserts: assert Tmp_assert() return copy.copy(self._parameter_dict) def Assert_iter_size(self): """Enforce the parameter constraints.""" return self._parameter_dict['iter_size'] > 0 def Assert_regularization_types(self): """Enforce the parameter constraints.""" return self._parameter_dict['regularization_type'] in ['L1', 'L2'] def Assert_policy(self): # pylint: disable=R0911 """Enforce the parameter constraints.""" # although redundant this allows to have a quick check # of what is really required without loading the actuall net which # might take a bit of time if self._parameter_dict['lr_policy'] == 'fixed': return 'base_lr' in self._parameter_dict if self._parameter_dict['lr_policy'] == 'step': return 'gamma' in self._parameter_dict if self._parameter_dict['lr_policy'] == 'exp': return 'gamma' in self._parameter_dict if self._parameter_dict['lr_policy'] == 'inv': return ('gamma' in self._parameter_dict and 'power' in self._parameter_dict) if self._parameter_dict['lr_policy'] == 'multistep': return ('stepvalue' in self._parameter_dict and 'base_lr' in self._parameter_dict and 'gamma' in self._parameter_dict) if self._parameter_dict['lr_policy'] == 'poly': return 'power' in self._parameter_dict if self._parameter_dict['lr_policy'] == 'sigmoid': return 'stepsize' in self._parameter_dict return False @classmethod def Get_parameter_hash(cls, solver_parameter_dict): """Get a has of the parameter dict.""" hash_obj = hashlib.md5() for key in sorted(solver_parameter_dict.keys()): hash_obj.update(str(key).encode('utf-8')) 
hash_obj.update(str(solver_parameter_dict[key]).encode('utf-8')) return str(hash_obj.hexdigest()) @classmethod def Get_caffe_solver_instance(cls, solver_parameter_dict, net): """Get a caffe solver object.""" # now we actually create a instance of the solver solver_message = _caffe_pb2.SolverParameter(**solver_parameter_dict) messagestr = _gprototext.MessageToString(solver_message) with _NamedTemporaryFile(mode='w+b', suffix='.prototxt') as tmpfile: tmpfile.write(bytes(messagestr.encode('utf-8'))) tmpfile.flush() try: # Newer version of caffe with full solver init support. return cls.Get_caffe_solver_class( solver_parameter_dict['solver_type'])._caffe_solver_class( tmpfile.name, net, _caffe._caffe.NetVec(), True) except TypeError: # Fallback for older, patched versions. return cls.Get_caffe_solver_class( solver_parameter_dict['solver_type'])._caffe_solver_class( tmpfile.name, net) raise Exception('could not initialize solver class') @classmethod def Get_solver_class(cls, solver_type): """Get the solver class as string.""" return cls._solver_types[solver_type] @classmethod def Get_caffe_solver_class(cls, caffe_solver_type): """Get the solver class as ``caffe_solver_type``.""" return cls._solver_types[caffe_solver_type] @classmethod def Register_solver(cls, solver_class): """Register a solver class.""" assert issubclass(solver_class, Solver) if solver_class._solver_type in cls._solver_types: raise Exception( ' '.join('solver', solver_class._solver_type, 'already defined')) if solver_class._caffe_solver_type in cls._solver_types: raise Exception( ' '.join('solver', solver_class._solver_type, 'already defined')) # we register with both access types cls._solver_types[solver_class._caffe_solver_type] = solver_class cls._solver_types[solver_class._solver_type] = solver_class def _Update_solver(self): """Re-initialize the solver.""" # we (re-)initialize the solver self._solver = self.Get_caffe_solver_instance( self.Get_parameter_dict(), self._net) self._parameter_hash = self.Get_parameter_hash( self.Get_parameter_dict()) # we only want to see the warning once self._print_warning = True def update_parameters(self, **kwargs): """Update the solver parameters.""" # adding the default keys if they are not yet set for argument, default in list(self.Get_optional_arguments().items()): if argument not in self._parameter_dict and default is not None: self._parameter_dict[argument] = default # first add all parameters which are actually required for arg_key, arg_value in list(kwargs.items()): if arg_key in self.Get_required_arguments(): self._parameter_dict[arg_key] = arg_value # make sure that all required arguments are set tmp_required_arguments = set(self.Get_required_arguments()) intersection = tmp_required_arguments.intersection(set(kwargs.keys())) if intersection != tmp_required_arguments: raise Exception(' '.join( ['we are missing required arguments', str(list(kwargs.keys())), 'vs', str(self.Get_required_arguments())])) for arg_key, arg_value in list(kwargs.items()): # the very special case of passing the net # this will not be passed as a parameter to the parameter dict # but we will ensure that the net is always the same # as the one used for initialization if arg_key == 'net': self._Update_net(arg_value) continue if arg_key in list(self.Get_optional_arguments().keys()): self._parameter_dict[arg_key] = arg_value # we make sure that there is no spelling mistake in the kwargs total_arguments = set(self.Get_required_arguments()) total_arguments = total_arguments.union( 
list(self.Get_optional_arguments().keys())) for argument in list(kwargs.keys()): if argument not in total_arguments: raise Exception(' '.join( ['argument', argument, 'is not supported'])) def _Update_net(self, net): """Check that the net remains the same.""" # since the user could potentially provide two different nets to # the solver, which is not supported, thus we check that the net # has not changed if net is None: return if self._net is not None: if id(self._net) != id(net): raise Exception(' '.join( ['a solver works only with one network', 'the network has to remain the same'])) self._net = net def _Get_batch_size(self, # pylint: disable=R0201 net, testnet, test_interval, test_iterations, X_val, read_input_batch_size_from_blob_name): """Get the batch size and the test iterations.""" if len(net.inputs) > 0: # Otherwise, a DB backend is used. batch_size = net.blobs[net.inputs[0]].data.shape[0] if testnet is not None: assert (testnet.blobs[net.inputs[0]].data.shape[0] == batch_size), ("Validation and fit network batch size " "must agree!") if (test_interval != 0 and test_iterations == 0 and X_val is not None): if isinstance(X_val, dict): if len(X_val.values()[0]) % batch_size != 0: _LOGGER.warn( "The number of test samples is not a multiple " "of the batch size. Test performance estimates " "will be slightly off.") test_iterations = _np.ceil(float(len(X_val.values()[0])) / float(batch_size)) * batch_size else: if len(X_val) % batch_size != 0: _LOGGER.warn( "The number of test samples is not a multiple " "of the batch size. Test performance estimates " "will be slightly off.") test_iterations = _np.ceil(float(len(X_val)) / float(batch_size)) * batch_size if read_input_batch_size_from_blob_name is not None: tmp_batch_size = net.blobs[ read_input_batch_size_from_blob_name].data.shape[0] assert (tmp_batch_size == batch_size), ( "The input size and the first dimension of " "the blob to read the batch size from don't " "match: {}, {}.".format(tmp_batch_size, batch_size)) return batch_size, test_iterations # some kind of backend is used assert read_input_batch_size_from_blob_name is not None, ( 'no inputs thus the batch_size must be determined from a blob') batch_size = net.blobs[ read_input_batch_size_from_blob_name].data.shape[0] return batch_size, test_iterations @classmethod def _Assert_iterations(cls, batch_size, iterations, test_interval, test_iterations, multistep_stepvalue): """Make sure iterations follow all of our rules.""" # namely being a multiple of the batch_size assert iterations % batch_size == 0, ( 'Error: iterations do not match {} {}'.format(iterations, batch_size)) if test_interval > 0: assert test_iterations > 0, ( 'Test iterations must be > 0 but is {}'.format( test_iterations)) # Set the configurable arguments. assert test_iterations >= 0, ( 'Test iterations must be >= 0 but is {}'.format( test_iterations)) assert test_interval >= 0, ( 'Test interval must be >= 0 but is {}'.format( test_iterations)) assert test_interval % batch_size == 0, ( 'The test interval must be a multiple of the batch size: {}, {}', test_iterations, batch_size) if multistep_stepvalue is not None: for val in multistep_stepvalue: assert val % batch_size == 0, ( "The step values must be multiples of the batch size " "(is given in sample iterations)! Is %d, batch size %d." 
% ( val, batch_size)) @classmethod def _Assert_callbacks(cls, net, callbacks, phase): """Assert the callbacks work properly.""" if callbacks is None: callbacks = [] assert isinstance(callbacks, list), ( 'callbacks have to be in a list {} {}'.format( str(callbacks), type(callbacks))) for callback in callbacks: assert isinstance(callback, _monitoring.Monitor), ( 'a callback has to derive from montoring.Monitor') if 'loss' in list(net.blobs.keys()): callbacks.insert(0, _monitoring.ResultExtractor( phase + '_loss', 'loss')) if 'accuracy' in list(net.blobs.keys()): callbacks.insert(0, _monitoring.ResultExtractor( phase + '_accuracy', 'accuracy')) return callbacks @classmethod def _Init_cycling_monitor(cls, X, X_val, input_processing_flags, batch_size, test_interval, train_callbacks, test_callbacks, shuffle): """ Convencience initialization function. ...such that the user can simply provide X, X_val dicts and we internally create the CyclingDataMonitors. """ if X is not None: assert len(list(X.values())[0]) >= batch_size # safety measure, we do not want to have two different data # monitors in the same callback list for callback in train_callbacks: assert not isinstance(callback, _monitoring.DataMonitor), ( 'if we use X we cannot use a data monitor') tmp_data_monitor = _monitoring.CyclingDataMonitor( X=X, input_processing_flags=input_processing_flags, shuffle=shuffle) train_callbacks.insert(0, tmp_data_monitor) if test_interval > 0 and X_val is not None: assert X_val is not None if X is not None: assert len(list(X_val.values())) == len(list(X.values())) # safety measure, we do not want to have two different data # monitors in the same callback list for callback in test_callbacks: assert not isinstance(callback, _monitoring.DataMonitor), ( 'if we use X_val we cannot use a data monitor') tmp_data_monitor = _monitoring.CyclingDataMonitor( X=X_val, input_processing_flags=input_processing_flags) test_callbacks.insert(0, tmp_data_monitor) def _Init_testnet(self, test_interval, use_fit_phase_for_validation): """Initialize the test phase network.""" testnet = None if test_interval > 0: if use_fit_phase_for_validation: testnet = self._net else: # Setup the test net. test_netspec = self._net._specification.copy() test_netspec.phase = _Phase.TEST test_netspec.predict_inputs = None test_netspec.predict_input_shapes = None testnet = test_netspec.instantiate() testnet.share_with(self._net) return testnet class SGDSolver(Solver): r""" Thin wrapper for the vanilla SGD solver provided by the caffe framework. :param momentum: float or None. The momentum to use. Multiplies the former gradient with this factor and adds it to the gradient in the following step. """ _solver_type = 'sgd' _caffe_solver_type = SolverType.SGD _caffe_solver_class = _caffe.SGDSolver def __init__(self, **kwargs): """Constructor.""" Solver.__init__(self, **kwargs) @classmethod def Get_required_arguments(cls): """See :py:class:`barrista.solver.Solver`.""" return Solver.Get_required_arguments() @classmethod def Get_optional_arguments(cls): """See :py:class:`barrista.solver.Solver`.""" optional_arguments = Solver.Get_optional_arguments() optional_arguments['momentum'] = 0.0 return optional_arguments class AdagradSolver(Solver): r""" Thin wrapper for the Adagrad solver provided by the caffe framework. To understand how this solver works please inspect the cplusplus implementation in solver.cpp. 
The corresponding publication is called 'Adaptive Subgradient Methods for Online Learning and Stochastic Optimization' by John Duchi, Elad Hazan, Yoram Singer :param momentum: float or None. The momentum to use. Multiplies the former gradient with this factor and adds it to the gradient in the following step. """ _solver_type = 'adagrad' _caffe_solver_type = _ADAGRAD_SOLVER_ENUM _caffe_solver_class = _ADAGRAD_SOLVER_CLASS def __init__(self, **kwargs): """See :py:class:`barrista.solver.Solver`.""" Solver.__init__(self, **kwargs) @classmethod def Get_required_arguments(cls): """See :py:class:`barrista.solver.Solver`.""" required_arguments = Solver.Get_required_arguments() required_arguments.append('delta') return required_arguments @classmethod def Get_optional_arguments(cls): """See :py:class:`barrista.solver.Solver`.""" return Solver.Get_optional_arguments() class NesterovSolver(Solver): r""" Thin wrapper for the Nesterov solver provided by the caffe framework. To understand how this solver works please inspect the cplusplus implementation in solver.cpp. :param momentum: float or None. The momentum to use. Multiplies the former gradient with this factor and adds it to the gradient in the following step. """ _solver_type = 'nesterov' _caffe_solver_type = SolverType.NESTEROV _caffe_solver_class = _caffe.NesterovSolver def __init__(self, **kwargs): """See :py:class:`barrista.solver.Solver`.""" Solver.__init__(self, **kwargs) @classmethod def Get_required_arguments(cls): """See :py:class:`barrista.solver.Solver`.""" return Solver.Get_required_arguments() @classmethod def Get_optional_arguments(cls): """See :py:class:`barrista.solver.Solver`.""" optional_arguments = Solver.Get_optional_arguments() optional_arguments['momentum'] = 0.0 return optional_arguments class RMSPropSolver(Solver): r""" Thin wrapper for the RMSProp solver provided by the caffe framework. To understand how this solver works please inspect the cplusplus implementation in solver.cpp. This solver has been discussed in a lecture given by Hinton. www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf :param rms_decay: float MeanSquare(t) = rms_decay*MeanSquare(t-1)+(1-rms_decay)*SquareGradient(t) :param delta: float numerical stability [useful choice 1E-8] """ _solver_type = 'rmsprop' _caffe_solver_type = _RMSPROP_SOLVER_ENUM _caffe_solver_class = _RMSPROP_SOLVER_CLASS def __init__(self, **kwargs): """See :py:class:`barrista.solver.Solver`.""" Solver.__init__(self, **kwargs) @classmethod def Get_required_arguments(cls): """See :py:class:`barrista.solver.Solver`.""" required_arguments = Solver.Get_required_arguments() required_arguments.append('rms_decay') required_arguments.append('delta') return required_arguments @classmethod def Get_optional_arguments(cls): """See :py:class:`barrista.solver.Solver`.""" return Solver.Get_optional_arguments() class AdaDeltaSolver(Solver): r""" Thin wrapper for the AdaDelta solver provided by the caffe framework. To understand how this solver works please inspect the cplusplus implementation in solver.cpp. The corresponding arxiv paper is called 'ADADELTA: An Adaptive Learning Rate Method' by Matthew D. Zeiler. :param delta: float numerical stability [useful choice 1E-8] :param momentum: float or None. The momentum to use. Multiplies the former gradient with this factor and adds it to the gradient in the following step. 
""" _solver_type = 'adadelta' _caffe_solver_type = _ADADELTA_SOLVER_ENUM _caffe_solver_class = _ADADELTA_SOLVER_CLASS def __init__(self, **kwargs): """See :py:class:`barrista.solver.Solver`.""" Solver.__init__(self, **kwargs) @classmethod def Get_required_arguments(cls): """See :py:class:`barrista.solver.Solver`.""" required_arguments = Solver.Get_required_arguments() required_arguments.append('momentum') return required_arguments @classmethod def Get_optional_arguments(cls): """See :py:class:`barrista.solver.Solver`.""" optional_arguments = Solver.Get_optional_arguments() # epsilon optional_arguments['delta'] = 1E-8 return optional_arguments class AdamSolver(Solver): r""" Thin wrapper for the Adam solver provided by the caffe framework. To understand how this solver works please inspect the cplusplus implementation in solver.cpp. The corresponding arxiv paper is called ' Adam: A Method for Stochastic Optimization ' by Diederik Kingma, Jimmy Ba :param base_lr: float [useful choice 0.001] :param momentum: float. beta 1 useful default 0.9 :param momentum2: float. beta 2 useful default 0.999 :param delta: float numerical stability [useful choice 1E-8] """ _solver_type = 'adam' _caffe_solver_type = _ADAM_SOLVER_ENUM _caffe_solver_class = _ADAM_SOLVER_CLASS def __init__(self, **kwargs): """See :py:class:`barrista.solver.Solver`.""" Solver.__init__(self, **kwargs) @classmethod def Get_required_arguments(cls): """See :py:class:`barrista.solver.Solver`.""" return Solver.Get_required_arguments() @classmethod def Get_optional_arguments(cls): """See :py:class:`barrista.solver.Solver`.""" optional_arguments = Solver.Get_optional_arguments() # beta 1 optional_arguments['momentum'] = 0.9 # beta 2 optional_arguments['momentum2'] = 0.999 # epsilon optional_arguments['delta'] = 1E-8 return optional_arguments # register the locally specified solver Solver.Register_solver(SGDSolver) Solver.Register_solver(AdagradSolver) Solver.Register_solver(NesterovSolver) if _RMSPROP_SOLVER_CLASS is not None: Solver.Register_solver(RMSPropSolver) if _ADADELTA_SOLVER_CLASS is not None: Solver.Register_solver(AdaDeltaSolver) if _ADAM_SOLVER_CLASS is not None: Solver.Register_solver(AdamSolver) Get_solver_class = Solver.Get_solver_class Get_caffe_solver_class = Solver.Get_caffe_solver_class
Our Boutique — Centre & Main Chocolate Co. You can find us now in the not-so-hidden gem that is Warkworth, Ontario. Chocolate is like wine: when it grows in different parts of the world, it develops distinct nuances due to variations in climate, soil, season and variety of cacao. At Centre & Main we're delighted to offer a wide array of Single Origin chocolates, which highlight and celebrate these distinct flavours. Our current selection includes: Ecuador, Peru, Haiti, Dominican Republic, Venezuela, Brazil, Mexico, Ghana, Uganda, São Tomé, Tanzania and Vietnam. Discover the wide and always changing array of our chocolate in the Bar Bar! Each one is blended with natural ingredients in order to offer the most exquisite flavour experience. We also create a selection of chocolate barks which feature chunky inclusions like toasted nuts, tart cherries, candied oranges and generous sprinklings of our house-made English toffee, made with rich and creamy local butter.
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Device.user' db.add_column(u'machine_device', 'user', self.gf('django.db.models.fields.related.OneToOneField')(default='', to=orm['auth.User'], unique=True), keep_default=False) def backwards(self, orm): # Deleting field 'Device.user' db.delete_column(u'machine_device', 'user_id') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'machine.device': { 'Meta': {'object_name': 'Device'}, 'field_1': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'field_2': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}) } } complete_apps = ['machine']
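For orientation, the model this migration implies looks roughly like the sketch below. The field names and table name are taken from the frozen models dict above; everything else (imports, exact options) is an assumption for illustration, written in the style of the South-era Django this migration targets.

from django.contrib.auth.models import User
from django.db import models

class Device(models.Model):
    # field_1/field_2 are the placeholder names recorded in the frozen
    # definition above; only the `user` link is added by this migration.
    field_1 = models.CharField(max_length=100)
    field_2 = models.CharField(max_length=100)
    user = models.OneToOneField(User)

    class Meta:
        app_label = 'machine'
        db_table = 'machine_device'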
What happened to Stick’s Site – Did he Sell it? I started reading Stickmanbangkok before I came to Thailand a couple years ago. So – 2 1/2 years of reading it off and on. I’d choose an article a day almost during the peak of my interest in it. I have been back there to read submissions over the last couple months – since the craziness with the other guy that outed him – and I’m just bored out of my cranium with it. The guys writing are boring me, but even more so – the comment at the end – is DEFINITELY not STICK anymore. It’s like a German computer is writing a comment. The comments are lame, and provoke no thought whatsoever. Stick’s site is going down fast. Are there ANY readers left? Add to this the fact that stick stopped writing and publishing a weekly column. What is left? 6000 stories from farangs that paid too much, drank too much and don’t do anything all day but think up these stupid commentaries on their own lives that nobody cares about but themselves. If any of these guys had half a noggin – they’d start a blog of their own and make some money off their writing. It’s free to start a blog at blogger and there’s everything you could possibly need. You can add google advertisements to your pages very simply. Dana – man, if you have as many readers as everyone THINKS you do – you are really missing out. If you are wise – and I think you must be VERY wise, you would pull your stuff from Stick’s site and start your own blog of stories as soon as possible. The amount of content you’ve built up after 160+ articles would make Google Gods smile down on you in a few months after posting them to your own site. There’s only two rules of success for blogging. 1.) blog everyday, about 300 words or more focused around some topic that others search on or are interested in. 2.) put ads on your site. If you do that for 6 months, a year, you’ll see thai baht rain down from heaven. I don’t think anyone else has any kind of following on that site – but Dana could really make a blog work. I’m sure of it. I think it’s quite strange to see all these people GIVING away their content to Stick’s site and for what? A thanks? Thank yourself and put your writing skills and dedication to it to work. Stick – WHERE DID YOU GO? Stick, you could probably triple your traffic by doing one thing… this one thing would take weeks to accomplish. Months probably… I am betting you’d triple your traffic from Google… not only that, you would have MORE people coming everyday and more people reading more pages everyday. Arrange all your submissions by topic. Search engines would eat it up. Visitors that are coming to your site would eat it up because they’d no longer need to read a paragraph in the archive to decide what it’s about and whether they want to read the whole thing or not. If you had it arranged by TOPIC – we’d know what it is and be able to choose more easily what we read about. Most of us are so tired of the same old stories – we’d love to be able to skip them entirely. Free tips – unedited feedback- there you go. I’ll take same if you have it! Thailand gal – who is doodee? Anybody that has that as a nickname… well, I’d have to go look at the site to see what it’s all about! Agree with all you’ve said. I stopped reading that site a long time ago. It’s too lopsided, doesn’t offer much to think about and is kind of shallow and unsatisfying. I read your site, Doodee’s site and a few others to really keep in touch with what’s going on over there. They are well-written and varied enough to keep my attention. 
You’re right about the uncensored feedback. I don’t have much interest in leaving comments for those who moderate or censor. It’s too easy to delete the spam without having that hindrance.
import sys from datetime import datetime from utils.readable import readable_size, readable_time class FTPTracker: """Tracks ftp upload and download progress. Displays progress bar Args: file_size (int): size of tracked file bar_length (int, optional): length of output bar. Defaults to 50 Attributes: size_written (int): number of bytes that are already written file_size (int): size of tracked file bar_length (int): length of output bar. Defaults to 50 start_time (datetime): ftp transfer start time """ def __init__(self, file_size, bar_length=50): self.size_written = 0 self.file_size = file_size self.bar_length = bar_length self.start_time = datetime.now() def percentage(self): """ Returns: str: completeness percentage in string form. """ return '{0:.1f}'.format(100 * (self.size_written / float(self.file_size))) def bar_filled(self): """ Returns: int: rounded value of how much bar is filled """ return round(self.bar_length * self.size_written / float(self.file_size)) def rate(self): """ Returns: float: transfer rate measured in bytes per second """ return self.size_written / (datetime.now() - self.start_time).total_seconds() def eta(self): """ Returns: float: approximately how much time is left """ return (self.file_size - self.size_written) / self.rate() def bar_string(self): """ Returns: str: bar string format """ bar_filled = self.bar_filled() bar = '#' * bar_filled + '-' * (self.bar_length - bar_filled) return '\r |{bar}| {percentage}% {size_written}/{file_size} {rate}/s {eta}'.format( bar=bar, percentage=self.percentage(), size_written=readable_size(self.size_written).split()[0], file_size=readable_size(self.file_size).split()[0], rate=readable_size(self.rate()).split()[0], eta=readable_time(self.eta()) ) def handle(self, block): """Handles bar output""" self.size_written += len(block) sys.stdout.write(self.bar_string()) if self.size_written == self.file_size: sys.stdout.write('\n') sys.stdout.flush() sys.stdout.write('\033[K') # Clears the end of the line to prevent output overlapping
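Since handle takes a raw data block, an FTPTracker can be passed straight into ftplib's block callbacks. The helper below is a hypothetical usage sketch (the function name and flow are mine); it assumes the server answers the SIZE command and that FTPTracker is importable as defined above.

from ftplib import FTP

def download_with_progress(host, user, password, filename, local_path):
    # Hypothetical helper: stream a remote file to disk while FTPTracker
    # redraws the progress bar after every received block.
    ftp = FTP(host)
    ftp.login(user, password)
    ftp.voidcmd('TYPE I')                      # binary mode, so SIZE is reliable
    tracker = FTPTracker(ftp.size(filename))   # total size drives the bar
    with open(local_path, 'wb') as out:
        def callback(block):
            out.write(block)
            tracker.handle(block)
        ftp.retrbinary('RETR ' + filename, callback)
    ftp.quit()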
On 22 January 1963, France’s President, General de Gaulle, and Germany’s Chancellor, Dr Konrad Adenauer, signed the Elysée Treaty on cooperation between the two countries in Paris. It marked the beginning of an era of friendship and cooperation between Germany and France after more than a century of tensions and wars, tensions that culminated in the 20th century’s two world wars, which drew in so many countries and caused millions of casualties. It took the great leadership of both Heads of State and Government to create the conditions for reconciliation and cooperation just 18 years after the end of the war. Fifty-six years later, on 22 January 2019, a new treaty will be signed by France’s President, Emmanuel Macron, and Germany’s Chancellor, Dr Angela Merkel, in Aachen, Germany. Representatives of the European Union institutions will also be present. While the first treaty was about reconciliation, the new one focuses on convergence: on bringing the two countries’ socio-economic models closer together and on joint positions in international fora, in the context of European integration. In addition to the treaty between the two Governments, an agreement will be signed between the Bundestag, the German parliament, and the Assemblée Nationale, the French parliament, creating a French-German Parliamentary Assembly of 100 members. In a volatile international and European context, France and Germany recall, in the solemn form of a treaty between the two States, their shared commitment to values such as the rule of law and multilateralism, as well as their joint responsibility to build a sovereign and united Europe.
import time import logging import random import threading from socket import error as SocketError import file_ipc import stats from config import NODES_EACH_THREAD from stats_models import RedisNodeStatus, ProxyStatus from models.base import db from models.polling_stat import PollingStat class Poller(threading.Thread): def __init__(self, nodes, algalon_client): threading.Thread.__init__(self) self.daemon = True self.nodes = nodes logging.debug('Poller %x distributed %d nodes', id(self), len(self.nodes)) self.algalon_client = algalon_client def run(self): for node in self.nodes: logging.debug('Poller %x collect for %s:%d', id(self), node['host'], node['port']) node.collect_stats(self._emit_data, self._send_alarm) def _send_alarm(self, message, trace): if self.algalon_client is not None: self.algalon_client.send_alarm(message, trace) def _emit_data(self, addr, points): try: stats.client.write_points(addr, points) except (SocketError, stats.StatisticError, StandardError), e: logging.exception(e) CACHING_NODES = {} def _load_from(cls, nodes): def update_node_settings(node, file_settings): node.suppress_alert = file_settings.get('suppress_alert') node.balance_plan = file_settings.get('balance_plan') r = [] for n in nodes: if (n['host'], n['port']) in CACHING_NODES: cache_node = CACHING_NODES[(n['host'], n['port'])] r.append(cache_node) update_node_settings(cache_node, n) continue loaded_node = cls.get_by(n['host'], n['port']) CACHING_NODES[(n['host'], n['port'])] = loaded_node update_node_settings(loaded_node, n) r.append(loaded_node) return r def save_polling_stat(nodes, proxies): nodes_ok = [] nodes_fail = [] proxies_ok = [] proxies_fail = [] for n in nodes: if n.details['stat']: nodes_ok.append(n.addr) else: nodes_fail.append(n.addr) for p in proxies: if p.details['stat']: proxies_ok.append(p.addr) else: proxies_fail.append(p.addr) db.session.add(PollingStat(nodes_ok, nodes_fail, proxies_ok, proxies_fail)) class NodeStatCollector(threading.Thread): def __init__(self, app, interval, algalon_client): threading.Thread.__init__(self) self.daemon = True self.app = app self.interval = interval self.algalon_client = algalon_client def _shot(self): poll = file_ipc.read_poll() nodes = _load_from(RedisNodeStatus, poll['nodes']) proxies = _load_from(ProxyStatus, poll['proxies']) # commit because `get_by` may create new nodes # to reattach session they must be persisted db.session.commit() all_nodes = nodes + proxies random.shuffle(all_nodes) pollers = [Poller(all_nodes[i: i + NODES_EACH_THREAD], self.algalon_client) for i in xrange(0, len(all_nodes), NODES_EACH_THREAD)] for p in pollers: p.start() time.sleep(self.interval) for p in pollers: p.join() for p in pollers: for n in p.nodes: n.add_to_db() save_polling_stat(nodes, proxies) db.session.commit() logging.debug('Total %d nodes, %d proxies', len(nodes), len(proxies)) try: file_ipc.write_details({n.addr: n.details for n in nodes}, {p.addr: p.details for p in proxies}) except StandardError, e: logging.exception(e) def run(self): with self.app.app_context(): while True: try: self._shot() except Exception as e: logging.exception(e)
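The distribution step in _shot (shuffle, then slice into NODES_EACH_THREAD-sized chunks, one Poller per chunk) is easy to sanity-check in isolation. A minimal sketch of that same partitioning, with the config constant passed in as a parameter:

import random

def partition_nodes(all_nodes, nodes_each_thread):
    # Shuffle first so slow nodes are not always grouped into the same
    # thread, then cut into fixed-size chunks; the last chunk may be short.
    random.shuffle(all_nodes)
    return [all_nodes[i:i + nodes_each_thread]
            for i in range(0, len(all_nodes), nodes_each_thread)]

# partition_nodes(list(range(10)), 4) -> chunks of sizes 4, 4 and 2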
January 1, 2014 - Shadbelly Style Blog Now Live January 1st! Welcome to the "soft" launch of www.shadbelly.com, a style blog for the dressage obsessed. I am an adult amateur with not even a glimmer of a shadbelly in my future, yet I find playing with different styles, watching others who are pushing the style envelope a bit, and even just the process of planning a coordinate "look" for me and my horse, are some of the reasons I love the sport of dressage. In my humble opinion, US dressage style could use a little bit of shaking up. I've had tack store owners tell me over and over "you can't wear that for dressage"...well watch me! We'll explore the recent loosening of the dressage attire guidelines by the USEF and figure out together how to get out of those frumpy old dressage coats we've all been stuck in for years. There's lots to talk about! We may be vastly different in the way we feed, shoe and train our horses, but even the most polar opposite dressage riders can find common ground and talk browbands! So thanks for checking us out. We're still busy working out the kinks, but we welcome your ideas! December 12, 2013 by Shadbelly Style.
# coding=utf-8 """ Diamond collector that monitors relevant MySQL performance_schema values For now only monitors replication load [Blog](http://bit.ly/PbSkbN) announcement. [Snippet](http://bit.ly/SHwYhT) to build example graph. #### Dependencies * MySQLdb * MySQL 5.5.3+ """ from __future__ import division try: import MySQLdb from MySQLdb import MySQLError except ImportError: MySQLdb = None import diamond import time import re class MySQLPerfCollector(diamond.collector.Collector): def process_config(self): super(MySQLPerfCollector, self).process_config() self.db = None self.last_wait_count = {} self.last_wait_sum = {} self.last_timestamp = {} self.last_data = {} self.monitors = { 'slave_sql': { 'wait/synch/cond/sql/MYSQL_RELAY_LOG::update_cond': 'wait_for_update', 'wait/io/file/innodb/innodb_data_file': 'innodb_data_file', 'wait/io/file/innodb/innodb_log_file': 'innodb_log_file', 'wait/io/file/myisam/dfile': 'myisam_dfile', 'wait/io/file/myisam/kfile': 'myisam_kfile', 'wait/io/file/sql/binlog': 'binlog', 'wait/io/file/sql/relay_log_info': 'relaylog_info', 'wait/io/file/sql/relaylog': 'relaylog', 'wait/synch/mutex/innodb': 'innodb_mutex', 'wait/synch/mutex': 'other_mutex', 'wait/synch/rwlock': 'rwlocks', 'wait/io': 'other_io', }, 'slave_io': { 'wait/io/file/sql/relaylog_index': 'relaylog_index', 'wait/synch/mutex/sql/MYSQL_RELAY_LOG::LOCK_index': 'relaylog_index_lock', 'wait/synch/mutex/sql/Master_info::data_lock': 'master_info_lock', 'wait/synch/mutex/mysys/IO_CACHE::append_buffer_lock': 'append_buffer_lock', 'wait/synch/mutex/sql/LOG::LOCK_log': 'log_lock', 'wait/io/file/sql/master_info': 'master_info', 'wait/io/file/sql/relaylog': 'relaylog', 'wait/synch/mutex': 'other_mutex', 'wait/synch/rwlock': 'rwlocks', 'wait/io': 'other_io', } } if self.config['hosts'].__class__.__name__ != 'list': self.config['hosts'] = [self.config['hosts']] # Move legacy config format to new format if 'host' in self.config: hoststr = "%s:%s@%s:%s/%s" % ( self.config['user'], self.config['passwd'], self.config['host'], self.config['port'], self.config['db'], ) self.config['hosts'].append(hoststr) def get_default_config_help(self): config_help = super(MySQLPerfCollector, self).get_default_config_help() config_help.update({ 'hosts': 'List of hosts to collect from. 
Format is ' + 'yourusername:yourpassword@host:' + 'port/performance_schema[/nickname]', 'slave': 'Collect Slave Replication Metrics', }) return config_help def get_default_config(self): """ Returns the default collector settings """ config = super(MySQLPerfCollector, self).get_default_config() config.update({ 'path': 'mysql', # Connection settings 'hosts': [], 'slave': 'False', }) return config def connect(self, params): if MySQLdb is None: self.log.error('Unable to import MySQLdb') return try: self.db = MySQLdb.connect(**params) except MySQLError, e: self.log.error('MySQLPerfCollector couldnt connect to database %s', e) return {} self.log.debug('MySQLPerfCollector: Connected to database.') def query_list(self, query, params): cursor = self.db.cursor() cursor.execute(query, params) return list(cursor.fetchall()) def slave_load(self, nickname, thread): data = self.query_list(""" SELECT his.event_name, his.sum_timer_wait, his.count_star, cur.event_name, UNIX_TIMESTAMP(SYSDATE()) FROM events_waits_summary_by_thread_by_event_name his JOIN threads thr USING (thread_id) JOIN events_waits_current cur USING (thread_id) WHERE name = %s ORDER BY his.event_name """, (thread,)) wait_sum = sum([x[1] for x in data]) wait_count = sum([x[2] for x in data]) timestamp = int(time.time()) if 0 in data and len(data[0]) > 5: cur_event_name, timestamp = data[0][3:] if thread not in self.last_wait_sum: # Avoid bogus data self.last_wait_sum[thread] = wait_sum self.last_wait_count[thread] = wait_count self.last_timestamp[thread] = timestamp self.last_data[thread] = data return wait_delta = wait_sum - self.last_wait_sum[thread] time_delta = (timestamp - self.last_timestamp[thread]) * 1000000000000 if time_delta == 0: return # Summarize a few things thread_name = thread[thread.rfind('/') + 1:] data.append( ['wait/synch/mutex/innodb', sum([x[1] for x in data if x[0].startswith('wait/synch/mutex/innodb')])]) data.append( ['wait/synch/mutex', sum([x[1] for x in data if (x[0].startswith('wait/synch/mutex') and x[0] not in self.monitors[thread_name])]) - data[-1][1]]) data.append( ['wait/synch/rwlock', sum([x[1] for x in data if x[0].startswith('wait/synch/rwlock')])]) data.append( ['wait/io', sum([x[1] for x in data if (x[0].startswith('wait/io') and x[0] not in self.monitors[thread_name])])]) for d in zip(self.last_data[thread], data): if d[0][0] in self.monitors[thread_name]: self.publish(nickname + thread_name + '.' + self.monitors[thread_name][d[0][0]], (d[1][1] - d[0][1]) / time_delta * 100) # Also log what's unaccounted for. This is where Actual Work gets done self.publish(nickname + thread_name + '.other_work', float(time_delta - wait_delta) / time_delta * 100) self.last_wait_sum[thread] = wait_sum self.last_wait_count[thread] = wait_count self.last_timestamp[thread] = timestamp self.last_data[thread] = data def collect(self): for host in self.config['hosts']: matches = re.search( '^([^:]*):([^@]*)@([^:]*):?([^/]*)/([^/]*)/?(.*)$', host) if not matches: continue params = {} params['host'] = matches.group(3) try: params['port'] = int(matches.group(4)) except ValueError: params['port'] = 3306 params['db'] = matches.group(5) params['user'] = matches.group(1) params['passwd'] = matches.group(2) nickname = matches.group(6) if len(nickname): nickname += '.' self.connect(params=params) if self.config['slave']: self.slave_load(nickname, 'thread/sql/slave_io') self.slave_load(nickname, 'thread/sql/slave_sql') self.db.close()
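The host strings in the hosts config are parsed with the regular expression in collect(). Below is a small standalone check of that pattern against the documented user:password@host:port/db[/nickname] format; the credentials and host are made-up examples.

import re

HOST_RE = re.compile(r'^([^:]*):([^@]*)@([^:]*):?([^/]*)/([^/]*)/?(.*)$')

def parse_host(hoststr):
    # Returns the same fields collect() extracts; the port falls back to 3306
    # when it is omitted, mirroring the except-ValueError branch above.
    m = HOST_RE.search(hoststr)
    if not m:
        raise ValueError('unparseable host string: %r' % hoststr)
    user, passwd, host, port, db, nickname = m.groups()
    return {'user': user, 'passwd': passwd, 'host': host,
            'port': int(port) if port else 3306,
            'db': db, 'nickname': nickname}

# parse_host('stats:secret@db1.example.com:3306/performance_schema/master')
# -> {'user': 'stats', 'passwd': 'secret', 'host': 'db1.example.com',
#     'port': 3306, 'db': 'performance_schema', 'nickname': 'master'}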
Our second Volume of the Easter Eggstravaganza Bundle is here! It includes 32 Easter-related packs with over 1,500 elements, patterns, cliparts and cards for all your creative needs. This entire set is usually worth $393, but for a limited time it is available for only $19. That's a massive 95% OFF for this bundle! Grab it before it's too late! Among the highlights is a set of high quality hand painted Easter watercolors: bunnies, feathers, and floral elements in a boho style. Perfect graphics for DIY projects, wedding invitations, greeting cards, quotes, blogs, posters and more.
#!/usr/bin/env python
# coding: UTF-8
from __future__ import division
from poirank.transition import init_data, trans
import pylab

poi_axis, axis_poi, data = init_data(tuple(range(2, 30)))
# tensor = trans(data, len(axis_poi))
# print "data: ", data
data_length = len(data)
print "data_length: ", data_length


def poi_correlation(order):
    esum = 0.0
    for i in range(data_length):
        esum = esum + data[i]
    expectation = esum / data_length
    print "expectation: ", expectation
    vsum = 0.0
    for j in range(data_length):
        vsum += pow((data[j] - expectation), 2)
    print "vsum: ", vsum
    variance = vsum / data_length
    tsum = 0.0
    for k in range(data_length - order):
        tsum += (data[k] - expectation) * (data[k + order] - expectation)
    print "tsum: ", tsum
    ar = tsum / vsum
    print "ar: ", ar
    return ar


order = 1
y_values = []
x_values = []
while order <= 40:
    ar = poi_correlation(order)
    y_values.append(ar)
    x_values.append(order)
    order += 1

pylab.plot(x_values, y_values, 'rs', linewidth=1, linestyle="-")
pylab.xlabel(u"Markov chain order")
pylab.ylabel(u"Autocorrelation coefficient")
pylab.title(u"Markov chain order vs. autocorrelation coefficient (POI sequence length: 3868)")
pylab.legend(loc='center right')
pylab.show()

# (2,3)  26,29
# (2,10) 494,2600
# (2,20) 853,3868

# from scipy import stats
# def measure(n):
#     "Measurement model, return two coupled measurements."
#     m1 = np.random.normal(size=n)
#     m2 = np.random.normal(scale=0.5, size=n)
#     return m1+m2, m1-m2
# m1, m2 = measure(2000)
# xmin = m1.min()
# xmax = m1.max()
# ymin = m2.min()
# ymax = m2.max()
# Perform a kernel density estimation on the data:
# X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
# positions = np.vstack([X.ravel(), Y.ravel()])
# values = np.vstack([m1, m2])
# kernel = stats.gaussian_kde(values)
# Z = np.reshape(kernel(positions).T, X.shape)
# Plot the results:
# import matplotlib.pyplot as plt
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
#           extent=[xmin, xmax, ymin, ymax])
# ax.plot(m1, m2, 'k.', markersize=2)
# ax.set_xlim([xmin, xmax])
# ax.set_ylim([ymin, ymax])
# plt.show()
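The loop above is the standard lag-k autocorrelation estimate: the covariance between the series and a copy of itself shifted by `order`, divided by the (unnormalised) variance. The same quantity can be cross-checked with numpy; this sketch assumes `data` is the flat sequence returned by init_data and that the lag is at least 1.

import numpy as np

def autocorrelation(series, lag):
    # Same estimator as the loop above:
    # sum_t (x_t - mu)(x_{t+lag} - mu) / sum_t (x_t - mu)^2
    x = np.asarray(series, dtype=float)
    mu = x.mean()
    var = ((x - mu) ** 2).sum()
    cov = ((x[:-lag] - mu) * (x[lag:] - mu)).sum()
    return cov / var

# autocorrelation(data, 1) should match the order-1 value plotted above,
# up to floating-point rounding.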
Krames Online - What Is Gestational Diabetes? Eating the right foods will help you keep your blood sugar at safe levels for you and your baby. Gestational diabetes is diabetes that happens only during pregnancy. Normally, as food is digested, it turns into sugar (glucose) that goes into your bloodstream. Your body makes a substance called insulin that helps your cells use this blood sugar for energy. Changes that occur in your body during pregnancy may cause your blood sugar to be too high. This can be risky for both you and your baby. You can take steps to control your blood sugar and reduce these risks. Eating right. Eating the right foods is the main way to control your blood sugar. You need to eat a variety of foods from each of the food groups each day. To help you with the changes that may be needed in your diet, you will likely work with a registered dietitian. This is an expert on food and nutrition. The dietitian can help you understand how specific foods affect your blood sugar. He or she can also teach you the skills you need to plan healthy, balanced meals. Getting exercise. Your body uses more blood sugar when you exercise. Your healthcare team can decide on the best kind of exercise for you, and the best times for you to exercise. Checking your blood sugar. You will most likely check your blood sugar at home 2 or more times a day. Your healthcare team will teach you how to do this. They will also discuss your blood sugar goals with you. Your blood sugar may also be tested every week or so in the lab. If your blood sugar remains too high you may need to take insulin shots during your pregnancy. Your baby may grow too large. If your blood sugar stays too high, your baby may grow too large (macrosomia) to be delivered vaginally. Shoulder dystocia is one complication that may occur during delivery. Shoulder dystocia is when the baby's shoulder is stuck behind the pubic bone. If there is a shoulder dystocia, the arms and shoulders could be injured. This may result in permanent arm damage. The baby can also have low oxygen levels (hypoxia) and acidemia if the dystocia can't be corrected. Hypoxia can lead to cerebral palsy or rarely, death. Your baby’s organs may not be fully developed before birth. If you have diabetes, your baby may need to be delivered early. This may be because of complications with the pregnancy. Or it may be because of possible risks to you or your baby. If your baby is delivered early, his or her lungs may not work well. This is called respiratory distress syndrome (RDS). Your baby's liver also may not work properly, and your baby may have yellowing of the skin and eyes (jaundice) after birth. Your baby’s blood sugar may be low after birth. If your blood sugar is too high, your baby makes extra insulin. The baby will continue to make extra insulin right after birth. He or she may need to be treated for low blood sugar. Your baby could be stillborn. This is very rare, but your baby could die before birth if your blood sugar stays high for too long. High blood pressure. High blood sugar makes you more likely to have high blood pressure during your pregnancy (preeclampsia). This is a danger to your health that could lead to early delivery for your baby. Infections. High blood sugar makes you more likely to have bladder, kidney, and vaginal infections. Trouble breathing. You may be uncomfortable or short of breath. High blood sugar can cause too much fluid around the baby (polyhydramnios). Your abdomen gets big and pushes on your lungs. Prolonged labor. 
Your delivery may be harder, and recovery may take longer. If your blood sugar stays too high, your baby may grow too large. A large baby might cause injury to you during birth. Or the baby may have to be delivered by cesarean section (C-section). This means making a cut (incision) in your abdomen and uterus. Needing a C-section is one of the most common risks of gestational diabetes. Women who have gestational diabetes are at higher risk of developing type 2 diabetes. To help reduce your risk, lose weight if you’re overweight. Be as active as you can. Eat more fruits and vegetables and fewer processed foods. And have your doctor screen you regularly for diabetes. You will also be at increased risk of developing gestational diabetes with your next pregnancy.
#!/usr/bin/python
import gconf, os

class PhameratorConfiguration:
    def __init__(self):
        # add keys if needed
        self.client = gconf.client_get_default()
        self.gconf_dir = '/apps/phamerator'
        if not self.client.dir_exists(self.gconf_dir):
            self.client.add_dir(self.gconf_dir, gconf.CLIENT_PRELOAD_NONE)
        entries = self.client.all_entries(self.gconf_dir)
        self.gconf_strings = [
            '/apps/phamerator/gene_color',
            '/apps/phamerator/default_save_folder',
            '/apps/phamerator/pham_circle_color_scheme']
        self.gconf_bools = [
            '/apps/phamerator/show_pham_names',
            '/apps/phamerator/show_alignment_text',
            '/apps/phamerator/show_alignment',
            '/apps/phamerator/show_domains']
        self.gconf_ints = []
        self.gconf_floats = ['/apps/phamerator/transparency_adjustment']
        entries = self.client.all_entries('/apps/phamerator')
        #for entry in entries: print entry.get_key()

        # for each bool, check if it's in the gconf database
        # and add it if needed
        keys = []
        for entry in entries:
            keys.append(entry.get_key())
        for bool in self.gconf_bools:
            if bool not in keys:
                self.client.set_bool(bool, True)
                print "can't find %s in %s" % (bool, self.gconf_bools)

        # for each float, check if it's in the gconf database
        # and add a default value if needed
        keys = []
        for entry in entries:
            keys.append(entry.get_key())
        for flt in self.gconf_floats:
            if flt not in keys:
                if flt == '/apps/phamerator/transparency_adjustment':
                    self.client.set_float(flt, 1.0)

        # for each string, check if it's in the gconf database
        # and, if not, add a reasonable default value
        keys = []
        for entry in entries:
            keys.append(entry.get_key())
        for s in self.gconf_strings:
            try:
                if s not in keys:
                    if s == '/apps/phamerator/gene_color':
                        self.client.set_string('/apps/phamerator/gene_color', 'pham')
                    elif s == '/apps/phamerator/default_save_folder':
                        self.client.set_string('/apps/phamerator/default_save_folder', os.environ['HOME'])
                    elif s == '/apps/phamerator/pham_circle_color_scheme':
                        self.client.set_string('/apps/phamerator/pham_circle_color_scheme', 'alignmentType')
            except:
                pass

    def set_bool(self, key, param):
        print 'setting bool %s:%s' % (key, param)
        self.client.set_bool(key, param)

    def get_bool(self, key):
        print 'getting bool %s' % (key)
        return self.client.get_bool(key)

    def set_float(self, key, param):
        print 'setting float %s:%s' % (key, param)
        self.client.set_float(key, param)

    def get_float(self, key):
        print 'getting float %s' % (key)
        return self.client.get_float(key)

    def set(self, key, param):
        print 'setting string %s:%s' % (key, param)
        self.client.set_string(key, param)

    def get(self, key):
        print 'getting string %s' % (key)
        return self.client.get_string(key)
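As a usage sketch, the class is typically instantiated once and then queried through its typed getters and setters. The snippet below assumes the same GNOME 2 environment with the Python gconf bindings that the module itself requires; the key names are the ones defined in __init__ above.

config = PhameratorConfiguration()

# Read current preferences (defaults are seeded in __init__ if the keys are missing).
color_scheme = config.get('/apps/phamerator/gene_color')
show_domains = config.get_bool('/apps/phamerator/show_domains')
alpha = config.get_float('/apps/phamerator/transparency_adjustment')

# Update a preference; other Phamerator components reading the same key
# will see the new value through gconf.
config.set_bool('/apps/phamerator/show_alignment', False)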
My good friend Elliott Johnson died last week. I had known Elliott for many years. His first job was at Number 10 Cafe where we worked together. He did his work experience with me when I was County Councillor. Elliott was a Conservative activist, as am I, and we had fought many campaigns side by side. There is nothing bad to say about Elliott. He did not have a bad bone in his body. He was just the most generous, caring, decent man. He had such immense talent and was so noble and kind. He was also the most true and loyal friend anybody could ever wish for. I am so immensely sad, there are no more words.
from datetime import date import pandas as pd import xarray as xr from tikon.central import Módulo, SimulMódulo, Modelo, Exper, Parcela from tikon.central.res import Resultado from tikon.datos import Obs from tikon.utils import EJE_TIEMPO class Res1_1(Resultado): nombre = 'res 1_1' unids = None class Res1_2(Resultado): nombre = 'res 1_2' unids = None class Res2_1(Resultado): nombre = 'res 2_1' unids = None class Res2_2(Resultado): nombre = 'res 2_2' unids = None class SimulMódulo1(SimulMódulo): resultados = [Res1_1, Res1_2] class Módulo1(Módulo): nombre = 'módulo 1' cls_simul = SimulMódulo1 class SimulMódulo2(SimulMódulo): resultados = [Res2_1, Res2_2] class Módulo2(Módulo): nombre = 'módulo 2' cls_simul = SimulMódulo2 class MiObs(Obs): mód = 'módulo 1' var = 'res 1_1' obs_1_1 = MiObs( datos=xr.DataArray( 1.5, coords={EJE_TIEMPO: pd.date_range(date.today(), periods=10, freq='D')}, dims=[EJE_TIEMPO] ) ) exper = Exper('exper', Parcela('parcela')) exper_obs_1_1 = Exper('exper', Parcela('parcela'), obs=obs_1_1) modelo = Modelo([Módulo1, Módulo2])
Laton High School will electronically submit CAL GRANT GPA verification for every senior to the California Student Aid Commission so they may be considered for a Cal Grant Award, but you must submit your own FAFSA. If you DO NOT wish to have your student's information submitted electronically, please pick up the Opt-Out form from the counseling office and return it to your student's counselor by Friday, September 15th. If it is not received, your information will be uploaded (California Education Code 69432.9) on or before October 1, 2018.
# Copyright (c) 2004 Python Software Foundation. # All rights reserved. # Written by Eric Price <eprice at tjhsst.edu> # and Facundo Batista <facundo at taniquetil.com.ar> # and Raymond Hettinger <python at rcn.com> # and Aahz (aahz at pobox.com) # and Tim Peters """ These are the test cases for the Decimal module. There are two groups of tests, Arithmetic and Behaviour. The former test the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter test the pythonic behaviour according to PEP 327. Cowlishaw's tests can be downloaded from: http://speleotrove.com/decimal/dectest.zip This test module can be called from command line with one parameter (Arithmetic or Behaviour) to test each part, or without parameter to test both parts. If you're working through IDLE, you can import this test module and call test_main() with the corresponding argument. """ import math import os, sys import operator import warnings import pickle, copy import unittest import numbers import locale from test.support import run_unittest, run_doctest, is_resource_enabled from test.support import check_warnings, TestFailed import random import time import warnings import fractions try: import threading except ImportError: threading = None import cdecimal as C import decimal as P orig_sys_decimal = sys.modules['decimal'] py_minor = sys.version_info[1] # Testing all failures of API functions for _decimal. First, # the number of API calls in a test case is determined. Then, # the testcase is run with all possible API failures, checking # that FailAPIException is properly raised. def assertRaises(expEx, func, *args, **kwargs): """assertRaises has to reraise FailAPIException.""" try: func(*args, **kwargs) except Exception as e: if e.__class__ is C.FailAPIException: raise C.FailAPIException if not e.__class__ is expEx: raise e def withFailpoint(func): """Wrap a function for testing all possible API failures.""" def iter_failpoint(testcase, *args): # These tests will not work. setattr(testcase, 'assertRaises', assertRaises) # Determine number of API calls. C.setapicalls(0) C.setfailpoint(0) func(testcase, *args) n = C.getapicalls() # Fail at each possible API call. for i in range(1, n+1): C.setapicalls(0) C.setfailpoint(i) try: func(testcase, *args) except C.FailAPIException: continue # Error: FailAPIException was not raised raise TestFailed("FailAPIException not raised in: %s" % func) C.setapicalls(0) C.setfailpoint(0) return iter_failpoint class ProtectFail(object): """Protect code regions that modify global state (e.g. lines that set or restore global context values). Otherwise it would not be possible to rerun a test case several times.""" def __enter__(self): if hasattr(C, 'setfailpoint'): self.calls = C.getapicalls() self.fpoint = C.getfailpoint() C.setfailpoint(0) def __exit__(self, *_): if hasattr(C, 'setfailpoint'): C.setfailpoint(self.fpoint) if self.fpoint: if self.calls < self.fpoint <= C.getapicalls(): # Pretend that API calls in the protected block failed. raise C.FailAPIException def protectfail(): return ProtectFail() if hasattr(C, 'setfailpoint'): # Functions that are iterated several times must use # the same random sequence each time. randseed = int(time.time()) # Implicit initialization of the module context must # be tested first. 
for i in range(1, 100): C.setapicalls(0) C.setfailpoint(i) try: C.getcontext() except C.FailAPIException as e: continue C.setapicalls(0) C.setfailpoint(0) # Useful Test Constant Signals = { C: tuple(C.getcontext().flags.keys()) if C else None, P: tuple(P.getcontext().flags.keys()) } # Signals ordered with respect to precedence: when an operation # produces multiple signals, signals occurring later in the list # should be handled before those occurring earlier in the list. OrderedSignals = { C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow, C.Overflow, C.DivisionByZero, C.InvalidOperation] if C else None, P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow, P.Overflow, P.DivisionByZero, P.InvalidOperation] } def assert_signals(cls, context, attr, expected): d = getattr(context, attr) cls.assertTrue(all(d[s] if s in expected else not d[s] for s in d)) RoundingModes = { C: (C.ROUND_UP, C.ROUND_DOWN, C.ROUND_CEILING, C.ROUND_FLOOR, C.ROUND_HALF_UP, C.ROUND_HALF_DOWN, C.ROUND_HALF_EVEN, C.ROUND_05UP) if C else None, P: (P.ROUND_UP, P.ROUND_DOWN, P.ROUND_CEILING, P.ROUND_FLOOR, P.ROUND_HALF_UP, P.ROUND_HALF_DOWN, P.ROUND_HALF_EVEN, P.ROUND_05UP) } # Tests are built around these assumed context defaults. # test_main() restores the original context. ORIGINAL_CONTEXT = { C: C.getcontext().copy() if C else None, P: P.getcontext().copy() } def init(m): if not m: return DefaultTestContext = m.Context( prec=9, rounding=m.ROUND_HALF_EVEN, traps=dict.fromkeys(Signals[m], 0) ) m.setcontext(DefaultTestContext) TESTDATADIR = 'decimaltestdata' if __name__ == '__main__': file = sys.argv[0] else: file = __file__ testdir = os.path.dirname(file) or os.curdir directory = testdir + os.sep + TESTDATADIR + os.sep skip_expected = not os.path.isdir(directory) # Make sure it actually raises errors when not expected and caught in flags # Slower, since it runs some things several times. EXTENDEDERRORTEST = False # decorator for skipping tests on non-IEEE 754 platforms requires_IEEE_754 = unittest.skipUnless( float.__getformat__("double").startswith("IEEE"), "test requires IEEE 754 doubles") class IBMTestCases(unittest.TestCase): """Class which tests the Decimal class against the IBM test cases.""" def setUp(self): self.context = self.decimal.Context() self.readcontext = self.decimal.Context() self.ignore_list = ['#'] # List of individual .decTest test ids that correspond to tests that # we're skipping for one reason or another. self.skipped_test_ids = set([ # Skip implementation-specific scaleb tests. 'scbx164', 'scbx165', # Skip large powers for _power_exact: 'extr1700', 'extr1701', 'extr1702', 'extr1703', # For some operations (currently exp, ln, log10, power), the decNumber # reference implementation imposes additional restrictions on the context # and operands. These restrictions are not part of the specification; # however, the effect of these restrictions does show up in some of the # testcases. We skip testcases that violate these restrictions, since # Decimal behaves differently from decNumber for these testcases so these # testcases would otherwise fail. 
'expx901', 'expx902', 'expx903', 'expx905', 'lnx901', 'lnx902', 'lnx903', 'lnx905', 'logx901', 'logx902', 'logx903', 'logx905', 'powx1183', 'powx1184', 'powx4001', 'powx4002', 'powx4003', 'powx4005', 'powx4008', 'powx4010', 'powx4012', 'powx4014', ]) if self.decimal == C: # status has additional Subnormal, Underflow self.skipped_test_ids.add('pwsx803') self.skipped_test_ids.add('pwsx805') # Correct rounding (skipped for decNumber, too) self.skipped_test_ids.add('powx4302') self.skipped_test_ids.add('powx4303') self.skipped_test_ids.add('powx4342') self.skipped_test_ids.add('powx4343') # http://bugs.python.org/issue7049 self.skipped_test_ids.add('pwmx325') self.skipped_test_ids.add('pwmx326') elif py_minor < 3: self.skipped_test_ids.add('minx1030') self.skipped_test_ids.add('plux1031') # Map test directives to setter functions. self.ChangeDict = {'precision' : self.change_precision, 'rounding' : self.change_rounding_method, 'maxexponent' : self.change_max_exponent, 'minexponent' : self.change_min_exponent, 'clamp' : self.change_clamp} # Name adapter to be able to change the Decimal and Context # interface without changing the test files from Cowlishaw. self.NameAdapter = {'and':'logical_and', 'apply':'_apply', 'class':'number_class', 'comparesig':'compare_signal', 'comparetotal':'compare_total', 'comparetotmag':'compare_total_mag', 'copy':'copy_decimal', 'copyabs':'copy_abs', 'copynegate':'copy_negate', 'copysign':'copy_sign', 'divideint':'divide_int', 'invert':'logical_invert', 'iscanonical':'is_canonical', 'isfinite':'is_finite', 'isinfinite':'is_infinite', 'isnan':'is_nan', 'isnormal':'is_normal', 'isqnan':'is_qnan', 'issigned':'is_signed', 'issnan':'is_snan', 'issubnormal':'is_subnormal', 'iszero':'is_zero', 'maxmag':'max_mag', 'minmag':'min_mag', 'nextminus':'next_minus', 'nextplus':'next_plus', 'nexttoward':'next_toward', 'or':'logical_or', 'reduce':'normalize', 'remaindernear':'remainder_near', 'samequantum':'same_quantum', 'squareroot':'sqrt', 'toeng':'to_eng_string', 'tointegral':'to_integral_value', 'tointegralx':'to_integral_exact', 'tosci':'to_sci_string', 'xor':'logical_xor'} # Map test-case names to roundings. self.RoundingDict = {'ceiling' : self.decimal.ROUND_CEILING, 'down' : self.decimal.ROUND_DOWN, 'floor' : self.decimal.ROUND_FLOOR, 'half_down' : self.decimal.ROUND_HALF_DOWN, 'half_even' : self.decimal.ROUND_HALF_EVEN, 'half_up' : self.decimal.ROUND_HALF_UP, 'up' : self.decimal.ROUND_UP, '05up' : self.decimal.ROUND_05UP} # Map the test cases' error names to the actual errors. self.ErrorNames = {'clamped' : self.decimal.Clamped, 'conversion_syntax' : self.decimal.InvalidOperation, 'division_by_zero' : self.decimal.DivisionByZero, 'division_impossible' : self.decimal.InvalidOperation, 'division_undefined' : self.decimal.InvalidOperation, 'inexact' : self.decimal.Inexact, 'invalid_context' : self.decimal.InvalidOperation, 'invalid_operation' : self.decimal.InvalidOperation, 'overflow' : self.decimal.Overflow, 'rounded' : self.decimal.Rounded, 'subnormal' : self.decimal.Subnormal, 'underflow' : self.decimal.Underflow} # The following functions return True/False rather than a # Decimal instance. self.LogicalFunctions = ('is_canonical', 'is_finite', 'is_infinite', 'is_nan', 'is_normal', 'is_qnan', 'is_signed', 'is_snan', 'is_subnormal', 'is_zero', 'same_quantum') def read_unlimited(self, v, context): """Work around the limitations of the 32-bit _decimal version. The guaranteed maximum values for prec, Emax etc. 
are 425000000, but higher values usually work, except for rare corner cases. In particular, all of the IBM tests pass with maximum values of 1070000000.""" if self.decimal == C and self.decimal.MAX_EMAX == 425000000: self.readcontext.unsafe_setprec(1070000000) self.readcontext.unsafe_setemax(1070000000) self.readcontext.unsafe_setemin(-1070000000) return self.readcontext.create_decimal(v) else: return self.decimal.Decimal(v, context) def eval_file(self, file): global skip_expected if skip_expected: raise unittest.SkipTest return with open(file) as f: for line in f: line = line.replace('\r\n', '').replace('\n', '') #print line try: t = self.eval_line(line) except self.decimal.DecimalException as exception: #Exception raised where there shouldn't have been one. self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line) return def eval_line(self, s): if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'): s = (s.split('->')[0] + '->' + s.split('->')[1].split('--')[0]).strip() else: s = s.split('--')[0].strip() for ignore in self.ignore_list: if s.find(ignore) >= 0: #print s.split()[0], 'NotImplemented--', ignore return if not s: return elif ':' in s: return self.eval_directive(s) else: return self.eval_equation(s) def eval_directive(self, s): funct, value = (x.strip().lower() for x in s.split(':')) if funct == 'rounding': value = self.RoundingDict[value] else: try: value = int(value) except ValueError: pass funct = self.ChangeDict.get(funct, (lambda *args: None)) funct(value) def eval_equation(self, s): if not TEST_ALL and not hasattr(C, 'setfailpoint') and \ random.random() < 0.90: return with protectfail(): self.context.clear_flags() try: Sides = s.split('->') L = Sides[0].strip().split() id = L[0] if DEBUG: print("Test ", id, end=" ") funct = L[1].lower() valstemp = L[2:] L = Sides[1].strip().split() ans = L[0] exceptions = L[1:] except (TypeError, AttributeError, IndexError): raise self.decimal.InvalidOperation def FixQuotes(val): val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote') val = val.replace("'", '').replace('"', '') val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"') return val if id in self.skipped_test_ids: return fname = self.NameAdapter.get(funct, funct) if fname == 'rescale': return funct = getattr(self.context, fname) vals = [] conglomerate = '' quote = 0 theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions] for exception in Signals[self.decimal]: self.context.traps[exception] = 1 #Catch these bugs... 
for exception in theirexceptions: self.context.traps[exception] = 0 for i, val in enumerate(valstemp): if val.count("'") % 2 == 1: quote = 1 - quote if quote: conglomerate = conglomerate + ' ' + val continue else: val = conglomerate + val conglomerate = '' v = FixQuotes(val) if fname in ('to_sci_string', 'to_eng_string'): if EXTENDEDERRORTEST: for error in theirexceptions: self.context.traps[error] = 1 try: funct(self.context.create_decimal(v)) except error: pass except Signals[self.decimal] as e: self.fail("Raised %s in %s when %s disabled" % \ (e, s, error)) else: self.fail("Did not raise %s in %s" % (error, s)) self.context.traps[error] = 0 v = self.context.create_decimal(v) else: v = self.read_unlimited(v, self.context) vals.append(v) ans = FixQuotes(ans) # three argument power/powmod (deprecated) if self.decimal == C: if fname == 'power' and len(vals) == 3: # name is different fname = 'powmod' funct = getattr(self.context, fname) if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'): for error in theirexceptions: self.context.traps[error] = 1 try: funct(*vals) except error: pass except Signals[self.decimal] as e: self.fail("Raised %s in %s when %s disabled" % \ (e, s, error)) else: self.fail("Did not raise %s in %s" % (error, s)) self.context.traps[error] = 0 # as above, but add traps cumulatively, to check precedence ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions] for error in ordered_errors: self.context.traps[error] = 1 try: funct(*vals) except error: pass except Signals[self.decimal] as e: self.fail("Raised %s in %s; expected %s" % (type(e), s, error)) else: self.fail("Did not raise %s in %s" % (error, s)) # reset traps for error in ordered_errors: self.context.traps[error] = 0 if DEBUG: print("--", self.context) try: result = str(funct(*vals)) if fname in self.LogicalFunctions: result = str(int(eval(result))) # 'True', 'False' -> '1', '0' except Signals[self.decimal] as error: self.fail("Raised %s in %s" % (error, s)) except Exception as e: #Catch any error long enough to state the test case. # Errors are expected with failpoints. 
if not hasattr(C, 'setfailpoint'): print("ERROR:", s) raise e.__class__ myexceptions = self.getexceptions() myexceptions.sort(key=repr) theirexceptions.sort(key=repr) self.assertEqual(result, ans, 'Incorrect answer for ' + s + ' -- got ' + result) self.assertEqual(myexceptions, theirexceptions, 'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions)) return def getexceptions(self): return [e for e in Signals[self.decimal] if self.context.flags[e]] def change_precision(self, prec): if self.decimal == C and self.decimal.MAX_PREC == 425000000: self.context.unsafe_setprec(prec) else: self.context.prec = prec def change_rounding_method(self, rounding): self.context.rounding = rounding def change_min_exponent(self, exp): if self.decimal == C and self.decimal.MAX_PREC == 425000000: self.context.unsafe_setemin(exp) else: self.context.Emin = exp def change_max_exponent(self, exp): if self.decimal == C and self.decimal.MAX_PREC == 425000000: self.context.unsafe_setemax(exp) else: self.context.Emax = exp def change_clamp(self, clamp): if py_minor <= 2: self.context._clamp = clamp else: self.context.clamp = clamp class CIBMTestCases(IBMTestCases): decimal = C class PyIBMTestCases(IBMTestCases): decimal = P # The following classes test the behaviour of Decimal according to PEP 327 class ExplicitConstructionTest(unittest.TestCase): '''Unit tests for Explicit Construction cases of Decimal.''' def test_explicit_empty(self): Decimal = self.decimal.Decimal self.assertEqual(Decimal(), Decimal("0")) def test_explicit_from_None(self): Decimal = self.decimal.Decimal self.assertRaises(TypeError, Decimal, None) def test_explicit_from_int(self): Decimal = self.decimal.Decimal #positive d = Decimal(45) self.assertEqual(str(d), '45') #very large positive d = Decimal(500000123) self.assertEqual(str(d), '500000123') #negative d = Decimal(-45) self.assertEqual(str(d), '-45') #zero d = Decimal(0) self.assertEqual(str(d), '0') # single word longs for n in range(0, 32): for sign in (-1, 1): for x in range(-5, 5): i = sign * (2**n + x) d = Decimal(i) self.assertEqual(str(d), str(i)) def test_explicit_from_string(self): Decimal = self.decimal.Decimal InvalidOperation = self.decimal.InvalidOperation localcontext = self.decimal.localcontext #empty self.assertEqual(str(Decimal('')), 'NaN') #int self.assertEqual(str(Decimal('45')), '45') #float self.assertEqual(str(Decimal('45.34')), '45.34') #engineer notation self.assertEqual(str(Decimal('45e2')), '4.5E+3') #just not a number self.assertEqual(str(Decimal('ugly')), 'NaN') #leading and trailing whitespace permitted self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4') self.assertEqual(str(Decimal(' -7.89')), '-7.89') self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679') with localcontext() as c: c.traps[InvalidOperation] = True # Invalid string self.assertRaises(InvalidOperation, Decimal, "xyz") # Two arguments max self.assertRaises(TypeError, Decimal, "1234", "x", "y") def test_explicit_from_tuples(self): Decimal = self.decimal.Decimal #zero d = Decimal( (0, (0,), 0) ) self.assertEqual(str(d), '0') #int d = Decimal( (1, (4, 5), 0) ) self.assertEqual(str(d), '-45') #float d = Decimal( (0, (4, 5, 3, 4), -2) ) self.assertEqual(str(d), '45.34') #weird d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) self.assertEqual(str(d), '-4.34913534E-17') #inf d = Decimal( (0, (), "F") ) self.assertEqual(str(d), 'Infinity') #wrong number of items self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) ) #bad sign self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) ) 
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) ) self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2)) #bad exp self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') ) self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) ) self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') ) #bad coefficients self.assertRaises(ValueError, Decimal, (1, "xyz", 2) ) self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) ) self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) ) self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) ) self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) ) def test_explicit_from_bool(self): Decimal = self.decimal.Decimal self.assertIs(bool(Decimal(0)), False) self.assertIs(bool(Decimal(1)), True) self.assertEqual(Decimal(False), Decimal(0)) self.assertEqual(Decimal(True), Decimal(1)) def test_explicit_from_Decimal(self): Decimal = self.decimal.Decimal #positive d = Decimal(45) e = Decimal(d) self.assertEqual(str(e), '45') #very large positive d = Decimal(500000123) e = Decimal(d) self.assertEqual(str(e), '500000123') #negative d = Decimal(-45) e = Decimal(d) self.assertEqual(str(e), '-45') #zero d = Decimal(0) e = Decimal(d) self.assertEqual(str(e), '0') @requires_IEEE_754 def test_explicit_from_float(self): if hasattr(C, 'setfailpoint'): random.seed(randseed) Decimal = self.decimal.Decimal localcontext = self.decimal.localcontext if self.decimal == C: FloatOperation = self.decimal.FloatOperation if py_minor <= 1: with localcontext() as c: # TypeError in 3.1 c.clear_flags() self.assertRaises(TypeError, Decimal, 7.5) self.assertFalse(c.flags[FloatOperation]) c.traps[FloatOperation] = True self.assertRaises(TypeError, Decimal, 7.5) self.assertFalse(c.flags[FloatOperation]) return else: with localcontext() as c: c.clear_flags() self.assertEqual(Decimal(7.5), 7.5) self.assertTrue(c.flags[FloatOperation]) c.traps[FloatOperation] = True self.assertRaises(FloatOperation, Decimal, 7.5) self.assertTrue(c.flags[FloatOperation]) r = Decimal.from_float(0.1) self.assertEqual(type(r), Decimal) self.assertEqual(str(r), '0.1000000000000000055511151231257827021181583404541015625') self.assertTrue(Decimal.from_float(float('nan')).is_qnan()) self.assertTrue(Decimal.from_float(float('inf')).is_infinite()) self.assertTrue(Decimal.from_float(float('-inf')).is_infinite()) self.assertEqual(str(Decimal.from_float(float('nan'))), str(Decimal('NaN'))) self.assertEqual(str(Decimal.from_float(float('inf'))), str(Decimal('Infinity'))) self.assertEqual(str(Decimal.from_float(float('-inf'))), str(Decimal('-Infinity'))) self.assertEqual(str(Decimal.from_float(float('-0.0'))), str(Decimal('-0'))) for i in range(200): x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0) self.assertEqual(x, float(Decimal.from_float(x))) # roundtrip def test_explicit_context_create_decimal(self): Decimal = self.decimal.Decimal InvalidOperation = self.decimal.InvalidOperation Rounded = self.decimal.Rounded nc = copy.copy(self.decimal.getcontext()) nc.prec = 3 # empty d = Decimal() self.assertEqual(str(d), '0') d = nc.create_decimal() self.assertEqual(str(d), '0') # from None self.assertRaises(TypeError, nc.create_decimal, None) # from int d = nc.create_decimal(456) self.assertTrue(isinstance(d, Decimal)) self.assertEqual(nc.create_decimal(45678), nc.create_decimal('457E+2')) # from string d = Decimal('456789') self.assertEqual(str(d), '456789') d = nc.create_decimal('456789') self.assertEqual(str(d), 
'4.57E+5') # leading and trailing whitespace should result in a NaN; # spaces are already checked in Cowlishaw's test-suite, so # here we just check that a trailing newline results in a NaN self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN') # from tuples d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) self.assertEqual(str(d), '-4.34913534E-17') d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) self.assertEqual(str(d), '-4.35E-17') # from Decimal prevdec = Decimal(500000123) d = Decimal(prevdec) self.assertEqual(str(d), '500000123') d = nc.create_decimal(prevdec) self.assertEqual(str(d), '5.00E+8') # more integers nc.prec = 28 nc.traps[InvalidOperation] = True for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0, 2**31-1, 2**31, 2**63-1, 2**63]: d = nc.create_decimal(v) self.assertTrue(isinstance(d, Decimal)) self.assertEqual(int(d), v) nc.prec = 3 nc.traps[Rounded] = True self.assertRaises(Rounded, nc.create_decimal, 1234) # from string nc.prec = 28 self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17') self.assertEqual(str(nc.create_decimal('45')), '45') self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity') self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123') # invalid arguments self.assertRaises(InvalidOperation, nc.create_decimal, "xyz") self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25)) self.assertRaises(TypeError, nc.create_decimal, "1234", "5678") # too many NaN payload digits nc.prec = 3 self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345') self.assertRaises(InvalidOperation, nc.create_decimal, Decimal('NaN12345')) nc.traps[InvalidOperation] = False self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN') self.assertTrue(nc.flags[InvalidOperation]) nc.flags[InvalidOperation] = False self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN') self.assertTrue(nc.flags[InvalidOperation]) def test_explicit_context_create_from_float(self): if hasattr(C, 'setfailpoint'): random.seed(randseed) Decimal = self.decimal.Decimal Context = self.decimal.Context nc = Context() if self.decimal == C: FloatOperation = self.decimal.FloatOperation if py_minor <= 1: # TypeError in 3.1 nc.clear_flags() self.assertRaises(TypeError, nc.create_decimal, 7.5) self.assertFalse(nc.flags[FloatOperation]) nc.traps[FloatOperation] = True self.assertRaises(TypeError, nc.create_decimal, 7.5) self.assertFalse(nc.flags[FloatOperation]) nc.traps[FloatOperation] = False return else: nc.clear_flags() self.assertEqual(nc.create_decimal(7.5), 7.5) self.assertTrue(nc.flags[FloatOperation]) nc.traps[FloatOperation] = True self.assertRaises(FloatOperation, nc.create_decimal, 7.5) self.assertTrue(nc.flags[FloatOperation]) nc.traps[FloatOperation] = False r = nc.create_decimal_from_float(0.1) self.assertEqual(type(r), Decimal) self.assertEqual(str(r), '0.1000000000000000055511151231') self.assertTrue(nc.create_decimal_from_float(float('nan')).is_qnan()) self.assertTrue(nc.create_decimal_from_float(float('inf')).is_infinite()) self.assertTrue(nc.create_decimal_from_float(float('-inf')).is_infinite()) self.assertEqual(str(nc.create_decimal_from_float(float('nan'))), str(nc.create_decimal('NaN'))) self.assertEqual(str(nc.create_decimal_from_float(float('inf'))), str(nc.create_decimal('Infinity'))) self.assertEqual(str(nc.create_decimal_from_float(float('-inf'))), str(nc.create_decimal('-Infinity'))) self.assertEqual(str(nc.create_decimal_from_float(float('-0.0'))), str(nc.create_decimal('-0'))) nc.prec = 100 for i in range(200): x = 
random.expovariate(0.01) * (random.random() * 2.0 - 1.0) self.assertEqual(x, float(nc.create_decimal_from_float(x))) # roundtrip def test_unicode_digits(self): Decimal = self.decimal.Decimal test_values = { '\uff11': '1', '\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372', '-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400', } for input, expected in test_values.items(): self.assertEqual(str(Decimal(input)), expected) class CExplicitConstructionTest(ExplicitConstructionTest): decimal = C class PyExplicitConstructionTest(ExplicitConstructionTest): decimal = P class ImplicitConstructionTest(unittest.TestCase): '''Unit tests for Implicit Construction cases of Decimal.''' def test_implicit_from_None(self): Decimal = self.decimal.Decimal self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals()) def test_implicit_from_int(self): Decimal = self.decimal.Decimal #normal self.assertEqual(str(Decimal(5) + 45), '50') #exceeding precision self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000)) def test_implicit_from_string(self): Decimal = self.decimal.Decimal self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals()) def test_implicit_from_float(self): Decimal = self.decimal.Decimal self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals()) def test_implicit_from_Decimal(self): Decimal = self.decimal.Decimal self.assertEqual(Decimal(5) + Decimal(45), Decimal(50)) def test_rop(self): Decimal = self.decimal.Decimal # Allow other classes to be trained to interact with Decimals class E: def __divmod__(self, other): return 'divmod ' + str(other) def __rdivmod__(self, other): return str(other) + ' rdivmod' def __lt__(self, other): return 'lt ' + str(other) def __gt__(self, other): return 'gt ' + str(other) def __le__(self, other): return 'le ' + str(other) def __ge__(self, other): return 'ge ' + str(other) def __eq__(self, other): return 'eq ' + str(other) def __ne__(self, other): return 'ne ' + str(other) self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10') self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod') self.assertEqual(eval('Decimal(10) < E()'), 'gt 10') self.assertEqual(eval('Decimal(10) > E()'), 'lt 10') self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10') self.assertEqual(eval('Decimal(10) >= E()'), 'le 10') self.assertEqual(eval('Decimal(10) == E()'), 'eq 10') self.assertEqual(eval('Decimal(10) != E()'), 'ne 10') # insert operator methods and then exercise them oplist = [ ('+', '__add__', '__radd__'), ('-', '__sub__', '__rsub__'), ('*', '__mul__', '__rmul__'), ('/', '__truediv__', '__rtruediv__'), ('%', '__mod__', '__rmod__'), ('//', '__floordiv__', '__rfloordiv__'), ('**', '__pow__', '__rpow__') ] for sym, lop, rop in oplist: setattr(E, lop, lambda self, other: 'str' + lop + str(other)) setattr(E, rop, lambda self, other: str(other) + rop + 'str') self.assertEqual(eval('E()' + sym + 'Decimal(10)'), 'str' + lop + '10') self.assertEqual(eval('Decimal(10)' + sym + 'E()'), '10' + rop + 'str') class CImplicitConstructionTest(ImplicitConstructionTest): decimal = C class PyImplicitConstructionTest(ImplicitConstructionTest): decimal = P class FormatTest(unittest.TestCase): '''Unit tests for the format function.''' def test_formatting(self): Decimal = self.decimal.Decimal if py_minor <= 1 and self.decimal == P: raise unittest.SkipTest("requires Python 3.2") # triples giving a format, a Decimal, and the expected result test_values = [ ('e', '0E-15', '0e-15'), ('e', '2.3E-15', '2.3e-15'), ('e', '2.30E+2', '2.30e+2'), # preserve significant zeros ('e', '2.30000E-15', 
'2.30000e-15'), ('e', '1.23456789123456789e40', '1.23456789123456789e+40'), ('e', '1.5', '1.5e+0'), ('e', '0.15', '1.5e-1'), ('e', '0.015', '1.5e-2'), ('e', '0.0000000000015', '1.5e-12'), ('e', '15.0', '1.50e+1'), ('e', '-15', '-1.5e+1'), ('e', '0', '0e+0'), ('e', '0E1', '0e+1'), ('e', '0.0', '0e-1'), ('e', '0.00', '0e-2'), ('.6e', '0E-15', '0.000000e-9'), ('.6e', '0', '0.000000e+6'), ('.6e', '9.999999', '9.999999e+0'), ('.6e', '9.9999999', '1.000000e+1'), ('.6e', '-1.23e5', '-1.230000e+5'), ('.6e', '1.23456789e-3', '1.234568e-3'), ('f', '0', '0'), ('f', '0.0', '0.0'), ('f', '0E-2', '0.00'), ('f', '0.00E-8', '0.0000000000'), ('f', '0E1', '0'), # loses exponent information ('f', '3.2E1', '32'), ('f', '3.2E2', '320'), ('f', '3.20E2', '320'), ('f', '3.200E2', '320.0'), ('f', '3.2E-6', '0.0000032'), ('.6f', '0E-15', '0.000000'), # all zeros treated equally ('.6f', '0E1', '0.000000'), ('.6f', '0', '0.000000'), ('.0f', '0', '0'), # no decimal point ('.0f', '0e-2', '0'), ('.0f', '3.14159265', '3'), ('.1f', '3.14159265', '3.1'), ('.4f', '3.14159265', '3.1416'), ('.6f', '3.14159265', '3.141593'), ('.7f', '3.14159265', '3.1415926'), # round-half-even! ('.8f', '3.14159265', '3.14159265'), ('.9f', '3.14159265', '3.141592650'), ('g', '0', '0'), ('g', '0.0', '0.0'), ('g', '0E1', '0e+1'), ('G', '0E1', '0E+1'), ('g', '0E-5', '0.00000'), ('g', '0E-6', '0.000000'), ('g', '0E-7', '0e-7'), ('g', '-0E2', '-0e+2'), ('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig ('.1g', '3.14159265', '3'), ('.2g', '3.14159265', '3.1'), ('.5g', '3.14159265', '3.1416'), ('.7g', '3.14159265', '3.141593'), ('.8g', '3.14159265', '3.1415926'), # round-half-even! ('.9g', '3.14159265', '3.14159265'), ('.10g', '3.14159265', '3.14159265'), # don't pad ('%', '0E1', '0%'), ('%', '0E0', '0%'), ('%', '0E-1', '0%'), ('%', '0E-2', '0%'), ('%', '0E-3', '0.0%'), ('%', '0E-4', '0.00%'), ('.3%', '0', '0.000%'), # all zeros treated equally ('.3%', '0E10', '0.000%'), ('.3%', '0E-10', '0.000%'), ('.3%', '2.34', '234.000%'), ('.3%', '1.234567', '123.457%'), ('.0%', '1.23', '123%'), ('e', 'NaN', 'NaN'), ('f', '-NaN123', '-NaN123'), ('+g', 'NaN456', '+NaN456'), ('.3e', 'Inf', 'Infinity'), ('.16f', '-Inf', '-Infinity'), ('.0g', '-sNaN', '-sNaN'), ('', '1.00', '1.00'), # test alignment and padding ('6', '123', ' 123'), ('<6', '123', '123 '), ('>6', '123', ' 123'), ('^6', '123', ' 123 '), ('=+6', '123', '+ 123'), ('#<10', 'NaN', 'NaN#######'), ('#<10', '-4.3', '-4.3######'), ('#<+10', '0.0130', '+0.0130###'), ('#< 10', '0.0130', ' 0.0130###'), ('@>10', '-Inf', '@-Infinity'), ('#>5', '-Inf', '-Infinity'), ('?^5', '123', '?123?'), ('%^6', '123', '%123%%'), (' ^6', '-45.6', '-45.6 '), ('/=10', '-45.6', '-/////45.6'), ('/=+10', '45.6', '+/////45.6'), ('/= 10', '45.6', ' /////45.6'), # thousands separator (',', '1234567', '1,234,567'), (',', '123456', '123,456'), (',', '12345', '12,345'), (',', '1234', '1,234'), (',', '123', '123'), (',', '12', '12'), (',', '1', '1'), (',', '0', '0'), (',', '-1234567', '-1,234,567'), (',', '-123456', '-123,456'), ('7,', '123456', '123,456'), ('8,', '123456', ' 123,456'), ('08,', '123456', '0,123,456'), # special case: extra 0 needed ('+08,', '123456', '+123,456'), # but not if there's a sign (' 08,', '123456', ' 123,456'), ('08,', '-123456', '-123,456'), ('+09,', '123456', '+0,123,456'), # ... with fractional part... 
('07,', '1234.56', '1,234.56'), ('08,', '1234.56', '1,234.56'), ('09,', '1234.56', '01,234.56'), ('010,', '1234.56', '001,234.56'), ('011,', '1234.56', '0,001,234.56'), ('012,', '1234.56', '0,001,234.56'), ('08,.1f', '1234.5', '01,234.5'), # no thousands separators in fraction part (',', '1.23456789', '1.23456789'), (',%', '123.456789', '12,345.6789%'), (',e', '123456', '1.23456e+5'), (',E', '123456', '1.23456E+5'), # issue 6850 ('a=-7.0', '0.12345', 'aaaa0.1'), ] for fmt, d, result in test_values: self.assertEqual(format(Decimal(d), fmt), result) # bytes format argument self.assertRaises(TypeError, Decimal(1).__format__, b'-020') def test_n_format(self): Decimal = self.decimal.Decimal try: from locale import CHAR_MAX except ImportError: return def make_grouping(lst): return ''.join([chr(x) for x in lst]) if self.decimal == C else lst def get_fmt(x, override=None, fmt='n'): if self.decimal == C: return Decimal(x).__format__(fmt, override) else: return Decimal(x).__format__(fmt, _localeconv=override) # Set up some localeconv-like dictionaries en_US = { 'decimal_point' : '.', 'grouping' : make_grouping([3, 3, 0]), 'thousands_sep' : ',' } fr_FR = { 'decimal_point' : ',', 'grouping' : make_grouping([CHAR_MAX]), 'thousands_sep' : '' } ru_RU = { 'decimal_point' : ',', 'grouping': make_grouping([3, 3, 0]), 'thousands_sep' : ' ' } crazy = { 'decimal_point' : '&', 'grouping': make_grouping([1, 4, 2, CHAR_MAX]), 'thousands_sep' : '-' } dotsep_wide = { 'decimal_point' : b'\xc2\xbf'.decode('utf-8'), 'grouping': make_grouping([3, 3, 0]), 'thousands_sep' : b'\xc2\xb4'.decode('utf-8') } self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7') self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7') self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7') self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7') self.assertEqual(get_fmt(123456789, en_US), '123,456,789') self.assertEqual(get_fmt(123456789, fr_FR), '123456789') self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789') self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3') self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8') self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8') self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8') self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8') # zero padding self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234') self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234') self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234') self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234') self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345') self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345') self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345') self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345') self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345') self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345') self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6') self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6') self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6') self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6') self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6') self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6') self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6') self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6') # wide char separator and decimal point self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'), '-0´000´000´000´001¿5') def 
test_wide_char_separator_decimal_point(self): # locale with wide char separator and decimal point Decimal = self.decimal.Decimal try: locale.setlocale(locale.LC_ALL, 'ps_AF') except locale.Error: return self.assertEqual(format(Decimal('100000000.123'), 'n'), '100٬000٬000٫123') locale.resetlocale() class CFormatTest(FormatTest): decimal = C class PyFormatTest(FormatTest): decimal = P class ArithmeticOperatorsTest(unittest.TestCase): '''Unit tests for all arithmetic operators, binary and unary.''' def test_addition(self): Decimal = self.decimal.Decimal d1 = Decimal('-11.1') d2 = Decimal('22.2') #two Decimals self.assertEqual(d1+d2, Decimal('11.1')) self.assertEqual(d2+d1, Decimal('11.1')) #with other type, left c = d1 + 5 self.assertEqual(c, Decimal('-6.1')) self.assertEqual(type(c), type(d1)) #with other type, right c = 5 + d1 self.assertEqual(c, Decimal('-6.1')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 += d2 self.assertEqual(d1, Decimal('11.1')) #inline with other type d1 += 5 self.assertEqual(d1, Decimal('16.1')) def test_subtraction(self): Decimal = self.decimal.Decimal d1 = Decimal('-11.1') d2 = Decimal('22.2') #two Decimals self.assertEqual(d1-d2, Decimal('-33.3')) self.assertEqual(d2-d1, Decimal('33.3')) #with other type, left c = d1 - 5 self.assertEqual(c, Decimal('-16.1')) self.assertEqual(type(c), type(d1)) #with other type, right c = 5 - d1 self.assertEqual(c, Decimal('16.1')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 -= d2 self.assertEqual(d1, Decimal('-33.3')) #inline with other type d1 -= 5 self.assertEqual(d1, Decimal('-38.3')) def test_multiplication(self): Decimal = self.decimal.Decimal d1 = Decimal('-5') d2 = Decimal('3') #two Decimals self.assertEqual(d1*d2, Decimal('-15')) self.assertEqual(d2*d1, Decimal('-15')) #with other type, left c = d1 * 5 self.assertEqual(c, Decimal('-25')) self.assertEqual(type(c), type(d1)) #with other type, right c = 5 * d1 self.assertEqual(c, Decimal('-25')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 *= d2 self.assertEqual(d1, Decimal('-15')) #inline with other type d1 *= 5 self.assertEqual(d1, Decimal('-75')) def test_division(self): Decimal = self.decimal.Decimal d1 = Decimal('-5') d2 = Decimal('2') #two Decimals self.assertEqual(d1/d2, Decimal('-2.5')) self.assertEqual(d2/d1, Decimal('-0.4')) #with other type, left c = d1 / 4 self.assertEqual(c, Decimal('-1.25')) self.assertEqual(type(c), type(d1)) #with other type, right c = 4 / d1 self.assertEqual(c, Decimal('-0.8')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 /= d2 self.assertEqual(d1, Decimal('-2.5')) #inline with other type d1 /= 4 self.assertEqual(d1, Decimal('-0.625')) def test_floor_division(self): Decimal = self.decimal.Decimal d1 = Decimal('5') d2 = Decimal('2') #two Decimals self.assertEqual(d1//d2, Decimal('2')) self.assertEqual(d2//d1, Decimal('0')) #with other type, left c = d1 // 4 self.assertEqual(c, Decimal('1')) self.assertEqual(type(c), type(d1)) #with other type, right c = 7 // d1 self.assertEqual(c, Decimal('1')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 //= d2 self.assertEqual(d1, Decimal('2')) #inline with other type d1 //= 2 self.assertEqual(d1, Decimal('1')) def test_powering(self): Decimal = self.decimal.Decimal d1 = Decimal('5') d2 = Decimal('2') #two Decimals self.assertEqual(d1**d2, Decimal('25')) self.assertEqual(d2**d1, Decimal('32')) #with other type, left c = d1 ** 4 self.assertEqual(c, Decimal('625')) self.assertEqual(type(c), type(d1)) #with other type, right c = 7 ** d1 
self.assertEqual(c, Decimal('16807')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 **= d2 self.assertEqual(d1, Decimal('25')) #inline with other type d1 **= 4 self.assertEqual(d1, Decimal('390625')) def test_module(self): Decimal = self.decimal.Decimal d1 = Decimal('5') d2 = Decimal('2') #two Decimals self.assertEqual(d1%d2, Decimal('1')) self.assertEqual(d2%d1, Decimal('2')) #with other type, left c = d1 % 4 self.assertEqual(c, Decimal('1')) self.assertEqual(type(c), type(d1)) #with other type, right c = 7 % d1 self.assertEqual(c, Decimal('2')) self.assertEqual(type(c), type(d1)) #inline with decimal d1 %= d2 self.assertEqual(d1, Decimal('1')) #inline with other type d1 %= 4 self.assertEqual(d1, Decimal('1')) def test_floor_div_module(self): Decimal = self.decimal.Decimal d1 = Decimal('5') d2 = Decimal('2') #two Decimals (p, q) = divmod(d1, d2) self.assertEqual(p, Decimal('2')) self.assertEqual(q, Decimal('1')) self.assertEqual(type(p), type(d1)) self.assertEqual(type(q), type(d1)) #with other type, left (p, q) = divmod(d1, 4) self.assertEqual(p, Decimal('1')) self.assertEqual(q, Decimal('1')) self.assertEqual(type(p), type(d1)) self.assertEqual(type(q), type(d1)) #with other type, right (p, q) = divmod(7, d1) self.assertEqual(p, Decimal('1')) self.assertEqual(q, Decimal('2')) self.assertEqual(type(p), type(d1)) self.assertEqual(type(q), type(d1)) def test_unary_operators(self): Decimal = self.decimal.Decimal self.assertEqual(+Decimal(45), Decimal(+45)) # + self.assertEqual(-Decimal(45), Decimal(-45)) # - self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs def test_nan_comparisons(self): # comparisons involving signaling nans signal InvalidOperation # order comparisons (<, <=, >, >=) involving only quiet nans # also signal InvalidOperation # equality comparisons (==, !=) involving only quiet nans # don't signal, but return False or True respectively. 
Decimal = self.decimal.Decimal InvalidOperation = self.decimal.InvalidOperation localcontext = self.decimal.localcontext if py_minor <= 1 and self.decimal == P: raise unittest.SkipTest("requires Python 3.2") n = Decimal('NaN') s = Decimal('sNaN') i = Decimal('Inf') f = Decimal('2') qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n) snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s) order_ops = operator.lt, operator.le, operator.gt, operator.ge equality_ops = operator.eq, operator.ne # results when InvalidOperation is not trapped for x, y in qnan_pairs + snan_pairs: for op in order_ops + equality_ops: got = op(x, y) expected = True if op is operator.ne else False self.assertIs(expected, got, "expected {0!r} for operator.{1}({2!r}, {3!r}); " "got {4!r}".format( expected, op.__name__, x, y, got)) # repeat the above, but this time trap the InvalidOperation with localcontext() as ctx: ctx.traps[InvalidOperation] = 1 for x, y in qnan_pairs: for op in equality_ops: got = op(x, y) expected = True if op is operator.ne else False self.assertIs(expected, got, "expected {0!r} for " "operator.{1}({2!r}, {3!r}); " "got {4!r}".format( expected, op.__name__, x, y, got)) for x, y in snan_pairs: for op in equality_ops: self.assertRaises(InvalidOperation, operator.eq, x, y) self.assertRaises(InvalidOperation, operator.ne, x, y) for x, y in qnan_pairs + snan_pairs: for op in order_ops: self.assertRaises(InvalidOperation, op, x, y) def test_copy_sign(self): Decimal = self.decimal.Decimal if py_minor <= 1 and self.decimal == P: raise unittest.SkipTest("requires Python 3.2") d = Decimal(1).copy_sign(Decimal(-2)) self.assertEqual(Decimal(1).copy_sign(-2), d) self.assertRaises(TypeError, Decimal(1).copy_sign, '-2') class CArithmeticOperatorsTest(ArithmeticOperatorsTest): decimal = C class PyArithmeticOperatorsTest(ArithmeticOperatorsTest): decimal = P # The following are two functions used to test threading in the next class def thfunc1(cls): Decimal = cls.decimal.Decimal InvalidOperation = cls.decimal.InvalidOperation DivisionByZero = cls.decimal.DivisionByZero Overflow = cls.decimal.Overflow Underflow = cls.decimal.Underflow Inexact = cls.decimal.Inexact getcontext = cls.decimal.getcontext localcontext = cls.decimal.localcontext d1 = Decimal(1) d3 = Decimal(3) test1 = d1/d3 cls.finish1.set() cls.synchro.wait() test2 = d1/d3 with localcontext() as c2: cls.assertTrue(c2.flags[Inexact]) cls.assertRaises(DivisionByZero, c2.divide, d1, 0) cls.assertTrue(c2.flags[DivisionByZero]) with localcontext() as c3: cls.assertTrue(c3.flags[Inexact]) cls.assertTrue(c3.flags[DivisionByZero]) cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN')) cls.assertTrue(c3.flags[InvalidOperation]) del c3 cls.assertFalse(c2.flags[InvalidOperation]) del c2 cls.assertEqual(test1, Decimal('0.333333333333333333333333')) cls.assertEqual(test2, Decimal('0.333333333333333333333333')) c1 = getcontext() cls.assertTrue(c1.flags[Inexact]) for sig in Overflow, Underflow, DivisionByZero, InvalidOperation: cls.assertFalse(c1.flags[sig]) return def thfunc2(cls): Decimal = cls.decimal.Decimal InvalidOperation = cls.decimal.InvalidOperation DivisionByZero = cls.decimal.DivisionByZero Overflow = cls.decimal.Overflow Underflow = cls.decimal.Underflow Inexact = cls.decimal.Inexact getcontext = cls.decimal.getcontext localcontext = cls.decimal.localcontext d1 = Decimal(1) d3 = Decimal(3) test1 = d1/d3 thiscontext = getcontext() thiscontext.prec = 18 test2 = d1/d3 with localcontext() as c2: cls.assertTrue(c2.flags[Inexact]) 
cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999) cls.assertTrue(c2.flags[Overflow]) with localcontext(thiscontext) as c3: cls.assertTrue(c3.flags[Inexact]) cls.assertFalse(c3.flags[Overflow]) c3.traps[Underflow] = True cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999) cls.assertTrue(c3.flags[Underflow]) del c3 cls.assertFalse(c2.flags[Underflow]) cls.assertFalse(c2.traps[Underflow]) del c2 cls.synchro.set() cls.finish2.set() cls.assertEqual(test1, Decimal('0.333333333333333333333333')) cls.assertEqual(test2, Decimal('0.333333333333333333')) cls.assertFalse(thiscontext.traps[Underflow]) cls.assertTrue(thiscontext.flags[Inexact]) for sig in Overflow, Underflow, DivisionByZero, InvalidOperation: cls.assertFalse(thiscontext.flags[sig]) return class ThreadingTest(unittest.TestCase): '''Unit tests for thread local contexts in Decimal.''' # Take care executing this test from IDLE, there's an issue in threading # that hangs IDLE and I couldn't find it def test_threading(self): DefaultContext = self.decimal.DefaultContext if self.decimal == C and not self.decimal.HAVE_THREADS: self.skipTest("compiled without threading") # Test the "threading isolation" of a Context. Also test changing # the DefaultContext, which acts as a template for the thread-local # contexts. save_prec = DefaultContext.prec save_emax = DefaultContext.Emax save_emin = DefaultContext.Emin DefaultContext.prec = 24 DefaultContext.Emax = 425000000 DefaultContext.Emin = -425000000 with protectfail(): self.synchro = threading.Event() self.finish1 = threading.Event() self.finish2 = threading.Event() th1 = threading.Thread(target=thfunc1, args=(self,)) th2 = threading.Thread(target=thfunc2, args=(self,)) th1.start() th2.start() self.finish1.wait() self.finish2.wait() for sig in Signals[self.decimal]: self.assertFalse(DefaultContext.flags[sig]) DefaultContext.prec = save_prec DefaultContext.Emax = save_emax DefaultContext.Emin = save_emin return @unittest.skipUnless(threading, 'threading required') class CThreadingTest(ThreadingTest): decimal = C @unittest.skipUnless(threading, 'threading required') class PyThreadingTest(ThreadingTest): decimal = P class UsabilityTest(unittest.TestCase): '''Unit tests for Usability cases of Decimal.''' def test_comparison_operators(self): if hasattr(C, 'setfailpoint'): random.seed(randseed) Decimal = self.decimal.Decimal da = Decimal('23.42') db = Decimal('23.42') dc = Decimal('45') #two Decimals self.assertGreater(dc, da) self.assertGreaterEqual(dc, da) self.assertLess(da, dc) self.assertLessEqual(da, dc) self.assertEqual(da, db) self.assertNotEqual(da, dc) self.assertLessEqual(da, db) self.assertGreaterEqual(da, db) #a Decimal and an int self.assertGreater(dc, 23) self.assertLess(23, dc) self.assertEqual(dc, 45) #a Decimal and uncomparable self.assertNotEqual(da, 'ugly') self.assertNotEqual(da, 32.7) self.assertNotEqual(da, object()) self.assertNotEqual(da, object) # sortable a = list(map(Decimal, range(100))) b = a[:] random.shuffle(a) a.sort() self.assertEqual(a, b) def test_decimal_float_comparison(self): Decimal = self.decimal.Decimal Context = self.decimal.Context localcontext = self.decimal.localcontext def assert_attr(a, b, attr, context, signal=None): context.clear_flags() f = getattr(a, attr) if self.decimal == C: if signal == C.FloatOperation: self.assertRaises(signal, f, b) elif py_minor <= 1: # The actual TypeError is raised by # the caller of the comparison methods. 
self.assertIs(f(b), NotImplemented) else: self.assertIs(f(b), True) self.assertTrue(context.flags[C.FloatOperation]) else: if py_minor <= 1: self.assertIs(f(b), NotImplemented) else: self.assertIs(f(b), True) small_d = Decimal('0.25') big_d = Decimal('3.0') small_f = 0.25 big_f = 3.0 zero_d = Decimal('0.0') neg_zero_d = Decimal('-0.0') zero_f = 0.0 neg_zero_f = -0.0 inf_d = Decimal('Infinity') neg_inf_d = Decimal('-Infinity') inf_f = float('inf') neg_inf_f = float('-inf') def doit(c, signal=None): # Order for attr in '__lt__', '__le__': assert_attr(small_d, big_f, attr, c, signal) for attr in '__gt__', '__ge__': assert_attr(big_d, small_f, attr, c, signal) # Equality assert_attr(small_d, small_f, '__eq__', c, None) assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None) assert_attr(neg_zero_d, zero_f, '__eq__', c, None) assert_attr(zero_d, neg_zero_f, '__eq__', c, None) assert_attr(zero_d, zero_f, '__eq__', c, None) assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None) assert_attr(inf_d, inf_f, '__eq__', c, None) # Inequality assert_attr(small_d, big_f, '__ne__', c, None) assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None) assert_attr(neg_inf_d, inf_f, '__ne__', c, None) assert_attr(inf_d, neg_inf_f, '__ne__', c, None) assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None) def test_containers(c, signal): c.clear_flags() s = set([100.0, Decimal('100.0')]) expected_len = 2 if py_minor <= 1 else 1 self.assertEqual(len(s), expected_len) self.assertTrue(c.flags[C.FloatOperation]) c.clear_flags() if signal: self.assertRaises(signal, sorted, [1.0, Decimal('10.0')]) else: s = sorted([10.0, Decimal('10.0')]) self.assertTrue(c.flags[C.FloatOperation]) c.clear_flags() b = 10.0 in [Decimal('10.0'), 1.0] self.assertTrue(c.flags[C.FloatOperation]) c.clear_flags() b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'} self.assertTrue(c.flags[C.FloatOperation]) if self.decimal == C: nc = Context() with localcontext(nc) as c: sig = TypeError if py_minor <= 1 else None self.assertFalse(c.traps[C.FloatOperation]) doit(c, signal=sig) test_containers(c, sig) c.traps[C.FloatOperation] = True doit(c, signal=C.FloatOperation) test_containers(c, C.FloatOperation) else: # decimal.py does not have the FloatOperation signal. 
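            # FloatOperation exists only in the C implementation exercised here: with
            # the trap set, ordering comparisons that mix Decimal and float raise
            # FloatOperation while equality comparisons stay silent; with the trap
            # clear, the mixed comparison succeeds and only the flag is recorded,
            # which is what doit()/test_containers verify above.  Rough sketch
            # (illustrative, not part of the test):
            #
            #     ctx = C.getcontext()
            #     ctx.traps[C.FloatOperation] = True
            #     C.Decimal("3.5") < 7.5        # raises FloatOperation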
            nc = Context()
            with localcontext(nc) as c:
                doit(c, signal=False)

    def test_decimal_complex_comparison(self):
        Decimal = self.decimal.Decimal

        da = Decimal('0.25')
        db = Decimal('3.0')
        self.assertNotEqual(da, (1.5+0j))
        self.assertNotEqual((1.5+0j), da)
        if py_minor >= 2:
            self.assertEqual(da, (0.25+0j))
            self.assertEqual((0.25+0j), da)
            self.assertEqual((3.0+0j), db)
            self.assertEqual(db, (3.0+0j))
        else:
            self.assertNotEqual(da, (0.25+0j))
            self.assertNotEqual((0.25+0j), da)
            self.assertNotEqual((3.0+0j), db)
            self.assertNotEqual(db, (3.0+0j))
        self.assertNotEqual(db, (3.0+1j))
        self.assertNotEqual((3.0+1j), db)
        self.assertIs(db.__lt__(3.0+0j), NotImplemented)
        self.assertIs(db.__le__(3.0+0j), NotImplemented)
        self.assertIs(db.__gt__(3.0+0j), NotImplemented)
        self.assertIs(db.__ge__(3.0+0j), NotImplemented)

    def test_decimal_fraction_comparison(self):
        D = self.decimal.Decimal
        F = fractions.Fraction
        Context = self.decimal.Context
        localcontext = self.decimal.localcontext
        InvalidOperation = self.decimal.InvalidOperation

        emax = C.MAX_EMAX if C else 999999999
        emin = C.MIN_EMIN if C else -999999999
        etiny = C.MIN_ETINY if C else -1999999997
        c = Context(Emax=emax, Emin=emin)

        with localcontext(c):
            c.prec = emax
            if py_minor >= 2:
                self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
                self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
                self.assertLess(F(0,1), D("1e" + str(etiny)))
                self.assertLess(D("-1e" + str(etiny)), F(0,1))
                self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
                self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))
            else:
                self.assertIs(NotImplemented, D(0).__lt__(F(1,1)))
                self.assertIs(NotImplemented, D(0).__le__(F(1,1)))
                self.assertIs(NotImplemented, D(0).__gt__(F(1,1)))
                self.assertIs(NotImplemented, D(0).__ge__(F(1,1)))

            if py_minor >= 2:
                self.assertEqual(D("0.1"), F(1,10))
                self.assertEqual(F(1,10), D("0.1"))
            else:
                self.assertNotEqual(D("0.1"), F(1,10))
                self.assertNotEqual(F(1,10), D("0.1"))

            c.prec = 300
            self.assertNotEqual(D(1)/3, F(1,3))
            self.assertNotEqual(F(1,3), D(1)/3)

            if py_minor >= 2:
                self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
                self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))

                self.assertGreater(D('inf'), F(99999999999,123))
                self.assertGreater(D('inf'), F(-99999999999,123))
                self.assertLess(D('-inf'), F(99999999999,123))
                self.assertLess(D('-inf'), F(-99999999999,123))

                self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
                self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
                self.assertNotEqual(D('nan'), F(-9,123))
                self.assertNotEqual(F(-9,123), D('nan'))

    def test_copy_and_deepcopy_methods(self):
        Decimal = self.decimal.Decimal

        d = Decimal('43.24')
        c = copy.copy(d)
        self.assertEqual(id(c), id(d))
        dc = copy.deepcopy(d)
        self.assertEqual(id(dc), id(d))

    def test_hash_method(self):
        if hasattr(C, 'setfailpoint'):
            random.seed(randseed)
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext

        def hashit(d):
            a = hash(d)
            b = d.__hash__()
            self.assertEqual(a, b)
            return a

        # just that it's hashable
        hashit(Decimal(23))
        hashit(Decimal('Infinity'))
        hashit(Decimal('-Infinity'))
        if py_minor >= 2:
            hashit(Decimal('nan123'))
            hashit(Decimal('-NaN'))

        test_values = [Decimal(sign*(2**m + n))
                       for m in [0, 14, 15, 16, 17, 30, 31, 32, 33,
                                 61, 62, 63, 64, 65, 66]
                       for n in range(-10, 10)
                       for sign in [-1, 1]]
        if hasattr(C, 'setfailpoint'):
            test_values = random.sample(test_values, 10)
        test_values.extend([
            Decimal("-1"), # ==> -2
            Decimal("-0"), # zeros
            Decimal("0.00"),
            Decimal("-0.000"),
Decimal("0E10"), Decimal("-0E12"), Decimal("10.0"), # negative exponent Decimal("-23.00000"), Decimal("1230E100"), # positive exponent Decimal("-4.5678E50"), # a value for which hash(n) != hash(n % (2**64-1)) # in Python pre-2.6 Decimal(2**64 + 2**32 - 1), # selection of values which fail with the old (before # version 2.6) long.__hash__ Decimal("1.634E100"), Decimal("90.697E100"), Decimal("188.83E100"), Decimal("1652.9E100"), Decimal("56531E100"), ]) # check that hash(d) == hash(int(d)) for integral values for value in test_values: self.assertEqual(hashit(value), hashit(int(value))) #the same hash that to an int self.assertEqual(hashit(Decimal(23)), hashit(23)) self.assertRaises(TypeError, hash, Decimal('sNaN')) self.assertTrue(hashit(Decimal('Inf'))) self.assertTrue(hashit(Decimal('-Inf'))) if py_minor >= 2: # check that the hashes of a Decimal float match when they # represent exactly the same values test_strings = ['inf', '-Inf', '0.0', '-.0e1', '34.0', '2.5', '112390.625', '-0.515625'] for s in test_strings: f = float(s) d = Decimal(s) self.assertEqual(hashit(f), hashit(d)) with localcontext() as c: # check that the value of the hash doesn't depend on the # current context (issue #1757) x = Decimal("123456789.1") c.prec = 6 h1 = hashit(x) c.prec = 10 h2 = hashit(x) c.prec = 16 h3 = hashit(x) self.assertEqual(h1, h2) self.assertEqual(h1, h3) c.prec = 10000 x = 1100 ** 1248 self.assertEqual(hashit(Decimal(x)), hashit(x)) def test_min_and_max_methods(self): Decimal = self.decimal.Decimal d1 = Decimal('15.32') d2 = Decimal('28.5') l1 = 15 l2 = 28 #between Decimals self.assertIs(min(d1,d2), d1) self.assertIs(min(d2,d1), d1) self.assertIs(max(d1,d2), d2) self.assertIs(max(d2,d1), d2) #between Decimal and long self.assertIs(min(d1,l2), d1) self.assertIs(min(l2,d1), d1) self.assertIs(max(l1,d2), d2) self.assertIs(max(d2,l1), d2) def test_as_nonzero(self): Decimal = self.decimal.Decimal #as false self.assertFalse(Decimal(0)) #as true self.assertTrue(Decimal('0.372')) def test_tostring_methods(self): #Test str and repr methods. Decimal = self.decimal.Decimal d = Decimal('15.32') self.assertEqual(str(d), '15.32') # str self.assertEqual(repr(d), "Decimal('15.32')") # repr def test_tonum_methods(self): #Test float and int methods. 
Decimal = self.decimal.Decimal d1 = Decimal('66') d2 = Decimal('15.32') #int self.assertEqual(int(d1), 66) self.assertEqual(int(d2), 15) #float self.assertEqual(float(d1), 66) self.assertEqual(float(d2), 15.32) #floor test_pairs = [ ('123.00', 123), ('3.2', 3), ('3.54', 3), ('3.899', 3), ('-2.3', -3), ('-11.0', -11), ('0.0', 0), ('-0E3', 0), ] for d, i in test_pairs: self.assertEqual(math.floor(Decimal(d)), i) self.assertRaises(ValueError, math.floor, Decimal('-NaN')) self.assertRaises(ValueError, math.floor, Decimal('sNaN')) self.assertRaises(ValueError, math.floor, Decimal('NaN123')) self.assertRaises(OverflowError, math.floor, Decimal('Inf')) self.assertRaises(OverflowError, math.floor, Decimal('-Inf')) #ceiling test_pairs = [ ('123.00', 123), ('3.2', 4), ('3.54', 4), ('3.899', 4), ('-2.3', -2), ('-11.0', -11), ('0.0', 0), ('-0E3', 0), ] for d, i in test_pairs: self.assertEqual(math.ceil(Decimal(d)), i) self.assertRaises(ValueError, math.ceil, Decimal('-NaN')) self.assertRaises(ValueError, math.ceil, Decimal('sNaN')) self.assertRaises(ValueError, math.ceil, Decimal('NaN123')) self.assertRaises(OverflowError, math.ceil, Decimal('Inf')) self.assertRaises(OverflowError, math.ceil, Decimal('-Inf')) #round, single argument test_pairs = [ ('123.00', 123), ('3.2', 3), ('3.54', 4), ('3.899', 4), ('-2.3', -2), ('-11.0', -11), ('0.0', 0), ('-0E3', 0), ('-3.5', -4), ('-2.5', -2), ('-1.5', -2), ('-0.5', 0), ('0.5', 0), ('1.5', 2), ('2.5', 2), ('3.5', 4), ] for d, i in test_pairs: self.assertEqual(round(Decimal(d)), i) self.assertRaises(ValueError, round, Decimal('-NaN')) self.assertRaises(ValueError, round, Decimal('sNaN')) self.assertRaises(ValueError, round, Decimal('NaN123')) self.assertRaises(OverflowError, round, Decimal('Inf')) self.assertRaises(OverflowError, round, Decimal('-Inf')) #round, two arguments; this is essentially equivalent #to quantize, which is already extensively tested test_triples = [ ('123.456', -4, '0E+4'), ('123.456', -3, '0E+3'), ('123.456', -2, '1E+2'), ('123.456', -1, '1.2E+2'), ('123.456', 0, '123'), ('123.456', 1, '123.5'), ('123.456', 2, '123.46'), ('123.456', 3, '123.456'), ('123.456', 4, '123.4560'), ('123.455', 2, '123.46'), ('123.445', 2, '123.44'), ('Inf', 4, 'NaN'), ('-Inf', -23, 'NaN'), ('sNaN314', 3, 'NaN314'), ] for d, n, r in test_triples: self.assertEqual(str(round(Decimal(d), n)), r) def test_eval_round_trip(self): Decimal = self.decimal.Decimal #with zero d = Decimal( (0, (0,), 0) ) self.assertEqual(d, eval(repr(d))) #int d = Decimal( (1, (4, 5), 0) ) self.assertEqual(d, eval(repr(d))) #float d = Decimal( (0, (4, 5, 3, 4), -2) ) self.assertEqual(d, eval(repr(d))) #weird d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) self.assertEqual(d, eval(repr(d))) def test_as_tuple(self): Decimal = self.decimal.Decimal #with zero d = Decimal(0) self.assertEqual(d.as_tuple(), (0, (0,), 0) ) #int d = Decimal(-45) self.assertEqual(d.as_tuple(), (1, (4, 5), 0) ) #complicated string d = Decimal("-4.34913534E-17") self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) # XXX non-compliant infinity payload. 
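        # as_tuple() returns DecimalTuple(sign, digits, exponent): sign is 0 or 1,
        # digits is a tuple of coefficient digits, and the exponent is an int for
        # finite values or one of 'F', 'n', 'N' for Infinity, qNaN and sNaN.  For
        # example (from the assertions in this test), Decimal(-45).as_tuple() is
        # (1, (4, 5), 0) and Decimal("-4.34913534E-17").as_tuple() is
        # (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25).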
d = Decimal("Infinity") self.assertEqual(d.as_tuple(), (0, (0,), 'F') ) #leading zeros in coefficient should be stripped d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) ) self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) ) d = Decimal( (1, (0, 0, 0), 37) ) self.assertEqual(d.as_tuple(), (1, (0,), 37)) d = Decimal( (1, (), 37) ) self.assertEqual(d.as_tuple(), (1, (0,), 37)) #leading zeros in NaN diagnostic info should be stripped d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') ) self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') ) d = Decimal( (1, (0, 0, 0), 'N') ) self.assertEqual(d.as_tuple(), (1, (), 'N') ) d = Decimal( (1, (), 'n') ) self.assertEqual(d.as_tuple(), (1, (), 'n') ) # XXX coefficient in infinity should raise an error if self.decimal == P: d = Decimal( (0, (4, 5, 3, 4), 'F') ) self.assertEqual(d.as_tuple(), (0, (0,), 'F')) d = Decimal( (1, (0, 2, 7, 1), 'F') ) self.assertEqual(d.as_tuple(), (1, (0,), 'F')) def test_subclassing(self): # Different behaviours when subclassing Decimal Decimal = self.decimal.Decimal class MyDecimal(Decimal): pass d1 = MyDecimal(1) d2 = MyDecimal(2) d = d1 + d2 self.assertIs(type(d), Decimal) d = d1.max(d2) self.assertIs(type(d), Decimal) d = copy.copy(d1) self.assertIs(type(d), MyDecimal) self.assertEqual(d, d1) d = copy.deepcopy(d1) self.assertIs(type(d), MyDecimal) self.assertEqual(d, d1) def test_implicit_context(self): Decimal = self.decimal.Decimal getcontext = self.decimal.getcontext # Check results when context given implicitly. (Issue 2478) c = getcontext() self.assertEqual(str(Decimal(0).sqrt()), str(c.sqrt(Decimal(0)))) def test_conversions_from_int(self): # Check that methods taking a second Decimal argument will # always accept an integer in place of a Decimal. Decimal = self.decimal.Decimal self.assertEqual(Decimal(4).compare(3), Decimal(4).compare(Decimal(3))) self.assertEqual(Decimal(4).compare_signal(3), Decimal(4).compare_signal(Decimal(3))) self.assertEqual(Decimal(4).compare_total(3), Decimal(4).compare_total(Decimal(3))) self.assertEqual(Decimal(4).compare_total_mag(3), Decimal(4).compare_total_mag(Decimal(3))) self.assertEqual(Decimal(10101).logical_and(1001), Decimal(10101).logical_and(Decimal(1001))) self.assertEqual(Decimal(10101).logical_or(1001), Decimal(10101).logical_or(Decimal(1001))) self.assertEqual(Decimal(10101).logical_xor(1001), Decimal(10101).logical_xor(Decimal(1001))) self.assertEqual(Decimal(567).max(123), Decimal(567).max(Decimal(123))) self.assertEqual(Decimal(567).max_mag(123), Decimal(567).max_mag(Decimal(123))) self.assertEqual(Decimal(567).min(123), Decimal(567).min(Decimal(123))) self.assertEqual(Decimal(567).min_mag(123), Decimal(567).min_mag(Decimal(123))) self.assertEqual(Decimal(567).next_toward(123), Decimal(567).next_toward(Decimal(123))) self.assertEqual(Decimal(1234).quantize(100), Decimal(1234).quantize(Decimal(100))) self.assertEqual(Decimal(768).remainder_near(1234), Decimal(768).remainder_near(Decimal(1234))) self.assertEqual(Decimal(123).rotate(1), Decimal(123).rotate(Decimal(1))) self.assertEqual(Decimal(1234).same_quantum(1000), Decimal(1234).same_quantum(Decimal(1000))) self.assertEqual(Decimal('9.123').scaleb(-100), Decimal('9.123').scaleb(Decimal(-100))) self.assertEqual(Decimal(456).shift(-1), Decimal(456).shift(Decimal(-1))) self.assertEqual(Decimal(-12).fma(Decimal(45), 67), Decimal(-12).fma(Decimal(45), Decimal(67))) self.assertEqual(Decimal(-12).fma(45, 67), Decimal(-12).fma(Decimal(45), Decimal(67))) self.assertEqual(Decimal(-12).fma(45, Decimal(67)), 
Decimal(-12).fma(Decimal(45), Decimal(67))) class CUsabilityTest(UsabilityTest): decimal = C class PyUsabilityTest(UsabilityTest): decimal = P class PythonAPItests(unittest.TestCase): def test_abc(self): Decimal = self.decimal.Decimal self.assertTrue(issubclass(Decimal, numbers.Number)) self.assertFalse(issubclass(Decimal, numbers.Real)) self.assertTrue(isinstance(Decimal(0), numbers.Number)) self.assertFalse(isinstance(Decimal(0), numbers.Real)) def test_pickle(self): Decimal = self.decimal.Decimal # Round trip d = Decimal('-3.141590000') p = pickle.dumps(d) e = pickle.loads(p) self.assertEqual(d, e) def test_int(self): Decimal = self.decimal.Decimal ROUND_DOWN = self.decimal.ROUND_DOWN lim = 10 if hasattr(C, 'setfailpoint') else 250 for x in range(-lim, lim): s = '%0.2f' % (x / 100.0) # should work the same as for floats self.assertEqual(int(Decimal(s)), int(float(s))) # should work the same as to_integral in the ROUND_DOWN mode d = Decimal(s) r = d.to_integral(ROUND_DOWN) self.assertEqual(Decimal(int(d)), r) self.assertRaises(ValueError, int, Decimal('-nan')) self.assertRaises(ValueError, int, Decimal('snan')) self.assertRaises(OverflowError, int, Decimal('inf')) self.assertRaises(OverflowError, int, Decimal('-inf')) def test_trunc(self): Decimal = self.decimal.Decimal ROUND_DOWN = self.decimal.ROUND_DOWN lim = 10 if hasattr(C, 'setfailpoint') else 250 for x in range(-lim, lim): s = '%0.2f' % (x / 100.0) # should work the same as for floats self.assertEqual(int(Decimal(s)), int(float(s))) # should work the same as to_integral in the ROUND_DOWN mode d = Decimal(s) r = d.to_integral(ROUND_DOWN) self.assertEqual(Decimal(math.trunc(d)), r) def test_from_float(self): if hasattr(C, 'setfailpoint'): random.seed(randseed) Decimal = self.decimal.Decimal class MyDecimal(Decimal): pass self.assertTrue(issubclass(MyDecimal, Decimal)) r = MyDecimal.from_float(0.1) self.assertEqual(type(r), MyDecimal) self.assertEqual(str(r), '0.1000000000000000055511151231257827021181583404541015625') bigint = 12345678901234567890123456789 self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint)) self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan()) self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite()) self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite()) self.assertEqual(str(MyDecimal.from_float(float('nan'))), str(Decimal('NaN'))) self.assertEqual(str(MyDecimal.from_float(float('inf'))), str(Decimal('Infinity'))) self.assertEqual(str(MyDecimal.from_float(float('-inf'))), str(Decimal('-Infinity'))) self.assertRaises(TypeError, MyDecimal.from_float, 'abc') lim = 10 if hasattr(C, 'setfailpoint') else 200 for i in range(lim): x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0) self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip def test_create_decimal_from_float(self): Decimal = self.decimal.Decimal Context = self.decimal.Context ROUND_DOWN = self.decimal.ROUND_DOWN ROUND_UP = self.decimal.ROUND_UP Inexact = self.decimal.Inexact context = Context(prec=5, rounding=ROUND_DOWN) self.assertEqual( context.create_decimal_from_float(math.pi), Decimal('3.1415') ) context = Context(prec=5, rounding=ROUND_UP) self.assertEqual( context.create_decimal_from_float(math.pi), Decimal('3.1416') ) context = Context(prec=5, traps=[Inexact]) self.assertRaises( Inexact, context.create_decimal_from_float, math.pi ) self.assertEqual(repr(context.create_decimal_from_float(-0.0)), "Decimal('-0')") self.assertEqual(repr(context.create_decimal_from_float(1.0)), "Decimal('1')") 
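        # Conversion from float is exact: from_float(0.1) reproduces the full binary
        # expansion asserted in test_from_float above, while create_decimal_from_float
        # first converts exactly and then rounds to the context; as this test asserts
        # above, Context(prec=5, rounding=ROUND_DOWN).create_decimal_from_float(math.pi)
        # gives Decimal('3.1415'), and trapping Inexact turns any lossy conversion
        # into an exception.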
self.assertEqual(repr(context.create_decimal_from_float(10)), "Decimal('10')") def test_quantize(self): Decimal = self.decimal.Decimal Context = self.decimal.Context InvalidOperation = self.decimal.InvalidOperation ROUND_DOWN = self.decimal.ROUND_DOWN c = Context(Emax=99999, Emin=-99999) self.assertEqual( Decimal('7.335').quantize(Decimal('.01')), Decimal('7.34') ) self.assertEqual( Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN), Decimal('7.33') ) self.assertRaises( InvalidOperation, Decimal("10e99999").quantize, Decimal('1e100000'), context=c ) c = Context() d = Decimal("0.871831e800") x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN) self.assertEqual(x, Decimal('8.71E+799')) def test_complex(self): Decimal = self.decimal.Decimal x = Decimal("9.8182731e181273") self.assertEqual(x.real, x) self.assertEqual(x.imag, 0) self.assertEqual(x.conjugate(), x) x = Decimal("1") self.assertEqual(complex(x), complex(float(1))) self.assertRaises(AttributeError, setattr, x, 'real', 100) self.assertRaises(AttributeError, setattr, x, 'imag', 100) self.assertRaises(AttributeError, setattr, x, 'conjugate', 100) self.assertRaises(AttributeError, setattr, x, '__complex__', 100) class CPythonAPItests(PythonAPItests): decimal = C class PyPythonAPItests(PythonAPItests): decimal = P class ContextAPItests(unittest.TestCase): def test_pickle(self): Context = self.decimal.Context c = Context() e = pickle.loads(pickle.dumps(c)) if self.decimal == C: self.assertEqual(c.prec, e.prec) self.assertEqual(c.Emin, e.Emin) self.assertEqual(c.Emax, e.Emax) self.assertEqual(c.rounding, e.rounding) self.assertEqual(c.capitals, e.capitals) self.assertEqual(c.traps, e.traps) self.assertEqual(c.flags, e.flags) self.assertEqual(c._clamp, e._clamp) else: for k in vars(c): v1 = vars(c)[k] v2 = vars(e)[k] self.assertEqual(v1, v2) def test_equality_with_other_types(self): Decimal = self.decimal.Decimal self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}]) self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}]) def test_copy(self): # All copies should be deep Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.copy() self.assertNotEqual(id(c), id(d)) self.assertNotEqual(id(c.flags), id(d.flags)) self.assertNotEqual(id(c.traps), id(d.traps)) k1 = set(c.flags.keys()) k2 = set(d.flags.keys()) self.assertEqual(k1, k2) self.assertEqual(c.flags, d.flags) def test_abs(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.abs(Decimal(-1)) self.assertEqual(c.abs(-1), d) self.assertRaises(TypeError, c.abs, '-1') def test_add(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.add(Decimal(1), Decimal(1)) self.assertEqual(c.add(1, 1), d) self.assertEqual(c.add(Decimal(1), 1), d) self.assertEqual(c.add(1, Decimal(1)), d) self.assertRaises(TypeError, c.add, '1', 1) self.assertRaises(TypeError, c.add, 1, '1') def test_compare(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.compare(Decimal(1), Decimal(1)) self.assertEqual(c.compare(1, 1), d) self.assertEqual(c.compare(Decimal(1), 1), d) self.assertEqual(c.compare(1, Decimal(1)), d) self.assertRaises(TypeError, c.compare, '1', 1) self.assertRaises(TypeError, c.compare, 1, '1') def test_compare_signal(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.compare_signal(Decimal(1), Decimal(1)) self.assertEqual(c.compare_signal(1, 1), d) self.assertEqual(c.compare_signal(Decimal(1), 1), d) 
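        # Every test in this class exercises the same conversion rule: Context methods
        # accept plain ints wherever a Decimal operand is expected (the int is
        # converted exactly), while str and float arguments raise TypeError.
        # Sketch: c.add(1, 1) == c.add(Decimal(1), Decimal(1)), but c.add('1', 1) raises.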
self.assertEqual(c.compare_signal(1, Decimal(1)), d) self.assertRaises(TypeError, c.compare_signal, '1', 1) self.assertRaises(TypeError, c.compare_signal, 1, '1') def test_compare_total(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.compare_total(Decimal(1), Decimal(1)) self.assertEqual(c.compare_total(1, 1), d) self.assertEqual(c.compare_total(Decimal(1), 1), d) self.assertEqual(c.compare_total(1, Decimal(1)), d) self.assertRaises(TypeError, c.compare_total, '1', 1) self.assertRaises(TypeError, c.compare_total, 1, '1') def test_compare_total_mag(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.compare_total_mag(Decimal(1), Decimal(1)) self.assertEqual(c.compare_total_mag(1, 1), d) self.assertEqual(c.compare_total_mag(Decimal(1), 1), d) self.assertEqual(c.compare_total_mag(1, Decimal(1)), d) self.assertRaises(TypeError, c.compare_total_mag, '1', 1) self.assertRaises(TypeError, c.compare_total_mag, 1, '1') def test_copy_abs(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.copy_abs(Decimal(-1)) self.assertEqual(c.copy_abs(-1), d) self.assertRaises(TypeError, c.copy_abs, '-1') def test_copy_decimal(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.copy_decimal(Decimal(-1)) self.assertEqual(c.copy_decimal(-1), d) self.assertRaises(TypeError, c.copy_decimal, '-1') def test_copy_negate(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.copy_negate(Decimal(-1)) self.assertEqual(c.copy_negate(-1), d) self.assertRaises(TypeError, c.copy_negate, '-1') def test_copy_sign(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.copy_sign(Decimal(1), Decimal(-2)) self.assertEqual(c.copy_sign(1, -2), d) self.assertEqual(c.copy_sign(Decimal(1), -2), d) self.assertEqual(c.copy_sign(1, Decimal(-2)), d) self.assertRaises(TypeError, c.copy_sign, '1', -2) self.assertRaises(TypeError, c.copy_sign, 1, '-2') def test_divide(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.divide(Decimal(1), Decimal(2)) self.assertEqual(c.divide(1, 2), d) self.assertEqual(c.divide(Decimal(1), 2), d) self.assertEqual(c.divide(1, Decimal(2)), d) self.assertRaises(TypeError, c.divide, '1', 2) self.assertRaises(TypeError, c.divide, 1, '2') def test_divide_int(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.divide_int(Decimal(1), Decimal(2)) self.assertEqual(c.divide_int(1, 2), d) self.assertEqual(c.divide_int(Decimal(1), 2), d) self.assertEqual(c.divide_int(1, Decimal(2)), d) self.assertRaises(TypeError, c.divide_int, '1', 2) self.assertRaises(TypeError, c.divide_int, 1, '2') def test_divmod(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.divmod(Decimal(1), Decimal(2)) self.assertEqual(c.divmod(1, 2), d) self.assertEqual(c.divmod(Decimal(1), 2), d) self.assertEqual(c.divmod(1, Decimal(2)), d) self.assertRaises(TypeError, c.divmod, '1', 2) self.assertRaises(TypeError, c.divmod, 1, '2') def test_exp(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.exp(Decimal(10)) self.assertEqual(c.exp(10), d) self.assertRaises(TypeError, c.exp, '10') def test_fma(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.fma(Decimal(2), Decimal(3), Decimal(4)) self.assertEqual(c.fma(2, 3, 4), d) self.assertEqual(c.fma(Decimal(2), 3, 
4), d) self.assertEqual(c.fma(2, Decimal(3), 4), d) self.assertEqual(c.fma(2, 3, Decimal(4)), d) self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d) self.assertRaises(TypeError, c.fma, '2', 3, 4) self.assertRaises(TypeError, c.fma, 2, '3', 4) self.assertRaises(TypeError, c.fma, 2, 3, '4') if self.decimal == C or py_minor >= 3: # Issue 12079 for Context.fma ... self.assertRaises(TypeError, c.fma, Decimal('Infinity'), Decimal(0), "not a decimal") self.assertRaises(TypeError, c.fma, Decimal(1), Decimal('snan'), 1.222) # ... and for Decimal.fma. self.assertRaises(TypeError, Decimal('Infinity').fma, Decimal(0), "not a decimal") self.assertRaises(TypeError, Decimal(1).fma, Decimal('snan'), 1.222) def test_is_finite(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_finite(Decimal(10)) self.assertEqual(c.is_finite(10), d) self.assertRaises(TypeError, c.is_finite, '10') def test_is_infinite(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_infinite(Decimal(10)) self.assertEqual(c.is_infinite(10), d) self.assertRaises(TypeError, c.is_infinite, '10') def test_is_nan(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_nan(Decimal(10)) self.assertEqual(c.is_nan(10), d) self.assertRaises(TypeError, c.is_nan, '10') def test_is_normal(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_normal(Decimal(10)) self.assertEqual(c.is_normal(10), d) self.assertRaises(TypeError, c.is_normal, '10') def test_is_qnan(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_qnan(Decimal(10)) self.assertEqual(c.is_qnan(10), d) self.assertRaises(TypeError, c.is_qnan, '10') def test_is_signed(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_signed(Decimal(10)) self.assertEqual(c.is_signed(10), d) self.assertRaises(TypeError, c.is_signed, '10') def test_is_snan(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_snan(Decimal(10)) self.assertEqual(c.is_snan(10), d) self.assertRaises(TypeError, c.is_snan, '10') def test_is_subnormal(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_subnormal(Decimal(10)) self.assertEqual(c.is_subnormal(10), d) self.assertRaises(TypeError, c.is_subnormal, '10') def test_is_zero(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.is_zero(Decimal(10)) self.assertEqual(c.is_zero(10), d) self.assertRaises(TypeError, c.is_zero, '10') def test_ln(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.ln(Decimal(10)) self.assertEqual(c.ln(10), d) self.assertRaises(TypeError, c.ln, '10') def test_log10(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.log10(Decimal(10)) self.assertEqual(c.log10(10), d) self.assertRaises(TypeError, c.log10, '10') def test_logb(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.logb(Decimal(10)) self.assertEqual(c.logb(10), d) self.assertRaises(TypeError, c.logb, '10') def test_logical_and(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.logical_and(Decimal(1), Decimal(1)) self.assertEqual(c.logical_and(1, 1), d) self.assertEqual(c.logical_and(Decimal(1), 1), d) self.assertEqual(c.logical_and(1, Decimal(1)), d) self.assertRaises(TypeError, c.logical_and, '1', 1) 
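        # The logical_* operations treat their operands as strings of 0/1 digits and
        # combine them digit by digit (General Decimal Arithmetic "logical operands"),
        # e.g. Decimal('1100').logical_and(Decimal('1010')) == Decimal('1000');
        # operands containing other digits signal InvalidOperation.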
self.assertRaises(TypeError, c.logical_and, 1, '1') def test_logical_invert(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.logical_invert(Decimal(1000)) self.assertEqual(c.logical_invert(1000), d) self.assertRaises(TypeError, c.logical_invert, '1000') def test_logical_or(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.logical_or(Decimal(1), Decimal(1)) self.assertEqual(c.logical_or(1, 1), d) self.assertEqual(c.logical_or(Decimal(1), 1), d) self.assertEqual(c.logical_or(1, Decimal(1)), d) self.assertRaises(TypeError, c.logical_or, '1', 1) self.assertRaises(TypeError, c.logical_or, 1, '1') def test_logical_xor(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.logical_xor(Decimal(1), Decimal(1)) self.assertEqual(c.logical_xor(1, 1), d) self.assertEqual(c.logical_xor(Decimal(1), 1), d) self.assertEqual(c.logical_xor(1, Decimal(1)), d) self.assertRaises(TypeError, c.logical_xor, '1', 1) self.assertRaises(TypeError, c.logical_xor, 1, '1') def test_max(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.max(Decimal(1), Decimal(2)) self.assertEqual(c.max(1, 2), d) self.assertEqual(c.max(Decimal(1), 2), d) self.assertEqual(c.max(1, Decimal(2)), d) self.assertRaises(TypeError, c.max, '1', 2) self.assertRaises(TypeError, c.max, 1, '2') def test_max_mag(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.max_mag(Decimal(1), Decimal(2)) self.assertEqual(c.max_mag(1, 2), d) self.assertEqual(c.max_mag(Decimal(1), 2), d) self.assertEqual(c.max_mag(1, Decimal(2)), d) self.assertRaises(TypeError, c.max_mag, '1', 2) self.assertRaises(TypeError, c.max_mag, 1, '2') def test_min(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.min(Decimal(1), Decimal(2)) self.assertEqual(c.min(1, 2), d) self.assertEqual(c.min(Decimal(1), 2), d) self.assertEqual(c.min(1, Decimal(2)), d) self.assertRaises(TypeError, c.min, '1', 2) self.assertRaises(TypeError, c.min, 1, '2') def test_min_mag(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.min_mag(Decimal(1), Decimal(2)) self.assertEqual(c.min_mag(1, 2), d) self.assertEqual(c.min_mag(Decimal(1), 2), d) self.assertEqual(c.min_mag(1, Decimal(2)), d) self.assertRaises(TypeError, c.min_mag, '1', 2) self.assertRaises(TypeError, c.min_mag, 1, '2') def test_minus(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.minus(Decimal(10)) self.assertEqual(c.minus(10), d) self.assertRaises(TypeError, c.minus, '10') def test_multiply(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.multiply(Decimal(1), Decimal(2)) self.assertEqual(c.multiply(1, 2), d) self.assertEqual(c.multiply(Decimal(1), 2), d) self.assertEqual(c.multiply(1, Decimal(2)), d) self.assertRaises(TypeError, c.multiply, '1', 2) self.assertRaises(TypeError, c.multiply, 1, '2') def test_next_minus(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.next_minus(Decimal(10)) self.assertEqual(c.next_minus(10), d) self.assertRaises(TypeError, c.next_minus, '10') def test_next_plus(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.next_plus(Decimal(10)) self.assertEqual(c.next_plus(10), d) self.assertRaises(TypeError, c.next_plus, '10') def test_next_toward(self): Decimal = self.decimal.Decimal Context = 
self.decimal.Context c = Context() d = c.next_toward(Decimal(1), Decimal(2)) self.assertEqual(c.next_toward(1, 2), d) self.assertEqual(c.next_toward(Decimal(1), 2), d) self.assertEqual(c.next_toward(1, Decimal(2)), d) self.assertRaises(TypeError, c.next_toward, '1', 2) self.assertRaises(TypeError, c.next_toward, 1, '2') def test_normalize(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.normalize(Decimal(10)) self.assertEqual(c.normalize(10), d) self.assertRaises(TypeError, c.normalize, '10') def test_number_class(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() self.assertEqual(c.number_class(123), c.number_class(Decimal(123))) self.assertEqual(c.number_class(0), c.number_class(Decimal(0))) self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45))) def test_plus(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.plus(Decimal(10)) self.assertEqual(c.plus(10), d) self.assertRaises(TypeError, c.plus, '10') def test_power(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.power(Decimal(1), Decimal(4)) self.assertEqual(c.power(1, 4), d) self.assertEqual(c.power(Decimal(1), 4), d) self.assertEqual(c.power(1, Decimal(4)), d) self.assertEqual(c.power(Decimal(1), Decimal(4)), d) self.assertRaises(TypeError, c.power, '1', 4) self.assertRaises(TypeError, c.power, 1, '4') def test_quantize(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.quantize(Decimal(1), Decimal(2)) self.assertEqual(c.quantize(1, 2), d) self.assertEqual(c.quantize(Decimal(1), 2), d) self.assertEqual(c.quantize(1, Decimal(2)), d) self.assertRaises(TypeError, c.quantize, '1', 2) self.assertRaises(TypeError, c.quantize, 1, '2') def test_remainder(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.remainder(Decimal(1), Decimal(2)) self.assertEqual(c.remainder(1, 2), d) self.assertEqual(c.remainder(Decimal(1), 2), d) self.assertEqual(c.remainder(1, Decimal(2)), d) self.assertRaises(TypeError, c.remainder, '1', 2) self.assertRaises(TypeError, c.remainder, 1, '2') def test_remainder_near(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.remainder_near(Decimal(1), Decimal(2)) self.assertEqual(c.remainder_near(1, 2), d) self.assertEqual(c.remainder_near(Decimal(1), 2), d) self.assertEqual(c.remainder_near(1, Decimal(2)), d) self.assertRaises(TypeError, c.remainder_near, '1', 2) self.assertRaises(TypeError, c.remainder_near, 1, '2') def test_rotate(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.rotate(Decimal(1), Decimal(2)) self.assertEqual(c.rotate(1, 2), d) self.assertEqual(c.rotate(Decimal(1), 2), d) self.assertEqual(c.rotate(1, Decimal(2)), d) self.assertRaises(TypeError, c.rotate, '1', 2) self.assertRaises(TypeError, c.rotate, 1, '2') def test_sqrt(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.sqrt(Decimal(10)) self.assertEqual(c.sqrt(10), d) self.assertRaises(TypeError, c.sqrt, '10') def test_same_quantum(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.same_quantum(Decimal(1), Decimal(2)) self.assertEqual(c.same_quantum(1, 2), d) self.assertEqual(c.same_quantum(Decimal(1), 2), d) self.assertEqual(c.same_quantum(1, Decimal(2)), d) self.assertRaises(TypeError, c.same_quantum, '1', 2) self.assertRaises(TypeError, c.same_quantum, 1, '2') def 
test_scaleb(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.scaleb(Decimal(1), Decimal(2)) self.assertEqual(c.scaleb(1, 2), d) self.assertEqual(c.scaleb(Decimal(1), 2), d) self.assertEqual(c.scaleb(1, Decimal(2)), d) self.assertRaises(TypeError, c.scaleb, '1', 2) self.assertRaises(TypeError, c.scaleb, 1, '2') def test_shift(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.shift(Decimal(1), Decimal(2)) self.assertEqual(c.shift(1, 2), d) self.assertEqual(c.shift(Decimal(1), 2), d) self.assertEqual(c.shift(1, Decimal(2)), d) self.assertRaises(TypeError, c.shift, '1', 2) self.assertRaises(TypeError, c.shift, 1, '2') def test_subtract(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.subtract(Decimal(1), Decimal(2)) self.assertEqual(c.subtract(1, 2), d) self.assertEqual(c.subtract(Decimal(1), 2), d) self.assertEqual(c.subtract(1, Decimal(2)), d) self.assertRaises(TypeError, c.subtract, '1', 2) self.assertRaises(TypeError, c.subtract, 1, '2') def test_to_eng_string(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.to_eng_string(Decimal(10)) self.assertEqual(c.to_eng_string(10), d) self.assertRaises(TypeError, c.to_eng_string, '10') def test_to_sci_string(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.to_sci_string(Decimal(10)) self.assertEqual(c.to_sci_string(10), d) self.assertRaises(TypeError, c.to_sci_string, '10') def test_to_integral_exact(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.to_integral_exact(Decimal(10)) self.assertEqual(c.to_integral_exact(10), d) self.assertRaises(TypeError, c.to_integral_exact, '10') def test_to_integral_value(self): Decimal = self.decimal.Decimal Context = self.decimal.Context c = Context() d = c.to_integral_value(Decimal(10)) self.assertEqual(c.to_integral_value(10), d) self.assertRaises(TypeError, c.to_integral_value, '10') self.assertRaises(TypeError, c.to_integral_value, 10, 'x') class CContextAPItests(ContextAPItests): decimal = C @unittest.skipIf(py_minor <= 1, "requires Python 3.2") class PyContextAPItests(ContextAPItests): decimal = P class ContextWithStatement(unittest.TestCase): # Can't do these as docstrings until Python 2.6 # as doctest can't handle __future__ statements def test_localcontext(self): # Use a copy of the current context in the block getcontext = self.decimal.getcontext localcontext = self.decimal.localcontext orig_ctx = getcontext() with localcontext() as enter_ctx: set_ctx = getcontext() final_ctx = getcontext() self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly') self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context') self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context') def test_localcontextarg(self): # Use a copy of the supplied context in the block Context = self.decimal.Context getcontext = self.decimal.getcontext localcontext = self.decimal.localcontext localcontext = self.decimal.localcontext orig_ctx = getcontext() new_ctx = Context(prec=42) with localcontext(new_ctx) as enter_ctx: set_ctx = getcontext() final_ctx = getcontext() self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly') self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context') self.assertIsNot(new_ctx, set_ctx, 'did not copy the context') self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context') def test_nested_with_statements(self): 
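        # localcontext(ctx=None) installs a *copy* of ctx (or of the current context)
        # for the duration of the with block and restores the previous context on
        # exit; that is what the assertions above and the nested blocks below rely on.
        # Sketch:
        #
        #     with localcontext() as ctx:
        #         ctx.prec = 50        # visible only inside the block
        #     # the previous precision is active again here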
# Use a copy of the supplied context in the block Decimal = self.decimal.Decimal Context = self.decimal.Context getcontext = self.decimal.getcontext localcontext = self.decimal.localcontext Clamped = self.decimal.Clamped Overflow = self.decimal.Overflow orig_ctx = getcontext() orig_ctx.clear_flags() new_ctx = Context(Emax=384) with localcontext() as c1: self.assertEqual(c1.flags, orig_ctx.flags) self.assertEqual(c1.traps, orig_ctx.traps) c1.traps[Clamped] = True c1.Emin = -383 self.assertNotEqual(orig_ctx.Emin, -383) self.assertRaises(Clamped, c1.create_decimal, '0e-999') self.assertTrue(c1.flags[Clamped]) with localcontext(new_ctx) as c2: self.assertEqual(c2.flags, new_ctx.flags) self.assertEqual(c2.traps, new_ctx.traps) self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2) self.assertFalse(c2.flags[Clamped]) self.assertTrue(c2.flags[Overflow]) del c2 self.assertFalse(c1.flags[Overflow]) del c1 self.assertNotEqual(orig_ctx.Emin, -383) self.assertFalse(orig_ctx.flags[Clamped]) self.assertFalse(orig_ctx.flags[Overflow]) self.assertFalse(new_ctx.flags[Clamped]) self.assertFalse(new_ctx.flags[Overflow]) def test_with_statements_gc1(self): localcontext = self.decimal.localcontext with localcontext() as c1: del c1 with localcontext() as c2: del c2 with localcontext() as c3: del c3 with localcontext() as c4: del c4 def test_with_statements_gc2(self): localcontext = self.decimal.localcontext with localcontext() as c1: with localcontext(c1) as c2: del c1 with localcontext(c2) as c3: del c2 with localcontext(c3) as c4: del c3 del c4 def test_with_statements_gc3(self): Context = self.decimal.Context localcontext = self.decimal.localcontext getcontext = self.decimal.getcontext setcontext = self.decimal.setcontext with localcontext() as c1: del c1 n1 = Context(prec=1) setcontext(n1) with localcontext(n1) as c2: del n1 self.assertEqual(c2.prec, 1) del c2 n2 = Context(prec=2) setcontext(n2) del n2 self.assertEqual(getcontext().prec, 2) n3 = Context(prec=3) setcontext(n3) self.assertEqual(getcontext().prec, 3) with localcontext(n3) as c3: del n3 self.assertEqual(c3.prec, 3) del c3 n4 = Context(prec=4) setcontext(n4) del n4 self.assertEqual(getcontext().prec, 4) with localcontext() as c4: self.assertEqual(c4.prec, 4) del c4 class CContextWithStatement(ContextWithStatement): decimal = C class PyContextWithStatement(ContextWithStatement): decimal = P class ContextFlags(unittest.TestCase): def test_flags_irrelevant(self): # check that the result (numeric result + flags raised) of an # arithmetic operation doesn't depend on the current flags Decimal = self.decimal.Decimal Context = self.decimal.Context Inexact = self.decimal.Inexact Rounded = self.decimal.Rounded Underflow = self.decimal.Underflow Clamped = self.decimal.Clamped Subnormal = self.decimal.Subnormal ROUND_HALF_EVEN = self.decimal.ROUND_HALF_EVEN def raise_error(context, flag): if self.decimal == C: context.flags[flag] = True if context.traps[flag]: raise flag else: context._raise_error(flag) context = Context(prec=9, Emin = -425000000, Emax = 425000000, rounding=ROUND_HALF_EVEN, traps=[], flags=[]) # operations that raise various flags, in the form (function, arglist) operations = [ (context._apply, [Decimal("100E-425000010")]), (context.sqrt, [Decimal(2)]), (context.add, [Decimal("1.23456789"), Decimal("9.87654321")]), (context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]), (context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]), ] # try various flags individually, then a whole lot at once flagsets = [[Inexact], 
[Rounded], [Underflow], [Clamped], [Subnormal], [Inexact, Rounded, Underflow, Clamped, Subnormal]] for fn, args in operations: # find answer and flags raised using a clean context context.clear_flags() ans = fn(*args) flags = [k for k, v in context.flags.items() if v] for extra_flags in flagsets: # set flags, before calling operation context.clear_flags() for flag in extra_flags: raise_error(context, flag) new_ans = fn(*args) # flags that we expect to be set after the operation expected_flags = list(flags) for flag in extra_flags: if flag not in expected_flags: expected_flags.append(flag) expected_flags.sort(key=id) # flags we actually got new_flags = [k for k,v in context.flags.items() if v] new_flags.sort(key=id) self.assertEqual(ans, new_ans, "operation produces different answers depending on flags set: " + "expected %s, got %s." % (ans, new_ans)) self.assertEqual(new_flags, expected_flags, "operation raises different flags depending on flags set: " + "expected %s, got %s" % (expected_flags, new_flags)) class CContextFlags(ContextFlags): decimal = C class PyContextFlags(ContextFlags): decimal = P class SpecialContexts(unittest.TestCase): """Test the context templates.""" def test_context_templates(self): BasicContext = self.decimal.BasicContext ExtendedContext = self.decimal.ExtendedContext getcontext = self.decimal.getcontext setcontext = self.decimal.setcontext InvalidOperation = self.decimal.InvalidOperation DivisionByZero = self.decimal.DivisionByZero Overflow = self.decimal.Overflow Underflow = self.decimal.Underflow Clamped = self.decimal.Clamped assert_signals(self, BasicContext, 'traps', [InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped] ) savecontext = getcontext().copy() basic_context_prec = BasicContext.prec extended_context_prec = ExtendedContext.prec ex = None try: BasicContext.prec = ExtendedContext.prec = 441 for template in BasicContext, ExtendedContext: setcontext(template) c = getcontext() self.assertIsNot(c, template) self.assertEqual(c.prec, 441) except Exception as e: ex = e.__class__ finally: with protectfail(): BasicContext.prec = basic_context_prec ExtendedContext.prec = extended_context_prec setcontext(savecontext) if ex: raise ex def test_default_context(self): DefaultContext = self.decimal.DefaultContext BasicContext = self.decimal.BasicContext ExtendedContext = self.decimal.ExtendedContext getcontext = self.decimal.getcontext setcontext = self.decimal.setcontext InvalidOperation = self.decimal.InvalidOperation DivisionByZero = self.decimal.DivisionByZero Overflow = self.decimal.Overflow self.assertEqual(BasicContext.prec, 9) self.assertEqual(ExtendedContext.prec, 9) assert_signals(self, DefaultContext, 'traps', [InvalidOperation, DivisionByZero, Overflow] ) savecontext = getcontext().copy() default_context_prec = DefaultContext.prec ex = None try: c = getcontext() saveprec = c.prec DefaultContext.prec = 961 c = getcontext() self.assertEqual(c.prec, saveprec) setcontext(DefaultContext) c = getcontext() self.assertIsNot(c, DefaultContext) self.assertEqual(c.prec, 961) except Exception as e: ex = e.__class__ finally: with protectfail(): DefaultContext.prec = default_context_prec setcontext(savecontext) if ex: raise ex class CSpecialContexts(SpecialContexts): decimal = C class PySpecialContexts(SpecialContexts): decimal = P class ContextInputValidation(unittest.TestCase): def test_invalid_context(self): Context = self.decimal.Context DefaultContext = self.decimal.DefaultContext c = DefaultContext.copy() # prec, Emax for attr in ['prec', 'Emax']: 
setattr(c, attr, 999999) self.assertEqual(getattr(c, attr), 999999) self.assertRaises(ValueError, setattr, c, attr, -1) self.assertRaises(TypeError, setattr, c, attr, 'xyz') # Emin setattr(c, 'Emin', -999999) self.assertEqual(getattr(c, 'Emin'), -999999) self.assertRaises(ValueError, setattr, c, 'Emin', 1) self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3)) # rounding: always raise TypeError in order to get consistent # exceptions across implementations. In decimal, rounding # modes are strings, in _decimal they are integers. The idea # is to view rounding as an abstract type and not mind the # implementation details. # Hence, a user should view the rounding modes as if they # had been defined in a language that supports abstract # data types, e.g. ocaml: # # type rounding = ROUND_DOWN | ROUND_HALF_UP | ... ;; # self.assertRaises(TypeError, setattr, c, 'rounding', -1) self.assertRaises(TypeError, setattr, c, 'rounding', 9) self.assertRaises(TypeError, setattr, c, 'rounding', 1.0) self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz') # capitals, clamp for attr in ['capitals', 'clamp']: self.assertRaises(ValueError, setattr, c, attr, -1) self.assertRaises(ValueError, setattr, c, attr, 2) self.assertRaises(TypeError, setattr, c, attr, [1,2,3]) # Invalid attribute self.assertRaises(AttributeError, setattr, c, 'emax', 100) # Invalid signal dict self.assertRaises(TypeError, setattr, c, 'flags', []) self.assertRaises(TypeError, setattr, c, 'flags', {}) self.assertRaises(TypeError, setattr, c, 'traps', {'InvalidOperation':0}) # Attributes cannot be deleted for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp', 'flags', 'traps']: self.assertRaises(AttributeError, c.__delattr__, attr) # Invalid attributes self.assertRaises(TypeError, getattr, c, 9) self.assertRaises(TypeError, setattr, c, 9) # Invalid values in constructor self.assertRaises(TypeError, Context, rounding=999999) self.assertRaises(TypeError, Context, rounding='xyz') self.assertRaises(ValueError, Context, clamp=2) self.assertRaises(ValueError, Context, capitals=-1) self.assertRaises(TypeError, Context, flags=["P"]) self.assertRaises(TypeError, Context, traps=["Q"]) # Type error in conversion self.assertRaises(TypeError, Context, flags=(0,1)) self.assertRaises(TypeError, Context, traps=(1,0)) class CContextInputValidation(ContextInputValidation): decimal = C class PyContextInputValidation(unittest.TestCase): # No context input validation in decimal.py pass class Coverage(unittest.TestCase): def test_adjusted(self): Decimal = self.decimal.Decimal self.assertEqual(Decimal('1234e9999').adjusted(), 10002) # XXX raise? 
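        # adjusted() is the exponent of the most significant digit, i.e.
        # exponent + len(digits) - 1; for Decimal('1234e9999') that is
        # 9999 + 4 - 1 == 10002.  NaN and Infinity simply report 0, as the next
        # assertions show.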
self.assertEqual(Decimal('nan').adjusted(), 0) self.assertEqual(Decimal('inf').adjusted(), 0) def test_canonical(self): Decimal = self.decimal.Decimal getcontext = self.decimal.getcontext x = Decimal(9).canonical() self.assertEqual(x, 9) c = getcontext() x = c.canonical(Decimal(9)) self.assertEqual(x, 9) def test_context_repr(self): c = self.decimal.DefaultContext.copy() if py_minor <= 1 and self.decimal == P: raise unittest.SkipTest("requires Python 3.2") c.prec = 425000000 c.Emax = 425000000 c.Emin = -425000000 c.rounding = self.decimal.ROUND_HALF_DOWN c.capitals = 0 c.clamp = 1 for sig in OrderedSignals[self.decimal]: c.flags[sig] = False c.traps[sig] = False s = c.__repr__() t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \ "Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \ "flags=[], traps=[])" self.assertEqual(s, t) def test_implicit_context(self): Decimal = self.decimal.Decimal localcontext = self.decimal.localcontext with localcontext() as c: c.prec = 1 c.Emax = 1 c.Emin = -1 # abs self.assertEqual(abs(Decimal("-10")), 10) # add self.assertEqual(Decimal("7") + 1, 8) # divide self.assertEqual(Decimal("10") / 5, 2) # divide_int self.assertEqual(Decimal("10") // 7, 1) # fma self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1) self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True) # three arg power self.assertEqual(pow(Decimal(10), 2, 7), 2) # exp self.assertEqual(Decimal("1.01").exp(), 3) # is_normal self.assertIs(Decimal("0.01").is_normal(), False) # is_subnormal self.assertIs(Decimal("0.01").is_subnormal(), True) # ln self.assertEqual(Decimal("20").ln(), 3) # log10 self.assertEqual(Decimal("20").log10(), 1) # logb self.assertEqual(Decimal("580").logb(), 2) # logical_invert self.assertEqual(Decimal("10").logical_invert(), 1) # minus self.assertEqual(-Decimal("-10"), 10) # multiply self.assertEqual(Decimal("2") * 4, 8) # next_minus self.assertEqual(Decimal("10").next_minus(), 9) # next_plus self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1')) # normalize self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1')) # number_class self.assertEqual(Decimal("10").number_class(), '+Normal') # plus self.assertEqual(+Decimal("-1"), -1) # remainder self.assertEqual(Decimal("10") % 7, 3) # subtract self.assertEqual(Decimal("10") - 7, 3) # to_integral_exact self.assertEqual(Decimal("1.12345").to_integral_exact(), 1) # Boolean functions self.assertTrue(Decimal("1").is_canonical()) self.assertTrue(Decimal("1").is_finite()) self.assertTrue(Decimal("1").is_finite()) self.assertTrue(Decimal("snan").is_snan()) self.assertTrue(Decimal("-1").is_signed()) self.assertTrue(Decimal("0").is_zero()) self.assertTrue(Decimal("0").is_zero()) # Copy with localcontext() as c: c.prec = 10000 x = 1228 ** 1523 y = -Decimal(x) z = y.copy_abs() self.assertEqual(z, x) z = y.copy_negate() self.assertEqual(z, x) z = y.copy_sign(Decimal(1)) self.assertEqual(z, x) def test_divmod(self): Decimal = self.decimal.Decimal localcontext = self.decimal.localcontext InvalidOperation = self.decimal.InvalidOperation DivisionByZero = self.decimal.DivisionByZero with localcontext() as c: q, r = divmod(Decimal("10912837129"), 1001) self.assertEqual(q, Decimal('10901935')) self.assertEqual(r, Decimal('194')) q, r = divmod(Decimal("NaN"), 7) self.assertTrue(q.is_nan() and r.is_nan()) c.traps[InvalidOperation] = False q, r = divmod(Decimal("NaN"), 7) self.assertTrue(q.is_nan() and r.is_nan()) c.traps[InvalidOperation] = False c.clear_flags() q, r = divmod(Decimal("inf"), Decimal("inf")) 
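            # With InvalidOperation untrapped, invalid operations return quiet NaNs
            # and only record the flag; likewise, once DivisionByZero is untrapped
            # below, divmod(x, 0) yields an infinite quotient and a NaN remainder
            # instead of raising, which is what the remaining assertions in this
            # test check.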
            self.assertTrue(q.is_nan() and r.is_nan())
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            q, r = divmod(Decimal("inf"), 101)
            self.assertTrue(q.is_infinite() and r.is_nan())
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            q, r = divmod(Decimal(0), 0)
            self.assertTrue(q.is_nan() and r.is_nan())
            self.assertTrue(c.flags[InvalidOperation])

            c.traps[DivisionByZero] = False
            c.clear_flags()
            q, r = divmod(Decimal(11), 0)
            self.assertTrue(q.is_infinite() and r.is_nan())
            self.assertTrue(c.flags[InvalidOperation] and
                            c.flags[DivisionByZero])

    def test_power(self):
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext
        Overflow = self.decimal.Overflow
        Rounded = self.decimal.Rounded

        with localcontext() as c:
            c.prec = 3
            c.clear_flags()
            self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
            self.assertTrue(c.flags[Rounded])

            c.prec = 1
            c.Emax = 1
            c.Emin = -1
            c.clear_flags()
            c.traps[Overflow] = False
            self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
            self.assertTrue(c.flags[Overflow])

    def test_quantize(self):
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext
        InvalidOperation = self.decimal.InvalidOperation

        with localcontext() as c:
            c.prec = 1
            c.Emax = 1
            c.Emin = -1
            c.traps[InvalidOperation] = False
            x = Decimal(99).quantize(Decimal("1e1"))
            self.assertTrue(x.is_nan())

    def test_radix(self):
        Decimal = self.decimal.Decimal
        getcontext = self.decimal.getcontext

        c = getcontext()
        self.assertEqual(Decimal("1").radix(), 10)
        self.assertEqual(c.radix(), 10)

    def test_rop(self):
        Decimal = self.decimal.Decimal

        for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
                     '__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
            self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)

    def test_round(self):
        # Python3 behavior: round() returns Decimal
        Decimal = self.decimal.Decimal
        getcontext = self.decimal.getcontext

        c = getcontext()
        c.prec = 28

        self.assertEqual(str(Decimal("9.99").__round__()), "10")
        self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
        self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
        self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
        self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")

        self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
        self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)

    def test_create_decimal(self):
        c = self.decimal.Context()
        self.assertRaises(ValueError, c.create_decimal, ["%"])

    def test_int(self):
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext

        with localcontext() as c:
            c.prec = 9999
            x = Decimal(1221**1271) / 10**3923
            self.assertEqual(int(x), 1)
            self.assertEqual(x.to_integral(), 2)

    def test_copy(self):
        Context = self.decimal.Context

        if py_minor <= 1 and self.decimal == P:
            raise unittest.SkipTest("requires Python 3.2")

        c = Context()
        c.prec = 10000
        x = -(1172 ** 1712)

        y = c.copy_abs(x)
        self.assertEqual(y, -x)

        y = c.copy_negate(x)
        self.assertEqual(y, -x)

        y = c.copy_sign(x, 1)
        self.assertEqual(y, -x)


class CCoverage(Coverage):
    decimal = C
class PyCoverage(Coverage):
    decimal = P


class PyFunctionality(unittest.TestCase):
    """Extra functionality in decimal.py"""

    def test_py_quantize_watchexp(self):
        # watchexp functionality
        Decimal = P.Decimal
        localcontext = P.localcontext

        with localcontext() as c:
            c.prec = 1
            c.Emax = 1
            c.Emin = -1
            x = Decimal(99999).quantize(Decimal("1e3"), watchexp=False)
            self.assertEqual(x, Decimal('1.00E+5'))

    def test_py_alternate_formatting(self):
        # triples giving a format, a Decimal, and the expected result
        Decimal = P.Decimal
        localcontext = P.localcontext

        if py_minor <= 1:
            raise unittest.SkipTest("requires Python 3.2")

        test_values = [
            # Issue 7094: Alternate formatting (specified by #)
            ('.0e', '1.0', '1e+0'),
            ('#.0e', '1.0', '1.e+0'),
            ('.0f', '1.0', '1'),
            ('#.0f', '1.0', '1.'),
            ('g', '1.1', '1.1'),
            ('#g', '1.1', '1.1'),
            ('.0g', '1', '1'),
            ('#.0g', '1', '1.'),
            ('.0%', '1.0', '100%'),
            ('#.0%', '1.0', '100.%'),
        ]
        for fmt, d, result in test_values:
            self.assertEqual(format(Decimal(d), fmt), result)


class PyWhitebox(unittest.TestCase):
    """White box testing for decimal.py"""

    def test_py_exact_power(self):
        # Rarely exercised lines in _power_exact.
        Decimal = P.Decimal
        localcontext = P.localcontext

        with localcontext() as c:
            c.prec = 8
            x = Decimal(2**16) ** Decimal("-0.5")
            self.assertEqual(x, Decimal('0.00390625'))

            x = Decimal(2**16) ** Decimal("-0.6")
            self.assertEqual(x, Decimal('0.0012885819'))

            x = Decimal("256e7") ** Decimal("-0.5")

            x = Decimal(152587890625) ** Decimal('-0.0625')
            self.assertEqual(x, Decimal("0.2"))

            x = Decimal("152587890625e7") ** Decimal('-0.0625')

            x = Decimal(5**2659) ** Decimal('-0.0625')

            c.prec = 1
            x = Decimal("152587890625") ** Decimal('-0.5')
            c.prec = 201
            x = Decimal(2**578) ** Decimal("-0.5")

    def test_py_immutability_operations(self):
        # Do operations and check that it didn't change internal objects.
        Decimal = P.Decimal
        DefaultContext = P.DefaultContext
        setcontext = P.setcontext

        c = DefaultContext.copy()
        c.traps = dict((s, 0) for s in OrderedSignals[P])
        setcontext(c)

        d1 = Decimal('-25e55')
        b1 = Decimal('-25e55')
        d2 = Decimal('33e+33')
        b2 = Decimal('33e+33')

        def checkSameDec(operation, useOther=False):
            if useOther:
                eval("d1." + operation + "(d2)")
                self.assertEqual(d1._sign, b1._sign)
                self.assertEqual(d1._int, b1._int)
                self.assertEqual(d1._exp, b1._exp)
                self.assertEqual(d2._sign, b2._sign)
                self.assertEqual(d2._int, b2._int)
                self.assertEqual(d2._exp, b2._exp)
            else:
                eval("d1." + operation + "()")
                self.assertEqual(d1._sign, b1._sign)
                self.assertEqual(d1._int, b1._int)
                self.assertEqual(d1._exp, b1._exp)
            return

        Decimal(d1)
        self.assertEqual(d1._sign, b1._sign)
        self.assertEqual(d1._int, b1._int)
        self.assertEqual(d1._exp, b1._exp)

        checkSameDec("__abs__")
        checkSameDec("__add__", True)
        checkSameDec("__divmod__", True)
        checkSameDec("__eq__", True)
        checkSameDec("__ne__", True)
        checkSameDec("__le__", True)
        checkSameDec("__lt__", True)
        checkSameDec("__ge__", True)
        checkSameDec("__gt__", True)
        checkSameDec("__float__")
        checkSameDec("__floordiv__", True)
        checkSameDec("__hash__")
        checkSameDec("__int__")
        checkSameDec("__trunc__")
        checkSameDec("__mod__", True)
        checkSameDec("__mul__", True)
        checkSameDec("__neg__")
        checkSameDec("__bool__")
        checkSameDec("__pos__")
        checkSameDec("__pow__", True)
        checkSameDec("__radd__", True)
        checkSameDec("__rdivmod__", True)
        checkSameDec("__repr__")
        checkSameDec("__rfloordiv__", True)
        checkSameDec("__rmod__", True)
        checkSameDec("__rmul__", True)
        checkSameDec("__rpow__", True)
        checkSameDec("__rsub__", True)
        checkSameDec("__str__")
        checkSameDec("__sub__", True)
        checkSameDec("__truediv__", True)
        checkSameDec("adjusted")
        checkSameDec("as_tuple")
        checkSameDec("compare", True)
        checkSameDec("max", True)
        checkSameDec("min", True)
        checkSameDec("normalize")
        checkSameDec("quantize", True)
        checkSameDec("remainder_near", True)
        checkSameDec("same_quantum", True)
        checkSameDec("sqrt")
        checkSameDec("to_eng_string")
        checkSameDec("to_integral")

    def test_py_decimal_id(self):
        Decimal = P.Decimal

        d = Decimal(45)
        e = Decimal(d)
        self.assertEqual(str(e), '45')
        self.assertNotEqual(id(d), id(e))

    def test_py_rescale(self):
        # Coverage
        Decimal = P.Decimal
        ROUND_UP = P.ROUND_UP
        localcontext = P.localcontext

        with localcontext() as c:
            x = Decimal("NaN")._rescale(3, ROUND_UP)
            self.assertTrue(x.is_nan())

    def test_py__round(self):
        # Coverage
        Decimal = P.Decimal
        ROUND_UP = P.ROUND_UP

        self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP)


class CFunctionality(unittest.TestCase):
    """Extra functionality in _decimal"""

    def test_c_ieee_context(self):
        # issue 8786: Add support for IEEE 754 contexts to decimal module.
        IEEEContext = C.IEEEContext
        DECIMAL32 = C.DECIMAL32
        DECIMAL64 = C.DECIMAL64
        DECIMAL128 = C.DECIMAL128

        def assert_rest(self, context):
            self.assertEqual(context.clamp, 1)
            assert_signals(self, context, 'traps', [])
            assert_signals(self, context, 'flags', [])

        c = IEEEContext(DECIMAL32)
        self.assertEqual(c.prec, 7)
        self.assertEqual(c.Emax, 96)
        self.assertEqual(c.Emin, -95)
        assert_rest(self, c)

        c = IEEEContext(DECIMAL64)
        self.assertEqual(c.prec, 16)
        self.assertEqual(c.Emax, 384)
        self.assertEqual(c.Emin, -383)
        assert_rest(self, c)

        c = IEEEContext(DECIMAL128)
        self.assertEqual(c.prec, 34)
        self.assertEqual(c.Emax, 6144)
        self.assertEqual(c.Emin, -6143)
        assert_rest(self, c)

        # Invalid values
        self.assertRaises(OverflowError, IEEEContext, 2**63)
        self.assertRaises(ValueError, IEEEContext, -1)
        self.assertRaises(ValueError, IEEEContext, 1024)

    def test_apply(self):
        # Decimal("9.9999999").apply() applies the current context.
        Decimal = C.Decimal
        localcontext = C.localcontext

        with localcontext() as c:
            c.prec = 5
            c.Emax = 99999
            c.Emin = -99999

            d = c.copy()
            d.prec = 4

            x = Decimal("123456")
            self.assertEqual(str(x.apply()), "1.2346E+5")
            self.assertEqual(str(c.apply(x)), "1.2346E+5")
            self.assertEqual(str(x.apply(d)), "1.235E+5")
            self.assertEqual(str(d.apply(x)), "1.235E+5")

            self.assertRaises(TypeError, x.apply, "p")
            self.assertRaises(TypeError, x.apply, "p", "q")
            self.assertRaises(TypeError, c.apply, "p")

            x = Decimal(1171**2221)
            self.assertEqual(str(x.apply()), "1.8402E+6815")
            self.assertEqual(str(c.apply(x)), "1.8402E+6815")
            self.assertEqual(str(d.apply(x)), "1.840E+6815")

    def test_c_float_operation_default(self):
        Decimal = C.Decimal
        Context = C.Context
        Inexact = C.Inexact
        DecInexact = C.DecInexact
        FloatOperation = C.FloatOperation
        DecFloatOperation = C.DecFloatOperation

        context = Context()

        self.assertFalse(context.flags[FloatOperation])
        self.assertFalse(context.traps[FloatOperation])
        self.assertFalse(context._flags&DecFloatOperation)
        self.assertFalse(context._traps&DecFloatOperation)

        context.settraps([Inexact, FloatOperation])
        self.assertEqual(context._traps, DecInexact|DecFloatOperation)
        self.assertTrue(context.traps[FloatOperation])
        self.assertTrue(context.traps[Inexact])

    def test_c_powmod(self):
        Decimal = C.Decimal
        Context = C.Context

        c = Context()
        d = c.powmod(Decimal(1), Decimal(4), Decimal(2))
        self.assertEqual(c.powmod(1, 4, 2), d)
        self.assertEqual(c.powmod(Decimal(1), 4, 2), d)
        self.assertEqual(c.powmod(1, Decimal(4), 2), d)
        self.assertEqual(c.powmod(1, 4, Decimal(2)), d)
        self.assertEqual(c.powmod(Decimal(1), Decimal(4), 2), d)
        self.assertRaises(TypeError, c.powmod, '1', 4, 2)
        self.assertRaises(TypeError, c.powmod, 1, '4', 2)
        self.assertRaises(TypeError, c.powmod, 1, 4, '2')

    def test_c_context(self):
        Context = C.Context

        c = Context(flags=C.DecClamped, traps=C.DecRounded)
        self.assertEqual(c._flags, C.DecClamped)
        self.assertEqual(c._traps, C.DecRounded)

    def test_sundry(self):
        Decimal = C.Decimal

        # mpd_isinteger
        self.assertTrue(Decimal("1.234e5").is_integer())
        self.assertTrue(Decimal("snan").is_special())

        # Extra functions
        self.assertEqual(Decimal(-1).abs(), 1)
        self.assertEqual(Decimal(1).minus(), -1)
        self.assertEqual(Decimal(1).plus(), 1)
        self.assertEqual(Decimal(1).add(1), 2)
        self.assertEqual(Decimal(12).div(2), 6)
        self.assertEqual(Decimal(10).divint(7), 1)
        self.assertEqual(Decimal(10).mul(12), 120)
        self.assertEqual(Decimal(10).rem(7), 3)
        self.assertEqual(Decimal(10).sub(7), 3)
        self.assertEqual(Decimal(10).divmod(7), (1, 3))

    def test_constants(self):
        # Condition flags
        cond = (
            C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
            C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
            C.DecInexact, C.DecInvalidContext, C.DecInvalidOperation,
            C.DecMallocError, C.DecFloatOperation, C.DecOverflow,
            C.DecRounded, C.DecSubnormal, C.DecUnderflow
        )

        # Architecture dependent context limits
        if C.MAX_EMAX > 425000000:
            self.assertEqual(C.MAX_PREC, 999999999999999999)
            self.assertEqual(C.MAX_EMAX, 999999999999999999)
            self.assertEqual(C.MIN_EMIN, -999999999999999999)
            self.assertEqual(C.MIN_ETINY, -1999999999999999997)
        else:
            self.assertEqual(C.MAX_PREC, 425000000)
            self.assertEqual(C.MAX_EMAX, 425000000)
            self.assertEqual(C.MIN_EMIN, -425000000)
            self.assertEqual(C.MIN_ETINY, -849999999)

        # IEEEContext
        self.assertEqual(C.DECIMAL32, 32)
        self.assertEqual(C.DECIMAL64, 64)
        self.assertEqual(C.DECIMAL128, 128)
        self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)

        # Rounding modes
        for i, v in enumerate(RoundingModes[C]):
            self.assertEqual(v, i)
        self.assertEqual(C.ROUND_TRUNC, 8)

        # Conditions
        for i, v in enumerate(cond):
            self.assertEqual(v, 1<<i)

        self.assertEqual(C.DecIEEEInvalidOperation,
                         C.DecConversionSyntax|
                         C.DecDivisionImpossible|
                         C.DecDivisionUndefined|
                         C.DecFpuError|
                         C.DecInvalidContext|
                         C.DecInvalidOperation|
                         C.DecMallocError)

        self.assertEqual(C.DecErrors,
                         C.DecIEEEInvalidOperation|C.DecDivisionByZero)

        self.assertEqual(C.DecTraps,
                         C.DecErrors|C.DecOverflow|C.DecUnderflow)


class CWhitebox(unittest.TestCase):
    """Whitebox testing for _decimal"""

    def test_bignum(self):
        # Not exactly whitebox, but too slow with pydecimal.
        if hasattr(C, 'setfailpoint'):
            random.seed(randseed)

        Decimal = C.Decimal
        localcontext = C.localcontext

        b1 = 10**35
        b2 = 10**36
        with localcontext() as c:
            c.prec = 1000000
            r = 1 if hasattr(C, 'setfailpoint') else 5
            for i in range(r):
                a = random.randrange(b1, b2)
                b = random.randrange(1000, 1200)
                x = a ** b
                y = Decimal(a) ** Decimal(b)
                self.assertEqual(x, y)

    def test_c_input_restriction(self):
        # Too large for _decimal to be converted exactly
        Decimal = C.Decimal
        InvalidOperation = C.InvalidOperation
        Context = C.Context
        localcontext = C.localcontext

        with localcontext(Context()):
            self.assertRaises(InvalidOperation, Decimal,
                              "1e9999999999999999999")

    def test_c_context_repr(self):
        # This test is _decimal-only because flags are not printed
        # in the same order.
        DefaultContext = C.DefaultContext
        FloatOperation = C.FloatOperation
        ROUND_HALF_DOWN = C.ROUND_HALF_DOWN

        c = DefaultContext.copy()

        c.prec = 425000000
        c.Emax = 425000000
        c.Emin = -425000000
        c.rounding = ROUND_HALF_DOWN
        c.capitals = 0
        c.clamp = 1
        for sig in OrderedSignals[C]:
            c.flags[sig] = True
            c.traps[sig] = True
        c.flags[FloatOperation] = True
        c.traps[FloatOperation] = True

        s = c.__repr__()
        t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
            "Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
            "flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
            "FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
            "traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
            "FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
        self.assertEqual(s, t)

    def test_c_context_errors(self):
        Context = C.Context
        InvalidOperation = C.InvalidOperation
        Overflow = C.Overflow
        HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)

        c = Context()

        # SignalDict: input validation
        self.assertRaises(TypeError, c.flags.__setitem__, 801, 0)
        self.assertRaises(TypeError, c.traps.__setitem__, 801, 0)
        self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
        self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
        self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
        self.assertRaises(TypeError, setattr, c, 'traps', ['y'])
        self.assertRaises(TypeError, setattr, c, 'flags', {0:1})
        self.assertRaises(TypeError, setattr, c, 'traps', {0:1})
        self.assertRaises(TypeError, c.setflags, ['x'])
        self.assertRaises(TypeError, c.settraps, ['y'])
        self.assertRaises(TypeError, c.setflags, 'x')
        self.assertRaises(TypeError, c.settraps, 'y')

        # Input corner cases
        int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
        gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9

        # prec, Emax, Emin
        for attr in ['prec', 'Emax']:
            self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
        self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)

        # prec, Emax, Emin in context constructor
        self.assertRaises(ValueError, Context, prec=gt_max_emax)
        self.assertRaises(ValueError, Context, Emax=gt_max_emax)
        self.assertRaises(ValueError, Context, Emin=-gt_max_emax)

        # Overflow in conversion
        self.assertRaises(OverflowError, Context, prec=int_max+1)
        self.assertRaises(OverflowError, Context, Emax=int_max+1)
        self.assertRaises(OverflowError, Context, Emin=-int_max-2)
        self.assertRaises(OverflowError, Context, rounding=int_max+1)
        self.assertRaises(OverflowError, Context, clamp=int_max+1)
        self.assertRaises(OverflowError, Context, capitals=int_max+1)
        self.assertRaises(OverflowError, Context, _allcr=int_max+1)

        # OverflowError, general ValueError
        for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp', '_allcr'):
            self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
            self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
            if sys.platform != 'win32':
                self.assertRaises(ValueError, setattr, c, attr, int_max)
                self.assertRaises(ValueError, setattr, c, attr, -int_max-1)

        # OverflowError, general TypeError
        for attr in ('rounding', '_flags', '_traps'):
            self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
            self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
            if sys.platform != 'win32':
                self.assertRaises(TypeError, setattr, c, attr, int_max)
                self.assertRaises(TypeError, setattr, c, attr, -int_max-1)

        # OverflowError: unsafe_prec, unsafe_emin, unsafe_emax
        self.assertRaises(OverflowError, getattr(c, 'unsafe_setprec'), int_max+1)
        self.assertRaises(OverflowError, getattr(c, 'unsafe_setemax'), int_max+1)
        self.assertRaises(OverflowError, getattr(c, 'unsafe_setemin'), -int_max-2)

        # capitals, clamp, _allcr
        for attr in ['capitals', 'clamp', '_allcr']:
            self.assertRaises(ValueError, setattr, c, attr, -1)
            self.assertRaises(ValueError, setattr, c, attr, 2)
            self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
            if HAVE_CONFIG_64:
                self.assertRaises(ValueError, setattr, c, attr, 2**32)
                self.assertRaises(ValueError, setattr, c, attr, 2**32+1)
        self.assertRaises(ValueError, Context, _allcr=2)

        # _flags, _traps
        for attr in ['_flags', '_traps']:
            self.assertRaises(TypeError, setattr, c, attr, 999999)
            self.assertRaises(TypeError, setattr, c, attr, 'x')

    def test_c_valid_context(self):
        # These tests are for code coverage in _decimal.
        DefaultContext = C.DefaultContext
        ROUND_HALF_UP = C.ROUND_HALF_UP
        Clamped = C.Clamped
        Underflow = C.Underflow
        Inexact = C.Inexact
        Rounded = C.Rounded
        Subnormal = C.Subnormal
        DecClamped = C.DecClamped
        DecUnderflow = C.DecUnderflow
        DecInexact = C.DecInexact
        DecRounded = C.DecRounded
        DecSubnormal = C.DecSubnormal

        c = DefaultContext.copy()

        # Exercise all getters and setters
        c.prec = 34
        c.rounding = ROUND_HALF_UP
        c.Emax = 3000
        c.Emin = -3000
        c.capitals = 1
        c.clamp = 0
        c._flags = DecUnderflow
        c._traps = DecClamped
        c._allcr = 0

        self.assertEqual(c.prec, 34)
        self.assertEqual(c.rounding, ROUND_HALF_UP)
        self.assertEqual(c.Emin, -3000)
        self.assertEqual(c.Emax, 3000)
        self.assertEqual(c.capitals, 1)
        self.assertEqual(c.clamp, 0)
        self.assertEqual(c._flags, DecUnderflow)
        self.assertEqual(c._traps, DecClamped)
        self.assertEqual(c._allcr, 0)

        self.assertEqual(c.Etiny(), -3033)
        self.assertEqual(c.Etop(), 2967)

        # Set traps/flags from list
        c.settraps([Clamped, Underflow])
        self.assertEqual(c._traps, DecClamped|DecUnderflow)

        c.setflags([Inexact, Rounded, Subnormal])
        self.assertEqual(c._flags, DecInexact|DecRounded|DecSubnormal)

        # Exercise all unsafe setters
        c.unsafe_setprec(999999999)
        c.unsafe_setemax(999999999)
        c.unsafe_setemin(-999999999)
        self.assertEqual(c.prec, 999999999)
        self.assertEqual(c.Emax, 999999999)
        self.assertEqual(c.Emin, -999999999)

    def test_c_round(self):
        # Restricted input.
        Decimal = C.Decimal
        InvalidOperation = C.InvalidOperation
        localcontext = C.localcontext
        MAX_EMAX = C.MAX_EMAX
        MIN_ETINY = C.MIN_ETINY
        int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1

        with localcontext() as c:
            c.traps[InvalidOperation] = True
            self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
                              -int_max-1)
            self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
                              int_max)
            self.assertRaises(InvalidOperation, Decimal("1").__round__,
                              int(MAX_EMAX+1))
            self.assertRaises(C.InvalidOperation, Decimal("1").__round__,
                              -int(MIN_ETINY-1))
            self.assertRaises(OverflowError, Decimal("1.23").__round__,
                              -int_max-2)
            self.assertRaises(OverflowError, Decimal("1.23").__round__,
                              int_max+1)

    def test_c_format(self):
        # Restricted input
        Decimal = C.Decimal
        InvalidOperation = C.InvalidOperation
        Rounded = C.Rounded
        localcontext = C.localcontext
        HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)

        self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
        self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
        self.assertRaises(TypeError, Decimal(1).__format__, [])

        with localcontext() as c:
            c.traps[InvalidOperation] = True
            c.traps[Rounded] = True
            self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")
            maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
            self.assertRaises(InvalidOperation, Decimal("1.23456789").__format__,
                              "=%d.1" % maxsize)

    def test_c_integral(self):
        Decimal = C.Decimal
        Inexact = C.Inexact
        ROUND_UP = C.ROUND_UP
        localcontext = C.localcontext

        x = Decimal(10)
        self.assertEqual(x.to_integral(), 10)
        self.assertRaises(TypeError, x.to_integral, '10')
        self.assertRaises(TypeError, x.to_integral, 10, 'x')
        self.assertRaises(TypeError, x.to_integral, 10)

        self.assertEqual(x.to_integral_value(), 10)
        self.assertRaises(TypeError, x.to_integral_value, '10')
        self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
        self.assertRaises(TypeError, x.to_integral_value, 10)

        self.assertEqual(x.to_integral_exact(), 10)
        self.assertRaises(TypeError, x.to_integral_exact, '10')
        self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
        self.assertRaises(TypeError, x.to_integral_exact, 10)

        with localcontext() as c:
            x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
            self.assertEqual(x, Decimal('100000000000000000000000000'))

            x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
            self.assertEqual(x, Decimal('100000000000000000000000000'))

            c.traps[Inexact] = True
            self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)

    def test_c_funcs(self):
        # Invalid arguments
        Decimal = C.Decimal
        InvalidOperation = C.InvalidOperation
        DivisionByZero = C.DivisionByZero
        ROUND_UP = C.ROUND_UP
        getcontext = C.getcontext
        localcontext = C.localcontext

        self.assertEqual(Decimal('9.99e10').to_sci_string(), '9.99E+10')
        self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')

        self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
        self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
        self.assertRaises(TypeError, Decimal(9).divmod, 8, "x", "y")
        self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")
        self.assertRaises(TypeError, Decimal(9).to_sci, 3, "x", "y")
        self.assertRaises(TypeError, Decimal(9).to_eng, 3, "x", "y")

        self.assertEqual(Decimal("1.234e2007").sign(), 1)
        self.assertEqual(Decimal("-1.234e2007").sign(), -1)

        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), []
        )
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
        )
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
        )
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
        )

        with localcontext() as c:
            c.clear_traps()

            # Invalid arguments
            self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
            self.assertRaises(TypeError, c.canonical, 200)
            self.assertRaises(TypeError, c.is_canonical, 200)
            self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
            self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")

            self.assertEqual(str(c.canonical(Decimal(200))), '200')
            self.assertEqual(c.radix(), 10)

            c.traps[DivisionByZero] = True
            self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
            self.assertRaises(DivisionByZero, Decimal(9).divmod, 0)
            self.assertRaises(DivisionByZero, c.divmod, 9, 0)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            c.traps[InvalidOperation] = True
            self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
            self.assertRaises(InvalidOperation, Decimal(9).divmod, 0)
            self.assertRaises(InvalidOperation, c.divmod, 9, 0)
            self.assertTrue(c.flags[DivisionByZero])

            c.traps[InvalidOperation] = True
            c.prec = 2
            self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)

            c.prec = 10
            x = Decimal(2).invroot()
            self.assertEqual(str(x), '0.7071067812')
            x = c.invroot(3)
            self.assertEqual(str(x), '0.5773502692')

            c.prec = 28
            x = Decimal(2).power(8)
            self.assertEqual(str(x), '256')
            x = Decimal(2).powmod(8, 31)
            self.assertEqual(str(x), '8')

    def test_c_context_templates(self):
        self.assertEqual(
            C.BasicContext._traps,
            C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
            C.DecUnderflow|C.DecClamped
        )
        self.assertEqual(
            C.DefaultContext._traps,
            C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
        )

    def test_c_signal_dict(self):
        if hasattr(C, 'setfailpoint'):
            random.seed(randseed)

        # SignalDict coverage
        Context = C.Context
        DefaultContext = C.DefaultContext

        InvalidOperation = C.InvalidOperation
        DivisionByZero = C.DivisionByZero
        Overflow = C.Overflow
        Subnormal = C.Subnormal
        Underflow = C.Underflow
        Rounded = C.Rounded
        Inexact = C.Inexact
        Clamped = C.Clamped

        DecClamped = C.DecClamped
        DecInvalidOperation = C.DecInvalidOperation
        DecIEEEInvalidOperation = C.DecIEEEInvalidOperation

        def assertIsExclusivelySet(signal, signal_dict):
            for sig in signal_dict:
                if sig == signal:
                    self.assertTrue(signal_dict[sig])
                else:
                    self.assertFalse(signal_dict[sig])

        c = DefaultContext.copy()

        # Signal dict methods
        self.assertTrue(Overflow in c.traps)
        self.assertTrue(c.traps.has_key(Overflow))

        c.clear_traps()
        for k in c.traps.keys():
            c.traps[k] = True
        for v in c.traps.values():
            self.assertTrue(v)
        c.clear_traps()
        for k, v in c.traps.items():
            self.assertFalse(v)

        self.assertFalse(c.flags.get(Overflow))
        self.assertIs(c.flags.get("x"), None)
        self.assertEqual(c.flags.get("x", "y"), "y")
        self.assertRaises(TypeError, c.flags.get, "x", "y", "z")

        self.assertEqual(len(c.flags), len(c.traps))
        s = sys.getsizeof(c.flags)
        s = sys.getsizeof(c.traps)
        s = c.flags.__repr__()

        # Set flags/traps.
        c.clear_flags()
        c._flags = DecClamped
        self.assertTrue(c.flags[Clamped])

        c.clear_traps()
        c._traps = DecInvalidOperation
        self.assertTrue(c.traps[InvalidOperation])

        # Set flags/traps from dictionary.
        c.clear_flags()
        d = c.flags.copy()
        d[DivisionByZero] = True
        c.flags = d
        assertIsExclusivelySet(DivisionByZero, c.flags)

        c.clear_traps()
        d = c.traps.copy()
        d[Underflow] = True
        c.traps = d
        assertIsExclusivelySet(Underflow, c.traps)

        # Random constructors
        IntSignals = {
            Clamped: C.DecClamped,
            Rounded: C.DecRounded,
            Inexact: C.DecInexact,
            Subnormal: C.DecSubnormal,
            Underflow: C.DecUnderflow,
            Overflow: C.DecOverflow,
            DivisionByZero: C.DecDivisionByZero,
            InvalidOperation: C.DecIEEEInvalidOperation
        }
        IntCond = [
            C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
            C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
            C.DecConversionSyntax,
        ]

        lim = 1 if hasattr(C, 'setfailpoint') else len(OrderedSignals[C])
        for r in range(lim):
            for t in range(lim):
                for round in RoundingModes[C]:
                    flags = random.sample(OrderedSignals[C], r)
                    traps = random.sample(OrderedSignals[C], t)
                    prec = random.randrange(1, 10000)
                    emin = random.randrange(-10000, 0)
                    emax = random.randrange(0, 10000)
                    clamp = random.randrange(0, 2)
                    caps = random.randrange(0, 2)
                    cr = random.randrange(0, 2)
                    c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
                                capitals=caps, clamp=clamp, flags=list(flags),
                                traps=list(traps), _allcr=cr)

                    self.assertEqual(c.prec, prec)
                    self.assertEqual(c.rounding, round)
                    self.assertEqual(c.Emin, emin)
                    self.assertEqual(c.Emax, emax)
                    self.assertEqual(c.capitals, caps)
                    self.assertEqual(c.clamp, clamp)
                    self.assertEqual(c._allcr, cr)

                    f = 0
                    for x in flags:
                        f |= IntSignals[x]
                    self.assertEqual(c._flags, f)

                    f = 0
                    for x in traps:
                        f |= IntSignals[x]
                    self.assertEqual(c._traps, f)

        for cond in IntCond:
            c._flags = cond
            self.assertTrue(c._flags&DecIEEEInvalidOperation)
            assertIsExclusivelySet(InvalidOperation, c.flags)

        for cond in IntCond:
            c._traps = cond
            self.assertTrue(c._traps&DecIEEEInvalidOperation)
            assertIsExclusivelySet(InvalidOperation, c.traps)


all_tests = [
    CExplicitConstructionTest, PyExplicitConstructionTest,
    CImplicitConstructionTest, PyImplicitConstructionTest,
    CFormatTest, PyFormatTest,
    CArithmeticOperatorsTest, PyArithmeticOperatorsTest,
    CThreadingTest, PyThreadingTest,
    CUsabilityTest, PyUsabilityTest,
    CPythonAPItests, PyPythonAPItests,
    CContextAPItests, PyContextAPItests,
    CContextWithStatement, PyContextWithStatement,
    CContextFlags, PyContextFlags,
    CSpecialContexts, PySpecialContexts,
    CContextInputValidation, PyContextInputValidation,
    CCoverage, PyCoverage,
    CFunctionality, PyFunctionality,
    CWhitebox, PyWhitebox,
    CIBMTestCases, PyIBMTestCases,
]

if py_minor <= 1:
    all_tests = all_tests[::2]

# Wrap test functions for testing api failures. Doing this in
# test_main() causes spurious refleaks, so it is done here.
if hasattr(C, 'setapicalls'):
    for cls in all_tests:
        if cls == CIBMTestCases or cls == PyIBMTestCases:
            newfunc = withFailpoint(getattr(cls, 'eval_equation'))
            setattr(cls, 'eval_equation', newfunc)
        else:
            for attr in dir(cls):
                if attr.startswith('test_'):
                    if attr == 'test_threading':
                        continue
                    newfunc = withFailpoint(getattr(cls, attr))
                    setattr(cls, attr, newfunc)

def test_main(arith=False, verbose=None, todo_tests=None, debug=None):
    """ Execute the tests.

    Runs all arithmetic tests if arith is True or if the "decimal" resource
    is enabled in regrtest.py
    """

    init(C)
    init(P)
    global TEST_ALL, DEBUG
    TEST_ALL = arith or is_resource_enabled('decimal')
    DEBUG = debug

    if todo_tests is None:
        test_classes = all_tests
    else:
        test_classes = [CIBMTestCases, PyIBMTestCases]

    # Dynamically build custom test definition for each file in the test
    # directory and add the definitions to the DecimalTest class. This
    # procedure ensures that new files do not get skipped.
    for filename in os.listdir(directory):
        if '.decTest' not in filename or filename.startswith("."):
            continue
        head, tail = filename.split('.')
        if todo_tests is not None and head not in todo_tests:
            continue
        tester = lambda self, f=filename: self.eval_file(directory + f)
        setattr(CIBMTestCases, 'test_' + head, tester)
        setattr(PyIBMTestCases, 'test_' + head, tester)
        del filename, head, tail, tester

    try:
        run_unittest(*test_classes)
        if todo_tests is None:
            run_doctest(C, verbose)
            run_doctest(P, verbose)
    finally:
        if C: C.setcontext(ORIGINAL_CONTEXT[C])
        P.setcontext(ORIGINAL_CONTEXT[P])

        if not C:
            warnings.warn('C tests skipped: no module named _decimal.',
                          UserWarning)
        if not orig_sys_decimal is sys.modules['decimal']:
            raise TestFailed("Internal error: unbalanced number of changes to "
                             "sys.modules['decimal'].")


if __name__ == '__main__':
    import optparse
    p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
    p.add_option('--debug', '-d', action='store_true',
                 help='shows the test number and context before each test')
    p.add_option('--skip', '-s', action='store_true',
                 help='skip over 90% of the arithmetic tests')
    (opt, args) = p.parse_args()

    if opt.skip:
        test_main(arith=False, verbose=True)
    elif args:
        test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
    else:
        test_main(arith=True, verbose=True)
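
# Example invocations (an illustrative sketch only: the positional test names
# map to the heads of whatever .decTest files are present in the local test
# directory, so 'add' below is an assumed example, not a guaranteed target):
#
#   python test_decimal.py --skip       # skip over 90% of the arithmetic tests
#   python test_decimal.py add          # run only the testcases from add.decTest
#   python test_decimal.py -d add       # same, printing test number and context
#   python test_decimal.py              # full run of all arithmetic tests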