text: stringlengths 12 to 1.05M
repo_name: stringlengths 5 to 86
path: stringlengths 4 to 191
language: stringclasses (1 value)
license: stringclasses (15 values)
size: int32, 12 to 1.05M
keyword: listlengths 1 to 23
text_hash: stringlengths 64 to 64
from clldfabric import tasks
tasks.init('cdk')
clld/cdk
fabfile.py
Python
apache-2.0
47
[ "CDK" ]
effcdd2e7fac43edd5abef86331e3ab861cad46abd857587d15dce01aaa736c6
# flake8: noqa # Generated with tools/generate-isocodes.py LANGUAGES = set( ['aa', 'aaa', 'aab', 'aac', 'aad', 'aae', 'aaf', 'aag', 'aah', 'aai', 'aak', 'aal', 'aan', 'aao', 'aap', 'aaq', 'aas', 'aat', 'aau', 'aaw', 'aax', 'aaz', 'ab', 'aba', 'abb', 'abc', 'abd', 'abe', 'abf', 'abg', 'abh', 'abi', 'abj', 'abl', 'abm', 'abn', 'abo', 'abp', 'abq', 'abr', 'abs', 'abt', 'abu', 'abv', 'abw', 'abx', 'aby', 'abz', 'aca', 'acb', 'acd', 'ace', 'acf', 'ach', 'aci', 'ack', 'acl', 'acm', 'acn', 'acp', 'acq', 'acr', 'acs', 'act', 'acu', 'acv', 'acw', 'acx', 'acy', 'acz', 'ada', 'adb', 'add', 'ade', 'adf', 'adg', 'adh', 'adi', 'adj', 'adl', 'adn', 'ado', 'adq', 'adr', 'ads', 'adt', 'adu', 'adw', 'adx', 'ady', 'adz', 'ae', 'aea', 'aeb', 'aec', 'aed', 'aee', 'aek', 'ael', 'aem', 'aen', 'aeq', 'aer', 'aes', 'aeu', 'aew', 'aey', 'aez', 'af', 'afb', 'afd', 'afe', 'afg', 'afh', 'afi', 'afk', 'afn', 'afo', 'afp', 'afs', 'aft', 'afu', 'afz', 'aga', 'agb', 'agc', 'agd', 'age', 'agf', 'agg', 'agh', 'agi', 'agj', 'agk', 'agl', 'agm', 'agn', 'ago', 'agq', 'agr', 'ags', 'agt', 'agu', 'agv', 'agw', 'agx', 'agy', 'agz', 'aha', 'ahb', 'ahg', 'ahh', 'ahi', 'ahk', 'ahl', 'ahm', 'ahn', 'aho', 'ahp', 'ahr', 'ahs', 'aht', 'aia', 'aib', 'aic', 'aid', 'aie', 'aif', 'aig', 'aih', 'aii', 'aij', 'aik', 'ail', 'aim', 'ain', 'aio', 'aip', 'aiq', 'air', 'ais', 'ait', 'aiw', 'aix', 'aiy', 'aja', 'ajg', 'aji', 'ajn', 'ajp', 'ajt', 'aju', 'ajw', 'ajz', 'ak', 'akb', 'akc', 'akd', 'ake', 'akf', 'akg', 'akh', 'aki', 'akj', 'akk', 'akl', 'akm', 'ako', 'akp', 'akq', 'akr', 'aks', 'akt', 'aku', 'akv', 'akw', 'akx', 'aky', 'akz', 'ala', 'alc', 'ald', 'ale', 'alf', 'alh', 'ali', 'alj', 'alk', 'all', 'alm', 'aln', 'alo', 'alp', 'alq', 'alr', 'als', 'alt', 'alu', 'alw', 'alx', 'aly', 'alz', 'am', 'ama', 'amb', 'amc', 'ame', 'amf', 'amg', 'ami', 'amj', 'amk', 'aml', 'amm', 'amn', 'amo', 'amp', 'amq', 'amr', 'ams', 'amt', 'amu', 'amv', 'amw', 'amx', 'amy', 'amz', 'an', 'ana', 'anb', 'anc', 'and', 'ane', 'anf', 'ang', 'anh', 'ani', 'anj', 'ank', 'anl', 'anm', 'ann', 'ano', 'anp', 'anq', 'anr', 'ans', 'ant', 'anu', 'anv', 'anw', 'anx', 'any', 'anz', 'aoa', 'aob', 'aoc', 'aod', 'aoe', 'aof', 'aog', 'aoh', 'aoi', 'aoj', 'aok', 'aol', 'aom', 'aon', 'aor', 'aos', 'aot', 'aou', 'aox', 'aoz', 'apb', 'apc', 'apd', 'ape', 'apf', 'apg', 'aph', 'api', 'apj', 'apk', 'apl', 'apm', 'apn', 'apo', 'app', 'apq', 'apr', 'aps', 'apt', 'apu', 'apv', 'apw', 'apx', 'apy', 'apz', 'aqc', 'aqd', 'aqg', 'aqm', 'aqn', 'aqp', 'aqr', 'aqt', 'aqz', 'ar', 'arb', 'arc', 'ard', 'are', 'arh', 'ari', 'arj', 'ark', 'arl', 'arn', 'aro', 'arp', 'arq', 'arr', 'ars', 'aru', 'arv', 'arw', 'arx', 'ary', 'arz', 'as', 'asa', 'asb', 'asc', 'asd', 'ase', 'asf', 'asg', 'ash', 'asi', 'asj', 'ask', 'asl', 'asn', 'aso', 'asp', 'asq', 'asr', 'ass', 'ast', 'asu', 'asv', 'asw', 'asx', 'asy', 'asz', 'ata', 'atb', 'atc', 'atd', 'ate', 'atg', 'ati', 'atj', 'atk', 'atl', 'atm', 'atn', 'ato', 'atp', 'atq', 'atr', 'ats', 'att', 'atu', 'atv', 'atw', 'atx', 'aty', 'atz', 'aua', 'aub', 'auc', 'aud', 'aug', 'auh', 'aui', 'auj', 'auk', 'aul', 'aum', 'aun', 'auo', 'aup', 'auq', 'aur', 'aut', 'auu', 'auw', 'aux', 'auy', 'auz', 'av', 'avb', 'avd', 'avi', 'avk', 'avl', 'avm', 'avn', 'avo', 'avs', 'avt', 'avu', 'avv', 'awa', 'awb', 'awc', 'awe', 'awg', 'awh', 'awi', 'awk', 'awm', 'awn', 'awo', 'awr', 'aws', 'awt', 'awu', 'awv', 'aww', 'awx', 'awy', 'axb', 'axe', 'axg', 'axk', 'axl', 'axm', 'axx', 'ay', 'aya', 'ayb', 'ayc', 'ayd', 'aye', 'ayg', 'ayh', 'ayi', 'ayk', 'ayl', 'ayn', 'ayo', 'ayp', 'ayq', 'ayr', 'ays', 
'ayt', 'ayu', 'ayy', 'ayz', 'az', 'aza', 'azb', 'azd', 'azg', 'azj', 'azm', 'azn', 'azo', 'azt', 'azz', 'ba', 'baa', 'bab', 'bac', 'bae', 'baf', 'bag', 'bah', 'baj', 'bal', 'ban', 'bao', 'bap', 'bar', 'bas', 'bau', 'bav', 'baw', 'bax', 'bay', 'bba', 'bbb', 'bbc', 'bbd', 'bbe', 'bbf', 'bbg', 'bbh', 'bbi', 'bbj', 'bbk', 'bbl', 'bbm', 'bbn', 'bbo', 'bbp', 'bbq', 'bbr', 'bbs', 'bbt', 'bbu', 'bbv', 'bbw', 'bbx', 'bby', 'bbz', 'bca', 'bcb', 'bcc', 'bcd', 'bce', 'bcf', 'bcg', 'bch', 'bci', 'bcj', 'bck', 'bcl', 'bcm', 'bcn', 'bco', 'bcp', 'bcq', 'bcr', 'bcs', 'bct', 'bcu', 'bcv', 'bcw', 'bcy', 'bcz', 'bda', 'bdb', 'bdc', 'bdd', 'bde', 'bdf', 'bdg', 'bdh', 'bdi', 'bdj', 'bdk', 'bdl', 'bdm', 'bdn', 'bdo', 'bdp', 'bdq', 'bdr', 'bds', 'bdt', 'bdu', 'bdv', 'bdw', 'bdx', 'bdy', 'bdz', 'be', 'bea', 'beb', 'bec', 'bed', 'bee', 'bef', 'beg', 'beh', 'bei', 'bej', 'bek', 'bem', 'beo', 'bep', 'beq', 'bes', 'bet', 'beu', 'bev', 'bew', 'bex', 'bey', 'bez', 'bfa', 'bfb', 'bfc', 'bfd', 'bfe', 'bff', 'bfg', 'bfh', 'bfi', 'bfj', 'bfk', 'bfl', 'bfm', 'bfn', 'bfo', 'bfp', 'bfq', 'bfr', 'bfs', 'bft', 'bfu', 'bfw', 'bfx', 'bfy', 'bfz', 'bg', 'bga', 'bgb', 'bgc', 'bgd', 'bge', 'bgf', 'bgg', 'bgi', 'bgj', 'bgk', 'bgl', 'bgn', 'bgo', 'bgp', 'bgq', 'bgr', 'bgs', 'bgt', 'bgu', 'bgv', 'bgw', 'bgx', 'bgy', 'bgz', 'bha', 'bhb', 'bhc', 'bhd', 'bhe', 'bhf', 'bhg', 'bhh', 'bhi', 'bhj', 'bhl', 'bhm', 'bhn', 'bho', 'bhp', 'bhq', 'bhr', 'bhs', 'bht', 'bhu', 'bhv', 'bhw', 'bhx', 'bhy', 'bhz', 'bi', 'bia', 'bib', 'bic', 'bid', 'bie', 'bif', 'big', 'bij', 'bik', 'bil', 'bim', 'bin', 'bio', 'bip', 'biq', 'bir', 'bit', 'biu', 'biv', 'biw', 'bix', 'biy', 'biz', 'bja', 'bjb', 'bjc', 'bje', 'bjf', 'bjg', 'bjh', 'bji', 'bjj', 'bjk', 'bjl', 'bjm', 'bjn', 'bjo', 'bjp', 'bjr', 'bjs', 'bjt', 'bju', 'bjv', 'bjw', 'bjx', 'bjy', 'bjz', 'bka', 'bkc', 'bkd', 'bkf', 'bkg', 'bkh', 'bki', 'bkj', 'bkk', 'bkl', 'bkm', 'bkn', 'bko', 'bkp', 'bkq', 'bkr', 'bks', 'bkt', 'bku', 'bkv', 'bkw', 'bkx', 'bky', 'bkz', 'bla', 'blb', 'blc', 'bld', 'ble', 'blf', 'blg', 'blh', 'bli', 'blj', 'blk', 'bll', 'blm', 'bln', 'blo', 'blp', 'blq', 'blr', 'bls', 'blt', 'blv', 'blw', 'blx', 'bly', 'blz', 'bm', 'bma', 'bmb', 'bmc', 'bmd', 'bme', 'bmf', 'bmg', 'bmh', 'bmi', 'bmj', 'bmk', 'bml', 'bmm', 'bmn', 'bmo', 'bmp', 'bmq', 'bmr', 'bms', 'bmt', 'bmu', 'bmv', 'bmw', 'bmx', 'bmz', 'bn', 'bna', 'bnb', 'bnc', 'bnd', 'bne', 'bnf', 'bng', 'bni', 'bnj', 'bnk', 'bnl', 'bnm', 'bnn', 'bno', 'bnp', 'bnq', 'bnr', 'bns', 'bnu', 'bnv', 'bnw', 'bnx', 'bny', 'bnz', 'bo', 'boa', 'bob', 'boe', 'bof', 'bog', 'boh', 'boi', 'boj', 'bok', 'bol', 'bom', 'bon', 'boo', 'bop', 'boq', 'bor', 'bot', 'bou', 'bov', 'bow', 'box', 'boy', 'boz', 'bpa', 'bpb', 'bpd', 'bpg', 'bph', 'bpi', 'bpj', 'bpk', 'bpl', 'bpm', 'bpn', 'bpo', 'bpp', 'bpq', 'bpr', 'bps', 'bpt', 'bpu', 'bpv', 'bpw', 'bpx', 'bpy', 'bpz', 'bqa', 'bqb', 'bqc', 'bqd', 'bqf', 'bqg', 'bqh', 'bqi', 'bqj', 'bqk', 'bql', 'bqm', 'bqn', 'bqo', 'bqp', 'bqq', 'bqr', 'bqs', 'bqt', 'bqu', 'bqv', 'bqw', 'bqx', 'bqy', 'bqz', 'br', 'bra', 'brb', 'brc', 'brd', 'brf', 'brg', 'brh', 'bri', 'brj', 'brk', 'brl', 'brm', 'brn', 'bro', 'brp', 'brq', 'brr', 'brs', 'brt', 'bru', 'brv', 'brw', 'brx', 'bry', 'brz', 'bs', 'bsa', 'bsb', 'bsc', 'bse', 'bsf', 'bsg', 'bsh', 'bsi', 'bsj', 'bsk', 'bsl', 'bsm', 'bsn', 'bso', 'bsp', 'bsq', 'bsr', 'bss', 'bst', 'bsu', 'bsv', 'bsw', 'bsx', 'bsy', 'bta', 'btc', 'btd', 'bte', 'btf', 'btg', 'bth', 'bti', 'btj', 'btm', 'btn', 'bto', 'btp', 'btq', 'btr', 'bts', 'btt', 'btu', 'btv', 'btw', 'btx', 'bty', 'btz', 'bua', 'bub', 'buc', 'bud', 
'bue', 'buf', 'bug', 'buh', 'bui', 'buj', 'buk', 'bum', 'bun', 'buo', 'bup', 'buq', 'bus', 'but', 'buu', 'buv', 'buw', 'bux', 'buy', 'buz', 'bva', 'bvb', 'bvc', 'bvd', 'bve', 'bvf', 'bvg', 'bvh', 'bvi', 'bvj', 'bvk', 'bvl', 'bvm', 'bvn', 'bvo', 'bvp', 'bvq', 'bvr', 'bvt', 'bvu', 'bvv', 'bvw', 'bvx', 'bvy', 'bvz', 'bwa', 'bwb', 'bwc', 'bwd', 'bwe', 'bwf', 'bwg', 'bwh', 'bwi', 'bwj', 'bwk', 'bwl', 'bwm', 'bwn', 'bwo', 'bwp', 'bwq', 'bwr', 'bws', 'bwt', 'bwu', 'bww', 'bwx', 'bwy', 'bwz', 'bxa', 'bxb', 'bxc', 'bxd', 'bxe', 'bxf', 'bxg', 'bxh', 'bxi', 'bxj', 'bxk', 'bxl', 'bxm', 'bxn', 'bxo', 'bxp', 'bxq', 'bxr', 'bxs', 'bxu', 'bxv', 'bxw', 'bxz', 'bya', 'byb', 'byc', 'byd', 'bye', 'byf', 'byg', 'byh', 'byi', 'byj', 'byk', 'byl', 'bym', 'byn', 'byo', 'byp', 'byq', 'byr', 'bys', 'byt', 'byv', 'byw', 'byx', 'byz', 'bza', 'bzb', 'bzc', 'bzd', 'bze', 'bzf', 'bzg', 'bzh', 'bzi', 'bzj', 'bzk', 'bzl', 'bzm', 'bzn', 'bzo', 'bzp', 'bzq', 'bzr', 'bzs', 'bzt', 'bzu', 'bzv', 'bzw', 'bzx', 'bzy', 'bzz', 'ca', 'caa', 'cab', 'cac', 'cad', 'cae', 'caf', 'cag', 'cah', 'caj', 'cak', 'cal', 'cam', 'can', 'cao', 'cap', 'caq', 'car', 'cas', 'cav', 'caw', 'cax', 'cay', 'caz', 'cbb', 'cbc', 'cbd', 'cbg', 'cbi', 'cbj', 'cbk', 'cbl', 'cbn', 'cbo', 'cbq', 'cbr', 'cbs', 'cbt', 'cbu', 'cbv', 'cbw', 'cby', 'cca', 'ccc', 'ccd', 'cce', 'ccg', 'cch', 'ccj', 'ccl', 'ccm', 'cco', 'ccp', 'ccr', 'cda', 'cde', 'cdf', 'cdg', 'cdh', 'cdi', 'cdj', 'cdm', 'cdn', 'cdo', 'cdr', 'cds', 'cdy', 'cdz', 'ce', 'cea', 'ceb', 'ceg', 'cek', 'cen', 'cet', 'cfa', 'cfd', 'cfg', 'cfm', 'cga', 'cgc', 'cgg', 'cgk', 'ch', 'chb', 'chc', 'chd', 'chf', 'chg', 'chh', 'chj', 'chk', 'chl', 'chm', 'chn', 'cho', 'chp', 'chq', 'chr', 'cht', 'chw', 'chx', 'chy', 'chz', 'cia', 'cib', 'cic', 'cid', 'cie', 'cih', 'cik', 'cim', 'cin', 'cip', 'cir', 'ciw', 'ciy', 'cja', 'cje', 'cjh', 'cji', 'cjk', 'cjm', 'cjn', 'cjo', 'cjp', 'cjs', 'cjv', 'cjy', 'ckb', 'ckh', 'ckl', 'ckn', 'cko', 'ckq', 'ckr', 'cks', 'ckt', 'cku', 'ckv', 'ckx', 'cky', 'ckz', 'cla', 'clc', 'cld', 'cle', 'clh', 'cli', 'clj', 'clk', 'cll', 'clm', 'clo', 'clt', 'clu', 'clw', 'cly', 'cma', 'cme', 'cmg', 'cmi', 'cml', 'cmm', 'cmn', 'cmo', 'cmr', 'cms', 'cmt', 'cna', 'cnb', 'cnc', 'cng', 'cnh', 'cni', 'cnk', 'cnl', 'cno', 'cns', 'cnt', 'cnu', 'cnw', 'cnx', 'co', 'coa', 'cob', 'coc', 'cod', 'coe', 'cof', 'cog', 'coh', 'coj', 'cok', 'col', 'com', 'con', 'coo', 'cop', 'coq', 'cot', 'cou', 'cov', 'cow', 'cox', 'coz', 'cpa', 'cpb', 'cpc', 'cpg', 'cpi', 'cpn', 'cpo', 'cps', 'cpu', 'cpx', 'cpy', 'cqd', 'cr', 'cra', 'crb', 'crc', 'crd', 'crf', 'crg', 'crh', 'cri', 'crj', 'crk', 'crl', 'crm', 'crn', 'cro', 'crq', 'crr', 'crs', 'crt', 'crv', 'crw', 'crx', 'cry', 'crz', 'cs', 'csa', 'csb', 'csc', 'csd', 'cse', 'csf', 'csg', 'csh', 'csi', 'csj', 'csk', 'csl', 'csm', 'csn', 'cso', 'csq', 'csr', 'css', 'cst', 'csv', 'csw', 'csy', 'csz', 'cta', 'ctc', 'ctd', 'cte', 'ctg', 'cth', 'ctl', 'ctm', 'ctn', 'cto', 'ctp', 'cts', 'ctt', 'ctu', 'ctz', 'cu', 'cua', 'cub', 'cuc', 'cug', 'cuh', 'cui', 'cuj', 'cuk', 'cul', 'cuo', 'cup', 'cuq', 'cur', 'cut', 'cuu', 'cuv', 'cuw', 'cux', 'cv', 'cvg', 'cvn', 'cwa', 'cwb', 'cwd', 'cwe', 'cwg', 'cwt', 'cy', 'cya', 'cyb', 'cyo', 'czh', 'czk', 'czn', 'czo', 'czt', 'da', 'daa', 'dac', 'dad', 'dae', 'dag', 'dah', 'dai', 'daj', 'dak', 'dal', 'dam', 'dao', 'daq', 'dar', 'das', 'dau', 'dav', 'daw', 'dax', 'daz', 'dba', 'dbb', 'dbd', 'dbe', 'dbf', 'dbg', 'dbi', 'dbj', 'dbl', 'dbm', 'dbn', 'dbo', 'dbp', 'dbq', 'dbr', 'dbt', 'dbu', 'dbv', 'dbw', 'dby', 'dcc', 'dcr', 'dda', 'ddd', 'dde', 'ddg', 'ddi', 
'ddj', 'ddn', 'ddo', 'ddr', 'dds', 'ddw', 'de', 'dec', 'ded', 'dee', 'def', 'deg', 'deh', 'dei', 'dek', 'del', 'dem', 'den', 'dep', 'deq', 'der', 'des', 'dev', 'dez', 'dga', 'dgb', 'dgc', 'dgd', 'dge', 'dgg', 'dgh', 'dgi', 'dgk', 'dgl', 'dgn', 'dgo', 'dgr', 'dgs', 'dgt', 'dgu', 'dgw', 'dgx', 'dgz', 'dhd', 'dhg', 'dhi', 'dhl', 'dhm', 'dhn', 'dho', 'dhr', 'dhs', 'dhu', 'dhv', 'dhw', 'dhx', 'dia', 'dib', 'dic', 'did', 'dif', 'dig', 'dih', 'dii', 'dij', 'dik', 'dil', 'dim', 'din', 'dio', 'dip', 'diq', 'dir', 'dis', 'dit', 'diu', 'diw', 'dix', 'diy', 'diz', 'dja', 'djb', 'djc', 'djd', 'dje', 'djf', 'dji', 'djj', 'djk', 'djm', 'djn', 'djo', 'djr', 'dju', 'djw', 'dka', 'dkk', 'dkr', 'dks', 'dkx', 'dlg', 'dlk', 'dlm', 'dln', 'dma', 'dmb', 'dmc', 'dmd', 'dme', 'dmg', 'dmk', 'dml', 'dmm', 'dmo', 'dmr', 'dms', 'dmu', 'dmv', 'dmw', 'dmx', 'dmy', 'dna', 'dnd', 'dne', 'dng', 'dni', 'dnj', 'dnk', 'dnn', 'dnr', 'dnt', 'dnu', 'dnv', 'dnw', 'dny', 'doa', 'dob', 'doc', 'doe', 'dof', 'doh', 'doi', 'dok', 'dol', 'don', 'doo', 'dop', 'doq', 'dor', 'dos', 'dot', 'dov', 'dow', 'dox', 'doy', 'doz', 'dpp', 'drb', 'drc', 'drd', 'dre', 'drg', 'dri', 'drl', 'drn', 'dro', 'drq', 'drr', 'drs', 'drt', 'dru', 'dry', 'dsb', 'dse', 'dsh', 'dsi', 'dsl', 'dsn', 'dso', 'dsq', 'dta', 'dtb', 'dtd', 'dth', 'dti', 'dtk', 'dtm', 'dtn', 'dto', 'dtp', 'dtr', 'dts', 'dtt', 'dtu', 'dty', 'dua', 'dub', 'duc', 'dud', 'due', 'duf', 'dug', 'duh', 'dui', 'duk', 'dul', 'dum', 'dun', 'duo', 'dup', 'duq', 'dur', 'dus', 'duu', 'duv', 'duw', 'dux', 'duy', 'duz', 'dv', 'dva', 'dwa', 'dwr', 'dws', 'dwu', 'dww', 'dwy', 'dya', 'dyb', 'dyd', 'dyg', 'dyi', 'dym', 'dyn', 'dyo', 'dyu', 'dyy', 'dz', 'dza', 'dze', 'dzg', 'dzl', 'dzn', 'eaa', 'ebg', 'ebk', 'ebo', 'ebr', 'ebu', 'ecr', 'ecs', 'ecy', 'ee', 'eee', 'efa', 'efe', 'efi', 'ega', 'egl', 'ego', 'egy', 'ehu', 'eip', 'eit', 'eiv', 'eja', 'eka', 'ekc', 'eke', 'ekg', 'eki', 'ekk', 'ekl', 'ekm', 'eko', 'ekp', 'ekr', 'eky', 'el', 'ele', 'elh', 'eli', 'elk', 'elm', 'elo', 'elu', 'elx', 'ema', 'emb', 'eme', 'emg', 'emi', 'emk', 'emm', 'emn', 'emp', 'ems', 'emu', 'emw', 'emx', 'emy', 'en', 'ena', 'enb', 'enc', 'end', 'enf', 'enh', 'enl', 'enm', 'enn', 'eno', 'enq', 'enr', 'enu', 'env', 'enw', 'enx', 'eo', 'eot', 'epi', 'era', 'erg', 'erh', 'eri', 'erk', 'ero', 'err', 'ers', 'ert', 'erw', 'es', 'ese', 'esg', 'esh', 'esi', 'esk', 'esl', 'esm', 'esn', 'eso', 'esq', 'ess', 'esu', 'esy', 'et', 'etb', 'etc', 'eth', 'etn', 'eto', 'etr', 'ets', 'ett', 'etu', 'etx', 'etz', 'eu', 'eve', 'evh', 'evn', 'ewo', 'ext', 'eya', 'eyo', 'eza', 'eze', 'fa', 'faa', 'fab', 'fad', 'faf', 'fag', 'fah', 'fai', 'faj', 'fak', 'fal', 'fam', 'fan', 'fap', 'far', 'fat', 'fau', 'fax', 'fay', 'faz', 'fbl', 'fcs', 'fer', 'ff', 'ffi', 'ffm', 'fgr', 'fi', 'fia', 'fie', 'fil', 'fip', 'fir', 'fit', 'fiw', 'fj', 'fkk', 'fkv', 'fla', 'flh', 'fli', 'fll', 'fln', 'flr', 'fly', 'fmp', 'fmu', 'fnb', 'fng', 'fni', 'fo', 'fod', 'foi', 'fom', 'fon', 'for', 'fos', 'fpe', 'fqs', 'fr', 'frc', 'frd', 'frk', 'frm', 'fro', 'frp', 'frq', 'frr', 'frs', 'frt', 'fse', 'fsl', 'fss', 'fub', 'fuc', 'fud', 'fue', 'fuf', 'fuh', 'fui', 'fuj', 'fum', 'fun', 'fuq', 'fur', 'fut', 'fuu', 'fuv', 'fuy', 'fvr', 'fwa', 'fwe', 'fy', 'ga', 'gaa', 'gab', 'gac', 'gad', 'gae', 'gaf', 'gag', 'gah', 'gai', 'gaj', 'gak', 'gal', 'gam', 'gan', 'gao', 'gap', 'gaq', 'gar', 'gas', 'gat', 'gau', 'gaw', 'gax', 'gay', 'gaz', 'gba', 'gbb', 'gbd', 'gbe', 'gbf', 'gbg', 'gbh', 'gbi', 'gbj', 'gbk', 'gbl', 'gbm', 'gbn', 'gbo', 'gbp', 'gbq', 'gbr', 'gbs', 'gbu', 'gbv', 'gbw', 'gbx', 'gby', 'gbz', 
'gcc', 'gcd', 'gce', 'gcf', 'gcl', 'gcn', 'gcr', 'gct', 'gd', 'gda', 'gdb', 'gdc', 'gdd', 'gde', 'gdf', 'gdg', 'gdh', 'gdi', 'gdj', 'gdk', 'gdl', 'gdm', 'gdn', 'gdo', 'gdq', 'gdr', 'gds', 'gdt', 'gdu', 'gdx', 'gea', 'geb', 'gec', 'ged', 'geg', 'geh', 'gei', 'gej', 'gek', 'gel', 'geq', 'ges', 'gev', 'gew', 'gex', 'gey', 'gez', 'gfk', 'gft', 'gga', 'ggb', 'ggd', 'gge', 'ggg', 'ggk', 'ggl', 'ggt', 'ggu', 'ggw', 'gha', 'ghc', 'ghe', 'ghh', 'ghk', 'ghl', 'ghn', 'gho', 'ghr', 'ghs', 'ght', 'gia', 'gib', 'gic', 'gid', 'gig', 'gih', 'gil', 'gim', 'gin', 'gip', 'giq', 'gir', 'gis', 'git', 'giu', 'giw', 'gix', 'giy', 'giz', 'gji', 'gjk', 'gjm', 'gjn', 'gjr', 'gju', 'gka', 'gke', 'gkn', 'gko', 'gkp', 'gku', 'gl', 'glc', 'gld', 'glh', 'gli', 'glj', 'glk', 'gll', 'glo', 'glr', 'glu', 'glw', 'gly', 'gma', 'gmb', 'gmd', 'gmg', 'gmh', 'gml', 'gmm', 'gmn', 'gmu', 'gmv', 'gmx', 'gmy', 'gmz', 'gn', 'gna', 'gnb', 'gnc', 'gnd', 'gne', 'gng', 'gnh', 'gni', 'gnk', 'gnl', 'gnm', 'gnn', 'gno', 'gnq', 'gnr', 'gnt', 'gnu', 'gnw', 'gnz', 'goa', 'gob', 'goc', 'god', 'goe', 'gof', 'gog', 'goh', 'goi', 'goj', 'gok', 'gol', 'gom', 'gon', 'goo', 'gop', 'goq', 'gor', 'gos', 'got', 'gou', 'gow', 'gox', 'goy', 'goz', 'gpa', 'gpe', 'gpn', 'gqa', 'gqi', 'gqn', 'gqr', 'gqu', 'gra', 'grb', 'grc', 'grd', 'grg', 'grh', 'gri', 'grj', 'grm', 'gro', 'grq', 'grr', 'grs', 'grt', 'gru', 'grv', 'grw', 'grx', 'gry', 'grz', 'gse', 'gsg', 'gsl', 'gsm', 'gsn', 'gso', 'gsp', 'gss', 'gsw', 'gta', 'gtu', 'gu', 'gua', 'gub', 'guc', 'gud', 'gue', 'guf', 'gug', 'guh', 'gui', 'guk', 'gul', 'gum', 'gun', 'guo', 'gup', 'guq', 'gur', 'gus', 'gut', 'guu', 'guw', 'gux', 'guz', 'gv', 'gva', 'gvc', 'gve', 'gvf', 'gvj', 'gvl', 'gvm', 'gvn', 'gvo', 'gvp', 'gvr', 'gvs', 'gvy', 'gwa', 'gwb', 'gwc', 'gwd', 'gwe', 'gwf', 'gwg', 'gwi', 'gwj', 'gwm', 'gwn', 'gwr', 'gwt', 'gwu', 'gww', 'gwx', 'gxx', 'gya', 'gyb', 'gyd', 'gye', 'gyf', 'gyg', 'gyi', 'gyl', 'gym', 'gyn', 'gyr', 'gyy', 'gza', 'gzi', 'gzn', 'ha', 'haa', 'hab', 'hac', 'had', 'hae', 'haf', 'hag', 'hah', 'hai', 'haj', 'hak', 'hal', 'ham', 'han', 'hao', 'hap', 'haq', 'har', 'has', 'hav', 'haw', 'hax', 'hay', 'haz', 'hba', 'hbb', 'hbn', 'hbo', 'hbu', 'hca', 'hch', 'hdn', 'hds', 'hdy', 'he', 'hea', 'hed', 'heg', 'heh', 'hei', 'hem', 'hgm', 'hgw', 'hhi', 'hhr', 'hhy', 'hi', 'hia', 'hib', 'hid', 'hif', 'hig', 'hih', 'hii', 'hij', 'hik', 'hil', 'hio', 'hir', 'hit', 'hiw', 'hix', 'hji', 'hka', 'hke', 'hkk', 'hks', 'hla', 'hlb', 'hld', 'hle', 'hlt', 'hlu', 'hma', 'hmb', 'hmc', 'hmd', 'hme', 'hmf', 'hmg', 'hmh', 'hmi', 'hmj', 'hmk', 'hml', 'hmm', 'hmn', 'hmp', 'hmq', 'hmr', 'hms', 'hmt', 'hmu', 'hmv', 'hmw', 'hmy', 'hmz', 'hna', 'hnd', 'hne', 'hnh', 'hni', 'hnj', 'hnn', 'hno', 'hns', 'hnu', 'ho', 'hoa', 'hob', 'hoc', 'hod', 'hoe', 'hoh', 'hoi', 'hoj', 'hol', 'hom', 'hoo', 'hop', 'hor', 'hos', 'hot', 'hov', 'how', 'hoy', 'hoz', 'hpo', 'hps', 'hr', 'hra', 'hrc', 'hre', 'hrk', 'hrm', 'hro', 'hrp', 'hrt', 'hru', 'hrw', 'hrx', 'hrz', 'hsb', 'hsh', 'hsl', 'hsn', 'hss', 'ht', 'hti', 'hto', 'hts', 'htu', 'htx', 'hu', 'hub', 'huc', 'hud', 'hue', 'huf', 'hug', 'huh', 'hui', 'huj', 'huk', 'hul', 'hum', 'huo', 'hup', 'huq', 'hur', 'hus', 'hut', 'huu', 'huv', 'huw', 'hux', 'huy', 'huz', 'hvc', 'hve', 'hvk', 'hvn', 'hvv', 'hwa', 'hwc', 'hwo', 'hy', 'hya', 'hz', 'ia', 'iai', 'ian', 'iar', 'iba', 'ibb', 'ibd', 'ibe', 'ibg', 'ibl', 'ibm', 'ibn', 'ibr', 'ibu', 'iby', 'ica', 'ich', 'icl', 'icr', 'id', 'ida', 'idb', 'idc', 'idd', 'ide', 'idi', 'idr', 'ids', 'idt', 'idu', 'ie', 'ifa', 'ifb', 'ife', 'iff', 'ifk', 'ifm', 'ifu', 'ify', 
'ig', 'igb', 'ige', 'igg', 'igl', 'igm', 'ign', 'igo', 'igs', 'igw', 'ihb', 'ihi', 'ihp', 'ihw', 'ii', 'iin', 'ijc', 'ije', 'ijj', 'ijn', 'ijs', 'ik', 'ike', 'iki', 'ikk', 'ikl', 'iko', 'ikp', 'ikr', 'iks', 'ikt', 'ikv', 'ikw', 'ikx', 'ikz', 'ila', 'ilb', 'ilg', 'ili', 'ilk', 'ilm', 'ilo', 'ilp', 'ils', 'ilu', 'ilv', 'ima', 'imi', 'iml', 'imn', 'imo', 'imr', 'ims', 'imy', 'inb', 'ing', 'inh', 'inj', 'inl', 'inm', 'inn', 'ino', 'inp', 'ins', 'int', 'inz', 'io', 'ior', 'iou', 'iow', 'ipi', 'ipo', 'iqu', 'iqw', 'ire', 'irh', 'iri', 'irk', 'irn', 'irr', 'iru', 'irx', 'iry', 'is', 'isa', 'isc', 'isd', 'ise', 'isg', 'ish', 'isi', 'isk', 'ism', 'isn', 'iso', 'isr', 'ist', 'isu', 'it', 'itb', 'itd', 'ite', 'iti', 'itk', 'itl', 'itm', 'ito', 'itr', 'its', 'itt', 'itv', 'itw', 'itx', 'ity', 'itz', 'iu', 'ium', 'ivb', 'ivv', 'iwk', 'iwm', 'iwo', 'iws', 'ixc', 'ixl', 'iya', 'iyo', 'iyx', 'izh', 'izr', 'izz', 'ja', 'jaa', 'jab', 'jac', 'jad', 'jae', 'jaf', 'jah', 'jaj', 'jak', 'jal', 'jam', 'jan', 'jao', 'jaq', 'jas', 'jat', 'jau', 'jax', 'jay', 'jaz', 'jbe', 'jbi', 'jbj', 'jbk', 'jbn', 'jbo', 'jbr', 'jbt', 'jbu', 'jbw', 'jcs', 'jct', 'jda', 'jdg', 'jdt', 'jeb', 'jee', 'jeg', 'jeh', 'jei', 'jek', 'jel', 'jen', 'jer', 'jet', 'jeu', 'jgb', 'jge', 'jgk', 'jgo', 'jhi', 'jhs', 'jia', 'jib', 'jic', 'jid', 'jie', 'jig', 'jih', 'jii', 'jil', 'jim', 'jio', 'jiq', 'jit', 'jiu', 'jiv', 'jiy', 'jje', 'jjr', 'jka', 'jkm', 'jko', 'jkp', 'jkr', 'jku', 'jle', 'jls', 'jma', 'jmb', 'jmc', 'jmd', 'jmi', 'jml', 'jmn', 'jmr', 'jms', 'jmw', 'jmx', 'jna', 'jnd', 'jng', 'jni', 'jnj', 'jnl', 'jns', 'job', 'jod', 'jog', 'jor', 'jos', 'jow', 'jpa', 'jpr', 'jqr', 'jra', 'jrb', 'jrr', 'jrt', 'jru', 'jsl', 'jua', 'jub', 'juc', 'jud', 'juh', 'jui', 'juk', 'jul', 'jum', 'jun', 'juo', 'jup', 'jur', 'jus', 'jut', 'juu', 'juw', 'juy', 'jv', 'jvd', 'jvn', 'jwi', 'jya', 'jye', 'jyy', 'ka', 'kaa', 'kab', 'kac', 'kad', 'kae', 'kaf', 'kag', 'kah', 'kai', 'kaj', 'kak', 'kam', 'kao', 'kap', 'kaq', 'kav', 'kaw', 'kax', 'kay', 'kba', 'kbb', 'kbc', 'kbd', 'kbe', 'kbg', 'kbh', 'kbi', 'kbj', 'kbk', 'kbl', 'kbm', 'kbn', 'kbo', 'kbp', 'kbq', 'kbr', 'kbs', 'kbt', 'kbu', 'kbv', 'kbw', 'kbx', 'kby', 'kbz', 'kca', 'kcb', 'kcc', 'kcd', 'kce', 'kcf', 'kcg', 'kch', 'kci', 'kcj', 'kck', 'kcl', 'kcm', 'kcn', 'kco', 'kcp', 'kcq', 'kcr', 'kcs', 'kct', 'kcu', 'kcv', 'kcw', 'kcx', 'kcy', 'kcz', 'kda', 'kdc', 'kdd', 'kde', 'kdf', 'kdg', 'kdh', 'kdi', 'kdj', 'kdk', 'kdl', 'kdm', 'kdn', 'kdp', 'kdq', 'kdr', 'kdt', 'kdu', 'kdw', 'kdx', 'kdy', 'kdz', 'kea', 'keb', 'kec', 'ked', 'kee', 'kef', 'keg', 'keh', 'kei', 'kej', 'kek', 'kel', 'kem', 'ken', 'keo', 'kep', 'keq', 'ker', 'kes', 'ket', 'keu', 'kev', 'kew', 'kex', 'key', 'kez', 'kfa', 'kfb', 'kfc', 'kfd', 'kfe', 'kff', 'kfg', 'kfh', 'kfi', 'kfj', 'kfk', 'kfl', 'kfm', 'kfn', 'kfo', 'kfp', 'kfq', 'kfr', 'kfs', 'kft', 'kfu', 'kfv', 'kfw', 'kfx', 'kfy', 'kfz', 'kg', 'kga', 'kgb', 'kgd', 'kge', 'kgf', 'kgg', 'kgi', 'kgj', 'kgk', 'kgl', 'kgm', 'kgn', 'kgo', 'kgp', 'kgq', 'kgr', 'kgs', 'kgt', 'kgu', 'kgv', 'kgw', 'kgx', 'kgy', 'kha', 'khb', 'khc', 'khd', 'khe', 'khf', 'khg', 'khh', 'khj', 'khk', 'khl', 'khn', 'kho', 'khp', 'khq', 'khr', 'khs', 'kht', 'khu', 'khv', 'khw', 'khx', 'khy', 'khz', 'ki', 'kia', 'kib', 'kic', 'kid', 'kie', 'kif', 'kig', 'kih', 'kii', 'kij', 'kil', 'kim', 'kio', 'kip', 'kiq', 'kis', 'kit', 'kiu', 'kiv', 'kiw', 'kix', 'kiy', 'kiz', 'kj', 'kja', 'kjb', 'kjc', 'kjd', 'kje', 'kjf', 'kjg', 'kjh', 'kji', 'kjj', 'kjk', 'kjl', 'kjm', 'kjn', 'kjo', 'kjp', 'kjq', 'kjr', 'kjs', 'kjt', 'kju', 'kjv', 'kjx', 
'kjy', 'kjz', 'kk', 'kka', 'kkb', 'kkc', 'kkd', 'kke', 'kkf', 'kkg', 'kkh', 'kki', 'kkj', 'kkk', 'kkl', 'kkm', 'kkn', 'kko', 'kkp', 'kkq', 'kkr', 'kks', 'kkt', 'kku', 'kkv', 'kkw', 'kkx', 'kky', 'kkz', 'kl', 'kla', 'klb', 'klc', 'kld', 'kle', 'klf', 'klg', 'klh', 'kli', 'klj', 'klk', 'kll', 'klm', 'kln', 'klo', 'klp', 'klq', 'klr', 'kls', 'klt', 'klu', 'klv', 'klw', 'klx', 'kly', 'klz', 'km', 'kma', 'kmb', 'kmc', 'kmd', 'kme', 'kmf', 'kmg', 'kmh', 'kmi', 'kmj', 'kmk', 'kml', 'kmm', 'kmn', 'kmo', 'kmp', 'kmq', 'kmr', 'kms', 'kmt', 'kmu', 'kmv', 'kmw', 'kmx', 'kmy', 'kmz', 'kn', 'kna', 'knb', 'knc', 'knd', 'kne', 'knf', 'kng', 'kni', 'knj', 'knk', 'knl', 'knm', 'knn', 'kno', 'knp', 'knq', 'knr', 'kns', 'knt', 'knu', 'knv', 'knw', 'knx', 'kny', 'knz', 'ko', 'koa', 'koc', 'kod', 'koe', 'kof', 'kog', 'koh', 'koi', 'kok', 'kol', 'koo', 'kop', 'koq', 'kos', 'kot', 'kou', 'kov', 'kow', 'koy', 'koz', 'kpa', 'kpb', 'kpc', 'kpd', 'kpe', 'kpf', 'kpg', 'kph', 'kpi', 'kpj', 'kpk', 'kpl', 'kpm', 'kpn', 'kpo', 'kpq', 'kpr', 'kps', 'kpt', 'kpu', 'kpv', 'kpw', 'kpx', 'kpy', 'kpz', 'kqa', 'kqb', 'kqc', 'kqd', 'kqe', 'kqf', 'kqg', 'kqh', 'kqi', 'kqj', 'kqk', 'kql', 'kqm', 'kqn', 'kqo', 'kqp', 'kqq', 'kqr', 'kqs', 'kqt', 'kqu', 'kqv', 'kqw', 'kqx', 'kqy', 'kqz', 'kr', 'kra', 'krb', 'krc', 'krd', 'kre', 'krf', 'krh', 'kri', 'krj', 'krk', 'krl', 'krm', 'krn', 'krp', 'krr', 'krs', 'krt', 'kru', 'krv', 'krw', 'krx', 'kry', 'krz', 'ks', 'ksa', 'ksb', 'ksc', 'ksd', 'kse', 'ksf', 'ksg', 'ksh', 'ksi', 'ksj', 'ksk', 'ksl', 'ksm', 'ksn', 'kso', 'ksp', 'ksq', 'ksr', 'kss', 'kst', 'ksu', 'ksv', 'ksw', 'ksx', 'ksy', 'ksz', 'kta', 'ktb', 'ktc', 'ktd', 'kte', 'ktf', 'ktg', 'kth', 'kti', 'ktj', 'ktk', 'ktl', 'ktm', 'ktn', 'kto', 'ktp', 'ktq', 'kts', 'ktt', 'ktu', 'ktv', 'ktw', 'ktx', 'kty', 'ktz', 'ku', 'kub', 'kuc', 'kud', 'kue', 'kuf', 'kug', 'kuh', 'kui', 'kuj', 'kuk', 'kul', 'kum', 'kun', 'kuo', 'kup', 'kuq', 'kus', 'kut', 'kuu', 'kuv', 'kuw', 'kux', 'kuy', 'kuz', 'kv', 'kva', 'kvb', 'kvc', 'kvd', 'kve', 'kvf', 'kvg', 'kvh', 'kvi', 'kvj', 'kvk', 'kvl', 'kvm', 'kvn', 'kvo', 'kvp', 'kvq', 'kvr', 'kvt', 'kvu', 'kvv', 'kvw', 'kvx', 'kvy', 'kvz', 'kw', 'kwa', 'kwb', 'kwc', 'kwd', 'kwe', 'kwf', 'kwg', 'kwh', 'kwi', 'kwj', 'kwk', 'kwl', 'kwm', 'kwn', 'kwo', 'kwp', 'kwr', 'kws', 'kwt', 'kwu', 'kwv', 'kww', 'kwx', 'kwy', 'kwz', 'kxa', 'kxb', 'kxc', 'kxd', 'kxf', 'kxh', 'kxi', 'kxj', 'kxk', 'kxl', 'kxm', 'kxn', 'kxo', 'kxp', 'kxq', 'kxr', 'kxs', 'kxt', 'kxu', 'kxv', 'kxw', 'kxx', 'kxy', 'kxz', 'ky', 'kya', 'kyb', 'kyc', 'kyd', 'kye', 'kyf', 'kyg', 'kyh', 'kyi', 'kyj', 'kyk', 'kyl', 'kym', 'kyn', 'kyo', 'kyp', 'kyq', 'kyr', 'kys', 'kyt', 'kyu', 'kyv', 'kyw', 'kyx', 'kyy', 'kyz', 'kza', 'kzb', 'kzc', 'kzd', 'kze', 'kzf', 'kzg', 'kzi', 'kzk', 'kzl', 'kzm', 'kzn', 'kzo', 'kzp', 'kzq', 'kzr', 'kzs', 'kzu', 'kzv', 'kzw', 'kzx', 'kzy', 'kzz', 'la', 'laa', 'lab', 'lac', 'lad', 'lae', 'laf', 'lag', 'lah', 'lai', 'laj', 'lak', 'lal', 'lam', 'lan', 'lap', 'laq', 'lar', 'las', 'lau', 'law', 'lax', 'lay', 'laz', 'lb', 'lba', 'lbb', 'lbc', 'lbe', 'lbf', 'lbg', 'lbi', 'lbj', 'lbk', 'lbl', 'lbm', 'lbn', 'lbo', 'lbq', 'lbr', 'lbs', 'lbt', 'lbu', 'lbv', 'lbw', 'lbx', 'lby', 'lbz', 'lcc', 'lcd', 'lce', 'lcf', 'lch', 'lcl', 'lcm', 'lcp', 'lcq', 'lcs', 'lda', 'ldb', 'ldd', 'ldg', 'ldh', 'ldi', 'ldj', 'ldk', 'ldl', 'ldm', 'ldn', 'ldo', 'ldp', 'ldq', 'lea', 'leb', 'lec', 'led', 'lee', 'lef', 'leh', 'lei', 'lej', 'lek', 'lel', 'lem', 'len', 'leo', 'lep', 'leq', 'ler', 'les', 'let', 'leu', 'lev', 'lew', 'lex', 'ley', 'lez', 'lfa', 'lfn', 'lg', 'lga', 
'lgb', 'lgg', 'lgh', 'lgi', 'lgk', 'lgl', 'lgm', 'lgn', 'lgq', 'lgr', 'lgt', 'lgu', 'lgz', 'lha', 'lhh', 'lhi', 'lhl', 'lhm', 'lhn', 'lhp', 'lhs', 'lht', 'lhu', 'li', 'lia', 'lib', 'lic', 'lid', 'lie', 'lif', 'lig', 'lih', 'lij', 'lik', 'lil', 'lio', 'lip', 'liq', 'lir', 'lis', 'liu', 'liv', 'liw', 'lix', 'liy', 'liz', 'lja', 'lje', 'lji', 'ljl', 'ljp', 'ljw', 'ljx', 'lka', 'lkb', 'lkc', 'lkd', 'lke', 'lkh', 'lki', 'lkj', 'lkl', 'lkm', 'lkn', 'lko', 'lkr', 'lks', 'lkt', 'lku', 'lky', 'lla', 'llb', 'llc', 'lld', 'lle', 'llf', 'llg', 'llh', 'lli', 'llj', 'llk', 'lll', 'llm', 'lln', 'llo', 'llp', 'llq', 'lls', 'llu', 'llx', 'lma', 'lmb', 'lmc', 'lmd', 'lme', 'lmf', 'lmg', 'lmh', 'lmi', 'lmj', 'lmk', 'lml', 'lmn', 'lmo', 'lmp', 'lmq', 'lmr', 'lmu', 'lmv', 'lmw', 'lmx', 'lmy', 'lmz', 'ln', 'lna', 'lnb', 'lnd', 'lng', 'lnh', 'lni', 'lnj', 'lnl', 'lnm', 'lnn', 'lno', 'lns', 'lnu', 'lnw', 'lnz', 'lo', 'loa', 'lob', 'loc', 'loe', 'lof', 'log', 'loh', 'loi', 'loj', 'lok', 'lol', 'lom', 'lon', 'loo', 'lop', 'loq', 'lor', 'los', 'lot', 'lou', 'lov', 'low', 'lox', 'loy', 'loz', 'lpa', 'lpe', 'lpn', 'lpo', 'lpx', 'lra', 'lrc', 'lre', 'lrg', 'lri', 'lrk', 'lrl', 'lrm', 'lrn', 'lro', 'lrr', 'lrt', 'lrv', 'lrz', 'lsa', 'lsd', 'lse', 'lsg', 'lsh', 'lsi', 'lsl', 'lsm', 'lso', 'lsp', 'lsr', 'lss', 'lst', 'lsy', 'lt', 'ltc', 'ltg', 'lti', 'ltn', 'lto', 'lts', 'ltu', 'lu', 'lua', 'luc', 'lud', 'lue', 'luf', 'lui', 'luj', 'luk', 'lul', 'lum', 'lun', 'luo', 'lup', 'luq', 'lur', 'lus', 'lut', 'luu', 'luv', 'luw', 'luy', 'luz', 'lv', 'lva', 'lvk', 'lvs', 'lvu', 'lwa', 'lwe', 'lwg', 'lwh', 'lwl', 'lwm', 'lwo', 'lwt', 'lwu', 'lww', 'lya', 'lyg', 'lyn', 'lzh', 'lzl', 'lzn', 'lzz', 'maa', 'mab', 'mad', 'mae', 'maf', 'mag', 'mai', 'maj', 'mak', 'mam', 'man', 'maq', 'mas', 'mat', 'mau', 'mav', 'maw', 'max', 'maz', 'mba', 'mbb', 'mbc', 'mbd', 'mbe', 'mbf', 'mbh', 'mbi', 'mbj', 'mbk', 'mbl', 'mbm', 'mbn', 'mbo', 'mbp', 'mbq', 'mbr', 'mbs', 'mbt', 'mbu', 'mbv', 'mbw', 'mbx', 'mby', 'mbz', 'mca', 'mcb', 'mcc', 'mcd', 'mce', 'mcf', 'mcg', 'mch', 'mci', 'mcj', 'mck', 'mcl', 'mcm', 'mcn', 'mco', 'mcp', 'mcq', 'mcr', 'mcs', 'mct', 'mcu', 'mcv', 'mcw', 'mcx', 'mcy', 'mcz', 'mda', 'mdb', 'mdc', 'mdd', 'mde', 'mdf', 'mdg', 'mdh', 'mdi', 'mdj', 'mdk', 'mdl', 'mdm', 'mdn', 'mdp', 'mdq', 'mdr', 'mds', 'mdt', 'mdu', 'mdv', 'mdw', 'mdx', 'mdy', 'mdz', 'mea', 'meb', 'mec', 'med', 'mee', 'mef', 'meh', 'mei', 'mej', 'mek', 'mel', 'mem', 'men', 'meo', 'mep', 'meq', 'mer', 'mes', 'met', 'meu', 'mev', 'mew', 'mey', 'mez', 'mfa', 'mfb', 'mfc', 'mfd', 'mfe', 'mff', 'mfg', 'mfh', 'mfi', 'mfj', 'mfk', 'mfl', 'mfm', 'mfn', 'mfo', 'mfp', 'mfq', 'mfr', 'mfs', 'mft', 'mfu', 'mfv', 'mfw', 'mfx', 'mfy', 'mfz', 'mg', 'mga', 'mgb', 'mgc', 'mgd', 'mge', 'mgf', 'mgg', 'mgh', 'mgi', 'mgj', 'mgk', 'mgl', 'mgm', 'mgn', 'mgo', 'mgp', 'mgq', 'mgr', 'mgs', 'mgt', 'mgu', 'mgv', 'mgw', 'mgy', 'mgz', 'mh', 'mha', 'mhb', 'mhc', 'mhd', 'mhe', 'mhf', 'mhg', 'mhi', 'mhj', 'mhk', 'mhl', 'mhm', 'mhn', 'mho', 'mhp', 'mhq', 'mhr', 'mhs', 'mht', 'mhu', 'mhw', 'mhx', 'mhy', 'mhz', 'mi', 'mia', 'mib', 'mic', 'mid', 'mie', 'mif', 'mig', 'mih', 'mii', 'mij', 'mik', 'mil', 'mim', 'min', 'mio', 'mip', 'miq', 'mir', 'mis', 'mit', 'miu', 'miw', 'mix', 'miy', 'miz', 'mjb', 'mjc', 'mjd', 'mje', 'mjg', 'mjh', 'mji', 'mjj', 'mjk', 'mjl', 'mjm', 'mjn', 'mjo', 'mjp', 'mjq', 'mjr', 'mjs', 'mjt', 'mju', 'mjv', 'mjw', 'mjx', 'mjy', 'mjz', 'mk', 'mka', 'mkb', 'mkc', 'mke', 'mkf', 'mkg', 'mki', 'mkj', 'mkk', 'mkl', 'mkm', 'mkn', 'mko', 'mkp', 'mkq', 'mkr', 'mks', 'mkt', 'mku', 'mkv', 'mkw', 
'mkx', 'mky', 'mkz', 'ml', 'mla', 'mlb', 'mlc', 'mle', 'mlf', 'mlh', 'mli', 'mlj', 'mlk', 'mll', 'mlm', 'mln', 'mlo', 'mlp', 'mlq', 'mlr', 'mls', 'mlu', 'mlv', 'mlw', 'mlx', 'mlz', 'mma', 'mmb', 'mmc', 'mmd', 'mme', 'mmf', 'mmg', 'mmh', 'mmi', 'mmj', 'mmk', 'mml', 'mmm', 'mmn', 'mmo', 'mmp', 'mmq', 'mmr', 'mmt', 'mmu', 'mmv', 'mmw', 'mmx', 'mmy', 'mmz', 'mn', 'mna', 'mnb', 'mnc', 'mnd', 'mne', 'mnf', 'mng', 'mnh', 'mni', 'mnj', 'mnk', 'mnl', 'mnm', 'mnn', 'mnp', 'mnq', 'mnr', 'mns', 'mnu', 'mnv', 'mnw', 'mnx', 'mny', 'mnz', 'moa', 'moc', 'mod', 'moe', 'mog', 'moh', 'moi', 'moj', 'mok', 'mom', 'moo', 'mop', 'moq', 'mor', 'mos', 'mot', 'mou', 'mov', 'mow', 'mox', 'moy', 'moz', 'mpa', 'mpb', 'mpc', 'mpd', 'mpe', 'mpg', 'mph', 'mpi', 'mpj', 'mpk', 'mpl', 'mpm', 'mpn', 'mpo', 'mpp', 'mpq', 'mpr', 'mps', 'mpt', 'mpu', 'mpv', 'mpw', 'mpx', 'mpy', 'mpz', 'mqa', 'mqb', 'mqc', 'mqe', 'mqf', 'mqg', 'mqh', 'mqi', 'mqj', 'mqk', 'mql', 'mqm', 'mqn', 'mqo', 'mqp', 'mqq', 'mqr', 'mqs', 'mqt', 'mqu', 'mqv', 'mqw', 'mqx', 'mqy', 'mqz', 'mr', 'mra', 'mrb', 'mrc', 'mrd', 'mre', 'mrf', 'mrg', 'mrh', 'mrj', 'mrk', 'mrl', 'mrm', 'mrn', 'mro', 'mrp', 'mrq', 'mrr', 'mrs', 'mrt', 'mru', 'mrv', 'mrw', 'mrx', 'mry', 'mrz', 'ms', 'msb', 'msc', 'msd', 'mse', 'msf', 'msg', 'msh', 'msi', 'msj', 'msk', 'msl', 'msm', 'msn', 'mso', 'msp', 'msq', 'msr', 'mss', 'msu', 'msv', 'msw', 'msx', 'msy', 'msz', 'mt', 'mta', 'mtb', 'mtc', 'mtd', 'mte', 'mtf', 'mtg', 'mth', 'mti', 'mtj', 'mtk', 'mtl', 'mtm', 'mtn', 'mto', 'mtp', 'mtq', 'mtr', 'mts', 'mtt', 'mtu', 'mtv', 'mtw', 'mtx', 'mty', 'mua', 'mub', 'muc', 'mud', 'mue', 'mug', 'muh', 'mui', 'muj', 'muk', 'mul', 'mum', 'muo', 'mup', 'muq', 'mur', 'mus', 'mut', 'muu', 'muv', 'mux', 'muy', 'muz', 'mva', 'mvb', 'mvd', 'mve', 'mvf', 'mvg', 'mvh', 'mvi', 'mvk', 'mvl', 'mvm', 'mvn', 'mvo', 'mvp', 'mvq', 'mvr', 'mvs', 'mvt', 'mvu', 'mvv', 'mvw', 'mvx', 'mvy', 'mvz', 'mwa', 'mwb', 'mwc', 'mwe', 'mwf', 'mwg', 'mwh', 'mwi', 'mwk', 'mwl', 'mwm', 'mwn', 'mwo', 'mwp', 'mwq', 'mwr', 'mws', 'mwt', 'mwu', 'mwv', 'mww', 'mwx', 'mwy', 'mwz', 'mxa', 'mxb', 'mxc', 'mxd', 'mxe', 'mxf', 'mxg', 'mxh', 'mxi', 'mxj', 'mxk', 'mxl', 'mxm', 'mxn', 'mxo', 'mxp', 'mxq', 'mxr', 'mxs', 'mxt', 'mxu', 'mxv', 'mxw', 'mxx', 'mxy', 'mxz', 'my', 'myb', 'myc', 'myd', 'mye', 'myf', 'myg', 'myh', 'myi', 'myj', 'myk', 'myl', 'mym', 'myo', 'myp', 'myr', 'mys', 'myu', 'myv', 'myw', 'myx', 'myy', 'myz', 'mza', 'mzb', 'mzc', 'mzd', 'mze', 'mzg', 'mzh', 'mzi', 'mzj', 'mzk', 'mzl', 'mzm', 'mzn', 'mzo', 'mzp', 'mzq', 'mzr', 'mzs', 'mzt', 'mzu', 'mzv', 'mzw', 'mzx', 'mzy', 'mzz', 'na', 'naa', 'nab', 'nac', 'nae', 'naf', 'nag', 'naj', 'nak', 'nal', 'nam', 'nan', 'nao', 'nap', 'naq', 'nar', 'nas', 'nat', 'naw', 'nax', 'nay', 'naz', 'nb', 'nba', 'nbb', 'nbc', 'nbd', 'nbe', 'nbg', 'nbh', 'nbi', 'nbj', 'nbk', 'nbm', 'nbn', 'nbo', 'nbp', 'nbq', 'nbr', 'nbs', 'nbt', 'nbu', 'nbv', 'nbw', 'nby', 'nca', 'ncb', 'ncc', 'ncd', 'nce', 'ncf', 'ncg', 'nch', 'nci', 'ncj', 'nck', 'ncl', 'ncm', 'ncn', 'nco', 'ncp', 'ncr', 'ncs', 'nct', 'ncu', 'ncx', 'ncz', 'nd', 'nda', 'ndb', 'ndc', 'ndd', 'ndf', 'ndg', 'ndh', 'ndi', 'ndj', 'ndk', 'ndl', 'ndm', 'ndn', 'ndp', 'ndq', 'ndr', 'nds', 'ndt', 'ndu', 'ndv', 'ndw', 'ndx', 'ndy', 'ndz', 'ne', 'nea', 'neb', 'nec', 'ned', 'nee', 'nef', 'neg', 'neh', 'nei', 'nej', 'nek', 'nem', 'nen', 'neo', 'neq', 'ner', 'nes', 'net', 'neu', 'nev', 'new', 'nex', 'ney', 'nez', 'nfa', 'nfd', 'nfl', 'nfr', 'nfu', 'ng', 'nga', 'ngb', 'ngc', 'ngd', 'nge', 'ngg', 'ngh', 'ngi', 'ngj', 'ngk', 'ngl', 'ngm', 'ngn', 'ngo', 'ngp', 'ngq', 
'ngr', 'ngs', 'ngt', 'ngu', 'ngv', 'ngw', 'ngx', 'ngy', 'ngz', 'nha', 'nhb', 'nhc', 'nhd', 'nhe', 'nhf', 'nhg', 'nhh', 'nhi', 'nhk', 'nhm', 'nhn', 'nho', 'nhp', 'nhq', 'nhr', 'nht', 'nhu', 'nhv', 'nhw', 'nhx', 'nhy', 'nhz', 'nia', 'nib', 'nid', 'nie', 'nif', 'nig', 'nih', 'nii', 'nij', 'nik', 'nil', 'nim', 'nin', 'nio', 'niq', 'nir', 'nis', 'nit', 'niu', 'niv', 'niw', 'nix', 'niy', 'niz', 'nja', 'njb', 'njd', 'njh', 'nji', 'njj', 'njl', 'njm', 'njn', 'njo', 'njr', 'njs', 'njt', 'nju', 'njx', 'njy', 'njz', 'nka', 'nkb', 'nkc', 'nkd', 'nke', 'nkf', 'nkg', 'nkh', 'nki', 'nkj', 'nkk', 'nkm', 'nkn', 'nko', 'nkp', 'nkq', 'nkr', 'nks', 'nkt', 'nku', 'nkv', 'nkw', 'nkx', 'nkz', 'nl', 'nla', 'nlc', 'nle', 'nlg', 'nli', 'nlj', 'nlk', 'nll', 'nlo', 'nlq', 'nlu', 'nlv', 'nlw', 'nlx', 'nly', 'nlz', 'nma', 'nmb', 'nmc', 'nmd', 'nme', 'nmf', 'nmg', 'nmh', 'nmi', 'nmj', 'nmk', 'nml', 'nmm', 'nmn', 'nmo', 'nmp', 'nmq', 'nmr', 'nms', 'nmt', 'nmu', 'nmv', 'nmw', 'nmx', 'nmy', 'nmz', 'nn', 'nna', 'nnb', 'nnc', 'nnd', 'nne', 'nnf', 'nng', 'nnh', 'nni', 'nnj', 'nnk', 'nnl', 'nnm', 'nnn', 'nnp', 'nnq', 'nnr', 'nns', 'nnt', 'nnu', 'nnv', 'nnw', 'nny', 'nnz', 'no', 'noa', 'noc', 'nod', 'noe', 'nof', 'nog', 'noh', 'noi', 'noj', 'nok', 'nol', 'nom', 'non', 'nop', 'noq', 'nos', 'not', 'nou', 'nov', 'now', 'noy', 'noz', 'npa', 'npb', 'npg', 'nph', 'npi', 'npl', 'npn', 'npo', 'nps', 'npu', 'npy', 'nqg', 'nqk', 'nqm', 'nqn', 'nqo', 'nqq', 'nqy', 'nr', 'nra', 'nrb', 'nrc', 'nre', 'nrf', 'nrg', 'nri', 'nrk', 'nrl', 'nrm', 'nrn', 'nrp', 'nrr', 'nrt', 'nru', 'nrx', 'nrz', 'nsa', 'nsc', 'nsd', 'nse', 'nsf', 'nsg', 'nsh', 'nsi', 'nsk', 'nsl', 'nsm', 'nsn', 'nso', 'nsp', 'nsq', 'nsr', 'nss', 'nst', 'nsu', 'nsv', 'nsw', 'nsx', 'nsy', 'nsz', 'ntd', 'nte', 'ntg', 'nti', 'ntj', 'ntk', 'ntm', 'nto', 'ntp', 'ntr', 'ntu', 'ntw', 'ntx', 'nty', 'ntz', 'nua', 'nuc', 'nud', 'nue', 'nuf', 'nug', 'nuh', 'nui', 'nuj', 'nuk', 'nul', 'num', 'nun', 'nuo', 'nup', 'nuq', 'nur', 'nus', 'nut', 'nuu', 'nuv', 'nuw', 'nux', 'nuy', 'nuz', 'nv', 'nvh', 'nvm', 'nvo', 'nwa', 'nwb', 'nwc', 'nwe', 'nwg', 'nwi', 'nwm', 'nwo', 'nwr', 'nwx', 'nwy', 'nxa', 'nxd', 'nxe', 'nxg', 'nxi', 'nxk', 'nxl', 'nxm', 'nxn', 'nxo', 'nxq', 'nxr', 'nxu', 'nxx', 'ny', 'nyb', 'nyc', 'nyd', 'nye', 'nyf', 'nyg', 'nyh', 'nyi', 'nyj', 'nyk', 'nyl', 'nym', 'nyn', 'nyo', 'nyp', 'nyq', 'nyr', 'nys', 'nyt', 'nyu', 'nyv', 'nyw', 'nyx', 'nyy', 'nza', 'nzb', 'nzi', 'nzk', 'nzm', 'nzs', 'nzu', 'nzy', 'nzz', 'oaa', 'oac', 'oar', 'oav', 'obi', 'obk', 'obl', 'obm', 'obo', 'obr', 'obt', 'obu', 'oc', 'oca', 'och', 'oco', 'ocu', 'oda', 'odk', 'odt', 'odu', 'ofo', 'ofs', 'ofu', 'ogb', 'ogc', 'oge', 'ogg', 'ogo', 'ogu', 'oht', 'ohu', 'oia', 'oin', 'oj', 'ojb', 'ojc', 'ojg', 'ojp', 'ojs', 'ojv', 'ojw', 'oka', 'okb', 'okd', 'oke', 'okg', 'okh', 'oki', 'okj', 'okk', 'okl', 'okm', 'okn', 'oko', 'okr', 'oks', 'oku', 'okv', 'okx', 'ola', 'old', 'ole', 'olk', 'olm', 'olo', 'olr', 'olt', 'olu', 'om', 'oma', 'omb', 'omc', 'omg', 'omi', 'omk', 'oml', 'omn', 'omo', 'omp', 'omr', 'omt', 'omu', 'omw', 'omx', 'ona', 'onb', 'one', 'ong', 'oni', 'onj', 'onk', 'onn', 'ono', 'onp', 'onr', 'ons', 'ont', 'onu', 'onw', 'onx', 'ood', 'oog', 'oon', 'oor', 'oos', 'opa', 'opk', 'opm', 'opo', 'opt', 'opy', 'or', 'ora', 'orc', 'ore', 'org', 'orh', 'orn', 'oro', 'orr', 'ors', 'ort', 'oru', 'orv', 'orw', 'orx', 'ory', 'orz', 'os', 'osa', 'osc', 'osi', 'oso', 'osp', 'ost', 'osu', 'osx', 'ota', 'otb', 'otd', 'ote', 'oti', 'otk', 'otl', 'otm', 'otn', 'otq', 'otr', 'ots', 'ott', 'otu', 'otw', 'otx', 'oty', 'otz', 'oua', 'oub', 
'oue', 'oui', 'oum', 'owi', 'owl', 'oyb', 'oyd', 'oym', 'oyy', 'ozm', 'pa', 'pab', 'pac', 'pad', 'pae', 'paf', 'pag', 'pah', 'pai', 'pak', 'pal', 'pam', 'pao', 'pap', 'paq', 'par', 'pas', 'pat', 'pau', 'pav', 'paw', 'pax', 'pay', 'paz', 'pbb', 'pbc', 'pbe', 'pbf', 'pbg', 'pbh', 'pbi', 'pbl', 'pbn', 'pbo', 'pbp', 'pbr', 'pbs', 'pbt', 'pbu', 'pbv', 'pby', 'pca', 'pcb', 'pcc', 'pcd', 'pce', 'pcf', 'pcg', 'pch', 'pci', 'pcj', 'pck', 'pcl', 'pcm', 'pcn', 'pcp', 'pcw', 'pda', 'pdc', 'pdi', 'pdn', 'pdo', 'pdt', 'pdu', 'pea', 'peb', 'ped', 'pee', 'pef', 'peg', 'peh', 'pei', 'pej', 'pek', 'pel', 'pem', 'peo', 'pep', 'peq', 'pes', 'pev', 'pex', 'pey', 'pez', 'pfa', 'pfe', 'pfl', 'pga', 'pgd', 'pgg', 'pgi', 'pgk', 'pgl', 'pgn', 'pgs', 'pgu', 'pgz', 'pha', 'phd', 'phg', 'phh', 'phk', 'phl', 'phm', 'phn', 'pho', 'phq', 'phr', 'pht', 'phu', 'phv', 'phw', 'pi', 'pia', 'pib', 'pic', 'pid', 'pie', 'pif', 'pig', 'pih', 'pii', 'pij', 'pil', 'pim', 'pin', 'pio', 'pip', 'pir', 'pis', 'pit', 'piu', 'piv', 'piw', 'pix', 'piy', 'piz', 'pjt', 'pka', 'pkb', 'pkc', 'pkg', 'pkh', 'pkn', 'pko', 'pkp', 'pkr', 'pks', 'pkt', 'pku', 'pl', 'pla', 'plb', 'plc', 'pld', 'ple', 'plg', 'plh', 'plj', 'plk', 'pll', 'pln', 'plo', 'plp', 'plq', 'plr', 'pls', 'plt', 'plu', 'plv', 'plw', 'ply', 'plz', 'pma', 'pmb', 'pmd', 'pme', 'pmf', 'pmh', 'pmi', 'pmj', 'pmk', 'pml', 'pmm', 'pmn', 'pmo', 'pmq', 'pmr', 'pms', 'pmt', 'pmw', 'pmx', 'pmy', 'pmz', 'pna', 'pnb', 'pnc', 'pne', 'png', 'pnh', 'pni', 'pnj', 'pnk', 'pnl', 'pnm', 'pnn', 'pno', 'pnp', 'pnq', 'pnr', 'pns', 'pnt', 'pnu', 'pnv', 'pnw', 'pnx', 'pny', 'pnz', 'poc', 'poe', 'pof', 'pog', 'poh', 'poi', 'pok', 'pom', 'pon', 'poo', 'pop', 'poq', 'pos', 'pot', 'pov', 'pow', 'pox', 'poy', 'ppe', 'ppi', 'ppk', 'ppl', 'ppm', 'ppn', 'ppo', 'ppp', 'ppq', 'pps', 'ppt', 'ppu', 'pqa', 'pqm', 'prb', 'prc', 'prd', 'pre', 'prf', 'prg', 'prh', 'pri', 'prk', 'prl', 'prm', 'prn', 'pro', 'prp', 'prq', 'prr', 'prs', 'prt', 'pru', 'prw', 'prx', 'prz', 'ps', 'psa', 'psc', 'psd', 'pse', 'psg', 'psh', 'psi', 'psl', 'psm', 'psn', 'pso', 'psp', 'psq', 'psr', 'pss', 'pst', 'psu', 'psw', 'psy', 'pt', 'pta', 'pth', 'pti', 'ptn', 'pto', 'ptp', 'ptq', 'ptr', 'ptt', 'ptu', 'ptv', 'ptw', 'pty', 'pua', 'pub', 'puc', 'pud', 'pue', 'puf', 'pug', 'pui', 'puj', 'puk', 'pum', 'puo', 'pup', 'puq', 'pur', 'put', 'puu', 'puw', 'pux', 'puy', 'pwa', 'pwb', 'pwg', 'pwi', 'pwm', 'pwn', 'pwo', 'pwr', 'pww', 'pxm', 'pye', 'pym', 'pyn', 'pys', 'pyu', 'pyx', 'pyy', 'pzn', 'qu', 'qua', 'qub', 'quc', 'qud', 'quf', 'qug', 'quh', 'qui', 'quk', 'qul', 'qum', 'qun', 'qup', 'quq', 'qur', 'qus', 'quv', 'quw', 'qux', 'quy', 'quz', 'qva', 'qvc', 'qve', 'qvh', 'qvi', 'qvj', 'qvl', 'qvm', 'qvn', 'qvo', 'qvp', 'qvs', 'qvw', 'qvy', 'qvz', 'qwa', 'qwc', 'qwh', 'qwm', 'qws', 'qwt', 'qxa', 'qxc', 'qxh', 'qxl', 'qxn', 'qxo', 'qxp', 'qxq', 'qxr', 'qxs', 'qxt', 'qxu', 'qxw', 'qya', 'qyp', 'raa', 'rab', 'rac', 'rad', 'raf', 'rag', 'rah', 'rai', 'raj', 'rak', 'ral', 'ram', 'ran', 'rao', 'rap', 'raq', 'rar', 'ras', 'rat', 'rau', 'rav', 'raw', 'rax', 'ray', 'raz', 'rbb', 'rbk', 'rbl', 'rbp', 'rcf', 'rdb', 'rea', 'reb', 'ree', 'reg', 'rei', 'rej', 'rel', 'rem', 'ren', 'rer', 'res', 'ret', 'rey', 'rga', 'rge', 'rgk', 'rgn', 'rgr', 'rgs', 'rgu', 'rhg', 'rhp', 'ria', 'rie', 'rif', 'ril', 'rim', 'rin', 'rir', 'rit', 'riu', 'rjg', 'rji', 'rjs', 'rka', 'rkb', 'rkh', 'rki', 'rkm', 'rkt', 'rkw', 'rm', 'rma', 'rmb', 'rmc', 'rmd', 'rme', 'rmf', 'rmg', 'rmh', 'rmi', 'rmk', 'rml', 'rmm', 'rmn', 'rmo', 'rmp', 'rmq', 'rms', 'rmt', 'rmu', 'rmv', 'rmw', 'rmx', 'rmy', 
'rmz', 'rn', 'rnd', 'rng', 'rnl', 'rnn', 'rnp', 'rnr', 'rnw', 'ro', 'rob', 'roc', 'rod', 'roe', 'rof', 'rog', 'rol', 'rom', 'roo', 'rop', 'ror', 'rou', 'row', 'rpn', 'rpt', 'rri', 'rro', 'rrt', 'rsb', 'rsi', 'rsl', 'rsm', 'rtc', 'rth', 'rtm', 'rts', 'rtw', 'ru', 'rub', 'ruc', 'rue', 'ruf', 'rug', 'ruh', 'rui', 'ruk', 'ruo', 'rup', 'ruq', 'rut', 'ruu', 'ruy', 'ruz', 'rw', 'rwa', 'rwk', 'rwm', 'rwo', 'rwr', 'rxd', 'rxw', 'ryn', 'rys', 'ryu', 'rzh', 'sa', 'saa', 'sab', 'sac', 'sad', 'sae', 'saf', 'sah', 'saj', 'sak', 'sam', 'sao', 'saq', 'sar', 'sas', 'sat', 'sau', 'sav', 'saw', 'sax', 'say', 'saz', 'sba', 'sbb', 'sbc', 'sbd', 'sbe', 'sbf', 'sbg', 'sbh', 'sbi', 'sbj', 'sbk', 'sbl', 'sbm', 'sbn', 'sbo', 'sbp', 'sbq', 'sbr', 'sbs', 'sbt', 'sbu', 'sbv', 'sbw', 'sbx', 'sby', 'sbz', 'sc', 'scb', 'sce', 'scf', 'scg', 'sch', 'sci', 'sck', 'scl', 'scn', 'sco', 'scp', 'scq', 'scs', 'scu', 'scv', 'scw', 'scx', 'sd', 'sda', 'sdb', 'sdc', 'sde', 'sdf', 'sdg', 'sdh', 'sdj', 'sdk', 'sdl', 'sdm', 'sdn', 'sdo', 'sdp', 'sdr', 'sds', 'sdt', 'sdu', 'sdx', 'sdz', 'se', 'sea', 'seb', 'sec', 'sed', 'see', 'sef', 'seg', 'seh', 'sei', 'sej', 'sek', 'sel', 'sen', 'seo', 'sep', 'seq', 'ser', 'ses', 'set', 'seu', 'sev', 'sew', 'sey', 'sez', 'sfb', 'sfe', 'sfm', 'sfs', 'sfw', 'sg', 'sga', 'sgb', 'sgc', 'sgd', 'sge', 'sgg', 'sgh', 'sgi', 'sgj', 'sgk', 'sgm', 'sgp', 'sgr', 'sgs', 'sgt', 'sgu', 'sgw', 'sgx', 'sgy', 'sgz', 'sh', 'sha', 'shb', 'shc', 'shd', 'she', 'shg', 'shh', 'shi', 'shj', 'shk', 'shl', 'shm', 'shn', 'sho', 'shp', 'shq', 'shr', 'shs', 'sht', 'shu', 'shv', 'shw', 'shx', 'shy', 'shz', 'si', 'sia', 'sib', 'sid', 'sie', 'sif', 'sig', 'sih', 'sii', 'sij', 'sik', 'sil', 'sim', 'sip', 'siq', 'sir', 'sis', 'siu', 'siv', 'siw', 'six', 'siy', 'siz', 'sja', 'sjb', 'sjd', 'sje', 'sjg', 'sjk', 'sjl', 'sjm', 'sjn', 'sjo', 'sjp', 'sjr', 'sjs', 'sjt', 'sju', 'sjw', 'sk', 'ska', 'skb', 'skc', 'skd', 'ske', 'skf', 'skg', 'skh', 'ski', 'skj', 'skk', 'skm', 'skn', 'sko', 'skp', 'skq', 'skr', 'sks', 'skt', 'sku', 'skv', 'skw', 'skx', 'sky', 'skz', 'sl', 'slc', 'sld', 'sle', 'slf', 'slg', 'slh', 'sli', 'slj', 'sll', 'slm', 'sln', 'slp', 'slq', 'slr', 'sls', 'slt', 'slu', 'slw', 'slx', 'sly', 'slz', 'sm', 'sma', 'smb', 'smc', 'smd', 'smf', 'smg', 'smh', 'smj', 'smk', 'sml', 'smm', 'smn', 'smp', 'smq', 'smr', 'sms', 'smt', 'smu', 'smv', 'smw', 'smx', 'smy', 'smz', 'sn', 'snb', 'snc', 'sne', 'snf', 'sng', 'snh', 'sni', 'snj', 'snk', 'snl', 'snm', 'snn', 'sno', 'snp', 'snq', 'snr', 'sns', 'snu', 'snv', 'snw', 'snx', 'sny', 'snz', 'so', 'soa', 'sob', 'soc', 'sod', 'soe', 'sog', 'soh', 'soi', 'soj', 'sok', 'sol', 'soo', 'sop', 'soq', 'sor', 'sos', 'sou', 'sov', 'sow', 'sox', 'soy', 'soz', 'spb', 'spc', 'spd', 'spe', 'spg', 'spi', 'spk', 'spl', 'spm', 'spn', 'spo', 'spp', 'spq', 'spr', 'sps', 'spt', 'spu', 'spv', 'spx', 'spy', 'sq', 'sqa', 'sqh', 'sqk', 'sqm', 'sqn', 'sqo', 'sqq', 'sqr', 'sqs', 'sqt', 'squ', 'sr', 'sra', 'srb', 'src', 'sre', 'srf', 'srg', 'srh', 'sri', 'srk', 'srl', 'srm', 'srn', 'sro', 'srq', 'srr', 'srs', 'srt', 'sru', 'srv', 'srw', 'srx', 'sry', 'srz', 'ss', 'ssb', 'ssc', 'ssd', 'sse', 'ssf', 'ssg', 'ssh', 'ssi', 'ssj', 'ssk', 'ssl', 'ssm', 'ssn', 'sso', 'ssp', 'ssq', 'ssr', 'sss', 'sst', 'ssu', 'ssv', 'ssx', 'ssy', 'ssz', 'st', 'sta', 'stb', 'std', 'ste', 'stf', 'stg', 'sth', 'sti', 'stj', 'stk', 'stl', 'stm', 'stn', 'sto', 'stp', 'stq', 'str', 'sts', 'stt', 'stu', 'stv', 'stw', 'sty', 'su', 'sua', 'sub', 'suc', 'sue', 'sug', 'sui', 'suj', 'suk', 'suq', 'sur', 'sus', 'sut', 'suv', 'suw', 'sux', 'suy', 'suz', 'sv', 
'sva', 'svb', 'svc', 'sve', 'svk', 'svm', 'svs', 'svx', 'sw', 'swb', 'swc', 'swf', 'swg', 'swh', 'swi', 'swj', 'swk', 'swl', 'swm', 'swn', 'swo', 'swp', 'swq', 'swr', 'sws', 'swt', 'swu', 'swv', 'sww', 'swx', 'swy', 'sxb', 'sxc', 'sxe', 'sxg', 'sxk', 'sxl', 'sxm', 'sxn', 'sxo', 'sxr', 'sxs', 'sxu', 'sxw', 'sya', 'syb', 'syc', 'syi', 'syk', 'syl', 'sym', 'syn', 'syo', 'syr', 'sys', 'syw', 'syx', 'syy', 'sza', 'szb', 'szc', 'szd', 'sze', 'szg', 'szl', 'szn', 'szp', 'szv', 'szw', 'ta', 'taa', 'tab', 'tac', 'tad', 'tae', 'taf', 'tag', 'taj', 'tak', 'tal', 'tan', 'tao', 'tap', 'taq', 'tar', 'tas', 'tau', 'tav', 'taw', 'tax', 'tay', 'taz', 'tba', 'tbb', 'tbc', 'tbd', 'tbe', 'tbf', 'tbg', 'tbh', 'tbi', 'tbj', 'tbk', 'tbl', 'tbm', 'tbn', 'tbo', 'tbp', 'tbr', 'tbs', 'tbt', 'tbu', 'tbv', 'tbw', 'tbx', 'tby', 'tbz', 'tca', 'tcb', 'tcc', 'tcd', 'tce', 'tcf', 'tcg', 'tch', 'tci', 'tck', 'tcl', 'tcm', 'tcn', 'tco', 'tcp', 'tcq', 'tcs', 'tct', 'tcu', 'tcw', 'tcx', 'tcy', 'tcz', 'tda', 'tdb', 'tdc', 'tdd', 'tde', 'tdf', 'tdg', 'tdh', 'tdi', 'tdj', 'tdk', 'tdl', 'tdm', 'tdn', 'tdo', 'tdq', 'tdr', 'tds', 'tdt', 'tdv', 'tdx', 'tdy', 'te', 'tea', 'teb', 'tec', 'ted', 'tee', 'tef', 'teg', 'teh', 'tei', 'tek', 'tem', 'ten', 'teo', 'tep', 'teq', 'ter', 'tes', 'tet', 'teu', 'tev', 'tew', 'tex', 'tey', 'tfi', 'tfn', 'tfo', 'tfr', 'tft', 'tg', 'tga', 'tgb', 'tgc', 'tgd', 'tge', 'tgf', 'tgh', 'tgi', 'tgj', 'tgn', 'tgo', 'tgp', 'tgq', 'tgr', 'tgs', 'tgt', 'tgu', 'tgv', 'tgw', 'tgx', 'tgy', 'tgz', 'th', 'thd', 'the', 'thf', 'thh', 'thi', 'thk', 'thl', 'thm', 'thn', 'thp', 'thq', 'thr', 'ths', 'tht', 'thu', 'thv', 'thw', 'thy', 'thz', 'ti', 'tia', 'tic', 'tif', 'tig', 'tih', 'tii', 'tij', 'tik', 'til', 'tim', 'tin', 'tio', 'tip', 'tiq', 'tis', 'tit', 'tiu', 'tiv', 'tiw', 'tix', 'tiy', 'tiz', 'tja', 'tjg', 'tji', 'tjl', 'tjm', 'tjn', 'tjo', 'tjs', 'tju', 'tjw', 'tk', 'tka', 'tkb', 'tkd', 'tke', 'tkf', 'tkg', 'tkl', 'tkm', 'tkn', 'tkp', 'tkq', 'tkr', 'tks', 'tkt', 'tku', 'tkv', 'tkw', 'tkx', 'tkz', 'tl', 'tla', 'tlb', 'tlc', 'tld', 'tlf', 'tlg', 'tlh', 'tli', 'tlj', 'tlk', 'tll', 'tlm', 'tln', 'tlo', 'tlp', 'tlq', 'tlr', 'tls', 'tlt', 'tlu', 'tlv', 'tlx', 'tly', 'tma', 'tmb', 'tmc', 'tmd', 'tme', 'tmf', 'tmg', 'tmh', 'tmi', 'tmj', 'tmk', 'tml', 'tmm', 'tmn', 'tmo', 'tmq', 'tmr', 'tms', 'tmt', 'tmu', 'tmv', 'tmw', 'tmy', 'tmz', 'tn', 'tna', 'tnb', 'tnc', 'tnd', 'tne', 'tng', 'tnh', 'tni', 'tnk', 'tnl', 'tnm', 'tnn', 'tno', 'tnp', 'tnq', 'tnr', 'tns', 'tnt', 'tnu', 'tnv', 'tnw', 'tnx', 'tny', 'tnz', 'to', 'tob', 'toc', 'tod', 'tof', 'tog', 'toh', 'toi', 'toj', 'tol', 'tom', 'too', 'top', 'toq', 'tor', 'tos', 'tou', 'tov', 'tow', 'tox', 'toy', 'toz', 'tpa', 'tpc', 'tpe', 'tpf', 'tpg', 'tpi', 'tpj', 'tpk', 'tpl', 'tpm', 'tpn', 'tpo', 'tpp', 'tpq', 'tpr', 'tpt', 'tpu', 'tpv', 'tpw', 'tpx', 'tpy', 'tpz', 'tqb', 'tql', 'tqm', 'tqn', 'tqo', 'tqp', 'tqq', 'tqr', 'tqt', 'tqu', 'tqw', 'tr', 'tra', 'trb', 'trc', 'trd', 'tre', 'trf', 'trg', 'trh', 'tri', 'trj', 'trl', 'trm', 'trn', 'tro', 'trp', 'trq', 'trr', 'trs', 'trt', 'tru', 'trv', 'trw', 'trx', 'try', 'trz', 'ts', 'tsa', 'tsb', 'tsc', 'tsd', 'tse', 'tsg', 'tsh', 'tsi', 'tsj', 'tsk', 'tsl', 'tsm', 'tsp', 'tsq', 'tsr', 'tss', 'tst', 'tsu', 'tsv', 'tsw', 'tsx', 'tsy', 'tsz', 'tt', 'tta', 'ttb', 'ttc', 'ttd', 'tte', 'ttf', 'ttg', 'tth', 'tti', 'ttj', 'ttk', 'ttl', 'ttm', 'ttn', 'tto', 'ttp', 'ttq', 'ttr', 'tts', 'ttt', 'ttu', 'ttv', 'ttw', 'tty', 'ttz', 'tua', 'tub', 'tuc', 'tud', 'tue', 'tuf', 'tug', 'tuh', 'tui', 'tuj', 'tul', 'tum', 'tun', 'tuo', 'tuq', 'tus', 'tuu', 'tuv', 
'tux', 'tuy', 'tuz', 'tva', 'tvd', 'tve', 'tvk', 'tvl', 'tvm', 'tvn', 'tvo', 'tvs', 'tvt', 'tvu', 'tvw', 'tvy', 'tw', 'twa', 'twb', 'twc', 'twd', 'twe', 'twf', 'twg', 'twh', 'twl', 'twm', 'twn', 'two', 'twp', 'twq', 'twr', 'twt', 'twu', 'tww', 'twx', 'twy', 'txa', 'txb', 'txc', 'txe', 'txg', 'txh', 'txi', 'txj', 'txm', 'txn', 'txo', 'txq', 'txr', 'txs', 'txt', 'txu', 'txx', 'txy', 'ty', 'tya', 'tye', 'tyh', 'tyi', 'tyj', 'tyl', 'tyn', 'typ', 'tyr', 'tys', 'tyt', 'tyu', 'tyv', 'tyx', 'tyz', 'tza', 'tzh', 'tzj', 'tzl', 'tzm', 'tzn', 'tzo', 'tzx', 'uam', 'uan', 'uar', 'uba', 'ubi', 'ubl', 'ubr', 'ubu', 'uby', 'uda', 'ude', 'udg', 'udi', 'udj', 'udl', 'udm', 'udu', 'ues', 'ufi', 'ug', 'uga', 'ugb', 'uge', 'ugn', 'ugo', 'ugy', 'uha', 'uhn', 'uis', 'uiv', 'uji', 'uk', 'uka', 'ukg', 'ukh', 'ukl', 'ukp', 'ukq', 'uks', 'uku', 'ukw', 'uky', 'ula', 'ulb', 'ulc', 'ule', 'ulf', 'uli', 'ulk', 'ull', 'ulm', 'uln', 'ulu', 'ulw', 'uma', 'umb', 'umc', 'umd', 'umg', 'umi', 'umm', 'umn', 'umo', 'ump', 'umr', 'ums', 'umu', 'una', 'und', 'une', 'ung', 'unk', 'unm', 'unn', 'unr', 'unu', 'unx', 'unz', 'upi', 'upv', 'ur', 'ura', 'urb', 'urc', 'ure', 'urf', 'urg', 'urh', 'uri', 'urk', 'url', 'urm', 'urn', 'uro', 'urp', 'urr', 'urt', 'uru', 'urv', 'urw', 'urx', 'ury', 'urz', 'usa', 'ush', 'usi', 'usk', 'usp', 'usu', 'uta', 'ute', 'utp', 'utr', 'utu', 'uum', 'uun', 'uur', 'uuu', 'uve', 'uvh', 'uvl', 'uwa', 'uya', 'uz', 'uzn', 'uzs', 'vaa', 'vae', 'vaf', 'vag', 'vah', 'vai', 'vaj', 'val', 'vam', 'van', 'vao', 'vap', 'var', 'vas', 'vau', 'vav', 'vay', 'vbb', 'vbk', 've', 'vec', 'ved', 'vel', 'vem', 'veo', 'vep', 'ver', 'vgr', 'vgt', 'vi', 'vic', 'vid', 'vif', 'vig', 'vil', 'vin', 'vis', 'vit', 'viv', 'vka', 'vki', 'vkj', 'vkk', 'vkl', 'vkm', 'vko', 'vkp', 'vkt', 'vku', 'vlp', 'vls', 'vma', 'vmb', 'vmc', 'vmd', 'vme', 'vmf', 'vmg', 'vmh', 'vmi', 'vmj', 'vmk', 'vml', 'vmm', 'vmp', 'vmq', 'vmr', 'vms', 'vmu', 'vmv', 'vmw', 'vmx', 'vmy', 'vmz', 'vnk', 'vnm', 'vnp', 'vo', 'vor', 'vot', 'vra', 'vro', 'vrs', 'vrt', 'vsi', 'vsl', 'vsv', 'vto', 'vum', 'vun', 'vut', 'vwa', 'wa', 'waa', 'wab', 'wac', 'wad', 'wae', 'waf', 'wag', 'wah', 'wai', 'waj', 'wal', 'wam', 'wan', 'wao', 'wap', 'waq', 'war', 'was', 'wat', 'wau', 'wav', 'waw', 'wax', 'way', 'waz', 'wba', 'wbb', 'wbe', 'wbf', 'wbh', 'wbi', 'wbj', 'wbk', 'wbl', 'wbm', 'wbp', 'wbq', 'wbr', 'wbt', 'wbv', 'wbw', 'wca', 'wci', 'wdd', 'wdg', 'wdj', 'wdk', 'wdu', 'wdy', 'wea', 'wec', 'wed', 'weg', 'weh', 'wei', 'wem', 'weo', 'wep', 'wer', 'wes', 'wet', 'weu', 'wew', 'wfg', 'wga', 'wgb', 'wgg', 'wgi', 'wgo', 'wgu', 'wgy', 'wha', 'whg', 'whk', 'whu', 'wib', 'wic', 'wie', 'wif', 'wig', 'wih', 'wii', 'wij', 'wik', 'wil', 'wim', 'win', 'wir', 'wiu', 'wiv', 'wiy', 'wja', 'wji', 'wka', 'wkb', 'wkd', 'wkl', 'wku', 'wkw', 'wky', 'wla', 'wlc', 'wle', 'wlg', 'wli', 'wlk', 'wll', 'wlm', 'wlo', 'wlr', 'wls', 'wlu', 'wlv', 'wlw', 'wlx', 'wly', 'wma', 'wmb', 'wmc', 'wmd', 'wme', 'wmh', 'wmi', 'wmm', 'wmn', 'wmo', 'wms', 'wmt', 'wmw', 'wmx', 'wnb', 'wnc', 'wnd', 'wne', 'wng', 'wni', 'wnk', 'wnm', 'wnn', 'wno', 'wnp', 'wnu', 'wnw', 'wny', 'wo', 'woa', 'wob', 'woc', 'wod', 'woe', 'wof', 'wog', 'woi', 'wok', 'wom', 'won', 'woo', 'wor', 'wos', 'wow', 'woy', 'wpc', 'wra', 'wrb', 'wrd', 'wrg', 'wrh', 'wri', 'wrk', 'wrl', 'wrm', 'wrn', 'wro', 'wrp', 'wrr', 'wrs', 'wru', 'wrv', 'wrw', 'wrx', 'wry', 'wrz', 'wsa', 'wsg', 'wsi', 'wsk', 'wsr', 'wss', 'wsu', 'wsv', 'wtf', 'wth', 'wti', 'wtk', 'wtm', 'wtw', 'wua', 'wub', 'wud', 'wuh', 'wul', 'wum', 'wun', 'wur', 'wut', 'wuu', 'wuv', 'wux', 'wuy', 'wwa', 'wwb', 
'wwo', 'wwr', 'www', 'wxa', 'wxw', 'wya', 'wyb', 'wyi', 'wym', 'wyr', 'wyy', 'xaa', 'xab', 'xac', 'xad', 'xae', 'xag', 'xai', 'xaj', 'xak', 'xal', 'xam', 'xan', 'xao', 'xap', 'xaq', 'xar', 'xas', 'xat', 'xau', 'xav', 'xaw', 'xay', 'xbb', 'xbc', 'xbd', 'xbe', 'xbg', 'xbi', 'xbj', 'xbm', 'xbn', 'xbo', 'xbp', 'xbr', 'xbw', 'xby', 'xcb', 'xcc', 'xce', 'xcg', 'xch', 'xcl', 'xcm', 'xcn', 'xco', 'xcr', 'xct', 'xcu', 'xcv', 'xcw', 'xcy', 'xda', 'xdc', 'xdk', 'xdm', 'xdy', 'xeb', 'xed', 'xeg', 'xel', 'xem', 'xep', 'xer', 'xes', 'xet', 'xeu', 'xfa', 'xga', 'xgb', 'xgd', 'xgf', 'xgg', 'xgi', 'xgl', 'xgm', 'xgr', 'xgu', 'xgw', 'xh', 'xha', 'xhc', 'xhd', 'xhe', 'xhr', 'xht', 'xhu', 'xhv', 'xib', 'xii', 'xil', 'xin', 'xir', 'xis', 'xiv', 'xiy', 'xjb', 'xjt', 'xka', 'xkb', 'xkc', 'xkd', 'xke', 'xkf', 'xkg', 'xki', 'xkj', 'xkk', 'xkl', 'xkn', 'xko', 'xkp', 'xkq', 'xkr', 'xks', 'xkt', 'xku', 'xkv', 'xkw', 'xkx', 'xky', 'xkz', 'xla', 'xlb', 'xlc', 'xld', 'xle', 'xlg', 'xli', 'xln', 'xlo', 'xlp', 'xls', 'xlu', 'xly', 'xma', 'xmb', 'xmc', 'xmd', 'xme', 'xmf', 'xmg', 'xmh', 'xmj', 'xmk', 'xml', 'xmm', 'xmn', 'xmo', 'xmp', 'xmq', 'xmr', 'xms', 'xmt', 'xmu', 'xmv', 'xmw', 'xmx', 'xmy', 'xmz', 'xna', 'xnb', 'xng', 'xnh', 'xni', 'xnk', 'xnn', 'xno', 'xnr', 'xns', 'xnt', 'xnu', 'xny', 'xnz', 'xoc', 'xod', 'xog', 'xoi', 'xok', 'xom', 'xon', 'xoo', 'xop', 'xor', 'xow', 'xpa', 'xpc', 'xpe', 'xpg', 'xpi', 'xpj', 'xpk', 'xpm', 'xpn', 'xpo', 'xpp', 'xpq', 'xpr', 'xps', 'xpt', 'xpu', 'xpy', 'xqa', 'xqt', 'xra', 'xrb', 'xrd', 'xre', 'xrg', 'xri', 'xrm', 'xrn', 'xrq', 'xrr', 'xrt', 'xru', 'xrw', 'xsa', 'xsb', 'xsc', 'xsd', 'xse', 'xsh', 'xsi', 'xsl', 'xsm', 'xsn', 'xso', 'xsp', 'xsq', 'xsr', 'xss', 'xsu', 'xsv', 'xsy', 'xta', 'xtb', 'xtc', 'xtd', 'xte', 'xtg', 'xth', 'xti', 'xtj', 'xtl', 'xtm', 'xtn', 'xto', 'xtp', 'xtq', 'xtr', 'xts', 'xtt', 'xtu', 'xtv', 'xtw', 'xty', 'xtz', 'xua', 'xub', 'xud', 'xug', 'xuj', 'xul', 'xum', 'xun', 'xuo', 'xup', 'xur', 'xut', 'xuu', 'xve', 'xvi', 'xvn', 'xvo', 'xvs', 'xwa', 'xwc', 'xwd', 'xwe', 'xwg', 'xwj', 'xwk', 'xwl', 'xwo', 'xwr', 'xwt', 'xww', 'xxb', 'xxk', 'xxm', 'xxr', 'xxt', 'xya', 'xyb', 'xyj', 'xyk', 'xyl', 'xyt', 'xyy', 'xzh', 'xzm', 'xzp', 'yaa', 'yab', 'yac', 'yad', 'yae', 'yaf', 'yag', 'yah', 'yai', 'yaj', 'yak', 'yal', 'yam', 'yan', 'yao', 'yap', 'yaq', 'yar', 'yas', 'yat', 'yau', 'yav', 'yaw', 'yax', 'yay', 'yaz', 'yba', 'ybb', 'ybe', 'ybh', 'ybi', 'ybj', 'ybk', 'ybl', 'ybm', 'ybn', 'ybo', 'ybx', 'yby', 'ych', 'ycl', 'ycn', 'ycp', 'yda', 'ydd', 'yde', 'ydg', 'ydk', 'yea', 'yec', 'yee', 'yei', 'yej', 'yel', 'yer', 'yes', 'yet', 'yeu', 'yev', 'yey', 'yga', 'ygi', 'ygl', 'ygm', 'ygp', 'ygr', 'ygs', 'ygu', 'ygw', 'yha', 'yhd', 'yhl', 'yhs', 'yi', 'yia', 'yif', 'yig', 'yih', 'yii', 'yij', 'yik', 'yil', 'yim', 'yin', 'yip', 'yiq', 'yir', 'yis', 'yit', 'yiu', 'yiv', 'yix', 'yiz', 'yka', 'ykg', 'yki', 'ykk', 'ykl', 'ykm', 'ykn', 'yko', 'ykr', 'ykt', 'yku', 'yky', 'yla', 'ylb', 'yle', 'ylg', 'yli', 'yll', 'ylm', 'yln', 'ylo', 'ylr', 'ylu', 'yly', 'ymb', 'ymc', 'ymd', 'yme', 'ymg', 'ymh', 'ymi', 'ymk', 'yml', 'ymm', 'ymn', 'ymo', 'ymp', 'ymq', 'ymr', 'yms', 'ymx', 'ymz', 'yna', 'ynd', 'yne', 'yng', 'ynk', 'ynl', 'ynn', 'yno', 'ynq', 'yns', 'ynu', 'yo', 'yob', 'yog', 'yoi', 'yok', 'yol', 'yom', 'yon', 'yot', 'yox', 'yoy', 'ypa', 'ypb', 'ypg', 'yph', 'ypm', 'ypn', 'ypo', 'ypp', 'ypz', 'yra', 'yrb', 'yre', 'yrk', 'yrl', 'yrm', 'yrn', 'yro', 'yrs', 'yrw', 'yry', 'ysc', 'ysd', 'ysg', 'ysl', 'ysn', 'yso', 'ysp', 'ysr', 'yss', 'ysy', 'yta', 'ytl', 'ytp', 'ytw', 'yty', 'yua', 'yub', 'yuc', 
'yud', 'yue', 'yuf', 'yug', 'yui', 'yuj', 'yuk', 'yul', 'yum', 'yun', 'yup', 'yuq', 'yur', 'yut', 'yuw', 'yux', 'yuy', 'yuz', 'yva', 'yvt', 'ywa', 'ywg', 'ywl', 'ywn', 'ywq', 'ywr', 'ywt', 'ywu', 'yww', 'yxa', 'yxg', 'yxl', 'yxm', 'yxu', 'yxy', 'yyr', 'yyu', 'yyz', 'yzg', 'yzk', 'za', 'zaa', 'zab', 'zac', 'zad', 'zae', 'zaf', 'zag', 'zah', 'zai', 'zaj', 'zak', 'zal', 'zam', 'zao', 'zap', 'zaq', 'zar', 'zas', 'zat', 'zau', 'zav', 'zaw', 'zax', 'zay', 'zaz', 'zbc', 'zbe', 'zbl', 'zbt', 'zbw', 'zca', 'zch', 'zdj', 'zea', 'zeg', 'zeh', 'zen', 'zga', 'zgb', 'zgh', 'zgm', 'zgn', 'zgr', 'zh', 'zhb', 'zhd', 'zhi', 'zhn', 'zhw', 'zia', 'zib', 'zik', 'zil', 'zim', 'zin', 'zir', 'ziw', 'ziz', 'zka', 'zkb', 'zkd', 'zkg', 'zkh', 'zkk', 'zkn', 'zko', 'zkp', 'zkr', 'zkt', 'zku', 'zkv', 'zkz', 'zlj', 'zlm', 'zln', 'zlq', 'zma', 'zmb', 'zmc', 'zmd', 'zme', 'zmf', 'zmg', 'zmh', 'zmi', 'zmj', 'zmk', 'zml', 'zmm', 'zmn', 'zmo', 'zmp', 'zmq', 'zmr', 'zms', 'zmt', 'zmu', 'zmv', 'zmw', 'zmx', 'zmy', 'zmz', 'zna', 'zne', 'zng', 'znk', 'zns', 'zoc', 'zoh', 'zom', 'zoo', 'zoq', 'zor', 'zos', 'zpa', 'zpb', 'zpc', 'zpd', 'zpe', 'zpf', 'zpg', 'zph', 'zpi', 'zpj', 'zpk', 'zpl', 'zpm', 'zpn', 'zpo', 'zpp', 'zpq', 'zpr', 'zps', 'zpt', 'zpu', 'zpv', 'zpw', 'zpx', 'zpy', 'zpz', 'zqe', 'zra', 'zrg', 'zrn', 'zro', 'zrp', 'zrs', 'zsa', 'zsk', 'zsl', 'zsm', 'zsr', 'zsu', 'zte', 'ztg', 'ztl', 'ztm', 'ztn', 'ztp', 'ztq', 'zts', 'ztt', 'ztu', 'ztx', 'zty', 'zu', 'zua', 'zuh', 'zum', 'zun', 'zuy', 'zwa', 'zxx', 'zyb', 'zyg', 'zyj', 'zyn', 'zyp', 'zza', 'zzj'] ) COUNTRIES = set( ['AD', 'AE', 'AF', 'AG', 'AI', 'AL', 'AM', 'AO', 'AQ', 'AR', 'AS', 'AT', 'AU', 'AW', 'AX', 'AZ', 'BA', 'BB', 'BD', 'BE', 'BF', 'BG', 'BH', 'BI', 'BJ', 'BL', 'BM', 'BN', 'BO', 'BQ', 'BR', 'BS', 'BT', 'BV', 'BW', 'BY', 'BZ', 'CA', 'CC', 'CD', 'CF', 'CG', 'CH', 'CI', 'CK', 'CL', 'CM', 'CN', 'CO', 'CR', 'CU', 'CV', 'CW', 'CX', 'CY', 'CZ', 'DE', 'DJ', 'DK', 'DM', 'DO', 'DZ', 'EC', 'EE', 'EG', 'EH', 'ER', 'ES', 'ET', 'FI', 'FJ', 'FK', 'FM', 'FO', 'FR', 'GA', 'GB', 'GD', 'GE', 'GF', 'GG', 'GH', 'GI', 'GL', 'GM', 'GN', 'GP', 'GQ', 'GR', 'GS', 'GT', 'GU', 'GW', 'GY', 'HK', 'HM', 'HN', 'HR', 'HT', 'HU', 'ID', 'IE', 'IL', 'IM', 'IN', 'IO', 'IQ', 'IR', 'IS', 'IT', 'JE', 'JM', 'JO', 'JP', 'KE', 'KG', 'KH', 'KI', 'KM', 'KN', 'KP', 'KR', 'KW', 'KY', 'KZ', 'LA', 'LB', 'LC', 'LI', 'LK', 'LR', 'LS', 'LT', 'LU', 'LV', 'LY', 'MA', 'MC', 'MD', 'ME', 'MF', 'MG', 'MH', 'MK', 'ML', 'MM', 'MN', 'MO', 'MP', 'MQ', 'MR', 'MS', 'MT', 'MU', 'MV', 'MW', 'MX', 'MY', 'MZ', 'NA', 'NC', 'NE', 'NF', 'NG', 'NI', 'NL', 'NO', 'NP', 'NR', 'NU', 'NZ', 'OM', 'PA', 'PE', 'PF', 'PG', 'PH', 'PK', 'PL', 'PM', 'PN', 'PR', 'PS', 'PT', 'PW', 'PY', 'QA', 'RE', 'RO', 'RS', 'RU', 'RW', 'SA', 'SB', 'SC', 'SD', 'SE', 'SG', 'SH', 'SI', 'SJ', 'SK', 'SL', 'SM', 'SN', 'SO', 'SR', 'SS', 'ST', 'SV', 'SX', 'SY', 'SZ', 'TC', 'TD', 'TF', 'TG', 'TH', 'TJ', 'TK', 'TL', 'TM', 'TN', 'TO', 'TR', 'TT', 'TV', 'TW', 'TZ', 'UA', 'UG', 'UM', 'US', 'UY', 'UZ', 'VA', 'VC', 'VE', 'VG', 'VI', 'VN', 'VU', 'WF', 'WS', 'YE', 'YT', 'ZA', 'ZM', 'ZW'] )
matwey/rpmlint
rpmlint/__isocodes__.py
Python
gpl-2.0
64434
[ "ADF", "ASE", "BWA", "Elk", "MOE", "VMD", "xTB" ]
20adf674ea2d10a7ad1d84e92eff90701d15c0c74f15ff1ea0433a6fe0473617
#  Copyright (C) 2012,2013
#      Max Planck Institute for Polymer Research
#  Copyright (C) 2008,2009,2010,2011
#      Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
#  This file is part of ESPResSo++.
#
#  ESPResSo++ is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  ESPResSo++ is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.

r"""
*******************************
espressopp.FixedTripleAngleList
*******************************

.. function:: espressopp.FixedTripleAngleList(storage)

        :param storage:
        :type storage:

.. function:: espressopp.FixedTripleAngleList.add(pid1, pid2, pid3)

        :param pid1:
        :param pid2:
        :param pid3:
        :type pid1:
        :type pid2:
        :type pid3:
        :rtype:

.. function:: espressopp.FixedTripleAngleList.addTriples(triplelist)

        :param triplelist:
        :type triplelist:
        :rtype:

.. function:: espressopp.FixedTripleAngleList.getAngle(pid1, pid2, pid3)

        :param pid1:
        :param pid2:
        :param pid3:
        :type pid1:
        :type pid2:
        :type pid3:
        :rtype:

.. function:: espressopp.FixedTripleAngleList.getTriples()

        :rtype:

.. function:: espressopp.FixedTripleAngleList.getTriplesAngles()

        :rtype:

.. function:: espressopp.FixedTripleAngleList.size()

        :rtype:
"""

from espressopp import pmi
import _espressopp
#import espressopp
from espressopp.esutil import cxxinit


class FixedTripleAngleListLocal(_espressopp.FixedTripleAngleList):

    def __init__(self, storage):
        #if pmi.workerIsActive():
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, _espressopp.FixedTripleAngleList, storage)

    def add(self, pid1, pid2, pid3):
        if pmi.workerIsActive():
            return self.cxxclass.add(self, pid1, pid2, pid3)

    def addTriples(self, triplelist):
        """
        Each processor takes the broadcasted triplelist and
        adds those triples whose first particle is owned by
        this processor.
        """
        if pmi.workerIsActive():
            for triple in triplelist:
                pid1, pid2, pid3 = triple
                self.cxxclass.add(self, pid1, pid2, pid3)

    def size(self):
        if pmi.workerIsActive():
            return self.cxxclass.size(self)

    def getTriples(self):
        if pmi.workerIsActive():
            triples = self.cxxclass.getTriples(self)
            return triples

    def getTriplesAngles(self):
        """Returns the list of (pid1, pid2, pid3, angle(123))."""
        if pmi.workerIsActive():
            triples_angles = self.cxxclass.getTriplesAngles(self)
            return triples_angles

    def getAngle(self, pid1, pid2, pid3):
        if pmi.workerIsActive():
            return self.cxxclass.getAngle(self, pid1, pid2, pid3)


if pmi.isController:
    class FixedTripleAngleList(metaclass=pmi.Proxy):
        pmiproxydefs = dict(
            cls='espressopp.FixedTripleAngleListLocal',
            localcall=["add"],
            pmicall=["addTriples"],
            pmiinvoke=["getTriples", "getTriplesAngles", "size"]
        )

        def getAngle(self, pid1, pid2, pid3):
            angles = pmi.invoke(self.pmiobject, 'getAngle', pid1, pid2, pid3)
            for i in angles:
                if i != -1:
                    return i
espressopp/espressopp
src/FixedTripleAngleList.py
Python
gpl-3.0
4,092
[ "ESPResSo" ]
de90c3561722f3a37820d5fb8d60d7e73f8568873b68bdaa1f74393e2bf53746
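A hedged usage sketch of the API documented in the record above. The surrounding setup (the espressopp System, the `storage` object, and the particle ids) is assumed rather than shown; only the FixedTripleAngleList calls follow the docstring.

import espressopp  # assumes a working ESPResSo++ installation

# `storage` is assumed to come from the usual setup elsewhere, e.g. a
# domain-decomposition storage attached to a System with particles added.
ftal = espressopp.FixedTripleAngleList(storage)
ftal.add(1, 2, 3)                        # one (pid1, pid2, pid3) triple
ftal.addTriples([(2, 3, 4), (3, 4, 5)])  # broadcast a list of triples
print(ftal.size())                       # number of triples stored
print(ftal.getTriplesAngles())           # [(pid1, pid2, pid3, angle123), ...]
print(ftal.getAngle(1, 2, 3))            # current angle for one triple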
#!/usr/bin/env python # Author: Andrew Jewett (jewett.aij at g mail) # http://www.chem.ucsb.edu/~sheagroup # License: 3-clause BSD License (See LICENSE.TXT) # Copyright (c) 2011, Regents of the University of California # All rights reserved. """ lttree.py lttree.py is an extension of the generic ttree.py program. This version can understand and manipulate ttree-style templates which are specialized for storing molecule-specific data for use in LAMMPS. The main difference between lttree.py and ttree.py is: Unlike ttree.py, lttree.py understands rigid-body movement commands like "rot()" and "move()" which allows it to reorient and move each copy of a molecule to a new location. (ttree.py just ignores these commands. Consequently LAMMPS input file (fragments) created with ttree.py have invalid (overlapping) atomic coordinates and must be modified or aguemted later (by loading atomic coordinates from a PDB file or an XYZ file). lttree.py understands the "Data Atoms" section of a LAMMPS data file (in addition to the various "atom_styles" which effect it). Additional LAMMPS-specific features may be added in the future. """ import sys from ttree import * from lttree_styles import * from ttree_matrix_stack import * try: unicode except NameError: # Python 3 basestring = unicode = str class LttreeSettings(BasicUISettings): def __init__(self, user_bindings_x=None, user_bindings=None, order_method='by_command'): BasicUISettings.__init__(self, user_bindings_x, user_bindings, order_method) # The following new member data indicate which columns store # LAMMPS-specific information. # The next 6 members store keep track of the different columns # of the "Data Atoms" section of a LAMMPS data file: self.column_names = [] #<--A list of column names (optional) self.ii_coords=[] #<--A list of triplets of column indexes storing coordinate data self.ii_vects=[] #<--A list of triplets of column indexes storing directional data # (such as dipole or ellipsoid orientations) self.i_atomid=None #<--An integer indicating which column has the atomid self.i_atomtype=None #<--An integer indicating which column has the atomtype self.i_molid=None #<--An integer indicating which column has the molid, if applicable self.infile=None # Name of the outermost file. This is the file # which was read at the moment parsing begins. def LttreeParseArgs(argv, settings): BasicUIParseArgs(argv, settings) # Loop over the remaining arguments not processed yet. 
# These arguments are specific to the lttree.py program # and are not understood by ttree.py: i = 1 while i < len(argv): #sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n') if ((argv[i].lower() == '-atomstyle') or (argv[i].lower() == '-atom-style') or (argv[i].lower() == '-atom_style')): if i+1 >= len(argv): raise InputError('Error('+g_program_name+'): The '+argv[i]+' flag should be followed by a LAMMPS\n' ' atom_style name (or single quoted string containing a space-separated\n' ' list of column names such as: atom-ID atom-type q x y z molecule-ID.)\n') settings.column_names = AtomStyle2ColNames(argv[i+1]) sys.stderr.write('\n \"'+data_atoms+'\" column format:\n') sys.stderr.write(' '+(' '.join(settings.column_names))+'\n\n') settings.ii_coords = ColNames2Coords(settings.column_names) settings.ii_vects = ColNames2Vects(settings.column_names) settings.i_atomid, settings.i_atomtype, settings.i_molid = ColNames2AidAtypeMolid(settings.column_names) del(argv[i:i+2]) elif (argv[i].lower() == '-icoord'): if i+1 >= len(argv): raise InputError('Error: '+argv[i]+' flag should be followed by list of integers\n' ' corresponding to column numbers for coordinates in\n' ' the \"'+data_atoms+'\" section of a LAMMPS data file.\n') ilist = argv[i+1].split() if (len(ilist) % 3) != 0: raise InputError('Error: '+argv[i]+' flag should be followed by list of integers.\n' ' This is usually a list of 3 integers, but it can contain more.\n' ' The number of cooridnate columns must be divisible by 3,\n' ' (even if the simulation is in 2 dimensions)\n') settings.iaffinevects = [] for i in range(0, len(ilist)/3): cols = [int(ilist[3*i])+1, int(ilist[3*i+1])+1, int(ilist[3*i+2])+1] settings.iaffinevects.append(cols) del(argv[i:i+2]) elif (argv[i].lower() == '-ivect'): if i+1 >= len(argv): raise InputError('Error: '+argv[i]+' flag should be followed by list of integers\n' ' corresponding to column numbers for direction vectors in\n' ' the \"'+data_atoms+'\" section of a LAMMPS data file.\n') ilist = argv[i+1].split() if (len(ilist) % 3) != 0: raise InputError('Error: '+argv[i]+' flag should be followed by list of integers.\n' ' This is usually a list of 3 integers, but it can contain more.\n' ' The number of cooridnate columns must be divisible by 3,\n' ' (even if the simulation is in 2 dimensions)\n') settings.ivects = [] for i in range(0, len(ilist)/3): cols = [int(ilist[3*i])+1, int(ilist[3*i+1])+1, int(ilist[3*i+2])+1] settings.ivects.append(cols) del(argv[i:i+2]) elif ((argv[i].lower() == '-iatomid') or (argv[i].lower() == '-iid') or (argv[i].lower() == '-iatom-id')): if ((i+1 >= len(argv)) or (not str.isdigit(argv[i+1]))): raise InputError('Error: '+argv[i]+' flag should be followed by an integer\n' ' (>=1) indicating which column in the \"'+data_atoms+'\" section of a\n' ' LAMMPS data file contains the atom id number (typically 1).\n' ' (This argument is unnecessary if you use the -atomstyle argument.)\n') i_atomid = int(argv[i+1])-1 del(argv[i:i+2]) elif ((argv[i].lower() == '-iatomtype') or (argv[i].lower() == '-itype') or (argv[i].lower() == '-iatom-type')): if ((i+1 >= len(argv)) or (not str.isdigit(argv[i+1]))): raise InputError('Error: '+argv[i]+' flag should be followed by an integer\n' ' (>=1) indicating which column in the \"'+data_atoms+'\" section of a\n' ' LAMMPS data file contains the atom type.\n' ' (This argument is unnecessary if you use the -atomstyle argument.)\n') i_atomtype = int(argv[i+1])-1 del(argv[i:i+2]) elif ((argv[i].lower() == '-imolid') or (argv[i].lower() == '-imol') or 
(argv[i].lower() == '-imol-id') or (argv[i].lower() == '-imoleculeid') or (argv[i].lower() == '-imolecule-id')): if ((i+1 >= len(argv)) or (not str.isdigit(argv[i+1]))): raise InputError('Error: '+argv[i]+' flag should be followed by an integer\n' ' (>=1) indicating which column in the \"'+data_atoms+'\" section of a\n' ' LAMMPS data file contains the molecule id number.\n' ' (This argument is unnecessary if you use the -atomstyle argument.)\n') i_molid = int(argv[i+1])-1 del(argv[i:i+2]) elif ((argv[i][0] == '-') and (__name__ == "__main__")): #elif (__name__ == "__main__"): raise InputError('Error('+g_program_name+'):\n' 'Unrecogized command line argument \"'+argv[i]+'\"\n') else: i += 1 if __name__ == "__main__": # Instantiate the lexer we will be using. # (The lexer's __init__() function requires an openned file. # Assuming __name__ == "__main__", then the name of that file should # be the last remaining (unprocessed) argument in the argument list. # Otherwise, then name of that file will be determined later by the # python script which imports this module, so we let them handle it.) if len(argv) == 1: raise InputError('Error: This program requires at least one argument\n' ' the name of a file containing ttree template commands\n') elif len(argv) == 2: try: settings.lex = TemplateLexer(open(argv[1], 'r'), argv[1]) # Parse text from file except IOError: sys.stderr.write('Error: unable to open file\n' ' \"'+argv[1]+'\"\n' ' for reading.\n') sys.exit(1) del(argv[1:2]) else: # if there are more than 2 remaining arguments, problem_args = ['\"'+arg+'\"' for arg in argv[1:]] raise InputError('Syntax Error('+g_program_name+'):\n\n' ' Problem with argument list.\n' ' The remaining arguments are:\n\n' ' '+(' '.join(problem_args))+'\n\n' ' (The actual problem may be earlier in the argument list.\n' ' If these arguments are source files, then keep in mind\n' ' that this program can not parse multiple source files.)\n' ' Check the syntax of the entire argument list.\n') if len(settings.ii_coords) == 0: sys.stderr.write('########################################################\n' '## WARNING: atom_style unspecified ##\n' '## --> \"'+data_atoms+'\" column data has an unknown format ##\n' '## Assuming atom_style = \"full\" ##\n' # '########################################################\n' # '## To specify the \"'+data_atoms+'\" column format you can: ##\n' # '## 1) Use the -atomstyle \"STYLE\" argument ##\n' # '## where \"STYLE\" is a string indicating a LAMMPS ##\n' # '## atom_style, including hybrid styles.(Standard ##\n' # '## atom styles defined in 2011 are supported.) ##\n' # '## 2) Use the -atomstyle \"COL_LIST\" argument ##\n' # '## where \"COL_LIST" is a quoted list of strings ##\n' # '## indicating the name of each column. ##\n' # '## Names \"x\",\"y\",\"z\" are interpreted as ##\n' # '## atomic coordinates. \"mux\",\"muy\",\"muz\" ##\n' # '## are interpreted as direction vectors. ##\n' # '## 3) Use the -icoord \"cx cy cz...\" argument ##\n' # '## where \"cx cy cz\" is a list of integers ##\n' # '## indicating the column numbers for the x,y,z ##\n' # '## coordinates of each atom. ##\n' # '## 4) Use the -ivect \"cmux cmuy cmuz...\" argument ##\n' # '## where \"cmux cmuy cmuz...\" is a list of ##\n' # '## integers indicating the column numbers for ##\n' # '## the vector that determines the direction of a ##\n' # '## dipole or ellipsoid (ie. a rotateable vector).##\n' # '## (More than one triplet can be specified. The ##\n' # '## number of entries must be divisible by 3.) 
##\n'
                         '########################################################\n')

        # The default atom_style is "full"
        settings.column_names = AtomStyle2ColNames('full')
        settings.ii_coords = ColNames2Coords(settings.column_names)
        settings.ii_vects = ColNames2Vects(settings.column_names)
        settings.i_atomid, settings.i_atomtype, settings.i_molid = ColNames2AidAtypeMolid(settings.column_names)


def TransformAtomText(text, matrix):
    """
    Apply transformations to the coordinates and other vector degrees
    of freedom stored in the \"Data Atoms\" section of a LAMMPS
    data file.  This is the \"text\" argument.
    The \"matrix\" stores the aggregate sum of combined transformations
    to be applied.
    """
    #sys.stderr.write('matrix_stack.M = \n'+ MatToStr(matrix) + '\n')

    lines = text.split('\n')

    for i in range(0, len(lines)):
        line_orig = lines[i]
        ic = line_orig.find('#')
        if ic != -1:
            line = line_orig[:ic]
            comment = ' '+line_orig[ic:].rstrip('\n')
        else:
            line = line_orig.rstrip('\n')
            comment = ''

        columns = line.split()
        if len(columns) > 0:
            if len(columns) == len(settings.column_names)+3:
                raise InputError('Error: lttree.py does not yet support integer unit-cell counters \n'
                                 '       within the \"'+data_atoms+'\" section of a LAMMPS data file.\n'
                                 '       Instead please add the appropriate offsets (these offsets\n'
                                 '       should be multiples of the cell size) to the atom coordinates\n'
                                 '       in the data file, and eliminate the extra columns.  Then try again.\n'
                                 '       (If you get this message often, email me and I\'ll fix this limitation.)')
            if len(columns) < len(settings.column_names):
                raise InputError('Error: The number of columns in your data file does not\n'
                                 '       match the LAMMPS atom_style you selected.\n'
                                 '       Use the -atomstyle <style> command line argument.\n')
            x0 = [0.0, 0.0, 0.0]
            x  = [0.0, 0.0, 0.0]
            # Atomic coordinates transform using "affine" transformations
            # (translations plus rotations [or other linear transformations])
            for cxcycz in settings.ii_coords:
                for d in range(0,3):
                    x0[d] = float(columns[cxcycz[d]])
                AffineTransform(x, matrix, x0)  # x = matrix * x0 + b
                for d in range(0,3):            # ("b" is part of "matrix")
                    columns[cxcycz[d]] = str(x[d])
            # Dipole moments and other direction-vectors
            # are not affected by translational movement
            for cxcycz in settings.ii_vects:
                for d in range(0,3):
                    x0[d] = float(columns[cxcycz[d]])
                LinearTransform(x, matrix, x0)  # x = matrix * x0
                for d in range(0,3):
                    columns[cxcycz[d]] = str(x[d])
        lines[i] = ' '.join(columns) + comment
    return '\n'.join(lines)


def CalcCM(text_Atoms,
           text_Masses=None,
           settings=None):
    types2masses = None
    # Loop through the "Masses" section: what is the mass of each atom type?
    if text_Masses != None:
        types2masses = {}
        lines = text_Masses.split('\n')
        for i in range(0, len(lines)):
            line = lines[i]
            columns = line.split()
            if len(columns) == 2:
                atomtype = columns[0]
                m = float(columns[1])
                types2masses[atomtype] = m

    lines = text_Atoms.split('\n')

    # Pass 1 through the "Data Atoms" section: Determine each atom's mass
    atomids2masses = None
    if text_Masses != None:
        assert(settings != None)
        atomids2masses = {}
        for i in range(0, len(lines)):
            line = lines[i]
            columns = line.split()
            if len(columns) == 0:   # skip blank lines
                continue
            atomid = columns[settings.i_atomid]
            atomtype = columns[settings.i_atomtype]
            if atomtype not in types2masses:
                raise InputError('Error(lttree): You have neglected to define the mass of atom type: \"'+atomtype+'\"\n'
                                 'Did you specify the mass of every atom type using write(\"Masses\"){}?')
            atomids2masses[atomid] = types2masses[atomtype]

    # Pass 2 through the "Data Atoms" section: Find the center of mass.
    tot_m = 0.0
    tot_x = [0.0, 0.0, 0.0]
    for i in range(0, len(lines)):
        line = lines[i]
        columns = line.split()
        if len(columns) > 0:
            if len(columns) == len(settings.column_names)+3:
                raise InputError('Error: lttree.py does not yet support integer unit-cell counters (ix, iy, iz)\n'
                                 '       within the \"'+data_atoms+'\" section of a LAMMPS data file.\n'
                                 '       Instead please add the appropriate offsets (these offsets\n'
                                 '       should be multiples of the cell size) to the atom coordinates\n'
                                 '       in the data file, and eliminate the extra columns.  Then try again.\n'
                                 '       (If you get this message often, email me and I\'ll fix this limitation.)')
            if len(columns) != len(settings.column_names):
                raise InputError('Error: The number of columns in your data file does not\n'
                                 '       match the LAMMPS atom_style you selected.\n'
                                 '       Use the -atomstyle <style> command line argument.\n')
            x = [0.0, 0.0, 0.0]
            atomid = columns[settings.i_atomid]
            if atomids2masses != None:
                m = atomids2masses[atomid]
            else:
                m = 1.0
            tot_m += m
            for cxcycz in settings.ii_coords:
                for d in range(0,3):
                    x[d] = float(columns[cxcycz[d]])
                    tot_x[d] += m * x[d]   # mass-weighted sum of coordinates
            # Note: dipole moments and other direction vectors don't affect
            #       the center of mass.  So I commented out the loop below.
            #for cxcycz in settings.ii_vects:
            #    for d in range(0,3):
            #        v[d] = float(columns[cxcycz[d]])
        lines[i] = ' '.join(columns)

    xcm = [0.0, 0.0, 0.0]
    for d in range(0,3):
        xcm[d] = tot_x[d] / tot_m
    return xcm


def _ExecCommands(command_list,
                  index,
                  global_files_content,
                  settings,
                  matrix_stack,
                  current_scope_id=None,
                  substitute_vars=True):
    """
    _ExecCommands():
    The argument "commands" is a nested list of lists of "Command"
    data structures (defined in ttree.py).

    Carry out the write() and write_once() commands (which write
    out the contents of the templates contained inside them).
    Instead of writing the files, save their contents in a string.

    The argument "global_files_content" should be of type defaultdict(list).
    It is an associative array whose key is a string (a filename)
    and whose value is a list of strings (of rendered templates).
    """
    files_content = defaultdict(list)
    postprocessing_commands = []

    while index < len(command_list):
        command = command_list[index]
        index += 1

        # For debugging only
        if ((not isinstance(command, StackableCommand)) and
            (not isinstance(command, ScopeCommand)) and
            (not isinstance(command, WriteFileCommand))):
            sys.stderr.write(str(command)+'\n')

        if isinstance(command, PopCommand):
            assert(current_scope_id != None)
            if command.context_node == None:
                command.context_node = current_scope_id
            if isinstance(command, PopRightCommand):
                matrix_stack.PopRight(which_stack = command.context_node)
            elif isinstance(command, PopLeftCommand):
                matrix_stack.PopLeft(which_stack = command.context_node)
            else:
                assert(False)

        elif isinstance(command, PushCommand):
            assert(current_scope_id != None)
            if command.context_node == None:
                command.context_node = current_scope_id
            # Some commands are post-processing commands, and must be
            # carried out AFTER all the text has been rendered.  For example
            # the "movecm(0,0,0)" waits until all of the coordinates have
            # been rendered, calculates the center-of-mass, and then applies
            # a translation moving the center of mass to the origin (0,0,0).
            # We need to figure out which of these commands need to be
            # postponed, and which commands can be carried out now.
            # ("now"=pushing transformation matrices onto the matrix stack).
            # UNFORTUNATELY POSTPONING SOME COMMANDS MAKES THE CODE UGLY
            transform_list = command.contents.split('.')
            transform_blocks = []
            i_post_process = -1
            # Example: Suppose:
            #command.contents = '.rot(30,0,0,1).movecm(0,0,0).rot(45,1,0,0).scalecm(2.0).move(-2,1,0)'
            # then
            #transform_list = ['rot(30,0,0,1)', 'movecm(0,0,0)', 'rot(45,1,0,0)', 'scalecm(2.0)', 'move(-2,1,0)']
            # Note: the first command 'rot(30,0,0,1)' is carried out now.
            #       The remaining commands are carried out during
            #       post-processing (when processing the "ScopeEnd" command).
            #
            # We break up the commands into "blocks" separated by center-
            # of-mass transformations ('movecm', 'rotcm', or 'scalecm')
            #
            # transform_blocks = ['.rot(30,0,0,1)',
            #                     '.movecm(0,0,0).rot(45,1,0,0)',
            #                     '.scalecm(2.0).move(-2,1,0)']
            i = 0
            while i < len(transform_list):
                transform_block = ''
                while i < len(transform_list):
                    transform = transform_list[i]
                    i += 1
                    if transform != '':
                        transform_block += '.' + transform
                        transform = transform.split('(')[0]
                        if ((transform == 'movecm') or
                            (transform == 'rotcm') or
                            (transform == 'scalecm')):
                            break
                transform_blocks.append(transform_block)

            if len(postprocessing_commands) == 0:
                # The first block (before movecm, rotcm, or scalecm)
                # can be executed now by modifying the matrix stack.
                if isinstance(command, PushRightCommand):
                    matrix_stack.PushCommandsRight(transform_blocks[0].strip('.'),
                                                   command.srcloc,
                                                   which_stack=command.context_node)
                elif isinstance(command, PushLeftCommand):
                    matrix_stack.PushCommandsLeft(transform_blocks[0].strip('.'),
                                                  command.srcloc,
                                                  which_stack=command.context_node)
                # Everything else must be saved for later.
                postprocessing_blocks = transform_blocks[1:]
            else:
                # If we already encountered a "movecm" "rotcm" or "scalecm"
                # then all of the command blocks must be handled during
                # postprocessing.
                postprocessing_blocks = transform_blocks

            for transform_block in postprocessing_blocks:
                assert(isinstance(transform_block, basestring))
                if isinstance(command, PushRightCommand):
                    postprocessing_commands.append(PushRightCommand(transform_block,
                                                                    command.srcloc,
                                                                    command.context_node))
                elif isinstance(command, PushLeftCommand):
                    postprocessing_commands.append(PushLeftCommand(transform_block,
                                                                   command.srcloc,
                                                                   command.context_node))

        elif isinstance(command, WriteFileCommand):
            # --- Throw away lines containing references to deleted variables: ---
            # First: To edit the content of a template,
            #        you need to make a deep local copy of it
            tmpl_list = []
            for entry in command.tmpl_list:
                if isinstance(entry, TextBlock):
                    tmpl_list.append(TextBlock(entry.text,
                                               entry.srcloc))  #, entry.srcloc_end))
                else:
                    tmpl_list.append(entry)
            # Now throw away lines with deleted variables
            DeleteLinesWithBadVars(tmpl_list)
            # --- Now render the text ---
            text = Render(tmpl_list, substitute_vars)
            # ---- Coordinates of the atoms must be rotated
            # and translated after rendering.
            # In addition, other vectors (dipoles, ellipsoid orientations)
            # must be processed.
            # This requires us to re-parse the contents of this text
            # (after it has been rendered), and apply these transformations
            # before passing them on to the caller.
            if command.filename == data_atoms:
                text = TransformAtomText(text, matrix_stack.M)
            files_content[command.filename].append(text)

        elif isinstance(command, ScopeBegin):
            if isinstance(command.node, InstanceObj):
                if ((command.node.children != None) and
                    (len(command.node.children) > 0)):
                    matrix_stack.PushStack(command.node)
            # "command_list" is a long list of commands.
# ScopeBegin and ScopeEnd are (usually) used to demarcate/enclose # the commands which are issued for a single class or # class instance. _ExecCommands() carries out the commands for # a single class/instance. If we reach a ScopeBegin(), # then recursively process the commands belonging to the child. index = _ExecCommands(command_list, index, files_content, settings, matrix_stack, command.node, substitute_vars) elif isinstance(command, ScopeEnd): if data_atoms in files_content: for ppcommand in postprocessing_commands: if data_masses in files_content: xcm = CalcCM(files_content[data_atoms], files_content[data_masses], settings) else: xcm = CalcCM(files_content[data_atoms]) if isinstance(ppcommand, PushRightCommand): matrix_stack.PushCommandsRight(ppcommand.contents, ppcommand.srcloc, xcm, which_stack=command.context_node) elif isinstance(ppcommand, PushLeftCommand): matrix_stack.PushCommandsLeft(ppcommand.contents, ppcommand.srcloc, xcm, which_stack=command.context_node) files_content[data_atoms] = \ TransformAtomText(files_content[data_atoms], matrix_stack.M) for ppcommand in postprocessing_commands: matrix_stack.Pop(which_stack = command.context_node) #(same as PopRight()) if isinstance(command.node, InstanceObj): if ((command.node.children != None) and (len(command.node.children) > 0)): matrix_stack.PopStack() # "ScopeEnd" means we're done with this class/instance. break else: assert(False) # no other command types allowed at this point # After processing the commands in this list, # merge the templates with the callers template list for filename, tmpl_list in files_content.items(): global_files_content[filename] += \ files_content[filename] return index def ExecCommands(commands, files_content, settings, substitute_vars=True): matrix_stack = MultiAffineStack() index = _ExecCommands(commands, 0, files_content, settings, matrix_stack, None, substitute_vars) assert(index == len(commands)) def WriteFiles(files_content, suffix='', write_to_stdout=True): for filename, str_list in files_content.items(): if filename != None: out_file = None if filename == '': if write_to_stdout: out_file = sys.stdout else: out_file = open(filename+suffix, 'a') if out_file != None: out_file.write(''.join(str_list)) if filename != '': out_file.close() if __name__ == "__main__": """ This is is a "main module" wrapper for invoking lttree.py as a stand alone program. This program: 1)reads a ttree file, 2)constructs a tree of class definitions (g_objectdefs) 3)constructs a tree of instantiated class objects (g_objects), 4)automatically assigns values to the variables, 5)and carries out the "write" commands to write the templates a file(s). 
""" g_program_name = __file__.split('/')[-1] # ='lttree.py' g_date_str = '2014-12-19' g_version_str = '0.75' ####### Main Code Below: ####### sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+' ') sys.stderr.write('\n(python version '+str(sys.version)+')\n') if sys.version < '2.6': raise InputError('Error: Alas, you must upgrade to a newer version of python.') try: #settings = BasicUISettings() #BasicUIParseArgs(sys.argv, settings) settings = LttreeSettings() LttreeParseArgs(sys.argv, settings) # Data structures to store the class definitionss and instances g_objectdefs = StaticObj('', None) # The root of the static tree # has name '' (equivalent to '/') g_objects = InstanceObj('', None) # The root of the instance tree # has name '' (equivalent to '/') # A list of commands to carry out g_static_commands = [] g_instance_commands = [] BasicUI(settings, g_objectdefs, g_objects, g_static_commands, g_instance_commands) # Interpret the the commands. (These are typically write() or # write_once() commands, rendering templates into text. # This step also handles coordinate transformations and delete commands. # Coordinate transformations can be applied to the rendered text # as a post-processing step. sys.stderr.write(' done\nbuilding templates...') files_content = defaultdict(list) ExecCommands(g_static_commands, files_content, settings, False) ExecCommands(g_instance_commands, files_content, settings, False) # Finally: write the rendered text to actual files. # Erase the files that will be written to: sys.stderr.write(' done\nwriting templates...') EraseTemplateFiles(g_static_commands) EraseTemplateFiles(g_instance_commands) # Write the files as templates # (with the original variable names present) WriteFiles(files_content, suffix=".template", write_to_stdout=False) # Write the files with the variables substituted by values sys.stderr.write(' done\nbuilding and rendering templates...') files_content = defaultdict(list) ExecCommands(g_static_commands, files_content, settings, True) ExecCommands(g_instance_commands, files_content, settings, True) sys.stderr.write(' done\nwriting rendered templates...\n') WriteFiles(files_content) sys.stderr.write(' done\n') # Now write the variable bindings/assignments table. sys.stderr.write('writing \"ttree_assignments.txt\" file...') open('ttree_assignments.txt', 'w').close() # <-- erase previous version. WriteVarBindingsFile(g_objectdefs) WriteVarBindingsFile(g_objects) sys.stderr.write(' done\n') except (ValueError, InputError) as err: sys.stderr.write('\n\n'+str(err)+'\n') sys.exit(-1)
crtrott/lammps
tools/moltemplate/src/lttree.py
Python
gpl-2.0
35,110
[ "LAMMPS" ]
d59e84a7b776eca04964389bae603339144f2949cada78efc1b287faff59c222
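The key geometric rule in TransformAtomText above is worth isolating: atomic coordinates receive the full affine map (rotation/scaling plus translation), while direction vectors such as dipole moments receive only the linear part. A self-contained numpy sketch of that distinction; the real code uses the AffineTransform/LinearTransform helpers from ttree_matrix_stack instead.

import numpy as np

def affine(M, b, x):
    # coordinates: x' = M @ x + b (translation applies)
    return M @ x + b

def linear(M, x):
    # direction vectors: x' = M @ x (translation must not apply)
    return M @ x

M = np.array([[0., -1., 0.],   # 90-degree rotation about z
              [1.,  0., 0.],
              [0.,  0., 1.]])
b = np.array([10., 0., 0.])    # translational part of the matrix stack

print(affine(M, b, np.array([1., 0., 0.])))  # [10. 1. 0.] moved and rotated
print(linear(M, np.array([1., 0., 0.])))     # [ 0. 1. 0.] rotated only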
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module provides tools for detecting sources in an image. """ import warnings from astropy.convolution import Gaussian2DKernel, convolve from astropy.stats import gaussian_fwhm_to_sigma, sigma_clipped_stats from astropy.utils.exceptions import AstropyUserWarning import numpy as np from .core import SegmentationImage from ..utils.exceptions import NoDetectionsWarning __all__ = ['detect_threshold', 'detect_sources', 'make_source_mask'] def detect_threshold(data, nsigma, background=None, error=None, mask=None, mask_value=None, sigclip_sigma=3.0, sigclip_iters=None): """ Calculate a pixel-wise threshold image that can be used to detect sources. Parameters ---------- data : array_like The 2D array of the image. nsigma : float The number of standard deviations per pixel above the ``background`` for which to consider a pixel as possibly being part of a source. background : float or array_like, optional The background value(s) of the input ``data``. ``background`` may either be a scalar value or a 2D image with the same shape as the input ``data``. If the input ``data`` has been background-subtracted, then set ``background`` to ``0.0``. If `None`, then a scalar background value will be estimated using sigma-clipped statistics. error : float or array_like, optional The Gaussian 1-sigma standard deviation of the background noise in ``data``. ``error`` should include all sources of "background" error, but *exclude* the Poisson error of the sources. If ``error`` is a 2D image, then it should represent the 1-sigma background error in each pixel of ``data``. If `None`, then a scalar background rms value will be estimated using sigma-clipped statistics. mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when computing the image background statistics. mask_value : float, optional An image data value (e.g., ``0.0``) that is ignored when computing the image background statistics. ``mask_value`` will be ignored if ``mask`` is input. sigclip_sigma : float, optional The number of standard deviations to use as the clipping limit when calculating the image background statistics. sigclip_iters : int, optional The maximum number of iterations to perform sigma clipping, or `None` to clip until convergence is achieved (i.e., continue until the last iteration clips nothing) when calculating the image background statistics. Returns ------- threshold : 2D `~numpy.ndarray` A 2D image with the same shape as ``data`` containing the pixel-wise threshold values. See Also -------- :func:`photutils.segmentation.detect_sources` Notes ----- The ``mask``, ``mask_value``, ``sigclip_sigma``, and ``sigclip_iters`` inputs are used only if it is necessary to estimate ``background`` or ``error`` using sigma-clipped background statistics. If ``background`` and ``error`` are both input, then ``mask``, ``mask_value``, ``sigclip_sigma``, and ``sigclip_iters`` are ignored. 
""" if background is None or error is None: data_mean, _, data_std = sigma_clipped_stats( data, mask=mask, mask_value=mask_value, sigma=sigclip_sigma, maxiters=sigclip_iters) bkgrd_image = np.zeros_like(data) + data_mean bkgrdrms_image = np.zeros_like(data) + data_std if background is None: background = bkgrd_image else: if np.isscalar(background): background = np.zeros_like(data) + background else: if background.shape != data.shape: raise ValueError('If input background is 2D, then it ' 'must have the same shape as the input ' 'data.') if error is None: error = bkgrdrms_image else: if np.isscalar(error): error = np.zeros_like(data) + error else: if error.shape != data.shape: raise ValueError('If input error is 2D, then it ' 'must have the same shape as the input ' 'data.') return background + (error * nsigma) def _make_binary_structure(ndim, connectivity): """ Make a binary structure element. Parameters ---------- ndim : int The number of array dimensions. connectivity : {4, 8} For the case of ``ndim=2``, the type of pixel connectivity used in determining how pixels are grouped into a detected source. The options are 4 or 8 (default). 4-connected pixels touch along their edges. 8-connected pixels touch along their edges or corners. For reference, SourceExtractor uses 8-connected pixels. Returns ------- array : ndarray of int or bool The binary structure element. If ``ndim <= 2`` an array of int is returned, otherwise an array of bool is returned. """ from scipy.ndimage import generate_binary_structure if ndim == 1: selem = np.array((1, 1, 1)) elif ndim == 2: if connectivity == 4: selem = np.array(((0, 1, 0), (1, 1, 1), (0, 1, 0))) elif connectivity == 8: selem = np.ones((3, 3), dtype=int) else: raise ValueError(f'Invalid connectivity={connectivity}. ' 'Options are 4 or 8.') else: selem = generate_binary_structure(ndim, 1) return selem def _detect_sources(data, thresholds, npixels, kernel=None, connectivity=8, mask=None, deblend_skip=False): """ Detect sources above a specified threshold value in an image and return a `~photutils.segmentation.SegmentationImage` object. Detected sources must have ``npixels`` connected pixels that are each greater than the ``threshold`` value. If the filtering option is used, then the ``threshold`` is applied to the filtered image. The input ``mask`` can be used to mask pixels in the input data. Masked pixels will not be included in any source. This function does not deblend overlapping sources. First use this function to detect sources followed by :func:`~photutils.segmentation.deblend_sources` to deblend sources. Parameters ---------- data : array_like The 2D array of the image. thresholds : array-like of floats or arrays The data value or pixel-wise data values to be used for the detection thresholds. A 2D ``threshold`` must have the same shape as ``data``. See `~photutils.segmentation.detect_threshold` for one way to create a ``threshold`` image. npixels : int The number of connected pixels, each greater than ``threshold``, that an object must have to be detected. ``npixels`` must be a positive integer. kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the image before thresholding. Filtering the image will smooth the noise and maximize detectability of objects with a shape similar to the kernel. connectivity : {4, 8}, optional The type of pixel connectivity used in determining how pixels are grouped into a detected source. The options are 4 or 8 (default). 
4-connected pixels touch along their edges. 8-connected pixels touch along their edges or corners. For reference, SourceExtractor uses 8-connected pixels. mask : array_like of bool, optional A boolean mask, with the same shape as the input ``data``, where `True` values indicate masked pixels. Masked pixels will not be included in any source. deblend_skip : bool, optional If `True` do not include the segmentation image in the output list for any threshold level where the number of detected sources is less than 2. This is useful for source deblending and improves its performance. Returns ------- segment_image : list of `~photutils.segmentation.SegmentationImage` A list of 2D segmentation images, with the same shape as ``data``, where sources are marked by different positive integer values. A value of zero is reserved for the background. If no sources are found for a given threshold, then the output list will contain `None` for that threshold. Also see the ``deblend_skip`` keyword. """ from scipy import ndimage if (npixels <= 0) or (int(npixels) != npixels): raise ValueError('npixels must be a positive integer, got ' f'"{npixels}"') if mask is not None: if mask.shape != data.shape: raise ValueError('mask must have the same shape as the input ' 'image.') if kernel is not None: with warnings.catch_warnings(): warnings.simplefilter('ignore', AstropyUserWarning) data = convolve(data, kernel, mask=mask, normalize_kernel=True) selem = _make_binary_structure(data.ndim, connectivity) segms = [] for threshold in thresholds: # ignore RuntimeWarning caused by > comparison when data contains NaNs with warnings.catch_warnings(): warnings.simplefilter('ignore', category=RuntimeWarning) data2 = data > threshold if mask is not None: data2 &= ~mask # return if threshold was too high to detect any sources if np.count_nonzero(data2) == 0: warnings.warn('No sources were found.', NoDetectionsWarning) if not deblend_skip: segms.append(None) continue segm_img, _ = ndimage.label(data2, structure=selem) # remove objects with less than npixels # NOTE: for typical data, making the cutout images is ~10x faster # than using segm_img directly segm_slices = ndimage.find_objects(segm_img) for i, slices in enumerate(segm_slices): cutout = segm_img[slices] segment_mask = (cutout == (i + 1)) if np.count_nonzero(segment_mask) < npixels: cutout[segment_mask] = 0 if np.count_nonzero(segm_img) == 0: warnings.warn('No sources were found.', NoDetectionsWarning) if not deblend_skip: segms.append(None) continue segm = object.__new__(SegmentationImage) segm._data = segm_img if deblend_skip and segm.nlabels == 1: continue segm.relabel_consecutive() segms.append(segm) return segms def detect_sources(data, threshold, npixels, kernel=None, connectivity=8, mask=None): """ Detect sources above a specified threshold value in an image and return a `~photutils.segmentation.SegmentationImage` object. Detected sources must have ``npixels`` connected pixels that are each greater than the ``threshold`` value. If the filtering option is used, then the ``threshold`` is applied to the filtered image. The input ``mask`` can be used to mask pixels in the input data. Masked pixels will not be included in any source. This function does not deblend overlapping sources. First use this function to detect sources followed by :func:`~photutils.segmentation.deblend_sources` to deblend sources. Parameters ---------- data : array_like The 2D array of the image. 
threshold : float or array-like The data value or pixel-wise data values to be used for the detection threshold. A 2D ``threshold`` must have the same shape as ``data``. See `~photutils.segmentation.detect_threshold` for one way to create a ``threshold`` image. npixels : int The number of connected pixels, each greater than ``threshold``, that an object must have to be detected. ``npixels`` must be a positive integer. kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the image before thresholding. Filtering the image will smooth the noise and maximize detectability of objects with a shape similar to the kernel. connectivity : {4, 8}, optional The type of pixel connectivity used in determining how pixels are grouped into a detected source. The options are 4 or 8 (default). 4-connected pixels touch along their edges. 8-connected pixels touch along their edges or corners. For reference, SourceExtractor uses 8-connected pixels. mask : array_like of bool, optional A boolean mask, with the same shape as the input ``data``, where `True` values indicate masked pixels. Masked pixels will not be included in any source. Returns ------- segment_image : `~photutils.segmentation.SegmentationImage` or `None` A 2D segmentation image, with the same shape as ``data``, where sources are marked by different positive integer values. A value of zero is reserved for the background. If no sources are found then `None` is returned. See Also -------- :func:`photutils.segmentation.detect_threshold` :class:`photutils.segmentation.SegmentationImage` :func:`photutils.segmentation.deblend_sources` Examples -------- .. plot:: :include-source: from astropy.convolution import Gaussian2DKernel from astropy.stats import gaussian_fwhm_to_sigma from astropy.visualization import simple_norm import matplotlib.pyplot as plt from photutils.datasets import make_100gaussians_image from photutils.segmentation import detect_threshold, detect_sources # make a simulated image data = make_100gaussians_image() # detect the sources threshold = detect_threshold(data, nsigma=3) sigma = 3.0 * gaussian_fwhm_to_sigma # FWHM = 3. kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3) kernel.normalize() segm = detect_sources(data, threshold, npixels=5, kernel=kernel) # plot the image and the segmentation image fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 10)) norm = simple_norm(data, 'sqrt', percent=99.) ax1.imshow(data, origin='lower', interpolation='nearest', norm=norm) ax2.imshow(segm.data, origin='lower', interpolation='nearest', cmap=segm.make_cmap(seed=1234)) plt.tight_layout() """ return _detect_sources(data, (threshold,), npixels, kernel=kernel, connectivity=connectivity, mask=mask)[0] def make_source_mask(data, nsigma, npixels, mask=None, filter_fwhm=None, filter_size=3, kernel=None, sigclip_sigma=3.0, sigclip_iters=5, dilate_size=11): """ Make a source mask using source segmentation and binary dilation. Parameters ---------- data : array_like The 2D array of the image. nsigma : float The number of standard deviations per pixel above the ``background`` for which to consider a pixel as possibly being part of a source. npixels : int The number of connected pixels, each greater than ``threshold``, that an object must have to be detected. ``npixels`` must be a positive integer. mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. 
Masked pixels are ignored when computing the image background statistics. filter_fwhm : float, optional The full-width at half-maximum (FWHM) of the Gaussian kernel to filter the image before thresholding. ``filter_fwhm`` and ``filter_size`` are ignored if ``kernel`` is defined. filter_size : float, optional The size of the square Gaussian kernel image. Used only if ``filter_fwhm`` is defined. ``filter_fwhm`` and ``filter_size`` are ignored if ``kernel`` is defined. kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional The 2D array of the kernel used to filter the image before thresholding. Filtering the image will smooth the noise and maximize detectability of objects with a shape similar to the kernel. ``kernel`` overrides ``filter_fwhm`` and ``filter_size``. sigclip_sigma : float, optional The number of standard deviations to use as the clipping limit when calculating the image background statistics. sigclip_iters : int, optional The maximum number of iterations to perform sigma clipping, or `None` to clip until convergence is achieved (i.e., continue until the last iteration clips nothing) when calculating the image background statistics. dilate_size : int, optional The size of the square array used to dilate the segmentation image. Returns ------- mask : 2D bool `~numpy.ndarray` A 2D boolean image containing the source mask. """ from scipy import ndimage threshold = detect_threshold(data, nsigma, background=None, error=None, mask=mask, sigclip_sigma=sigclip_sigma, sigclip_iters=sigclip_iters) if kernel is None and filter_fwhm is not None: kernel_sigma = filter_fwhm * gaussian_fwhm_to_sigma kernel = Gaussian2DKernel(kernel_sigma, x_size=filter_size, y_size=filter_size) if kernel is not None: kernel.normalize() segm = detect_sources(data, threshold, npixels, kernel=kernel) if segm is None: return np.zeros(data.shape, dtype=bool) selem = np.ones((dilate_size, dilate_size)) return ndimage.binary_dilation(segm.data.astype(bool), selem)
astropy/photutils
photutils/segmentation/detect.py
Python
bsd-3-clause
18,568
[ "Gaussian" ]
efcd8127c071976e8eee00cb0e3c689b64826a94937867749ce157d6390d031d
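When background and error are passed as scalars, detect_threshold above reduces to the constant image background + error * nsigma broadcast over every pixel. A quick check of that identity on synthetic data (values illustrative):

import numpy as np
from photutils.segmentation import detect_threshold

rng = np.random.default_rng(42)
data = rng.normal(loc=5.0, scale=2.0, size=(64, 64))  # flat sky + noise

threshold = detect_threshold(data, nsigma=3.0, background=5.0, error=2.0)
assert threshold.shape == data.shape
assert np.allclose(threshold, 5.0 + 2.0 * 3.0)  # background + error * nsigma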
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Makes sure that all files contain proper licensing information.""" import optparse import os.path import subprocess import sys def PrintUsage(): print """Usage: python checklicenses.py [--root <root>] [tocheck] --root Specifies the repository root. This defaults to "../.." relative to the script file. This will be correct given the normal location of the script in "<root>/tools/checklicenses". --ignore-suppressions Ignores path-specific license whitelist. Useful when trying to remove a suppression/whitelist entry. tocheck Specifies the directory, relative to root, to check. This defaults to "." so it checks everything. Examples: python checklicenses.py python checklicenses.py --root ~/chromium/src third_party""" WHITELISTED_LICENSES = [ 'Apache (v2.0)', 'Apache (v2.0) BSD (2 clause)', 'Apache (v2.0) GPL (v2)', 'Apple MIT', # https://fedoraproject.org/wiki/Licensing/Apple_MIT_License 'APSL (v2)', 'APSL (v2) BSD (4 clause)', 'BSD', 'BSD (2 clause)', 'BSD (2 clause) ISC', 'BSD (2 clause) MIT/X11 (BSD like)', 'BSD (3 clause)', 'BSD (3 clause) GPL (v2)', 'BSD (3 clause) ISC', 'BSD (3 clause) LGPL (v2 or later)', 'BSD (3 clause) LGPL (v2.1 or later)', 'BSD (3 clause) MIT/X11 (BSD like)', 'BSD (4 clause)', 'BSD-like', # TODO(phajdan.jr): Make licensecheck not print BSD-like twice. 'BSD-like MIT/X11 (BSD like)', 'BSL (v1.0)', 'GPL (v2) LGPL (v2.1 or later)', 'GPL (v2 or later) with Bison parser exception', 'GPL (v2 or later) with libtool exception', 'GPL (v3 or later) with Bison parser exception', 'GPL with Bison parser exception', 'ISC', 'LGPL (unversioned/unknown version)', 'LGPL (v2)', 'LGPL (v2 or later)', 'LGPL (v2.1)', 'LGPL (v2.1 or later)', 'LGPL (v3 or later)', 'MIT/X11 (BSD like)', 'MPL (v1.0) LGPL (v2 or later)', 'MPL (v1.1)', 'MPL (v1.1) BSD (3 clause) GPL (v2) LGPL (v2.1 or later)', 'MPL (v1.1) BSD (3 clause) LGPL (v2.1 or later)', 'MPL (v1.1) BSD-like', 'MPL (v1.1) BSD-like GPL (unversioned/unknown version)', 'MPL (v1.1) BSD-like GPL (v2) LGPL (v2.1 or later)', 'MPL (v1.1) GPL (v2)', 'MPL (v1.1) GPL (v2) LGPL (v2 or later)', 'MPL (v1.1) GPL (v2) LGPL (v2.1 or later)', 'MPL (v1.1) GPL (unversioned/unknown version)', 'MPL (v1.1) LGPL (v2 or later)', 'MPL (v1.1) LGPL (v2.1 or later)', 'MPL (v2.0)', 'Ms-PL', 'Public domain', 'Public domain BSD', 'Public domain BSD (3 clause)', 'Public domain BSD-like', 'Public domain LGPL (v2.1 or later)', 'libpng', 'zlib/libpng', 'SGI Free Software License B', 'University of Illinois/NCSA Open Source License (BSD like)', ] PATH_SPECIFIC_WHITELISTED_LICENSES = { 'base/hash.cc': [ # http://crbug.com/98100 'UNKNOWN', ], 'base/third_party/icu': [ # http://crbug.com/98087 'UNKNOWN', ], # http://code.google.com/p/google-breakpad/issues/detail?id=450 'breakpad/src': [ 'UNKNOWN', ], 'chrome/common/extensions/docs/examples': [ # http://crbug.com/98092 'UNKNOWN', ], 'chrome/test/data/gpu/vt': [ 'UNKNOWN', ], 'chrome/test/data/layout_tests/LayoutTests': [ 'UNKNOWN', ], 'courgette/third_party/bsdiff_create.cc': [ # http://crbug.com/98095 'UNKNOWN', ], 'data/mozilla_js_tests': [ 'UNKNOWN', ], 'data/page_cycler': [ 'UNKNOWN', 'GPL (v2 or later)', ], 'data/tab_switching': [ 'UNKNOWN', ], 'native_client': [ # http://crbug.com/98099 'UNKNOWN', ], 'native_client/toolchain': [ 'BSD GPL (v2 or later)', 'BSD (2 clause) GPL (v2 or later)', 'BSD (3 clause) GPL (v2 or later)', 'BSL (v1.0) 
GPL', 'BSL (v1.0) GPL (v3.1)', 'GPL', 'GPL (unversioned/unknown version)', 'GPL (v2)', 'GPL (v2 or later)', 'GPL (v3.1)', 'GPL (v3 or later)', ], 'net/tools/spdyshark': [ 'GPL (v2 or later)', 'UNKNOWN', ], 'third_party/WebKit': [ 'UNKNOWN', ], 'third_party/WebKit/Websites/webkit.org/blog/wp-content/plugins/' 'akismet/akismet.php': [ 'GPL (v2 or later)' ], 'third_party/WebKit/Source/JavaScriptCore/tests/mozilla': [ 'GPL', 'GPL (v2 or later)', 'GPL (unversioned/unknown version)', ], 'third_party/active_doc': [ # http://crbug.com/98113 'UNKNOWN', ], # http://code.google.com/p/angleproject/issues/detail?id=217 'third_party/angle': [ 'UNKNOWN', ], 'third_party/bsdiff/mbsdiff.cc': [ 'UNKNOWN', ], 'third_party/bzip2': [ 'UNKNOWN', ], # http://crbug.com/222828 # http://bugs.python.org/issue17514 'third_party/chromite/third_party/argparse.py': [ 'UNKNOWN', ], # Not used. http://crbug.com/156020 # Using third_party/cros_dbus_cplusplus/cros_dbus_cplusplus.gyp instead. 'third_party/cros_dbus_cplusplus/source/autogen.sh': [ 'UNKNOWN', ], # Included in the source tree but not built. http://crbug.com/156020 'third_party/cros_dbus_cplusplus/source/examples': [ 'UNKNOWN', ], 'third_party/devscripts': [ 'GPL (v2 or later)', ], 'third_party/expat/files/lib': [ # http://crbug.com/98121 'UNKNOWN', ], 'third_party/ffmpeg': [ 'GPL', 'GPL (v2)', 'GPL (v2 or later)', 'UNKNOWN', # http://crbug.com/98123 ], 'third_party/findbugs/doc': [ # http://crbug.com/157206 'UNKNOWN', ], 'third_party/freetype2': [ # http://crbug.com/177319 'UNKNOWN', ], 'third_party/gles2_book': [ # http://crbug.com/98130 'UNKNOWN', ], 'third_party/gles2_conform/GTF_ES': [ # http://crbug.com/98131 'UNKNOWN', ], 'third_party/harfbuzz': [ # http://crbug.com/98133 'UNKNOWN', ], 'third_party/hunspell': [ # http://crbug.com/98134 'UNKNOWN', ], 'third_party/hyphen/hyphen.tex': [ # http://crbug.com/157375 'UNKNOWN', ], 'third_party/iccjpeg': [ # http://crbug.com/98137 'UNKNOWN', ], 'third_party/icu': [ # http://crbug.com/98301 'UNKNOWN', ], 'third_party/jemalloc': [ # http://crbug.com/98302 'UNKNOWN', ], 'third_party/JSON': [ 'Perl', # Build-only. # License missing upstream on 3 minor files. 'UNKNOWN', # https://rt.cpan.org/Public/Bug/Display.html?id=85915 ], 'third_party/lcov': [ # http://crbug.com/98304 'UNKNOWN', ], 'third_party/lcov/contrib/galaxy/genflat.pl': [ 'GPL (v2 or later)', ], 'third_party/lcov-1.9/contrib/galaxy/genflat.pl': [ 'GPL (v2 or later)', ], 'third_party/libevent': [ # http://crbug.com/98309 'UNKNOWN', ], 'third_party/libjingle/source/talk': [ # http://crbug.com/98310 'UNKNOWN', ], 'third_party/libjingle/source_internal/talk': [ # http://crbug.com/98310 'UNKNOWN', ], 'third_party/libjpeg': [ # http://crbug.com/98313 'UNKNOWN', ], 'third_party/libjpeg_turbo': [ # http://crbug.com/98314 'UNKNOWN', ], 'third_party/libpng': [ # http://crbug.com/98318 'UNKNOWN', ], # The following files lack license headers, but are trivial. 
'third_party/libusb/src/libusb/os/poll_posix.h': [ 'UNKNOWN', ], 'third_party/libusb/src/libusb/version.h': [ 'UNKNOWN', ], 'third_party/libusb/src/autogen.sh': [ 'UNKNOWN', ], 'third_party/libusb/src/config.h': [ 'UNKNOWN', ], 'third_party/libusb/src/msvc/config.h': [ 'UNKNOWN', ], 'third_party/libvpx/source': [ # http://crbug.com/98319 'UNKNOWN', ], 'third_party/libvpx/source/libvpx/examples/includes': [ 'GPL (v2 or later)', ], 'third_party/libxml': [ 'UNKNOWN', ], 'third_party/libxslt': [ 'UNKNOWN', ], 'third_party/lzma_sdk': [ 'UNKNOWN', ], 'third_party/mesa/src': [ 'GPL (v2)', 'GPL (v3 or later)', 'MIT/X11 (BSD like) GPL (v3 or later) with Bison parser exception', 'UNKNOWN', # http://crbug.com/98450 ], 'third_party/modp_b64': [ 'UNKNOWN', ], 'third_party/npapi/npspy/extern/java': [ 'GPL (unversioned/unknown version)', ], 'third_party/openmax_dl/dl' : [ 'Khronos Group', ], 'third_party/openssl': [ # http://crbug.com/98451 'UNKNOWN', ], 'third_party/ots/tools/ttf-checksum.py': [ # http://code.google.com/p/ots/issues/detail?id=2 'UNKNOWN', ], 'third_party/molokocacao': [ # http://crbug.com/98453 'UNKNOWN', ], 'third_party/npapi/npspy': [ 'UNKNOWN', ], 'third_party/ocmock/OCMock': [ # http://crbug.com/98454 'UNKNOWN', ], 'third_party/ply/__init__.py': [ 'UNKNOWN', ], 'third_party/protobuf': [ # http://crbug.com/98455 'UNKNOWN', ], # http://crbug.com/222831 # https://bitbucket.org/eliben/pyelftools/issue/12 'third_party/pyelftools': [ 'UNKNOWN', ], 'third_party/pylib': [ 'UNKNOWN', ], 'third_party/scons-2.0.1/engine/SCons': [ # http://crbug.com/98462 'UNKNOWN', ], 'third_party/simplejson': [ 'UNKNOWN', ], 'third_party/skia': [ # http://crbug.com/98463 'UNKNOWN', ], 'third_party/snappy/src': [ # http://crbug.com/98464 'UNKNOWN', ], 'third_party/smhasher/src': [ # http://crbug.com/98465 'UNKNOWN', ], 'third_party/speech-dispatcher/libspeechd.h': [ 'GPL (v2 or later)', ], 'third_party/sqlite': [ 'UNKNOWN', ], 'third_party/swig/Lib/linkruntime.c': [ # http://crbug.com/98585 'UNKNOWN', ], 'third_party/talloc': [ 'GPL (v3 or later)', 'UNKNOWN', # http://crbug.com/98588 ], 'third_party/tcmalloc': [ 'UNKNOWN', # http://crbug.com/98589 ], 'third_party/tlslite': [ 'UNKNOWN', ], 'third_party/webdriver': [ # http://crbug.com/98590 'UNKNOWN', ], 'third_party/webrtc': [ # http://crbug.com/98592 'UNKNOWN', ], 'third_party/xdg-utils': [ # http://crbug.com/98593 'UNKNOWN', ], 'third_party/yasm/source': [ # http://crbug.com/98594 'UNKNOWN', ], 'third_party/zlib/contrib/minizip': [ 'UNKNOWN', ], 'third_party/zlib/trees.h': [ 'UNKNOWN', ], 'tools/dromaeo_benchmark_runner/dromaeo_benchmark_runner.py': [ 'UNKNOWN', ], 'tools/emacs': [ # http://crbug.com/98595 'UNKNOWN', ], 'tools/grit/grit/node/custom/__init__.py': [ 'UNKNOWN', ], 'tools/gyp/test': [ 'UNKNOWN', ], 'tools/histograms': [ 'UNKNOWN', ], 'tools/memory_watcher': [ 'UNKNOWN', ], 'tools/playback_benchmark': [ 'UNKNOWN', ], 'tools/python/google/__init__.py': [ 'UNKNOWN', ], 'tools/site_compare': [ 'UNKNOWN', ], 'tools/stats_viewer/Properties/AssemblyInfo.cs': [ 'UNKNOWN', ], 'tools/symsrc/pefile.py': [ 'UNKNOWN', ], 'v8/test/cctest': [ # http://crbug.com/98597 'UNKNOWN', ], 'webkit/data/ico_decoder': [ 'UNKNOWN', ], } def check_licenses(options, args): # Figure out which directory we have to check. if len(args) == 0: # No directory to check specified, use the repository root. start_dir = options.base_directory elif len(args) == 1: # Directory specified. Start here. It's supposed to be relative to the # base directory. 
start_dir = os.path.abspath(os.path.join(options.base_directory, args[0])) else: # More than one argument, we don't handle this. PrintUsage() return 1 print "Using base directory:", options.base_directory print "Checking:", start_dir print licensecheck_path = os.path.abspath(os.path.join(options.base_directory, 'third_party', 'devscripts', 'licensecheck.pl')) licensecheck = subprocess.Popen([licensecheck_path, '-l', '100', '-r', start_dir], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = licensecheck.communicate() if options.verbose: print '----------- licensecheck stdout -----------' print stdout print '--------- end licensecheck stdout ---------' if licensecheck.returncode != 0 or stderr: print '----------- licensecheck stderr -----------' print stderr print '--------- end licensecheck stderr ---------' print "\nFAILED\n" return 1 success = True for line in stdout.splitlines(): filename, license = line.split(':', 1) filename = os.path.relpath(filename.strip(), options.base_directory) # All files in the build output directory are generated one way or another. # There's no need to check them. if filename.startswith('out/') or filename.startswith('sconsbuild/'): continue # For now we're just interested in the license. license = license.replace('*No copyright*', '').strip() # Skip generated files. if 'GENERATED FILE' in license: continue if license in WHITELISTED_LICENSES: continue if not options.ignore_suppressions: found_path_specific = False for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES: if (filename.startswith(prefix) and license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]): found_path_specific = True break if found_path_specific: continue print "'%s' has non-whitelisted license '%s'" % (filename, license) success = False if success: print "\nSUCCESS\n" return 0 else: print "\nFAILED\n" print "Please read", print "http://www.chromium.org/developers/adding-3rd-party-libraries" print "for more info how to handle the failure." print print "Please respect OWNERS of checklicenses.py. Changes violating" print "this requirement may be reverted." return 1 def main(): default_root = os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..')) option_parser = optparse.OptionParser() option_parser.add_option('--root', default=default_root, dest='base_directory', help='Specifies the repository root. This defaults ' 'to "../.." relative to the script file, which ' 'will normally be the repository root.') option_parser.add_option('-v', '--verbose', action='store_true', default=False, help='Print debug logging') option_parser.add_option('--ignore-suppressions', action='store_true', default=False, help='Ignore path-specific license whitelist.') options, args = option_parser.parse_args() return check_licenses(options, args) if '__main__' == __name__: sys.exit(main())
KitKatXperience/platform_external_chromium_org
tools/checklicenses/checklicenses.py
Python
bsd-3-clause
15,850
[ "Galaxy" ]
0d37fe4ea295bb48cb92e78b04246954c755d4f7940746258ca593074f0517b8
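The checklicenses.py record above gates each flagged file on a global whitelist first, then on a path-prefix table. The sketch below isolates that prefix-suppression rule so it runs on its own; PATH_RULES is an illustrative stand-in for PATH_SPECIFIC_WHITELISTED_LICENSES, not the real table.

# Stand-in for PATH_SPECIFIC_WHITELISTED_LICENSES (illustrative entries only).
PATH_RULES = {
    'third_party/sqlite': ['UNKNOWN'],
    'third_party/talloc': ['GPL (v3 or later)', 'UNKNOWN'],
}

def is_suppressed(filename, license_name, rules=PATH_RULES):
    # True when the file sits under a whitelisted prefix AND the license
    # found there is one of the licenses allowed for that prefix.
    return any(filename.startswith(prefix) and license_name in allowed
               for prefix, allowed in rules.items())

assert is_suppressed('third_party/sqlite/src/main.c', 'UNKNOWN')
assert not is_suppressed('base/logging.cc', 'UNKNOWN')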
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

from ORCA.utils.Path import cPath

def Android_SystemUserPath() -> cPath:
    """ Returns the Operating System User Path """
    return cPath("/data")
thica/ORCA-Remote
src/ORCA/utils/Platform/android/android_GetSystemUserPath.py
Python
gpl-3.0
1,028
[ "ORCA" ]
4d4e01195df369c229cbad22246f48aa1dfdf16affc8de53dd0349463f381ecc
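Android_SystemUserPath above is one per-platform arm of ORCA's path helpers. The sketch below shows a plausible dispatch pattern for such helpers; the selection logic and names are assumptions for illustration, since ORCA's actual platform loader is not part of this record.

import sys

def system_user_path():
    # Hypothetical dispatcher; ORCA's real loader is not shown in this record.
    # On Android the record's helper returns "/data"; elsewhere fall back.
    if sys.platform.startswith('linux'):  # stand-in check; Android reports 'linux'
        return "/data"
    return "~"

print(system_user_path())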
#!/restricted/projectnb/montilab-p/personal/eric/hydra_dev/dev_env/bin/python
#Copyright 2015 Daniel Gusenleitner, Stefano Monti

#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at

#    http://www.apache.org/licenses/LICENSE-2.0

#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.

import sys
import gzip
import re
from Bio.SeqIO.QualityIO import FastqGeneralIterator #Biopython 1.51 or later

##########################################################
#
# Change the following settings to suit your needs

## Read paired-end fastq filenames from commandline arguments provided
#NOTE : THESE FILENAMES INCLUDE THE FULL PATH TO THE FILES
INPUT_FORWARD_FILENAME = sys.argv[1]
INPUT_REVERSE_FILENAME = sys.argv[2]

OUTPUT_PAIRED_FORWARD_FILENAME = sys.argv[3]
OUTPUT_PAIRED_REVERSE_FILENAME = sys.argv[4]

def f_name(title):
    return re.sub('/.$', '', title.split()[0])

def r_name(title):
    return re.sub('/.$', '', title.split()[0])

print "Scanning reverse file to build list of names..."
reverse_ids = set()
paired_ids = set()
for title, seq, qual in FastqGeneralIterator(gzip.open(INPUT_REVERSE_FILENAME, 'rb')):
    reverse_ids.add(r_name(title))

print "Processing forward file..."
forward_handle = gzip.open(OUTPUT_PAIRED_FORWARD_FILENAME, "wb")
#orphan_handle = gzip.open(output_orphan_filename, "wb")
for title, seq, qual in FastqGeneralIterator(gzip.open(INPUT_FORWARD_FILENAME, 'rb')):
    nam = f_name(title)
    if nam in reverse_ids:
        #Paired
        paired_ids.add(nam)
        reverse_ids.remove(nam) #frees a little memory
        forward_handle.write("@%s\n%s\n+\n%s\n" % (title, seq, qual))
    else:
        #Orphan
        continue
forward_handle.close()
del reverse_ids #frees memory, although we won't need more now

print "Processing reverse file..."
reverse_handle = gzip.open(OUTPUT_PAIRED_REVERSE_FILENAME, "wb")
for title, seq, qual in FastqGeneralIterator(gzip.open(INPUT_REVERSE_FILENAME, 'rb')):
    nam = r_name(title)
    if nam in paired_ids:
        #Paired
        reverse_handle.write("@%s\n%s\n+\n%s\n" % (title, seq, qual))
    else:
        #Orphan
        continue
reverse_handle.close()
print "Done"
montilab/Hydra
build/scripts-2.7/paired_ends_intersect.py
Python
apache-2.0
2,532
[ "Biopython" ]
9cb7e16fb3cca4bd63dee0f56352c1945b3cae7d927f901d79b6fea3e91aa062
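paired_ends_intersect.py above pairs reads by name with a two-pass set intersection: collect the reverse-read names, stream the forward file keeping shared names, then stream the reverse file keeping the pairs. The sketch below applies the same idea to in-memory (title, seq, qual) tuples so it runs without FASTQ inputs; the read names are made up.

import re

def read_name(title):
    # Same normalisation as f_name/r_name above: drop a trailing "/1" or "/2".
    return re.sub('/.$', '', title.split()[0])

forward = [('r1/1', 'ACGT', 'IIII'), ('r2/1', 'ACGT', 'IIII')]
reverse = [('r2/2', 'TTTT', 'IIII'), ('r3/2', 'GGGG', 'IIII')]

reverse_ids = {read_name(t) for t, _, _ in reverse}
paired_ids = set()
paired_forward = []
for title, seq, qual in forward:          # pass 1: forward reads with a mate
    name = read_name(title)
    if name in reverse_ids:
        paired_ids.add(name)
        reverse_ids.remove(name)          # frees memory, as in the original
        paired_forward.append((title, seq, qual))

paired_reverse = [(t, s, q) for t, s, q in reverse   # pass 2: matching mates
                  if read_name(t) in paired_ids]
assert [read_name(t) for t, _, _ in paired_forward] == ['r2']
assert [read_name(t) for t, _, _ in paired_reverse] == ['r2']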
# -*- coding: utf-8 -*- # Copyright 2007-2016 The HyperSpy developers # # This file is part of HyperSpy. # # HyperSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # HyperSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with HyperSpy. If not, see <http://www.gnu.org/licenses/>. import math from hyperspy._components.expression import Expression from hyperspy._components.gaussian import _estimate_gaussian_parameters sqrt2pi = math.sqrt(2 * math.pi) sigma2fwhm = 2 * math.sqrt(2 * math.log(2)) class GaussianHF(Expression): """Normalized gaussian function component, with a fwhm parameter instead of the sigma parameter, and a height parameter instead of the A parameter (scaling difference of sigma * sqrt(2*Pi)). This makes the parameter vs. peak maximum independent of sigma, and thereby makes locking of the parameter more viable. As long as there is no binning, the height parameter corresponds directly to the peak maximum, if not, the value is scaled by a linear constant (signal_axis.scale). .. math:: f(x) = h \\sqrt{2\\pi}\\mathrm{exp}{\\left[-\\frac{4 \\log{2}\\left(x-c\\right)^{2}}{W^{2}}\\right]} Parameters ----------- height: float The height of the peak. If there is no binning, this corresponds directly to the maximum, otherwise the maximum divided by signal_axis.scale centre: float Location of the gaussian maximum, also the mean position. fwhm: float The full width half maximum value, i.e. the width of the gaussian at half the value of gaussian peak (at centre). **kwargs Extra keyword arguments are passes to the ``Expression`` component. An useful keyword argument that can be used to speed up the component is `module`. See the ``Expression`` component documentation for details. The helper properties `sigma` and `A` are also defined for compatibility with `Gaussian` component. See also -------- hyperspy.components.Gaussian """ def __init__(self, height=1., fwhm=1., centre=0., module="numexpr", **kwargs): super(GaussianHF, self).__init__( expression="height * exp(-(x - centre)**2 * 4 * log(2)/fwhm**2)", name="GaussianHF", height=height, fwhm=fwhm, centre=centre, position="centre", module=module, autodoc=False, **kwargs, ) # Boundaries self.height.bmin = 0. self.height.bmax = None self.fwhm.bmin = 0. self.fwhm.bmax = None self.isbackground = False self.convolved = True def estimate_parameters(self, signal, x1, x2, only_current=False): """Estimate the gaussian by calculating the momenta. Parameters ---------- signal : Signal1D instance x1 : float Defines the left limit of the spectral range to use for the estimation. x2 : float Defines the right limit of the spectral range to use for the estimation. only_current : bool If False estimates the parameters for the full dataset. 
Returns ------- bool Notes ----- Adapted from http://www.scipy.org/Cookbook/FittingData Examples -------- >>> g = hs.model.components1D.GaussianHF() >>> x = np.arange(-10, 10, 0.01) >>> data = np.zeros((32, 32, 2000)) >>> data[:] = g.function(x).reshape((1, 1, 2000)) >>> s = hs.signals.Signal1D(data) >>> s.axes_manager._axes[-1].offset = -10 >>> s.axes_manager._axes[-1].scale = 0.01 >>> g.estimate_parameters(s, -10, 10, False) """ super(GaussianHF, self)._estimate_parameters(signal) axis = signal.axes_manager.signal_axes[0] centre, height, sigma = _estimate_gaussian_parameters(signal, x1, x2, only_current) if only_current is True: self.centre.value = centre self.fwhm.value = sigma * sigma2fwhm self.height.value = float(height) if self.binned: self.height.value /= axis.scale return True else: if self.height.map is None: self._create_arrays() self.height.map['values'][:] = height if self.binned: self.height.map['values'][:] /= axis.scale self.height.map['is_set'][:] = True self.fwhm.map['values'][:] = sigma * sigma2fwhm self.fwhm.map['is_set'][:] = True self.centre.map['values'][:] = centre self.centre.map['is_set'][:] = True self.fetch_stored_values() return True @property def sigma(self): return self.fwhm.value / sigma2fwhm @sigma.setter def sigma(self, value): self.fwhm.value = value * sigma2fwhm @property def A(self): return self.height.value * self.sigma * sqrt2pi @A.setter def A(self, value): self.height.value = value / (self.sigma * sqrt2pi) def integral_as_signal(self): """ Utility function to get gaussian integral as Signal1D """ return (self.height.as_signal() * self.fwhm.as_signal() * sqrt2pi / sigma2fwhm)
magnunor/hyperspy
hyperspy/_components/gaussianhf.py
Python
gpl-3.0
5,976
[ "Gaussian" ]
8e4c2c1154961e151ea15f82eecee8bc8da792cdb462a116b8ad6c973ee45e48
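GaussianHF above re-parameterises the Gaussian in (height, fwhm) instead of (A, sigma), with sigma = fwhm / (2*sqrt(2*ln 2)) and A = height * sigma * sqrt(2*pi), as its sigma and A properties implement. The sketch below numerically checks those conversions against the component's expression string; only numpy is assumed.

import math
import numpy as np

sigma2fwhm = 2 * math.sqrt(2 * math.log(2))

def gaussian_hf(x, height, fwhm, centre):
    # The same expression the component compiles above.
    return height * np.exp(-(x - centre) ** 2 * 4 * np.log(2) / fwhm ** 2)

height, fwhm, centre = 2.0, 1.5, 0.0
sigma = fwhm / sigma2fwhm
A = height * sigma * math.sqrt(2 * math.pi)      # matches the A property

x = np.linspace(-10.0, 10.0, 100001)
area = gaussian_hf(x, height, fwhm, centre).sum() * (x[1] - x[0])
assert abs(area - A) < 1e-4                      # area under the peak equals A
assert gaussian_hf(centre, height, fwhm, centre) == height  # peak maximum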
# coding: utf-8
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.

from collections import Counter
from pypln.backend.celery_task import PyPLNTask


def _get_momenta(distribution):
    total, momentum_1, momentum_2, momentum_3, momentum_4 = 0, 0, 0, 0, 0
    for x, y in distribution:
        momentum_1 += y * x
        momentum_2 += y * x * x
        momentum_3 += y * x * x * x
        momentum_4 += y * x * x * x * x
        total += y
    total = float(total)
    if total == 0:
        total = 1.0
    return (momentum_1 / total, momentum_2 / total, momentum_3 / total,
            momentum_4 / total)


def _histogram(freqdist):
    counter = Counter()
    for x, y in freqdist:
        counter[y] += 1
    return sorted(counter.most_common())


class Statistics(PyPLNTask):
    def process(self, document):
        freqdist = document['freqdist'] # eg: [('word', 100), ('other', 97)]
        sentences = document['sentences'] # eg: [['1st', 'sentence.'], ['2nd!']]

        momenta = _get_momenta(_histogram(freqdist))
        total_tokens = float(sum(dict(freqdist).values()))
        if total_tokens == 0:
            total_tokens = 1.0
        repertoire = len(freqdist) / total_tokens

        sentence_repertoire_sum = 0
        for sentence in sentences:
            sentence_length = float(len(sentence))
            if sentence_length == 0:
                sentence_length = 1.0
            sentence_repertoire_sum += len(set(sentence)) / sentence_length
        number_of_sentences = len(sentences)
        if number_of_sentences == 0:
            average_sentence_length = 0
            sentence_repertoire = 0
        else:
            average_sentence_length = total_tokens / number_of_sentences
            sentence_repertoire = sentence_repertoire_sum / number_of_sentences

        return {'momentum_1': momenta[0],
                'momentum_2': momenta[1],
                'momentum_3': momenta[2],
                'momentum_4': momenta[3],
                'repertoire': repertoire,
                'average_sentence_length': average_sentence_length,
                'average_sentence_repertoire': sentence_repertoire}
NAMD/pypln.backend
pypln/backend/workers/statistics.py
Python
gpl-3.0
2,790
[ "NAMD" ]
8a647b8b294e7d1527935ac422d8e93014bc7d3cb7ce927338d412502ba9969e
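In the Statistics worker above, _histogram converts a freqdist into (frequency, number-of-tokens-with-that-frequency) pairs, and _get_momenta computes the first four raw moments of that distribution. A tiny worked example of the same arithmetic, inlined so it needs no PyPLN install:

from collections import Counter

freqdist = [('word', 3), ('other', 3), ('rare', 1)]

# _histogram: for each frequency, count how many distinct tokens have it.
hist = sorted(Counter(freq for _, freq in freqdist).most_common())
assert hist == [(1, 1), (3, 2)]

# _get_momenta: raw moments of the frequency distribution.
total = float(sum(count for _, count in hist))
momenta = [sum(count * freq ** k for freq, count in hist) / total
           for k in (1, 2, 3, 4)]
assert abs(momenta[0] - 7 / 3.0) < 1e-12    # mean frequency: (1*1 + 3*2) / 3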
# Copyright (c) 2009-2021 The Regents of the University of Michigan # This file is part of the HOOMD-blue project, released under the BSD 3-Clause # License. """Synced list utility classes.""" from collections.abc import MutableSequence import inspect from copy import copy class _PartialIsInstance: """Allows partial function application of isinstance over classes. This is a solution to avoid lambdas to enable pickling. We cannot use functools.partial since we need to partially apply the second argument. """ def __init__(self, classes): self.classes = classes def __call__(self, instance): return isinstance(instance, self.classes) class _PartialGetAttr: """Allows partial function application of isinstance over attributes. This is a solution to avoid lambdas to enable pickling. We cannot use functools.partial since we need to partially apply the second argument. """ def __init__(self, attr): self.attr = attr def __call__(self, obj): return getattr(obj, self.attr) def identity(obj): """Returns obj.""" return obj class _SimulationPlaceHolder: """Used to ensure objects are not added to two locations at once.""" def __init__(self, obj): self._id = id(obj) def __eq__(self, other): return isinstance(other, type(self)) and self._id == other._id class SyncedList(MutableSequence): """Provides syncing and validation for a Python and C++ list. Warning: This class should not be instantiated by users, and this documentation is mostly for developers of HOOMD-blue. The class is documentated to highlight the object's API which is that of a `MutableSequence`. This class ensures that standard list methods affect both Python and C++. Args: validation (callable or class): A callable that takes one argument and returns a boolean based on whether the value is appropriate for the list. Can raise ValueError for helpful diagnosis of the problem with validation. Alternatively validation can be a class which indicates the expected type of items of the list. to_synced_list (callable, optional): A callable that takes one argument (a Python SyncedList) and does necessary conversion before adding to the C++ list. Defaults to simply passing the object to C++. iterable (iterable, optional): An iterable whose members are valid members of the SyncedList instance. Defaults to None which causes SyncedList to start with an empty list. callable_class (bool, optional): If a class is passed as validation and this is ``True`` (defaults to ``False``), then the class will be treated as a callable and not used for type checking. attach_members (bool, optional): Whether list members can be attached (defaults to ``True``). If ``True`` then the `SyncedList` object handles adding, attaching, detaching, and removing. If not, these steps are skipped regardless of synced status. """ # Also guarantees that lists remain in same order when using the public API. def __init__(self, validation, to_synced_list=None, iterable=None, callable_class=False, attach_members=True): self._attach_members = attach_members if to_synced_list is None: to_synced_list = identity if inspect.isclass(validation) and not callable_class: self._validate = _PartialIsInstance(validation) else: self._validate = validation self._to_synced_list_conversion = to_synced_list self._synced = False self._simulation = _SimulationPlaceHolder(self) self._list = [] if iterable is not None: for it in iterable: self.append(it) def __len__(self): """int: Length of the list.""" return len(self._list) def __setitem__(self, index, value): """Set self[index] to value. 
Detaches removed value and syncs cpp_list if necessary. """ # Convert negative to positive indices and validate index index = self._handle_int(index) value = self._validate_or_error(value) self._attach_value(value) # If synced need to change cpp_list and detach operation before # changing python list if self._synced: self._synced_list[index] = \ self._to_synced_list_conversion(value) self._detach_value(self._list[index]) self._list[index] = value def __getitem__(self, index): """Grabs the python list item.""" index = self._handle_index(index) # since _handle_index always returns a range or int we can safely use an # isinstance check here. if isinstance(index, range): return [self._list[i] for i in index] return self._list[index] def __delitem__(self, index): """Deletes an item from list. Handles detaching if necessary.""" index = self._handle_index(index) # since _handle_index always returns a range or int we can safely use an # isinstance check here. if isinstance(index, range): # We must iterate from highest value to lowest to ensure we don't # accidentally try to delete an index that doesn't exist any more. for i in sorted(index, reverse=True): del self[i] return # Since delitem may not del the underlying object, we need to # manually call detach here. if self._synced: del self._synced_list[index] old_value = self._list.pop(index) self._detach_value(old_value) def insert(self, index, value): """Insert value to list at index, handling list syncing.""" value = self._validate_or_error(value) self._attach_value(value) # Wrap index like normal but allow for inserting a new element to the # end of the list. if index < len(self): index = self._handle_int(index) if self._synced: self._synced_list.insert(index, self._to_synced_list_conversion(value)) self._list.insert(index, value) def _handle_int(self, integer): """Converts negative indices to positive and validates index.""" if integer < 0: if -integer > len(self): raise IndexError( f"Negative index {integer} is too small for list of length " f"{len(self)}") return integer % max(1, len(self)) if integer >= len(self): raise IndexError( f"Index {integer} is outside bounds of a length {len(self)}" f"list.") return integer def _handle_index(self, index): if isinstance(index, slice): return self._handle_slice(index) return self._handle_int(index) def _handle_slice(self, index): return range(0, len(self))[index] def _synced_iter(self): """Iterate over values in the list. Does nothing when not synced.""" if self._synced: yield from self._synced_list def _attach_value(self, value, raise_if_added=True): """Attaches and/or adds value to simulation if unattached. Raises an error if value is already in this or another list. 
""" if not self._attach_members: return if raise_if_added and value._added: raise RuntimeError(f"Object {value} cannot be added to two lists.") value._add(self._simulation) if self._synced: value._attach() def _detach_value(self, value): """Detaches and/or removes value to simulation if attached.""" if not self._attach_members: return if self._synced: value._detach() if value._added: value._remove() def _validate_or_error(self, value): """Complete error checking and processing of value.""" try: if self._validate(value): return value else: raise ValueError(f"Value {value} could not be validated.") except ValueError as verr: raise ValueError(f"Validation failed: {verr.args[0]}") from verr def _sync(self, simulation, synced_list): """Attach all list items and update for automatic attachment.""" self._simulation = simulation self._synced_list = synced_list self._synced = True # We use a try except block here to maintain valid state (_synced in # this case) even when facing an error. try: for item in self: self._attach_value(item, False) self._synced_list.append(self._to_synced_list_conversion(item)) except Exception as err: self._synced = False raise err def _unsync(self): """Detach all items, clear _synced_list, and remove cpp references.""" if not self._synced: return # while not strictly necessary we check self._attach_members here to # avoid looping unless necessary (_detach_value checks # self._attach_members as well) making the check a slight performance # bump for non-attaching members. if self._attach_members: for item in self: self._detach_value(item) self._simulation = _SimulationPlaceHolder(self) del self._synced_list self._synced = False def __getstate__(self): """Get state for pickling.""" state = copy(self.__dict__) state['_simulation'] = None state.pop('_synced_list', None) return state def __eq__(self, other): """Test for equality.""" return (len(self) == len(other) and all(a == b for a, b in zip(self, other)))
joaander/hoomd-blue
hoomd/data/syncedlist.py
Python
bsd-3-clause
10,180
[ "HOOMD-blue" ]
e958ad1239dc1d93a920ddab2c9ce895b3a30b6aa9eb920859130e1b10774f9b
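SyncedList above behaves like a normal MutableSequence that validates every insert and, once synced, mirrors each change into the C++ list. With attach_members=False it can be exercised on plain values; the sketch assumes a working hoomd install so the module at this record's path imports.

from hoomd.data.syncedlist import SyncedList

# attach_members=False skips the _add/_attach bookkeeping, so plain ints work.
sl = SyncedList(int, attach_members=False, iterable=[1, 2, 3])
sl.append(4)      # MutableSequence.append goes through insert + validation
sl[0] = 10        # __setitem__ validates too
assert list(sl) == [10, 2, 3, 4]

try:
    sl.append('not an int')       # fails the isinstance(value, int) check
except ValueError as err:
    print('rejected:', err)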
# -*- coding: utf-8 -*- """ Authors: Gonzalo E. Espinoza-Dávalos IHE Delft 2017 Contact: g.espinoza@un-ihe.org Repository: https://github.com/gespinoza/waterpix Module: waterpix """ import os from .davgis import NetCDF_to_Raster import netCDF4 def output_nc_to_tiffs(output_nc, output_path): """ Create raster files from the variables in the output netcdf file """ # Output folders if not os.path.isdir(output_path): os.mkdir(output_path) path_y = os.path.join(output_path, 'yearly') path_m = os.path.join(output_path, 'monthly') path_a = os.path.join(output_path, 'additional') if not os.path.isdir(path_y): os.mkdir(path_y) if not os.path.isdir(path_m): os.mkdir(path_m) if not os.path.isdir(path_a): os.mkdir(path_a) # Read netcdf file nc_file = netCDF4.Dataset(output_nc, 'r') variables_ls = nc_file.variables.keys() time_y = nc_file.variables['time_yyyy'][:] time_m = nc_file.variables['time_yyyymm'][:] nc_file.close() # Remove variables for variable in ['latitude', 'longitude', 'time_yyyy', 'time_yyyymm', 'RoundCode', 'a_Y', 'b_Y', 'crs']: variables_ls.remove(variable) # Add sub-folders for variable in variables_ls: if '_Y' in variable: if not os.path.exists(os.path.join(path_y, variable)): os.mkdir(os.path.join(path_y, variable)) elif '_M' in variable: if not os.path.exists(os.path.join(path_m, variable)): os.mkdir(os.path.join(path_m, variable)) else: if not os.path.exists(os.path.join(path_a, variable)): os.mkdir(os.path.join(path_a, variable)) # Main Loop for variable in variables_ls: # Yearly rasters if '_Y' in variable: for time in time_y: print '{0}\t{1}'.format(variable, time) file_name = variable[:-1] + '{0}.tif'.format(time) output_tiff = os.path.join(path_y, variable, file_name) NetCDF_to_Raster(output_nc, output_tiff, variable, x_variable='longitude', y_variable='latitude', time={'variable': 'time_yyyy', 'value': time}) # Monthly rasters elif '_M' in variable: for time in time_m: print '{0}\t{1}'.format(variable, time) file_name = variable[:-1] + '{0}.tif'.format(time) output_tiff = os.path.join(path_m, variable, file_name) print output_tiff NetCDF_to_Raster(output_nc, output_tiff, variable, x_variable='longitude', y_variable='latitude', time={'variable': 'time_yyyymm', 'value': time}) # Additional rasters else: print '{0}'.format(variable) file_name = variable[:-1] + '.tif' output_tiff = os.path.join(path_a, variable, file_name) NetCDF_to_Raster(output_nc, output_tiff, variable, x_variable='longitude', y_variable='latitude') # Return return output_path
wateraccounting/wa
Models/waterpix/wp_gdal/output_nc_to_tiffs.py
Python
apache-2.0
3,388
[ "NetCDF" ]
19f71660376fe3e5a6eeefbd9fb4bf806cd02bee307490ae57ca50f1b46b41e3
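output_nc_to_tiffs above routes each NetCDF variable into yearly/, monthly/ or additional/ by looking for a _Y or _M marker in its name; that is the only branching logic, so it is worth isolating. The variable names below are made up.

import os

def route(variable, base='output'):
    # Mirrors the folder choice made in output_nc_to_tiffs above.
    if '_Y' in variable:
        sub = 'yearly'
    elif '_M' in variable:
        sub = 'monthly'
    else:
        sub = 'additional'
    return os.path.join(base, sub, variable)

assert route('Runoff_Y') == os.path.join('output', 'yearly', 'Runoff_Y')
assert route('ETa_M') == os.path.join('output', 'monthly', 'ETa_M')
assert route('ThetaSat') == os.path.join('output', 'additional', 'ThetaSat')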
#!/bin/env python import vtk import PyQt4 from PyQt4 import QtCore, QtGui from vtk.util.colors import * from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor from ui_testbed import Ui_TestbedWindow from imgVolRender import imgVolRender class MainWindow (QtGui.QMainWindow): def __init__(self, dataset=None, parent=None): super(MainWindow, self).__init__ (parent) self.setWindowTitle( "GUItestbed for LegiDTI v1.0" ) self.m_ui = Ui_TestbedWindow() self.m_ui.setupUi( self ) self.m_ui.statusbar.setStatusTip ( "ready to load" ) self.m_ui.renderView.Initialize() self.m_ui.renderView.Start() self.oldsz = None self.imgVRender = imgVolRender(self) self.dataset = dataset self.render = None self.lighting = True def event(self, e): if e.type == QtCore.QEvent.KeyPress and e.text() == "l": self.lighting = not self.lighting else: return super(MainWindow, self).event(e) return True def resizeEvent(self, QResizeEvent): if not self.oldsz: self.oldsz = QResizeEvent.size() else: #sz = QResizeEvent.size() - QtCore.QSize(10, 3*self.m_ui.statusbar.size().height()) sz = QResizeEvent.size() #osz = QResizeEvent.oldSize() osz = self.oldsz wf = sz.width()*1.0/osz.width() hf = sz.height()*1.0/osz.height() orsz = self.m_ui.renderView.size() orsz.setWidth ( orsz.width() * wf ) orsz.setHeight( orsz.height() * hf ) self.m_ui.renderView.resize( orsz ) ''' orect = self.m_ui.renderView.geometry() rect = QtCore.QRect( orect.x() * wf, orect.y() * hf, orsz.width(), orsz.height() ) self.m_ui.renderView.setGeometry( rect ) ''' self.oldsz = sz @QtCore.pyqtSlot() def on_actionLoad_triggered(self): #QtGui.QMessageBox.information(self, "Load...", "Load source dataset.") fndata = QtGui.QFileDialog.getOpenFileName(filter="All (*.*);; NIfTI (*.nii *.nii.gz)") print fndata self.dataset = str(fndata) self.draw() def draw(self): if self.render: self.m_ui.renderView.GetRenderWindow().RemoveRenderer(self.render) self.render = self.imgVRender.mount(self.dataset) self.render.SetBackground(slate_grey) self.m_ui.renderView.GetRenderWindow().AddRenderer(self.render) print "Numer of Volumes in the render: %d" % (self.render.VisibleVolumeCount()/2) def show(self): super(MainWindow, self).show() self.draw() if __name__ == "__main__": import sys if len(sys.argv) < 2: print >>sys.stderr, "No data provided, bailed out." sys.exit(1) app = QtGui.QApplication( sys.argv ) win = MainWindow(sys.argv[1]) win.show() ret = app.exec_() sys.exit( ret ) #/* sts=8 ts=8 sw=80 tw=8 */
chapering/PyVolRender
vmDTIRender.py
Python
gpl-2.0
2,628
[ "VTK" ]
9841df5ea9e8bb174263e4476dec9cd0a57ff1f91607f592095f45168535756d
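MainWindow.resizeEvent above keeps the VTK render view proportional to the window by scaling it with the width and height ratios of the new and previous sizes. The arithmetic can be checked without Qt or VTK:

def scaled_view(view_w, view_h, old_sz, new_sz):
    # Same proportional rescaling as resizeEvent above (wf, hf factors).
    wf = new_sz[0] / float(old_sz[0])
    hf = new_sz[1] / float(old_sz[1])
    return int(view_w * wf), int(view_h * hf)

# Window grows from 800x600 to 1200x600: the view widens, height is unchanged.
assert scaled_view(640, 480, (800, 600), (1200, 600)) == (960, 480)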
# coding: utf-8 import numpy as np from .. import img_as_float from ..restoration._denoise_cy import _denoise_bilateral, _denoise_tv_bregman from .._shared.utils import _mode_deprecations def denoise_bilateral(image, win_size=5, sigma_range=None, sigma_spatial=1, bins=10000, mode='constant', cval=0): """Denoise image using bilateral filter. This is an edge-preserving and noise reducing denoising filter. It averages pixels based on their spatial closeness and radiometric similarity. Spatial closeness is measured by the gaussian function of the euclidian distance between two pixels and a certain standard deviation (`sigma_spatial`). Radiometric similarity is measured by the gaussian function of the euclidian distance between two color values and a certain standard deviation (`sigma_range`). Parameters ---------- image : ndarray, shape (M, N[, 3]) Input image, 2D grayscale or RGB. win_size : int Window size for filtering. sigma_range : float Standard deviation for grayvalue/color distance (radiometric similarity). A larger value results in averaging of pixels with larger radiometric differences. Note, that the image will be converted using the `img_as_float` function and thus the standard deviation is in respect to the range ``[0, 1]``. If the value is ``None`` the standard deviation of the ``image`` will be used. sigma_spatial : float Standard deviation for range distance. A larger value results in averaging of pixels with larger spatial differences. bins : int Number of discrete values for gaussian weights of color filtering. A larger value results in improved accuracy. mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'} How to handle values outside the image borders. See `numpy.pad` for detail. cval : string Used in conjunction with mode 'constant', the value outside the image boundaries. Returns ------- denoised : ndarray Denoised image. References ---------- .. [1] http://users.soe.ucsc.edu/~manduchi/Papers/ICCV98.pdf Example ------- >>> from skimage import data, img_as_float >>> astro = img_as_float(data.astronaut()) >>> astro = astro[220:300, 220:320] >>> noisy = astro + 0.6 * astro.std() * np.random.random(astro.shape) >>> noisy = np.clip(noisy, 0, 1) >>> denoised = denoise_bilateral(noisy, sigma_range=0.05, sigma_spatial=15) """ mode = _mode_deprecations(mode) return _denoise_bilateral(image, win_size, sigma_range, sigma_spatial, bins, mode, cval) def denoise_tv_bregman(image, weight, max_iter=100, eps=1e-3, isotropic=True): """Perform total-variation denoising using split-Bregman optimization. Total-variation denoising (also know as total-variation regularization) tries to find an image with less total-variation under the constraint of being similar to the input image, which is controlled by the regularization parameter. Parameters ---------- image : ndarray Input data to be denoised (converted using img_as_float`). weight : float Denoising weight. The smaller the `weight`, the more denoising (at the expense of less similarity to the `input`). The regularization parameter `lambda` is chosen as `2 * weight`. eps : float, optional Relative difference of the value of the cost function that determines the stop criterion. The algorithm stops when:: SUM((u(n) - u(n-1))**2) < eps max_iter : int, optional Maximal number of iterations used for the optimization. isotropic : boolean, optional Switch between isotropic and anisotropic TV denoising. Returns ------- u : ndarray Denoised image. References ---------- .. [1] http://en.wikipedia.org/wiki/Total_variation_denoising .. 
[2] Tom Goldstein and Stanley Osher, "The Split Bregman Method For L1 Regularized Problems", ftp://ftp.math.ucla.edu/pub/camreport/cam08-29.pdf .. [3] Pascal Getreuer, "Rudin–Osher–Fatemi Total Variation Denoising using Split Bregman" in Image Processing On Line on 2012–05–19, http://www.ipol.im/pub/art/2012/g-tvd/article_lr.pdf .. [4] http://www.math.ucsb.edu/~cgarcia/UGProjects/BregmanAlgorithms_JacquelineBush.pdf """ return _denoise_tv_bregman(image, weight, max_iter, eps, isotropic) def _denoise_tv_chambolle_3d(im, weight=0.2, eps=2.e-4, n_iter_max=200): """Perform total-variation denoising on 3D images. Parameters ---------- im : ndarray 3-D input data to be denoised. weight : float, optional Denoising weight. The greater `weight`, the more denoising (at the expense of fidelity to `input`). eps : float, optional Relative difference of the value of the cost function that determines the stop criterion. The algorithm stops when: (E_(n-1) - E_n) < eps * E_0 n_iter_max : int, optional Maximal number of iterations used for the optimization. Returns ------- out : ndarray Denoised array of floats. Notes ----- Rudin, Osher and Fatemi algorithm. """ px = np.zeros_like(im) py = np.zeros_like(im) pz = np.zeros_like(im) gx = np.zeros_like(im) gy = np.zeros_like(im) gz = np.zeros_like(im) d = np.zeros_like(im) i = 0 while i < n_iter_max: d = - px - py - pz d[1:] += px[:-1] d[:, 1:] += py[:, :-1] d[:, :, 1:] += pz[:, :, :-1] out = im + d E = (d ** 2).sum() gx[:-1] = np.diff(out, axis=0) gy[:, :-1] = np.diff(out, axis=1) gz[:, :, :-1] = np.diff(out, axis=2) norm = np.sqrt(gx ** 2 + gy ** 2 + gz ** 2) E += weight * norm.sum() norm *= 0.5 / weight norm += 1. px -= 1. / 6. * gx px /= norm py -= 1. / 6. * gy py /= norm pz -= 1 / 6. * gz pz /= norm E /= float(im.size) if i == 0: E_init = E E_previous = E else: if np.abs(E_previous - E) < eps * E_init: break else: E_previous = E i += 1 return out def _denoise_tv_chambolle_2d(im, weight=0.2, eps=2.e-4, n_iter_max=200): """Perform total-variation denoising on 2D images. Parameters ---------- im : ndarray Input data to be denoised. weight : float, optional Denoising weight. The greater `weight`, the more denoising (at the expense of fidelity to `input`) eps : float, optional Relative difference of the value of the cost function that determines the stop criterion. The algorithm stops when: (E_(n-1) - E_n) < eps * E_0 n_iter_max : int, optional Maximal number of iterations used for the optimization. Returns ------- out : ndarray Denoised array of floats. Notes ----- The principle of total variation denoising is explained in http://en.wikipedia.org/wiki/Total_variation_denoising. This code is an implementation of the algorithm of Rudin, Fatemi and Osher that was proposed by Chambolle in [1]_. References ---------- .. [1] A. Chambolle, An algorithm for total variation minimization and applications, Journal of Mathematical Imaging and Vision, Springer, 2004, 20, 89-97. 
""" px = np.zeros_like(im) py = np.zeros_like(im) gx = np.zeros_like(im) gy = np.zeros_like(im) d = np.zeros_like(im) i = 0 while i < n_iter_max: d = -px - py d[1:] += px[:-1] d[:, 1:] += py[:, :-1] out = im + d E = (d ** 2).sum() gx[:-1] = np.diff(out, axis=0) gy[:, :-1] = np.diff(out, axis=1) norm = np.sqrt(gx ** 2 + gy ** 2) E += weight * norm.sum() norm *= 0.5 / weight norm += 1 px -= 0.25 * gx px /= norm py -= 0.25 * gy py /= norm E /= float(im.size) if i == 0: E_init = E E_previous = E else: if np.abs(E_previous - E) < eps * E_init: break else: E_previous = E i += 1 return out def denoise_tv_chambolle(im, weight=0.2, eps=2.e-4, n_iter_max=200, multichannel=False): """Perform total-variation denoising on 2D and 3D images. Parameters ---------- im : ndarray (2d or 3d) of ints, uints or floats Input data to be denoised. `im` can be of any numeric type, but it is cast into an ndarray of floats for the computation of the denoised image. weight : float, optional Denoising weight. The greater `weight`, the more denoising (at the expense of fidelity to `input`). eps : float, optional Relative difference of the value of the cost function that determines the stop criterion. The algorithm stops when: (E_(n-1) - E_n) < eps * E_0 n_iter_max : int, optional Maximal number of iterations used for the optimization. multichannel : bool, optional Apply total-variation denoising separately for each channel. This option should be true for color images, otherwise the denoising is also applied in the 3rd dimension. Returns ------- out : ndarray Denoised image. Notes ----- Make sure to set the multichannel parameter appropriately for color images. The principle of total variation denoising is explained in http://en.wikipedia.org/wiki/Total_variation_denoising The principle of total variation denoising is to minimize the total variation of the image, which can be roughly described as the integral of the norm of the image gradient. Total variation denoising tends to produce "cartoon-like" images, that is, piecewise-constant images. This code is an implementation of the algorithm of Rudin, Fatemi and Osher that was proposed by Chambolle in [1]_. References ---------- .. [1] A. Chambolle, An algorithm for total variation minimization and applications, Journal of Mathematical Imaging and Vision, Springer, 2004, 20, 89-97. Examples -------- 2D example on astronaut image: >>> from skimage import color, data >>> img = color.rgb2gray(data.astronaut())[:50, :50] >>> img += 0.5 * img.std() * np.random.randn(*img.shape) >>> denoised_img = denoise_tv_chambolle(img, weight=60) 3D example on synthetic data: >>> x, y, z = np.ogrid[0:20, 0:20, 0:20] >>> mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2 >>> mask = mask.astype(np.float) >>> mask += 0.2*np.random.randn(*mask.shape) >>> res = denoise_tv_chambolle(mask, weight=100) """ im_type = im.dtype if not im_type.kind == 'f': im = img_as_float(im) if im.ndim == 2: out = _denoise_tv_chambolle_2d(im, weight, eps, n_iter_max) elif im.ndim == 3: if multichannel: out = np.zeros_like(im) for c in range(im.shape[2]): out[..., c] = _denoise_tv_chambolle_2d(im[..., c], weight, eps, n_iter_max) else: out = _denoise_tv_chambolle_3d(im, weight, eps, n_iter_max) else: raise ValueError('only 2-d and 3-d images may be denoised with this ' 'function') return out
jwiggins/scikit-image
skimage/restoration/_denoise.py
Python
bsd-3-clause
11,808
[ "Gaussian" ]
b95e622a54208c272dd6ab3fdf735d1c38cb8cd8bce0b02714c5adaf5852b4c2
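The denoising record above already documents usage in its docstrings; the sketch below runs the 2D docstring example end to end and checks that total variation actually drops. It assumes a scikit-image install exposing denoise_tv_chambolle from skimage.restoration, and follows this version's convention that a larger weight means more denoising.

import numpy as np
from skimage import color, data
from skimage.restoration import denoise_tv_chambolle

img = color.rgb2gray(data.astronaut())[:50, :50]
noisy = img + 0.5 * img.std() * np.random.randn(*img.shape)
denoised = denoise_tv_chambolle(noisy, weight=60)   # weight as in the docstring

def total_variation(a):
    # Sum of absolute forward differences along both axes.
    return np.abs(np.diff(a, axis=0)).sum() + np.abs(np.diff(a, axis=1)).sum()

assert total_variation(denoised) < total_variation(noisy)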
#! /usr/bin/env python """Test script for the bsddb C module by Roger E. Masse Adapted to unittest format and expanded scope by Raymond Hettinger """ import os, sys import copy import bsddb import dbhash # Just so we know it's imported import unittest from test import test_support class TestBSDDB(unittest.TestCase): openflag = 'c' def setUp(self): self.f = self.openmethod[0](self.fname, self.openflag, cachesize=32768) self.d = dict(q='Guido', w='van', e='Rossum', r='invented', t='Python', y='') for k, v in self.d.iteritems(): self.f[k] = v def tearDown(self): self.f.sync() self.f.close() if self.fname is None: return try: os.remove(self.fname) except os.error: pass def test_getitem(self): for k, v in self.d.iteritems(): self.assertEqual(self.f[k], v) def test_len(self): self.assertEqual(len(self.f), len(self.d)) def test_change(self): self.f['r'] = 'discovered' self.assertEqual(self.f['r'], 'discovered') self.assert_('r' in self.f.keys()) self.assert_('discovered' in self.f.values()) def test_close_and_reopen(self): if self.fname is None: # if we're using an in-memory only db, we can't reopen it # so finish here. return self.f.close() self.f = self.openmethod[0](self.fname, 'w') for k, v in self.d.iteritems(): self.assertEqual(self.f[k], v) def assertSetEquals(self, seqn1, seqn2): self.assertEqual(set(seqn1), set(seqn2)) def test_mapping_iteration_methods(self): f = self.f d = self.d self.assertSetEquals(d, f) self.assertSetEquals(d.keys(), f.keys()) self.assertSetEquals(d.values(), f.values()) self.assertSetEquals(d.items(), f.items()) self.assertSetEquals(d.iterkeys(), f.iterkeys()) self.assertSetEquals(d.itervalues(), f.itervalues()) self.assertSetEquals(d.iteritems(), f.iteritems()) def test_iter_while_modifying_values(self): di = iter(self.d) while 1: try: key = di.next() self.d[key] = 'modified '+key except StopIteration: break # it should behave the same as a dict. modifying values # of existing keys should not break iteration. (adding # or removing keys should) loops_left = len(self.f) fi = iter(self.f) while 1: try: key = fi.next() self.f[key] = 'modified '+key loops_left -= 1 except StopIteration: break self.assertEqual(loops_left, 0) self.test_mapping_iteration_methods() def test_iter_abort_on_changed_size(self): def DictIterAbort(): di = iter(self.d) while 1: try: di.next() self.d['newkey'] = 'SPAM' except StopIteration: break self.assertRaises(RuntimeError, DictIterAbort) def DbIterAbort(): fi = iter(self.f) while 1: try: fi.next() self.f['newkey'] = 'SPAM' except StopIteration: break self.assertRaises(RuntimeError, DbIterAbort) def test_iteritems_abort_on_changed_size(self): def DictIteritemsAbort(): di = self.d.iteritems() while 1: try: di.next() self.d['newkey'] = 'SPAM' except StopIteration: break self.assertRaises(RuntimeError, DictIteritemsAbort) def DbIteritemsAbort(): fi = self.f.iteritems() while 1: try: key, value = fi.next() del self.f[key] except StopIteration: break self.assertRaises(RuntimeError, DbIteritemsAbort) def test_iteritems_while_modifying_values(self): di = self.d.iteritems() while 1: try: k, v = di.next() self.d[k] = 'modified '+v except StopIteration: break # it should behave the same as a dict. modifying values # of existing keys should not break iteration. 
(adding # or removing keys should) loops_left = len(self.f) fi = self.f.iteritems() while 1: try: k, v = fi.next() self.f[k] = 'modified '+v loops_left -= 1 except StopIteration: break self.assertEqual(loops_left, 0) self.test_mapping_iteration_methods() def test_first_next_looping(self): items = [self.f.first()] for i in xrange(1, len(self.f)): items.append(self.f.next()) self.assertSetEquals(items, self.d.items()) def test_previous_last_looping(self): items = [self.f.last()] for i in xrange(1, len(self.f)): items.append(self.f.previous()) self.assertSetEquals(items, self.d.items()) def test_first_while_deleting(self): # Test for bug 1725856 self.assert_(len(self.d) >= 2, "test requires >=2 items") for _ in self.d: key = self.f.first()[0] del self.f[key] self.assertEqual([], self.f.items(), "expected empty db after test") def test_last_while_deleting(self): # Test for bug 1725856's evil twin self.assert_(len(self.d) >= 2, "test requires >=2 items") for _ in self.d: key = self.f.last()[0] del self.f[key] self.assertEqual([], self.f.items(), "expected empty db after test") def test_set_location(self): self.assertEqual(self.f.set_location('e'), ('e', self.d['e'])) def test_contains(self): for k in self.d: self.assert_(k in self.f) self.assert_('not here' not in self.f) def test_has_key(self): for k in self.d: self.assert_(self.f.has_key(k)) self.assert_(not self.f.has_key('not here')) def test_clear(self): self.f.clear() self.assertEqual(len(self.f), 0) def test__no_deadlock_first(self, debug=0): # do this so that testers can see what function we're in in # verbose mode when we deadlock. sys.stdout.flush() # in pybsddb's _DBWithCursor this causes an internal DBCursor # object is created. Other test_ methods in this class could # inadvertently cause the deadlock but an explicit test is needed. if debug: print "A" k,v = self.f.first() if debug: print "B", k self.f[k] = "deadlock. do not pass go. do not collect $200." if debug: print "C" # if the bsddb implementation leaves the DBCursor open during # the database write and locking+threading support is enabled # the cursor's read lock will deadlock the write lock request.. # test the iterator interface if True: if debug: print "D" i = self.f.iteritems() k,v = i.next() if debug: print "E" self.f[k] = "please don't deadlock" if debug: print "F" while 1: try: k,v = i.next() except StopIteration: break if debug: print "F2" i = iter(self.f) if debug: print "G" while i: try: if debug: print "H" k = i.next() if debug: print "I" self.f[k] = "deadlocks-r-us" if debug: print "J" except StopIteration: i = None if debug: print "K" # test the legacy cursor interface mixed with writes self.assert_(self.f.first()[0] in self.d) k = self.f.next()[0] self.assert_(k in self.d) self.f[k] = "be gone with ye deadlocks" self.assert_(self.f[k], "be gone with ye deadlocks") def test_for_cursor_memleak(self): # do the bsddb._DBWithCursor iterator internals leak cursors? 
nc1 = len(self.f._cursor_refs) # create iterator i = self.f.iteritems() nc2 = len(self.f._cursor_refs) # use the iterator (should run to the first yield, creating the cursor) k, v = i.next() nc3 = len(self.f._cursor_refs) # destroy the iterator; this should cause the weakref callback # to remove the cursor object from self.f._cursor_refs del i nc4 = len(self.f._cursor_refs) self.assertEqual(nc1, nc2) self.assertEqual(nc1, nc4) self.assert_(nc3 == nc1+1) def test_popitem(self): k, v = self.f.popitem() self.assert_(k in self.d) self.assert_(v in self.d.values()) self.assert_(k not in self.f) self.assertEqual(len(self.d)-1, len(self.f)) def test_pop(self): k = 'w' v = self.f.pop(k) self.assertEqual(v, self.d[k]) self.assert_(k not in self.f) self.assert_(v not in self.f.values()) self.assertEqual(len(self.d)-1, len(self.f)) def test_get(self): self.assertEqual(self.f.get('NotHere'), None) self.assertEqual(self.f.get('NotHere', 'Default'), 'Default') self.assertEqual(self.f.get('q', 'Default'), self.d['q']) def test_setdefault(self): self.assertEqual(self.f.setdefault('new', 'dog'), 'dog') self.assertEqual(self.f.setdefault('r', 'cat'), self.d['r']) def test_update(self): new = dict(y='life', u='of', i='brian') self.f.update(new) self.d.update(new) for k, v in self.d.iteritems(): self.assertEqual(self.f[k], v) def test_keyordering(self): if self.openmethod[0] is not bsddb.btopen: return keys = self.d.keys() keys.sort() self.assertEqual(self.f.first()[0], keys[0]) self.assertEqual(self.f.next()[0], keys[1]) self.assertEqual(self.f.last()[0], keys[-1]) self.assertEqual(self.f.previous()[0], keys[-2]) self.assertEqual(list(self.f), keys) class TestBTree(TestBSDDB): fname = test_support.TESTFN openmethod = [bsddb.btopen] class TestBTree_InMemory(TestBSDDB): fname = None openmethod = [bsddb.btopen] class TestBTree_InMemory_Truncate(TestBSDDB): fname = None openflag = 'n' openmethod = [bsddb.btopen] class TestHashTable(TestBSDDB): fname = test_support.TESTFN openmethod = [bsddb.hashopen] class TestHashTable_InMemory(TestBSDDB): fname = None openmethod = [bsddb.hashopen] ## # (bsddb.rnopen,'Record Numbers'), 'put' for RECNO for bsddb 1.85 ## # appears broken... at least on ## # Solaris Intel - rmasse 1/97 def test_main(verbose=None): test_support.run_unittest( TestBTree, TestHashTable, TestBTree_InMemory, TestHashTable_InMemory, TestBTree_InMemory_Truncate, ) if __name__ == "__main__": test_main(verbose=True)
leighpauls/k2cro4
third_party/python_26/Lib/test/test_bsddb.py
Python
bsd-3-clause
11,518
[ "Brian" ]
a91e528059fcb9f8e654fee1c27848da428cbe6bacc6234201c939bc9a50d147
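The bsddb tests above pin down an invariant the database shares with dict: replacing values mid-iteration is legal, but changing the key set aborts the iterator with RuntimeError. The same contract can be seen on a plain dict, with no bsddb needed:

d = dict(q='Guido', w='van', e='Rossum')

# Changing values during iteration is allowed...
for k in d:
    d[k] = 'modified ' + d[k]

# ...but changing the size aborts the iterator, as the tests assert for both.
try:
    for k in d:
        d['newkey'] = 'SPAM'
except RuntimeError as err:
    print('iteration aborted:', err)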
"""The SOINN regressor.""" import isoinn2 from kde import density from __gaussian_custom import norm_pdf_multivariate from numpy import array,diag,matrix import time from pygraph.classes.graph import graph from pygraph.algorithms.accessibility import connected_components import itertools from copy import deepcopy from gks import GKS class ISOINNregressor: """Regression interface based on SSL-GKS and SOINN. smooth can be set to None or real number, normally falls in [-1,0]. If set to None, SSL will be employed to estimate its value. response_dimension is integer, means the number of response variables. K is integer which is the number of neurons for kernel smoothing, larger K means little details but more smoothed predictions. The rest of the parameters are GNG training parameters.""" Pis = [] #:Distribution of the neuron populations. bands = [] #:Bandwidth for visualization. nodes = [] #:Weights of the neurons. sigmax = 0 ux = [] uy = [] gr = 0 #:Topology structure of neurons. counts = 0 standard_deviation = 0 smooth = -0.4 #:Smooth parameter for kernel smoothing, if set to None, SSL smooth parameter selection will be employed. reg_model = None __res_dimension = 1 __global = False __inn_parameter_list = [] K = 10 #:Number of neurons selected for kernel smoothing. def __init__(self, smooth = None, response_dimension = 1, K = 10, age_max = 200, nn_lambda = 60, alpha = 10,del_noise = True): isoinn2.set_parameter(age_max,nn_lambda,alpha,0,del_noise) self.__inn_parameter_list = [age_max,nn_lambda,alpha,0,del_noise] self.smooth = smooth self.__res_dimension = 1 self.K = K def fit(self, X, y): """X is array or list, each element is numpy array. Y is array or list containing the response varaible values.""" print 'training with bandwidth calculation, please wait...' timecost = time.time() t = 0 for i in range(len(y)): n_point = array(list(X[i]) + list([y[i]])) if t == 0: EX = n_point EX2 = n_point ** 2 else: count = float(t) EX = (EX*count/(count + 1.0)) + (n_point/(count + 1.0)) EX2 = (EX2*count/(count + 1.0)) + ((n_point ** 2)/(count + 1.0)) t += 1 isoinn2.step(n_point,0,t) isoinn2.step(array([]),0,-1) print 'time cost',time.time() - timecost standard_deviation = (EX2 - EX ** 2) ** 0.5 self.standard_deviation = standard_deviation if self.smooth == None: self.bands = standard_deviation * (len(isoinn2.setN) ** (-0.2)) else: self.bands = standard_deviation * (len(isoinn2.setN) ** (self.smooth)) Pis = isoinn2.accumulated self.counts = isoinn2.accumulated self.Pis = array(Pis) / float(sum(array(Pis)))#distribution of the clusters self.nodes = deepcopy(isoinn2.setN) self.sigmax = matrix(diag(array(self.bands)[0:-1]**2)) for each in self.nodes: self.ux.append(each[0:-1]) self.uy.append(each[-1]) self.uy = array(self.uy) self.gr = isoinn2.gr self.reg_model = GKS(self.nodes, self.counts, standard_deviation**2, self.__res_dimension, self.smooth, self.K) def predict(self, data): """This method returns the predictions the variable data. data should be within the same data space to X in the fit method. 
When smooth parameter is set to None, an SSL procedure will be employed to estimate it.""" if self.smooth == None: isoinn2.set_parameter(self.__inn_parameter_list[0],self.__inn_parameter_list[1],self.__inn_parameter_list[2],self.__inn_parameter_list[3],self.__inn_parameter_list[4]) t = 0 for i in range(len(data)): n_point = array(data[i]) if t == 0: EX = n_point EX2 = n_point ** 2 else: count = float(t) EX = (EX*count/(count + 1.0)) + (n_point/(count + 1.0)) EX2 = (EX2*count/(count + 1.0)) + ((n_point ** 2)/(count + 1.0)) t += 1 isoinn2.step(n_point,0,t) isoinn2.step(array([]),0,-1) return self.reg_model.responses(data, isoinn2.setN) else: return self.reg_model.responses(data) def draw_density(self, resolution = 0.05): """Draws the density contour of any regressor instance. It can only be called after calling the fit method, and only work in 2d case. resolution is a postitive real number definining the detail level of drawing. A smaller resolution number will generate more detailed drawings.""" from numpy import mgrid,zeros from copy import deepcopy the_d = density(self.nodes,array(self.counts),self.standard_deviation) dx, dy = resolution, resolution # generate 2 2d grids for the x & y bounds y, x = mgrid[slice(0, 1 + dy, dy),slice(0, 1 + dx, dx)] t=deepcopy(x[0]) z = zeros(shape = (len(x[0]),len(y[0]))) z1= zeros(shape = (len(x[0]),len(y[0]))) print('Please wait...') for i in range(len(t)): for j in range(len(t)): input_point = array([t[i],t[j]]) z[j][i] = the_d.estimate(input_point) if not ((input_point - array([0.5,0.2])).any()): print i,j print('drawing...') import matplotlib.pyplot as plt from matplotlib.colors import BoundaryNorm from matplotlib.ticker import MaxNLocator z = z[:-1, :-1] levels = MaxNLocator(nbins=15).bin_boundaries(z.min(), z.max()) cmap = plt.get_cmap('PiYG') plt.contourf(x[:-1, :-1] + dx / 2., y[:-1, :-1] + dy / 2., z, levels=levels, cmap=cmap) plt.colorbar() plt.title('Density estimation by SOINN') plt.show() if __name__ == '__main__': from utils import csv_reader r = csv_reader('reg_intro.csv') X,y = r.separate_label() the_reg = ISOINNregressor(smooth = -0.4, K = 15) the_reg.fit(X,y) # the_reg.draw_density() test_x = [] draw_x = [] for i in range(50): test_x.append(array([i/50.0])) draw_x.append(i/50.0) test_y = the_reg.predict(test_x) import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(111) ax.plot(draw_x,test_y,'k-') plt.axis('off') plt.show()
sbxzy/pygks
build/lib/pygks/reg_inn.py
Python
bsd-3-clause
6,787
[ "NEURON" ]
d4b0e59b7c06fda533c8d8dfd743b31922d962a5cb7536b6998f23e0cf9b1c96
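The GKS class used by ISOINNregressor above is not included in this record; per the module docstring it performs Gaussian kernel smoothing over the K nearest neurons. The sketch below is an assumed stand-in, a Nadaraya-Watson smoother illustrating that idea, not pygks's actual GKS implementation.

import numpy as np

def kernel_smooth(query, nodes_x, nodes_y, bandwidth, K=10):
    # Gaussian-weighted average of the K nearest neurons (Nadaraya-Watson).
    d2 = ((nodes_x - query) ** 2).sum(axis=1)
    nearest = np.argsort(d2)[:K]
    w = np.exp(-d2[nearest] / (2.0 * bandwidth ** 2))
    return (w * nodes_y[nearest]).sum() / w.sum()

rng = np.random.default_rng(0)
X = rng.uniform(0.0, 1.0, size=(200, 1))
y = np.sin(2 * np.pi * X[:, 0]) + 0.1 * rng.normal(size=200)
print(kernel_smooth(np.array([0.25]), X, y, bandwidth=0.05, K=15))
# Expected to land near sin(pi/2) = 1.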
#from ctx_base import StandardBaseContext from .libmp.backend import basestring, exec_ from .libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps, round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps, ComplexResult, to_pickable, from_pickable, normalize, from_int, from_float, from_str, to_int, to_float, to_str, from_rational, from_man_exp, fone, fzero, finf, fninf, fnan, mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int, mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod, mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge, mpf_hash, mpf_rand, mpf_sum, bitcount, to_fixed, mpc_to_str, mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate, mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf, mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int, mpc_mpf_div, mpf_pow, mpf_pi, mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10, mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin, mpf_glaisher, mpf_twinprime, mpf_mertens, int_types) from . import rational from . import function_docs new = object.__new__ class mpnumeric(object): """Base class for mpf and mpc.""" __slots__ = [] def __new__(cls, val): raise NotImplementedError class _mpf(mpnumeric): """ An mpf instance holds a real-valued floating-point number. mpf:s work analogously to Python floats, but support arbitrary-precision arithmetic. """ __slots__ = ['_mpf_'] def __new__(cls, val=fzero, **kwargs): """A new mpf can be created from a Python float, an int, a or a decimal string representing a number in floating-point format.""" prec, rounding = cls.context._prec_rounding if kwargs: prec = kwargs.get('prec', prec) if 'dps' in kwargs: prec = dps_to_prec(kwargs['dps']) rounding = kwargs.get('rounding', rounding) if type(val) is cls: sign, man, exp, bc = val._mpf_ if (not man) and exp: return val v = new(cls) v._mpf_ = normalize(sign, man, exp, bc, prec, rounding) return v elif type(val) is tuple: if len(val) == 2: v = new(cls) v._mpf_ = from_man_exp(val[0], val[1], prec, rounding) return v if len(val) == 4: sign, man, exp, bc = val v = new(cls) v._mpf_ = normalize(sign, MPZ(man), exp, bc, prec, rounding) return v raise ValueError else: v = new(cls) v._mpf_ = mpf_pos(cls.mpf_convert_arg(val, prec, rounding), prec, rounding) return v @classmethod def mpf_convert_arg(cls, x, prec, rounding): if isinstance(x, int_types): return from_int(x) if isinstance(x, float): return from_float(x) if isinstance(x, basestring): return from_str(x, prec, rounding) if isinstance(x, cls.context.constant): return x.func(prec, rounding) if hasattr(x, '_mpf_'): return x._mpf_ if hasattr(x, '_mpmath_'): t = cls.context.convert(x._mpmath_(prec, rounding)) if hasattr(t, '_mpf_'): return t._mpf_ if hasattr(x, '_mpi_'): a, b = x._mpi_ if a == b: return a raise ValueError("can only create mpf from zero-width interval") raise TypeError("cannot create mpf from " + repr(x)) @classmethod def mpf_convert_rhs(cls, x): if isinstance(x, int_types): return from_int(x) if isinstance(x, float): return from_float(x) if isinstance(x, complex_types): return cls.context.mpc(x) if isinstance(x, rational.mpq): p, q = x._mpq_ return from_rational(p, q, cls.context.prec) if hasattr(x, '_mpf_'): return x._mpf_ if hasattr(x, '_mpmath_'): t = cls.context.convert(x._mpmath_(*cls.context._prec_rounding)) if hasattr(t, '_mpf_'): return t._mpf_ return t return NotImplemented @classmethod def mpf_convert_lhs(cls, x): x = cls.mpf_convert_rhs(x) if type(x) is tuple: return cls.context.make_mpf(x) return x man_exp = 
property(lambda self: self._mpf_[1:3]) man = property(lambda self: self._mpf_[1]) exp = property(lambda self: self._mpf_[2]) bc = property(lambda self: self._mpf_[3]) real = property(lambda self: self) imag = property(lambda self: self.context.zero) conjugate = lambda self: self def __getstate__(self): return to_pickable(self._mpf_) def __setstate__(self, val): self._mpf_ = from_pickable(val) def __repr__(s): if s.context.pretty: return str(s) return "mpf('%s')" % to_str(s._mpf_, s.context._repr_digits) def __str__(s): return to_str(s._mpf_, s.context._str_digits) def __hash__(s): return mpf_hash(s._mpf_) def __int__(s): return int(to_int(s._mpf_)) def __long__(s): return long(to_int(s._mpf_)) def __float__(s): return to_float(s._mpf_) def __complex__(s): return complex(float(s)) def __nonzero__(s): return s._mpf_ != fzero __bool__ = __nonzero__ def __abs__(s): cls, new, (prec, rounding) = s._ctxdata v = new(cls) v._mpf_ = mpf_abs(s._mpf_, prec, rounding) return v def __pos__(s): cls, new, (prec, rounding) = s._ctxdata v = new(cls) v._mpf_ = mpf_pos(s._mpf_, prec, rounding) return v def __neg__(s): cls, new, (prec, rounding) = s._ctxdata v = new(cls) v._mpf_ = mpf_neg(s._mpf_, prec, rounding) return v def _cmp(s, t, func): if hasattr(t, '_mpf_'): t = t._mpf_ else: t = s.mpf_convert_rhs(t) if t is NotImplemented: return t return func(s._mpf_, t) def __cmp__(s, t): return s._cmp(t, mpf_cmp) def __lt__(s, t): return s._cmp(t, mpf_lt) def __gt__(s, t): return s._cmp(t, mpf_gt) def __le__(s, t): return s._cmp(t, mpf_le) def __ge__(s, t): return s._cmp(t, mpf_ge) def __ne__(s, t): v = s.__eq__(t) if v is NotImplemented: return v return not v def __rsub__(s, t): cls, new, (prec, rounding) = s._ctxdata if type(t) in int_types: v = new(cls) v._mpf_ = mpf_sub(from_int(t), s._mpf_, prec, rounding) return v t = s.mpf_convert_lhs(t) if t is NotImplemented: return t return t - s def __rdiv__(s, t): cls, new, (prec, rounding) = s._ctxdata if isinstance(t, int_types): v = new(cls) v._mpf_ = mpf_rdiv_int(t, s._mpf_, prec, rounding) return v t = s.mpf_convert_lhs(t) if t is NotImplemented: return t return t / s def __rpow__(s, t): t = s.mpf_convert_lhs(t) if t is NotImplemented: return t return t ** s def __rmod__(s, t): t = s.mpf_convert_lhs(t) if t is NotImplemented: return t return t % s def sqrt(s): return s.context.sqrt(s) def ae(s, t, rel_eps=None, abs_eps=None): return s.context.almosteq(s, t, rel_eps, abs_eps) def to_fixed(self, prec): return to_fixed(self._mpf_, prec) def __round__(self, *args): return round(float(self), *args) mpf_binary_op = """ def %NAME%(self, other): mpf, new, (prec, rounding) = self._ctxdata sval = self._mpf_ if hasattr(other, '_mpf_'): tval = other._mpf_ %WITH_MPF% ttype = type(other) if ttype in int_types: %WITH_INT% elif ttype is float: tval = from_float(other) %WITH_MPF% elif hasattr(other, '_mpc_'): tval = other._mpc_ mpc = type(other) %WITH_MPC% elif ttype is complex: tval = from_float(other.real), from_float(other.imag) mpc = self.context.mpc %WITH_MPC% if isinstance(other, mpnumeric): return NotImplemented try: other = mpf.context.convert(other, strings=False) except TypeError: return NotImplemented return self.%NAME%(other) """ return_mpf = "; obj = new(mpf); obj._mpf_ = val; return obj" return_mpc = "; obj = new(mpc); obj._mpc_ = val; return obj" mpf_pow_same = """ try: val = mpf_pow(sval, tval, prec, rounding) %s except ComplexResult: if mpf.context.trap_complex: raise mpc = mpf.context.mpc val = mpc_pow((sval, fzero), (tval, fzero), prec, rounding) %s """ % 
(return_mpf, return_mpc) def binary_op(name, with_mpf='', with_int='', with_mpc=''): code = mpf_binary_op code = code.replace("%WITH_INT%", with_int) code = code.replace("%WITH_MPC%", with_mpc) code = code.replace("%WITH_MPF%", with_mpf) code = code.replace("%NAME%", name) np = {} exec_(code, globals(), np) return np[name] _mpf.__eq__ = binary_op('__eq__', 'return mpf_eq(sval, tval)', 'return mpf_eq(sval, from_int(other))', 'return (tval[1] == fzero) and mpf_eq(tval[0], sval)') _mpf.__add__ = binary_op('__add__', 'val = mpf_add(sval, tval, prec, rounding)' + return_mpf, 'val = mpf_add(sval, from_int(other), prec, rounding)' + return_mpf, 'val = mpc_add_mpf(tval, sval, prec, rounding)' + return_mpc) _mpf.__sub__ = binary_op('__sub__', 'val = mpf_sub(sval, tval, prec, rounding)' + return_mpf, 'val = mpf_sub(sval, from_int(other), prec, rounding)' + return_mpf, 'val = mpc_sub((sval, fzero), tval, prec, rounding)' + return_mpc) _mpf.__mul__ = binary_op('__mul__', 'val = mpf_mul(sval, tval, prec, rounding)' + return_mpf, 'val = mpf_mul_int(sval, other, prec, rounding)' + return_mpf, 'val = mpc_mul_mpf(tval, sval, prec, rounding)' + return_mpc) _mpf.__div__ = binary_op('__div__', 'val = mpf_div(sval, tval, prec, rounding)' + return_mpf, 'val = mpf_div(sval, from_int(other), prec, rounding)' + return_mpf, 'val = mpc_mpf_div(sval, tval, prec, rounding)' + return_mpc) _mpf.__mod__ = binary_op('__mod__', 'val = mpf_mod(sval, tval, prec, rounding)' + return_mpf, 'val = mpf_mod(sval, from_int(other), prec, rounding)' + return_mpf, 'raise NotImplementedError("complex modulo")') _mpf.__pow__ = binary_op('__pow__', mpf_pow_same, 'val = mpf_pow_int(sval, other, prec, rounding)' + return_mpf, 'val = mpc_pow((sval, fzero), tval, prec, rounding)' + return_mpc) _mpf.__radd__ = _mpf.__add__ _mpf.__rmul__ = _mpf.__mul__ _mpf.__truediv__ = _mpf.__div__ _mpf.__rtruediv__ = _mpf.__rdiv__ class _constant(_mpf): """Represents a mathematical constant with dynamic precision. When printed or used in an arithmetic operation, a constant is converted to a regular mpf at the working precision. A regular mpf can also be obtained using the operation +x.""" def __new__(cls, func, name, docname=''): a = object.__new__(cls) a.name = name a.func = func a.__doc__ = getattr(function_docs, docname, '') return a def __call__(self, prec=None, dps=None, rounding=None): prec2, rounding2 = self.context._prec_rounding if not prec: prec = prec2 if not rounding: rounding = rounding2 if dps: prec = dps_to_prec(dps) return self.context.make_mpf(self.func(prec, rounding)) @property def _mpf_(self): prec, rounding = self.context._prec_rounding return self.func(prec, rounding) def __repr__(self): return "<%s: %s~>" % (self.name, self.context.nstr(self)) class _mpc(mpnumeric): """ An mpc represents a complex number using a pair of mpf:s (one for the real part and another for the imaginary part.) The mpc class behaves fairly similarly to Python's complex type. 
""" __slots__ = ['_mpc_'] def __new__(cls, real=0, imag=0): s = object.__new__(cls) if isinstance(real, complex_types): real, imag = real.real, real.imag elif hasattr(real, '_mpc_'): s._mpc_ = real._mpc_ return s real = cls.context.mpf(real) imag = cls.context.mpf(imag) s._mpc_ = (real._mpf_, imag._mpf_) return s real = property(lambda self: self.context.make_mpf(self._mpc_[0])) imag = property(lambda self: self.context.make_mpf(self._mpc_[1])) def __getstate__(self): return to_pickable(self._mpc_[0]), to_pickable(self._mpc_[1]) def __setstate__(self, val): self._mpc_ = from_pickable(val[0]), from_pickable(val[1]) def __repr__(s): if s.context.pretty: return str(s) r = repr(s.real)[4:-1] i = repr(s.imag)[4:-1] return "%s(real=%s, imag=%s)" % (type(s).__name__, r, i) def __str__(s): return "(%s)" % mpc_to_str(s._mpc_, s.context._str_digits) def __complex__(s): return mpc_to_complex(s._mpc_) def __pos__(s): cls, new, (prec, rounding) = s._ctxdata v = new(cls) v._mpc_ = mpc_pos(s._mpc_, prec, rounding) return v def __abs__(s): prec, rounding = s.context._prec_rounding v = new(s.context.mpf) v._mpf_ = mpc_abs(s._mpc_, prec, rounding) return v def __neg__(s): cls, new, (prec, rounding) = s._ctxdata v = new(cls) v._mpc_ = mpc_neg(s._mpc_, prec, rounding) return v def conjugate(s): cls, new, (prec, rounding) = s._ctxdata v = new(cls) v._mpc_ = mpc_conjugate(s._mpc_, prec, rounding) return v def __nonzero__(s): return mpc_is_nonzero(s._mpc_) __bool__ = __nonzero__ def __hash__(s): return mpc_hash(s._mpc_) @classmethod def mpc_convert_lhs(cls, x): try: y = cls.context.convert(x) return y except TypeError: return NotImplemented def __eq__(s, t): if not hasattr(t, '_mpc_'): if isinstance(t, str): return False t = s.mpc_convert_lhs(t) if t is NotImplemented: return t return s.real == t.real and s.imag == t.imag def __ne__(s, t): b = s.__eq__(t) if b is NotImplemented: return b return not b def _compare(*args): raise TypeError("no ordering relation is defined for complex numbers") __gt__ = _compare __le__ = _compare __gt__ = _compare __ge__ = _compare def __add__(s, t): cls, new, (prec, rounding) = s._ctxdata if not hasattr(t, '_mpc_'): t = s.mpc_convert_lhs(t) if t is NotImplemented: return t if hasattr(t, '_mpf_'): v = new(cls) v._mpc_ = mpc_add_mpf(s._mpc_, t._mpf_, prec, rounding) return v v = new(cls) v._mpc_ = mpc_add(s._mpc_, t._mpc_, prec, rounding) return v def __sub__(s, t): cls, new, (prec, rounding) = s._ctxdata if not hasattr(t, '_mpc_'): t = s.mpc_convert_lhs(t) if t is NotImplemented: return t if hasattr(t, '_mpf_'): v = new(cls) v._mpc_ = mpc_sub_mpf(s._mpc_, t._mpf_, prec, rounding) return v v = new(cls) v._mpc_ = mpc_sub(s._mpc_, t._mpc_, prec, rounding) return v def __mul__(s, t): cls, new, (prec, rounding) = s._ctxdata if not hasattr(t, '_mpc_'): if isinstance(t, int_types): v = new(cls) v._mpc_ = mpc_mul_int(s._mpc_, t, prec, rounding) return v t = s.mpc_convert_lhs(t) if t is NotImplemented: return t if hasattr(t, '_mpf_'): v = new(cls) v._mpc_ = mpc_mul_mpf(s._mpc_, t._mpf_, prec, rounding) return v t = s.mpc_convert_lhs(t) v = new(cls) v._mpc_ = mpc_mul(s._mpc_, t._mpc_, prec, rounding) return v def __div__(s, t): cls, new, (prec, rounding) = s._ctxdata if not hasattr(t, '_mpc_'): t = s.mpc_convert_lhs(t) if t is NotImplemented: return t if hasattr(t, '_mpf_'): v = new(cls) v._mpc_ = mpc_div_mpf(s._mpc_, t._mpf_, prec, rounding) return v v = new(cls) v._mpc_ = mpc_div(s._mpc_, t._mpc_, prec, rounding) return v def __pow__(s, t): cls, new, (prec, rounding) = s._ctxdata if 
isinstance(t, int_types): v = new(cls) v._mpc_ = mpc_pow_int(s._mpc_, t, prec, rounding) return v t = s.mpc_convert_lhs(t) if t is NotImplemented: return t v = new(cls) if hasattr(t, '_mpf_'): v._mpc_ = mpc_pow_mpf(s._mpc_, t._mpf_, prec, rounding) else: v._mpc_ = mpc_pow(s._mpc_, t._mpc_, prec, rounding) return v __radd__ = __add__ def __rsub__(s, t): t = s.mpc_convert_lhs(t) if t is NotImplemented: return t return t - s def __rmul__(s, t): cls, new, (prec, rounding) = s._ctxdata if isinstance(t, int_types): v = new(cls) v._mpc_ = mpc_mul_int(s._mpc_, t, prec, rounding) return v t = s.mpc_convert_lhs(t) if t is NotImplemented: return t return t * s def __rdiv__(s, t): t = s.mpc_convert_lhs(t) if t is NotImplemented: return t return t / s def __rpow__(s, t): t = s.mpc_convert_lhs(t) if t is NotImplemented: return t return t ** s __truediv__ = __div__ __rtruediv__ = __rdiv__ def ae(s, t, rel_eps=None, abs_eps=None): return s.context.almosteq(s, t, rel_eps, abs_eps) complex_types = (complex, _mpc) class PythonMPContext: def __init__(ctx): ctx._prec_rounding = [53, round_nearest] ctx.mpf = type('mpf', (_mpf,), {}) ctx.mpc = type('mpc', (_mpc,), {}) ctx.mpf._ctxdata = [ctx.mpf, new, ctx._prec_rounding] ctx.mpc._ctxdata = [ctx.mpc, new, ctx._prec_rounding] ctx.mpf.context = ctx ctx.mpc.context = ctx ctx.constant = type('constant', (_constant,), {}) ctx.constant._ctxdata = [ctx.mpf, new, ctx._prec_rounding] ctx.constant.context = ctx def make_mpf(ctx, v): a = new(ctx.mpf) a._mpf_ = v return a def make_mpc(ctx, v): a = new(ctx.mpc) a._mpc_ = v return a def default(ctx): ctx._prec = ctx._prec_rounding[0] = 53 ctx._dps = 15 ctx.trap_complex = False def _set_prec(ctx, n): ctx._prec = ctx._prec_rounding[0] = max(1, int(n)) ctx._dps = prec_to_dps(n) def _set_dps(ctx, n): ctx._prec = ctx._prec_rounding[0] = dps_to_prec(n) ctx._dps = max(1, int(n)) prec = property(lambda ctx: ctx._prec, _set_prec) dps = property(lambda ctx: ctx._dps, _set_dps) def convert(ctx, x, strings=True): """ Converts *x* to an ``mpf`` or ``mpc``. If *x* is of type ``mpf``, ``mpc``, ``int``, ``float``, ``complex``, the conversion will be performed losslessly. If *x* is a string, the result will be rounded to the present working precision. Strings representing fractions or complex numbers are permitted. 
>>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> mpmathify(3.5) mpf('3.5') >>> mpmathify('2.1') mpf('2.1000000000000001') >>> mpmathify('3/4') mpf('0.75') >>> mpmathify('2+3j') mpc(real='2.0', imag='3.0') """ if type(x) in ctx.types: return x if isinstance(x, int_types): return ctx.make_mpf(from_int(x)) if isinstance(x, float): return ctx.make_mpf(from_float(x)) if isinstance(x, complex): return ctx.make_mpc((from_float(x.real), from_float(x.imag))) prec, rounding = ctx._prec_rounding if isinstance(x, rational.mpq): p, q = x._mpq_ return ctx.make_mpf(from_rational(p, q, prec)) if strings and isinstance(x, basestring): try: _mpf_ = from_str(x, prec, rounding) return ctx.make_mpf(_mpf_) except ValueError: pass if hasattr(x, '_mpf_'): return ctx.make_mpf(x._mpf_) if hasattr(x, '_mpc_'): return ctx.make_mpc(x._mpc_) if hasattr(x, '_mpmath_'): return ctx.convert(x._mpmath_(prec, rounding)) return ctx._convert_fallback(x, strings) def isnan(ctx, x): """ Return *True* if *x* is a NaN (not-a-number), or for a complex number, whether either the real or complex part is NaN; otherwise return *False*:: >>> from mpmath import * >>> isnan(3.14) False >>> isnan(nan) True >>> isnan(mpc(3.14,2.72)) False >>> isnan(mpc(3.14,nan)) True """ if hasattr(x, "_mpf_"): return x._mpf_ == fnan if hasattr(x, "_mpc_"): return fnan in x._mpc_ if isinstance(x, int_types) or isinstance(x, rational.mpq): return False x = ctx.convert(x) if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): return ctx.isnan(x) raise TypeError("isnan() needs a number as input") def isinf(ctx, x): """ Return *True* if the absolute value of *x* is infinite; otherwise return *False*:: >>> from mpmath import * >>> isinf(inf) True >>> isinf(-inf) True >>> isinf(3) False >>> isinf(3+4j) False >>> isinf(mpc(3,inf)) True >>> isinf(mpc(inf,3)) True """ if hasattr(x, "_mpf_"): return x._mpf_ in (finf, fninf) if hasattr(x, "_mpc_"): re, im = x._mpc_ return re in (finf, fninf) or im in (finf, fninf) if isinstance(x, int_types) or isinstance(x, rational.mpq): return False x = ctx.convert(x) if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): return ctx.isinf(x) raise TypeError("isinf() needs a number as input") def isnormal(ctx, x): """ Determine whether *x* is "normal" in the sense of floating-point representation; that is, return *False* if *x* is zero, an infinity or NaN; otherwise return *True*. 
By extension, a complex number *x* is considered "normal" if its magnitude is normal:: >>> from mpmath import * >>> isnormal(3) True >>> isnormal(0) False >>> isnormal(inf); isnormal(-inf); isnormal(nan) False False False >>> isnormal(0+0j) False >>> isnormal(0+3j) True >>> isnormal(mpc(2,nan)) False """ if hasattr(x, "_mpf_"): return bool(x._mpf_[1]) if hasattr(x, "_mpc_"): re, im = x._mpc_ re_normal = bool(re[1]) im_normal = bool(im[1]) if re == fzero: return im_normal if im == fzero: return re_normal return re_normal and im_normal if isinstance(x, int_types) or isinstance(x, rational.mpq): return bool(x) x = ctx.convert(x) if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): return ctx.isnormal(x) raise TypeError("isnormal() needs a number as input") def isint(ctx, x, gaussian=False): """ Return *True* if *x* is integer-valued; otherwise return *False*:: >>> from mpmath import * >>> isint(3) True >>> isint(mpf(3)) True >>> isint(3.2) False >>> isint(inf) False Optionally, Gaussian integers can be checked for:: >>> isint(3+0j) True >>> isint(3+2j) False >>> isint(3+2j, gaussian=True) True """ if isinstance(x, int_types): return True if hasattr(x, "_mpf_"): sign, man, exp, bc = xval = x._mpf_ return bool((man and exp >= 0) or xval == fzero) if hasattr(x, "_mpc_"): re, im = x._mpc_ rsign, rman, rexp, rbc = re isign, iman, iexp, ibc = im re_isint = (rman and rexp >= 0) or re == fzero if gaussian: im_isint = (iman and iexp >= 0) or im == fzero return re_isint and im_isint return re_isint and im == fzero if isinstance(x, rational.mpq): p, q = x._mpq_ return p % q == 0 x = ctx.convert(x) if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): return ctx.isint(x, gaussian) raise TypeError("isint() needs a number as input") def fsum(ctx, terms, absolute=False, squared=False): """ Calculates a sum containing a finite number of terms (for infinite series, see :func:`~mpmath.nsum`). The terms will be converted to mpmath numbers. For len(terms) > 2, this function is generally faster and produces more accurate results than the builtin Python function :func:`sum`. >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> fsum([1, 2, 0.5, 7]) mpf('10.5') With squared=True each term is squared, and with absolute=True the absolute value of each term is used. """ prec, rnd = ctx._prec_rounding real = [] imag = [] other = 0 for term in terms: reval = imval = 0 if hasattr(term, "_mpf_"): reval = term._mpf_ elif hasattr(term, "_mpc_"): reval, imval = term._mpc_ else: term = ctx.convert(term) if hasattr(term, "_mpf_"): reval = term._mpf_ elif hasattr(term, "_mpc_"): reval, imval = term._mpc_ else: if absolute: term = ctx.absmax(term) if squared: term = term**2 other += term continue if imval: if squared: if absolute: real.append(mpf_mul(reval,reval)) real.append(mpf_mul(imval,imval)) else: reval, imval = mpc_pow_int((reval,imval),2,prec+10) real.append(reval) imag.append(imval) elif absolute: real.append(mpc_abs((reval,imval), prec)) else: real.append(reval) imag.append(imval) else: if squared: reval = mpf_mul(reval, reval) elif absolute: reval = mpf_abs(reval) real.append(reval) s = mpf_sum(real, prec, rnd, absolute) if imag: s = ctx.make_mpc((s, mpf_sum(imag, prec, rnd))) else: s = ctx.make_mpf(s) if other == 0: return s else: return s + other def fdot(ctx, A, B=None, conjugate=False): r""" Computes the dot product of the iterables `A` and `B`, .. math :: \sum_{k=0} A_k B_k. Alternatively, :func:`~mpmath.fdot` accepts a single iterable of pairs. In other words, ``fdot(A,B)`` and ``fdot(zip(A,B))`` are equivalent. 
The elements are automatically converted to mpmath numbers. With ``conjugate=True``, the elements in the second vector will be conjugated: .. math :: \sum_{k=0} A_k \overline{B_k} **Examples** >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> A = [2, 1.5, 3] >>> B = [1, -1, 2] >>> fdot(A, B) mpf('6.5') >>> list(zip(A, B)) [(2, 1), (1.5, -1), (3, 2)] >>> fdot(_) mpf('6.5') >>> A = [2, 1.5, 3j] >>> B = [1+j, 3, -1-j] >>> fdot(A, B) mpc(real='9.5', imag='-1.0') >>> fdot(A, B, conjugate=True) mpc(real='3.5', imag='-5.0') """ if B: A = zip(A, B) prec, rnd = ctx._prec_rounding real = [] imag = [] other = 0 hasattr_ = hasattr types = (ctx.mpf, ctx.mpc) for a, b in A: if type(a) not in types: a = ctx.convert(a) if type(b) not in types: b = ctx.convert(b) a_real = hasattr_(a, "_mpf_") b_real = hasattr_(b, "_mpf_") if a_real and b_real: real.append(mpf_mul(a._mpf_, b._mpf_)) continue a_complex = hasattr_(a, "_mpc_") b_complex = hasattr_(b, "_mpc_") if a_real and b_complex: aval = a._mpf_ bre, bim = b._mpc_ if conjugate: bim = mpf_neg(bim) real.append(mpf_mul(aval, bre)) imag.append(mpf_mul(aval, bim)) elif b_real and a_complex: are, aim = a._mpc_ bval = b._mpf_ real.append(mpf_mul(are, bval)) imag.append(mpf_mul(aim, bval)) elif a_complex and b_complex: #re, im = mpc_mul(a._mpc_, b._mpc_, prec+20) are, aim = a._mpc_ bre, bim = b._mpc_ if conjugate: bim = mpf_neg(bim) real.append(mpf_mul(are, bre)) real.append(mpf_neg(mpf_mul(aim, bim))) imag.append(mpf_mul(are, bim)) imag.append(mpf_mul(aim, bre)) else: if conjugate: other += a*ctx.conj(b) else: other += a*b s = mpf_sum(real, prec, rnd) if imag: s = ctx.make_mpc((s, mpf_sum(imag, prec, rnd))) else: s = ctx.make_mpf(s) if other == 0: return s else: return s + other def _wrap_libmp_function(ctx, mpf_f, mpc_f=None, mpi_f=None, doc="<no doc>"): """ Given a low-level mpf_ function, and optionally similar functions for mpc_ and mpi_, defines the function as a context method. It is assumed that the return type is the same as that of the input; the exception is that propagation from mpf to mpc is possible by raising ComplexResult. 
""" def f(x, **kwargs): if type(x) not in ctx.types: x = ctx.convert(x) prec, rounding = ctx._prec_rounding if kwargs: prec = kwargs.get('prec', prec) if 'dps' in kwargs: prec = dps_to_prec(kwargs['dps']) rounding = kwargs.get('rounding', rounding) if hasattr(x, '_mpf_'): try: return ctx.make_mpf(mpf_f(x._mpf_, prec, rounding)) except ComplexResult: # Handle propagation to complex if ctx.trap_complex: raise return ctx.make_mpc(mpc_f((x._mpf_, fzero), prec, rounding)) elif hasattr(x, '_mpc_'): return ctx.make_mpc(mpc_f(x._mpc_, prec, rounding)) raise NotImplementedError("%s of a %s" % (name, type(x))) name = mpf_f.__name__[4:] f.__doc__ = function_docs.__dict__.get(name, "Computes the %s of x" % doc) return f # Called by SpecialFunctions.__init__() @classmethod def _wrap_specfun(cls, name, f, wrap): if wrap: def f_wrapped(ctx, *args, **kwargs): convert = ctx.convert args = [convert(a) for a in args] prec = ctx.prec try: ctx.prec += 10 retval = f(ctx, *args, **kwargs) finally: ctx.prec = prec return +retval else: f_wrapped = f f_wrapped.__doc__ = function_docs.__dict__.get(name, f.__doc__) setattr(cls, name, f_wrapped) def _convert_param(ctx, x): if hasattr(x, "_mpc_"): v, im = x._mpc_ if im != fzero: return x, 'C' elif hasattr(x, "_mpf_"): v = x._mpf_ else: if type(x) in int_types: return int(x), 'Z' p = None if isinstance(x, tuple): p, q = x elif hasattr(x, '_mpq_'): p, q = x._mpq_ elif isinstance(x, basestring) and '/' in x: p, q = x.split('/') p = int(p) q = int(q) if p is not None: if not p % q: return p // q, 'Z' return ctx.mpq(p,q), 'Q' x = ctx.convert(x) if hasattr(x, "_mpc_"): v, im = x._mpc_ if im != fzero: return x, 'C' elif hasattr(x, "_mpf_"): v = x._mpf_ else: return x, 'U' sign, man, exp, bc = v if man: if exp >= -4: if sign: man = -man if exp >= 0: return int(man) << exp, 'Z' if exp >= -4: p, q = int(man), (1<<(-exp)) return ctx.mpq(p,q), 'Q' x = ctx.make_mpf(v) return x, 'R' elif not exp: return 0, 'Z' else: return x, 'U' def _mpf_mag(ctx, x): sign, man, exp, bc = x if man: return exp+bc if x == fzero: return ctx.ninf if x == finf or x == fninf: return ctx.inf return ctx.nan def mag(ctx, x): """ Quick logarithmic magnitude estimate of a number. Returns an integer or infinity `m` such that `|x| <= 2^m`. It is not guaranteed that `m` is an optimal bound, but it will never be too large by more than 2 (and probably not more than 1). **Examples** >>> from mpmath import * >>> mp.pretty = True >>> mag(10), mag(10.0), mag(mpf(10)), int(ceil(log(10,2))) (4, 4, 4, 4) >>> mag(10j), mag(10+10j) (4, 5) >>> mag(0.01), int(ceil(log(0.01,2))) (-6, -6) >>> mag(0), mag(inf), mag(-inf), mag(nan) (-inf, +inf, +inf, nan) """ if hasattr(x, "_mpf_"): return ctx._mpf_mag(x._mpf_) elif hasattr(x, "_mpc_"): r, i = x._mpc_ if r == fzero: return ctx._mpf_mag(i) if i == fzero: return ctx._mpf_mag(r) return 1+max(ctx._mpf_mag(r), ctx._mpf_mag(i)) elif isinstance(x, int_types): if x: return bitcount(abs(x)) return ctx.ninf elif isinstance(x, rational.mpq): p, q = x._mpq_ if p: return 1 + bitcount(abs(p)) - bitcount(q) return ctx.ninf else: x = ctx.convert(x) if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"): return ctx.mag(x) else: raise TypeError("requires an mpf/mpc")
lidavidm/mathics-heroku
venv/lib/python2.7/site-packages/sympy/mpmath/ctx_mp_python.py
Python
gpl-3.0
36,688
[ "Gaussian" ]
aa8d787269210c0bd39e5822c415fc0605ac9cf4acf2fe50942faad79cfcbc0e
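The record above defines the mpmath Python context, whose conversion and summation helpers are documented in its embedded doctests (convert/mpmathify, fsum, fdot, mag). A minimal usage sketch follows, assuming the mpmath package is importable; the expected values are the ones given in the record's own doctests.

# Usage sketch for the context API defined in the record above.
from mpmath import mp, mpmathify, fsum, fdot, mag

mp.dps = 15            # 15 decimal digits; mp.prec is derived via dps_to_prec()
mp.pretty = False

print(mpmathify('3/4'))               # mpf('0.75'); fraction strings are parsed
print(fsum([1, 2, 0.5, 7]))           # mpf('10.5'); more accurate than builtin sum()
print(fdot([2, 1.5, 3], [1, -1, 2]))  # mpf('6.5'); dot product of two iterables
print(mag(10), mag(0.01))             # 4 -6; an integer m such that |x| <= 2**m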
# - Coding UTF8 - # # Networked Decision Making # Development Sites (source code): # http://code.google.com/p/global-decision-making-system/ # http://github.com/NewGlobalStrategy/NetDecisionMaking # # Demo Sites (Google App Engine) # http://netdecisionmaking.appspot.com # http://globaldecisionmaking.appspot.com # # License Code: MIT # License Content: Creative Commons Attribution 3.0 # # Also visit: www.web2py.com # or Groups: http://groups.google.com/group/web2py # For details on the web framework used for this development # # Developed by Russ King (newglobalstrategy@gmail.com # Russ also blogs occasionally to pass the time at: # http://proudofyourplanent.blogspot.com # His general thinking on why this project is very important is available at # http://www.scribd.com/doc/98216626/New-Global-Strategy # With thanks to Guido, Massimo and many other that make this sort of thing # much easier than it used to be #This adds some demonstration events and related questions #looking to test a std system or have some desire to reperform the demo questions in a different environment #so coding of this is awful think we need a list and then iterate through - but different questions have different params #and now got the challenge of setting up the links as well and needs to work on gae so a bit more than just looping through #but probably can create a new list as we go of the questids inserted and then create the questlink table from that somehow #need the list of links in a table and just map to the questids actually generated @auth.requires_membership('manager') def addndsquests(): #Plan now is to have 3 programs to replace stdquests - while programatically poor it actually does seem #to need split up to process on gae without timeout or whatever issues so setting up 3 programs seems ok for now messagetext = 'NDS questions have been added' if db(db.event.event_name == "Net Decision Making Evolution").isempty(): nds_id = db.event.insert(event_name="Net Decision Making Evolution", shared=False) else: messagetext = 'Event already setup no questions added' return dict(messagetext=messagetext) ndsquests = [{'questiontext': r'Is group decision making a problem?', 'answers': ["Yes", "No"], 'urgency': 4, 'importance': 7, 'category': 'Net Decision Making', 'eventid': nds_id}, {'questiontext': r'Would an online asynchronous decision making platform be beneficial?', 'answers': ["Yes", "No"], 'urgency': 4, 'importance': 7, 'category': 'Net Decision Making', 'eventid': nds_id}, {'qtype': 'action', 'questiontext': r'A prototype networked decision making system should be developed', 'numanswers': 2, 'answers': ['Approve', 'Disapprove', 'OK'], 'urgency': 8, 'importance': 9, 'category': 'Net Decision Making', 'responsible': 'Russ King', 'eventid': nds_id}, {'questiontext': r'What is the best method to get feedback on Networked Decision Making?', 'numanswers': 3, 'answers': ["You need to draw users to the site and then review actions generated", "Ask People directly", "Setup a surveyMonkey"], 'urgency': 4, 'importance': 7, 'category': 'Net Decision Making', 'eventid': nds_id}, { 'questiontext': r'Should we develop social network integration features for networked decision making?', 'answers': ["Yes", "No"], 'urgency': 4, 'importance': 7, 'category': 'Net Decision Making', 'eventid': nds_id}, {'questiontext': r'Which social networking platform should be developed first?', 'numanswers': 4, 'answers': ["Facebook", "Twitter", "Google+", "Other"], 'urgency': 4, 'importance': 7, 'category': 'Net Decision Making', 
'eventid': nds_id}, {'questiontext': r'Should we look to use advertising to fund the running costs of NDS?', 'answers': ["Yes", "No"], 'urgency': 4, 'importance': 7, 'category': 'Net Decision Making', 'eventid': nds_id}, {'qtype': 'action', 'questiontext': r'Google should develop a globally scaleable version of the network decision making system outlined here', 'numanswers': 2, 'answers': ['Approve', 'Disapprove', 'OK'], 'urgency': 7, 'importance': 7, 'category': 'Net Decision Making', 'responsible': 'Eric Schmidt', 'eventid': nds_id}] insertlist = [] for x in ndsquests: qtext = x['questiontext'] x['correctanstext'] = '' q = 0 if not request.env.web2py_runtime_gae: if db(db.question.questiontext == qtext).isempty(): q = db.question.insert(**x) else: q = db.question.insert(**x) insertlist.append(q) #have assumed id of first action is 28 - this needs checked stdlinks = [[0, 1], [1, 2], [2, 3], [2, 4], [4, 5], [2, 6], [1, 7]] #then if we have inserted those questions we would create related link for x in stdlinks: source_id = insertlist[x[0]] target_id = insertlist[x[1]] if source_id > 0 and target_id > 0: if db(db.questlink.sourceid == source_id and db.questlink.targetid == target_id).isempty(): db.questlink.insert(sourceid=source_id, targetid=target_id, createcount=1, deletecount=0) eventmap = [[50, 50], [450, 100], [450, 350], [750, 600], [500, 600], [450, 900], [200, 650], [150, 350]] for i, x in enumerate(eventmap): db.eventmap.insert(eventid=nds_id, questid=insertlist[i], xpos=eventmap[i][0], ypos=eventmap[i][1]) return dict(messagetext=messagetext) @auth.requires_membership('manager') def addevtquests(): #Plan now is to have 3 programs to replace stdquests - while programatically poor it actually does seem #to need split up to process on gae without timeout or whatever issues so setting up 3 programs seems ok for now messagetext = 'Strategy Event Quests Added' if db(db.event.event_name == "Global Strategy Review").isempty(): gs_id = db.event.insert(event_name="Global Strategy Review", shared=True) else: messagetext = 'Event already setup no questions added' return dict(messagetext=messagetext) #if db(db.event.event_name == "Net Decision Making Evolution").isempty(): # nds_event = db.event.insert(event_name="Net Decision Making Evolution", shared=False) #if db(db.event.event_name == "Global Healthcare Meeting").isempty(): # nds_event = db.event.insert(event_name="Global Healthcare Meeting", shared=True) gsquests = [{'questiontext': r'Is the world under-achieving?', 'answers': ["Yes", "No"], 'urgency': 7, 'importance': 7, 'category': 'Organisation', 'eventid': gs_id}, {'questiontext': r'Should we develop a global strategy as outlined at: http://www.ted.com/talks/jamie_drummond_how_to_set_goals_for_the_world.html ?', 'answers': ["Yes", "No"], 'urgency': 6, 'importance': 8, 'category': 'Strategy', 'eventid': gs_id}, {'questiontext': r'Are you aware of the global strategy?', 'answers': ["Yes", "No"], 'urgency': 7, 'importance': 7, 'category': 'Strategy', 'eventid': gs_id}, {'questiontext': r'Is it sensible to discuss consequences for failures to follow the global strategy?', 'answers': ["Yes", "No"], 'urgency': 7, 'importance': 7, 'category': 'Strategy', 'eventid': gs_id}, { 'questiontext': r'Is limiting access to future developments in healthcare a possible incentive to help people align their activities and behaviours', 'answers': ["Yes", "No"], 'urgency': 7, 'importance': 7, 'category': 'Strategy', 'eventid': gs_id}, { 'questiontext': r'Is The Zeitgeist Movement correct that humanity 
should move to a natural law, resource based economy?', 'answers': ["Yes", "No"], 'urgency': 7, 'importance': 7, 'category': 'Strategy', 'eventid': gs_id}, {'questiontext': r'What are the best solutions to work on right now?', 'numanswers': 5, 'answers': ["Prevention of HIV/Aids", "Networked Decision Making", "Malaria", "Malnutrition", "Global Warming"], 'urgency': 8, 'importance': 8, 'category': 'Strategy', 'eventid': gs_id}, {'questiontext': r'What is the main problem with the world right now?', 'numanswers': 6, 'answers': ["There is no problem - everything is perfect", "There simply isn't enough food in the world so some people have to starve", "Many people don't care if other people are starving", "Humans lack the skills to organise the planet", "Humans derive pleasure from having more than other people", "Lack of vision to see that creating alignment on objectives will get us all much better futures and longer and happier lives"], 'urgency': 7, 'importance': 8, 'category': 'Strategy', 'eventid': gs_id}, { 'questiontext': r'Is the United States a corruption as alleged at http://www.ted.com/talks/lawrence_lessig_we_the_people_and_the_republic_we_must_reclaim.html ?', 'answers': ["Yes", "No"], 'urgency': 7, 'importance': 7, 'category': 'Organisation', 'continent': 'North America (NA)', 'country': 'United States (NA)', 'activescope': '3 National', 'eventid': gs_id}, { 'questiontext': r'The distribution of wealth on the planet is radically different than that predicted by micro-economic theory. The question therefore arises does acquisition of great wealth require exploitation of others?', 'numanswers': 3, 'answers': ["Yes", "No", "Usually"], 'urgency': 4, 'importance': 4, 'category': 'Strategy', 'eventid': gs_id}] insertlist = [] for x in gsquests: x['correctanstext'] = '' qtext = x['questiontext'] q = 0 if not request.env.web2py_runtime_gae: if db(db.question.questiontext == qtext).isempty(): q = db.question.insert(**x) else: q = db.question.insert(**x) insertlist.append(q) eventmap = [[400, 0], [200, 450], [200, 200], [150, 750], [350, 750], [850, 550], [850, 300], [850, 0], [600, 700], [500, 300]] for i, x in enumerate(eventmap): db.eventmap.insert(eventid=gs_id, questid=insertlist[i], xpos=eventmap[i][0], ypos=eventmap[i][1]) return dict(messagetext=messagetext) @auth.requires_membership('manager') def addhealthquests(): #Plan now is to have 3 programs to replace stdquests - while programatically poor it actually does seem #to need split up to process on gae without timeout or whatever issues so setting up 3 programs seems ok for now messagetext = 'Health questions have been added' if db(db.event.event_name == "Healthcare Review").isempty(): gs_id = db.event.insert(event_name="Healthcare Review", shared=True) else: messagetext = 'Event already setup no questions added' return dict(messagetext=messagetext) stdquests = [ {'questiontext': r'Is aging a disease or is it just inevitable and we should accept it?', 'answers': ["A disease", "Inevitable"], 'urgency': 4, 'importance': 7, 'category': 'Healthcare', 'eventid': gs_id}, {'questiontext': r'Is it reasonable to try and promote biogerontology research?', 'answers': ["Yes", "No"], 'urgency': 6, 'importance': 6, 'category': 'Healthcare', 'eventid': gs_id}, {'questiontext': r'Is it possible to live for more than 130 years?', 'answers': ["Yes", "No"], 'urgency': 4, 'importance': 6, 'category': 'Healthcare', 'eventid': gs_id}, {'questiontext': r'Are we investing enough in aging research', 'answers': ["Yes", "No"], 'urgency': 4, 'importance': 
7, 'category': 'Healthcare', 'eventid': gs_id}, {'qtype': 'action', 'questiontext': r'Unless they have already done so, all global citizens with net assets in excess of US$100 million should invest 1% of their assets in biogerontology or related research and action. Eg The Gates Foundation activities and Ellison medical would both count so several leading individuals have done this already.', 'numanswers': 2, 'answers': ['Approve', 'Disapprove', 'OK'], 'urgency': 8, 'importance': 8, 'category': 'Healthcare', 'responsible': 'people with >$100M', 'eventid': gs_id}, {'qtype': 'action', 'questiontext': r'The programme to deliver better housing as explained at http://www.ted.com/talks/paul_pholeros_how_to_reduce_poverty_fix_homes.html should be rolled out globally with associated crowdsourced measurement of progress.', 'numanswers': 2, 'answers': ['Approve', 'Disapprove', 'OK'], 'urgency': 8, 'importance': 9, 'category': 'Healthcare', 'responsible': 'All global leaders', 'eventid': gs_id}, {'qtype': 'action', 'questiontext': r'The top priority action from the 2012 Copenhagen Consensus (http://copenhagenconsensus.com) to use bundled micronutrient interventions to fight hunger and improve education should be actioned and funded by a $75bn cut to US defence spending', 'numanswers': 2, 'answers': ['Approve', 'Disapprove', 'OK'], 'urgency': 8, 'importance': 9, 'category': 'Food', 'responsible': 'Barrack Obama', 'eventid': gs_id}, { 'questiontext': r'Is it reasonable to engage in debate on alternative eligibility criteria for new life extending healthcare', 'answers': ["Yes", "No"], 'urgency': 4, 'importance': 7, 'category': 'Healthcare', 'eventid': gs_id}, { 'questiontext': r'If behaviour as opposed to ability to pay became an accepted factor is this likely to change behaviour and improve the world?', 'answers': ["Yes", "No"], 'urgency': 4, 'importance': 7, 'category': 'Healthcare', 'eventid': gs_id}] insertlist = [] for x in stdquests: x['correctanstext'] = '' qtext = x['questiontext'] q = 0 if not request.env.web2py_runtime_gae: if db(db.question.questiontext == qtext).isempty(): q = db.question.insert(**x) else: q = db.question.insert(**x) insertlist.append(q) #have assumed id of first action is 28 - this needs checked stdlinks = [[0, 1], [0, 2], [1, 3], [3, 4]] #then if we have inserted those questions we would create related link # for x in stdlinks: source_id = insertlist[x[0]] target_id = insertlist[x[1]] if source_id > 0 and target_id > 0: if db(db.questlink.sourceid == source_id and db.questlink.targetid == target_id).isempty(): db.questlink.insert(sourceid=source_id, targetid=target_id, createcount=1, deletecount=0) eventmap = [[300, 50], [300, 350], [50, 500], [300, 600], [300, 850], [700, 450], [800, 750], [550, 750], [650, 200]] for i, x in enumerate(eventmap): db.eventmap.insert(eventid=gs_id, questid=insertlist[i], xpos=eventmap[i][0], ypos=eventmap[i][1]) return dict(messagetext=messagetext) @auth.requires_membership('manager') def addothquests(): #Plan now is to have 4 programs to replace stdquests - while programatically poor it actually does seem #to need split up to process on gae without timeout or whatever issues so setting up 4 programs seems ok for now #this replaces std quests for other things we have eg philosophy etc no event on these and just paste from stdquests evid = db(db.event.event_name =='Unspecified').select(db.event.id).first().id #'eventid': evid messagetext = 'Other questions have been added' stdquests = [{'questiontext': r'Do we know if nothing is a stable 
state?', 'numanswers': 3, 'answers': ["Yes", "No", "We don't know we just assume it is"], 'urgency': 3, 'importance': 6, 'category': 'Philosophy','eventid': evid}, {'questiontext': r'Is there sufficient education on when to compete and when to co-operate?', 'answers': ["Yes", "No"], 'urgency': 4, 'importance': 8, 'category': 'Strategy','eventid': evid}, {'questiontext': r'Did you choose where you were born?', 'answers': ["Yes", "No"], 'urgency': 4, 'importance': 4, 'category': 'Philosophy','eventid': evid}, { 'questiontext': r'Is it right that place of birth determines so much of your life and restricts so many people?', 'answers': ["Yes", "No"], 'urgency': 4, 'importance': 7, 'category': 'Philosophy','eventid': evid}, {'questiontext': r'Could we unite theists and atheists on a project to create heaven on earth?', 'answers': ["Yes", "No"], 'urgency': 7, 'importance': 7, 'category': 'Philosophy','eventid': evid}, {'questiontext': r'What is the optimum number of countries in the world?', 'answers': ["Just right", "Too many", "Too few", "One"], 'urgency': 5, 'importance': 8, 'category': 'Organisation','eventid': evid}, {'questiontext': r'What is the main problem with the world right now?', 'numanswers': 6, 'answers': ["There is no problem - everything is perfect", "There simply isn't enough food in the world so some people have to starve", "Many people don't care if other people are starving", "Humans lack the skills to organise the planet", "Humans derive pleasure from having more than other people", "Lack of vision to see that creating alignment on objectives will get us all much better futures and longer and happier lives"], 'urgency': 7, 'importance': 8, 'category': 'Strategy','eventid': evid}, {'questiontext': r'Does God Exist?', 'answers': ["Yes", "No"], 'urgency': 3, 'importance': 7, 'category': 'Philosophy','eventid': evid}, {'questiontext': r'Is God rational?', 'answers': ["Yes", "No"], 'urgency': 5, 'importance': 7, 'category': 'Philosophy','eventid': evid}, {'questiontext': r'Is it rational to believe in an irrational God?', 'answers': ["Yes", "No"], 'urgency': 6, 'importance': 7, 'category': 'Philosophy','eventid': evid}, {'questiontext': r'Do countries assist or hinder the operation of the world?', 'answers': ["Assist", "Hinder"], 'urgency': 6, 'importance': 7, 'category': 'Organisation','eventid': evid}, {'questiontext': r'Why are so many people unemployed?', 'numanswers': 5, 'answers': ["The unemployed are all useless", "Just a cost of human progress that many are left with lots of leisure but little income", "Inability to co-operate, share and work together", "Many people are just lazy", "Something else"], 'urgency': 8, 'importance': 7, 'category': 'Organisation','eventid': evid}, { 'questiontext': r'Did JFK speed-up or slow down progress in getting to the moon by explaining that was the intention in 1962 http://www.astrosociology.com/Library/PDF/JFK%201962%20Rice%20University%20Speech%20Transcript.pdf ?', 'answers': ["Speed Up", "Slow Down"], 'urgency': 7, 'importance': 7, 'category': 'Organisation','eventid': evid}, { 'questiontext': r"If stating we were going to the moon was important to getting there is there not a similarly strong case for saying we are going to extend human lifespans?", 'answers': ["Yes", "No"], 'urgency': 7, 'importance': 7, 'category': 'Organisation','eventid': evid}, {'questiontext': r'Should Scotland become an independent country?', 'answers': ["Yes", "No"], 'urgency': 4, 'importance': 7, 'category': 'Organisation', 'continent': 'Europe (EU)', 
'country': 'United Kingdom (EU)', 'activescope': '3 National','eventid': evid}, { 'questiontext': r'Is the United States a corruption as alleged at http://www.ted.com/talks/lawrence_lessig_we_the_people_and_the_republic_we_must_reclaim.html ?', 'answers': ["Yes", "No"], 'urgency': 7, 'importance': 7, 'category': 'Organisation', 'continent': 'North America (NA)', 'country': 'United States (NA)', 'activescope': '3 National','eventid': evid}, { 'questiontext': r'The distribution of wealth on the planet is radically different than that predicted by micro-economic theory. The question therefore arises does acquisition of great wealth require exploitation of others?', 'numanswers': 3, 'answers': ["Yes", "No", "Usually"], 'urgency': 4, 'importance': 4, 'category': 'Strategy','eventid': evid}, {'qtype': 'action', 'questiontext': r'Daylight saving time should operate all year in Europe to reduce accidents and CO2 emissions', 'numanswers': 2, 'answers': ['Approve', 'Disapprove', 'OK'], 'urgency': 8, 'importance': 9, 'category': 'Organisation', 'responsible': 'Jose Manuel Barroso', 'continent': 'Europe (EU)', 'activescope': '2 Continental','eventid': evid}, {'qtype': 'action', 'questiontext': r'The funding model for US politics must change', 'numanswers': 2, 'answers': ['Approve', 'Disapprove', 'OK'], 'urgency': 8, 'importance': 9, 'category': 'Organisation', 'responsible': 'Barrack Obama', 'continent': 'North America (NA)', 'country': 'United States (NA)', 'activescope': '3 National','eventid': evid}, {'qtype': 'action', 'questiontext': r'All African health centres and schools should get internet access to improve access and trust in the global knowledge base, this should be provided by leading pharmaceutical and technology companies working together', 'numanswers': 2, 'answers': ['Approve', 'Disapprove', 'OK'], 'urgency': 5, 'importance': 9, 'category': 'Healthcare', 'responsible': 'CEOs of leading pharma & IT Companies', 'continent': 'Africa (AF)', 'activescope': '2 Continental','eventid': evid}, {'qtype': 'action', 'questiontext': r'A national solution to the problem of misfuelling cars with petrol instead of diesel should be establised in the UK the costs of this problem are estimated at around $120M per year and magnets, RFID readers on fuel pumps, better fuel caps or some other agreed approach should be able to permanently eliminate this waste for less than half that cost', 'numanswers': 2, 'answers': ['Approve', 'Disapprove', 'OK'], 'urgency': 5, 'importance': 6, 'category': 'Organisation', 'responsible': 'CEOs of leading Auto & Oil Companies', 'continent': 'Europe (EU)', 'country': 'United Kingdom (EU)', 'activescope': '3 National','eventid': evid}] insertlist = [] for x in stdquests: x['correctanstext'] = '' qtext = x['questiontext'] q = 0 if not request.env.web2py_runtime_gae: if db(db.question.questiontext == qtext).isempty(): q = db.question.insert(**x) else: q = db.question.insert(**x) #time.sleep(1) insertlist.append(q) #have assumed id of first action is 28 - this needs checked stdlinks = [[8, 9], [11, 12]] #then if we have inserted those questions we would create related link # for x in stdlinks: source_id = insertlist[x[0]] target_id = insertlist[x[1]] if source_id > 0 and target_id > 0: if db(db.questlink.sourceid == source_id and db.questlink.targetid == target_id).isempty(): db.questlink.insert(sourceid=source_id, targetid=target_id, createcount=1, deletecount=0) return dict(messagetext=messagetext)
NewGlobalStrategy/NetDecisionMaking
controllers/eventquests.py
Python
mit
25,087
[ "VisIt" ]
de26a97ba6b4b25c5970abd2a8cb37e21ce71d8c63ed279ce5f2bf25f67e47b0
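The controllers above follow one pattern: insert each question, collect the returned ids in insertlist, then turn (source_index, target_index) pairs into questlink rows. A standalone sketch of that pattern is below; note that combining two web2py DAL conditions requires the '&' operator, since a plain Python 'and' between Query objects evaluates to only the second condition.

# Standalone sketch of the insert-then-link pattern used by the controllers;
# build_links is a hypothetical helper, not part of the repository.
def build_links(inserted_ids, index_pairs):
    links = []
    for src, tgt in index_pairs:
        source_id, target_id = inserted_ids[src], inserted_ids[tgt]
        if source_id > 0 and target_id > 0:   # 0 marks an insert that was skipped
            links.append((source_id, target_id))
    return links

print(build_links([101, 102, 103, 104], [(0, 1), (1, 2), (2, 3)]))
# [(101, 102), (102, 103), (103, 104)]

# In the controller, the duplicate check should combine conditions with '&':
#   db((db.questlink.sourceid == source_id) & (db.questlink.targetid == target_id)).isempty()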
""" To know more or get code samples, please visit my website: https://morvanzhou.github.io/tutorials/ Or search: 莫烦Python Thank you for supporting! """ # please note, all tutorial code are running under python3.5. # If you use the version like python2.7, please modify the code accordingly # 4 - Regressor example import numpy as np np.random.seed(1337) # for reproducibility from keras.models import Sequential from keras.layers import Dense import matplotlib.pyplot as plt # create some data X = np.linspace(-1, 1, 200) np.random.shuffle(X) # randomize the data Y = 0.5 * X + 2 + np.random.normal(0, 0.05, (200, )) # plot data plt.scatter(X, Y) plt.show() X_train, Y_train = X[:160], Y[:160] # first 160 data points X_test, Y_test = X[160:], Y[160:] # last 40 data points # build a neural network from the 1st layer to the last layer model = Sequential() model.add(Dense(units=1, input_dim=1)) # choose loss function and optimizing method model.compile(loss='mse', optimizer='sgd') # training print('Training -----------') for step in range(301): cost = model.train_on_batch(X_train, Y_train) if step % 100 == 0: print('train cost: ', cost) # test print('\nTesting ------------') cost = model.evaluate(X_test, Y_test, batch_size=40) print('test cost:', cost) W, b = model.layers[0].get_weights() print('Weights=', W, '\nbiases=', b) # plotting the prediction Y_pred = model.predict(X_test) plt.scatter(X_test, Y_test) plt.plot(X_test, Y_pred) plt.show()
tencrance/cool-config
ml_keras_learn/tutorials/kerasTUT/4-regressor_example.py
Python
mit
1,503
[ "VisIt" ]
3faf777872fb298486c61023985595d0ef36689b9edfb89a6f4841bf8b02ac48
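The tutorial above trains with a manual train_on_batch loop; the same single-neuron regression can be written with Keras's fit(). A sketch assuming a Keras 2.x API where the argument is named epochs (the Keras 1.x bundled with this tutorial used nb_epoch); data and model mirror the record.

# Equivalent training with model.fit() instead of the manual loop.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

X = np.linspace(-1, 1, 200)
np.random.shuffle(X)
Y = 0.5 * X + 2 + np.random.normal(0, 0.05, (200,))
model = Sequential([Dense(units=1, input_dim=1)])   # one weight, one bias
model.compile(loss='mse', optimizer='sgd')
model.fit(X[:160], Y[:160], epochs=100, batch_size=40, verbose=0)
print(model.evaluate(X[160:], Y[160:], batch_size=40))   # test MSE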
#!/usr/bin/env python ######################################################################## # $HeadURL$ # File : dirac-install-agent # Author : Ricardo Graciani ######################################################################## """ Do the initial installation and configuration of a DIRAC agent """ __RCSID__ = "$Id$" # from DIRAC.Core.Utilities import InstallTools from DIRAC.ConfigurationSystem.Client.Helpers import getCSExtensions # from DIRAC import gConfig, S_OK, S_ERROR InstallTools.exitOnError = True # from DIRAC.Core.Base import Script overwrite = False def setOverwrite( opVal ): global overwrite overwrite = True return S_OK() module = '' specialOptions = {} def setModule( optVal ): global specialOptions,module specialOptions['Module'] = optVal module = optVal return S_OK() def setSpecialOption( optVal ): global specialOptions option,value = optVal.split('=') specialOptions[option] = value return S_OK() Script.registerSwitch( "w", "overwrite", "Overwrite the configuration in the global CS", setOverwrite ) Script.registerSwitch( "m:", "module=", "Python module name for the agent code", setModule ) Script.registerSwitch( "p:", "parameter=", "Special agent option ", setSpecialOption ) Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1], 'Usage:', ' %s [option|cfgfile] ... System Agent|System/Agent' % Script.scriptName, 'Arguments:', ' System: Name of the DIRAC system (ie: WorkloadManagement)', ' Agent: Name of the DIRAC agent (ie: JobCleaningAgent)'] ) ) Script.parseCommandLine() args = Script.getPositionalArgs() if len( args ) == 1: args = args[0].split( '/' ) if len( args ) != 2: Script.showHelp() exit( -1 ) # system = args[0] agent = args[1] if module: result = InstallTools.addDefaultOptionsToCS( gConfig, 'agent', system, module, getCSExtensions(), overwrite = overwrite ) result = InstallTools.addDefaultOptionsToCS( gConfig, 'agent', system, agent, getCSExtensions(), specialOptions=specialOptions, overwrite = overwrite, addDefaultOptions = False ) else: result = InstallTools.addDefaultOptionsToCS( gConfig, 'agent', system, agent, getCSExtensions(), specialOptions=specialOptions, overwrite = overwrite ) if not result['OK']: print "ERROR:", result['Message'] else: result = InstallTools.installComponent( 'agent', system, agent, getCSExtensions(), module ) if not result['OK']: print "ERROR:", result['Message'] else: print "Successfully installed agent %s in %s system" % ( agent, system )
avedaee/DIRAC
Core/scripts/dirac-install-agent.py
Python
gpl-3.0
3,120
[ "DIRAC" ]
ac5da8bb37e1748a15bce784e898ab351bb94695889314bc0cd2cb8d659b2a1a
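The script above accumulates each -p/--parameter switch into the specialOptions dict by splitting "key=value" once. A standalone sketch of that behavior; MaxCycles and PollingTime are illustrative option names, not ones the script mandates.

# Sketch of how setSpecialOption() accumulates -p switches.
special_options = {}

def set_special_option(opt_val):
    option, value = opt_val.split('=')   # a value containing '=' would raise ValueError here
    special_options[option] = value

set_special_option('MaxCycles=100')
set_special_option('PollingTime=120')
print(special_options)   # {'MaxCycles': '100', 'PollingTime': '120'}

# Corresponding invocation (agent given in System/Agent form, per the usage message):
#   dirac-install-agent -m JobCleaningAgent -p MaxCycles=100 WorkloadManagement/JobCleaningAgent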
#!/usr/bin/env python -i # Script: logplot.py # Purpose: use GnuPlot to plot two columns from a LAMMPS log file # Syntax: logplot.py log.lammps X Y # log.lammps = LAMMPS log file # X,Y = plot Y versus X where X,Y are thermo keywords # once plot appears, you are in Python interpreter, type C-D to exit # Author: Steve Plimpton (Sandia), sjplimp at sandia.gov import sys,os path = os.environ["LAMMPS_PYTHON_TOOLS"] sys.path.append(path) from log import log from gnu import gnu if len(sys.argv) != 4: raise StandardError, "Syntax: logplot.py log.lammps X Y" logfile = sys.argv[1] xlabel = sys.argv[2] ylabel = sys.argv[3] lg = log(logfile) x,y = lg.get(xlabel,ylabel) g = gnu() g.plot(x,y) print "Type Ctrl-D to exit Python"
SGenheden/lammps
tools/python/logplot.py
Python
gpl-2.0
760
[ "LAMMPS" ]
1b2ee8916bee193ac1924c5948c29e590cbcfe78bed8dd456ad98451029c12df
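The script above reads two thermo columns with the LAMMPS python tools' log reader and hands them to GnuPlot. The same data can be plotted with matplotlib instead; a sketch assuming LAMMPS_PYTHON_TOOLS is set as in the script and that "Step" and "Temp" appear among the log's thermo keywords.

# Matplotlib variant of logplot.py, reusing the same log reader.
import os, sys
sys.path.append(os.environ["LAMMPS_PYTHON_TOOLS"])
from log import log
import matplotlib.pyplot as plt

lg = log("log.lammps")
x, y = lg.get("Step", "Temp")   # one list of values per thermo keyword
plt.plot(x, y)
plt.xlabel("Step")
plt.ylabel("Temp")
plt.show()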
# coding=utf-8 from __future__ import unicode_literals import re from calendar import timegm from datetime import timedelta, MAXYEAR from time import time from dateutil import relativedelta from dateutil.tz import tzlocal, tzutc from faker.utils import is_string from faker.utils.datetime_safe import date, datetime, real_date, real_datetime from .. import BaseProvider localized = True def datetime_to_timestamp(dt): if getattr(dt, 'tzinfo', None) is not None: dt = dt.astimezone(tzutc()) return timegm(dt.timetuple()) def timestamp_to_datetime(timestamp, tzinfo): if tzinfo is None: pick = datetime.fromtimestamp(timestamp, tzlocal()) pick = pick.astimezone(tzutc()).replace(tzinfo=None) else: pick = datetime.fromtimestamp(timestamp, tzinfo) return pick class ParseError(ValueError): pass timedelta_pattern = r'' for name, sym in [('years', 'y'), ('months', 'M'), ('weeks', 'w'), ('days', 'd'), ('hours', 'h'), ('minutes', 'm'), ('seconds', 's')]: timedelta_pattern += r'((?P<{0}>(?:\+|-)\d+?){1})?'.format(name, sym) class Provider(BaseProvider): centuries = [ 'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X', 'XI', 'XII', 'XIII', 'XIV', 'XV', 'XVI', 'XVII', 'XVIII', 'XIX', 'XX', 'XXI'] countries = [{'timezones': ['Europe/Andorra'], 'alpha-2-code': 'AD', 'alpha-3-code': 'AND', 'continent': 'Europe', 'name': 'Andorra', 'capital': 'Andorra la Vella'}, {'timezones': ['Asia/Kabul'], 'alpha-2-code': 'AF', 'alpha-3-code': 'AFG', 'continent': 'Asia', 'name': 'Afghanistan', 'capital': 'Kabul'}, {'timezones': ['America/Antigua'], 'alpha-2-code': 'AG', 'alpha-3-code': 'ATG', 'continent': 'North America', 'name': 'Antigua and Barbuda', 'capital': "St. John's"}, {'timezones': ['Europe/Tirane'], 'alpha-2-code': 'AL', 'alpha-3-code': 'ALB', 'continent': 'Europe', 'name': 'Albania', 'capital': 'Tirana'}, {'timezones': ['Asia/Yerevan'], 'alpha-2-code': 'AM', 'alpha-3-code': 'ARM', 'continent': 'Asia', 'name': 'Armenia', 'capital': 'Yerevan'}, {'timezones': ['Africa/Luanda'], 'alpha-2-code': 'AO', 'alpha-3-code': 'AGO', 'continent': 'Africa', 'name': 'Angola', 'capital': 'Luanda'}, {'timezones': ['America/Argentina/Buenos_Aires', 'America/Argentina/Cordoba', 'America/Argentina/Jujuy', 'America/Argentina/Tucuman', 'America/Argentina/Catamarca', 'America/Argentina/La_Rioja', 'America/Argentina/San_Juan', 'America/Argentina/Mendoza', 'America/Argentina/Rio_Gallegos', 'America/Argentina/Ushuaia'], 'alpha-2-code': 'AR', 'alpha-3-code': 'ARG', 'continent': 'South America', 'name': 'Argentina', 'capital': 'Buenos Aires'}, {'timezones': ['Europe/Vienna'], 'alpha-2-code': 'AT', 'alpha-3-code': 'AUT', 'continent': 'Europe', 'name': 'Austria', 'capital': 'Vienna'}, {'timezones': ['Australia/Lord_Howe', 'Australia/Hobart', 'Australia/Currie', 'Australia/Melbourne', 'Australia/Sydney', 'Australia/Broken_Hill', 'Australia/Brisbane', 'Australia/Lindeman', 'Australia/Adelaide', 'Australia/Darwin', 'Australia/Perth'], 'alpha-2-code': 'AU', 'alpha-3-code': 'AUS', 'continent': 'Oceania', 'name': 'Australia', 'capital': 'Canberra'}, {'timezones': ['Asia/Baku'], 'alpha-2-code': 'AZ', 'alpha-3-code': 'AZE', 'continent': 'Asia', 'name': 'Azerbaijan', 'capital': 'Baku'}, {'timezones': ['America/Barbados'], 'alpha-2-code': 'BB', 'alpha-3-code': 'BRB', 'continent': 'North America', 'name': 'Barbados', 'capital': 'Bridgetown'}, {'timezones': ['Asia/Dhaka'], 'alpha-2-code': 'BD', 'alpha-3-code': 'BGD', 'continent': 'Asia', 'name': 'Bangladesh', 'capital': 'Dhaka'}, {'timezones': ['Europe/Brussels'], 'alpha-2-code': 'BE', 
'alpha-3-code': 'BEL', 'continent': 'Europe', 'name': 'Belgium', 'capital': 'Brussels'}, {'timezones': ['Africa/Ouagadougou'], 'alpha-2-code': 'BF', 'alpha-3-code': 'BFA', 'continent': 'Africa', 'name': 'Burkina Faso', 'capital': 'Ouagadougou'}, {'timezones': ['Europe/Sofia'], 'alpha-2-code': 'BG', 'alpha-3-code': 'BGR', 'continent': 'Europe', 'name': 'Bulgaria', 'capital': 'Sofia'}, {'timezones': ['Asia/Bahrain'], 'alpha-2-code': 'BH', 'alpha-3-code': 'BHR', 'continent': 'Asia', 'name': 'Bahrain', 'capital': 'Manama'}, {'timezones': ['Africa/Bujumbura'], 'alpha-2-code': 'BI', 'alpha-3-code': 'BDI', 'continent': 'Africa', 'name': 'Burundi', 'capital': 'Bujumbura'}, {'timezones': ['Africa/Porto-Novo'], 'alpha-2-code': 'BJ', 'alpha-3-code': 'BEN', 'continent': 'Africa', 'name': 'Benin', 'capital': 'Porto-Novo'}, {'timezones': ['Asia/Brunei'], 'alpha-2-code': 'BN', 'alpha-3-code': 'BRN', 'continent': 'Asia', 'name': 'Brunei Darussalam', 'capital': 'Bandar Seri Begawan'}, {'timezones': ['America/La_Paz'], 'alpha-2-code': 'BO', 'alpha-3-code': 'BOL', 'continent': 'South America', 'name': 'Bolivia', 'capital': 'Sucre'}, {'timezones': ['America/Noronha', 'America/Belem', 'America/Fortaleza', 'America/Recife', 'America/Araguaina', 'America/Maceio', 'America/Bahia', 'America/Sao_Paulo', 'America/Campo_Grande', 'America/Cuiaba', 'America/Porto_Velho', 'America/Boa_Vista', 'America/Manaus', 'America/Eirunepe', 'America/Rio_Branco'], 'alpha-2-code': 'BR', 'alpha-3-code': 'BRA', 'continent': 'South America', 'name': 'Brazil', 'capital': 'Bras\xc3\xadlia'}, {'timezones': ['America/Nassau'], 'alpha-2-code': 'BS', 'alpha-3-code': 'BHS', 'continent': 'North America', 'name': 'Bahamas', 'capital': 'Nassau'}, {'timezones': ['Asia/Thimphu'], 'alpha-2-code': 'BT', 'alpha-3-code': 'BTN', 'continent': 'Asia', 'name': 'Bhutan', 'capital': 'Thimphu'}, {'timezones': ['Africa/Gaborone'], 'alpha-2-code': 'BW', 'alpha-3-code': 'BWA', 'continent': 'Africa', 'name': 'Botswana', 'capital': 'Gaborone'}, {'timezones': ['Europe/Minsk'], 'alpha-2-code': 'BY', 'alpha-3-code': 'BLR', 'continent': 'Europe', 'name': 'Belarus', 'capital': 'Minsk'}, {'timezones': ['America/Belize'], 'alpha-2-code': 'BZ', 'alpha-3-code': 'BLZ', 'continent': 'North America', 'name': 'Belize', 'capital': 'Belmopan'}, {'timezones': ['America/St_Johns', 'America/Halifax', 'America/Glace_Bay', 'America/Moncton', 'America/Goose_Bay', 'America/Blanc-Sablon', 'America/Montreal', 'America/Toronto', 'America/Nipigon', 'America/Thunder_Bay', 'America/Pangnirtung', 'America/Iqaluit', 'America/Atikokan', 'America/Rankin_Inlet', 'America/Winnipeg', 'America/Rainy_River', 'America/Cambridge_Bay', 'America/Regina', 'America/Swift_Current', 'America/Edmonton', 'America/Yellowknife', 'America/Inuvik', 'America/Dawson_Creek', 'America/Vancouver', 'America/Whitehorse', 'America/Dawson'], 'alpha-2-code': 'CA', 'alpha-3-code': 'CAN', 'continent': 'North America', 'name': 'Canada', 'capital': 'Ottawa'}, {'timezones': ['Africa/Kinshasa', 'Africa/Lubumbashi'], 'alpha-2-code': 'CD', 'alpha-3-code': 'COD', 'continent': 'Africa', 'name': 'Democratic Republic of the Congo', 'capital': 'Kinshasa'}, {'timezones': ['Africa/Brazzaville'], 'alpha-2-code': 'CG', 'alpha-3-code': 'COG', 'continent': 'Africa', 'name': 'Republic of the Congo', 'capital': 'Brazzaville'}, {'timezones': ['Africa/Abidjan'], 'alpha-2-code': 'CI', 'alpha-3-code': 'CIV', 'continent': 'Africa', 'name': "C\xc3\xb4te d'Ivoire", 'capital': 'Yamoussoukro'}, {'timezones': ['America/Santiago', 'Pacific/Easter'], 
'alpha-2-code': 'CL', 'alpha-3-code': 'CHL', 'continent': 'South America', 'name': 'Chile', 'capital': 'Santiago'}, {'timezones': ['Africa/Douala'], 'alpha-2-code': 'CM', 'alpha-3-code': 'CMR', 'continent': 'Africa', 'name': 'Cameroon', 'capital': 'Yaound\xc3\xa9'}, {'timezones': ['Asia/Shanghai', 'Asia/Harbin', 'Asia/Chongqing', 'Asia/Urumqi', 'Asia/Kashgar'], 'alpha-2-code': 'CN', 'alpha-3-code': 'CHN', 'continent': 'Asia', 'name': "People's Republic of China", 'capital': 'Beijing'}, {'timezones': ['America/Bogota'], 'alpha-2-code': 'CO', 'alpha-3-code': 'COL', 'continent': 'South America', 'name': 'Colombia', 'capital': 'Bogot\xc3\xa1'}, {'timezones': ['America/Costa_Rica'], 'alpha-2-code': 'CR', 'alpha-3-code': 'CRI', 'continent': 'North America', 'name': 'Costa Rica', 'capital': 'San Jos\xc3\xa9'}, {'timezones': ['America/Havana'], 'alpha-2-code': 'CU', 'alpha-3-code': 'CUB', 'continent': 'North America', 'name': 'Cuba', 'capital': 'Havana'}, {'timezones': ['Atlantic/Cape_Verde'], 'alpha-2-code': 'CV', 'alpha-3-code': 'CPV', 'continent': 'Africa', 'name': 'Cape Verde', 'capital': 'Praia'}, {'timezones': ['Asia/Nicosia'], 'alpha-2-code': 'CY', 'alpha-3-code': 'CYP', 'continent': 'Asia', 'name': 'Cyprus', 'capital': 'Nicosia'}, {'timezones': ['Europe/Prague'], 'alpha-2-code': 'CZ', 'alpha-3-code': 'CZE', 'continent': 'Europe', 'name': 'Czech Republic', 'capital': 'Prague'}, {'timezones': ['Europe/Berlin'], 'alpha-2-code': 'DE', 'alpha-3-code': 'DEU', 'continent': 'Europe', 'name': 'Germany', 'capital': 'Berlin'}, {'timezones': ['Africa/Djibouti'], 'alpha-2-code': 'DJ', 'alpha-3-code': 'DJI', 'continent': 'Africa', 'name': 'Djibouti', 'capital': 'Djibouti City'}, {'timezones': ['Europe/Copenhagen'], 'alpha-2-code': 'DK', 'alpha-3-code': 'DNK', 'continent': 'Europe', 'name': 'Denmark', 'capital': 'Copenhagen'}, {'timezones': ['America/Dominica'], 'alpha-2-code': 'DM', 'alpha-3-code': 'DMA', 'continent': 'North America', 'name': 'Dominica', 'capital': 'Roseau'}, {'timezones': ['America/Santo_Domingo'], 'alpha-2-code': 'DO', 'alpha-3-code': 'DOM', 'continent': 'North America', 'name': 'Dominican Republic', 'capital': 'Santo Domingo'}, {'timezones': ['America/Guayaquil', 'Pacific/Galapagos'], 'alpha-2-code': 'EC', 'alpha-3-code': 'ECU', 'continent': 'South America', 'name': 'Ecuador', 'capital': 'Quito'}, {'timezones': ['Europe/Tallinn'], 'alpha-2-code': 'EE', 'alpha-3-code': 'EST', 'continent': 'Europe', 'name': 'Estonia', 'capital': 'Tallinn'}, {'timezones': ['Africa/Cairo'], 'alpha-2-code': 'EG', 'alpha-3-code': 'EGY', 'continent': 'Africa', 'name': 'Egypt', 'capital': 'Cairo'}, {'timezones': ['Africa/Asmera'], 'alpha-2-code': 'ER', 'alpha-3-code': 'ERI', 'continent': 'Africa', 'name': 'Eritrea', 'capital': 'Asmara'}, {'timezones': ['Africa/Addis_Ababa'], 'alpha-2-code': 'ET', 'alpha-3-code': 'ETH', 'continent': 'Africa', 'name': 'Ethiopia', 'capital': 'Addis Ababa'}, {'timezones': ['Europe/Helsinki'], 'alpha-2-code': 'FI', 'alpha-3-code': 'FIN', 'continent': 'Europe', 'name': 'Finland', 'capital': 'Helsinki'}, {'timezones': ['Pacific/Fiji'], 'alpha-2-code': 'FJ', 'alpha-3-code': 'FJI', 'continent': 'Oceania', 'name': 'Fiji', 'capital': 'Suva'}, {'timezones': ['Europe/Paris'], 'alpha-2-code': 'FR', 'alpha-3-code': 'FRA', 'continent': 'Europe', 'name': 'France', 'capital': 'Paris'}, {'timezones': ['Africa/Libreville'], 'alpha-2-code': 'GA', 'alpha-3-code': 'GAB', 'continent': 'Africa', 'name': 'Gabon', 'capital': 'Libreville'}, {'timezones': ['Asia/Tbilisi'], 'alpha-2-code': 'GE', 
'alpha-3-code': 'GEO', 'continent': 'Asia', 'name': 'Georgia', 'capital': 'Tbilisi'}, {'timezones': ['Africa/Accra'], 'alpha-2-code': 'GH', 'alpha-3-code': 'GHA', 'continent': 'Africa', 'name': 'Ghana', 'capital': 'Accra'}, {'timezones': ['Africa/Banjul'], 'alpha-2-code': 'GM', 'alpha-3-code': 'GMB', 'continent': 'Africa', 'name': 'The Gambia', 'capital': 'Banjul'}, {'timezones': ['Africa/Conakry'], 'alpha-2-code': 'GN', 'alpha-3-code': 'GIN', 'continent': 'Africa', 'name': 'Guinea', 'capital': 'Conakry'}, {'timezones': ['Europe/Athens'], 'alpha-2-code': 'GR', 'alpha-3-code': 'GRC', 'continent': 'Europe', 'name': 'Greece', 'capital': 'Athens'}, {'timezones': ['America/Guatemala'], 'alpha-2-code': 'GT', 'alpha-3-code': 'GTM', 'continent': 'North America', 'name': 'Guatemala', 'capital': 'Guatemala City'}, {'timezones': ['America/Guatemala'], 'alpha-2-code': 'HT', 'alpha-3-code': 'HTI', 'continent': 'North America', 'name': 'Haiti', 'capital': 'Port-au-Prince'}, {'timezones': ['Africa/Bissau'], 'alpha-2-code': 'GW', 'alpha-3-code': 'GNB', 'continent': 'Africa', 'name': 'Guinea-Bissau', 'capital': 'Bissau'}, {'timezones': ['America/Guyana'], 'alpha-2-code': 'GY', 'alpha-3-code': 'GUY', 'continent': 'South America', 'name': 'Guyana', 'capital': 'Georgetown'}, {'timezones': ['America/Tegucigalpa'], 'alpha-2-code': 'HN', 'alpha-3-code': 'HND', 'continent': 'North America', 'name': 'Honduras', 'capital': 'Tegucigalpa'}, {'timezones': ['Europe/Budapest'], 'alpha-2-code': 'HU', 'alpha-3-code': 'HUN', 'continent': 'Europe', 'name': 'Hungary', 'capital': 'Budapest'}, {'timezones': ['Asia/Jakarta', 'Asia/Pontianak', 'Asia/Makassar', 'Asia/Jayapura'], 'alpha-2-code': 'ID', 'alpha-3-code': 'IDN', 'continent': 'Asia', 'name': 'Indonesia', 'capital': 'Jakarta'}, {'timezones': ['Europe/Dublin'], 'alpha-2-code': 'IE', 'alpha-3-code': 'IRL', 'continent': 'Europe', 'name': 'Republic of Ireland', 'capital': 'Dublin'}, {'timezones': ['Asia/Jerusalem'], 'alpha-2-code': 'IL', 'alpha-3-code': 'ISR', 'continent': 'Asia', 'name': 'Israel', 'capital': 'Jerusalem'}, {'timezones': ['Asia/Calcutta'], 'alpha-2-code': 'IN', 'alpha-3-code': 'IND', 'continent': 'Asia', 'name': 'India', 'capital': 'New Delhi'}, {'timezones': ['Asia/Baghdad'], 'alpha-2-code': 'IQ', 'alpha-3-code': 'IRQ', 'continent': 'Asia', 'name': 'Iraq', 'capital': 'Baghdad'}, {'timezones': ['Asia/Tehran'], 'alpha-2-code': 'IR', 'alpha-3-code': 'IRN', 'continent': 'Asia', 'name': 'Iran', 'capital': 'Tehran'}, {'timezones': ['Atlantic/Reykjavik'], 'alpha-2-code': 'IS', 'alpha-3-code': 'ISL', 'continent': 'Europe', 'name': 'Iceland', 'capital': 'Reykjav\xc3\xadk'}, {'timezones': ['Europe/Rome'], 'alpha-2-code': 'IT', 'alpha-3-code': 'ITA', 'continent': 'Europe', 'name': 'Italy', 'capital': 'Rome'}, {'timezones': ['America/Jamaica'], 'alpha-2-code': 'JM', 'alpha-3-code': 'JAM', 'continent': 'North America', 'name': 'Jamaica', 'capital': 'Kingston'}, {'timezones': ['Asia/Amman'], 'alpha-2-code': 'JO', 'alpha-3-code': 'JOR', 'continent': 'Asia', 'name': 'Jordan', 'capital': 'Amman'}, {'timezones': ['Asia/Tokyo'], 'alpha-2-code': 'JP', 'alpha-3-code': 'JPN', 'continent': 'Asia', 'name': 'Japan', 'capital': 'Tokyo'}, {'timezones': ['Africa/Nairobi'], 'alpha-2-code': 'KE', 'alpha-3-code': 'KEN', 'continent': 'Africa', 'name': 'Kenya', 'capital': 'Nairobi'}, {'timezones': ['Asia/Bishkek'], 'alpha-2-code': 'KG', 'alpha-3-code': 'KGZ', 'continent': 'Asia', 'name': 'Kyrgyzstan', 'capital': 'Bishkek'}, {'timezones': ['Pacific/Tarawa', 'Pacific/Enderbury', 
'Pacific/Kiritimati'], 'alpha-2-code': 'KI', 'alpha-3-code': 'KIR', 'continent': 'Oceania', 'name': 'Kiribati', 'capital': 'Tarawa'}, {'timezones': ['Asia/Pyongyang'], 'alpha-2-code': 'KP', 'alpha-3-code': 'PRK', 'continent': 'Asia', 'name': 'North Korea', 'capital': 'Pyongyang'}, {'timezones': ['Asia/Seoul'], 'alpha-2-code': 'KR', 'alpha-3-code': 'KOR', 'continent': 'Asia', 'name': 'South Korea', 'capital': 'Seoul'}, {'timezones': ['Asia/Kuwait'], 'alpha-2-code': 'KW', 'alpha-3-code': 'KWT', 'continent': 'Asia', 'name': 'Kuwait', 'capital': 'Kuwait City'}, {'timezones': ['Asia/Beirut'], 'alpha-2-code': 'LB', 'alpha-3-code': 'LBN', 'continent': 'Asia', 'name': 'Lebanon', 'capital': 'Beirut'}, {'timezones': ['Europe/Vaduz'], 'alpha-2-code': 'LI', 'alpha-3-code': 'LIE', 'continent': 'Europe', 'name': 'Liechtenstein', 'capital': 'Vaduz'}, {'timezones': ['Africa/Monrovia'], 'alpha-2-code': 'LR', 'alpha-3-code': 'LBR', 'continent': 'Africa', 'name': 'Liberia', 'capital': 'Monrovia'}, {'timezones': ['Africa/Maseru'], 'alpha-2-code': 'LS', 'alpha-3-code': 'LSO', 'continent': 'Africa', 'name': 'Lesotho', 'capital': 'Maseru'}, {'timezones': ['Europe/Vilnius'], 'alpha-2-code': 'LT', 'alpha-3-code': 'LTU', 'continent': 'Europe', 'name': 'Lithuania', 'capital': 'Vilnius'}, {'timezones': ['Europe/Luxembourg'], 'alpha-2-code': 'LU', 'alpha-3-code': 'LUX', 'continent': 'Europe', 'name': 'Luxembourg', 'capital': 'Luxembourg City'}, {'timezones': ['Europe/Riga'], 'alpha-2-code': 'LV', 'alpha-3-code': 'LVA', 'continent': 'Europe', 'name': 'Latvia', 'capital': 'Riga'}, {'timezones': ['Africa/Tripoli'], 'alpha-2-code': 'LY', 'alpha-3-code': 'LBY', 'continent': 'Africa', 'name': 'Libya', 'capital': 'Tripoli'}, {'timezones': ['Indian/Antananarivo'], 'alpha-2-code': 'MG', 'alpha-3-code': 'MDG', 'continent': 'Africa', 'name': 'Madagascar', 'capital': 'Antananarivo'}, {'timezones': ['Pacific/Majuro', 'Pacific/Kwajalein'], 'alpha-2-code': 'MH', 'alpha-3-code': 'MHL', 'continent': 'Oceania', 'name': 'Marshall Islands', 'capital': 'Majuro'}, {'timezones': ['Europe/Skopje'], 'alpha-2-code': 'MK', 'alpha-3-code': 'MKD', 'continent': 'Europe', 'name': 'Macedonia', 'capital': 'Skopje'}, {'timezones': ['Africa/Bamako'], 'alpha-2-code': 'ML', 'alpha-3-code': 'MLI', 'continent': 'Africa', 'name': 'Mali', 'capital': 'Bamako'}, {'timezones': ['Asia/Rangoon'], 'alpha-2-code': 'MM', 'alpha-3-code': 'MMR', 'continent': 'Asia', 'name': 'Myanmar', 'capital': 'Naypyidaw'}, {'timezones': ['Asia/Ulaanbaatar', 'Asia/Hovd', 'Asia/Choibalsan'], 'alpha-2-code': 'MN', 'alpha-3-code': 'MNG', 'continent': 'Asia', 'name': 'Mongolia', 'capital': 'Ulaanbaatar'}, {'timezones': ['Africa/Nouakchott'], 'alpha-2-code': 'MR', 'alpha-3-code': 'MRT', 'continent': 'Africa', 'name': 'Mauritania', 'capital': 'Nouakchott'}, {'timezones': ['Europe/Malta'], 'alpha-2-code': 'MT', 'alpha-3-code': 'MLT', 'continent': 'Europe', 'name': 'Malta', 'capital': 'Valletta'}, {'timezones': ['Indian/Mauritius'], 'alpha-2-code': 'MU', 'alpha-3-code': 'MUS', 'continent': 'Africa', 'name': 'Mauritius', 'capital': 'Port Louis'}, {'timezones': ['Indian/Maldives'], 'alpha-2-code': 'MV', 'alpha-3-code': 'MDV', 'continent': 'Asia', 'name': 'Maldives', 'capital': 'Malé'}, {'timezones': ['Africa/Blantyre'], 'alpha-2-code': 'MW', 'alpha-3-code': 'MWI', 'continent': 'Africa', 'name': 'Malawi', 'capital': 'Lilongwe'}, {'timezones': ['America/Mexico_City', 'America/Cancun', 'America/Merida', 'America/Monterrey', 'America/Mazatlan', 'America/Chihuahua', 'America/Hermosillo', 
'America/Tijuana'], 'alpha-2-code': 'MX', 'alpha-3-code': 'MEX', 'continent': 'North America', 'name': 'Mexico', 'capital': 'Mexico City'}, {'timezones': ['Asia/Kuala_Lumpur', 'Asia/Kuching'], 'alpha-2-code': 'MY', 'alpha-3-code': 'MYS', 'continent': 'Asia', 'name': 'Malaysia', 'capital': 'Kuala Lumpur'}, {'timezones': ['Africa/Maputo'], 'alpha-2-code': 'MZ', 'alpha-3-code': 'MOZ', 'continent': 'Africa', 'name': 'Mozambique', 'capital': 'Maputo'}, {'timezones': ['Africa/Windhoek'], 'alpha-2-code': 'NA', 'alpha-3-code': 'NAM', 'continent': 'Africa', 'name': 'Namibia', 'capital': 'Windhoek'}, {'timezones': ['Africa/Niamey'], 'alpha-2-code': 'NE', 'alpha-3-code': 'NER', 'continent': 'Africa', 'name': 'Niger', 'capital': 'Niamey'}, {'timezones': ['Africa/Lagos'], 'alpha-2-code': 'NG', 'alpha-3-code': 'NGA', 'continent': 'Africa', 'name': 'Nigeria', 'capital': 'Abuja'}, {'timezones': ['America/Managua'], 'alpha-2-code': 'NI', 'alpha-3-code': 'NIC', 'continent': 'North America', 'name': 'Nicaragua', 'capital': 'Managua'}, {'timezones': ['Europe/Amsterdam'], 'alpha-2-code': 'NL', 'alpha-3-code': 'NLD', 'continent': 'Europe', 'name': 'Kingdom of the Netherlands', 'capital': 'Amsterdam'}, {'timezones': ['Europe/Oslo'], 'alpha-2-code': 'NO', 'alpha-3-code': 'NOR', 'continent': 'Europe', 'name': 'Norway', 'capital': 'Oslo'}, {'timezones': ['Asia/Katmandu'], 'alpha-2-code': 'NP', 'alpha-3-code': 'NPL', 'continent': 'Asia', 'name': 'Nepal', 'capital': 'Kathmandu'}, {'timezones': ['Pacific/Nauru'], 'alpha-2-code': 'NR', 'alpha-3-code': 'NRU', 'continent': 'Oceania', 'name': 'Nauru', 'capital': 'Yaren'}, {'timezones': ['Pacific/Auckland', 'Pacific/Chatham'], 'alpha-2-code': 'NZ', 'alpha-3-code': 'NZL', 'continent': 'Oceania', 'name': 'New Zealand', 'capital': 'Wellington'}, {'timezones': ['Asia/Muscat'], 'alpha-2-code': 'OM', 'alpha-3-code': 'OMN', 'continent': 'Asia', 'name': 'Oman', 'capital': 'Muscat'}, {'timezones': ['America/Panama'], 'alpha-2-code': 'PA', 'alpha-3-code': 'PAN', 'continent': 'North America', 'name': 'Panama', 'capital': 'Panama City'}, {'timezones': ['America/Lima'], 'alpha-2-code': 'PE', 'alpha-3-code': 'PER', 'continent': 'South America', 'name': 'Peru', 'capital': 'Lima'}, {'timezones': ['Pacific/Port_Moresby'], 'alpha-2-code': 'PG', 'alpha-3-code': 'PNG', 'continent': 'Oceania', 'name': 'Papua New Guinea', 'capital': 'Port Moresby'}, {'timezones': ['Asia/Manila'], 'alpha-2-code': 'PH', 'alpha-3-code': 'PHL', 'continent': 'Asia', 'name': 'Philippines', 'capital': 'Manila'}, {'timezones': ['Asia/Karachi'], 'alpha-2-code': 'PK', 'alpha-3-code': 'PAK', 'continent': 'Asia', 'name': 'Pakistan', 'capital': 'Islamabad'}, {'timezones': ['Europe/Warsaw'], 'alpha-2-code': 'PL', 'alpha-3-code': 'POL', 'continent': 'Europe', 'name': 'Poland', 'capital': 'Warsaw'}, {'timezones': ['Europe/Lisbon', 'Atlantic/Madeira', 'Atlantic/Azores'], 'alpha-2-code': 'PT', 'alpha-3-code': 'PRT', 'continent': 'Europe', 'name': 'Portugal', 'capital': 'Lisbon'}, {'timezones': ['Pacific/Palau'], 'alpha-2-code': 'PW', 'alpha-3-code': 'PLW', 'continent': 'Oceania', 'name': 'Palau', 'capital': 'Ngerulmud'}, {'timezones': ['America/Asuncion'], 'alpha-2-code': 'PY', 'alpha-3-code': 'PRY', 'continent': 'South America', 'name': 'Paraguay', 'capital': 'Asunción'}, {'timezones': ['Asia/Qatar'], 'alpha-2-code': 'QA', 'alpha-3-code': 'QAT', 'continent': 'Asia', 'name': 'Qatar', 'capital': 'Doha'}, {'timezones': ['Europe/Bucharest'], 'alpha-2-code': 'RO', 'alpha-3-code': 'ROU', 'continent': 'Europe', 'name': 
'Romania', 'capital': 'Bucharest'}, {'timezones': ['Europe/Kaliningrad', 'Europe/Moscow', 'Europe/Volgograd', 'Europe/Samara', 'Asia/Yekaterinburg', 'Asia/Omsk', 'Asia/Novosibirsk', 'Asia/Krasnoyarsk', 'Asia/Irkutsk', 'Asia/Yakutsk', 'Asia/Vladivostok', 'Asia/Sakhalin', 'Asia/Magadan', 'Asia/Kamchatka', 'Asia/Anadyr'], 'alpha-2-code': 'RU', 'alpha-3-code': 'RUS', 'continent': 'Europe', 'name': 'Russia', 'capital': 'Moscow'}, {'timezones': ['Africa/Kigali'], 'alpha-2-code': 'RW', 'alpha-3-code': 'RWA', 'continent': 'Africa', 'name': 'Rwanda', 'capital': 'Kigali'}, {'timezones': ['Asia/Riyadh'], 'alpha-2-code': 'SA', 'alpha-3-code': 'SAU', 'continent': 'Asia', 'name': 'Saudi Arabia', 'capital': 'Riyadh'}, {'timezones': ['Pacific/Guadalcanal'], 'alpha-2-code': 'SB', 'alpha-3-code': 'SLB', 'continent': 'Oceania', 'name': 'Solomon Islands', 'capital': 'Honiara'}, {'timezones': ['Indian/Mahe'], 'alpha-2-code': 'SC', 'alpha-3-code': 'SYC', 'continent': 'Africa', 'name': 'Seychelles', 'capital': 'Victoria'}, {'timezones': ['Africa/Khartoum'], 'alpha-2-code': 'SD', 'alpha-3-code': 'SDN', 'continent': 'Africa', 'name': 'Sudan', 'capital': 'Khartoum'}, {'timezones': ['Europe/Stockholm'], 'alpha-2-code': 'SE', 'alpha-3-code': 'SWE', 'continent': 'Europe', 'name': 'Sweden', 'capital': 'Stockholm'}, {'timezones': ['Asia/Singapore'], 'alpha-2-code': 'SG', 'alpha-3-code': 'SGP', 'continent': 'Asia', 'name': 'Singapore', 'capital': 'Singapore'}, {'timezones': ['Europe/Ljubljana'], 'alpha-2-code': 'SI', 'alpha-3-code': 'SVN', 'continent': 'Europe', 'name': 'Slovenia', 'capital': 'Ljubljana'}, {'timezones': ['Europe/Bratislava'], 'alpha-2-code': 'SK', 'alpha-3-code': 'SVK', 'continent': 'Europe', 'name': 'Slovakia', 'capital': 'Bratislava'}, {'timezones': ['Africa/Freetown'], 'alpha-2-code': 'SL', 'alpha-3-code': 'SLE', 'continent': 'Africa', 'name': 'Sierra Leone', 'capital': 'Freetown'}, {'timezones': ['Europe/San_Marino'], 'alpha-2-code': 'SM', 'alpha-3-code': 'SMR', 'continent': 'Europe', 'name': 'San Marino', 'capital': 'San Marino'}, {'timezones': ['Africa/Dakar'], 'alpha-2-code': 'SN', 'alpha-3-code': 'SEN', 'continent': 'Africa', 'name': 'Senegal', 'capital': 'Dakar'}, {'timezones': ['Africa/Mogadishu'], 'alpha-2-code': 'SO', 'alpha-3-code': 'SOM', 'continent': 'Africa', 'name': 'Somalia', 'capital': 'Mogadishu'}, {'timezones': ['America/Paramaribo'], 'alpha-2-code': 'SR', 'alpha-3-code': 'SUR', 'continent': 'South America', 'name': 'Suriname', 'capital': 'Paramaribo'}, {'timezones': ['Africa/Sao_Tome'], 'alpha-2-code': 'ST', 'alpha-3-code': 'STP', 'continent': 'Africa', 'name': 'São Tomé and Príncipe', 'capital': 'São Tomé'}, {'timezones': ['Asia/Damascus'], 'alpha-2-code': 'SY', 'alpha-3-code': 'SYR', 'continent': 'Asia', 'name': 'Syria', 'capital': 'Damascus'}, {'timezones': ['Africa/Lome'], 'alpha-2-code': 'TG', 'alpha-3-code': 'TGO', 'continent': 'Africa', 'name': 'Togo', 'capital': 'Lomé'}, {'timezones': ['Asia/Bangkok'], 'alpha-2-code': 'TH', 'alpha-3-code': 'THA', 'continent': 'Asia', 'name': 'Thailand', 'capital': 'Bangkok'}, {'timezones': ['Asia/Dushanbe'], 'alpha-2-code': 'TJ', 'alpha-3-code': 'TJK', 'continent': 'Asia', 'name': 'Tajikistan', 'capital': 'Dushanbe'}, {'timezones': ['Asia/Ashgabat'], 'alpha-2-code': 'TM', 'alpha-3-code': 'TKM', 'continent': 'Asia', 'name': 'Turkmenistan', 'capital': 'Ashgabat'}, {'timezones': ['Africa/Tunis'], 'alpha-2-code': 'TN', 'alpha-3-code': 'TUN', 'continent': 'Africa', 'name': 'Tunisia', 'capital': 'Tunis'}, 
{'timezones': ['Pacific/Tongatapu'], 'alpha-2-code': 'TO', 'alpha-3-code': 'TON', 'continent': 'Oceania', 'name': 'Tonga', 'capital': 'Nukuʻalofa'}, {'timezones': ['Europe/Istanbul'], 'alpha-2-code': 'TR', 'alpha-3-code': 'TUR', 'continent': 'Asia', 'name': 'Turkey', 'capital': 'Ankara'}, {'timezones': ['America/Port_of_Spain'], 'alpha-2-code': 'TT', 'alpha-3-code': 'TTO', 'continent': 'North America', 'name': 'Trinidad and Tobago', 'capital': 'Port of Spain'}, {'timezones': ['Pacific/Funafuti'], 'alpha-2-code': 'TV', 'alpha-3-code': 'TUV', 'continent': 'Oceania', 'name': 'Tuvalu', 'capital': 'Funafuti'}, {'timezones': ['Africa/Dar_es_Salaam'], 'alpha-2-code': 'TZ', 'alpha-3-code': 'TZA', 'continent': 'Africa', 'name': 'Tanzania', 'capital': 'Dodoma'}, {'timezones': ['Europe/Kiev', 'Europe/Uzhgorod', 'Europe/Zaporozhye', 'Europe/Simferopol'], 'alpha-2-code': 'UA', 'alpha-3-code': 'UKR', 'continent': 'Europe', 'name': 'Ukraine', 'capital': 'Kiev'}, {'timezones': ['Africa/Kampala'], 'alpha-2-code': 'UG', 'alpha-3-code': 'UGA', 'continent': 'Africa', 'name': 'Uganda', 'capital': 'Kampala'}, {'timezones': ['America/New_York', 'America/Detroit', 'America/Kentucky/Louisville', 'America/Kentucky/Monticello', 'America/Indiana/Indianapolis', 'America/Indiana/Marengo', 'America/Indiana/Knox', 'America/Indiana/Vevay', 'America/Chicago', 'America/Indiana/Vincennes', 'America/Indiana/Petersburg', 'America/Menominee', 'America/North_Dakota/Center', 'America/North_Dakota/New_Salem', 'America/Denver', 'America/Boise', 'America/Shiprock', 'America/Phoenix', 'America/Los_Angeles', 'America/Anchorage', 'America/Juneau', 'America/Yakutat', 'America/Nome', 'America/Adak', 'Pacific/Honolulu'], 'alpha-2-code': 'US', 'alpha-3-code': 'USA', 'continent': 'North America', 'name': 'United States', 'capital': 'Washington, D.C.'}, {'timezones': ['America/Montevideo'], 'alpha-2-code': 'UY', 'alpha-3-code': 'URY', 'continent': 'South America', 'name': 'Uruguay', 'capital': 'Montevideo'}, {'timezones': ['Asia/Samarkand', 'Asia/Tashkent'], 'alpha-2-code': 'UZ', 'alpha-3-code': 'UZB', 'continent': 'Asia', 'name': 'Uzbekistan', 'capital': 'Tashkent'}, {'timezones': ['Europe/Vatican'], 'alpha-2-code': 'VA', 'alpha-3-code': 'VAT', 'continent': 'Europe', 'name': 'Vatican City', 'capital': 'Vatican City'}, {'timezones': ['America/Caracas'], 'alpha-2-code': 'VE', 'alpha-3-code': 'VEN', 'continent': 'South America', 'name': 'Venezuela', 'capital': 'Caracas'}, {'timezones': ['Asia/Saigon'], 'alpha-2-code': 'VN', 'alpha-3-code': 'VNM', 'continent': 'Asia', 'name': 'Vietnam', 'capital': 'Hanoi'}, {'timezones': ['Pacific/Efate'], 'alpha-2-code': 'VU', 'alpha-3-code': 'VUT', 'continent': 'Oceania', 'name': 'Vanuatu', 'capital': 'Port Vila'}, {'timezones': ['Asia/Aden'], 'alpha-2-code': 'YE', 'alpha-3-code': 'YEM', 'continent': 'Asia', 'name': 'Yemen', 'capital': "Sana'a"}, {'timezones': ['Africa/Lusaka'], 'alpha-2-code': 'ZM', 'alpha-3-code': 'ZMB', 'continent': 'Africa', 'name': 'Zambia', 'capital': 'Lusaka'}, {'timezones': ['Africa/Harare'], 'alpha-2-code': 'ZW', 'alpha-3-code': 'ZWE', 'continent': 'Africa', 'name': 'Zimbabwe', 'capital': 'Harare'}, {'timezones': ['Africa/Algiers'], 'alpha-2-code': 'DZ', 'alpha-3-code': 'DZA', 'continent': 'Africa', 'name': 'Algeria', 'capital': 'Algiers'}, {'timezones': ['Europe/Sarajevo'], 'alpha-2-code': 'BA', 'alpha-3-code': 'BIH', 'continent': 'Europe', 'name': 'Bosnia and Herzegovina', 'capital': 'Sarajevo'}, {'timezones': ['Asia/Phnom_Penh'], 'alpha-2-code': 'KH', 'alpha-3-code': 'KHM', 
'continent': 'Asia', 'name': 'Cambodia', 'capital': 'Phnom Penh'}, {'timezones': ['Africa/Bangui'], 'alpha-2-code': 'CF', 'alpha-3-code': 'CAF', 'continent': 'Africa', 'name': 'Central African Republic', 'capital': 'Bangui'}, {'timezones': ['Africa/Ndjamena'], 'alpha-2-code': 'TD', 'alpha-3-code': 'TCD', 'continent': 'Africa', 'name': 'Chad', 'capital': "N'Djamena"}, {'timezones': ['Indian/Comoro'], 'alpha-2-code': 'KM', 'alpha-3-code': 'COM', 'continent': 'Africa', 'name': 'Comoros', 'capital': 'Moroni'}, {'timezones': ['Europe/Zagreb'], 'alpha-2-code': 'HR', 'alpha-3-code': 'HRV', 'continent': 'Europe', 'name': 'Croatia', 'capital': 'Zagreb'}, {'timezones': ['Asia/Dili'], 'alpha-2-code': 'TL', 'alpha-3-code': 'TLS', 'continent': 'Asia', 'name': 'East Timor', 'capital': 'Dili'}, {'timezones': ['America/El_Salvador'], 'alpha-2-code': 'SV', 'alpha-3-code': 'SLV', 'continent': 'North America', 'name': 'El Salvador', 'capital': 'San Salvador'}, {'timezones': ['Africa/Malabo'], 'alpha-2-code': 'GQ', 'alpha-3-code': 'GNQ', 'continent': 'Africa', 'name': 'Equatorial Guinea', 'capital': 'Malabo'}, {'timezones': ['America/Grenada'], 'alpha-2-code': 'GD', 'alpha-3-code': 'GRD', 'continent': 'North America', 'name': 'Grenada', 'capital': "St. George's"}, {'timezones': ['Asia/Almaty', 'Asia/Qyzylorda', 'Asia/Aqtobe', 'Asia/Aqtau', 'Asia/Oral'], 'alpha-2-code': 'KZ', 'alpha-3-code': 'KAZ', 'continent': 'Asia', 'name': 'Kazakhstan', 'capital': 'Astana'}, {'timezones': ['Asia/Vientiane'], 'alpha-2-code': 'LA', 'alpha-3-code': 'LAO', 'continent': 'Asia', 'name': 'Laos', 'capital': 'Vientiane'}, {'timezones': ['Pacific/Truk', 'Pacific/Ponape', 'Pacific/Kosrae'], 'alpha-2-code': 'FM', 'alpha-3-code': 'FSM', 'continent': 'Oceania', 'name': 'Federated States of Micronesia', 'capital': 'Palikir'}, {'timezones': ['Europe/Chisinau'], 'alpha-2-code': 'MD', 'alpha-3-code': 'MDA', 'continent': 'Europe', 'name': 'Moldova', 'capital': 'Chişinău'}, {'timezones': ['Europe/Monaco'], 'alpha-2-code': 'MC', 'alpha-3-code': 'MCO', 'continent': 'Europe', 'name': 'Monaco', 'capital': 'Monaco'}, {'timezones': ['Europe/Podgorica'], 'alpha-2-code': 'ME', 'alpha-3-code': 'MNE', 'continent': 'Europe', 'name': 'Montenegro', 'capital': 'Podgorica'}, {'timezones': ['Africa/Casablanca'], 'alpha-2-code': 'MA', 'alpha-3-code': 'MAR', 'continent': 'Africa', 'name': 'Morocco', 'capital': 'Rabat'}, {'timezones': ['America/St_Kitts'], 'alpha-2-code': 'KN', 'alpha-3-code': 'KNA', 'continent': 'North America', 'name': 'Saint Kitts and Nevis', 'capital': 'Basseterre'}, {'timezones': ['America/St_Lucia'], 'alpha-2-code': 'LC', 'alpha-3-code': 'LCA', 'continent': 'North America', 'name': 'Saint Lucia', 'capital': 'Castries'}, {'timezones': ['America/St_Vincent'], 'alpha-2-code': 'VC', 'alpha-3-code': 'VCT', 'continent': 'North America', 'name': 'Saint Vincent and the Grenadines', 'capital': 'Kingstown'}, {'timezones': ['Pacific/Apia'], 'alpha-2-code': 'WS', 'alpha-3-code': 'WSM', 'continent': 'Oceania', 'name': 'Samoa', 'capital': 'Apia'}, {'timezones': ['Europe/Belgrade'], 'alpha-2-code': 'RS', 'alpha-3-code': 'SRB', 'continent': 'Europe', 'name': 'Serbia', 'capital': 'Belgrade'}, {'timezones': ['Africa/Johannesburg'], 'alpha-2-code': 'ZA', 'alpha-3-code': 'ZAF', 'continent': 'Africa', 'name': 'South Africa', 'capital': 'Pretoria'}, {'timezones': ['Europe/Madrid', 'Africa/Ceuta', 'Atlantic/Canary'], 'alpha-2-code': 'ES', 'alpha-3-code': 'ESP', 'continent': 'Europe', 'name': 'Spain', 'capital': 'Madrid'}, {'timezones': 
['Asia/Colombo'], 'alpha-2-code': 'LK', 'alpha-3-code': 'LKA', 'continent': 'Asia', 'name': 'Sri Lanka', 'capital': 'Sri Jayewardenepura Kotte'}, {'timezones': ['Africa/Mbabane'], 'alpha-2-code': 'SZ', 'alpha-3-code': 'SWZ', 'continent': 'Africa', 'name': 'Swaziland', 'capital': 'Mbabane'}, {'timezones': ['Europe/Zurich'], 'alpha-2-code': 'CH', 'alpha-3-code': 'CHE', 'continent': 'Europe', 'name': 'Switzerland', 'capital': 'Bern'}, {'timezones': ['Asia/Dubai'], 'alpha-2-code': 'AE', 'alpha-3-code': 'ARE', 'continent': 'Asia', 'name': 'United Arab Emirates', 'capital': 'Abu Dhabi'}, {'timezones': ['Europe/London'], 'alpha-2-code': 'GB', 'alpha-3-code': 'GBR', 'continent': 'Europe', 'name': 'United Kingdom', 'capital': 'London'}, ] regex = re.compile(timedelta_pattern) def unix_time(self, end_datetime=None, start_datetime=None): """ Get a timestamp between January 1, 1970 and now, unless passed explicit start_datetime or end_datetime values. :example 1061306726 """ start_datetime = self._parse_start_datetime(start_datetime) end_datetime = self._parse_end_datetime(end_datetime) return self.generator.random.randint(start_datetime, end_datetime) def time_delta(self, end_datetime=None): """ Get a timedelta object """ start_datetime = self._parse_start_datetime('now') end_datetime = self._parse_end_datetime(end_datetime) seconds = end_datetime - start_datetime ts = self.generator.random.randint(*sorted([0, seconds])) return timedelta(seconds=ts) def date_time(self, tzinfo=None, end_datetime=None): """ Get a datetime object for a date between January 1, 1970 and now :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('2005-08-16 20:39:21') :return datetime """ # NOTE: On windows, the lowest value you can get from windows is 86400 # on the first day. Known python issue: # https://bugs.python.org/issue30684 return datetime(1970, 1, 1, tzinfo=tzinfo) + \ timedelta(seconds=self.unix_time(end_datetime=end_datetime)) def date_time_ad(self, tzinfo=None, end_datetime=None, start_datetime=None): """ Get a datetime object for a date between January 1, 001 and now :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('1265-03-22 21:15:52') :return datetime """ # 1970-01-01 00:00:00 UTC minus 62135596800 seconds is # 0001-01-01 00:00:00 UTC. Since _parse_end_datetime() is used # elsewhere where a default value of 0 is expected, we can't # simply change that class method to use this magic number as a # default value when None is provided. start_time = -62135596800 if start_datetime is None else self._parse_start_datetime(start_datetime) end_datetime = self._parse_end_datetime(end_datetime) ts = self.generator.random.randint(start_time, end_datetime) # NOTE: using datetime.fromtimestamp(ts) directly will raise # a "ValueError: timestamp out of range for platform time_t" # on some platforms due to system C functions; # see http://stackoverflow.com/a/10588133/2315612 # NOTE: On windows, the lowest value you can get from windows is 86400 # on the first day. 
Known python issue: # https://bugs.python.org/issue30684 return datetime(1970, 1, 1, tzinfo=tzinfo) + timedelta(seconds=ts) def iso8601(self, tzinfo=None, end_datetime=None): """ :param tzinfo: timezone, instance of datetime.tzinfo subclass :example '2003-10-21T16:05:52+0000' """ return self.date_time(tzinfo, end_datetime=end_datetime).isoformat() def date(self, pattern='%Y-%m-%d', end_datetime=None): """ Get a date string between January 1, 1970 and now :param pattern format :example '2008-11-27' """ return self.date_time(end_datetime=end_datetime).strftime(pattern) def date_object(self, end_datetime=None): """ Get a date object between January 1, 1970 and now :example datetime.date(2016, 9, 20) """ return self.date_time(end_datetime=end_datetime).date() def time(self, pattern='%H:%M:%S', end_datetime=None): """ Get a time string (24h format by default) :param pattern format :example '15:02:34' """ return self.date_time( end_datetime=end_datetime).time().strftime(pattern) def time_object(self, end_datetime=None): """ Get a time object :example datetime.time(15, 56, 56, 772876) """ return self.date_time(end_datetime=end_datetime).time() @classmethod def _parse_start_datetime(cls, value): if value is None: return 0 return cls._parse_date_time(value) @classmethod def _parse_end_datetime(cls, value): if value is None: return int(time()) return cls._parse_date_time(value) @classmethod def _parse_date_string(cls, value): parts = cls.regex.match(value) if not parts: raise ParseError("Can't parse date string `{}`.".format(value)) parts = parts.groupdict() time_params = {} for (name_, param_) in parts.items(): if param_: time_params[name_] = int(param_) if 'years' in time_params: if 'days' not in time_params: time_params['days'] = 0 time_params['days'] += 365.24 * time_params.pop('years') if 'months' in time_params: if 'days' not in time_params: time_params['days'] = 0 time_params['days'] += 30.42 * time_params.pop('months') if not time_params: raise ParseError("Can't parse date string `{}`.".format(value)) return time_params @classmethod def _parse_timedelta(cls, value): if isinstance(value, timedelta): return value.total_seconds() if is_string(value): time_params = cls._parse_date_string(value) return timedelta(**time_params).total_seconds() if isinstance(value, (int, float)): return value raise ParseError("Invalid format for timedelta '{0}'".format(value)) @classmethod def _parse_date_time(cls, value, tzinfo=None): if isinstance(value, (datetime, date, real_datetime, real_date)): return datetime_to_timestamp(value) now = datetime.now(tzinfo) if isinstance(value, timedelta): return datetime_to_timestamp(now + value) if is_string(value): if value == 'now': return datetime_to_timestamp(datetime.now(tzinfo)) time_params = cls._parse_date_string(value) return datetime_to_timestamp(now + timedelta(**time_params)) if isinstance(value, int): return datetime_to_timestamp(now + timedelta(value)) raise ParseError("Invalid format for date '{0}'".format(value)) @classmethod def _parse_date(cls, value): if isinstance(value, (datetime, real_datetime)): return value.date() elif isinstance(value, (date, real_date)): return value today = date.today() if isinstance(value, timedelta): return today + value if is_string(value): if value in ('today', 'now'): return today time_params = cls._parse_date_string(value) return today + timedelta(**time_params) if isinstance(value, int): return today + timedelta(value) raise ParseError("Invalid format for date '{0}'".format(value)) def date_time_between(self, 
start_date='-30y', end_date='now', tzinfo=None): """ Get a DateTime object based on a random date between two given dates. Accepts date strings that can be recognized by strtotime(). :param start_date Defaults to 30 years ago :param end_date Defaults to "now" :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('1999-02-02 11:42:52') :return DateTime """ start_date = self._parse_date_time(start_date, tzinfo=tzinfo) end_date = self._parse_date_time(end_date, tzinfo=tzinfo) if end_date - start_date <= 1: ts = start_date + self.generator.random.random() else: ts = self.generator.random.randint(start_date, end_date) if tzinfo is None: return datetime(1970, 1, 1, tzinfo=tzinfo) + timedelta(seconds=ts) else: return ( datetime(1970, 1, 1, tzinfo=tzutc()) + timedelta(seconds=ts) ).astimezone(tzinfo) def date_between(self, start_date='-30y', end_date='today'): """ Get a Date object based on a random date between two given dates. Accepts date strings that can be recognized by strtotime(). :param start_date Defaults to 30 years ago :param end_date Defaults to "today" :example Date('1999-02-02') :return Date """ start_date = self._parse_date(start_date) end_date = self._parse_date(end_date) return self.date_between_dates(date_start=start_date, date_end=end_date) def future_datetime(self, end_date='+30d', tzinfo=None): """ Get a DateTime object based on a random date between 1 second from now and a given date. Accepts date strings that can be recognized by strtotime(). :param end_date Defaults to "+30d" :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('1999-02-02 11:42:52') :return DateTime """ return self.date_time_between( start_date='+1s', end_date=end_date, tzinfo=tzinfo, ) def future_date(self, end_date='+30d', tzinfo=None): """ Get a Date object based on a random date between 1 day from now and a given date. Accepts date strings that can be recognized by strtotime(). :param end_date Defaults to "+30d" :param tzinfo: timezone, instance of datetime.tzinfo subclass :example Date('1999-02-02') :return Date """ return self.date_between(start_date='+1d', end_date=end_date) def past_datetime(self, start_date='-30d', tzinfo=None): """ Get a DateTime object based on a random date between a given date and 1 second ago. Accepts date strings that can be recognized by strtotime(). :param start_date Defaults to "-30d" :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('1999-02-02 11:42:52') :return DateTime """ return self.date_time_between( start_date=start_date, end_date='-1s', tzinfo=tzinfo, ) def past_date(self, start_date='-30d', tzinfo=None): """ Get a Date object based on a random date between a given date and 1 day ago. Accepts date strings that can be recognized by strtotime(). :param start_date Defaults to "-30d" :param tzinfo: timezone, instance of datetime.tzinfo subclass :example Date('1999-02-02') :return Date """ return self.date_between(start_date=start_date, end_date='-1d') def date_time_between_dates( self, datetime_start=None, datetime_end=None, tzinfo=None): """ Takes two DateTime objects and returns a random datetime between the two given datetimes. Accepts DateTime objects. 
:param datetime_start: DateTime :param datetime_end: DateTime :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('1999-02-02 11:42:52') :return DateTime """ if datetime_start is None: datetime_start = datetime.now(tzinfo) if datetime_end is None: datetime_end = datetime.now(tzinfo) timestamp = self.generator.random.randint( datetime_to_timestamp(datetime_start), datetime_to_timestamp(datetime_end), ) try: if tzinfo is None: pick = datetime.fromtimestamp(timestamp, tzlocal()) pick = pick.astimezone(tzutc()).replace(tzinfo=None) else: pick = datetime.fromtimestamp(timestamp, tzinfo) except OverflowError: raise OverflowError( "You specified an end date with a timestamp bigger than the maximum allowed on this" " system. Please specify an earlier date.", ) return pick def date_between_dates(self, date_start=None, date_end=None): """ Takes two Date objects and returns a random date between the two given dates. Accepts Date or Datetime objects :param date_start: Date :param date_end: Date :return Date """ return self.date_time_between_dates(date_start, date_end).date() def date_time_this_century( self, before_now=True, after_now=False, tzinfo=None): """ Gets a DateTime object for the current century. :param before_now: include days in current century before today :param after_now: include days in current century after today :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('2012-04-04 11:02:02') :return DateTime """ now = datetime.now(tzinfo) this_century_start = datetime( now.year - (now.year % 100), 1, 1, tzinfo=tzinfo) next_century_start = datetime( min(this_century_start.year + 100, MAXYEAR), 1, 1, tzinfo=tzinfo) if before_now and after_now: return self.date_time_between_dates( this_century_start, next_century_start, tzinfo) elif not before_now and after_now: return self.date_time_between_dates(now, next_century_start, tzinfo) elif not after_now and before_now: return self.date_time_between_dates(this_century_start, now, tzinfo) else: return now def date_time_this_decade( self, before_now=True, after_now=False, tzinfo=None): """ Gets a DateTime object for the decade year. :param before_now: include days in current decade before today :param after_now: include days in current decade after today :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('2012-04-04 11:02:02') :return DateTime """ now = datetime.now(tzinfo) this_decade_start = datetime( now.year - (now.year % 10), 1, 1, tzinfo=tzinfo) next_decade_start = datetime( min(this_decade_start.year + 10, MAXYEAR), 1, 1, tzinfo=tzinfo) if before_now and after_now: return self.date_time_between_dates( this_decade_start, next_decade_start, tzinfo) elif not before_now and after_now: return self.date_time_between_dates(now, next_decade_start, tzinfo) elif not after_now and before_now: return self.date_time_between_dates(this_decade_start, now, tzinfo) else: return now def date_time_this_year( self, before_now=True, after_now=False, tzinfo=None): """ Gets a DateTime object for the current year. 
:param before_now: include days in current year before today :param after_now: include days in current year after today :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('2012-04-04 11:02:02') :return DateTime """ now = datetime.now(tzinfo) this_year_start = now.replace( month=1, day=1, hour=0, minute=0, second=0, microsecond=0) next_year_start = datetime(now.year + 1, 1, 1, tzinfo=tzinfo) if before_now and after_now: return self.date_time_between_dates( this_year_start, next_year_start, tzinfo) elif not before_now and after_now: return self.date_time_between_dates(now, next_year_start, tzinfo) elif not after_now and before_now: return self.date_time_between_dates(this_year_start, now, tzinfo) else: return now def date_time_this_month( self, before_now=True, after_now=False, tzinfo=None): """ Gets a DateTime object for the current month. :param before_now: include days in current month before today :param after_now: include days in current month after today :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('2012-04-04 11:02:02') :return DateTime """ now = datetime.now(tzinfo) this_month_start = now.replace( day=1, hour=0, minute=0, second=0, microsecond=0) next_month_start = this_month_start + \ relativedelta.relativedelta(months=1) if before_now and after_now: return self.date_time_between_dates( this_month_start, next_month_start, tzinfo) elif not before_now and after_now: return self.date_time_between_dates(now, next_month_start, tzinfo) elif not after_now and before_now: return self.date_time_between_dates(this_month_start, now, tzinfo) else: return now def date_this_century(self, before_today=True, after_today=False): """ Gets a Date object for the current century. :param before_today: include days in current century before today :param after_today: include days in current century after today :example Date('2012-04-04') :return Date """ today = date.today() this_century_start = date(today.year - (today.year % 100), 1, 1) next_century_start = date(this_century_start.year + 100, 1, 1) if before_today and after_today: return self.date_between_dates( this_century_start, next_century_start) elif not before_today and after_today: return self.date_between_dates(today, next_century_start) elif not after_today and before_today: return self.date_between_dates(this_century_start, today) else: return today def date_this_decade(self, before_today=True, after_today=False): """ Gets a Date object for the decade year. :param before_today: include days in current decade before today :param after_today: include days in current decade after today :example Date('2012-04-04') :return Date """ today = date.today() this_decade_start = date(today.year - (today.year % 10), 1, 1) next_decade_start = date(this_decade_start.year + 10, 1, 1) if before_today and after_today: return self.date_between_dates(this_decade_start, next_decade_start) elif not before_today and after_today: return self.date_between_dates(today, next_decade_start) elif not after_today and before_today: return self.date_between_dates(this_decade_start, today) else: return today def date_this_year(self, before_today=True, after_today=False): """ Gets a Date object for the current year. 
:param before_today: include days in current year before today :param after_today: include days in current year after today :example Date('2012-04-04') :return Date """ today = date.today() this_year_start = today.replace(month=1, day=1) next_year_start = date(today.year + 1, 1, 1) if before_today and after_today: return self.date_between_dates(this_year_start, next_year_start) elif not before_today and after_today: return self.date_between_dates(today, next_year_start) elif not after_today and before_today: return self.date_between_dates(this_year_start, today) else: return today def date_this_month(self, before_today=True, after_today=False): """ Gets a Date object for the current month. :param before_today: include days in current month before today :param after_today: include days in current month after today :example Date('2012-04-04') :return Date """ today = date.today() this_month_start = today.replace(day=1) next_month_start = this_month_start + \ relativedelta.relativedelta(months=1) if before_today and after_today: return self.date_between_dates(this_month_start, next_month_start) elif not before_today and after_today: return self.date_between_dates(today, next_month_start) elif not after_today and before_today: return self.date_between_dates(this_month_start, today) else: return today def time_series( self, start_date='-30d', end_date='now', precision=None, distrib=None, tzinfo=None): """ Returns a generator yielding tuples of ``(<datetime>, <value>)``. The data points will start at ``start_date``, and be at every time interval specified by ``precision``. ``distrib`` is a callable that accepts ``<datetime>`` and returns ``<value>`` """ start_date = self._parse_date_time(start_date, tzinfo=tzinfo) end_date = self._parse_date_time(end_date, tzinfo=tzinfo) if end_date < start_date: raise ValueError("`end_date` must be greater than `start_date`.") if precision is None: precision = (end_date - start_date) / 30 precision = self._parse_timedelta(precision) if distrib is None: def distrib(dt): return self.generator.random.uniform(0, precision) # noqa if not callable(distrib): raise ValueError( "`distrib` must be a callable. Got {} instead.".format(distrib)) datapoint = start_date while datapoint < end_date: dt = timestamp_to_datetime(datapoint, tzinfo) datapoint += precision yield (dt, distrib(dt)) def am_pm(self): return self.date('%p') def day_of_month(self): return self.date('%d') def day_of_week(self): return self.date('%A') def month(self): return self.date('%m') def month_name(self): return self.date('%B') def year(self): return self.date('%Y') def century(self): """ :example 'XVII' """ return self.random_element(self.centuries) def timezone(self): return self.generator.random.choice( self.random_element(self.countries)['timezones']) def date_of_birth(self, tzinfo=None, minimum_age=0, maximum_age=115): """ Generate a random date of birth represented as a Date object, constrained by optional minimum_age and maximum_age parameters. :param tzinfo Defaults to None. :param minimum_age Defaults to 0. :param maximum_age Defaults to 115. 
:example Date('1979-02-02') :return Date """ if not isinstance(minimum_age, int): raise TypeError("minimum_age must be an integer.") if not isinstance(maximum_age, int): raise TypeError("maximum_age must be an integer.") if (maximum_age < 0): raise ValueError("maximum_age must be greater than or equal to zero.") if (minimum_age < 0): raise ValueError("minimum_age must be greater than or equal to zero.") if (minimum_age > maximum_age): raise ValueError("minimum_age must be less than or equal to maximum_age.") # In order to return the full range of possible dates of birth, add one # year to the potential age cap and, if we land exactly on that widened # boundary, shift the result forward by one day. now = datetime.now(tzinfo).date() start_date = now.replace(year=now.year - (maximum_age+1)) end_date = now.replace(year=now.year - minimum_age) dob = self.date_time_ad(tzinfo=tzinfo, start_datetime=start_date, end_datetime=end_date).date() return dob if dob != start_date else dob + timedelta(days=1)
deanishe/alfred-fakeum
src/libs/faker/providers/date_time/__init__.py
Python
mit
82,424
[ "BWA" ]
7a516c1385f0745b2010848707f0af13d0cd4113a7f9d3ec8278697ed44b5253
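Editor's note (added commentary, not part of the stored file above): the provider's date_of_birth widens the sampling window by one year at the old end and then nudges a hit on that exact boundary forward by one day, which is what keeps both age extremes reachable without ever producing someone older than maximum_age. A minimal standalone Python sketch of that boundary rule follows; the helper name sketch_date_of_birth and the fixed today value are illustrative assumptions, not Faker API.

# Self-contained sketch of the date_of_birth boundary rule (illustrative only).
from datetime import date, timedelta
import random

def sketch_date_of_birth(minimum_age, maximum_age, today):
    # Oldest boundary is exclusive: exactly (maximum_age + 1) years before today.
    start = today.replace(year=today.year - (maximum_age + 1))
    # Youngest boundary is inclusive: exactly minimum_age years before today.
    end = today.replace(year=today.year - minimum_age)
    dob = start + timedelta(days=random.randint(0, (end - start).days))
    # A hit on the exclusive boundary gets shifted forward one day.
    return dob if dob != start else dob + timedelta(days=1)

today = date(2021, 6, 1)  # fixed so the check below is reproducible
for _ in range(1000):
    d = sketch_date_of_birth(18, 65, today)
    assert date(1955, 6, 1) < d <= date(2003, 6, 1)  # ages 18..65 inclusive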
# python3 # pylint: disable=g-bad-file-header # Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Utility functions for calculating likelihood.""" import chex import dataclasses import jax import jax.numpy as jnp from neural_testbed import base as testbed_base from neural_testbed.likelihood import base as likelihood_base def gaussian_log_likelihood(err: chex.Array, cov: chex.Array) -> float: """Calculates the Gaussian log likelihood of a multivariate normal.""" first_term = len(err) * jnp.log(2 * jnp.pi) _, second_term = jnp.linalg.slogdet(cov) third_term = jnp.einsum('ai,ab,bi->i', err, jnp.linalg.pinv(cov), err) return -0.5 * (first_term + second_term + third_term) def optimized_gaussian_ll(err: chex.Array) -> float: """Computes the Gaussian LL based on optimized residual MSE.""" optimized_cov = jnp.mean(err ** 2) * jnp.eye(len(err)) return gaussian_log_likelihood(err, optimized_cov) @dataclasses.dataclass class GaussianSampleKL(likelihood_base.SampleBasedKL): """Evaluates KL according to optimized Gaussian residual model.""" num_test_seeds: int num_enn_samples: int enn_sigma: float key: chex.PRNGKey def __call__( self, enn_sampler: testbed_base.EpistemicSampler, data_sampler: likelihood_base.GenerativeDataSampler, ) -> testbed_base.ENNQuality: """Evaluates KL according to optimized Gaussian residual model.""" batched_sampler = jax.vmap(enn_sampler, in_axes=[None, 0]) batched_ll = jax.vmap(gaussian_log_likelihood, in_axes=[0, None]) def kl_estimate(key: chex.PRNGKey) -> float: """Computes KL estimate on a single instance of test data.""" data, true_ll = data_sampler.test_data(key) tau = data.x.shape[0] data_keys = jax.random.split(key, self.num_enn_samples) samples = batched_sampler(data.x, data_keys) batched_err = samples - jnp.expand_dims(data.y, 0) chex.assert_shape(batched_err, [self.num_enn_samples, tau, 1]) # ENN uses the enn_sigma to compute likelihood of sampled data enn_cov = self.enn_sigma ** 2 * jnp.eye(tau) sampled_ll = batched_ll(batched_err, enn_cov) chex.assert_shape(sampled_ll, [self.num_enn_samples, 1]) # TODO(author2): Make sure of our KL computation. ave_ll = likelihood_base.average_sampled_log_likelihood(sampled_ll) return true_ll - ave_ll batched_kl = jax.jit(jax.vmap(kl_estimate)) kl_keys = jax.random.split(self.key, self.num_test_seeds) sampled_kl = batched_kl(kl_keys) return testbed_base.ENNQuality(kl_estimate=jnp.mean(sampled_kl)) @dataclasses.dataclass class GaussianSmoothedSampleKL(likelihood_base.SampleBasedKL): """Evaluates KL according to optimized Gaussian residual model.""" num_test_seeds: int num_enn_samples: int enn_sigma: float key: chex.PRNGKey cov_ridge: float = 1e-6 # To smooth out the covariance estimate def __call__( self, enn_sampler: testbed_base.EpistemicSampler, data_sampler: likelihood_base.GenerativeDataSampler, ) -> testbed_base.ENNQuality: """Evaluates KL according to optimized Gaussian residual model.""" 
batched_sampler = jax.vmap(enn_sampler, in_axes=[None, 0]) def kl_estimate(key: chex.PRNGKey) -> float: """Computes KL estimate on a single instance of test data.""" data_key, enn_key = jax.random.split(key) data, true_ll = data_sampler.test_data(data_key) tau = data.x.shape[0] # Forward the ENN at many samples and form smoothed Gaussian approximation enn_keys = jax.random.split(enn_key, self.num_enn_samples) enn_samples = batched_sampler(data.x, enn_keys) enn_mean = jnp.mean(enn_samples, axis=0) chex.assert_shape(enn_mean, [tau, 1]) # Estimates the covariance matrix with bias (simple variance in tau=1). enn_cov = jnp.cov(enn_samples[:, :, 0], rowvar=False, bias=True) if tau == 1: enn_cov = enn_cov[None, None] enn_cov += self.cov_ridge * jnp.eye(tau) chex.assert_shape(enn_cov, [tau, tau]) # Estimate KL based on combined distribution err = data.y - enn_mean cov = enn_cov + self.enn_sigma ** 2 * jnp.eye(tau) unnormalized_kl = true_ll - gaussian_log_likelihood(err, cov) return unnormalized_kl batched_kl = jax.jit(jax.vmap(kl_estimate)) kl_keys = jax.random.split(self.key, self.num_test_seeds) sampled_kl = batched_kl(kl_keys) return testbed_base.ENNQuality(kl_estimate=jnp.mean(sampled_kl))
deepmind/neural_testbed
neural_testbed/likelihood/regression.py
Python
apache-2.0
5,157
[ "Gaussian" ]
d77e61ac6d509fad335260e91b9e1dbeec194e37d44a6af5e01a3c6d90c32139
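Editor's note (added commentary, not part of the stored file above): gaussian_log_likelihood evaluates the multivariate normal log density once per sample column of err, using slogdet for the log normalizer and a pseudo-inverse for the quadratic form. The sanity check below restates the function so the snippet is self-contained and compares it against the closed form for an identity covariance, where the density factorizes into iid standard normals; the residual values are arbitrary.

import jax.numpy as jnp

def gaussian_log_likelihood(err, cov):
    # Restated from the file above: N(0, cov) log density per sample column.
    first_term = len(err) * jnp.log(2 * jnp.pi)
    _, second_term = jnp.linalg.slogdet(cov)
    third_term = jnp.einsum('ai,ab,bi->i', err, jnp.linalg.pinv(cov), err)
    return -0.5 * (first_term + second_term + third_term)

err = jnp.array([[0.5], [-1.0], [2.0]])  # tau=3 residuals, one sample column
ll = gaussian_log_likelihood(err, jnp.eye(3))
# With identity covariance this is just a sum of iid standard-normal log pdfs.
expected = jnp.sum(-0.5 * (jnp.log(2 * jnp.pi) + err[:, 0] ** 2))
assert jnp.allclose(ll[0], expected)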
""" Downloads feeds, keys, packages and icons. """ # Copyright (C) 2009, Thomas Leonard # See the README file for details, or visit http://0install.net. from zeroinstall import _, logger import os, sys from zeroinstall import support from zeroinstall.support import tasks, basedir, portable_rename from zeroinstall.injector.namespaces import XMLNS_IFACE, config_site from zeroinstall.injector import model from zeroinstall.injector.model import DownloadSource, Recipe, SafeException, escape, DistributionSource from zeroinstall.injector.iface_cache import PendingFeed, ReplayAttack from zeroinstall.injector.handler import NoTrustedKeys from zeroinstall.injector import download def _escape_slashes(path): return path.replace('/', '%23') def _get_feed_dir(feed): """The algorithm from 0mirror.""" if '#' in feed: raise SafeException(_("Invalid URL '%s'") % feed) scheme, rest = feed.split('://', 1) assert '/' in rest, "Missing / in %s" % feed domain, rest = rest.split('/', 1) for x in [scheme, domain, rest]: if not x or x.startswith('.'): raise SafeException(_("Invalid URL '%s'") % feed) return '/'.join(['feeds', scheme, domain, _escape_slashes(rest)]) class KeyInfoFetcher: """Fetches information about a GPG key from a key-info server. See L{Fetcher.fetch_key_info} for details. @since: 0.42 Example: >>> kf = KeyInfoFetcher(fetcher, 'https://server', fingerprint) >>> while True: print kf.info if kf.blocker is None: break print kf.status yield kf.blocker """ def __init__(self, fetcher, server, fingerprint): self.fingerprint = fingerprint self.info = [] self.blocker = None if server is None: return self.status = _('Fetching key information from %s...') % server dl = fetcher.download_url(server + '/key/' + fingerprint) from xml.dom import minidom @tasks.async def fetch_key_info(): tempfile = dl.tempfile try: yield dl.downloaded self.blocker = None tasks.check(dl.downloaded) tempfile.seek(0) doc = minidom.parse(tempfile) if doc.documentElement.localName != 'key-lookup': raise SafeException(_('Expected <key-lookup>, not <%s>') % doc.documentElement.localName) self.info += doc.documentElement.childNodes except Exception as ex: doc = minidom.parseString('<item vote="bad"/>') root = doc.documentElement root.appendChild(doc.createTextNode(_('Error getting key information: %s') % ex)) self.info.append(root) finally: tempfile.close() self.blocker = fetch_key_info() class Fetcher(object): """Downloads and stores various things. @ivar config: used to get handler, iface_cache and stores @type config: L{config.Config} @ivar key_info: caches information about GPG keys @type key_info: {str: L{KeyInfoFetcher}} """ __slots__ = ['config', 'key_info', '_scheduler', 'external_store'] def __init__(self, config): assert config.handler, "API change!" self.config = config self.key_info = {} self._scheduler = None self.external_store = os.environ.get('ZEROINSTALL_EXTERNAL_STORE') @property def handler(self): return self.config.handler @property def scheduler(self): if self._scheduler is None: from . import scheduler self._scheduler = scheduler.DownloadScheduler() return self._scheduler # (force is deprecated and ignored) @tasks.async def cook(self, required_digest, recipe, stores, force = False, impl_hint = None): """Follow a Recipe. @param impl_hint: the Implementation this is for (if any) as a hint for the GUI @see: L{download_impl} uses this method when appropriate""" # Maybe we're taking this metaphor too far? 
# Start a download for each ingredient blockers = [] steps = [] try: for stepdata in recipe.steps: cls = StepRunner.class_for(stepdata) step = cls(stepdata, impl_hint=impl_hint) step.prepare(self, blockers) steps.append(step) while blockers: yield blockers tasks.check(blockers) blockers = [b for b in blockers if not b.happened] if self.external_store: # Note: external_store will not yet work with non-<archive> steps. streams = [step.stream for step in steps] self._add_to_external_store(required_digest, recipe.steps, streams) else: # Create an empty directory for the new implementation store = stores.stores[0] tmpdir = store.get_tmp_dir_for(required_digest) try: # Unpack each of the downloaded archives into it in turn for step in steps: step.apply(tmpdir) # Check that the result is correct and store it in the cache store.check_manifest_and_rename(required_digest, tmpdir) tmpdir = None finally: # If unpacking fails, remove the temporary directory if tmpdir is not None: support.ro_rmtree(tmpdir) finally: for step in steps: try: step.close() except IOError as ex: # Can get "close() called during # concurrent operation on the same file # object." if we're unlucky (Python problem). logger.info("Failed to close: %s", ex) def _get_mirror_url(self, feed_url, resource): """Return the URL of a mirror for this feed.""" if self.config.mirror is None: return None if feed_url.startswith('http://') or feed_url.startswith('https://'): if support.urlparse(feed_url).hostname == 'localhost': return None return '%s/%s/%s' % (self.config.mirror, _get_feed_dir(feed_url), resource) return None def get_feed_mirror(self, url): """Return the URL of a mirror for this feed.""" return self._get_mirror_url(url, 'latest.xml') def _get_archive_mirror(self, source): if self.config.mirror is None: return None if support.urlparse(source.url).hostname == 'localhost': return None if sys.version_info[0] > 2: from urllib.parse import quote else: from urllib import quote return '{mirror}/archive/{archive}'.format( mirror = self.config.mirror, archive = quote(source.url.replace('/', '#'), safe = '')) def _get_impl_mirror(self, impl): return self._get_mirror_url(impl.feed.url, 'impl/' + _escape_slashes(impl.id)) @tasks.async def get_packagekit_feed(self, feed_url): """Send a query to PackageKit (if available) for information about this package. On success, the result is added to iface_cache. """ assert feed_url.startswith('distribution:'), feed_url master_feed = self.config.iface_cache.get_feed(feed_url.split(':', 1)[1]) if master_feed: fetch = self.config.iface_cache.distro.fetch_candidates(master_feed) if fetch: yield fetch tasks.check(fetch) # Force feed to be regenerated with the new information self.config.iface_cache.get_feed(feed_url, force = True) def download_and_import_feed(self, feed_url, iface_cache = None): """Download the feed, download any required keys, confirm trust if needed and import. 
@param feed_url: the feed to be downloaded @type feed_url: str @param iface_cache: (deprecated)""" from .download import DownloadAborted assert iface_cache is None or iface_cache is self.config.iface_cache if not self.config.handler.dry_run: self.config.iface_cache.mark_as_checking(feed_url) logger.debug(_("download_and_import_feed %(url)s"), {'url': feed_url}) assert not os.path.isabs(feed_url) if feed_url.startswith('distribution:'): return self.get_packagekit_feed(feed_url) primary = self._download_and_import_feed(feed_url, use_mirror = False) @tasks.named_async("monitor feed downloads for " + feed_url) def wait_for_downloads(primary): # Download just the upstream feed, unless it takes too long... timeout = tasks.TimeoutBlocker(5, 'Mirror timeout') # 5 seconds yield primary, timeout tasks.check(timeout) try: tasks.check(primary) if primary.happened: return # OK, primary succeeded! # OK, maybe it's just being slow... logger.info("Feed download from %s is taking a long time.", feed_url) primary_ex = None except NoTrustedKeys as ex: raise # Don't bother trying the mirror if we have a trust problem except ReplayAttack as ex: raise # Don't bother trying the mirror if we have a replay attack except DownloadAborted as ex: raise # Don't bother trying the mirror if the user cancelled except SafeException as ex: # Primary failed primary = None primary_ex = ex logger.warning(_("Feed download from %(url)s failed: %(exception)s"), {'url': feed_url, 'exception': ex}) # Start downloading from mirror... mirror = self._download_and_import_feed(feed_url, use_mirror = True) # Wait until both mirror and primary tasks are complete... while True: blockers = list(filter(None, [primary, mirror])) if not blockers: break yield blockers if primary: try: tasks.check(primary) if primary.happened: primary = None # No point carrying on with the mirror once the primary has succeeded if mirror: logger.info(_("Primary feed download succeeded; aborting mirror download for %s") % feed_url) mirror.dl.abort() except SafeException as ex: primary = None primary_ex = ex logger.info(_("Feed download from %(url)s failed; still trying mirror: %(exception)s"), {'url': feed_url, 'exception': ex}) if mirror: try: tasks.check(mirror) if mirror.happened: mirror = None if primary_ex: # We already warned; no need to raise an exception too, # as the mirror download succeeded. primary_ex = None except ReplayAttack as ex: logger.info(_("Version from mirror is older than cached version; ignoring it: %s"), ex) mirror = None primary_ex = None except SafeException as ex: logger.info(_("Mirror download failed: %s"), ex) mirror = None if primary_ex: raise primary_ex return wait_for_downloads(primary) def _download_and_import_feed(self, feed_url, use_mirror): """Download and import a feed. 
@param use_mirror: False to use primary location; True to use mirror.""" if use_mirror: url = self.get_feed_mirror(feed_url) if url is None: return None logger.info(_("Trying mirror server for feed %s") % feed_url) else: url = feed_url if self.config.handler.dry_run: print(_("[dry-run] downloading feed {url}").format(url = url)) dl = self.download_url(url, hint = feed_url) stream = dl.tempfile @tasks.named_async("fetch_feed " + url) def fetch_feed(): try: yield dl.downloaded tasks.check(dl.downloaded) pending = PendingFeed(feed_url, stream) if use_mirror: # If we got the feed from a mirror, get the key from there too key_mirror = self.config.mirror + '/keys/' else: key_mirror = None keys_downloaded = tasks.Task(pending.download_keys(self, feed_hint = feed_url, key_mirror = key_mirror), _("download keys for %s") % feed_url) yield keys_downloaded.finished tasks.check(keys_downloaded.finished) dry_run = self.handler.dry_run if not self.config.iface_cache.update_feed_if_trusted(pending.url, pending.sigs, pending.new_xml, dry_run = dry_run): blocker = self.config.trust_mgr.confirm_keys(pending) if blocker: yield blocker tasks.check(blocker) if not self.config.iface_cache.update_feed_if_trusted(pending.url, pending.sigs, pending.new_xml, dry_run = dry_run): raise NoTrustedKeys(_("No signing keys trusted; not importing")) finally: stream.close() task = fetch_feed() task.dl = dl return task def fetch_key_info(self, fingerprint): try: return self.key_info[fingerprint] except KeyError: if self.config.handler.dry_run: print(_("[dry-run] asking {url} about key {key}").format( url = self.config.key_info_server, key = fingerprint)) self.key_info[fingerprint] = key_info = KeyInfoFetcher(self, self.config.key_info_server, fingerprint) return key_info # (force is deprecated and ignored) def download_impl(self, impl, retrieval_method, stores, force = False): """Download an implementation. @param impl: the selected implementation @type impl: L{model.ZeroInstallImplementation} @param retrieval_method: a way of getting the implementation (e.g. 
an Archive or a Recipe) @type retrieval_method: L{model.RetrievalMethod} @param stores: where to store the downloaded implementation @type stores: L{zerostore.Stores} @rtype: L{tasks.Blocker}""" assert impl assert retrieval_method if isinstance(retrieval_method, DistributionSource): return retrieval_method.install(self.handler) from zeroinstall.zerostore import manifest, parse_algorithm_digest_pair best = None for digest in impl.digests: alg_name, digest_value = parse_algorithm_digest_pair(digest) alg = manifest.algorithms.get(alg_name, None) if alg and (best is None or best.rating < alg.rating): best = alg required_digest = digest if best is None: if not impl.digests: raise SafeException(_("No <manifest-digest> given for '%(implementation)s' version %(version)s") % {'implementation': impl.feed.get_name(), 'version': impl.get_version()}) raise SafeException(_("Unknown digest algorithms '%(algorithms)s' for '%(implementation)s' version %(version)s") % {'algorithms': impl.digests, 'implementation': impl.feed.get_name(), 'version': impl.get_version()}) @tasks.async def download_impl(method): original_exception = None while True: try: if isinstance(method, DownloadSource): blocker, stream = self.download_archive(method, impl_hint = impl, may_use_mirror = original_exception is None) try: yield blocker tasks.check(blocker) stream.seek(0) if self.external_store: self._add_to_external_store(required_digest, [method], [stream]) else: self._add_to_cache(required_digest, stores, method, stream) finally: stream.close() elif isinstance(method, Recipe): blocker = self.cook(required_digest, method, stores, impl_hint = impl) yield blocker tasks.check(blocker) else: raise Exception(_("Unknown download type for '%s'") % method) except download.DownloadError as ex: if original_exception: logger.info("Error from mirror: %s", ex) raise original_exception else: original_exception = ex mirror_url = self._get_impl_mirror(impl) if mirror_url is not None: logger.info("%s: trying implementation mirror at %s", ex, mirror_url) method = model.DownloadSource(impl, mirror_url, None, None, type = 'application/x-bzip-compressed-tar') continue # Retry raise break self.handler.impl_added_to_store(impl) return download_impl(retrieval_method) def _add_to_cache(self, required_digest, stores, retrieval_method, stream): assert isinstance(retrieval_method, DownloadSource) stores.add_archive_to_cache(required_digest, stream, retrieval_method.url, retrieval_method.extract, type = retrieval_method.type, start_offset = retrieval_method.start_offset or 0, dry_run = self.handler.dry_run) def _add_to_external_store(self, required_digest, steps, streams): from zeroinstall.zerostore.unpack import type_from_url # combine archive path, extract directory and MIME type arguments in an alternating fashion paths = map(lambda stream: stream.name, streams) extracts = map(lambda step: step.extract or "", steps) types = map(lambda step: step.type or type_from_url(step.url), steps) args = [None]*(len(paths)+len(extracts)+len(types)) args[::3] = paths args[1::3] = extracts args[2::3] = types # close file handles to allow external processes access for stream in streams: stream.close() # delegate extracting archives to external tool import subprocess subprocess.call([self.external_store, "add", required_digest] + args) # delete temp files for path in paths: os.remove(path) # (force is deprecated and ignored) def download_archive(self, download_source, force = False, impl_hint = None, may_use_mirror = False): """Fetch an archive. 
You should normally call L{download_impl} instead, since it handles other kinds of retrieval method too. It is the caller's responsibility to ensure that the returned stream is closed. """ from zeroinstall.zerostore import unpack url = download_source.url if not (url.startswith('http:') or url.startswith('https:') or url.startswith('ftp:')): raise SafeException(_("Unknown scheme in download URL '%s'") % url) mime_type = download_source.type if not mime_type: mime_type = unpack.type_from_url(download_source.url) if not mime_type: raise SafeException(_("No 'type' attribute on archive, and I can't guess from the name (%s)") % download_source.url) if not self.external_store: unpack.check_type_ok(mime_type) if may_use_mirror: mirror = self._get_archive_mirror(download_source) else: mirror = None if self.config.handler.dry_run: print(_("[dry-run] downloading archive {url}").format(url = download_source.url)) dl = self.download_url(download_source.url, hint = impl_hint, mirror_url = mirror) if download_source.size is not None: dl.expected_size = download_source.size + (download_source.start_offset or 0) # (else don't know sizes for mirrored archives) return (dl.downloaded, dl.tempfile) # (force is deprecated and ignored) def download_icon(self, interface, force = False): """Download an icon for this interface and add it to the icon cache. If the interface has no icon do nothing. @return: the task doing the import, or None @rtype: L{tasks.Task}""" logger.debug("download_icon %(interface)s", {'interface': interface}) modification_time = None existing_icon = self.config.iface_cache.get_icon_path(interface) if existing_icon: file_mtime = os.stat(existing_icon).st_mtime from email.utils import formatdate modification_time = formatdate(timeval = file_mtime, localtime = False, usegmt = True) feed = self.config.iface_cache.get_feed(interface.uri) if feed is None: return None # Find a suitable icon to download for icon in feed.get_metadata(XMLNS_IFACE, 'icon'): type = icon.getAttribute('type') if type != 'image/png': logger.debug(_('Skipping non-PNG icon')) continue source = icon.getAttribute('href') if source: break logger.warning(_('Missing "href" attribute on <icon> in %s'), interface) else: logger.info(_('No PNG icons found in %s'), interface) return dl = self.download_url(source, hint = interface, modification_time = modification_time) @tasks.async def download_and_add_icon(): stream = dl.tempfile try: yield dl.downloaded tasks.check(dl.downloaded) if dl.unmodified: return stream.seek(0) import shutil, tempfile icons_cache = basedir.save_cache_path(config_site, 'interface_icons') tmp_file = tempfile.NamedTemporaryFile(dir = icons_cache, delete = False) shutil.copyfileobj(stream, tmp_file) tmp_file.close() icon_file = os.path.join(icons_cache, escape(interface.uri)) portable_rename(tmp_file.name, icon_file) finally: stream.close() return download_and_add_icon() def download_impls(self, implementations, stores): """Download the given implementations, choosing a suitable retrieval method for each. If any of the retrieval methods are DistributionSources and need confirmation, handler.confirm is called to check that the installation should proceed. 
""" unsafe_impls = [] to_download = [] for impl in implementations: logger.debug(_("start_downloading_impls: for %(feed)s get %(implementation)s"), {'feed': impl.feed, 'implementation': impl}) source = self.get_best_source(impl) if not source: raise SafeException(_("Implementation %(implementation_id)s of interface %(interface)s" " cannot be downloaded (no download locations given in " "interface!)") % {'implementation_id': impl.id, 'interface': impl.feed.get_name()}) to_download.append((impl, source)) if isinstance(source, DistributionSource) and source.needs_confirmation: unsafe_impls.append(source.package_id) @tasks.async def download_impls(): if unsafe_impls: confirm = self.handler.confirm_install(_('The following components need to be installed using native packages. ' 'These come from your distribution, and should therefore be trustworthy, but they also ' 'run with extra privileges. In particular, installing them may run extra services on your ' 'computer or affect other users. You may be asked to enter a password to confirm. The ' 'packages are:\n\n') + ('\n'.join('- ' + x for x in unsafe_impls))) yield confirm tasks.check(confirm) blockers = [] for impl, source in to_download: blockers.append(self.download_impl(impl, source, stores)) # Record the first error log the rest error = [] def dl_error(ex, tb = None): if error: self.handler.report_error(ex) else: error.append((ex, tb)) while blockers: yield blockers tasks.check(blockers, dl_error) blockers = [b for b in blockers if not b.happened] if error: from zeroinstall import support support.raise_with_traceback(*error[0]) if not to_download: return None return download_impls() def get_best_source(self, impl): """Return the best download source for this implementation. @rtype: L{model.RetrievalMethod}""" if impl.download_sources: return impl.download_sources[0] return None def download_url(self, url, hint = None, modification_time = None, expected_size = None, mirror_url = None): """The most low-level method here; just download a raw URL. It is the caller's responsibility to ensure that dl.stream is closed. @param url: the location to download from @param hint: user-defined data to store on the Download (e.g. used by the GUI) @param modification_time: don't download unless newer than this @param mirror_url: an altertive URL to try if this one fails @type mirror_url: str @rtype: L{download.Download} @since: 1.5 """ dl = download.Download(url, hint = hint, modification_time = modification_time, expected_size = expected_size, auto_delete = not self.external_store) dl.mirror = mirror_url self.handler.monitor_download(dl) dl.downloaded = self.scheduler.download(dl) return dl class StepRunner(object): """The base class of all step runners. @since: 1.10""" def __init__(self, stepdata, impl_hint): self.stepdata = stepdata self.impl_hint = impl_hint def prepare(self, fetcher, blockers): pass @classmethod def class_for(cls, model): for subcls in cls.__subclasses__(): if subcls.model_type == type(model): return subcls assert False, "Couldn't find step runner for %s" % (type(model),) def close(self): """Release any resources (called on success or failure).""" pass class RenameStepRunner(StepRunner): """A step runner for the <rename> step. @since: 1.10""" model_type = model.RenameStep def apply(self, basedir): source = native_path_within_base(basedir, self.stepdata.source) dest = native_path_within_base(basedir, self.stepdata.dest) os.rename(source, dest) class DownloadStepRunner(StepRunner): """A step runner for the <archive> step. 
@since: 1.10""" model_type = model.DownloadSource def prepare(self, fetcher, blockers): self.blocker, self.stream = fetcher.download_archive(self.stepdata, impl_hint = self.impl_hint, may_use_mirror = True) assert self.stream blockers.append(self.blocker) def apply(self, basedir): from zeroinstall.zerostore import unpack assert self.blocker.happened unpack.unpack_archive_over(self.stepdata.url, self.stream, basedir, extract = self.stepdata.extract, type=self.stepdata.type, start_offset = self.stepdata.start_offset or 0) def close(self): self.stream.close() def native_path_within_base(base, crossplatform_path): """Takes a cross-platform relative path (i.e using forward slashes, even on windows) and returns the absolute, platform-native version of the path. If the path does not resolve to a location within `base`, a SafeError is raised. @since: 1.10 """ assert os.path.isabs(base) if crossplatform_path.startswith("/"): raise SafeException("path %r is not within the base directory" % (crossplatform_path,)) native_path = os.path.join(*crossplatform_path.split("/")) fullpath = os.path.realpath(os.path.join(base, native_path)) base = os.path.realpath(base) if not fullpath.startswith(base + os.path.sep): raise SafeException("path %r is not within the base directory" % (crossplatform_path,)) return fullpath
dsqmoore/0install
zeroinstall/injector/fetch.py
Python
lgpl-2.1
24,666
[ "VisIt" ]
efc3519f1b9831ae69fbb53d370a42a093b63f43684383cea3f3e4471bd45b24
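A note on the row above: `_add_to_external_store` interleaves three parallel lists (archive paths, extract directories, MIME types) using extended slice assignment. The standalone sketch below is our illustration, not code from the repository; the paths and types are invented. One observation: under Python 3 the `map(...)` results in the original would need to be wrapped in `list(...)` before `len()` is taken, since `map` returns an iterator there.

# Illustrative only: the alternating-argument trick used by _add_to_external_store,
# shown standalone with made-up values.
paths = ["/tmp/a.tar.gz", "/tmp/b.zip"]
extracts = ["", "subdir"]
types = ["application/x-compressed-tar", "application/zip"]

args = [None] * (len(paths) + len(extracts) + len(types))
args[::3] = paths      # positions 0, 3, 6, ...
args[1::3] = extracts  # positions 1, 4, 7, ...
args[2::3] = types     # positions 2, 5, 8, ...

print(args)
# ['/tmp/a.tar.gz', '', 'application/x-compressed-tar',
#  '/tmp/b.zip', 'subdir', 'application/zip']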
import time, socket
import commands, channels, mods, irc_msg


class irc:
    def __init__(self, host, nick, port, chans, rootpwd):
        self.host = host
        self.port = port
        self.nick = nick
        self.command_char = "!"
        self.init_phase = True
        self.auto_join = chans
        self.root_pwd = rootpwd
        # core functionality (available for all modules)
        self.channels = channels.channels(self)
        self.commands = commands.commands(self)
        self.modules = mods.modules(self)  # need to be constructed last of the core modules
        # queues have the same outgoing interval, that critical_queue remains
        # more responsive is up to module writers staying responsible
        self.critical_queue = []
        self.noncritical_queue = []
        self.shitter_queue = []  # for spammers
        self.crit_time = 0
        self.ncrit_time = 0
        self.shitter_time = 0
        self.user_sent = {}
        self.user_time = 0  # clear user_sent every X time period
        self.blacklisted = {}

    def __del__(self):
        if hasattr(self, 'sock'):
            self.sock.close()

    def chan_msg(self, channel, msg, queue=2, user=None):
        print("<" + self.nick + " (" + channel.name + ", " + str(queue) + ")> " + msg)
        s = "PRIVMSG " + channel.name + " :" + msg
        self.send(s, queue, user)

    def priv_msg(self, user, msg, queue=2):
        print("<" + self.nick + " (" + user.nick + ", " + str(queue) + ")> " + msg)
        s = "PRIVMSG " + user.nick + " :" + msg
        self.send(s, queue, user)

    def has_privilege(self, user, priv):
        if priv == 0:
            return True
        auth = self.modules.get_module("auth")
        if auth == None or auth.active == False:
            return False
        return auth.module.has_priv(user, priv)

    # queue: 0: instant, 1: critical messages, 2: non-critical messages, else: spammer queue (size limited)
    # user: user that is the reason we are sending this message, if applicable => enables the spam limit
    def send(self, s, queue=2, user=None):
        # cannot contain newlines or carriage-returns
        assert s.find("\n") == -1 and s.find("\r") == -1
        # run must've been called before send
        assert hasattr(self, 'sock')
        # max length of message
        assert len(s) <= 510
        s += "\r\n"
        b = s.encode('utf-8')
        if user != None:
            if not user.hostname in self.user_sent:
                self.user_sent[user.hostname] = 0
            self.user_sent[user.hostname] += 1
            # if a user gets into blacklisted he needs to wait a full minute before he starts invoking commands
            if user.hostname in self.blacklisted:
                print(user.nick + "'s visit in the shitter queue extended to a full minute")
                self.blacklisted[user.hostname] = time.time()
                queue = 3
            elif self.user_sent[user.hostname] >= 20:
                print("added " + user.nick + " to shitter queue")
                self.blacklisted[user.hostname] = time.time()
                queue = 3
        if queue == 0:
            self.sock.sendall(b)
        elif queue == 1:
            self.critical_queue.append(b)
        elif queue == 2:
            self.noncritical_queue.append(b)
        else:
            if len(self.shitter_queue) < 4096:
                self.shitter_queue.append(b)

    def net_loop(self):
        self.sock.settimeout(0.2)
        bs = b""
        while True:
            while True:
                try:
                    bs += self.sock.recv(1)
                except socket.timeout:
                    break
                if bs.find(b"\r\n") != -1:
                    break
            # process message if a full message has been received
            if bs.find(b"\r\n") != -1:
                bs = bs[:-2]  # remove \r\n
                try:
                    decoded_msg = bs.decode('utf-8')
                    self.recv_handle(decoded_msg)
                except UnicodeDecodeError:
                    # Warn user that he's sending weird characters
                    self.point_laugh(bs)
                bs = b""
            # handle sending of queues
            self.send_queues()

    def send_queues(self):
        # clear user_time
        if self.user_time + 60.0 <= time.time():
            self.user_sent.clear()
            self.user_time = time.time()
        # trim blacklisted
        del_keys = []
        for k, v in self.blacklisted.items():
            if v + 60.0 < time.time():
                del_keys.append(k)
        for k in del_keys:
            del self.blacklisted[k]
        if len(self.critical_queue) > 0 and time.time() >= self.crit_time + 1.0:
            self.sock.send(self.critical_queue[0], 0)
            self.critical_queue = self.critical_queue[1:]
            self.crit_time = time.time()
        if len(self.noncritical_queue) > 0 and time.time() >= self.ncrit_time + 2.0:
            self.sock.send(self.noncritical_queue[0], 0)
            self.noncritical_queue = self.noncritical_queue[1:]
            self.ncrit_time = time.time()
        # only send from shitter_queue once the other queues are depleted
        if len(self.critical_queue) != 0 or len(self.noncritical_queue) != 0:
            return
        if len(self.shitter_queue) > 0 and time.time() >= self.shitter_time + 2.0:
            self.sock.send(self.shitter_queue[0], 0)
            self.shitter_queue = self.shitter_queue[1:]
            self.shitter_time = time.time()

    def run(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.host, self.port))
        # XXX: pass message
        self.send("NICK " + self.nick, 0)
        self.send("USER " + self.nick + " 0 * :Tester Jackson", 0)
        self.net_loop()

    # handles raw input
    def recv_handle(self, s):
        msg = irc_msg.irc_msg()
        index = 0
        # extract optional prefix
        if s[index] == ":":
            index += 1
            old_index = index
            index = s.find(" ", index)
            msg.prefix = s[old_index:index]
            index += 1
        # extract command
        old_index = index
        index = s.find(" ", index)
        msg.cmd = s[old_index:index]
        index += 1
        # extract command parameters
        while index < len(s) and s[index] != ":":
            old_index = index
            index = s.find(" ", index)
            if index == -1:
                index = len(s)
            msg.cmd_params.append(s[old_index:index])
            index += 1
        # extract trailing command parameter
        if index < len(s) and s[index] == ":":
            msg.cmd_params.append(s[index+1:len(s)])
        self.msg_handle(msg)

    # handles parsed irc messages
    def msg_handle(self, msg):
        # some irc server software send PING as the first message (which seems to be contrary to RFC2812)
        if msg.cmd == "PING":
            self.send("PONG :" + str(msg.cmd_params[0]), 0)
        if self.init_phase == True:
            if msg.cmd != "004" and msg.cmd != "PING":
                return
            self.init_phase = False
            # join auto-join channels
            for chan in self.auto_join:
                self.send("JOIN " + chan)
            # set some modes we care about (NOTE: these are non-standard)
            # B: we're a bot, T: don't get CTCPs
            self.send("MODE " + self.nick + " +BT")
        # Initialization phase is finished, begin processing messages
        # pass raw message to core modules
        self.channels.raw_msg(msg)
        # pass raw message onto all active modules
        self.modules.invoke('raw_msg', msg)
        # user to user private messages
        if msg.cmd == "PRIVMSG" and msg.cmd_params[0] == self.nick and msg.prefix.find("!") != -1:
            # only handle private user to user messages if the user is in a channel we are in as well
            nick, _ = msg.prefix.split("!", 1)
            user = self.channels.find_user(nick)
            if user != None:
                if user.hostmask != msg.prefix:
                    user.hostmask = msg.prefix
                    user.extract_hostname(msg.prefix)
                message = msg.cmd_params[1]
                self.commands.priv_msg(user, message)
                self.modules.invoke('priv_msg', user, message)

    def point_laugh(self, byte_str):
        # We should be able to read up until PRIVMSG as ASCII and figure out the channel
        ascii_str = ""
        for c in byte_str:
            if c > 127:
                break
            ascii_str += str(chr(c))
        if ascii_str.find("PRIVMSG") != -1:
            user, _ = ascii_str.split("!", 1)
            user = user[1:]  # skip : part of prefix
            _, chan = ascii_str.split("PRIVMSG ", 1)
            chan, _ = chan.split(" ", 1)
            chan = chan.lower()
            if len(chan) > 0 and chan[0] == "#" and chan in self.channels.channels:
                self.chan_msg(self.channels.channels[chan], user + " is not using utf-8 to encode his messages, let's all point and laugh. (http://utf8everywhere.org/)")
ccshiro/mib
src/irc.py
Python
mit
9,263
[ "VisIt" ]
f213060a6f98599350f726e62370ddef43a2797256ae9c6c62224fc3916aefd5
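The densest part of the row above is `recv_handle`, which splits a raw IRC line into prefix, command, and parameters by index arithmetic. The sketch below re-implements the same split in a dependency-free form (a plain dict stands in for `irc_msg.irc_msg`); it is our illustration of the parsing logic, not code from the repository.

# Self-contained re-implementation of the message split performed by recv_handle.
def parse_irc_line(s):
    msg = {"prefix": None, "cmd": None, "params": []}
    if s.startswith(":"):
        # optional prefix, up to the first space
        prefix, s = s[1:].split(" ", 1)
        msg["prefix"] = prefix
    if " " in s:
        msg["cmd"], s = s.split(" ", 1)
    else:
        msg["cmd"], s = s, ""
    while s and not s.startswith(":"):
        # middle parameters are space-separated
        if " " in s:
            param, s = s.split(" ", 1)
        else:
            param, s = s, ""
        msg["params"].append(param)
    if s.startswith(":"):
        # trailing parameter may contain spaces
        msg["params"].append(s[1:])
    return msg

print(parse_irc_line(":nick!user@host PRIVMSG #chan :hello there"))
# {'prefix': 'nick!user@host', 'cmd': 'PRIVMSG', 'params': ['#chan', 'hello there']}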
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# **       PTS -- Python Toolkit for working with SKIRT          **
# **       © Astronomical Observatory, Ghent University          **
# *****************************************************************

## \package pts.magic.tools.regions Contains functions for dealing with regions.

# -----------------------------------------------------------------

# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function

# Import standard modules
import copy
import numpy as np

# Import astronomical modules
import pyregion
import astropy.units as u
from astropy.coordinates import Angle

# Import the relevant PTS classes and modules
from ..basics.geometry import Ellipse
from ..basics.vector import Position, Extent

# -----------------------------------------------------------------

def largest_ellipse(region):

    """
    This function ...
    :param region:
    :return:
    """

    largest_shape = None

    # Loop over all the shapes in the galaxy region
    for shape in region:

        # Skip shapes that are not ellipses
        if not isinstance(shape, Ellipse): continue

        major_axis_length = shape.major

        if largest_shape is None or major_axis_length > largest_shape.major: largest_shape = shape

    # Return the largest shape in the galaxy region
    return largest_shape

# -----------------------------------------------------------------

def ellipse(shape):

    """
    This function ...
    :param shape:
    :return:
    """

    x_center, y_center, x_radius, y_radius, angle = ellipse_parameters(shape)
    return Ellipse(Position(x_center, y_center), Extent(x_radius, y_radius), Angle(angle, u.Unit("deg")))

# -----------------------------------------------------------------

def ellipse_parameters(shape):

    """
    This function ...
    :param shape:
    :return:
    """

    x_center = shape.coord_list[0]
    y_center = shape.coord_list[1]
    x_radius = shape.coord_list[2]

    if shape.name == "ellipse":
        y_radius = shape.coord_list[3]
        try: angle = shape.coord_list[4]
        except: angle = 0.0
    elif shape.name == "circle":
        y_radius = shape.coord_list[2]
        angle = 0.0
    else: raise ValueError("Shape must be either a circle or an ellipse")

    return x_center, y_center, x_radius, y_radius, angle

# -----------------------------------------------------------------

def get_enclosing_boxes(region):

    """
    This function ...
    :param region:
    :return:
    """

    boxes = []

    # This is a hack to use mpl to determine the outer bounds of the regions
    # (but it's a legit hack - pyregion needs a major internal refactor before
    # we can approach this any other way)
    mpl_objs = region.get_mpl_patches_texts(origin=0)[0]

    # Loop over all objects
    for obj in mpl_objs:

        # Find the minimal enclosing box containing the shape
        extent = obj.get_extents()
        x_min, y_min = extent.min
        x_max, y_max = extent.max

        # Add the extent of this box
        boxes.append((x_min, x_max, y_min, y_max))

    return boxes

# -----------------------------------------------------------------

def create_annulus(region, outer_factor, inner_factor=1.0):

    """
    This function ...
    :param region:
    :param outer_factor:
    :param inner_factor:
    :return:
    """

    # Create a new region
    region_annulus = pyregion.ShapeList([])

    # ...
    for shape in region:

        # Create new shapes by deep-copying the original shape
        # Creating new shapes from scratch: pyregion.parser_helper.Shape(None, None)
        inner_shape = copy.deepcopy(shape)
        outer_shape = copy.deepcopy(shape)

        # Add a '-' symbol to the name of the inner region
        inner_shape.name = '-' + shape.name

        # Set the size of the inner shape
        inner_shape.coord_list[2] *= inner_factor
        inner_shape.coord_list[3] *= inner_factor

        # Do special things to make this an excluded region (We're not supposed to do this as well)
        inner_shape.exclude = True

        # Set the size of the outer shape
        outer_shape.coord_list[2] *= outer_factor
        outer_shape.coord_list[3] *= outer_factor

        region_annulus.append(outer_shape)
        region_annulus.append(inner_shape)

    # Return the new region
    return region_annulus

# -----------------------------------------------------------------

def expand(region, factor):

    """
    This function ...
    :param region:
    :param factor:
    :return:
    """

    # Create a new region
    region_expanded = pyregion.ShapeList([])

    # Loop over all shapes in the original region
    for shape in region:

        # Create a new shape
        expanded_shape = copy.deepcopy(shape)

        # Set the size of the new shape
        expanded_shape.coord_list[2] *= factor
        if shape.name == "ellipse": expanded_shape.coord_list[3] *= factor

        # Add the new shape to the new region
        region_expanded.append(expanded_shape)

    # Return the new region
    return region_expanded

# -----------------------------------------------------------------

def ellipses(ra_list, dec_list, height_list, width_list, angle_list):

    # Initialize the region string
    region_string = "# Region file format: DS9 version 3.0\n"
    region_string += "global color=green\n"
    region_string += "fk5\n"

    for ra, dec, height, width, angle in zip(ra_list, dec_list, height_list, width_list, angle_list):

        line = "fk5;ellipse(%s,%s,%.2f\",%.2f\",%s)\n" % (ra, dec, height, width, angle)
        region_string += line

    region = pyregion.parse(region_string)

    # Return the region
    return region

# -----------------------------------------------------------------

def circles(ra_list, dec_list, radius_list):

    # Initialize the region string
    region_string = "# Region file format: DS9 version 3.0\n"
    region_string += "global color=green\n"
    region_string += "fk5\n"

    for ra, dec, radius in zip(ra_list, dec_list, radius_list):

        line = "fk5;circle(%s,%s,%.2f\")\n" % (ra, dec, radius)
        region_string += line

    region = pyregion.parse(region_string)

    # Return the region
    return region

# -----------------------------------------------------------------

def ellipses_from_coordinates(coordinates):

    """
    This function creates a region consisting of ellipses, based on a list of coordinates
    :param coordinates: the list of coordinates for the different ellipses
    :return: the region of ellipses
    """

    # Initialize the region string
    region_string = "# Region file format: DS9 version 3.0\n"
    region_string += "global color=green\n"
    region_string += "image\n"

    # Loop over the objects in the coordinates list, adding a line for each one
    for object in coordinates:

        if type(object).__name__ == "Gaussian2D":
            line = "ellipse(" + str(object.x_mean.value) + "," + str(object.y_mean.value) + "," + str(object.x_stddev.value) + "," + str(object.y_stddev.value) + ",0.0)\n"
        elif type(object).__name__ == "AiryDisk2D":
            # see https://en.wikipedia.org/wiki/Airy_disk#Approximation_using_a_Gaussian_profile and
            # http://astropy.readthedocs.org/en/latest/api/astropy.modeling.functional_models.AiryDisk2D.html#astropy.modeling.functional_models.AiryDisk2D
            sigma = 0.42 * object.radius.value * 0.81989397882
            line = "ellipse(" + str(object.x_0.value) + "," + str(object.y_0.value) + "," + str(sigma) + "," + str(sigma) + ",0.0)\n"
        else: raise ValueError("Models other than Gaussian2D and AiryDisk2D are not yet supported")

        region_string += line

    # Parse the region string into a region object
    region = pyregion.parse(region_string)

    # Return the region
    return region

# -----------------------------------------------------------------

def one_ellipse(parameters):

    """
    This function ...
    :param parameters:
    :return:
    """

    # Create a string identifying this ellipse
    region_string = "# Region file format: DS9 version 3.0\n"
    region_string += "global color=green\n"
    region_string += "image\n"
    region_string += "ellipse(" + str(parameters[0]) + "," + str(parameters[1]) + "," + str(parameters[2]) + "," + str(parameters[3]) + "," + str(parameters[4]) + ")\n"

    # Create a region and return it
    return pyregion.parse(region_string)

# -----------------------------------------------------------------

def create_mask(region, header, x_size, y_size):

    """
    This function ...
    :param region:
    :param header:
    :param x_size:
    :param y_size:
    :return:
    """

    # Create a mask and return it
    return region.get_mask(header=header, shape=(y_size, x_size))

# -----------------------------------------------------------------

def parse(region_string):

    """
    This function is a simple wrapper around the pyregion.parse function, to contain
    :param region_string:
    :return:
    """

    # Parse the region string and create a region
    return pyregion.parse(region_string)

# -----------------------------------------------------------------

def scale(shape, factor):

    """
    This function ...
    :param shape:
    :param factor:
    :return:
    """

    new_shape = copy.deepcopy(shape)
    new_shape.coord_list[2] *= factor
    if new_shape.name == "ellipse": new_shape.coord_list[3] *= factor

    return new_shape

# -----------------------------------------------------------------

def scale_circle(shape, factor):

    """
    This function ...
    :param shape:
    :param factor:
    :return:
    """

    new_shape = copy.deepcopy(shape)
    new_shape.coord_list[2] *= factor

    return new_shape

# -----------------------------------------------------------------

def subtract(region_a, region_b, center_offset_tolerance, header):

    """
    This function ...
    :param region_a:
    :param region_b:
    :return:
    """

    # TODO: fix this function: do not only use the first shape of region b!!
    new_region = region_a.as_imagecoord(header)
    region_b = region_b.as_imagecoord(header)

    x_b = region_b[0].coord_list[0]
    y_b = region_b[0].coord_list[1]

    for i in range(len(new_region)):

        x_center = new_region[i].coord_list[0]
        y_center = new_region[i].coord_list[1]

        diff_x = x_center - x_b
        diff_y = y_center - y_b

        distance = np.sqrt(diff_x**2 + diff_y**2)

        if distance < center_offset_tolerance:
            del new_region[i]
            break

    # Return the subtracted region
    return new_region

# -----------------------------------------------------------------

def mean_radius(region):

    # Initialize an empty list to contain the different sigma values
    sigmas = []

    # Loop over all shapes in the region
    for shape in region:

        sigma_x = shape.coord_list[2]
        sigma_y = shape.coord_list[3]

        # Add the sigma, averaged over the x and y directions, to the list of sigmas
        sigmas.append(0.5*(sigma_x + sigma_y))

    return np.mean(sigmas)

# -----------------------------------------------------------------

def max_radius(region):

    # Initialize an empty list to contain the different sigma values
    sigmas = []

    # Loop over all shapes in the region
    for shape in region:

        sigma_x = shape.coord_list[2]
        sigma_y = shape.coord_list[3]

        # Add the sigma, averaged over the x and y directions, to the list of sigmas
        sigmas.append(0.5*(sigma_x + sigma_y))

    return max(sigmas)

# -----------------------------------------------------------------
Stargrazer82301/CAAPR
CAAPR/CAAPR_AstroMagic/PTS/pts/magic/tools/regions.py
Python
mit
11,810
[ "Galaxy" ]
dab5321badff28515cef5d4b9ddd4d057319baf17c3ac164a8c84281923dad2a
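Most helpers in the row above follow one pattern: build or copy pyregion shapes, then scale entries of `coord_list`. Below is a minimal usage sketch of that pattern, assuming pyregion is installed and behaves as the module expects; the region string and the numbers are invented for illustration.

# Hedged usage sketch mirroring expand(region, 1.5) from the module above.
import pyregion

region_string = (
    "# Region file format: DS9 version 3.0\n"
    "global color=green\n"
    "image\n"
    "ellipse(100,120,10,6,30)\n"
)
region = pyregion.parse(region_string)

# Grow every shape by 50%: index 2 is the x radius, index 3 the y radius.
for shape in region:
    shape.coord_list[2] *= 1.5
    if shape.name == "ellipse":
        shape.coord_list[3] *= 1.5

print(region[0].coord_list)  # approximately [100.0, 120.0, 15.0, 9.0, 30.0]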
__author__ = 'cos'

from tactic_world import TacticWorld
from strategic_world import StrategicWorld
from galaxy import *
from faction import Faction
from gui.universeUI import UniverseUI
from campaign import Campaign
from agents.unit import *
from agents.building import *
import Tkinter, tkFileDialog
from engine.sound import Sound


class UnitLoader(object):
    '''
    Reads the unit text files and generates unit property objects.
    It can be used to obtain unit and building objects (without attached instances).
    '''

    def __init__(self, settings, world=None):
        self.unitProps = {}
        self.weaponProps = {}
        self.buildingProps = {}
        self.world = world
        self.settings = settings

        self.parseUnitFile("objects/agents/alien.units", "Tauran")
        self.parseWeaponFile("objects/agents/alien.weapons", "Tauran")
        self.parseBuildingFile("objects/agents/alien.buildings", "Tauran")
        self.parseUnitFile("objects/agents/human.units", "Human")
        self.parseWeaponFile("objects/agents/human.weapons", "Human")
        self.parseBuildingFile("objects/agents/human.buildings", "Human")

    def setWorld(self, world):
        self.world = world

    def parseFile(self, filename):
        returnDict = {}
        fil = open(filename)
        wholeText = fil.readlines()
        fil.close()
        # remove the lines starting with ";" and "\n"
        wholeText = [text for text in wholeText if text[0] != ";" if text != "\n"]
        # Get indexes of the beginning of units (marked by "[]").
        IDCells = [text for text in wholeText if text[0] == "["]

        for IDCell in IDCells:
            ID = IDCell.split("]")[0].split("[")[1]
            propertyObj = {}
            propertyText = []
            for i in range(wholeText.index(IDCell)+1, wholeText.__len__()):
                if wholeText[i][0] == "[":
                    break  # We parsed all the object and found the next object.
                propertyText.append(wholeText[i])

            # Now we have to parse this propertyText.
            for line in propertyText:
                if not "=" in line:
                    continue
                name, value = line.split("=")
                value = value.split("\n")[0]
                numvalue = None
                try:
                    numvalue = int(value) + 0
                except:
                    numvalue = value
                propertyObj[name] = numvalue

            returnDict[ID] = propertyObj

        return returnDict

    def parseUnitFile(self, filename, faction):
        unitDict = self.parseFile(filename)
        for unit in unitDict.keys():
            unitDict[unit]["unitName"] = unit
            unitDict[unit]["faction"] = faction
            unitDict[unit]["type"] = "Unit"
        self.unitProps.update(unitDict)

    def parseWeaponFile(self, filename, faction):
        weaponDict = self.parseFile(filename)
        for weapon in weaponDict.keys():
            weaponDict[weapon]["weaponName"] = weapon
            weaponDict[weapon]["faction"] = faction
            weaponDict[weapon]["type"] = "Weapon"
        self.weaponProps.update(weaponDict)

    def parseBuildingFile(self, filename, faction):
        buildingDict = self.parseFile(filename)
        for building in buildingDict.keys():
            buildingDict[building]["buildingName"] = building
            buildingDict[building]["faction"] = faction
            buildingDict[building]["type"] = "Building"
        self.buildingProps.update(buildingDict)

    def createBuilding(self, buildingName):
        if not self.world:
            print "Error: No world associated with UnitCreator!"
            return
        if buildingName not in self.buildingProps.keys():
            print "Found no building with that buildingName!"
            return
        buildingProps = self.buildingProps[buildingName]
        buildingProps["unitName"] = buildingName
        newBuilding = Building(self.world, buildingProps)
        # newBuilding.properties = buildingProps
        return newBuilding

    def createUnit(self, unitName):
        if not self.world:
            print "Error: No world associated with UnitCreator!"
            return
        if unitName not in self.unitProps.keys():
            return "Found no unit with that unitName!"
        unitProps = self.unitProps[unitName]
        lWeapon = Weapon(self.world)
        weaponName = unitProps["LightWeapon"]
        lWeapon.properties = self.weaponProps[weaponName]
        hWeapon = Weapon(self.world)
        weaponName = unitProps["HeavyWeapon"]
        hWeapon.properties = self.weaponProps[weaponName]
        newUnit = Unit(self.world, unitProps, lWeapon=lWeapon, HWeapon=hWeapon)
        return newUnit

    def isUnit(self, id):
        if id in self.unitProps.keys():
            return True
        else:
            return False

    def isBuilding(self, id):
        if id in self.buildingProps.keys():
            return True
        else:
            return False


class Universe(object):
    '''
    This will hold the overall campaign information.
    It also takes care of the main GUI where the player can choose the planet and end turn.
    So far it just creates two buttons:
    Start Strategic turn: Generates a strategic_world instance. It's the overall "building" mode.
    Start Tactical turn: Generates a tactic_world instance. It's the combat mode.
    '''

    selectedPlanet = None
    pause = True

    def __init__(self, engine, settings):
        self._engine = engine
        self._settings = settings
        self.world = None
        self.year = 1  # Equivalent to "turn" in strategic view.
        self.gui = None
        self.campaign = None
        self.progress = None
        self.galaxy = None
        self.faction = None
        self.sound = Sound(self._engine, self._settings)
        self.unitLoader = UnitLoader(self._settings)

    def load(self):
        saveDir = "saves/"
        # self.progress = Progress(self)
        root = Tkinter.Tk()
        file = tkFileDialog.askopenfilename(parent=root, title='Select campaign to load',
                                            initialdir=saveDir, filetypes=[("Campaign", "*.cpn")])
        root.destroy()
        self.campaign = Campaign(self, file)
        self.newGame()

    def newGame(self, campaign=None, giveFreebies=False):
        '''
        Creates a new campaign and starts it.
        :return:
        '''
        #FIXME: Look into restarting the program.
        if self.world:
            self.world.model.deleteMaps()
            self.world.model.deleteObjects()
            self.unitLoader.setWorld(None)

        if campaign:
            self.campaign = campaign
        elif not self.campaign:
            self.campaign = Campaign(self)
            self.campaign.createCampaign()

        self.galaxy = Galaxy(self.campaign.galaxyName)

        # Build the main GUI
        if not self.gui:
            self.gui = UniverseUI(self)
        self.gui.show()

        self.progress = self.campaign.progress  # to save the progress.
        self.faction = self.campaign.getPlayerFaction()

        # Give a freebie of 10000 credits
        if giveFreebies:
            self.faction.resources["Credits"] = 10000

        self.continueGame()
        self.gui.updateUI()

    def pauseGame(self):
        self.pause = True

    def continueGame(self):
        self.pause = False

    def toWarClicked(self):
        print "Going to war!"
        self.gui.hide()
        planetName = "firstCapital"
        #self.progress.allPlanets[planetName]
        self.selectedPlanet = Planet(planetName, self.progress.planetInfos[planetName])
        self.startTactic()

    def goToPlanet(self, planetName):
        print "Going to Planet ", planetName
        self.gui.hide()
        if planetName in self.progress.planetInfos.keys():
            planetInfo = self.progress.planetInfos[planetName]
        else:
            planetInfo = None
        self.selectedPlanet = Planet(planetName, planetInfo)
        self.startStrategic()

    def startTactic(self):
        '''
        Starts Tactic mode. Loads TacticWorld.
        :return:
        '''
        self.world = TacticWorld(self, self.selectedPlanet)
        self.world.load(self.selectedPlanet.getMapPath())
        print "Loading map: ", self.selectedPlanet.getMapPath()

    def startStrategic(self):
        '''
        Starts strategic mode. Loads StrategicWorld.
        :return:
        '''
        self.world = StrategicWorld(self, self.selectedPlanet)
        self.world.load(self.selectedPlanet.getMapPath())
        print "Loading map: ", self.selectedPlanet.getMapPath()

    def pump(self):
        if self.pause:
            return
        if self.world:
            self.world.pump()

    def save(self):
        if self.world:
            self.world.updatePlanetAgents()
        self.campaign.saveCampaign()

    def backToUniverse(self):
        # Save the information
        self.progress.update()
        self.world.HUD.closeExtraWindows()
        self.world.destroy()
        self.world.view.end()
        self.world.listener.detach()
        # if self.world.music:
        #     self.world.music.stop()
        # del self.world.music
        self.world.HUD.destroy()
        self.world.HUD = None
        self.unitLoader.setWorld(None)

        if self.selectedPlanet:
            self.selectedPlanet = None

        # delete map and objects.
        model = self._engine.getModel()
        model.deleteObjects()
        self.world = None
        self.gui.show()

    def getPlanetResources(self, planetName, display=False):
        '''
        Calculates the resources of a given planet.
        :param planetName: name of the planet to calculate
        :return: a tuple containing: (cash, energy, research)
        '''
        cashConsumption = 0
        cashProduction = 0
        energyConsumption = 0
        energyProduction = 0
        researchProduction = 0

        planet = self.progress.allPlanets[planetName]
        for agentID in planet["agentInfo"]:
            agentName = agentID.split(":")[0]
            if self.unitLoader.isBuilding(agentName):
                buildingProps = self.unitLoader.buildingProps[agentName]
                cashProduction += buildingProps["ProductionCash"]
                energyConsumption += buildingProps["ConsummationEnergy"]
                energyProduction += buildingProps["ProductionEnergy"]
                researchProduction += buildingProps["ProductionResearch"]
            elif self.unitLoader.isUnit(agentName):
                unitProps = self.unitLoader.unitProps[agentName]
                cashConsumption += unitProps["Upkeep"]
            else:
                print "Error, no agent type found for ", agentName

        for storage in planet["storages"].values():
            ## Should we also add the "inProduction" to the upkeep costs? So far no.
            for unitID in storage["unitsReady"]:
                agentName = unitID.split(":")[1]
                unitProps = self.unitLoader.unitProps[agentName]
                cashConsumption += unitProps["Upkeep"]

        totalCash = cashProduction - cashConsumption
        totalEnergy = energyProduction - energyConsumption

        if display:
            print "\nPlanet resource summary:"
            print "Planet name: ", planetName
            print "Cash flow: %d - %d = %d" % (cashProduction, cashConsumption, totalCash)
            print "Energy flow: %d - %d = %d" % (energyProduction, energyConsumption, totalEnergy)

        return (totalCash, totalEnergy, researchProduction)

    def updateResources(self):
        '''
        Calculates the resources and updates the faction information.
        :return:
        '''
        cashDelta = 0
        researchDelta = 0
        for planet in self.faction.pwnedPlanets:
            (cash, energy, research) = self.getPlanetResources(planet, display=True)
            cashDelta += cash
            researchDelta += research

        print "Total cash this turn:", cashDelta
        print "Research points this turn:", researchDelta
        self.faction.resources["Credits"] += cashDelta
        self.faction.resources["Research"] += researchDelta

    def applyNewTurn(self):
        '''
        This gets executed when a new turn is started.
        It involves calculation of resources.
        :return:
        '''
        self.updateResources()

    def endTurn(self):
        '''
        Ends the current turn. This implies:
        - Saves the information for this turn and should send it.
        - Leaves the game at a state where it's waiting for input from the other player.
        :return:
        '''
        print "Skipping turn!"
        self.progress.faction = self.faction
        self.progress.save()
        self.campaign.compileYear()
        self.campaign.paused = True
        self.campaign.saveCampaign()
        self.gui.updateUI()
conan747/fallen-heaven
scripts/universe.py
Python
gpl-2.0
13,110
[ "Galaxy" ]
2b771c67309f43b074e08448e1b2c6c4bb812a9f67e81f7ac24467455fe460de
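`UnitLoader.parseFile` in the row above implements a small INI-like format: `[id]` headers, `key=value` lines, `;` comments, with integer coercion where possible. The following is our self-contained re-implementation of that parsing for illustration; the block names and keys are invented, and the file contents are inlined as a list of lines instead of being read from disk.

# Minimal standalone version of the "[id]" + "key=value" parsing above.
def parse_blocks(lines):
    result, current = {}, None
    for line in lines:
        line = line.strip()
        if not line or line.startswith(";"):
            continue  # comments and blank lines are skipped, as in the original
        if line.startswith("["):
            current = line[1:line.index("]")]
            result[current] = {}
        elif "=" in line and current is not None:
            name, value = line.split("=", 1)
            try:
                value = int(value)
            except ValueError:
                pass  # keep non-numeric values as strings
            result[current][name] = value
    return result

text = ["[Marine]", "Health=100", "LightWeapon=Rifle", "; comment", "[Tank]", "Health=400"]
print(parse_blocks(text))
# {'Marine': {'Health': 100, 'LightWeapon': 'Rifle'}, 'Tank': {'Health': 400}}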
# Convert chemical models to SBML
# MOOSE needs to be compiled with libsbml: USE_SBML=1
import moose

moose.loadModel('../genesis/Kholodenko.g', '/Kholodenko')
moose.writeSBML('../genesis/Kholodenko.xml', '/Kholodenko')
dilawar/moose-full
moose-examples/snippets/convert_to_sbml.py
Python
gpl-2.0
216
[ "MOOSE" ]
be26b222e59b30242b7086de4b59d92d63cc703c19f774e9839091ef415240b2
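A hedged sketch of how the two calls above could be wrapped for reuse. The wrapper function, its name, and the paths are ours; only `moose.loadModel` and `moose.writeSBML` come from the snippet itself, and (as the original comment notes) a MOOSE build compiled with USE_SBML=1 is assumed.

import moose

def genesis_to_sbml(genesis_path, sbml_path, model_name):
    """Load a GENESIS .g chemical model and export it as SBML (illustrative wrapper)."""
    moose.loadModel(genesis_path, model_name)
    moose.writeSBML(sbml_path, model_name)

genesis_to_sbml('../genesis/Kholodenko.g', '../genesis/Kholodenko.xml', '/Kholodenko')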
from django.core.management.base import NoArgsCommand
from django.contrib.sites.models import Site

from pombola.feedback.models import Feedback


class Command(NoArgsCommand):
    help = 'Report all the feedback that needs attention'
    args = ''

    def handle_noargs(self, **options):

        pending = Feedback.objects.filter( status='pending' )

        # If there are no reports to deal with then exit
        if not pending.exists():
            return

        subject = "Feedback requires attention - %u pending reports" % pending.count()
        message = "Please visit the admin and process the feedback as needed."
        url = 'http://%s/admin/feedback/feedback/?status=pending' % Site.objects.get_current().domain

        print subject
        print
        print message
        print
        print url
mysociety/pombola
pombola/feedback/management/commands/feedback_report_pending.py
Python
agpl-3.0
857
[ "VisIt" ]
67c93cb51821fbb646a9d8a6ff70e823892346df61f4f91589f655023c106c43
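For context, a management command like the one above is invoked by the name of its module file, per Django's convention, so it would typically be run (e.g. from cron) as:

  $ python manage.py feedback_report_pending

A matching programmatic call, using Django's real `call_command` API; the idea of scheduling it is our suggestion, not something the file itself states:

from django.core.management import call_command

call_command("feedback_report_pending")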
# pylint: disable=missing-docstring

import json
import platform
import re
import time
from textwrap import dedent
from urllib import quote_plus

# django_url is assigned late in the process of loading lettuce,
# so we import this as a module, and then read django_url from
# it to get the correct value
import lettuce.django
from lettuce import world
from nose.tools import assert_true
from selenium.common.exceptions import (
    InvalidElementStateException,
    StaleElementReferenceException,
    TimeoutException,
    WebDriverException
)
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

GLOBAL_WAIT_FOR_TIMEOUT = 60

REQUIREJS_WAIT = {
    # Settings - Schedule & Details
    re.compile(r'^Schedule & Details Settings \|'): [
        "jquery", "js/base", "js/models/course",
        "js/models/settings/course_details", "js/views/settings/main"],

    # Settings - Advanced Settings
    re.compile(r'^Advanced Settings \|'): [
        "jquery", "js/base", "js/models/course",
        "js/models/settings/advanced", "js/views/settings/advanced", "codemirror"],

    # Content - Outline
    # Note that calling your org, course number, or display name, 'course' will mess this up
    re.compile(r'^Course Outline \|'): [
        "js/base", "js/models/course", "js/models/location", "js/models/section"],

    # Dashboard
    re.compile(r'^Studio Home \|'): [
        "gettext", "js/base", "jquery.ui", "cms/js/main", "underscore"],

    # Pages
    re.compile(r'^Pages \|'): [
        'js/models/explicit_url', 'js/views/tabs', 'cms/js/main', 'xblock/cms.runtime.v1'
    ],
}

TRUTHY_WAIT = {
    # Pages
    re.compile(r'^Pages \|'): [
        'XBlock'
    ],
    # Unit page
    re.compile(r'Unit \|'): [
        "jQuery", "XBlock", "ContainerFactory"
    ],
}


@world.absorb
def wait(seconds):
    time.sleep(float(seconds))


@world.absorb
def wait_for_js_to_load():
    for test, req in REQUIREJS_WAIT.items():
        if test.search(world.browser.title):
            world.wait_for_requirejs(req)
            break
    for test, req in TRUTHY_WAIT.items():
        if test.search(world.browser.title):
            for var in req:
                world.wait_for_js_variable_truthy(var)


# Selenium's `execute_async_script` function pauses Selenium's execution
# until the browser calls a specific Javascript callback; in effect,
# Selenium goes to sleep until the JS callback function wakes it back up again.
# This callback is passed as the last argument to the script. Any arguments
# passed to this callback get returned from the `execute_async_script`
# function, which allows the JS to communicate information back to Python.
# Ref: https://selenium.googlecode.com/svn/trunk/docs/api/dotnet/html/M_OpenQA_Selenium_IJavaScriptExecutor_ExecuteAsyncScript.htm
@world.absorb
def wait_for_js_variable_truthy(variable):
    """
    Using Selenium's `execute_async_script` function, poll the Javascript
    environment until the given variable is defined and truthy. This process
    guards against page reloads, and seamlessly retries on the next page.
    """
    javascript = """
        var callback = arguments[arguments.length - 1];
        var unloadHandler = function() {{
          callback("unload");
        }}
        addEventListener("beforeunload", unloadHandler);
        addEventListener("unload", unloadHandler);
        var intervalID = setInterval(function() {{
          try {{
            if({variable}) {{
              clearInterval(intervalID);
              removeEventListener("beforeunload", unloadHandler);
              removeEventListener("unload", unloadHandler);
              callback(true);
            }}
          }} catch (e) {{}}
        }}, 10);
    """.format(variable=variable)
    for _ in range(5):  # 5 attempts max
        try:
            result = world.browser.driver.execute_async_script(dedent(javascript))
        except WebDriverException as wde:
            if "document unloaded while waiting for result" in wde.msg:
                result = "unload"
            else:
                raise
        if result == "unload":
            # we ran this on the wrong page. Wait a bit, and try again, when the
            # browser has loaded the next page.
            world.wait(1)
            continue
        else:
            return result


@world.absorb
def wait_for_xmodule():
    "Wait until the XModule Javascript has loaded on the page."
    world.wait_for_js_variable_truthy("XModule")
    world.wait_for_js_variable_truthy("XBlock")


@world.absorb
def wait_for_mathjax():
    "Wait until MathJax is loaded and set up on the page."
    world.wait_for_js_variable_truthy("MathJax")


class RequireJSError(Exception):
    """
    An error related to waiting for require.js. If require.js is unable
    to load a dependency in the `wait_for_requirejs` function, Python
    will throw this exception to make sure that the failure doesn't
    pass silently.
    """
    pass


def load_requrejs_modules(dependencies, callback="callback(true);"):
    javascript = """
        var callback = arguments[arguments.length - 1];
        if(window.require) {{
          requirejs.onError = callback;
          var unloadHandler = function() {{
            callback("unload");
          }}
          addEventListener("beforeunload", unloadHandler);
          addEventListener("unload", unloadHandler);
          require({deps}, function($) {{
            var modules = arguments;
            setTimeout(function() {{
              removeEventListener("beforeunload", unloadHandler);
              removeEventListener("unload", unloadHandler);
              {callback}
            }}, 50);
          }});
        }} else {{
          callback(false);
        }}
    """.format(deps=json.dumps(dependencies), callback=callback)
    for _ in range(5):  # 5 attempts max
        try:
            result = world.browser.driver.execute_async_script(dedent(javascript))
        except WebDriverException as wde:
            if "document unloaded while waiting for result" in wde.msg:
                result = "unload"
            else:
                raise
        if result == "unload":
            # we ran this on the wrong page. Wait a bit, and try again, when the
            # browser has loaded the next page.
            world.wait(1)
            continue
        elif result not in (None, True, False):
            # We got a require.js error
            # Sometimes requireJS will throw an error with requireType=require
            # This doesn't seem to cause problems on the page, so we ignore it
            if result['requireType'] == 'require':
                world.wait(1)
                continue
            # Otherwise, fail and report the error
            else:
                msg = "Error loading dependencies: type={0} modules={1}".format(
                    result['requireType'], result['requireModules'])
                err = RequireJSError(msg)
                err.error = result
                raise err
        else:
            return result


def wait_for_xmodules_to_load():
    """
    If requirejs is loaded on the page, this function will pause
    Selenium until require is finished loading all xmodules.
    If requirejs is not loaded on the page, this function will return
    immediately.
    """
    callback = """
        if (modules[0] && modules[0].done) {{
          modules[0].done(function () {{callback(true)}});
        }}
    """
    return load_requrejs_modules(["xmodule"], callback)


@world.absorb
def wait_for_requirejs(dependencies=None):
    """
    If requirejs is loaded on the page, this function will pause
    Selenium until require is finished loading the given dependencies.
    If requirejs is not loaded on the page, this function will return
    immediately.

    :param dependencies: a list of strings that identify resources that
        we should wait for requirejs to load. By default, requirejs will only
        wait for jquery.
    """
    if not dependencies:
        dependencies = ["jquery"]
    # stick jquery at the front
    if dependencies[0] != "jquery":
        dependencies.insert(0, "jquery")

    result = load_requrejs_modules(dependencies)
    if result and "xmodule" in dependencies:
        result = wait_for_xmodules_to_load()
    return result


@world.absorb
def wait_for_ajax_complete():
    """
    Wait until all jQuery AJAX calls have completed. "Complete" means that
    either the server has sent a response (regardless of whether the response
    indicates success or failure), or that the AJAX call timed out waiting for
    a response. For more information about the `jQuery.active` counter that
    keeps track of this information, go here:
    http://stackoverflow.com/questions/3148225/jquery-active-function#3148506
    """
    javascript = """
        var callback = arguments[arguments.length - 1];
        if(!window.jQuery) {callback(false);}
        var intervalID = setInterval(function() {
          if(jQuery.active == 0) {
            clearInterval(intervalID);
            callback(true);
          }
        }, 100);
    """
    # Sometimes the ajax when it returns will make the browser reload
    # the DOM, and throw a WebDriverException with the message:
    # 'javascript error: document unloaded while waiting for result'
    for _ in range(5):  # 5 attempts max
        try:
            result = world.browser.driver.execute_async_script(dedent(javascript))
        except WebDriverException as wde:
            if "document unloaded while waiting for result" in wde.msg:
                # Wait a bit, and try again, when the browser has reloaded the page.
                world.wait(1)
                continue
            else:
                raise
        return result


@world.absorb
def visit(url):
    world.browser.visit(lettuce.django.django_url(url))
    wait_for_js_to_load()


@world.absorb
def url_equals(url):
    return world.browser.url == lettuce.django.django_url(url)


@world.absorb
def is_css_present(css_selector, wait_time=30):
    return world.browser.is_element_present_by_css(css_selector, wait_time=wait_time)


@world.absorb
def is_css_not_present(css_selector, wait_time=5):
    world.browser.driver.implicitly_wait(1)
    try:
        return world.browser.is_element_not_present_by_css(css_selector, wait_time=wait_time)
    except:
        raise
    finally:
        world.browser.driver.implicitly_wait(world.IMPLICIT_WAIT)


@world.absorb
def css_has_text(css_selector, text, index=0, strip=False):
    """
    Return a boolean indicating whether the element with `css_selector`
    has `text`.

    If `strip` is True, strip whitespace at beginning/end of both
    strings before comparing.

    If there are multiple elements matching the css selector,
    use `index` to indicate which one.
    """
    # If we're expecting a non-empty string, give the page
    # a chance to fill in text fields.
    if text:
        wait_for(lambda _: css_text(css_selector, index=index))

    actual_text = css_text(css_selector, index=index)

    if strip:
        actual_text = actual_text.strip()
        text = text.strip()

    return actual_text == text


@world.absorb
def css_contains_text(css_selector, partial_text, index=0):
    """
    Return a boolean indicating whether the element with `css_selector`
    contains `partial_text`.

    If there are multiple elements matching the css selector,
    use `index` to indicate which one.
    """
    # If we're expecting a non-empty string, give the page
    # a chance to fill in text fields.
    if partial_text:
        wait_for(lambda _: css_html(css_selector, index=index), timeout=8)

    actual_text = css_html(css_selector, index=index)

    return partial_text in actual_text


@world.absorb
def css_has_value(css_selector, value, index=0):
    """
    Return a boolean indicating whether the element with
    `css_selector` has the specified `value`.

    If there are multiple elements matching the css selector,
    use `index` to indicate which one.
    """
    # If we're expecting a non-empty string, give the page
    # a chance to fill in values
    if value:
        wait_for(lambda _: css_value(css_selector, index=index))

    return css_value(css_selector, index=index) == value


@world.absorb
def wait_for(func, timeout=5, timeout_msg=None):
    """
    Calls the method provided with the driver as an argument until the
    return value is not False.
    Throws an error if the WebDriverWait timeout clock expires.
    Otherwise this method will return None.
    """
    msg = timeout_msg or "Timed out after {} seconds.".format(timeout)
    try:
        WebDriverWait(
            driver=world.browser.driver,
            timeout=timeout,
            ignored_exceptions=(StaleElementReferenceException)
        ).until(func)
    except TimeoutException:
        raise TimeoutException(msg)


@world.absorb
def wait_for_present(css_selector, timeout=GLOBAL_WAIT_FOR_TIMEOUT):
    """
    Wait for the element to be present in the DOM.
    """
    wait_for(
        func=lambda _: EC.presence_of_element_located((By.CSS_SELECTOR, css_selector,)),
        timeout=timeout,
        timeout_msg="Timed out waiting for {} to be present.".format(css_selector)
    )


@world.absorb
def wait_for_visible(css_selector, index=0, timeout=GLOBAL_WAIT_FOR_TIMEOUT):
    """
    Wait for the element to be visible in the DOM.
    """
    wait_for(
        func=lambda _: css_visible(css_selector, index),
        timeout=timeout,
        timeout_msg="Timed out waiting for {} to be visible.".format(css_selector)
    )


@world.absorb
def wait_for_invisible(css_selector, timeout=GLOBAL_WAIT_FOR_TIMEOUT):
    """
    Wait for the element to be either invisible or not present on the DOM.
    """
    wait_for(
        func=lambda _: EC.invisibility_of_element_located((By.CSS_SELECTOR, css_selector,)),
        timeout=timeout,
        timeout_msg="Timed out waiting for {} to be invisible.".format(css_selector)
    )


@world.absorb
def wait_for_clickable(css_selector, timeout=GLOBAL_WAIT_FOR_TIMEOUT):
    """
    Wait for the element to be present and clickable.
    """
    wait_for(
        func=lambda _: EC.element_to_be_clickable((By.CSS_SELECTOR, css_selector,)),
        timeout=timeout,
        timeout_msg="Timed out waiting for {} to be clickable.".format(css_selector)
    )


@world.absorb
def css_find(css, wait_time=GLOBAL_WAIT_FOR_TIMEOUT):
    """
    Wait for the element(s) as defined by css locator
    to be present.

    This method will return a WebDriverElement.
    """
    wait_for_present(css_selector=css, timeout=wait_time)
    return world.browser.find_by_css(css)


@world.absorb
def css_click(css_selector, index=0, wait_time=GLOBAL_WAIT_FOR_TIMEOUT, dismiss_alert=False):
    """
    Perform a click on a CSS selector, first waiting for the element
    to be present and clickable.

    This method will return True if the click worked.

    If `dismiss_alert` is true, dismiss any alerts that appear.
    """
    wait_for_clickable(css_selector, timeout=wait_time)
    wait_for_visible(css_selector, index=index, timeout=wait_time)
    assert_true(
        css_visible(css_selector, index=index),
        msg="Element {}[{}] is present but not visible".format(css_selector, index)
    )

    retry_on_exception(lambda: css_find(css_selector)[index].click())

    # Dismiss any alerts that occur.
    # We need to do this before calling `wait_for_js_to_load()`
    # to avoid getting an unexpected alert exception
    if dismiss_alert:
        world.browser.get_alert().accept()

    wait_for_js_to_load()
    return True


@world.absorb
def css_check(css_selector, wait_time=GLOBAL_WAIT_FOR_TIMEOUT):
    """
    Checks a check box based on a CSS selector, first waiting for the element
    to be present and clickable. This is just a wrapper for calling "click"
    because that's how selenium interacts with check boxes and radio buttons.

    Then for synchronization purposes, wait for the element to be checked.
    This method will return True if the check worked.
    """
    css_click(css_selector=css_selector, wait_time=wait_time)
    wait_for(lambda _: css_find(css_selector).selected)
    return True


@world.absorb
def select_option(name, value, wait_time=GLOBAL_WAIT_FOR_TIMEOUT):
    '''
    A method to select an option
    Then for synchronization purposes, wait for the option to be selected.
    This method will return True if the selection worked.
    '''
    select_css = "select[name='{}']".format(name)
    option_css = "option[value='{}']".format(value)

    css_selector = "{} {}".format(select_css, option_css)
    css_click(css_selector=css_selector, wait_time=wait_time)
    wait_for(lambda _: css_has_value(select_css, value))
    return True


@world.absorb
def id_click(elem_id):
    """
    Perform a click on an element as specified by its id
    """
    css_click('#{}'.format(elem_id))


@world.absorb
def css_fill(css_selector, text, index=0):
    """
    Set the value of the element to the specified text.
    Note that this will replace the current value completely.

    Then for synchronization purposes, wait for the value on the page.
    """
    wait_for_visible(css_selector, index=index)
    retry_on_exception(lambda: css_find(css_selector)[index].fill(text))
    wait_for(lambda _: css_has_value(css_selector, text, index=index))
    return True


@world.absorb
def click_link(partial_text, index=0):
    retry_on_exception(lambda: world.browser.find_link_by_partial_text(partial_text)[index].click())
    wait_for_js_to_load()


@world.absorb
def click_button(data_attr, index=0):
    xpath = '//button[text()="{button_text}"]'.format(
        button_text=data_attr
    )
    world.browser.find_by_xpath(xpath)[index].click()


@world.absorb
def click_link_by_text(text, index=0):
    retry_on_exception(lambda: world.browser.find_link_by_text(text)[index].click())


@world.absorb
def css_text(css_selector, index=0, timeout=GLOBAL_WAIT_FOR_TIMEOUT):
    # Wait for the css selector to appear
    if is_css_present(css_selector):
        return retry_on_exception(lambda: css_find(css_selector, wait_time=timeout)[index].text)
    else:
        return ""


@world.absorb
def css_value(css_selector, index=0):
    # Wait for the css selector to appear
    if is_css_present(css_selector):
        return retry_on_exception(lambda: css_find(css_selector)[index].value)
    else:
        return ""


@world.absorb
def css_html(css_selector, index=0):
    """
    Returns the HTML of a css_selector
    """
    assert is_css_present(css_selector)
    return retry_on_exception(lambda: css_find(css_selector)[index].html)


@world.absorb
def css_has_class(css_selector, class_name, index=0):
    return retry_on_exception(lambda: css_find(css_selector)[index].has_class(class_name))


@world.absorb
def css_visible(css_selector, index=0):
    assert is_css_present(css_selector)
    return retry_on_exception(lambda: css_find(css_selector)[index].visible)


@world.absorb
def dialogs_closed():
    def are_dialogs_closed(_driver):
        '''
        Return True when no modal dialogs are visible
        '''
        return not css_visible('.modal')
    wait_for(are_dialogs_closed)
    return not css_visible('.modal')


@world.absorb
def save_the_html(path='/tmp'):
    url = world.browser.url
    html = world.browser.html.encode('ascii', 'ignore')
    filename = "{path}/{name}.html".format(path=path, name=quote_plus(url))
    with open(filename, "w") as f:
        f.write(html)


@world.absorb
def click_course_content():
    world.wait_for_js_to_load()
    course_content_css = 'li.nav-course-courseware'
    css_click(course_content_css)


@world.absorb
def click_course_settings():
    world.wait_for_js_to_load()
    course_settings_css = 'li.nav-course-settings'
    css_click(course_settings_css)


@world.absorb
def click_tools():
    world.wait_for_js_to_load()
    tools_css = 'li.nav-course-tools'
    css_click(tools_css)


@world.absorb
def is_mac():
    return platform.mac_ver()[0] is not ''


@world.absorb
def is_firefox():
    return world.browser.driver_name is 'Firefox'


@world.absorb
def trigger_event(css_selector, event='change', index=0):
    world.browser.execute_script("$('{}:eq({})').trigger('{}')".format(css_selector, index, event))


@world.absorb
def retry_on_exception(func, max_attempts=5, ignored_exceptions=(StaleElementReferenceException, InvalidElementStateException)):
    """
    Retry the interaction, ignoring the passed exceptions.
    By default ignore StaleElementReferenceException, which happens often in our application
    when the DOM is being manipulated by client side JS.
    Note that ignored_exceptions is passed directly to the except block, and as such can be
    either a single exception or multiple exceptions as a parenthesized tuple.
    """
    attempt = 0
    while attempt < max_attempts:
        try:
            return func()
        except ignored_exceptions:
            world.wait(1)
            attempt += 1

    assert_true(attempt < max_attempts, 'Ran out of attempts to execute {}'.format(func))


@world.absorb
def disable_jquery_animations():
    """
    Disable JQuery animations on the page. Any state changes
    will occur immediately to the final state.
    """

    # Ensure that jquery is loaded
    world.wait_for_js_to_load()

    # Disable jQuery animations
    world.browser.execute_script("jQuery.fx.off = true;")
BehavioralInsightsTeam/edx-platform
common/djangoapps/terrain/ui_helpers.py
Python
agpl-3.0
21,595
[ "VisIt" ]
e1142dfba1e1c13b9f4a77c73ca66897181684dc5bc8c95f17ee0c54a92ac830
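The workhorse of the row above is `retry_on_exception`. The sketch below strips it of its Selenium/lettuce dependencies to show the bare retry pattern; the function names, the exception choice, and the toy `flaky` callable are ours, and this version raises on exhaustion rather than asserting.

# Dependency-free sketch of the retry pattern used throughout the helpers above.
import time

def retry_on_exception(func, max_attempts=5, ignored_exceptions=(ValueError,)):
    for attempt in range(max_attempts):
        try:
            return func()
        except ignored_exceptions:
            time.sleep(0.1)  # back off briefly before retrying
    raise AssertionError('Ran out of attempts to execute %s' % func)

calls = {'n': 0}
def flaky():
    # fails twice with a "transient" error, then succeeds
    calls['n'] += 1
    if calls['n'] < 3:
        raise ValueError("transient")
    return "ok"

print(retry_on_exception(flaky))  # "ok" on the third attempt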
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
#         Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors: Kyle A Beauchamp, Ravi Ramanathan
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################

##############################################################################
# Imports
##############################################################################

from __future__ import print_function, division
import numpy as np
from mdtraj.utils import ensure_type
from mdtraj.geometry import _geometry

__all__ = ['compute_dihedrals', 'compute_phi', 'compute_psi', 'compute_omega',
           'compute_chi1', 'compute_chi2', 'compute_chi3', 'compute_chi4']

##############################################################################
# Functions
##############################################################################


def _dihedral(xyz, indices, out=None):
    """Compute the dihedral angles of traj for the atom indices in indices.

    Parameters
    ----------
    xyz : np.ndarray, shape=(num_frames, num_atoms, 3), dtype=float
        The XYZ coordinates of a trajectory
    indices : np.ndarray, shape=(num_dihedrals, 4), dtype=int
        Atom indices to compute dihedrals.

    Returns
    -------
    dih : np.ndarray, shape=(num_dihedrals), dtype=float
        dih[i,j] gives the dihedral angle at traj[i] corresponding to indices[j].
    """
    x0 = xyz[:, indices[:, 0]]
    x1 = xyz[:, indices[:, 1]]
    x2 = xyz[:, indices[:, 2]]
    x3 = xyz[:, indices[:, 3]]

    b1 = x1 - x0
    b2 = x2 - x1
    b3 = x3 - x2

    c1 = np.cross(b2, b3)
    c2 = np.cross(b1, b2)

    p1 = (b1 * c1).sum(-1)
    p1 *= (b2 * b2).sum(-1) ** 0.5
    p2 = (c1 * c2).sum(-1)

    return np.arctan2(p1, p2, out)


def compute_dihedrals(traj, indices, opt=True):
    """Compute the dihedral angles between the supplied quartets of atoms in each frame in a trajectory.

    Parameters
    ----------
    traj : Trajectory
        An mdtraj trajectory.
    indices : np.ndarray, shape=(n_dihedrals, 4), dtype=int
        Each row gives the indices of four atoms which together make a
        dihedral angle. The angle is between the planes spanned by the first
        three atoms and the last three atoms, a torsion around the bond
        between the middle two atoms.
    opt : bool, default=True
        Use an optimized native library to calculate angles.

    Returns
    -------
    dihedrals : np.ndarray, shape=(n_frames, n_dihedrals), dtype=float
        The output array gives, in each frame from the trajectory, each of the
        `n_dihedrals` torsion angles. The angles are measured in **radians**.
    """
    xyz = ensure_type(traj.xyz, dtype=np.float32, ndim=3, name='traj.xyz', shape=(None, None, 3), warn_on_cast=False)
    quartets = ensure_type(np.asarray(indices), dtype=np.int32, ndim=2, name='indices', shape=(None, 4), warn_on_cast=False)
    if not np.all(np.logical_and(quartets < traj.n_atoms, quartets >= 0)):
        raise ValueError('indices must be between 0 and %d' % traj.n_atoms)

    out = np.zeros((xyz.shape[0], quartets.shape[0]), dtype=np.float32)
    if opt:
        _geometry._dihedral(xyz, quartets, out)
    else:
        _dihedral(xyz, quartets, out)
    return out


def _construct_atom_dict(topology, chain_id=0):
    """Create dictionary to lookup indices by atom name and residue_id.

    Parameters
    ----------
    topology : Topology
        The topology to parse
    chain_id : int
        The index of the chain to sequence

    Notes
    -----
    By default, we assume you are interested in the first chain.
    """
    atom_dict = {}
    for chain in topology.chains:
        if chain.index == chain_id:
            for residue in chain.residues:
                local_dict = {}
                for atom in residue.atoms:
                    local_dict[atom.name] = atom.index
                atom_dict[residue.index] = local_dict
            break
    return atom_dict


def _atom_sequence(traj, atom_names, residue_offsets=None, chain_id=0):
    """Find sequences of atom indices corresponding to desired atoms.

    This method can be used to find sets of atoms corresponding to specific
    dihedral angles (like phi or psi). It looks for the given pattern of atoms
    in each residue of a given chain. See the example for details.

    Parameters
    ----------
    traj : Trajectory
        Trajectory for which you want dihedrals.
    atom_names : np.ndarray, shape=(4), dtype='str'
        Array of atoms to in each dihedral angle.
    residue_offsets : np.ndarray, optional, shape=(4), dtype='int'
        Array of integer offsets for each atom. These are used to refer
        to atoms forward or backward in the chain relative to the current
        residue
    chain_id : int
        The index of the chain to sequence.

    Notes
    -----
    In addition to finding dihedral atoms, this function could be used to
    match *general* sequences of atoms and residue_id offsets.

    Examples
    --------
    Here we calculate the phi torsion angles by specifying the correct
    atom names and the residue_id offsets (e.g. forward or backward in
    chain) for each atom.

    >>> traj = mdtraj.load("native.pdb")
    >>> atom_names = ["C" ,"N" , "CA", "C"]
    >>> residue_offsets = [-1, 0, 0, 0]
    >>> found_residue_ids, indices = _atom_sequence(traj, atom_names, residue_offsets)
    """
    if residue_offsets is None:
        residue_offsets = parse_offsets(atom_names)
    atom_names = _strip_offsets(atom_names)

    atom_dict = _construct_atom_dict(traj.top, chain_id=chain_id)
    atom_indices = []
    found_residue_ids = []
    # py3k critical list(zip(, not just zip(, since we iterate multiple
    # times through it
    atoms_and_offsets = list(zip(atom_names, residue_offsets))
    for chain in traj.top.chains:
        if chain.index == chain_id:
            for residue in chain.residues:
                rid = residue.index
                # Check that desired residue_IDs are in dict
                if all([rid + offset in atom_dict for offset in residue_offsets]):
                    # Check that we find all atom names in dict
                    if all([atom in atom_dict[rid + offset] for atom, offset in atoms_and_offsets]):
                        # Lookup desired atom indices and add to list.
                        atom_indices.append([atom_dict[rid + offset][atom] for atom, offset in atoms_and_offsets])
                        found_residue_ids.append(rid)

    atom_indices = np.array(atom_indices)
    found_residue_ids = np.array(found_residue_ids)

    return found_residue_ids, atom_indices


def parse_offsets(atom_names):
    """Convert a list of atom+offset strings into lists offsets.

    Parameters
    ----------
    atom_names : list
        The names of the atoms to parse for their offsets.

    Returns
    -------
    offsets : list
        The offsets of the atoms, giving whether they refer to atoms
        in the previous residue (-1), current residue (0) or next
        residue (+1)

    Notes
    -----
    For example, ["-C", "N", "CA", "C"] will be parsed as [-1, 0, 0, 0]
    """
    offsets = []
    for atom in atom_names:
        if atom[0] == "-":
            offsets.append(-1)
        elif atom[0] == "+":
            offsets.append(+1)
        else:
            offsets.append(0)
    return offsets


def _strip_offsets(atom_names):
    """Convert a list of atom + offset strings into lists of atoms.

    Parameters
    ----------
    atom_names : list
        The names of the atoms, whose offset prexifs you want to strip

    Notes
    -----
    For example, ["-C", "N", "CA", "C"] will be parsed as ["C","N","CA","C"]
    """
    atoms = []
    for atom in atom_names:
        if atom[0] == "-":
            atoms.append(atom[1:])
        elif atom[0] == "+":
            atoms.append(atom[1:])
        else:
            atoms.append(atom)
    return atoms


PHI_ATOMS = ["-C", "N", "CA", "C"]
PSI_ATOMS = ["N", "CA", "C", "+N"]
OMEGA_ATOMS = ["CA", "C", "+N", "+CA"]
CHI1_ATOMS = [["N", "CA", "CB", "CG"],
              ["N", "CA", "CB", "CG1"],
              ["N", "CA", "CB", "SG"],
              ["N", "CA", "CB", "OG"],
              ["N", "CA", "CB", "OG1"]]
CHI2_ATOMS = [["CA", "CB", "CG", "CD"],
              ["CA", "CB", "CG", "CD1"],
              ["CA", "CB", "CG1", "CD1"],
              ["CA", "CB", "CG", "OD1"],
              ["CA", "CB", "CG", "ND1"]]
CHI3_ATOMS = [["CB", "CG", "CD", "NE"],
              ["CB", "CG", "CD", "CE"],
              ["CB", "CG", "CD", "OE1"],
              ["CB", "CG", "SD", "CE"]]
CHI4_ATOMS = [["CG", "CD", "NE", "CZ"],
              ["CG", "CD", "CE", "NZ"]]

_get_indices_omega = lambda traj: _atom_sequence(traj, OMEGA_ATOMS)
_get_indices_phi = lambda traj: _atom_sequence(traj, PHI_ATOMS)
_get_indices_psi = lambda traj: _atom_sequence(traj, PSI_ATOMS)


def compute_phi(traj, opt=True):
    """Calculate the phi torsions of a trajectory.

    Parameters
    ----------
    traj : Trajectory
        Trajectory for which you want dihedrals.
    opt : bool, default=True
        Use an optimized native library to calculate angles.

    Returns
    -------
    indices : np.ndarray, shape=(n_phi, 4)
        The indices of the atoms involved in each of the phi dihedral angles
    angles : np.ndarray, shape=(n_frames, n_phi)
        The value of the dihedral angle for each of the angles in each of
        the frames.
    """
    rid, indices = _get_indices_phi(traj)
    if len(indices) == 0:
        return np.empty(shape=(0,4), dtype=np.int), np.empty(shape=(len(traj), 0), dtype=np.float32)
    return indices, compute_dihedrals(traj, indices, opt=opt)


def compute_psi(traj, opt=True):
    """Calculate the psi torsions of a trajectory.

    Parameters
    ----------
    traj : Trajectory
        Trajectory for which you want dihedrals.
    opt : bool, default=True
        Use an optimized native library to calculate angles.

    Returns
    -------
    indices : np.ndarray, shape=(n_psi, 4)
        The indices of the atoms involved in each of the psi dihedral angles
    angles : np.ndarray, shape=(n_frames, n_psi)
        The value of the dihedral angle for each of the angles in each of
        the frames.
    """
    rid, indices = _get_indices_psi(traj)
    if len(indices) == 0:
        return np.empty(shape=(0,4), dtype=np.int), np.empty(shape=(len(traj), 0), dtype=np.float32)
    return indices, compute_dihedrals(traj, indices, opt=opt)


def compute_chi1(traj, opt=True):
    """Calculate the chi1 torsions of a trajectory. chi1 is the first side chain torsion angle
    formed between the 4 atoms over the CA-CB axis.

    Parameters
    ----------
    traj : Trajectory
        Trajectory for which you want dihedrals.
    opt : bool, default=True
        Use an optimized native library to calculate angles.
Returns ------- indices : np.ndarray, shape=(n_chi, 4) The indices of the atoms involved in each of the chi1 dihedral angles angles : np.ndarray, shape=(n_frames, n_chi) The value of the dihedral angle for each of the angles in each of the frames. """ rids, indices = zip(*(_atom_sequence(traj, atoms) for atoms in CHI1_ATOMS)) id_sort = np.argsort(np.concatenate(rids)) if not any(x.size for x in indices): return np.empty(shape=(0,4), dtype=np.int), np.empty(shape=(len(traj), 0), dtype=np.float32) indices = np.vstack(x for x in indices if x.size)[id_sort] all_chi1 = compute_dihedrals(traj, indices, opt=opt) return indices, all_chi1 def compute_chi2(traj, opt=True): """Calculate the chi2 torsions of a trajectory. chi2 is the second side chain torsion angle formed between the corresponding 4 atoms over the CB-CG axis. Parameters ---------- traj : Trajectory Trajectory for which you want dihedrals. opt : bool, default=True Use an optimized native library to calculate angles. Returns ------- indices : np.ndarray, shape=(n_chi, 4) The indices of the atoms involved in each of the chi dihedral angles angles : np.ndarray, shape=(n_frames, n_chi) The value of the dihedral angle for each of the angles in each of the frames. """ rids, indices = zip(*(_atom_sequence(traj, atoms) for atoms in CHI2_ATOMS)) id_sort = np.argsort(np.concatenate(rids)) if not any(x.size for x in indices): return np.empty(shape=(0,4), dtype=np.int), np.empty(shape=(len(traj), 0), dtype=np.float32) indices = np.vstack(x for x in indices if x.size)[id_sort] all_chi1 = compute_dihedrals(traj, indices, opt=opt) return indices, all_chi1 def compute_chi3(traj, opt=True): """Calculate the chi3 torsions of a trajectory. chi3 is the third side chain torsion angle formed between the corresponding 4 atoms over the CG-CD axis (only the residues ARG, GLN, GLU, LYS & MET have these atoms) Parameters ---------- traj : Trajectory Trajectory for which you want dihedrals. opt : bool, default=True Use an optimized native library to calculate angles. Returns ------- indices : np.ndarray, shape=(n_chi, 4) The indices of the atoms involved in each of the chi dihedral angles angles : np.ndarray, shape=(n_frames, n_chi) The value of the dihedral angle for each of the angles in each of the frames. """ rids, indices = zip(*(_atom_sequence(traj, atoms) for atoms in CHI3_ATOMS)) id_sort = np.argsort(np.concatenate(rids)) if not any(x.size for x in indices): return np.empty(shape=(0,4), dtype=np.int), np.empty(shape=(len(traj), 0), dtype=np.float32) indices = np.vstack(x for x in indices if x.size)[id_sort] all_chi1 = compute_dihedrals(traj, indices, opt=opt) return indices, all_chi1 def compute_chi4(traj, opt=True): """Calculate the chi4 torsions of a trajectory. chi4 is the fourth side chain torsion angle formed between the corresponding 4 atoms over the CD-CE or CD-NE axis (only ARG & LYS residues have these atoms) Parameters ---------- traj : Trajectory Trajectory for which you want dihedrals. opt : bool, default=True Use an optimized native library to calculate angles. Returns ------- indices : np.ndarray, shape=(n_chi, 4) The indices of the atoms involved in each of the chi dihedral angles angles : np.ndarray, shape=(n_frames, n_chi) The value of the dihedral angle for each of the angles in each of the frames. 
""" rids, indices = zip(*(_atom_sequence(traj, atoms) for atoms in CHI4_ATOMS)) id_sort = np.argsort(np.concatenate(rids)) if not any(x.size for x in indices): return np.empty(shape=(0,4), dtype=np.int), np.empty(shape=(len(traj), 0), dtype=np.float32) indices = np.vstack(x for x in indices if x.size)[id_sort] all_chi1 = compute_dihedrals(traj, indices, opt=opt) return indices, all_chi1 def compute_omega(traj, opt=True): """Calculate the omega torsions of a trajectory. Parameters ---------- traj : Trajectory Trajectory for which you want dihedrals. opt : bool, default=True Use an optimized native library to calculate angles. Returns ------- indices : np.ndarray, shape=(n_omega, 4) The indices of the atoms involved in each of the omega dihedral angles angles : np.ndarray, shape=(n_frames, n_omega) The value of the dihedral angle for each of the angles in each of the frames. """ rid, indices = _get_indices_omega(traj) if len(indices) == 0: return np.empty(shape=(0,4), dtype=np.int), np.empty(shape=(len(traj), 0), dtype=np.float32) return indices, compute_dihedrals(traj, indices, opt=opt)
marscher/mdtraj
MDTraj/geometry/dihedral.py
Python
lgpl-2.1
16,636
[ "MDTraj" ]
95352000584b6f6f51deebc6c87663afa6eb73c6de4523a63b6b856ebd7b00f5
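To see the slow-path formula in `_dihedral` above at work without an MDTraj install, the same arithmetic can be run on a single hand-built frame; the toy coordinates below are illustrative only.

import numpy as np

xyz = np.array([[[0.0, 1.0, 0.0],    # x0
                 [0.0, 0.0, 0.0],    # x1
                 [1.0, 0.0, 0.0],    # x2
                 [1.0, 0.0, 1.0]]])  # x3 -- shape (1 frame, 4 atoms, 3)
indices = np.array([[0, 1, 2, 3]])   # one quartet

b1 = xyz[:, indices[:, 1]] - xyz[:, indices[:, 0]]
b2 = xyz[:, indices[:, 2]] - xyz[:, indices[:, 1]]
b3 = xyz[:, indices[:, 3]] - xyz[:, indices[:, 2]]
c1 = np.cross(b2, b3)
c2 = np.cross(b1, b2)
p1 = (b1 * c1).sum(-1) * ((b2 * b2).sum(-1) ** 0.5)
p2 = (c1 * c2).sum(-1)
print(np.degrees(np.arctan2(p1, p2)))  # [[90.]] for this right-angle geometry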
from compiler import ast # XXX should probably rename ASTVisitor to ASTWalker # XXX can it be made even more generic? class ASTVisitor: """Performs a depth-first walk of the AST The ASTVisitor will walk the AST, performing either a preorder or postorder traversal depending on which method is called. methods: preorder(tree, visitor) postorder(tree, visitor) tree: an instance of ast.Node visitor: an instance with visitXXX methods The ASTVisitor is responsible for walking over the tree in the correct order. For each node, it checks the visitor argument for a method named 'visitNodeType' where NodeType is the name of the node's class, e.g. Class. If the method exists, it is called with the node as its sole argument. The visitor method for a particular node type can control how child nodes are visited during a preorder walk. (It can't control the order during a postorder walk, because it is called _after_ the walk has occurred.) The ASTVisitor modifies the visitor argument by adding a visit method to the visitor; this method can be used to visit a child node of arbitrary type. """ VERBOSE = 0 def __init__(self): self.node = None self._cache = {} def default(self, node, *args): for child in node.getChildNodes(): self.dispatch(child, *args) def dispatch(self, node, *args): self.node = node klass = node.__class__ meth = self._cache.get(klass, None) if meth is None: className = klass.__name__ meth = getattr(self.visitor, 'visit' + className, self.default) self._cache[klass] = meth ## if self.VERBOSE > 0: ## className = klass.__name__ ## if self.VERBOSE == 1: ## if meth == 0: ## print "dispatch", className ## else: ## print "dispatch", className, (meth and meth.__name__ or '') return meth(node, *args) def preorder(self, tree, visitor, *args): """Do preorder walk of tree using visitor""" self.visitor = visitor visitor.visit = self.dispatch self.dispatch(tree, *args) # XXX *args make sense? class ExampleASTVisitor(ASTVisitor): """Prints examples of the nodes that aren't visited This visitor-driver is only useful for development, when it's helpful to develop a visitor incremently, and get feedback on what you still have to do. """ examples = {} def dispatch(self, node, *args): self.node = node meth = self._cache.get(node.__class__, None) className = node.__class__.__name__ if meth is None: meth = getattr(self.visitor, 'visit' + className, 0) self._cache[node.__class__] = meth if self.VERBOSE > 1: print "dispatch", className, (meth and meth.__name__ or '') if meth: meth(node, *args) elif self.VERBOSE > 0: klass = node.__class__ if not self.examples.has_key(klass): self.examples[klass] = klass print print self.visitor print klass for attr in dir(node): if attr[0] != '_': print "\t", "%-12.12s" % attr, getattr(node, attr) print return self.default(node, *args) # XXX this is an API change _walker = ASTVisitor def walk(tree, visitor, walker=None, verbose=None): if walker is None: walker = _walker() if verbose is not None: walker.VERBOSE = verbose walker.preorder(tree, visitor) return walker.visitor def dumpNode(node): print node.__class__ for attr in dir(node): if attr[0] != '_': print "\t", "%-10.10s" % attr, getattr(node, attr)
trivoldus28/pulsarch-verilog
tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/compiler/visitor.py
Python
gpl-2.0
3,900
[ "VisIt" ]
fe171bdac89157d9cb8576778917129735945bece4f8e19fbe4f9c8f9189f11c
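The `compiler` package used in the row above is Python-2-only and was removed in Python 3; the nearest modern stdlib equivalent of its `visitXXX` dispatch is `ast.NodeVisitor`. A short sketch, not part of the original module:

import ast

class FunctionLister(ast.NodeVisitor):
    def visit_FunctionDef(self, node):
        print(node.name)
        self.generic_visit(node)  # keep walking so nested defs are found too

tree = ast.parse("def outer():\n    def inner():\n        pass\n")
FunctionLister().visit(tree)  # prints: outer, then inner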
# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- import unittest import tempfile import shutil import io import os.path try: import httpretty has_httpretty = True except ImportError: has_httpretty = False import skbio.io from skbio.io.registry import open_file from skbio.util import get_data_path class TestOpen(unittest.TestCase): def test_open_invalid_mode(self): with self.assertRaises(ValueError): skbio.io.open([], mode='a') def test_open_invalid_source(self): with self.assertRaises(skbio.io.IOSourceError): skbio.io.open(42) def test_open_invalid_source_compression(self): with self.assertRaises(ValueError): skbio.io.open(['foo'], compression='gzip') def test_open_invalid_source_encoding(self): with self.assertRaises(ValueError): skbio.io.open(['foo'], encoding='binary') with self.assertRaises(ValueError): skbio.io.open(['foo'], encoding='binary', newline='\r') def test_open_invalid_compression(self): with self.assertRaises(ValueError): skbio.io.open(io.BytesIO(), compression='foo') class ReadableBinarySourceTests: def check_closed(self, file, expected): if hasattr(file, 'closed'): self.assertEqual(file.closed, expected) def check_open_state_contents(self, file, contents, is_binary, **kwargs): result = skbio.io.open(file, **kwargs) if is_binary: self.assertIsInstance(result, (io.BufferedReader, io.BufferedRandom)) else: self.assertIsInstance(result, io.TextIOBase) self.assertTrue(result.readable()) self.assertEqual(result.read(), contents) self.assertFalse(result.closed) result.close() self.assertTrue(result.closed) self.check_closed(file, True) def check_open_file_state_contents(self, file, contents, is_binary, **kwargs): with open_file(file, **kwargs) as f: if is_binary: self.assertIsInstance(f, (io.BufferedReader, io.BufferedRandom)) else: self.assertIsInstance(f, io.TextIOBase) self.assertTrue(f.readable()) self.assertEqual(f.read(), contents) self.assertEqual(f.closed, self.expected_close) f.close() self.assertTrue(f.closed) self.check_closed(file, True) def check_open_buffer_close_behaviour(self, file, **kwargs): if hasattr(file, 'close'): wrapped = skbio.io.open(file, **kwargs) file.close() self.assertTrue(wrapped.closed) def check_open_file_buffer_close_behaviour(self, file, **kwargs): if hasattr(file, 'close'): with open_file(file, **kwargs) as wrapped: file.close() self.assertTrue(wrapped.closed) def check_open_gc_behaviour(self, file, **kwargs): def mangle(file): result = skbio.io.open(file, **kwargs) self.assertIsInstance(result, io.TextIOBase) f = skbio.io.open(file, encoding='binary') mangle(f) self.assertFalse(f.closed) f.close() def check_open_file_gc_behaviour(self, file, **kwargs): def mangle(file): with open_file(file, **kwargs) as result: self.assertIsInstance(result, io.TextIOBase) with open_file(file, encoding='binary') as f: mangle(f) self.assertFalse(f.closed) def test_open_gc_binary(self): self.check_open_gc_behaviour(self.read_file) def test_open_gc_encoding(self): self.check_open_gc_behaviour(self.encoded_file) def test_open_gc_compression(self): self.check_open_gc_behaviour(self.gzip_file) self.check_open_gc_behaviour(self.bz2_file) def test_open_gc_compression_encoding(self): self.check_open_gc_behaviour(self.gzip_encoded_file) 
self.check_open_gc_behaviour(self.bz2_encoded_file) def test_open_file_gc_binary(self): self.check_open_file_gc_behaviour(self.read_file) def test_open_file_gc_encoding(self): self.check_open_file_gc_behaviour(self.encoded_file) def test_open_file_gc_compression(self): self.check_open_file_gc_behaviour(self.gzip_file) self.check_open_file_gc_behaviour(self.bz2_file) def test_open_file_gc_compression_encoding(self): self.check_open_file_gc_behaviour(self.gzip_encoded_file) self.check_open_file_gc_behaviour(self.bz2_encoded_file) def test_open_underclose_binary(self): self.check_open_buffer_close_behaviour(self.read_file) def test_open_underclose_encoding(self): self.check_open_buffer_close_behaviour(self.encoded_file) def test_open_underclose_compression(self): self.check_open_buffer_close_behaviour(self.gzip_file) self.check_open_buffer_close_behaviour(self.bz2_file) def test_open_underclose_compression_encoding(self): self.check_open_buffer_close_behaviour(self.gzip_encoded_file) self.check_open_buffer_close_behaviour(self.bz2_encoded_file) def test_open_file_underclose_binary(self): self.check_open_file_buffer_close_behaviour(self.read_file) def test_open_file_underclose_encoding(self): self.check_open_file_buffer_close_behaviour(self.encoded_file) def test_open_file_underclose_compression(self): self.check_open_file_buffer_close_behaviour(self.gzip_file) self.check_open_file_buffer_close_behaviour(self.bz2_file) def test_open_file_underclose_compression_encoding(self): self.check_open_file_buffer_close_behaviour(self.gzip_encoded_file) self.check_open_file_buffer_close_behaviour(self.bz2_encoded_file) def test_open_binary(self): self.check_open_state_contents(self.read_file, self.binary_contents, True, mode='r', encoding='binary') def test_open_binary_compression_none(self): self.check_open_state_contents(self.read_file, self.binary_contents, True, mode='r', encoding='binary', compression=None) def test_open_encoding(self): self.check_open_state_contents(self.encoded_file, self.decoded_contents, False, mode='r', encoding=self.encoding) def test_open_auto_compression_binary(self): self.check_open_state_contents(self.gzip_file, self.binary_contents, True, mode='r', encoding='binary', compression='auto') self.check_open_state_contents(self.bz2_file, self.binary_contents, True, mode='r', encoding='binary', compression='auto') def test_open_gzip_compression_binary(self): self.check_open_state_contents(self.gzip_file, self.binary_contents, True, mode='r', encoding='binary', compression='gzip') def test_open_bz2_compression_binary(self): self.check_open_state_contents(self.bz2_file, self.binary_contents, True, mode='r', encoding='binary', compression='bz2') def test_open_default_compression_encoding(self): self.check_open_state_contents(self.gzip_encoded_file, self.decoded_contents, False, mode='r', encoding=self.encoding) self.check_open_state_contents(self.bz2_encoded_file, self.decoded_contents, False, mode='r', encoding=self.encoding) def test_open_file_binary(self): self.check_open_file_state_contents(self.read_file, self.binary_contents, True, mode='r', encoding='binary') def test_open_file_binary_compression_none(self): self.check_open_file_state_contents(self.read_file, self.binary_contents, True, mode='r', encoding='binary', compression=None) def test_open_file_encoding(self): self.check_open_file_state_contents(self.encoded_file, self.decoded_contents, False, mode='r', encoding=self.encoding) def test_open_file_auto_compression_binary(self): 
self.check_open_file_state_contents(self.gzip_file, self.binary_contents, True, mode='r', encoding='binary', compression='auto') self.check_open_file_state_contents(self.bz2_file, self.binary_contents, True, mode='r', encoding='binary', compression='auto') def test_open_file_gzip_compression_binary(self): self.check_open_file_state_contents(self.gzip_file, self.binary_contents, True, mode='r', encoding='binary', compression='gzip') def test_open_file_bz2_compression_binary(self): self.check_open_file_state_contents(self.bz2_file, self.binary_contents, True, mode='r', encoding='binary', compression='bz2') def test_open_file_default_compression_encoding(self): self.check_open_file_state_contents(self.gzip_encoded_file, self.decoded_contents, False, mode='r', encoding=self.encoding) self.check_open_file_state_contents(self.bz2_encoded_file, self.decoded_contents, False, mode='r', encoding=self.encoding) class ReadableSourceTest(unittest.TestCase): def setUp(self): self.read_file = self.get_fileobj(get_data_path("example_file")) self.gzip_file = \ self.get_fileobj(get_data_path("example_file.gz")) self.bz2_file = \ self.get_fileobj(get_data_path("example_file.bz2")) self.encoded_file = self.get_fileobj(get_data_path("big5_file")) self.gzip_encoded_file = \ self.get_fileobj(get_data_path("big5_file.gz")) self.bz2_encoded_file = \ self.get_fileobj(get_data_path("big5_file.bz2")) self.binary_contents = (b"This is some content\n" b"It occurs on more than one line\n") self.decoded_contents = '\u4f60\u597d\n' # Ni Hau self.compression = 'gzip' self.encoding = "big5" def tearDown(self): self.safe_close(self.read_file) self.safe_close(self.gzip_file) self.safe_close(self.bz2_file) self.safe_close(self.encoded_file) self.safe_close(self.gzip_encoded_file) self.safe_close(self.bz2_encoded_file) def safe_close(self, f): if hasattr(f, 'close'): f.close() class WritableBinarySourceTests: def check_closed(self, file, expected): if hasattr(file, 'closed'): self.assertEqual(file.closed, expected) def check_open_state_contents(self, file, contents, is_binary, **kwargs): result = skbio.io.open(file, mode='w', **kwargs) if is_binary: self.assertIsInstance(result, (io.BufferedWriter, io.BufferedRandom)) else: self.assertIsInstance(result, io.TextIOBase) self.assertTrue(result.writable()) result.write(contents) self.assertFalse(result.closed) if self.expected_close: result.close() self.assertTrue(result.closed) self.check_closed(file, True) def compare_gzip_file_contents(self, a, b): # The first 10 bytes of a gzip header include a timestamp. The header # can be followed by other "volatile" metadata, so only compare gzip # footers (last 8 bytes) which contain a CRC-32 checksum and the length # of the uncompressed data. 
self.assertEqual(a[-8:], b[-8:]) def test_open_binary(self): self.check_open_state_contents(self.binary_file, self.binary_contents, True, encoding='binary', compression=None) self.assertEqual(self.get_contents(self.binary_file), self.binary_contents) def test_open_gzip(self): self.check_open_state_contents(self.gzip_file, self.text_contents, False, compression='gzip') self.compare_gzip_file_contents(self.get_contents(self.gzip_file), self.gzip_contents) def test_open_bz2(self): self.check_open_state_contents(self.bz2_file, self.text_contents, False, compression='bz2') self.assertEqual(self.get_contents(self.bz2_file), self.bz2_contents) def test_open_encoding(self): self.check_open_state_contents(self.big5_file, self.decoded_contents, False, encoding='big5') self.assertEqual(self.get_contents(self.big5_file), self.encoded_contents) def test_open_gzip_encoding(self): self.check_open_state_contents(self.gzip_encoded_file, self.decoded_contents, False, compression='gzip', encoding='big5') self.compare_gzip_file_contents( self.get_contents(self.gzip_encoded_file), self.gzip_encoded_contents) def test_open_bz2_encoding(self): self.check_open_state_contents(self.bz2_encoded_file, self.decoded_contents, False, compression='bz2', encoding='big5') self.assertEqual(self.get_contents(self.bz2_encoded_file), self.bz2_encoded_contents) class WritableSourceTest(unittest.TestCase): def setUp(self): self._dir = tempfile.mkdtemp() with io.open(get_data_path('example_file'), mode='rb') as f: self.binary_contents = f.read() self.binary_file = self._make_file('example_file') with io.open(get_data_path('big5_file'), mode='rb') as f: self.encoded_contents = f.read() self.big5_file = self._make_file('big5_file') with io.open(get_data_path('example_file.gz'), mode='rb') as f: self.gzip_contents = f.read() self.gzip_file = self._make_file('example_file.gz') with io.open(get_data_path('example_file.bz2'), mode='rb') as f: self.bz2_contents = f.read() self.bz2_file = self._make_file('example_file.bz2') with io.open(get_data_path('big5_file.gz'), mode='rb') as f: self.gzip_encoded_contents = f.read() self.gzip_encoded_file = self._make_file('big5_file.gz') with io.open(get_data_path('big5_file.bz2'), mode='rb') as f: self.bz2_encoded_contents = f.read() self.bz2_encoded_file = self._make_file('big5_file.bz2') self.decoded_contents = self.encoded_contents.decode('big5') self.text_contents = self.binary_contents.decode('utf8') def tearDown(self): shutil.rmtree(self._dir) self.safe_close(self.binary_file) self.safe_close(self.gzip_file) self.safe_close(self.bz2_file) self.safe_close(self.big5_file) self.safe_close(self.gzip_encoded_file) self.safe_close(self.bz2_encoded_file) def safe_close(self, f): if hasattr(f, 'close'): f.close() def _make_file(self, name): return self.get_fileobj(os.path.join(self._dir, name)) class TestReadFilepath(ReadableBinarySourceTests, ReadableSourceTest): expected_close = True def get_fileobj(self, path): return path class TestWriteFilepath(WritableBinarySourceTests, WritableSourceTest): expected_close = True def get_fileobj(self, path): return path def get_contents(self, file): with io.open(file, mode='rb') as f: return f.read() @unittest.skipIf(not has_httpretty, "HTTPretty not available to mock tests.") class TestReadURL(ReadableBinarySourceTests, ReadableSourceTest): expected_close = True def setUp(self): super(TestReadURL, self).setUp() httpretty.enable() for file in (get_data_path('example_file'), get_data_path('big5_file'), get_data_path('example_file.gz'), 
get_data_path('example_file.bz2'), get_data_path('big5_file.gz'), get_data_path('big5_file.bz2')): with io.open(file, mode='rb') as f: httpretty.register_uri(httpretty.GET, self.get_fileobj(file), body=f.read(), content_type="application/octet-stream") def tearDown(self): super(TestReadURL, self).setUp() httpretty.disable() def get_fileobj(self, path): return "http://example.com/" + os.path.split(path)[1] class TestReadBytesIO(ReadableBinarySourceTests, ReadableSourceTest): expected_close = False def get_fileobj(self, path): with io.open(path, mode='rb') as f: return io.BytesIO(f.read()) class TestWriteBytesIO(WritableBinarySourceTests, WritableSourceTest): expected_close = False def get_fileobj(self, path): return io.BytesIO() def get_contents(self, file): return file.getvalue() def test_open_gzip(self): self.check_open_state_contents(self.gzip_file, self.text_contents, False, compression='gzip') self.compare_gzip_file_contents(self.get_contents(self.gzip_file), self.gzip_contents) def test_open_gzip_encoding(self): self.check_open_state_contents(self.gzip_encoded_file, self.decoded_contents, False, compression='gzip', encoding='big5') self.compare_gzip_file_contents( self.get_contents(self.gzip_encoded_file), self.gzip_encoded_contents) class TestReadBufferedReader(ReadableBinarySourceTests, ReadableSourceTest): expected_close = False def get_fileobj(self, path): return io.open(path, mode='rb') class TestWriteBufferedReader(WritableBinarySourceTests, WritableSourceTest): expected_close = False def get_fileobj(self, path): return io.open(path, mode='w+b') def get_contents(self, file): file.close() with io.open(file.name, mode='rb') as f: return f.read() class TestReadNamedTemporaryFile(ReadableBinarySourceTests, ReadableSourceTest): expected_close = False def get_fileobj(self, path): fileobj = tempfile.NamedTemporaryFile() with io.open(path, mode='rb') as fh: fileobj.write(fh.read()) fileobj.flush() fileobj.seek(0) return fileobj class TestWriteNamedTemporaryFile(WritableBinarySourceTests, WritableSourceTest): expected_close = False def get_fileobj(self, path): return tempfile.NamedTemporaryFile() def get_contents(self, file): file.flush() file.seek(0) contents = file.read() file.close() return contents class TestReadTemporaryFile(ReadableBinarySourceTests, ReadableSourceTest): expected_close = False def get_fileobj(self, path): fileobj = tempfile.TemporaryFile() with io.open(path, mode='rb') as fh: fileobj.write(fh.read()) fileobj.flush() fileobj.seek(0) return fileobj class TestWriteTemporaryFile(WritableBinarySourceTests, WritableSourceTest): expected_close = False def get_fileobj(self, path): return tempfile.TemporaryFile() def get_contents(self, file): file.flush() file.seek(0) contents = file.read() file.close() return contents class TestIterableReaderWriter(unittest.TestCase): def test_open(self): def gen(): yield from ('a', 'b', 'c') list_ = list(gen()) for input_ in gen(), list_: with skbio.io.open(input_) as result: self.assertIsInstance(result, io.TextIOBase) self.assertEqual(result.read(), 'abc') def test_open_with_newline(self): lines = ['a\r', 'b\r', 'c\r'] with skbio.io.open(lines, newline='\r') as result: self.assertIsInstance(result, io.TextIOBase) self.assertEqual(result.readlines(), lines) def test_open_invalid_iterable(self): with self.assertRaises(skbio.io.IOSourceError): skbio.io.open([1, 2, 3]) def test_open_empty_iterable(self): with skbio.io.open([]) as result: self.assertIsInstance(result, io.TextIOBase) self.assertEqual(result.read(), '') def 
test_open_write_mode(self): lines = [] with skbio.io.open(lines, mode='w') as fh: fh.write('abc') self.assertEqual(lines, ['abc']) lines = [] with skbio.io.open(lines, mode='w', newline='\r') as fh: fh.write('ab\nc\n') self.assertEqual(lines, ['ab\r', 'c\r']) self.assertTrue(fh.closed) fh.close() self.assertTrue(fh.closed) if __name__ == '__main__': unittest.main()
gregcaporaso/scikit-bio
skbio/io/tests/test_util.py
Python
bsd-3-clause
23,129
[ "scikit-bio" ]
d850f2ef2ff19dc2da149e63508de5696c1e6ab785a0c1a07f10eaf423960402
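`TestIterableReaderWriter` above exercises a lesser-known ability of `skbio.io.open`: wrapping a plain list of strings as a file-like object. Assuming a scikit-bio install matching these tests, usage looks roughly like this (the sequence strings are made up):

import skbio.io

with skbio.io.open(['GATT', 'ACA']) as fh:
    print(fh.read())  # GATTACA

lines = []
with skbio.io.open(lines, mode='w') as fh:
    fh.write('abc')
print(lines)  # ['abc'], per test_open_write_mode above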
######################################################################## # # (C) 2015, Brian Coca <bcoca@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ######################################################################## from __future__ import (absolute_import, division, print_function) __metaclass__ = type import errno import datetime import os import tarfile import tempfile import yaml from distutils.version import LooseVersion from shutil import rmtree from ansible import context from ansible.errors import AnsibleError from ansible.galaxy.user_agent import user_agent from ansible.module_utils._text import to_native, to_text from ansible.module_utils.urls import open_url from ansible.playbook.role.requirement import RoleRequirement from ansible.utils.display import Display display = Display() class GalaxyRole(object): SUPPORTED_SCMS = set(['git', 'hg']) META_MAIN = (os.path.join('meta', 'main.yml'), os.path.join('meta', 'main.yaml')) META_INSTALL = os.path.join('meta', '.galaxy_install_info') ROLE_DIRS = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests') def __init__(self, galaxy, api, name, src=None, version=None, scm=None, path=None): self._metadata = None self._install_info = None self._validate_certs = not context.CLIARGS['ignore_certs'] display.debug('Validate TLS certificates: %s' % self._validate_certs) self.galaxy = galaxy self.api = api self.name = name self.version = version self.src = src or name self.scm = scm self.paths = [os.path.join(x, self.name) for x in galaxy.roles_paths] if path is not None: if not path.endswith(os.path.join(os.path.sep, self.name)): path = os.path.join(path, self.name) else: # Look for a meta/main.ya?ml inside the potential role dir in case # the role name is the same as parent directory of the role. 
# # Example: # ./roles/testing/testing/meta/main.yml for meta_main in self.META_MAIN: if os.path.exists(os.path.join(path, name, meta_main)): path = os.path.join(path, self.name) break self.path = path else: # use the first path by default self.path = os.path.join(galaxy.roles_paths[0], self.name) def __repr__(self): """ Returns "rolename (version)" if version is not null Returns "rolename" otherwise """ if self.version: return "%s (%s)" % (self.name, self.version) else: return self.name def __eq__(self, other): return self.name == other.name @property def metadata(self): """ Returns role metadata """ if self._metadata is None: for path in self.paths: for meta_main in self.META_MAIN: meta_path = os.path.join(path, meta_main) if os.path.isfile(meta_path): try: with open(meta_path, 'r') as f: self._metadata = yaml.safe_load(f) except Exception: display.vvvvv("Unable to load metadata for %s" % self.name) return False break return self._metadata @property def install_info(self): """ Returns role install info """ if self._install_info is None: info_path = os.path.join(self.path, self.META_INSTALL) if os.path.isfile(info_path): try: f = open(info_path, 'r') self._install_info = yaml.safe_load(f) except Exception: display.vvvvv("Unable to load Galaxy install info for %s" % self.name) return False finally: f.close() return self._install_info @property def _exists(self): for path in self.paths: if os.path.isdir(path): return True return False def _write_galaxy_install_info(self): """ Writes a YAML-formatted file to the role's meta/ directory (named .galaxy_install_info) which contains some information we can use later for commands like 'list' and 'info'. """ info = dict( version=self.version, install_date=datetime.datetime.utcnow().strftime("%c"), ) if not os.path.exists(os.path.join(self.path, 'meta')): os.makedirs(os.path.join(self.path, 'meta')) info_path = os.path.join(self.path, self.META_INSTALL) with open(info_path, 'w+') as f: try: self._install_info = yaml.safe_dump(info, f) except Exception: return False return True def remove(self): """ Removes the specified role from the roles path. There is a sanity check to make sure there's a meta/main.yml file at this path so the user doesn't blow away random directories. 
""" if self.metadata: try: rmtree(self.path) return True except Exception: pass return False def fetch(self, role_data): """ Downloads the archived role to a temp location based on role data """ if role_data: # first grab the file and save it to a temp location if "github_user" in role_data and "github_repo" in role_data: archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version) else: archive_url = self.src display.display("- downloading role from %s" % archive_url) try: url_file = open_url(archive_url, validate_certs=self._validate_certs, http_agent=user_agent()) temp_file = tempfile.NamedTemporaryFile(delete=False) data = url_file.read() while data: temp_file.write(data) data = url_file.read() temp_file.close() return temp_file.name except Exception as e: display.error(u"failed to download the file: %s" % to_text(e)) return False def install(self): if self.scm: # create tar file from scm url tmp_file = RoleRequirement.scm_archive_role(keep_scm_meta=context.CLIARGS['keep_scm_meta'], **self.spec) elif self.src: if os.path.isfile(self.src): tmp_file = self.src elif '://' in self.src: role_data = self.src tmp_file = self.fetch(role_data) else: role_data = self.api.lookup_role_by_name(self.src) if not role_data: raise AnsibleError("- sorry, %s was not found on %s." % (self.src, self.api.api_server)) if role_data.get('role_type') == 'APP': # Container Role display.warning("%s is a Container App role, and should only be installed using Ansible " "Container" % self.name) role_versions = self.api.fetch_role_related('versions', role_data['id']) if not self.version: # convert the version names to LooseVersion objects # and sort them to get the latest version. If there # are no versions in the list, we'll grab the head # of the master branch if len(role_versions) > 0: loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions] try: loose_versions.sort() except TypeError: raise AnsibleError( 'Unable to compare role versions (%s) to determine the most recent version due to incompatible version formats. ' 'Please contact the role author to resolve versioning conflicts, or specify an explicit role version to ' 'install.' % ', '.join([v.vstring for v in loose_versions]) ) self.version = to_text(loose_versions[-1]) elif role_data.get('github_branch', None): self.version = role_data['github_branch'] else: self.version = 'master' elif self.version != 'master': if role_versions and to_text(self.version) not in [a.get('name', None) for a in role_versions]: raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." 
% (self.version, self.name, role_versions)) # check if there's a source link for our role_version for role_version in role_versions: if role_version['name'] == self.version and 'source' in role_version: self.src = role_version['source'] tmp_file = self.fetch(role_data) else: raise AnsibleError("No valid role data found") if tmp_file: display.debug("installing from %s" % tmp_file) if not tarfile.is_tarfile(tmp_file): raise AnsibleError("the downloaded file does not appear to be a valid tar archive.") else: role_tar_file = tarfile.open(tmp_file, "r") # verify the role's meta file meta_file = None members = role_tar_file.getmembers() # next find the metadata file for member in members: for meta_main in self.META_MAIN: if meta_main in member.name: # Look for parent of meta/main.yml # Due to possibility of sub roles each containing meta/main.yml # look for shortest length parent meta_parent_dir = os.path.dirname(os.path.dirname(member.name)) if not meta_file: archive_parent_dir = meta_parent_dir meta_file = member else: if len(meta_parent_dir) < len(archive_parent_dir): archive_parent_dir = meta_parent_dir meta_file = member if not meta_file: raise AnsibleError("this role does not appear to have a meta/main.yml file.") else: try: self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file)) except Exception: raise AnsibleError("this role does not appear to have a valid meta/main.yml file.") # we strip off any higher-level directories for all of the files contained within # the tar file here. The default is 'github_repo-target'. Gerrit instances, on the other # hand, does not have a parent directory at all. installed = False while not installed: display.display("- extracting %s to %s" % (self.name, self.path)) try: if os.path.exists(self.path): if not os.path.isdir(self.path): raise AnsibleError("the specified roles path exists and is not a directory.") elif not context.CLIARGS.get("force", False): raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name) else: # using --force, remove the old path if not self.remove(): raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really " "want to put the role here." % self.path) else: os.makedirs(self.path) # now we do the actual extraction to the path for member in members: # we only extract files, and remove any relative path # bits that might be in the file for security purposes # and drop any containing directory, as mentioned above if member.isreg() or member.issym(): parts = member.name.replace(archive_parent_dir, "", 1).split(os.sep) final_parts = [] for part in parts: if part != '..' 
and '~' not in part and '$' not in part: final_parts.append(part) member.name = os.path.join(*final_parts) role_tar_file.extract(member, self.path) # write out the install info file for later use self._write_galaxy_install_info() installed = True except OSError as e: error = True if e.errno == errno.EACCES and len(self.paths) > 1: current = self.paths.index(self.path) if len(self.paths) > current: self.path = self.paths[current + 1] error = False if error: raise AnsibleError("Could not update files in %s: %s" % (self.path, to_native(e))) # return the parsed yaml metadata display.display("- %s was installed successfully" % str(self)) if not (self.src and os.path.isfile(self.src)): try: os.unlink(tmp_file) except (OSError, IOError) as e: display.warning(u"Unable to remove tmp file (%s): %s" % (tmp_file, to_text(e))) return True return False @property def spec(self): """ Returns role spec info { 'scm': 'git', 'src': 'http://git.example.com/repos/repo.git', 'version': 'v1.0', 'name': 'repo' } """ return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
ilpianista/ansible
lib/ansible/galaxy/role.py
Python
gpl-3.0
16,177
[ "Brian", "Galaxy" ]
e222c1405bf9c9bde31dbb45c8aaee1289cede3f73ce012603be7342bbd08d6c
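The core of the version selection in `GalaxyRole.install()` above is the `LooseVersion` sort; isolated from Ansible it behaves as below (the sample version names are invented for illustration):

from distutils.version import LooseVersion

role_versions = [{'name': 'v1.2.0'}, {'name': 'v1.10.1'}, {'name': 'v1.9.3'}]
loose_versions = sorted(LooseVersion(v['name']) for v in role_versions)
print(loose_versions[-1].vstring)  # v1.10.1 -- numeric, not lexicographic, order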
"""Visualization helpers.""" from contextlib import contextmanager import copy import os.path as op import numpy as np from scipy import linalg from mne import read_proj, read_events, pick_types from mne.utils import verbose from mne.viz.utils import tight_layout, plt_show from ._sss import compute_good_coils from ._paths import get_raw_fnames def _viz_raw_ssp_events(p, subj, ridx): """Plot filtered cleaned raw trace with ExG events""" from ._ssp import _raw_LRFCP pca_dir = op.join(p.work_dir, subj, p.pca_dir) raw_names = get_raw_fnames(p, subj, 'sss', False, False, ridx) pre_list = [r for ri, r in enumerate(raw_names) if ri in p.get_projs_from] all_proj = op.join(pca_dir, 'preproc_all-proj.fif') projs = read_proj(all_proj) colors = dict() ev = np.zeros((0, 3), int) for n, c, cid in zip(['ecg', 'blink'], ['r', 'b'], [999, 998]): fname = op.join(pca_dir, 'preproc_%s-eve.fif' % n) if op.isfile(fname): ev = np.concatenate((ev, read_events(fname))) colors[cid] = c ev = ev[np.argsort(ev[:, 0], axis=0)] raw = _raw_LRFCP(pre_list, p.proj_sfreq, None, None, p.n_jobs_fir, p.n_jobs_resample, projs, None, p.disp_files, method='fir', filter_length=p.filter_length, force_bads=False, l_trans=p.hp_trans, h_trans=p.lp_trans) raw.plot(events=ev, event_color=colors) def clean_brain(brain_img): """Remove borders of a brain image and make transparent.""" bg = (brain_img == brain_img[0, 0]).all(-1) brain_img = brain_img[(~bg).any(axis=-1)] brain_img = brain_img[:, (~bg).any(axis=0)] alpha = 255 * np.ones(brain_img.shape[:-1], np.uint8) x, y = np.where((brain_img == 255).all(-1)) alpha[x, y] = 0 return np.concatenate((brain_img, alpha[..., np.newaxis]), -1) def plot_colorbar(lims, ticks=None, ticklabels=None, figsize=(1, 2), labelsize='small', ticklabelsize='x-small', ax=None, label='', tickrotation=0., orientation='vertical', end_labels=None, colormap='mne', transparent=False, diverging=None): import matplotlib.pyplot as plt from matplotlib.colorbar import ColorbarBase from matplotlib.colors import Normalize from mne.viz._3d import _limits_to_control_points with plt.rc_context({'axes.labelsize': labelsize, 'xtick.labelsize': ticklabelsize, 'ytick.labelsize': ticklabelsize}): if diverging is None: diverging = (colormap == 'mne') # simple heuristic here if diverging: use_lims = dict(kind='value', pos_lims=lims) else: use_lims = dict(kind='value', lims=lims) cmap, scale_pts, diverging, _, none_ticks = _limits_to_control_points( use_lims, 0, colormap, transparent, linearize=True) vmin, vmax = scale_pts[0], scale_pts[-1] if ticks is None: ticks = none_ticks del colormap, lims, use_lims adjust = (ax is None) if ax is None: fig, ax = plt.subplots(1, figsize=figsize) else: fig = ax.figure norm = Normalize(vmin=vmin, vmax=vmax) if ticklabels is None: ticklabels = ticks assert len(ticks) == len(ticklabels) cbar = ColorbarBase(ax, cmap, norm=norm, ticks=ticks, label=label, orientation=orientation) for key in ('left', 'top', 'bottom' if orientation == 'vertical' else 'right'): ax.spines[key].set_visible(False) cbar.set_ticklabels(ticklabels) cbar.patch.set(facecolor='0.5', edgecolor='0.5') if orientation == 'horizontal': plt.setp(ax.xaxis.get_majorticklabels(), rotation=tickrotation) else: plt.setp(ax.yaxis.get_majorticklabels(), rotation=tickrotation) cbar.outline.set_visible(False) lims = np.array(list(ax.get_xlim()) + list(ax.get_ylim())) if end_labels is not None: if orientation == 'horizontal': delta = np.diff(lims[:2]) * np.array([-0.05, 0.05]) xs = np.array(lims[:2]) + delta has = ['right', 'left'] ys = 
[lims[2:].mean()] * 2 vas = ['center', 'center'] else: xs = [lims[:2].mean()] * 2 has = ['center'] * 2 delta = np.diff(lims[2:]) * np.array([-0.05, 0.05]) ys = lims[2:] + delta vas = ['top', 'bottom'] for x, y, l, ha, va in zip(xs, ys, end_labels, has, vas): ax.text(x, y, l, ha=ha, va=va, fontsize=ticklabelsize) if adjust: fig.subplots_adjust(0.01, 0.05, 0.2, 0.95) return fig def plot_reconstruction(evoked, origin=(0., 0., 0.04)): """Plot the reconstructed data for Evoked Currently only works for MEG data. Parameters ---------- evoked : instance of Evoked The evoked data. origin : array-like, shape (3,) The head origin to use. Returns ------- fig : instance of matplotlib.figure.Figure The figure. """ from mne.forward._field_interpolation import _map_meg_channels import matplotlib.pyplot as plt evoked = evoked.copy().pick_types(meg=True, exclude='bads') info_to = copy.deepcopy(evoked.info) info_to['projs'] = [] op = _map_meg_channels( evoked.info, info_to, mode='accurate', origin=(0., 0., 0.04)) fig, axs = plt.subplots(3, 2, squeeze=False) titles = dict(grad='Gradiometers (fT/cm)', mag='Magnetometers (fT)') for mi, meg in enumerate(('grad', 'mag')): picks = pick_types(evoked.info, meg=meg) kwargs = dict(ylim=dict(grad=[-250, 250], mag=[-600, 600]), spatial_colors=True, picks=picks) evoked.plot(axes=axs[0, mi], proj=False, titles=dict(grad='Proj off', mag=''), **kwargs) evoked_remap = evoked.copy().apply_proj() evoked_remap.info['projs'] = [] evoked_remap.plot(axes=axs[1, mi], titles=dict(grad='Proj on', mag=''), **kwargs) evoked_remap.data = np.dot(op, evoked_remap.data) evoked_remap.plot(axes=axs[2, mi], titles=dict(grad='Recon', mag=''), **kwargs) axs[0, mi].set_title(titles[meg]) for ii in range(3): if ii in (0, 1): axs[ii, mi].set_xlabel('') if ii in (1, 2): axs[ii, mi].set_title('') for ii in range(3): axs[ii, 1].set_ylabel('') axs[0, 0].set_ylabel('Original') axs[1, 0].set_ylabel('Projection') axs[2, 0].set_ylabel('Reconstruction') fig.tight_layout() return fig def plot_chpi_snr_raw(raw, win_length, n_harmonics=None, show=True, verbose=True): """Compute and plot cHPI SNR from raw data Parameters ---------- win_length : float Length of window to use for SNR estimates (seconds). A longer window will naturally include more low frequency power, resulting in lower SNR. n_harmonics : int or None Number of line frequency harmonics to include in the model. If None, use all harmonics up to the MEG analog lowpass corner. show : bool Show figure if True. Returns ------- fig : instance of matplotlib.figure.Figure cHPI SNR as function of time, residual variance. Notes ----- A general linear model including cHPI and line frequencies is fit into each data window. The cHPI power obtained from the model is then divided by the residual variance (variance of signal unexplained by the model) to obtain the SNR. The SNR may decrease either due to decrease of cHPI amplitudes (e.g. head moving away from the helmet), or due to increase in the residual variance. In case of broadband interference that overlaps with the cHPI frequencies, the resulting decreased SNR accurately reflects the true situation. However, increased narrowband interference outside the cHPI and line frequencies would also cause an increase in the residual variance, even though it wouldn't necessarily affect estimation of the cHPI amplitudes. Thus, this method is intended for a rough overview of cHPI signal quality. 
    A more accurate picture of cHPI quality (at an increased computational
    cost) can be obtained by examining the goodness-of-fit of the cHPI
    coil fits.
    """
    import matplotlib.pyplot as plt
    from mne.chpi import _get_hpi_info

    # plotting parameters
    legend_fontsize = 10
    title_fontsize = 10
    tick_fontsize = 10
    label_fontsize = 10

    # get some info from fiff
    sfreq = raw.info['sfreq']
    linefreq = raw.info['line_freq']
    if n_harmonics is not None:
        linefreqs = (np.arange(n_harmonics + 1) + 1) * linefreq
    else:
        linefreqs = np.arange(linefreq, raw.info['lowpass'], linefreq)
    buflen = int(win_length * sfreq)
    if buflen <= 0:
        raise ValueError('Window length should be >0')
    cfreqs = _get_hpi_info(raw.info)[0]
    if verbose:
        print('Nominal cHPI frequencies: %s Hz' % cfreqs)
        print('Sampling frequency: %s Hz' % sfreq)
        print('Using line freqs: %s Hz' % linefreqs)
        print('Using buffers of %s samples = %s seconds\n'
              % (buflen, buflen / sfreq))

    pick_meg = pick_types(raw.info, meg=True, exclude=[])
    pick_mag = pick_types(raw.info, meg='mag', exclude=[])
    pick_grad = pick_types(raw.info, meg='grad', exclude=[])
    nchan = len(pick_meg)
    # grad and mag indices into an array that already has meg channels only
    pick_mag_ = np.in1d(pick_meg, pick_mag).nonzero()[0]
    pick_grad_ = np.in1d(pick_meg, pick_grad).nonzero()[0]

    # create general linear model for the data
    t = np.arange(buflen) / float(sfreq)
    model = np.empty((len(t), 2 + 2 * (len(linefreqs) + len(cfreqs))))
    model[:, 0] = t
    model[:, 1] = np.ones(t.shape)
    # add sine and cosine term for each freq
    allfreqs = np.concatenate([linefreqs, cfreqs])
    model[:, 2::2] = np.cos(2 * np.pi * t[:, np.newaxis] * allfreqs)
    model[:, 3::2] = np.sin(2 * np.pi * t[:, np.newaxis] * allfreqs)
    inv_model = linalg.pinv(model)

    # drop last buffer to avoid overrun
    bufs = np.arange(0, raw.n_times, buflen)[:-1]
    tvec = bufs / sfreq
    snr_avg_grad = np.zeros([len(cfreqs), len(bufs)])
    hpi_pow_grad = np.zeros([len(cfreqs), len(bufs)])
    snr_avg_mag = np.zeros([len(cfreqs), len(bufs)])
    resid_vars = np.zeros([nchan, len(bufs)])
    for ind, buf0 in enumerate(bufs):
        if verbose:
            print('Buffer %s/%s' % (ind + 1, len(bufs)))
        megbuf = raw[pick_meg, buf0:buf0 + buflen][0].T
        coeffs = np.dot(inv_model, megbuf)
        coeffs_hpi = coeffs[2 + 2 * len(linefreqs):]
        resid_vars[:, ind] = np.var(megbuf - np.dot(model, coeffs), 0)
        # get total power by combining sine and cosine terms
        # sinusoidal of amplitude A has power of A**2/2
        hpi_pow = (coeffs_hpi[0::2, :] ** 2 + coeffs_hpi[1::2, :] ** 2) / 2
        hpi_pow_grad[:, ind] = hpi_pow[:, pick_grad_].mean(1)
        # divide average HPI power by average variance
        snr_avg_grad[:, ind] = hpi_pow_grad[:, ind] / \
            resid_vars[pick_grad_, ind].mean()
        snr_avg_mag[:, ind] = hpi_pow[:, pick_mag_].mean(1) / \
            resid_vars[pick_mag_, ind].mean()

    cfreqs_legend = ['%s Hz' % fre for fre in cfreqs]
    fig, axs = plt.subplots(4, 1, sharex=True)

    # SNR plots for gradiometers and magnetometers
    ax = axs[0]
    lines1 = ax.plot(tvec, 10 * np.log10(snr_avg_grad.T))
    lines1_med = ax.plot(tvec, 10 * np.log10(np.median(snr_avg_grad, axis=0)),
                         lw=2, ls=':', color='k')
    ax.set_xlim([tvec.min(), tvec.max()])
    ax.set(ylabel='SNR (dB)')
    ax.yaxis.label.set_fontsize(label_fontsize)
    ax.set_title('Mean cHPI power / mean residual variance, gradiometers',
                 fontsize=title_fontsize)
    ax.tick_params(axis='both', which='major', labelsize=tick_fontsize)

    ax = axs[1]
    lines2 = ax.plot(tvec, 10 * np.log10(snr_avg_mag.T))
    lines2_med = ax.plot(tvec, 10 * np.log10(np.median(snr_avg_mag, axis=0)),
                         lw=2, ls=':', color='k')
    ax.set_xlim([tvec.min(), tvec.max()])
    ax.set(ylabel='SNR (dB)')
    ax.yaxis.label.set_fontsize(label_fontsize)
    ax.set_title('Mean cHPI power / mean residual variance, magnetometers',
                 fontsize=title_fontsize)
    ax.tick_params(axis='both', which='major', labelsize=tick_fontsize)

    ax = axs[2]
    lines3 = ax.plot(tvec, hpi_pow_grad.T)
    lines3_med = ax.plot(tvec, np.median(hpi_pow_grad, axis=0),
                         lw=2, ls=':', color='k')
    ax.set_xlim([tvec.min(), tvec.max()])
    ax.set(ylabel='Power (T/m)$^2$')
    ax.yaxis.label.set_fontsize(label_fontsize)
    ax.set_title('Mean cHPI power, gradiometers', fontsize=title_fontsize)
    ax.tick_params(axis='both', which='major', labelsize=tick_fontsize)

    # residual (unexplained) variance as function of time
    ax = axs[3]
    cls = plt.get_cmap('plasma')(np.linspace(0., 0.7, len(pick_meg)))
    ax.set_prop_cycle(color=cls)
    ax.semilogy(tvec, resid_vars[pick_grad_, :].T, alpha=.4)
    ax.set_xlim([tvec.min(), tvec.max()])
    ax.set(ylabel='Var. (T/m)$^2$', xlabel='Time (s)')
    ax.xaxis.label.set_fontsize(label_fontsize)
    ax.yaxis.label.set_fontsize(label_fontsize)
    ax.set_title('Residual (unexplained) variance, all gradiometer channels',
                 fontsize=title_fontsize)
    ax.tick_params(axis='both', which='major', labelsize=tick_fontsize)
    tight_layout(pad=.5, w_pad=.1, h_pad=.2)  # from mne.viz

    # tight_layout will screw these up
    ax = axs[0]
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    # order curve legends according to mean of data
    sind = np.argsort(snr_avg_grad.mean(axis=1))[::-1]
    handles = [lines1[i] for i in sind]
    handles.append(lines1_med[0])
    labels = [cfreqs_legend[i] for i in sind]
    labels.append('Median')
    ax.legend(handles, labels, prop={'size': legend_fontsize},
              bbox_to_anchor=(1.02, 0.5), loc='center left', borderpad=1)

    ax = axs[1]
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    sind = np.argsort(snr_avg_mag.mean(axis=1))[::-1]
    handles = [lines2[i] for i in sind]
    handles.append(lines2_med[0])
    labels = [cfreqs_legend[i] for i in sind]
    labels.append('Median')
    ax.legend(handles, labels, prop={'size': legend_fontsize},
              bbox_to_anchor=(1.02, 0.5), loc='center left', borderpad=1)

    ax = axs[2]
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    sind = np.argsort(hpi_pow_grad.mean(axis=1))[::-1]
    handles = [lines3[i] for i in sind]
    handles.append(lines3_med[0])
    labels = [cfreqs_legend[i] for i in sind]
    labels.append('Median')
    ax.legend(handles, labels, prop={'size': legend_fontsize},
              bbox_to_anchor=(1.02, 0.5), loc='center left', borderpad=1)

    ax = axs[3]
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])

    if show:
        plt.show()

    return fig


@verbose
def plot_good_coils(raw, t_step=1., t_window=0.2, dist_limit=0.005,
                    show=True, verbose=None):
    """Plot the good coil count as a function of time."""
    import matplotlib.pyplot as plt
    if isinstance(raw, dict):  # fit_data calculated and stored to disk
        t = raw['fit_t']
        counts = raw['counts']
        n_coils = raw['n_coils']
    else:
        t, counts, n_coils = compute_good_coils(raw, t_step, t_window,
                                                dist_limit)
    del t_step, t_window, dist_limit
    fig, ax = plt.subplots(figsize=(8, 2))
    ax.step(t, counts, zorder=4, color='k', clip_on=False)
    ax.set(xlim=t[[0, -1]], ylim=[0, n_coils], xlabel='Time (sec)',
           ylabel='Good coils')
    ax.set(yticks=np.arange(n_coils + 1))
    for comp, n, color in ((np.greater_equal, 5, '#2ca02c'),
                           (np.equal, 4, '#98df8a'),
                           (np.equal, 3, (1, 1, 0)),
                           (np.less_equal, 2, (1, 0, 0))):
        mask = comp(counts, n)
        mask[:-1] |= comp(counts[1:], n)
        ax.fill_between(t, 0, n_coils, where=mask, color=color,
                        edgecolor='none', linewidth=0, zorder=1)
    ax.grid(True)
    fig.tight_layout()
    plt_show(show)
    return fig


@contextmanager
def mlab_offscreen(offscreen=True):
    from mayavi import mlab
    old_offscreen = mlab.options.offscreen
    mlab.options.offscreen = offscreen
    try:
        yield
    finally:
        mlab.options.offscreen = old_offscreen


def discretize_cmap(colormap, lims, transparent=True):
    """Discretize a colormap."""
    lims = np.array(lims, int)
    assert lims.shape == (2,)
    from matplotlib import colors, pyplot as plt
    n_pts = lims[1] - lims[0] + 1
    assert n_pts > 0
    if n_pts == 1:
        vals = np.ones(256)
    else:
        vals = np.round(np.linspace(-0.5, n_pts - 0.5, 256)) / (n_pts - 1)
    colormap = plt.get_cmap(colormap)(vals)
    if transparent:
        colormap[:, 3] = np.clip((vals + 0.5 / n_pts) * 2, 0, 1)
        colormap[0, 3] = 0.
    colormap = colors.ListedColormap(colormap)
    use_lims = [lims[0] - 0.5, (lims[0] + lims[1]) / 2., lims[1] + 0.5]
    return colormap, use_lims


def trim_bg(img, color=None):
    """Trim background rows/cols from an image-like object."""
    if color is None:
        color = img[0, 0]
    img = img[:, (img != color).any(0).any(-1)]
    img = img[(img != color).any(1).any(-1)]
    return img
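To make the last two helpers concrete, here is a minimal sketch (editorial, not part of the original module) that discretizes a continuous colormap into integer bins, renders an array with it, and then crops the uniform background border from the rasterized figure. It assumes only numpy and matplotlib; buffer_rgba() needs an Agg-style canvas, and _demo_discretize_and_trim is a hypothetical helper name.

import numpy as np
import matplotlib
matplotlib.use('Agg')  # buffer_rgba() below requires an Agg-based canvas
import matplotlib.pyplot as plt


def _demo_discretize_and_trim():
    # six integer color bins covering the values 0..5
    cmap, clim = discretize_cmap('viridis', (0, 5), transparent=False)
    data = np.random.randint(0, 6, size=(8, 8))
    fig, ax = plt.subplots()
    im = ax.imshow(data, cmap=cmap, vmin=clim[0], vmax=clim[2])
    fig.colorbar(im, ax=ax, ticks=np.arange(6))
    fig.canvas.draw()
    img = np.asarray(fig.canvas.buffer_rgba()).copy()
    plt.close(fig)
    return trim_bg(img)  # drop border rows/cols matching the corner color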
kambysese/mnefun
mnefun/_viz.py
Python
bsd-3-clause
18,073
[ "Mayavi" ]
13dd8a747da2630bb884a9564a9e1cada40570801667ef2f0568266e7320b248
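The "Mayavi" keyword on this record points at the mlab_offscreen context manager defined above. A hedged sketch of how it might be used, assuming a working Mayavi/VTK install; the output path and the _demo_offscreen_snapshot name are placeholders:

from mayavi import mlab


def _demo_offscreen_snapshot(path='scene.png'):
    # Render a tiny 3D scene to disk without opening a GUI window,
    # restoring the previous offscreen setting on exit.
    with mlab_offscreen():
        fig3d = mlab.figure(size=(400, 400))
        mlab.points3d([0., 1.], [0., 1.], [0., 1.], figure=fig3d)
        mlab.savefig(path, figure=fig3d)
        mlab.close(fig3d)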
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe import json import frappe.utils from frappe.utils import cstr, flt, getdate, cint, nowdate, add_days, get_link_to_form from frappe import _ from six import string_types from frappe.model.utils import get_fetch_values from frappe.model.mapper import get_mapped_doc from erpnext.stock.stock_balance import update_bin_qty, get_reserved_qty from frappe.desk.notifications import clear_doctype_notifications from frappe.contacts.doctype.address.address import get_company_address from erpnext.controllers.selling_controller import SellingController from frappe.automation.doctype.auto_repeat.auto_repeat import get_next_schedule_date from erpnext.selling.doctype.customer.customer import check_credit_limit from erpnext.stock.doctype.item.item import get_item_defaults from erpnext.setup.doctype.item_group.item_group import get_item_group_defaults from erpnext.manufacturing.doctype.production_plan.production_plan import get_items_for_material_requests from erpnext.accounts.doctype.sales_invoice.sales_invoice import validate_inter_company_party, update_linked_doc,\ unlink_inter_company_doc form_grid_templates = { "items": "templates/form_grid/item_grid.html" } class WarehouseRequired(frappe.ValidationError): pass class SalesOrder(SellingController): def __init__(self, *args, **kwargs): super(SalesOrder, self).__init__(*args, **kwargs) def validate(self): super(SalesOrder, self).validate() self.validate_order_type() self.validate_delivery_date() self.validate_proj_cust() self.validate_po() self.validate_uom_is_integer("stock_uom", "stock_qty") self.validate_uom_is_integer("uom", "qty") self.validate_for_items() self.validate_warehouse() self.validate_drop_ship() self.validate_serial_no_based_delivery() validate_inter_company_party(self.doctype, self.customer, self.company, self.inter_company_order_reference) from erpnext.stock.doctype.packed_item.packed_item import make_packing_list make_packing_list(self) self.validate_with_previous_doc() self.set_status() if not self.billing_status: self.billing_status = 'Not Billed' if not self.delivery_status: self.delivery_status = 'Not Delivered' def validate_po(self): # validate p.o date v/s delivery date if self.po_date: for d in self.get("items"): if d.delivery_date and getdate(self.po_date) > getdate(d.delivery_date): frappe.throw(_("Row #{0}: Expected Delivery Date cannot be before Purchase Order Date") .format(d.idx)) if self.po_no and self.customer: so = frappe.db.sql("select name from `tabSales Order` \ where ifnull(po_no, '') = %s and name != %s and docstatus < 2\ and customer = %s", (self.po_no, self.name, self.customer)) if so and so[0][0] and not cint(frappe.db.get_single_value("Selling Settings", "allow_against_multiple_purchase_orders")): frappe.msgprint(_("Warning: Sales Order {0} already exists against Customer's Purchase Order {1}").format(so[0][0], self.po_no)) def validate_for_items(self): check_list = [] for d in self.get('items'): check_list.append(cstr(d.item_code)) # used for production plan d.transaction_date = self.transaction_date tot_avail_qty = frappe.db.sql("select projected_qty from `tabBin` \ where item_code = %s and warehouse = %s", (d.item_code, d.warehouse)) d.projected_qty = tot_avail_qty and flt(tot_avail_qty[0][0]) or 0 # check for same entry multiple times unique_chk_list = set(check_list) if len(unique_chk_list) != len(check_list) and \ not 
cint(frappe.db.get_single_value("Selling Settings", "allow_multiple_items")): frappe.msgprint(_("Same item has been entered multiple times"), title=_("Warning"), indicator='orange') def product_bundle_has_stock_item(self, product_bundle): """Returns true if product bundle has stock item""" ret = len(frappe.db.sql("""select i.name from tabItem i, `tabProduct Bundle Item` pbi where pbi.parent = %s and pbi.item_code = i.name and i.is_stock_item = 1""", product_bundle)) return ret def validate_sales_mntc_quotation(self): for d in self.get('items'): if d.prevdoc_docname: res = frappe.db.sql("select name from `tabQuotation` where name=%s and order_type = %s", (d.prevdoc_docname, self.order_type)) if not res: frappe.msgprint(_("Quotation {0} not of type {1}") .format(d.prevdoc_docname, self.order_type)) def validate_order_type(self): super(SalesOrder, self).validate_order_type() def validate_delivery_date(self): if self.order_type == 'Sales': delivery_date_list = [d.delivery_date for d in self.get("items") if d.delivery_date] max_delivery_date = max(delivery_date_list) if delivery_date_list else None if not self.delivery_date: self.delivery_date = max_delivery_date if self.delivery_date: for d in self.get("items"): if not d.delivery_date: d.delivery_date = self.delivery_date if getdate(self.transaction_date) > getdate(d.delivery_date): frappe.msgprint(_("Expected Delivery Date should be after Sales Order Date"), indicator='orange', title=_('Warning')) if getdate(self.delivery_date) != getdate(max_delivery_date): self.delivery_date = max_delivery_date else: frappe.throw(_("Please enter Delivery Date")) self.validate_sales_mntc_quotation() def validate_proj_cust(self): if self.project and self.customer_name: res = frappe.db.sql("""select name from `tabProject` where name = %s and (customer = %s or ifnull(customer,'')='')""", (self.project, self.customer)) if not res: frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project)) def validate_warehouse(self): super(SalesOrder, self).validate_warehouse() for d in self.get("items"): if (frappe.get_cached_value("Item", d.item_code, "is_stock_item") == 1 or (self.has_product_bundle(d.item_code) and self.product_bundle_has_stock_item(d.item_code))) \ and not d.warehouse and not cint(d.delivered_by_supplier): frappe.throw(_("Delivery warehouse required for stock item {0}").format(d.item_code), WarehouseRequired) def validate_with_previous_doc(self): super(SalesOrder, self).validate_with_previous_doc({ "Quotation": { "ref_dn_field": "prevdoc_docname", "compare_fields": [["company", "="]] } }) def update_enquiry_status(self, prevdoc, flag): enq = frappe.db.sql("select t2.prevdoc_docname from `tabQuotation` t1, `tabQuotation Item` t2 where t2.parent = t1.name and t1.name=%s", prevdoc) if enq: frappe.db.sql("update `tabOpportunity` set status = %s where name=%s",(flag,enq[0][0])) def update_prevdoc_status(self, flag): for quotation in list(set([d.prevdoc_docname for d in self.get("items")])): if quotation: doc = frappe.get_doc("Quotation", quotation) if doc.docstatus==2: frappe.throw(_("Quotation {0} is cancelled").format(quotation)) doc.set_status(update=True) doc.update_opportunity() def validate_drop_ship(self): for d in self.get('items'): if d.delivered_by_supplier and not d.supplier: frappe.throw(_("Row #{0}: Set Supplier for item {1}").format(d.idx, d.item_code)) def on_submit(self): self.check_credit_limit() self.update_reserved_qty() frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, 
		self.company, self.base_grand_total, self)

		self.update_project()
		self.update_prevdoc_status('submit')

		self.update_blanket_order()

		update_linked_doc(self.doctype, self.name, self.inter_company_order_reference)

	def on_cancel(self):
		super(SalesOrder, self).on_cancel()

		# Cannot cancel closed SO
		if self.status == 'Closed':
			frappe.throw(_("Closed order cannot be cancelled. Unclose to cancel."))

		self.check_nextdoc_docstatus()
		self.update_reserved_qty()

		self.update_project()
		self.update_prevdoc_status('cancel')

		frappe.db.set(self, 'status', 'Cancelled')

		self.update_blanket_order()
		unlink_inter_company_doc(self.doctype, self.name, self.inter_company_order_reference)

	def update_project(self):
		if frappe.db.get_single_value('Selling Settings', 'sales_update_frequency') != "Each Transaction":
			return

		if self.project:
			project = frappe.get_doc("Project", self.project)
			project.update_sales_amount()
			project.db_update()

	def check_credit_limit(self):
		# If "bypass credit limit check" is set to true (1) at the Sales Order
		# level, we need not check the credit limit, and vice versa.
		if not cint(frappe.get_cached_value("Customer", self.customer,
			"bypass_credit_limit_check_at_sales_order")):
			check_credit_limit(self.customer, self.company)

	def check_nextdoc_docstatus(self):
		# Checks Delivery Note
		submit_dn = frappe.db.sql_list("""
			select t1.name
			from `tabDelivery Note` t1, `tabDelivery Note Item` t2
			where t1.name = t2.parent and t2.against_sales_order = %s and t1.docstatus = 1""", self.name)

		if submit_dn:
			submit_dn = [get_link_to_form("Delivery Note", dn) for dn in submit_dn]
			frappe.throw(_("Delivery Notes {0} must be cancelled before cancelling this Sales Order")
				.format(", ".join(submit_dn)))

		# Checks Sales Invoice
		submit_rv = frappe.db.sql_list("""select t1.name
			from `tabSales Invoice` t1, `tabSales Invoice Item` t2
			where t1.name = t2.parent and t2.sales_order = %s and t1.docstatus = 1""",
			self.name)

		if submit_rv:
			submit_rv = [get_link_to_form("Sales Invoice", si) for si in submit_rv]
			frappe.throw(_("Sales Invoice {0} must be cancelled before cancelling this Sales Order")
				.format(", ".join(submit_rv)))

		# check maintenance schedule
		submit_ms = frappe.db.sql_list("""
			select t1.name
			from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
			where t2.parent = t1.name and t2.sales_order = %s and t1.docstatus = 1""", self.name)

		if submit_ms:
			submit_ms = [get_link_to_form("Maintenance Schedule", ms) for ms in submit_ms]
			frappe.throw(_("Maintenance Schedule {0} must be cancelled before cancelling this Sales Order")
				.format(", ".join(submit_ms)))

		# check maintenance visit
		submit_mv = frappe.db.sql_list("""
			select t1.name
			from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
			where t2.parent = t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""", self.name)

		if submit_mv:
			submit_mv = [get_link_to_form("Maintenance Visit", mv) for mv in submit_mv]
			frappe.throw(_("Maintenance Visit {0} must be cancelled before cancelling this Sales Order")
				.format(", ".join(submit_mv)))

		# check work order
		pro_order = frappe.db.sql_list("""
			select name
			from `tabWork Order`
			where sales_order = %s and docstatus = 1""", self.name)

		if pro_order:
			pro_order = [get_link_to_form("Work Order", po) for po in pro_order]
			frappe.throw(_("Work Order {0} must be cancelled before cancelling this Sales Order")
				.format(", ".join(pro_order)))

	def check_modified_date(self):
		mod_db = frappe.db.get_value("Sales Order", self.name, "modified")
		date_diff = frappe.db.sql("select TIMEDIFF('%s', '%s')"
			% (mod_db, cstr(self.modified)))

		if date_diff and
date_diff[0][0]: frappe.throw(_("{0} {1} has been modified. Please refresh.").format(self.doctype, self.name)) def update_status(self, status): self.check_modified_date() self.set_status(update=True, status=status) self.update_reserved_qty() self.notify_update() clear_doctype_notifications(self) def update_reserved_qty(self, so_item_rows=None): """update requested qty (before ordered_qty is updated)""" item_wh_list = [] def _valid_for_reserve(item_code, warehouse): if item_code and warehouse and [item_code, warehouse] not in item_wh_list \ and frappe.get_cached_value("Item", item_code, "is_stock_item"): item_wh_list.append([item_code, warehouse]) for d in self.get("items"): if (not so_item_rows or d.name in so_item_rows) and not d.delivered_by_supplier: if self.has_product_bundle(d.item_code): for p in self.get("packed_items"): if p.parent_detail_docname == d.name and p.parent_item == d.item_code: _valid_for_reserve(p.item_code, p.warehouse) else: _valid_for_reserve(d.item_code, d.warehouse) for item_code, warehouse in item_wh_list: update_bin_qty(item_code, warehouse, { "reserved_qty": get_reserved_qty(item_code, warehouse) }) def on_update(self): pass def before_update_after_submit(self): self.validate_po() self.validate_drop_ship() self.validate_supplier_after_submit() self.validate_delivery_date() def validate_supplier_after_submit(self): """Check that supplier is the same after submit if PO is already made""" exc_list = [] for item in self.items: if item.supplier: supplier = frappe.db.get_value("Sales Order Item", {"parent": self.name, "item_code": item.item_code}, "supplier") if item.ordered_qty > 0.0 and item.supplier != supplier: exc_list.append(_("Row #{0}: Not allowed to change Supplier as Purchase Order already exists").format(item.idx)) if exc_list: frappe.throw('\n'.join(exc_list)) def update_delivery_status(self): """Update delivery status from Purchase Order for drop shipping""" tot_qty, delivered_qty = 0.0, 0.0 for item in self.items: if item.delivered_by_supplier: item_delivered_qty = frappe.db.sql("""select sum(qty) from `tabPurchase Order Item` poi, `tabPurchase Order` po where poi.sales_order_item = %s and poi.item_code = %s and poi.parent = po.name and po.docstatus = 1 and po.status = 'Delivered'""", (item.name, item.item_code)) item_delivered_qty = item_delivered_qty[0][0] if item_delivered_qty else 0 item.db_set("delivered_qty", flt(item_delivered_qty), update_modified=False) delivered_qty += item.delivered_qty tot_qty += item.qty if tot_qty != 0: self.db_set("per_delivered", flt(delivered_qty/tot_qty) * 100, update_modified=False) def set_indicator(self): """Set indicator for portal""" if self.per_billed < 100 and self.per_delivered < 100: self.indicator_color = "orange" self.indicator_title = _("Not Paid and Not Delivered") elif self.per_billed == 100 and self.per_delivered < 100: self.indicator_color = "orange" self.indicator_title = _("Paid and Not Delivered") else: self.indicator_color = "green" self.indicator_title = _("Paid") def get_work_order_items(self, for_raw_material_request=0): '''Returns items with BOM that already do not have a linked work order''' items = [] for table in [self.items, self.packed_items]: for i in table: bom = get_default_bom_item(i.item_code) stock_qty = i.qty if i.doctype == 'Packed Item' else i.stock_qty if not for_raw_material_request: total_work_order_qty = flt(frappe.db.sql('''select sum(qty) from `tabWork Order` where production_item=%s and sales_order=%s and sales_order_item = %s and docstatus<2''', (i.item_code, self.name, 
					i.name))[0][0])
				pending_qty = stock_qty - total_work_order_qty
			else:
				pending_qty = stock_qty

			if pending_qty:
				if bom:
					items.append(dict(
						name=i.name,
						item_code=i.item_code,
						description=i.description,
						bom=bom,
						warehouse=i.warehouse,
						pending_qty=pending_qty,
						required_qty=pending_qty if for_raw_material_request else 0,
						sales_order_item=i.name
					))
				else:
					items.append(dict(
						name=i.name,
						item_code=i.item_code,
						description=i.description,
						bom='',
						warehouse=i.warehouse,
						pending_qty=pending_qty,
						required_qty=pending_qty if for_raw_material_request else 0,
						sales_order_item=i.name
					))
		return items

	def on_recurring(self, reference_doc, auto_repeat_doc):
		def _get_delivery_date(ref_doc_delivery_date, ref_doc_transaction_date, transaction_date):
			delivery_date = get_next_schedule_date(ref_doc_delivery_date,
				auto_repeat_doc.frequency, cint(auto_repeat_doc.repeat_on_day))

			if delivery_date <= transaction_date:
				delivery_date_diff = frappe.utils.date_diff(ref_doc_delivery_date, ref_doc_transaction_date)
				delivery_date = frappe.utils.add_days(transaction_date, delivery_date_diff)

			return delivery_date

		self.set("delivery_date", _get_delivery_date(reference_doc.delivery_date,
			reference_doc.transaction_date, self.transaction_date))

		for d in self.get("items"):
			reference_delivery_date = frappe.db.get_value("Sales Order Item",
				{"parent": reference_doc.name, "item_code": d.item_code, "idx": d.idx},
				"delivery_date")

			d.set("delivery_date",
				_get_delivery_date(reference_delivery_date,
					reference_doc.transaction_date, self.transaction_date))

	def validate_serial_no_based_delivery(self):
		reserved_items = []
		normal_items = []
		for item in self.items:
			if item.ensure_delivery_based_on_produced_serial_no:
				if item.item_code in normal_items:
					frappe.throw(_("Cannot ensure delivery by Serial No as Item {0} is added with and without Ensure Delivery by Serial No.").format(item.item_code))
				if item.item_code not in reserved_items:
					if not frappe.get_cached_value("Item", item.item_code, "has_serial_no"):
						frappe.throw(_("Item {0} has no Serial No. Only serialized items can have delivery based on Serial No").format(item.item_code))
					if not frappe.db.exists("BOM", {"item": item.item_code, "is_active": 1}):
						frappe.throw(_("No active BOM found for item {0}.
Delivery by \ Serial No cannot be ensured").format(item.item_code)) reserved_items.append(item.item_code) else: normal_items.append(item.item_code) if not item.ensure_delivery_based_on_produced_serial_no and \ item.item_code in reserved_items: frappe.throw(_("Cannot ensure delivery by Serial No as \ Item {0} is added with and without Ensure Delivery by \ Serial No.").format(item.item_code)) def get_list_context(context=None): from erpnext.controllers.website_list_for_contact import get_list_context list_context = get_list_context(context) list_context.update({ 'show_sidebar': True, 'show_search': True, 'no_breadcrumbs': True, 'title': _('Orders'), }) return list_context @frappe.whitelist() def close_or_unclose_sales_orders(names, status): if not frappe.has_permission("Sales Order", "write"): frappe.throw(_("Not permitted"), frappe.PermissionError) names = json.loads(names) for name in names: so = frappe.get_doc("Sales Order", name) if so.docstatus == 1: if status == "Closed": if so.status not in ("Cancelled", "Closed") and (so.per_delivered < 100 or so.per_billed < 100): so.update_status(status) else: if so.status == "Closed": so.update_status('Draft') so.update_blanket_order() frappe.local.message_log = [] def get_requested_item_qty(sales_order): return frappe._dict(frappe.db.sql(""" select sales_order_item, sum(stock_qty) from `tabMaterial Request Item` where docstatus = 1 and sales_order = %s group by sales_order_item """, sales_order)) @frappe.whitelist() def make_material_request(source_name, target_doc=None): requested_item_qty = get_requested_item_qty(source_name) def postprocess(source, doc): doc.material_request_type = "Purchase" def update_item(source, target, source_parent): # qty is for packed items, because packed items don't have stock_qty field qty = source.get("stock_qty") or source.get("qty") target.project = source_parent.project target.qty = qty - requested_item_qty.get(source.name, 0) target.conversion_factor = 1 target.stock_qty = qty - requested_item_qty.get(source.name, 0) doc = get_mapped_doc("Sales Order", source_name, { "Sales Order": { "doctype": "Material Request", "validation": { "docstatus": ["=", 1] } }, "Packed Item": { "doctype": "Material Request Item", "field_map": { "parent": "sales_order", "stock_uom": "uom" }, "postprocess": update_item }, "Sales Order Item": { "doctype": "Material Request Item", "field_map": { "name": "sales_order_item", "parent": "sales_order", "stock_uom": "uom", "stock_qty": "qty" }, "condition": lambda doc: not frappe.db.exists('Product Bundle', doc.item_code) and doc.stock_qty > requested_item_qty.get(doc.name, 0), "postprocess": update_item } }, target_doc, postprocess) return doc @frappe.whitelist() def make_project(source_name, target_doc=None): def postprocess(source, doc): doc.project_type = "External" doc.project_name = source.name doc = get_mapped_doc("Sales Order", source_name, { "Sales Order": { "doctype": "Project", "validation": { "docstatus": ["=", 1] }, "field_map":{ "name" : "sales_order", "base_grand_total" : "estimated_costing", } }, }, target_doc, postprocess) return doc @frappe.whitelist() def make_delivery_note(source_name, target_doc=None): def set_missing_values(source, target): target.ignore_pricing_rule = 1 target.run_method("set_missing_values") target.run_method("set_po_nos") target.run_method("calculate_taxes_and_totals") # set company address target.update(get_company_address(target.company)) if target.company_address: target.update(get_fetch_values("Delivery Note", 'company_address', 
target.company_address)) def update_item(source, target, source_parent): target.base_amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.base_rate) target.amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.rate) target.qty = flt(source.qty) - flt(source.delivered_qty) item = get_item_defaults(target.item_code, source_parent.company) item_group = get_item_group_defaults(target.item_code, source_parent.company) if item: target.cost_center = frappe.db.get_value("Project", source_parent.project, "cost_center") \ or item.get("buying_cost_center") \ or item_group.get("buying_cost_center") target_doc = get_mapped_doc("Sales Order", source_name, { "Sales Order": { "doctype": "Delivery Note", "validation": { "docstatus": ["=", 1] } }, "Sales Order Item": { "doctype": "Delivery Note Item", "field_map": { "rate": "rate", "name": "so_detail", "parent": "against_sales_order", }, "postprocess": update_item, "condition": lambda doc: abs(doc.delivered_qty) < abs(doc.qty) and doc.delivered_by_supplier!=1 }, "Sales Taxes and Charges": { "doctype": "Sales Taxes and Charges", "add_if_empty": True }, "Sales Team": { "doctype": "Sales Team", "add_if_empty": True } }, target_doc, set_missing_values) return target_doc @frappe.whitelist() def make_sales_invoice(source_name, target_doc=None, ignore_permissions=False): def postprocess(source, target): set_missing_values(source, target) #Get the advance paid Journal Entries in Sales Invoice Advance if target.get("allocate_advances_automatically"): target.set_advances() def set_missing_values(source, target): target.is_pos = 0 target.ignore_pricing_rule = 1 target.flags.ignore_permissions = True target.run_method("set_missing_values") target.run_method("set_po_nos") target.run_method("calculate_taxes_and_totals") # set company address target.update(get_company_address(target.company)) if target.company_address: target.update(get_fetch_values("Sales Invoice", 'company_address', target.company_address)) # set the redeem loyalty points if provided via shopping cart if source.loyalty_points and source.order_type == "Shopping Cart": target.redeem_loyalty_points = 1 def update_item(source, target, source_parent): target.amount = flt(source.amount) - flt(source.billed_amt) target.base_amount = target.amount * flt(source_parent.conversion_rate) target.qty = target.amount / flt(source.rate) if (source.rate and source.billed_amt) else source.qty - source.returned_qty if source_parent.project: target.cost_center = frappe.db.get_value("Project", source_parent.project, "cost_center") if not target.cost_center and target.item_code: item = get_item_defaults(target.item_code, source_parent.company) item_group = get_item_group_defaults(target.item_code, source_parent.company) target.cost_center = item.get("selling_cost_center") \ or item_group.get("selling_cost_center") doclist = get_mapped_doc("Sales Order", source_name, { "Sales Order": { "doctype": "Sales Invoice", "field_map": { "party_account_currency": "party_account_currency", "payment_terms_template": "payment_terms_template" }, "validation": { "docstatus": ["=", 1] } }, "Sales Order Item": { "doctype": "Sales Invoice Item", "field_map": { "name": "so_detail", "parent": "sales_order", }, "postprocess": update_item, "condition": lambda doc: doc.qty and (doc.base_amount==0 or abs(doc.billed_amt) < abs(doc.amount)) }, "Sales Taxes and Charges": { "doctype": "Sales Taxes and Charges", "add_if_empty": True }, "Sales Team": { "doctype": "Sales Team", "add_if_empty": True } }, target_doc, postprocess, 
ignore_permissions=ignore_permissions) return doclist @frappe.whitelist() def make_maintenance_schedule(source_name, target_doc=None): maint_schedule = frappe.db.sql("""select t1.name from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2 where t2.parent=t1.name and t2.sales_order=%s and t1.docstatus=1""", source_name) if not maint_schedule: doclist = get_mapped_doc("Sales Order", source_name, { "Sales Order": { "doctype": "Maintenance Schedule", "validation": { "docstatus": ["=", 1] } }, "Sales Order Item": { "doctype": "Maintenance Schedule Item", "field_map": { "parent": "sales_order" }, "add_if_empty": True } }, target_doc) return doclist @frappe.whitelist() def make_maintenance_visit(source_name, target_doc=None): visit = frappe.db.sql("""select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2 where t2.parent=t1.name and t2.prevdoc_docname=%s and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name) if not visit: doclist = get_mapped_doc("Sales Order", source_name, { "Sales Order": { "doctype": "Maintenance Visit", "validation": { "docstatus": ["=", 1] } }, "Sales Order Item": { "doctype": "Maintenance Visit Purpose", "field_map": { "parent": "prevdoc_docname", "parenttype": "prevdoc_doctype" }, "add_if_empty": True } }, target_doc) return doclist @frappe.whitelist() def get_events(start, end, filters=None): """Returns events for Gantt / Calendar view rendering. :param start: Start date-time. :param end: End date-time. :param filters: Filters (JSON). """ from frappe.desk.calendar import get_event_conditions conditions = get_event_conditions("Sales Order", filters) data = frappe.db.sql(""" select distinct `tabSales Order`.name, `tabSales Order`.customer_name, `tabSales Order`.status, `tabSales Order`.delivery_status, `tabSales Order`.billing_status, `tabSales Order Item`.delivery_date from `tabSales Order`, `tabSales Order Item` where `tabSales Order`.name = `tabSales Order Item`.parent and (ifnull(`tabSales Order Item`.delivery_date, '0000-00-00')!= '0000-00-00') \ and (`tabSales Order Item`.delivery_date between %(start)s and %(end)s) and `tabSales Order`.docstatus < 2 {conditions} """.format(conditions=conditions), { "start": start, "end": end }, as_dict=True, update={"allDay": 0}) return data @frappe.whitelist() def make_purchase_order(source_name, for_supplier=None, selected_items=[], target_doc=None): if isinstance(selected_items, string_types): selected_items = json.loads(selected_items) def set_missing_values(source, target): target.supplier = supplier target.apply_discount_on = "" target.additional_discount_percentage = 0.0 target.discount_amount = 0.0 target.inter_company_order_reference = "" default_price_list = frappe.get_value("Supplier", supplier, "default_price_list") if default_price_list: target.buying_price_list = default_price_list if any( item.delivered_by_supplier==1 for item in source.items): if source.shipping_address_name: target.shipping_address = source.shipping_address_name target.shipping_address_display = source.shipping_address else: target.shipping_address = source.customer_address target.shipping_address_display = source.address_display target.customer_contact_person = source.contact_person target.customer_contact_display = source.contact_display target.customer_contact_mobile = source.contact_mobile target.customer_contact_email = source.contact_email else: target.customer = "" target.customer_name = "" target.run_method("set_missing_values") target.run_method("calculate_taxes_and_totals") def 
update_item(source, target, source_parent): target.schedule_date = source.delivery_date target.qty = flt(source.qty) - flt(source.ordered_qty) target.stock_qty = (flt(source.qty) - flt(source.ordered_qty)) * flt(source.conversion_factor) target.project = source_parent.project suppliers =[] if for_supplier: suppliers.append(for_supplier) else: sales_order = frappe.get_doc("Sales Order", source_name) for item in sales_order.items: if item.supplier and item.supplier not in suppliers: suppliers.append(item.supplier) for supplier in suppliers: po =frappe.get_list("Purchase Order", filters={"sales_order":source_name, "supplier":supplier, "docstatus": ("<", "2")}) if len(po) == 0: doc = get_mapped_doc("Sales Order", source_name, { "Sales Order": { "doctype": "Purchase Order", "field_no_map": [ "address_display", "contact_display", "contact_mobile", "contact_email", "contact_person", "taxes_and_charges" ], "validation": { "docstatus": ["=", 1] } }, "Sales Order Item": { "doctype": "Purchase Order Item", "field_map": [ ["name", "sales_order_item"], ["parent", "sales_order"], ["stock_uom", "stock_uom"], ["uom", "uom"], ["conversion_factor", "conversion_factor"], ["delivery_date", "schedule_date"] ], "field_no_map": [ "rate", "price_list_rate" ], "postprocess": update_item, "condition": lambda doc: doc.ordered_qty < doc.qty and doc.supplier == supplier and doc.item_code in selected_items } }, target_doc, set_missing_values) if not for_supplier: doc.insert() else: suppliers =[] if suppliers: if not for_supplier: frappe.db.commit() return doc else: frappe.msgprint(_("PO already created for all sales order items")) @frappe.whitelist() def get_supplier(doctype, txt, searchfield, start, page_len, filters): supp_master_name = frappe.defaults.get_user_default("supp_master_name") if supp_master_name == "Supplier Name": fields = ["name", "supplier_group"] else: fields = ["name", "supplier_name", "supplier_group"] fields = ", ".join(fields) return frappe.db.sql("""select {field} from `tabSupplier` where docstatus < 2 and ({key} like %(txt)s or supplier_name like %(txt)s) and name in (select supplier from `tabSales Order Item` where parent = %(parent)s) and name not in (select supplier from `tabPurchase Order` po inner join `tabPurchase Order Item` poi on po.name=poi.parent where po.docstatus<2 and poi.sales_order=%(parent)s) order by if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999), if(locate(%(_txt)s, supplier_name), locate(%(_txt)s, supplier_name), 99999), name, supplier_name limit %(start)s, %(page_len)s """.format(**{ 'field': fields, 'key': frappe.db.escape(searchfield) }), { 'txt': "%%%s%%" % txt, '_txt': txt.replace("%", ""), 'start': start, 'page_len': page_len, 'parent': filters.get('parent') }) @frappe.whitelist() def make_work_orders(items, sales_order, company, project=None): '''Make Work Orders against the given Sales Order for the given `items`''' items = json.loads(items).get('items') out = [] for i in items: if not i.get("bom"): frappe.throw(_("Please select BOM against item {0}").format(i.get("item_code"))) if not i.get("pending_qty"): frappe.throw(_("Please select Qty against item {0}").format(i.get("item_code"))) work_order = frappe.get_doc(dict( doctype='Work Order', production_item=i['item_code'], bom_no=i.get('bom'), qty=i['pending_qty'], company=company, sales_order=sales_order, sales_order_item=i['sales_order_item'], project=project, fg_warehouse=i['warehouse'], description=i['description'] )).insert() work_order.set_work_order_operations() work_order.save() 
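		# Each Work Order is insert()ed in Draft state first, then
		# set_work_order_operations() copies the operation list from the
		# selected BOM, and save() persists the updated document before it
		# is collected below.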
out.append(work_order) return [p.name for p in out] @frappe.whitelist() def update_status(status, name): so = frappe.get_doc("Sales Order", name) so.update_status(status) def get_default_bom_item(item_code): bom = frappe.get_all('BOM', dict(item=item_code, is_active=True), order_by='is_default desc') bom = bom[0].name if bom else None return bom @frappe.whitelist() def make_raw_material_request(items, company, sales_order, project=None): if not frappe.has_permission("Sales Order", "write"): frappe.throw(_("Not permitted"), frappe.PermissionError) if isinstance(items, string_types): items = frappe._dict(json.loads(items)) for item in items.get('items'): item["include_exploded_items"] = items.get('include_exploded_items') item["ignore_existing_ordered_qty"] = items.get('ignore_existing_ordered_qty') item["include_raw_materials_from_sales_order"] = items.get('include_raw_materials_from_sales_order') items.update({ 'company': company, 'sales_order': sales_order }) raw_materials = get_items_for_material_requests(items) if not raw_materials: frappe.msgprint(_("Material Request not created, as quantity for Raw Materials already available.")) return material_request = frappe.new_doc('Material Request') material_request.update(dict( doctype = 'Material Request', transaction_date = nowdate(), company = company, requested_by = frappe.session.user, material_request_type = 'Purchase' )) for item in raw_materials: item_doc = frappe.get_cached_doc('Item', item.get('item_code')) schedule_date = add_days(nowdate(), cint(item_doc.lead_time_days)) material_request.append('items', { 'item_code': item.get('item_code'), 'qty': item.get('quantity'), 'schedule_date': schedule_date, 'warehouse': item.get('warehouse'), 'sales_order': sales_order, 'project': project }) material_request.insert() material_request.flags.ignore_permissions = 1 material_request.run_method("set_missing_values") material_request.submit() return material_request @frappe.whitelist() def make_inter_company_purchase_order(source_name, target_doc=None): from erpnext.accounts.doctype.sales_invoice.sales_invoice import make_inter_company_transaction return make_inter_company_transaction("Sales Order", source_name, target_doc)
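As a usage illustration (editorial, not part of the module): a server-side script could chain get_work_order_items() with make_work_orders() to create Work Orders for every pending BOM item of a submitted Sales Order. The document name and company passed in are placeholders, and create_pending_work_orders is a hypothetical helper:

import json

import frappe


def create_pending_work_orders(so_name, company):
	so = frappe.get_doc("Sales Order", so_name)
	# keep only rows that have a default BOM, since make_work_orders()
	# throws when an item is missing its "bom" or "pending_qty"
	pending = [d for d in so.get_work_order_items() if d.get("bom")]
	if not pending:
		return []
	return make_work_orders(json.dumps({"items": pending}),
		so.name, company, project=so.project)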
Zlash65/erpnext
erpnext/selling/doctype/sales_order/sales_order.py
Python
gpl-3.0
35,256
[ "VisIt" ]
d1269a9884ec0d977d8f44f141944f73d7dc814dae12e49fadb78aaba35045e1
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import numpy as np import time import pytest import itertools import tvm import tvm.relay.testing from tvm import relay, runtime from tvm.relay.op.contrib import tensorrt from tvm.contrib import graph_executor, utils from tvm.runtime.vm import VirtualMachine from tvm.relay import Any, GlobalVar, transform from tvm.relay.expr_functor import ExprVisitor from typing import Dict, Tuple, Union from tvm.contrib.download import download from tvm.relay.op.contrib import tensorrt import tvm.testing has_tensorrt_codegen = pytest.mark.skipif( not tvm.get_global_func("relay.ext.tensorrt", True), reason="TensorRT codegen not available" ) has_tensorrt_runtime = pytest.mark.skipif( not tensorrt.is_tensorrt_runtime_enabled(), reason="TensorRT runtime not available" ) run_module = tvm.testing.parameter( pytest.param(False, marks=[has_tensorrt_codegen, *tvm.testing.requires_cuda()]), pytest.param( True, marks=[has_tensorrt_runtime, has_tensorrt_codegen, *tvm.testing.requires_cuda()] ), ids=["compile", "run"], ) def vmobj_to_list(o): if isinstance(o, tvm.nd.NDArray): return [o.numpy()] elif isinstance(o, tvm.runtime.container.ADT) or isinstance(o, list): return [vmobj_to_list(f) for f in o] else: raise RuntimeError("Unknown object type: %s" % type(o)) def assert_result_dict_holds(result_dict): for k1, k2 in itertools.combinations(result_dict, 2): res1 = vmobj_to_list(result_dict[k1]) res2 = vmobj_to_list(result_dict[k2]) for r1, r2 in zip(res1, res2): tvm.testing.assert_allclose(r1, r2, rtol=1e-3, atol=1e-3) def set_func_attr(func, compile_name, symbol_name): func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1)) func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1)) func = func.with_attr("Compiler", compile_name) func = func.with_attr("global_symbol", symbol_name) return func def run_and_verify_func(config, target="cuda", run_module=True): """Test a Relay func by compiling, running, and comparing TVM and TRT outputs. Parameters ---------- config : Tuple[relay.Function, Dict[str, NDArray], List[str]] A tuple containing 1) The function to test, 2) A dictionary of var names to input shapes and 3) A list of which vars should be considered params. run_module: bool If True, the built module will be run after being compiled. 
""" f, input_shapes, is_param = config params = {x: np.random.uniform(-1, 1, input_shapes[x]).astype(np.float32) for x in is_param} input_dict = { k: np.random.uniform(-1, 1, v).astype(np.float32) for k, v in input_shapes.items() if k not in is_param } dev = tvm.device(target) result_dict = dict() for mode in ["graph", "vm"]: for use_trt in [False, True]: mod = tvm.IRModule() mod["main"] = f result_key = mode + ("_trt" if use_trt else "") if use_trt: mod, config = tensorrt.partition_for_tensorrt(mod, params) with tvm.transform.PassContext( opt_level=3, config={"relay.ext.tensorrt.options": config} ): func = relay.create_executor( mode, mod=mod, device=dev, target=target ).evaluate() else: with tvm.transform.PassContext(opt_level=3): func = relay.create_executor( mode, mod=mod, device=dev, target=target ).evaluate() if run_module: result_dict[result_key] = func(**input_dict, **params) if run_module: assert_result_dict_holds(result_dict) def run_and_verify_model(model, run_module): import mxnet as mx from mxnet.gluon.model_zoo.vision import get_model def check_trt_used(mod): num_trt_subgraphs = sum( [1 if gv.name_hint == "tensorrt_0" else 0 for gv in mod.get_global_vars()] ) assert num_trt_subgraphs == 1 def compile_and_run(mod, params, i_data, mode="vm", use_trt=True): assert mode in ["graph", "vm"] if use_trt: mod, config = tensorrt.partition_for_tensorrt(mod, params) check_trt_used(mod) with tvm.transform.PassContext( opt_level=3, config={"relay.ext.tensorrt.options": config} ): func = relay.create_executor( mode, mod=mod, device=tvm.cuda(0), target="cuda" ).evaluate() else: with tvm.transform.PassContext(opt_level=3): func = relay.create_executor( mode, mod=mod, device=tvm.cuda(0), target="cuda" ).evaluate() res = func(i_data, **params) if run_module else None return res dtype = "float32" input_shape = (1, 3, 224, 224) i_data = np.random.uniform(-1, 1, input_shape).astype(dtype) block = get_model(model, pretrained=True) mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype) result_dict = dict() for mode in ["vm", "graph"]: for use_trt in [True, False]: result_key = mode + ("_trt" if use_trt else "") result_dict[result_key] = compile_and_run( mod, params, i_data, mode=mode, use_trt=use_trt ) if run_module: assert_result_dict_holds(result_dict) def test_tensorrt_simple(run_module): dtype = "float32" xshape = (1, 3, 2, 2) yshape = (1, 3, 1, 1) zshape = (1, 1, 1, 1) x = relay.var("x", shape=(xshape), dtype=dtype) y = relay.var("y", shape=(yshape), dtype=dtype) z = relay.var("z", shape=(zshape), dtype=dtype) w = z * (x + y) out = relay.nn.relu(w) f = relay.Function([x, y, z], out) x_data = np.random.uniform(-1, 1, xshape).astype(dtype) y_data = np.random.uniform(-1, 1, yshape).astype(dtype) z_data = np.random.uniform(-1, 1, zshape).astype(dtype) result_dict = dict() for mode in ["vm", "graph"]: for use_trt in [True, False]: mod = tvm.IRModule() mod["main"] = f result_key = mode + ("_trt" if use_trt else "") if use_trt: mod, config = tensorrt.partition_for_tensorrt(mod) with tvm.transform.PassContext( opt_level=3, config={"relay.ext.tensorrt.options": config} ): func = relay.create_executor( mode, mod=mod, device=tvm.cuda(0), target="cuda" ).evaluate() else: with tvm.transform.PassContext(opt_level=3): func = relay.create_executor( mode, mod=mod, device=tvm.cuda(0), target="cuda" ).evaluate() if run_module: result_dict[result_key] = func(x_data, y_data, z_data) if run_module: assert_result_dict_holds(result_dict) def test_tensorrt_simple_cpu_io(run_module): 
def get_graph(): dtype = "float32" x_shape = (1, 3, 2, 2) y_shape = (1, 3, 1, 1) z_shape = (1, 1, 1, 1) x = relay.var("x", shape=(x_shape), dtype=dtype) y = relay.var("y", shape=(y_shape), dtype=dtype) z = relay.var("z", shape=(z_shape), dtype=dtype) w = z * (x + y) out = relay.nn.relu(w) f = relay.Function([x, y, z], out) return f, {"x": x_shape, "y": y_shape, "z": z_shape}, ["y"] run_and_verify_func(get_graph(), target="llvm", run_module=run_module) def test_tensorrt_not_compatible(run_module): dtype = "float32" xshape = (1, 32, 14, 14) x_data = np.random.uniform(-1, 1, xshape).astype(dtype) x = relay.var("x", shape=(xshape), dtype=dtype) y = relay.add(x, x) z = relay.cast(relay.cast(y, "int32"), "float32") out = relay.nn.relu(z) f = relay.Function([x], out) mod = tvm.IRModule() mod["main"] = f mod, config = tensorrt.partition_for_tensorrt(mod) for mode in ["graph", "vm"]: with tvm.transform.PassContext(opt_level=3, config={"relay.ext.tensorrt.options": config}): func = relay.create_executor( mode, mod=mod, device=tvm.cuda(0), target="cuda" ).evaluate() if run_module: results = func(x_data) def test_tensorrt_serialize_graph_executor(run_module): import mxnet as mx from mxnet.gluon.model_zoo.vision import get_model data_shape = (1, 3, 224, 224) data_type = "float32" i_data = np.random.uniform(0, 1, data_shape).astype(data_type) block = get_model("resnet18_v1", pretrained=True) mod, params = relay.frontend.from_mxnet(block, shape={"data": data_shape}, dtype=data_type) mod, config = tensorrt.partition_for_tensorrt(mod) tmpdir = utils.tempdir() def compile_graph(mod, params): with tvm.transform.PassContext(opt_level=3, config={"relay.ext.tensorrt.options": config}): graph, lib, params = relay.build(mod, params=params, target="cuda") params = runtime.save_param_dict(params) return graph, lib, params def run_graph(graph, lib, params): mod_ = graph_executor.create(graph, lib, device=tvm.cuda(0)) mod_.load_params(params) mod_.run(data=i_data) res = mod_.get_output(0) return res def save_graph(graph, lib, params): # Serialize with open(tmpdir.relpath("compiled.json"), "w") as f_graph_json: f_graph_json.write(graph) with open(tmpdir.relpath("compiled.params"), "wb") as f_params: f_params.write(params) lib.export_library(tmpdir.relpath("compiled.so")) def load_graph(): # Deserialize with open(tmpdir.relpath("compiled.json"), "r") as f_graph_json: graph = f_graph_json.read() with open(tmpdir.relpath("compiled.params"), "rb") as f_params: params = bytearray(f_params.read()) lib = tvm.runtime.load_module(tmpdir.relpath("compiled.so")) return graph, lib, params # Test serialization with graph executor graph, lib, graph_params = compile_graph(mod, params) save_graph(graph, lib, graph_params) loaded_graph, loaded_lib, loaded_params = load_graph() if run_module: result_dict = dict() result_dict["graph"] = run_graph(graph, lib, graph_params) result_dict["graph_ref"] = run_graph(loaded_graph, loaded_lib, loaded_params) assert_result_dict_holds(result_dict) def test_tensorrt_serialize_vm(run_module): import mxnet as mx from mxnet.gluon.model_zoo.vision import get_model data_shape = (1, 3, 224, 224) data_type = "float32" i_data = np.random.uniform(0, 1, data_shape).astype(data_type) block = get_model("resnet18_v1", pretrained=True) mod, params = relay.frontend.from_mxnet(block, shape={"data": data_shape}, dtype=data_type) mod, config = tensorrt.partition_for_tensorrt(mod) tmpdir = utils.tempdir() def compile_vm(mod, params): with tvm.transform.PassContext(opt_level=3, config={"relay.ext.tensorrt.options": 
config}): vm_exec = relay.vm.compile(mod, target="cuda", params=params) code, lib = vm_exec.save() return code, lib def run_vm(code, lib): vm_exec = tvm.runtime.vm.Executable.load_exec(code, lib) vm = VirtualMachine(vm_exec, tvm.cuda(0)) result = vm.invoke("main", data=i_data) return result def save_vm(code, lib): # save and load the code and lib file. lib.export_library(tmpdir.relpath("path_lib.so")) with open(tmpdir.relpath("path_code.ro"), "wb") as fo: fo.write(code) def load_vm(): lib = tvm.runtime.load_module(tmpdir.relpath("path_lib.so")) code = bytearray(open(tmpdir.relpath("path_code.ro"), "rb").read()) return lib, code # Test serialization with VM code_vm, lib_vm = compile_vm(mod, params) save_vm(code_vm, lib_vm) loaded_lib_vm, loaded_code_vm = load_vm() if run_module: result_dict = dict() result_dict["vm"] = run_vm(code_vm, lib_vm) result_dict["vm_ref"] = run_vm(loaded_code_vm, loaded_lib_vm) assert_result_dict_holds(result_dict) def test_conv1d(run_module): def get_graph( x_shape=((1, 3, 224)), k_shape=(10, 3, 3), groups=1, padding=(1, 1), strides=(1), dilation=(1), channels=None, ): x = relay.var("x", shape=(x_shape), dtype="float32") kernel = relay.var("kernel", shape=(k_shape), dtype="float32") out = relay.nn.conv1d( x, kernel, kernel_size=k_shape[2:3], groups=groups, padding=padding, strides=strides, dilation=dilation, channels=channels, ) f = relay.Function([x, kernel], out) return f, {"x": x_shape, "kernel": k_shape}, ["kernel"] run_and_verify_func(get_graph(channels=10), run_module=run_module) def test_conv2d(run_module): def get_graph( x_shape=(1, 32, 8, 8), k_shape=(16, 32, 3, 3), groups=1, padding=(0, 0), strides=(1, 1), dilation=(1, 1), channels=None, ): x = relay.var("x", shape=(x_shape), dtype="float32") kernel = relay.var("kernel", shape=(k_shape), dtype="float32") out = relay.nn.conv2d( x, kernel, kernel_size=k_shape[2:4], groups=groups, padding=padding, strides=strides, dilation=dilation, channels=channels, ) f = relay.Function([x, kernel], out) return f, {"x": x_shape, "kernel": k_shape}, ["kernel"] for k_shape, groups in [((16, 32, 3, 3), 1), ((32, 1, 3, 3), 32)]: for padding in [(0, 0), (1, 1)]: for strides in [(1, 1), (2, 2)]: for dilation in [(1, 1), (2, 2)]: run_and_verify_func( get_graph( k_shape=k_shape, groups=groups, padding=padding, strides=strides, dilation=dilation, ), run_module=run_module, ) run_and_verify_func( get_graph((1, 3, 16, 16), (3, 8, 7, 7), 3, [2, 2, 3, 3], [2, 2], [1, 1], 24), run_module=run_module, ) run_and_verify_func(get_graph((1, 3, 16, 16), (1, 3, 1, 1), channels=1), run_module=run_module) def test_conv2d_nhwc(run_module): def get_graph(x_shape=(1, 8, 8, 32), k_shape=(3, 3, 32, 16)): x = relay.var("x", shape=(x_shape), dtype="float32") kernel = relay.var("kernel", shape=(k_shape), dtype="float32") out = relay.nn.conv2d( x, kernel, channels=16, kernel_size=(3, 3), data_layout="NHWC", kernel_layout="HWIO", ) f = relay.Function([x, kernel], out) return f, {"x": x_shape, "kernel": k_shape}, ["kernel"] run_and_verify_func(get_graph(), run_module=run_module) def test_conv2d_weights_const(run_module): def get_graph( x_shape=(1, 32, 8, 8), k_shape=(16, 32, 3, 3), groups=1, padding=(0, 0), strides=(1, 1), dilation=(1, 1), ): x = relay.var("x", shape=(x_shape), dtype="float32") kernel = relay.const(np.ones(k_shape).astype("float32")) out = relay.nn.conv2d( x, kernel, channels=k_shape[0], kernel_size=k_shape[2:4], groups=groups, padding=padding, strides=strides, dilation=dilation, ) f = relay.Function([x], out) return f, {"x": x_shape}, [] 
run_and_verify_func(get_graph(), run_module=run_module) def test_conv2d_weights_transposed(run_module): def get_graph(x_shape=(1, 32, 9, 9), k_shape=(3, 3, 32, 16), order=(3, 2, 0, 1)): x = relay.var("x", shape=(x_shape), dtype="float32") kernel = relay.var("kernel", shape=(k_shape), dtype="float32") kernel_t = relay.transpose(kernel, order) # Conv2d requires constant weights in TensorRT, so the weights should be transposed by # FoldConstant. out = relay.nn.conv2d(x, kernel_t, channels=k_shape[order[0]], kernel_size=(3, 3)) f = relay.Function([x, kernel], out) return f, {"x": x_shape, "kernel": k_shape}, ["kernel"] run_and_verify_func(get_graph(), run_module=run_module) def test_dense(run_module): def get_graph(x_shape=(1, 16), k_shape=(32, 16)): x = relay.var("x", shape=(x_shape), dtype="float32") kernel = relay.var("kernel", shape=(k_shape), dtype="float32") # Dense requires constant weights in TensorRT, so the weights are transposed by us. out = relay.nn.dense(x, kernel, units=k_shape[0]) f = relay.Function([x, kernel], out) return f, {"x": x_shape, "kernel": k_shape}, ["kernel"] run_and_verify_func(get_graph(), run_module=run_module) run_and_verify_func(get_graph(k_shape=(1, 16)), run_module=run_module) def test_batch_matmul(run_module): def get_graph(x_shape=(12, 128, 64), y_shape=(12, 128, 64), transa=False, transb=True): x = relay.var("x", shape=(x_shape), dtype="float32") y = relay.var("y", shape=(y_shape), dtype="float32") out = relay.nn.batch_matmul(x, y, transpose_a=transa, transpose_b=transb) f = relay.Function([x, y], out) return f, {"x": x_shape, "y": y_shape}, [] run_and_verify_func( get_graph(x_shape=(12, 64, 128), y_shape=(12, 128, 64), transa=True, transb=True), run_module=run_module, ) run_and_verify_func( get_graph(x_shape=(12, 64, 128), y_shape=(12, 64, 128), transa=True, transb=False), run_module=run_module, ) run_and_verify_func( get_graph(x_shape=(12, 128, 64), y_shape=(12, 128, 64), transa=False, transb=True), run_module=run_module, ) run_and_verify_func( get_graph(x_shape=(12, 128, 64), y_shape=(12, 64, 128), transa=False, transb=False), run_module=run_module, ) def test_bias_add(run_module): def get_graph(x_shape=(1, 16), channels=16): x = relay.var("x", shape=(x_shape), dtype="float32") bias = relay.var("bias", shape=(channels,), dtype="float32") out = relay.nn.bias_add(x, bias) f = relay.Function([x, bias], out) return f, {"x": x_shape, "bias": (channels,)}, ["bias"] run_and_verify_func(get_graph(), run_module=run_module) run_and_verify_func(get_graph((1, 6, 3, 4), 6), run_module=run_module) def test_pool2d(run_module): def get_graph( op, x_shape=(1, 3, 32, 32), pool_size=(2, 2), strides=(2, 2), padding=(0, 0), ceil_mode=False, count_include_pad=None, ): x = relay.var("x", shape=(x_shape), dtype="float32") if count_include_pad is not None: out = op( x, pool_size=pool_size, strides=strides, padding=padding, ceil_mode=ceil_mode, count_include_pad=count_include_pad, ) else: out = op( x, pool_size=pool_size, strides=strides, padding=padding, ceil_mode=ceil_mode, ) f = relay.Function([x], out) return f, {"x": x_shape}, [] for pool_size in [(2, 2), (3, 3)]: for strides in [(1, 1), (2, 2)]: for padding in [(0, 0), (1, 1), (0, 0, 1, 1)]: for ceil_mode in [False, True]: # Skip "the padding size is larger than or equal to the filter size for exclusive-counting pooling" if pool_size == (2, 2) and padding == (0, 0, 1, 1): continue for count_include_pad in [False, True]: # Skip "inclusive-counted blended or average pooling is not supported in combination with asymmetric 
padding" if count_include_pad and (padding == (0, 0, 1, 1) or strides == (2, 2)): continue run_and_verify_func( get_graph( relay.nn.avg_pool2d, pool_size=pool_size, strides=strides, padding=padding, ceil_mode=ceil_mode, count_include_pad=count_include_pad, ), run_module=run_module, ) run_and_verify_func( get_graph( relay.nn.max_pool2d, pool_size=pool_size, strides=strides, padding=padding, ceil_mode=ceil_mode, ), run_module=run_module, ) def test_global_pool2d(run_module): def get_graph(op, x_shape=(1, 3, 32, 32)): x = relay.var("x", shape=(x_shape), dtype="float32") out = op(x) f = relay.Function([x], out) return f, {"x": x_shape}, [] run_and_verify_func(get_graph(relay.nn.global_max_pool2d), run_module=run_module) run_and_verify_func(get_graph(relay.nn.global_avg_pool2d), run_module=run_module) def test_batch_flatten(run_module): def get_graph(x_shape=(1, 3, 4, 6)): x = relay.var("x", shape=(x_shape), dtype="float32") out = relay.nn.batch_flatten(x) f = relay.Function([x], out) return f, {"x": x_shape}, [] run_and_verify_func(get_graph(), run_module=run_module) def test_expand_dims(run_module): def get_graph(x_shape=(1, 3), axis=1, num_newaxis=1): x = relay.var("x", shape=(x_shape), dtype="float32") out = relay.expand_dims(x, axis, num_newaxis) f = relay.Function([x], out) return f, {"x": x_shape}, [] run_and_verify_func(get_graph(), run_module=run_module) def test_squeeze(run_module): def get_graph(x_shape, axis): x = relay.var("x", shape=(x_shape), dtype="float32") out = relay.squeeze(x, axis=axis) f = relay.Function([x], out) return f, {"x": x_shape}, [] run_and_verify_func(get_graph((1, 5, 1, 1), (2, 3)), run_module=run_module) run_and_verify_func(get_graph((1, 3, 1), (-1,)), run_module=run_module) def test_concatenate(run_module): def get_graph(input_shapes, axis): concat_inputs = [] shapes_dict = {} for i in range(len(input_shapes)): name = "input_{}".format(i) concat_inputs.append(relay.var(name, shape=(input_shapes[i]), dtype="float32")) shapes_dict[name] = input_shapes[i] out = relay.concatenate(concat_inputs, axis) f = relay.Function(concat_inputs, out) return f, shapes_dict, [] run_and_verify_func(get_graph([(1, 2, 6, 6), (1, 3, 6, 6)], axis=1), run_module=run_module) def test_split(run_module): def get_graph(x_shape, indices_or_sections, axis): x = relay.var("x", shape=(x_shape), dtype="float32") out = relay.split(x, indices_or_sections=indices_or_sections, axis=axis) f = relay.Function([x], out.astuple()) return f, {"x": x_shape}, [] run_and_verify_func(get_graph((1, 16), indices_or_sections=2, axis=1), run_module=run_module) run_and_verify_func(get_graph((1, 16), indices_or_sections=4, axis=1), run_module=run_module) run_and_verify_func(get_graph((1, 16), indices_or_sections=[8], axis=1), run_module=run_module) run_and_verify_func( get_graph((1, 16), indices_or_sections=[2, 3, 6, 10, 14], axis=1), run_module=run_module ) def test_conv2d_transpose(run_module): def get_graph( x_shape=(1, 32, 8, 8), k_shape=(32, 16, 3, 3), groups=1, padding=(0, 0), strides=(1, 1), ): x = relay.var("x", shape=(x_shape), dtype="float32") kernel = relay.var("kernel", shape=(k_shape), dtype="float32") out = relay.nn.conv2d_transpose( x, kernel, channels=k_shape[1], kernel_size=k_shape[2:4], groups=groups, padding=padding, strides=strides, ) f = relay.Function([x, kernel], out) return f, {"x": x_shape, "kernel": k_shape}, ["kernel"] for padding in [(0, 0), (1, 1)]: for strides in [(1, 1), (2, 2)]: run_and_verify_func(get_graph(padding=padding, strides=strides), run_module=run_module) def 
test_reshape(run_module):
    def get_graph(x_shape, new_shape):
        x = relay.var("x", shape=(x_shape), dtype="float32")
        out = relay.reshape(x, new_shape)
        f = relay.Function([x], out)
        return f, {"x": x_shape}, []

    run_and_verify_func(get_graph((1, 1, 1, 10), (-1, 10)), run_module=run_module)
    run_and_verify_func(get_graph((1, 10, 2, 3), (1, -1)), run_module=run_module)
    run_and_verify_func(get_graph((1, 1, 2, 3), (1, 6)), run_module=run_module)


class AreOpsOnGraph(ExprVisitor):
    """
    Visits the graph recursively and checks if it contains ops in the op_list
    """

    def __init__(self, op_list):
        ExprVisitor.__init__(self)
        self.op_list = op_list
        self.on_graph = False

    def visit_call(self, call):
        if isinstance(call.op, tvm.tir.op.Op):
            if str(call.op) in self.op_list:
                self.on_graph = True
        return super().visit_call(call)

    def are_ops_on_graph(self, subgraph) -> bool:
        """
        Recursively visits the graph and checks whether any op from op_list
        is present on it.
        """
        self.visit(subgraph)
        return self.on_graph


def are_ops_on_trt(mod, op_list):
    for subgraph in mod.get_global_vars():
        name = subgraph.name_hint
        op_on_trt = False
        op_on_tvm = True
        if name == "main":
            op_on_tvm = AreOpsOnGraph(op_list).are_ops_on_graph(mod[name].body)
        elif mod[name].attrs and mod[name].attrs["Compiler"] == "tensorrt":
            op_on_trt = AreOpsOnGraph(op_list).are_ops_on_graph(mod[name].body)
        else:
            op_on_tvm &= AreOpsOnGraph(op_list).are_ops_on_graph(mod[name].body)

        if not op_on_trt or op_on_tvm:
            return False

    return True


@pytest.mark.xfail(
    reason=("Currently failing test. See tracking issue https://github.com/apache/tvm/issues/8901")
)
def test_dynamic_reshape(run_module):
    def test_run(x_data_list, x_shape, new_shape, should_offload_to_trt):
        result_arr = [{} for _ in range(len(x_data_list))]
        for use_trt in [True, False]:
            x = relay.var("x", shape=x_shape, dtype="float32")
            out = relay.reshape(x, new_shape)
            f = relay.Function([x], out)
            mod = tvm.IRModule()
            mod["main"] = f
            if use_trt:
                mod, _ = tensorrt.partition_for_tensorrt(
                    mod, params={}, remove_no_mac_subgraphs=False
                )
                assert are_ops_on_trt(mod, op_list=["reshape"]) == should_offload_to_trt
            if run_module:
                with relay.build_config(opt_level=3):
                    func = relay.create_executor(
                        "vm", mod=mod, device=tvm.cpu(0), target="llvm"
                    ).evaluate()
                for i, x_data in enumerate(x_data_list):
                    result_arr[i][use_trt] = func(x_data)
        if run_module:
            for i in range(len(x_data_list)):
                assert_result_dict_holds(result_arr[i])

    dim_values = [1, 1, 0, 2, 3, 0, 1, 3, 2]
    x_shape = (relay.Any(), 3, 2, 3)
    x_data_list = [
        np.ones([dim_value] + list(x_shape)[1:]).astype("float32") for dim_value in dim_values
    ]
    new_shape = (-1, 3, 2, 3)
    should_offload_to_trt = True
    test_run(x_data_list, x_shape, new_shape, should_offload_to_trt)

    dim_values = [1, 1, 0, 2, 3, 0, 1, 3, 2]
    x_shape = (relay.Any(), 3, 2, 3)
    x_data_list = [
        np.ones([dim_value] + list(x_shape)[1:]).astype("float32") for dim_value in dim_values
    ]
    new_shape = (-1, 1, 2, 3)
    should_offload_to_trt = False
    test_run(x_data_list, x_shape, new_shape, should_offload_to_trt)

    dim_values = [1, 1, 0, 2, 3, 0, 1, 3, 2]
    x_shape = (1, relay.Any(), 2, 3)
    x_data_list = [
        np.ones(list(x_shape[:1]) + [dim_value] + list(x_shape)[2:]).astype("float32")
        for dim_value in dim_values
    ]
    new_shape = (1, -1, 2, 3)
    should_offload_to_trt = False
    test_run(x_data_list, x_shape, new_shape, should_offload_to_trt)


def test_transpose(run_module):
    def get_graph(x_shape, order):
        x = relay.var("x", shape=(x_shape), dtype="float32")
        out = relay.transpose(x, order)
        f = relay.Function([x], out)
        return f, {"x": x_shape}, []
run_and_verify_func(get_graph((1, 16, 7, 7), [0, 2, 3, 1]), run_module=run_module) run_and_verify_func(get_graph((1, 7, 7, 16), [0, 3, 1, 2]), run_module=run_module) def test_float_const(run_module): def get_graph(x_shape=(1, 16)): x = relay.var("x", shape=(x_shape), dtype="float32") beta = relay.const(1, dtype="float32") out = relay.multiply(x, beta) f = relay.Function([x], out) return f, {"x": x_shape}, [] run_and_verify_func(get_graph(), run_module=run_module) def test_pad(run_module): def get_graph(x_shape, pad_width): x = relay.var("x", shape=(x_shape), dtype="float32") out = relay.nn.pad(x, pad_width=pad_width) f = relay.Function([x], out) return f, {"x": x_shape}, [] run_and_verify_func( get_graph((1, 8, 16, 16), [[0, 0], [0, 0], [0, 0], [0, 0]]), run_module=run_module ) run_and_verify_func( get_graph((1, 8, 16, 16), [[0, 0], [0, 0], [1, 1], [1, 1]]), run_module=run_module ) run_and_verify_func( get_graph((1, 8, 16, 16), [[0, 0], [0, 0], [0, 1], [2, 0]]), run_module=run_module ) run_and_verify_func( get_graph((1, 8, 3, 16, 16), [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]), run_module=run_module, ) def test_softmax(run_module): def get_graph(x_shape, axis): x = relay.var("x", shape=(x_shape), dtype="float32") out = relay.nn.softmax(x, axis=axis) f = relay.Function([x], out) return f, {"x": x_shape}, [] run_and_verify_func(get_graph((1, 1000), axis=1), run_module=run_module) run_and_verify_func(get_graph((1, 1000), axis=-1), run_module=run_module) run_and_verify_func(get_graph((1, 3, 4), axis=-2), run_module=run_module) run_and_verify_func(get_graph((1, 3, 4), axis=1), run_module=run_module) def test_batch_norm(run_module): def get_graph(x_shape, param_shape, axis=1, epsilon=1e-5): x = relay.var("x", shape=(x_shape), dtype="float32") beta = relay.var("beta", shape=(param_shape), dtype="float32") gamma = relay.var("gamma", shape=(param_shape), dtype="float32") moving_mean = relay.var("moving_mean", shape=(param_shape), dtype="float32") moving_var = relay.var("moving_var", shape=(param_shape), dtype="float32") out, _, _ = relay.nn.batch_norm( x, gamma=gamma, beta=beta, moving_mean=moving_mean, moving_var=moving_var, axis=axis, center=True, scale=True, epsilon=epsilon, ) f = relay.Function([x, gamma, beta, moving_mean, moving_var], out) return ( f, { "x": x_shape, "beta": param_shape, "gamma": param_shape, "moving_mean": param_shape, "moving_var": param_shape, }, ["beta", "gamma", "moving_mean", "moving_var"], ) run_and_verify_func(get_graph((1, 64, 56, 56), (64,)), run_module=run_module) run_and_verify_func( get_graph((1, 56, 56, 64), (64,), axis=3, epsilon=1.001e-05), run_module=run_module ) run_and_verify_func(get_graph((1, 4, 8, 4), (8,), axis=2), run_module=run_module) run_and_verify_func(get_graph((1, 8, 4, 4, 4), (8,), axis=1), run_module=run_module) run_and_verify_func(get_graph((1, 4, 8, 4, 4), (8,), axis=2), run_module=run_module) run_and_verify_func(get_graph((1, 4, 4, 4, 8), (8,), axis=4), run_module=run_module) run_and_verify_func(get_graph((1, 8), (8,), axis=1), run_module=run_module) run_and_verify_func(get_graph((1, 3, 8), (8,), axis=2), run_module=run_module) def test_layer_norm(run_module): def get_graph(x_shape, param_shape, axis=1, epsilon=1e-5): x = relay.var("x", shape=(x_shape), dtype="float32") gamma = relay.var("gamma", shape=(param_shape), dtype="float32") beta = relay.var("beta", shape=(param_shape), dtype="float32") out = relay.nn.layer_norm( x, gamma=gamma, beta=beta, axis=axis, epsilon=epsilon, center=True, scale=True, ) f = relay.Function([x, gamma, beta], 
out) return ( f, { "x": x_shape, "beta": param_shape, "gamma": param_shape, }, ["beta", "gamma"], ) run_and_verify_func(get_graph((1, 32, 8, 8), (32,)), run_module=run_module) run_and_verify_func( get_graph((1, 8, 8, 32), (32,), axis=3, epsilon=1.001e-05), run_module=run_module ) run_and_verify_func(get_graph((1, 8), (8,), axis=1), run_module=run_module) def test_unary(run_module): def get_graph(op, x_shape=(1, 8, 3, 3)): x = relay.var("x", shape=(x_shape), dtype="float32") out = op(x) f = relay.Function([x], out) return f, {"x": x_shape}, [] for op in [ relay.nn.relu, relay.sigmoid, relay.tanh, relay.exp, relay.log, relay.sqrt, relay.abs, relay.negative, relay.sin, relay.cos, relay.atan, relay.ceil, relay.floor, relay.erf, ]: run_and_verify_func(get_graph(op), run_module=run_module) def test_clip(run_module): def get_graph(x_shape=(1, 8, 3, 3)): x = relay.var("x", shape=(x_shape), dtype="float32") out = relay.clip(x, a_min=-0.2, a_max=0.4) f = relay.Function([x], out) return f, {"x": x_shape}, [] run_and_verify_func(get_graph(), run_module=run_module) def test_leaky_relu(run_module): def get_graph(x_shape=(1, 8, 3, 3)): x = relay.var("x", shape=(x_shape), dtype="float32") out = relay.nn.leaky_relu(x, alpha=0.1) f = relay.Function([x], out) return f, {"x": x_shape}, [] run_and_verify_func(get_graph(), run_module=run_module) def test_binary(run_module): def get_graph(op, x_shape, y_shape, y_is_const=False): x = relay.var("x", shape=(x_shape), dtype="float32") if y_is_const: y = relay.const(np.ones(y_shape).astype("float32")) out = op(x, y) f = relay.Function([x], out) return f, {"x": x_shape}, [] y = relay.var("y", shape=(y_shape), dtype="float32") out = op(x, y) f = relay.Function([x, y], out) return f, {"x": x_shape, "y": y_shape}, [] for op in [relay.add, relay.subtract, relay.multiply, relay.divide, relay.power]: for y_is_const in [True, False]: run_and_verify_func( get_graph(op, (1, 8, 3, 3), (1, 8, 3, 3), y_is_const), run_module=run_module ) run_and_verify_func( get_graph(op, (1, 8, 1, 3), (1, 8, 3, 1), y_is_const), run_module=run_module ) run_and_verify_func(get_graph(op, (1, 10), (10,), y_is_const), run_module=run_module) run_and_verify_func( get_graph(op, (1, 1, 1, 10), (10,), y_is_const), run_module=run_module ) run_and_verify_func(get_graph(op, (1, 1, 1), (3,), y_is_const), run_module=run_module) def test_reduce(run_module): def get_graph(op, x_shape=(1, 2, 3, 4), axis=(2, 3), keepdims=False): x = relay.var("x", shape=(x_shape), dtype="float32") out = op(x, axis=axis, keepdims=keepdims) f = relay.Function([x], out) return f, {"x": x_shape}, [] for op in [relay.sum, relay.prod, relay.max, relay.min, relay.mean]: for keepdims in [True, False]: run_and_verify_func(get_graph(op, axis=(1), keepdims=keepdims), run_module=run_module) run_and_verify_func( get_graph(op, axis=(2, 3), keepdims=keepdims), run_module=run_module ) run_and_verify_func( get_graph(op, axis=(1, 2), keepdims=keepdims), run_module=run_module ) run_and_verify_func( get_graph(op, axis=(1, 2, 3), keepdims=keepdims), run_module=run_module ) def test_strided_slice(run_module): def get_graph(x_shape, begin, end, strides=None, slice_mode="size"): x = relay.var("x", shape=(x_shape), dtype="float32") if strides: out = relay.strided_slice( x, begin, end, strides, slice_mode=slice_mode, ) else: out = relay.strided_slice( x, begin, end, slice_mode=slice_mode, ) f = relay.Function([x], out) return f, {"x": x_shape}, [] for slice_mode in ["size", "end"]: run_and_verify_func( get_graph((1, 3, 6, 7), (0, 0, 0, 0), (1, 1, 6, 7), 
slice_mode=slice_mode), run_module=run_module, ) run_and_verify_func( get_graph((1, 3, 6, 7), [0, 1, 0, 0], [1, 2, 6, 6], slice_mode=slice_mode), run_module=run_module, ) run_and_verify_func( get_graph((2, 3, 6, 7), [0, 0, 0, 0], [-1, -1, -1, -1], slice_mode=slice_mode), run_module=run_module, ) run_and_verify_func( get_graph((2, 3, 6, 7), [0, 1, 0, 0], [-1, -1, -1, -1], slice_mode=slice_mode), run_module=run_module, ) run_and_verify_func( get_graph((1, 6), [0, 1], [1, 3], slice_mode=slice_mode), run_module=run_module ) def test_adaptive_pool2d(run_module): def get_graph(op, x_shape=(1, 3, 32, 32), out_size=(1, 1)): x = relay.var("x", shape=(x_shape), dtype="float32") out = op(x, out_size) f = relay.Function([x], out) return f, {"x": x_shape}, [] run_and_verify_func(get_graph(relay.nn.adaptive_max_pool2d), run_module=run_module) run_and_verify_func(get_graph(relay.nn.adaptive_avg_pool2d), run_module=run_module) def test_multiple_outputs(run_module): def get_graph(): x = relay.var("x", shape=(1, 3), dtype="float32") y = relay.var("y", shape=(1, 3), dtype="float32") z = relay.add(x, y) w = relay.add(z, y) out = relay.Tuple((z, w)) f = relay.Function([x, y], out) return f, {"x": (1, 3), "y": (1, 3)}, [] run_and_verify_func(get_graph(), run_module=run_module) def test_conv3d(run_module): def get_graph( x_shape=(1, 32, 8, 8, 8), k_shape=(16, 32, 3, 3, 3), groups=1, padding=(0, 0, 0), strides=(1, 1, 1), dilation=(1, 1, 1), ): x = relay.var("x", shape=(x_shape), dtype="float32") kernel = relay.var("kernel", shape=(k_shape), dtype="float32") out = relay.nn.conv3d( x, kernel, channels=k_shape[0], kernel_size=k_shape[2:], groups=groups, padding=padding, strides=strides, dilation=dilation, ) f = relay.Function([x, kernel], out) return f, {"x": x_shape, "kernel": k_shape}, ["kernel"] run_and_verify_func(get_graph(), run_module=run_module) run_and_verify_func(get_graph(padding=(0, 0, 0, 1, 1, 1)), run_module=run_module) def test_pool3d(run_module): def get_graph( op, x_shape=(1, 3, 8, 32, 32), pool_size=(2, 2, 2), strides=(2, 2, 2), padding=(0, 0, 0), ceil_mode=False, count_include_pad=None, ): x = relay.var("x", shape=(x_shape), dtype="float32") if count_include_pad is not None: out = op( x, pool_size=pool_size, strides=strides, padding=padding, ceil_mode=ceil_mode, count_include_pad=count_include_pad, ) else: out = op( x, pool_size=pool_size, strides=strides, padding=padding, ceil_mode=ceil_mode, ) f = relay.Function([x], out) return f, {"x": x_shape}, [] run_and_verify_func(get_graph(relay.nn.avg_pool3d), run_module=run_module) run_and_verify_func(get_graph(relay.nn.max_pool3d), run_module=run_module) run_and_verify_func( get_graph(relay.nn.max_pool3d, padding=(0, 0, 0, 1, 1, 1)), run_module=run_module ) run_and_verify_func(get_graph(relay.nn.max_pool3d, strides=(1, 1, 1)), run_module=run_module) def test_conv3d_transpose(run_module): def get_graph( x_shape=(1, 32, 8, 8, 8), k_shape=(32, 16, 3, 3, 3), groups=1, padding=(0, 0, 0), strides=(1, 1, 1), output_padding=(0, 0, 0), ): x = relay.var("x", shape=(x_shape), dtype="float32") kernel = relay.var("kernel", shape=(k_shape), dtype="float32") out = relay.nn.conv3d_transpose( x, kernel, channels=k_shape[1], kernel_size=k_shape[2:5], groups=groups, padding=padding, strides=strides, output_padding=output_padding, ) f = relay.Function([x, kernel], out) return f, {"x": x_shape, "kernel": k_shape}, ["kernel"] run_and_verify_func(get_graph(), run_module=run_module) run_and_verify_func(get_graph(strides=(2, 2, 2)), run_module=run_module) run_and_verify_func( 
get_graph(strides=(2, 2, 2), output_padding=(1, 1, 1)), run_module=run_module ) @pytest.mark.xfail( reason=("Currently failing test. See tracking issue https://github.com/apache/tvm/issues/8901") ) def test_alexnet(run_module): run_and_verify_model("alexnet", run_module) @pytest.mark.xfail( reason=("Currently failing test. See tracking issue https://github.com/apache/tvm/issues/8901") ) def test_resnet18_v1(run_module): run_and_verify_model("resnet18_v1", run_module) @pytest.mark.xfail( reason=("Currently failing test. See tracking issue https://github.com/apache/tvm/issues/8901") ) def test_resnet18_v2(run_module): run_and_verify_model("resnet18_v2", run_module) @pytest.mark.xfail( reason=("Currently failing test. See tracking issue https://github.com/apache/tvm/issues/8901") ) def test_squeezenet(run_module): run_and_verify_model("squeezenet1.0", run_module) @pytest.mark.xfail( reason=("Currently failing test. See tracking issue https://github.com/apache/tvm/issues/8901") ) def test_mobilenet(run_module): run_and_verify_model("mobilenet0.25", run_module) @pytest.mark.xfail( reason=("Currently failing test. See tracking issue https://github.com/apache/tvm/issues/8901") ) def test_mobilenet_v2(run_module): run_and_verify_model("mobilenetv2_0.25", run_module) @pytest.mark.xfail( reason=("Currently failing test. See tracking issue https://github.com/apache/tvm/issues/8901") ) def test_vgg11(run_module): run_and_verify_model("vgg11", run_module) @pytest.mark.xfail( reason=("Currently failing test. See tracking issue https://github.com/apache/tvm/issues/8901") ) def test_densenet121(run_module): run_and_verify_model("densenet121", run_module) @pytest.mark.xfail( reason=("Currently failing test. See tracking issue https://github.com/apache/tvm/issues/8901") ) @has_tensorrt_codegen @tvm.testing.requires_cuda def test_dynamic_offload(): """ This test checks for proper dynamic offloading of relay graphs. An addition between the outputs of two conv2d's is performed, one of them having all static args whereas the other has an arg with dynamic shape. It is expected for the TRT partitioner to offload the conv2d with the dynamic arg to TVM while running the other in TRT.
""" data_shape = (1, 32, 8, 8) k_shape = (1, 32, 3, 3) x = relay.var("x", shape=(data_shape[0], data_shape[1], Any(), Any()), dtype="float32") y = relay.var("y", shape=(data_shape), dtype="float32") kernel = relay.var("kernel", shape=(k_shape), dtype="float32") def get_expected(): # Create a nested TRT function that matches the expected output mod = tvm.IRModule() var1 = relay.var("tensorrt_0_i0", shape=(data_shape), dtype="float32") kernel_trt = relay.var("tensorrt_0_i1", shape=(k_shape), dtype="float32") out1 = relay.nn.conv2d(var1, kernel_trt, channels=k_shape[0], kernel_size=k_shape[2:4]) f1 = GlobalVar("tvmgen_default_tensorrt_0") func = relay.Function([var1, kernel_trt], out1) func = set_func_attr(func, "tensorrt", "tvmgen_default_tensorrt_0") mod[f1] = func mod = relay.transform.InferType()(mod) # Create the main function out1 = relay.nn.conv2d(x, kernel, channels=k_shape[0], kernel_size=k_shape[2:4]) out = relay.add(out1, f1(y, kernel)) f = relay.Function([x, y, kernel], out) mod["main"] = f mod = relay.transform.InferType()(mod) return mod # Create relay function that will be offloaded to TRT out1 = relay.nn.conv2d(x, kernel, channels=k_shape[0], kernel_size=k_shape[2:4]) out2 = relay.nn.conv2d(y, kernel, channels=k_shape[0], kernel_size=k_shape[2:4]) out = relay.add(out1, out2) f = relay.Function([x, y, kernel], out) # Pass the function to TRT compilation mod = tvm.IRModule() mod["main"] = f mod = relay.transform.InferType()(mod) mod_trt, config = tensorrt.partition_for_tensorrt(mod, params={}) # Get the expected relay graph and compare mod_exp = get_expected() tvm.ir.assert_structural_equal(mod_trt, mod_exp, map_free_vars=True) def test_tensorrt_dynamic_batch(run_module): batches_to_test = [1, 1, 0, 2, 3, 0, 1, 3, 2] x_shape = (relay.Any(), 1, 8, 8) x_data = np.ones([max(batches_to_test)] + list(x_shape)[1:]).astype("float32") result_arr = [{} for _ in range(len(batches_to_test))] for use_trt in [True, False]: x = relay.var("x", shape=x_shape, dtype="float32") out = relay.nn.relu(x) f = relay.Function([x], out) mod = tvm.IRModule() mod["main"] = f if use_trt: mod, _ = tensorrt.partition_for_tensorrt(mod) if run_module: with relay.build_config(opt_level=3): func = relay.create_executor( "vm", mod=mod, device=tvm.cpu(0), target="llvm" ).evaluate() for i, batch_size in enumerate(batches_to_test): result_arr[i][use_trt] = func(x_data[:batch_size, ...]) if run_module: for i in range(len(batches_to_test)): assert_result_dict_holds(result_arr[i]) def test_tensorrt_dynamic_batch_conv(run_module): batches_to_test = [1, 5, 1, 0, 2, 3, 0, 1, 3, 2] x_shape = (relay.Any(), 32, 8, 8) x_data = np.ones([max(batches_to_test)] + list(x_shape)[1:]).astype("float32") k_shape = (16, 32, 3, 3) params = {"kernel": np.random.uniform(-1, 1, k_shape).astype("float32")} for use_implicit_batch in [True, False]: result_arr = [{"cuda": {}, "llvm": {}} for _ in range(len(batches_to_test))] for use_trt in [True, False]: x = relay.var("x", shape=x_shape, dtype="float32") kernel = relay.var("kernel", shape=k_shape, dtype="float32") out = relay.nn.conv2d(x, kernel, channels=16, kernel_size=(3, 3), groups=1) f = relay.Function([x, kernel], out) mod = tvm.IRModule() mod["main"] = f if use_trt: mod, config = tensorrt.partition_for_tensorrt( mod, params, use_implicit_batch=use_implicit_batch ) if run_module: for target in ["llvm", "cuda"]: with tvm.transform.PassContext( opt_level=3, config={"relay.ext.tensorrt.options": config} ): func = relay.create_executor( "vm", mod=mod, device=tvm.device(target), target=target 
).evaluate() for i, batch_size in enumerate(batches_to_test): result_arr[i][target][use_trt] = func(x_data[:batch_size, ...], **params) if run_module: for i in range(len(batches_to_test)): for target in ["llvm", "cuda"]: assert_result_dict_holds(result_arr[i][target]) def test_maskrcnn_resnet50(run_module) -> None: """ This function tests the working of pytorch maskrcnn with resnet50 as backbone with VM and VM + TRT. Since the order of compiled model outputs is a bit different from original pytorch model, it uses a custom logic for comparison check. """ import torch import torchvision def convert_traced_model_to_vm_trt( traced_module: torch.jit.TopLevelTracedModule, np_sample_input: np.ndarray, target: str ) -> tvm.runtime.vm.Executable: """ This function converts a traced pytorch model to VM + TRT. """ input_shape = np_sample_input.shape input_name = "input0" shape_list = [(input_name, input_shape)] mod, params = relay.frontend.from_pytorch(traced_module, shape_list) mod, config = tensorrt.partition_for_tensorrt(mod, params, remove_no_mac_subgraphs=True) with tvm.transform.PassContext(opt_level=3, disabled_pass=["FoldScaleAxis"]): vm_trt_exec = relay.vm.compile(mod, target=target, params=params) return vm_trt_exec class TraceWrapper(torch.nn.Module): """ This class is a wrapper over the torch module to convert the outputs into traceable form """ def __init__(self, model: torch.nn.Module) -> None: super().__init__() self.model = model def forward( self, inp: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: out = self.model(inp) return out[0]["boxes"], out[0]["scores"], out[0]["labels"], out[0]["masks"] def get_traced_maskrcnn_model(np_sample_input: np.ndarray) -> torch.jit.TopLevelTracedModule: """ This function takes a sample input and returns the traced maskrcnn model """ model_func = torchvision.models.detection.maskrcnn_resnet50_fpn model = TraceWrapper(model_func(pretrained=True)) model.eval() inp = torch.Tensor(np.random.uniform(0.0, 250.0, size=np_sample_input.shape)) with torch.no_grad(): out = model(inp) traced_module = torch.jit.trace(model, inp) traced_module.eval() return traced_module def get_maskrcnn_input(in_size: int) -> np.ndarray: """ This function gets a real image with multiple objects of interest and returns it. """ input_shape = (1, 3, in_size, in_size) img_path = "test_street_small.jpg" img_url = ( "https://raw.githubusercontent.com/dmlc/web-data/" "master/gluoncv/detection/street_small.jpg" ) download(img_url, img_path) import cv2 img = cv2.imread(img_path).astype("float32") img = cv2.resize(img, (in_size, in_size)) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = np.transpose(img / 255.0, [2, 0, 1]) img = np.expand_dims(img, axis=0) return img in_size = 300 np_sample_input = get_maskrcnn_input(in_size) traced_module = get_traced_maskrcnn_model(np_sample_input) vm_trt_exec = convert_traced_model_to_vm_trt(traced_module, np_sample_input, target="llvm") if run_module: dev = tvm.cpu() vm = tvm.runtime.vm.VirtualMachine(vm_trt_exec, dev) vm.set_input("main", **{"input0": np_sample_input}) tvm_res = vm.run() # Descending sort by scores and get the high confidence indices. 
In this example 9 is chosen, # because this image has 9 boxes over 0.9 confidence num_high_confidence_boxes = 9 tvm_indices = np.argsort(-1 * tvm_res[1].numpy())[:num_high_confidence_boxes] with torch.no_grad(): out = traced_module(torch.Tensor(np_sample_input)) # Descending sort by scores and get the high confidence indices pt_indices = np.argsort(-1 * out[1].numpy())[:num_high_confidence_boxes] tol = [1e-1, 5e-3, 1e-5, 4e-1] # [Box Tol, Score Tol, Label Tol, Mask Tol] # Because of certain ops, there are some minor differences in TVM outputs and PT outputs, # which means that the tolerance can't be 1e-4 or 1e-5 throughout. The ideal way to get around # this is to test it on an entire dataset and compare mAP with the original model. # However, since that is not practically possible on CI, the following compromise is made. # These tolerances are chosen based on their impact, or lack thereof, on the mAP score, e.g.: # a 0.1 pixel difference of a box in a 300x300 image won't make any change. for i, tol_val in zip(range(4), tol): np.testing.assert_allclose( tvm_res[i].numpy()[tvm_indices], out[i].numpy()[pt_indices], rtol=tol_val, atol=tol_val, ) def test_empty_subgraph(run_module): x_shape = (1, 3, 5) mod = tvm.IRModule() # Empty tensorrt subgraph. var1 = relay.var("tensorrt_0_i0", shape=(x_shape), dtype="float32") f1 = GlobalVar("tensorrt_0") func = relay.Function([var1], var1) func = set_func_attr(func, "tensorrt", "tvmgen_default_tensorrt_0") mod[f1] = func mod = relay.transform.InferType()(mod) # Create the main function x = relay.var("x", shape=x_shape, dtype="float32") out = f1(relay.nn.relu(x)) f = relay.Function([x], out) mod["main"] = f x_data = np.random.uniform(-1, 1, x_shape).astype("float32") for mode in ["graph", "vm"]: with tvm.transform.PassContext(opt_level=3): func = relay.create_executor( mode, mod=mod, device=tvm.cuda(0), target="cuda" ).evaluate() if run_module: results = func(x_data) if __name__ == "__main__": import sys sys.exit(pytest.main([__file__] + sys.argv[1:]))
dmlc/tvm
tests/python/contrib/test_tensorrt.py
Python
apache-2.0
56,782
[ "VisIt" ]
c0dc8800b22fad0c014d6d1287dd54fc9ca78202105e23d917ba3a2b0aefc9bf
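A note on the pattern this test file exercises end to end: each test builds a small Relay function, optionally partitions it for TensorRT, and then runs it through the VM executor. Below is a minimal standalone sketch of that flow, using only calls that already appear in the file above; it assumes a TVM build with the TensorRT contrib package available (on a stock build the partition step is unavailable), so treat it as an illustration rather than a drop-in test.

```python
import numpy as np
import tvm
from tvm import relay
from tvm.relay.op.contrib import tensorrt  # assumed location of partition_for_tensorrt

# Build a tiny Relay function, y = relu(x), as the tests above do.
x = relay.var("x", shape=(1, 3, 8, 8), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x], relay.nn.relu(x)))

# Offload supported operators to TensorRT; unsupported ops stay on TVM.
mod, _ = tensorrt.partition_for_tensorrt(mod)

# Compile and execute with the VM executor, mirroring the tests above.
with tvm.transform.PassContext(opt_level=3):
    func = relay.create_executor(
        "vm", mod=mod, device=tvm.cpu(0), target="llvm"
    ).evaluate()
print(func(np.ones((1, 3, 8, 8), dtype="float32")))
```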
#!/usr/bin/env python2 # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai from __future__ import (unicode_literals, division, absolute_import, print_function) __license__ = 'GPL v3' __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import re, threading from future_builtins import map from calibre import browser, random_user_agent from calibre.customize import Plugin from calibre.utils.icu import capitalize, lower, upper from calibre.ebooks.metadata import check_isbn from calibre.utils.localization import canonicalize_lang, get_lang def create_log(ostream=None): from calibre.utils.logging import ThreadSafeLog, FileStream log = ThreadSafeLog(level=ThreadSafeLog.DEBUG) log.outputs = [FileStream(ostream)] return log # Comparing Metadata objects for relevance {{{ words = ("the", "a", "an", "of", "and") prefix_pat = re.compile(r'^(%s)\s+'%("|".join(words))) trailing_paren_pat = re.compile(r'\(.*\)$') whitespace_pat = re.compile(r'\s+') def cleanup_title(s): if not s: s = _('Unknown') s = s.strip().lower() s = prefix_pat.sub(' ', s) s = trailing_paren_pat.sub('', s) s = whitespace_pat.sub(' ', s) return s.strip() class InternalMetadataCompareKeyGen(object): ''' Generate a sort key for comparison of the relevance of Metadata objects, given a search query. This is used only to compare results from the same metadata source, not across different sources. The sort key ensures that an ascending order sort is a sort by order of decreasing relevance. The algorithm is: * Prefer results that have at least one identifier the same as for the query * Prefer results with a cached cover URL * Prefer results with all available fields filled in * Prefer results with the same language as the current user interface language * Prefer results that are an exact title match to the query * Prefer results with longer comments (greater than 10% longer) * Use the relevance of the result as reported by the metadata source's search engine ''' def __init__(self, mi, source_plugin, title, authors, identifiers): same_identifier = 2 idents = mi.get_identifiers() for k, v in identifiers.iteritems(): if idents.get(k) == v: same_identifier = 1 break all_fields = 1 if source_plugin.test_fields(mi) is None else 2 exact_title = 1 if title and \ cleanup_title(title) == cleanup_title(mi.title) else 2 language = 1 if mi.language: mil = canonicalize_lang(mi.language) if mil != 'und' and mil != canonicalize_lang(get_lang()): language = 2 has_cover = 2 if (not source_plugin.cached_cover_url_is_reliable or source_plugin.get_cached_cover_url(mi.identifiers) is None) else 1 self.base = (same_identifier, has_cover, all_fields, language, exact_title) self.comments_len = len(mi.comments.strip() if mi.comments else '') self.extra = (getattr(mi, 'source_relevance', 0), ) def __cmp__(self, other): result = cmp(self.base, other.base) if result == 0: # Now prefer results with the longer comments, within 10% cx, cy = self.comments_len, other.comments_len t = (cx + cy) / 20 delta = cy - cx if abs(delta) > t: result = delta else: result = cmp(self.extra, other.extra) return result # }}} def get_cached_cover_urls(mi): from calibre.customize.ui import metadata_plugins plugins = list(metadata_plugins(['identify'])) for p in plugins: url = p.get_cached_cover_url(mi.identifiers) if url: yield (p, url) def dump_caches(): from calibre.customize.ui import metadata_plugins return {p.name:p.dump_caches() for p in metadata_plugins(['identify'])} def load_caches(dump): from calibre.customize.ui import metadata_plugins 
plugins = list(metadata_plugins(['identify'])) for p in plugins: cache = dump.get(p.name, None) if cache: p.load_caches(cache) def cap_author_token(token): lt = lower(token) if lt in ('von', 'de', 'el', 'van', 'le'): return lt # no digits, no special characters if re.match(r'([^\d\W]\.){2,}$', lt, re.UNICODE) is not None: # Normalize tokens of the form J.K. to J. K. parts = token.split('.') return '. '.join(map(capitalize, parts)).strip() scots_name = None for x in ('mc', 'mac'): if (token.lower().startswith(x) and len(token) > len(x) and ( token[len(x)] == upper(token[len(x)]) or lt == token )): scots_name = len(x) break ans = capitalize(token) if scots_name is not None: ans = ans[:scots_name] + upper(ans[scots_name]) + ans[scots_name+1:] for x in ('-', "'"): idx = ans.find(x) if idx > -1 and len(ans) > idx+2: ans = ans[:idx+1] + upper(ans[idx+1]) + ans[idx+2:] return ans def fixauthors(authors): if not authors: return authors ans = [] for x in authors: ans.append(' '.join(map(cap_author_token, x.split()))) return ans def fixcase(x): if x: from calibre.utils.titlecase import titlecase x = titlecase(x) return x class Option(object): __slots__ = ['type', 'default', 'label', 'desc', 'name', 'choices'] def __init__(self, name, type_, default, label, desc, choices=None): ''' :param name: The name of this option. Must be a valid python identifier :param type_: The type of this option, one of ('number', 'string', 'bool', 'choices') :param default: The default value for this option :param label: A short (few words) description of this option :param desc: A longer description of this option :param choices: A dict of possible values, used only if type='choices'. dict is of the form {key:human readable label, ...} ''' self.name, self.type, self.default, self.label, self.desc = (name, type_, default, label, desc) if choices and not isinstance(choices, dict): choices = dict([(x, x) for x in choices]) self.choices = choices class Source(Plugin): type = _('Metadata source') author = 'Kovid Goyal' supported_platforms = ['windows', 'osx', 'linux'] #: Set of capabilities supported by this plugin. #: Useful capabilities are: 'identify', 'cover' capabilities = frozenset() #: List of metadata fields that can potentially be downloaded by this plugin #: during the identify phase touched_fields = frozenset() #: Set this to True if your plugin returns HTML formatted comments has_html_comments = False #: Setting this to True means that the browser object will add #: Accept-Encoding: gzip to all requests. This can speed up downloads #: but make sure that the source actually supports gzip transfer encoding #: correctly first supports_gzip_transfer_encoding = False #: Set this to True to ignore HTTPS certificate errors when connecting #: to this source. ignore_ssl_errors = False #: Cached cover URLs can sometimes be unreliable (i.e. the download could #: fail or the returned image could be bogus). If that is often the case #: with this source, set this to False cached_cover_url_is_reliable = True #: A list of :class:`Option` objects. They will be used to automatically #: construct the configuration widget for this plugin options = () #: A string that is displayed at the top of the config widget for this #: plugin config_help_message = None #: If True this source can return multiple covers for a given query can_get_multiple_covers = False #: If set to True covers downloaded by this plugin are automatically trimmed.
auto_trim_covers = False #: If set to True, and this source returns multiple results for a query, #: some of which have ISBNs and some of which do not, the results without #: ISBNs will be ignored prefer_results_with_isbn = True def __init__(self, *args, **kwargs): Plugin.__init__(self, *args, **kwargs) self.running_a_test = False # Set to True when using identify_test() self._isbn_to_identifier_cache = {} self._identifier_to_cover_url_cache = {} self.cache_lock = threading.RLock() self._config_obj = None self._browser = None self.prefs.defaults['ignore_fields'] = [] for opt in self.options: self.prefs.defaults[opt.name] = opt.default # Configuration {{{ def is_configured(self): ''' Return False if your plugin needs to be configured before it can be used. For example, it might need a username/password/API key. ''' return True def is_customizable(self): return True def customization_help(self): return 'This plugin can only be customized using the GUI' def config_widget(self): from calibre.gui2.metadata.config import ConfigWidget return ConfigWidget(self) def save_settings(self, config_widget): config_widget.commit() @property def prefs(self): if self._config_obj is None: from calibre.utils.config import JSONConfig self._config_obj = JSONConfig('metadata_sources/%s.json'%self.name) return self._config_obj # }}} # Browser {{{ @property def user_agent(self): # Pass in an index to random_user_agent() to test with a particular # user agent return random_user_agent() @property def browser(self): if self._browser is None: self._browser = browser(user_agent=self.user_agent, verify_ssl_certificates=not self.ignore_ssl_errors) if self.supports_gzip_transfer_encoding: self._browser.set_handle_gzip(True) return self._browser.clone_browser() # }}} # Caching {{{ def get_related_isbns(self, id_): with self.cache_lock: for isbn, q in self._isbn_to_identifier_cache.iteritems(): if q == id_: yield isbn def cache_isbn_to_identifier(self, isbn, identifier): with self.cache_lock: self._isbn_to_identifier_cache[isbn] = identifier def cached_isbn_to_identifier(self, isbn): with self.cache_lock: return self._isbn_to_identifier_cache.get(isbn, None) def cache_identifier_to_cover_url(self, id_, url): with self.cache_lock: self._identifier_to_cover_url_cache[id_] = url def cached_identifier_to_cover_url(self, id_): with self.cache_lock: return self._identifier_to_cover_url_cache.get(id_, None) def dump_caches(self): with self.cache_lock: return {'isbn_to_identifier':self._isbn_to_identifier_cache.copy(), 'identifier_to_cover':self._identifier_to_cover_url_cache.copy()} def load_caches(self, dump): with self.cache_lock: self._isbn_to_identifier_cache.update(dump['isbn_to_identifier']) self._identifier_to_cover_url_cache.update(dump['identifier_to_cover']) # }}} # Utility functions {{{ def get_author_tokens(self, authors, only_first_author=True): ''' Take a list of authors and return a list of tokens useful for an AND search query. This function tries to return tokens in first name middle names last name order, by assuming that if a comma is in the author name, the name is in lastname, other names form. 
''' if authors: # Leave ' in there for Irish names remove_pat = re.compile(r'[!@#$%^&*(){}`~"\s\[\]/]') replace_pat = re.compile(r'[-+.:;,]') if only_first_author: authors = authors[:1] for au in authors: has_comma = ',' in au au = replace_pat.sub(' ', au) parts = au.split() if has_comma: # au probably in ln, fn form parts = parts[1:] + parts[:1] for tok in parts: tok = remove_pat.sub('', tok).strip() if len(tok) > 2 and tok.lower() not in ('von', 'van', _('Unknown').lower()): yield tok def get_title_tokens(self, title, strip_joiners=True, strip_subtitle=False): ''' Take a title and return a list of tokens useful for an AND search query. Excludes connectives(optionally) and punctuation. ''' if title: # strip sub-titles if strip_subtitle: subtitle = re.compile(r'([\(\[\{].*?[\)\]\}]|[/:\\].*$)') if len(subtitle.sub('', title)) > 1: title = subtitle.sub('', title) title_patterns = [(re.compile(pat, re.IGNORECASE), repl) for pat, repl in [ # Remove things like: (2010) (Omnibus) etc. (r'(?i)[({\[](\d{4}|omnibus|anthology|hardcover|audiobook|audio\scd|paperback|turtleback|mass\s*market|edition|ed\.)[\])}]', ''), # Remove any strings that contain the substring edition inside # parentheses (r'(?i)[({\[].*?(edition|ed.).*?[\]})]', ''), # Remove commas used a separators in numbers (r'(\d+),(\d+)', r'\1\2'), # Remove hyphens only if they have whitespace before them (r'(\s-)', ' '), # Replace other special chars with a space (r'''[:,;!@$%^&*(){}.`~"\s\[\]/]''', ' '), ]] for pat, repl in title_patterns: title = pat.sub(repl, title) tokens = title.split() for token in tokens: token = token.strip().strip('"').strip("'") if token and (not strip_joiners or token.lower() not in ('a', 'and', 'the', '&')): yield token def split_jobs(self, jobs, num): 'Split a list of jobs into at most num groups, as evenly as possible' groups = [[] for i in range(num)] jobs = list(jobs) while jobs: for gr in groups: try: job = jobs.pop() except IndexError: break gr.append(job) return [g for g in groups if g] def test_fields(self, mi): ''' Return the first field from self.touched_fields that is null on the mi object ''' for key in self.touched_fields: if key.startswith('identifier:'): key = key.partition(':')[-1] if not mi.has_identifier(key): return 'identifier: ' + key elif mi.is_null(key): return key def clean_downloaded_metadata(self, mi): ''' Call this method in your plugin's identify method to normalize metadata before putting the Metadata object into result_queue. You can of course, use a custom algorithm suited to your metadata source. 
''' docase = mi.language == 'eng' or mi.is_null('language') if docase and mi.title: mi.title = fixcase(mi.title) mi.authors = fixauthors(mi.authors) if mi.tags and docase: mi.tags = list(map(fixcase, mi.tags)) mi.isbn = check_isbn(mi.isbn) def download_multiple_covers(self, title, authors, urls, get_best_cover, timeout, result_queue, abort, log, prefs_name='max_covers'): if not urls: log('No images found for title: %r and authors: %r'%(title, authors)) return from threading import Thread import time if prefs_name: urls = urls[:self.prefs[prefs_name]] if get_best_cover: urls = urls[:1] log('Downloading %d covers'%len(urls)) workers = [Thread(target=self.download_image, args=(u, timeout, log, result_queue)) for u in urls] for w in workers: w.daemon = True w.start() alive = True start_time = time.time() while alive and not abort.is_set() and time.time() - start_time < timeout: alive = False for w in workers: if w.is_alive(): alive = True break abort.wait(0.1) def download_image(self, url, timeout, log, result_queue): try: ans = self.browser.open_novisit(url, timeout=timeout).read() result_queue.put((self, ans)) log('Downloaded cover from: %s'%url) except Exception: log.exception('Failed to download cover from: %r'%url) # }}} # Metadata API {{{ def get_book_url(self, identifiers): ''' Return a 3-tuple or None. The 3-tuple is of the form: (identifier_type, identifier_value, URL). The URL is the URL for the book identified by identifiers at this source. identifier_type, identifier_value specify the identifier corresponding to the URL. This URL must be browseable by a human using a browser. It is meant to provide a clickable link for the user to easily visit the book's page at this source. If no URL is found, return None. This method must be quick, and consistent, so only implement it if it is possible to construct the URL from a known scheme given identifiers. ''' return None def get_book_url_name(self, idtype, idval, url): ''' Return a human readable name from the return value of get_book_url(). ''' return self.name def get_book_urls(self, identifiers): ''' Override this method if you would like to return multiple urls for this book. Return a list of 3-tuples. By default this method simply calls :func:`get_book_url`. ''' data = self.get_book_url(identifiers) if data is None: return () return (data,) def get_cached_cover_url(self, identifiers): ''' Return cached cover URL for the book identified by the identifiers dict or None if no such URL exists. Note that this method must only return validated URLs, i.e. not URLs that could result in a generic cover image or a not found error. ''' return None def identify_results_keygen(self, title=None, authors=None, identifiers={}): ''' Return a function that is used to generate a key that can sort Metadata objects by their relevance given a search query (title, authors, identifiers). These keys are used to sort the results of a call to :meth:`identify`. For details on the default algorithm see :class:`InternalMetadataCompareKeyGen`. Re-implement this function in your plugin if the default algorithm is not suitable. ''' def keygen(mi): return InternalMetadataCompareKeyGen(mi, self, title, authors, identifiers) return keygen def identify(self, log, result_queue, abort, title=None, authors=None, identifiers={}, timeout=30): ''' Identify a book by its title/author/isbn/etc.
If identifier(s) are specified and no match is found and this metadata source does not store all related identifiers (for example, all ISBNs of a book), this method should retry with just the title and author (assuming they were specified). If this metadata source also provides covers, the URL to the cover should be cached so that a subsequent call to the get covers API with the same ISBN/special identifier does not need to get the cover URL again. Use the caching API for this. Every Metadata object put into result_queue by this method must have a `source_relevance` attribute that is an integer indicating the order in which the results were returned by the metadata source for this query. This integer will be used by :meth:`compare_identify_results`. If the order is unimportant, set it to zero for every result. Make sure that any cover/isbn mapping information is cached before the Metadata object is put into result_queue. :param log: A log object, use it to output debugging information/errors :param result_queue: A result Queue, results should be put into it. Each result is a Metadata object :param abort: If abort.is_set() returns True, abort further processing and return as soon as possible :param title: The title of the book, can be None :param authors: A list of authors of the book, can be None :param identifiers: A dictionary of other identifiers, most commonly {'isbn':'1234...'} :param timeout: Timeout in seconds, no network request should hang for longer than timeout. :return: None if no errors occurred, otherwise a unicode representation of the error suitable for showing to the user ''' return None def download_cover(self, log, result_queue, abort, title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False): ''' Download a cover and put it into result_queue. The parameters all have the same meaning as for :meth:`identify`. Put (self, cover_data) into result_queue. This method should use cached cover URLs for efficiency whenever possible. When cached data is not present, most plugins simply call identify and use its results. If the parameter get_best_cover is True and this plugin can get multiple covers, it should only get the "best" one. ''' pass # }}}
jelly/calibre
src/calibre/ebooks/metadata/sources/base.py
Python
gpl-3.0
22,537
[ "VisIt" ]
8d958c25dc86b4b115326561c298c4e78ea40472c90560fdaab4a52f718e23f3
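The `Source` class in this file is the extension point for calibre metadata download plugins. For orientation, here is a minimal subclass sketch that exercises only the hooks documented above (`capabilities`, `touched_fields`, `identify`, `clean_downloaded_metadata`). The `Metadata` import path is the standard calibre one, but the plugin itself is hypothetical and any real implementation would query an actual web service via `self.browser`:

```python
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.sources.base import Source

class ExampleSource(Source):
    name = 'Example Source'  # hypothetical plugin name
    capabilities = frozenset(['identify'])
    touched_fields = frozenset(['title', 'authors'])

    def identify(self, log, result_queue, abort, title=None, authors=None,
                 identifiers={}, timeout=30):
        if abort.is_set():
            return None
        # A real plugin would build its query here with get_title_tokens()
        # and get_author_tokens(), then fetch results via self.browser.
        mi = Metadata(title or _('Unknown'), authors or [_('Unknown')])
        mi.source_relevance = 0  # required by the identify() contract above
        self.clean_downloaded_metadata(mi)
        result_queue.put(mi)
        return None
```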
# -*- coding: utf-8 -*- # ### # Copyright (c) 2013, Rice University # This software is subject to the provisions of the GNU Affero General # Public License version 3 (AGPLv3). # See LICENCE.txt for details. # ### """Database search utilities.""" import os import re from collections import Mapping, OrderedDict, Sequence from datetime import datetime from time import strptime from cnxquerygrammar.query_parser import grammar, DictFormater from parsimonious.exceptions import IncompleteParseError from psycopg2.tz import FixedOffsetTimezone, LocalTimezone from .database import SQL_DIRECTORY, db_connect from .utils import ( portaltype_to_mimetype, COLLECTION_MIMETYPE, MODULE_MIMETYPE, PORTALTYPE_TO_MIMETYPE_MAPPING, utf8 ) import logging logger = logging.getLogger('cnxarchive') __all__ = ('search', 'Query',) here = os.path.abspath(os.path.dirname(__file__)) LOCAL_TZINFO = LocalTimezone() with open(os.path.join(here, 'data', 'common-english-words.txt'), 'r') as f: # stopwords are all the common english words plus single characters STOPWORDS = (f.read().split(',') + [chr(i) for i in range(ord('a'), ord('z') + 1)]) # WILDCARD_KEYWORD = 'text' # VALID_FILTER_KEYWORDS = ('type', 'pubYear', 'authorID', 'keyword', 'subject', # 'language', 'title', 'author', 'abstract') # The maximum number of keywords and authors to return in the search result # counts MAX_VALUES_FOR_KEYWORDS = 100 MAX_VALUES_FOR_AUTHORS = 100 SORT_VALUES_MAPPING = { 'pubdate': 'revised DESC', 'version': 'version DESC', 'popularity': 'rank DESC NULLS LAST', } DEFAULT_SEARCH_WEIGHTS = OrderedDict([ ]) SQL_SEARCH_DIRECTORY = os.path.join(SQL_DIRECTORY, 'search') def _read_sql_file(name, root=SQL_SEARCH_DIRECTORY, extension='.sql', remove_comments=False): path = os.path.join(root, '{}{}'.format(name, extension)) with open(path, 'r') as fp: if remove_comments: file = '\n'.join([l for l in fp if not l.startswith('--')]) else: file = fp.read() return file SQL_SEARCH_TEMPLATES = {name: _read_sql_file(name, extension='.part.sql', remove_comments=True) for name in DEFAULT_SEARCH_WEIGHTS.keys()} SQL_WEIGHTED_SELECT_WRAPPER = _read_sql_file('wrapper') SQL_QUICK_SELECT_WRAPPER = _read_sql_file('quick-wrapper') SEARCH_QUERY = _read_sql_file('query') QUERY_FIELD_ITEM_SEPARATOR = ';--;' QUERY_FIELD_PAIR_SEPARATOR = '-::-' SET_OPERATORS = ('OR', 'AND', 'NOT',) QUERY_TYPES = ('OR', 'AND', 'weakAND',) DEFAULT_QUERY_TYPE = QUERY_TYPES[-1] DEFAULT_PER_PAGE = 20 class Query(Sequence): """A structured representation of the query string.""" def __init__(self, query): """Create a query object.""" self.filters = [q for q in query if q[0] not in ('text', 'sort')] self.sorts = [q[1] for q in query if q[0] == 'sort'] self.terms = [q for q in query if q not in self.filters and q[0] != 'sort'] def __repr__(self): """String repr.""" s = "<{} with '{}' >".format(self.__class__.__name__, self.terms) return s def __getitem__(self, index): """Return terms.""" return self.terms[index] def __len__(self): """Length is number of terms.""" return len(self.terms) @classmethod def fix_quotes(cls, query_string): """Heuristic attempt to fix unbalanced quotes in query_string.""" if query_string.count('"') % 2 == 0: # no unbalanced quotes to fix return query_string fields = [] # contains what's matched by the regexp # e.g.
fields = ['sort:pubDate', 'author:"first last"'] def f(match): fields.append(match.string[match.start():match.end()]) return '' # terms will be all the search terms that don't have a field terms = re.sub(r'[^\s:]*:("[^"]*"|[^\s]*)', f, query_string) query_string = '{}" {}'.format(terms.strip(), ' '.join(fields)) return query_string @classmethod def from_raw_query(cls, query_string): """Parse raw string to query. Given a raw string (typically typed by the user), parse to a structured format and initialize the class. """ try: node_tree = grammar.parse(query_string) except IncompleteParseError: query_string = cls.fix_quotes(query_string) node_tree = grammar.parse(query_string) structured_query = DictFormater().visit(node_tree) return cls([t for t in structured_query if t[1].lower() not in STOPWORDS]) class QueryRecord(Mapping): """A query record wrapper to parse hit values and add behavior.""" def __init__(self, **kwargs): self._record = {k: v for k, v in kwargs.items() if k not in ('_keys', 'matched', 'fields',)} if self._record.get('mediaType') in PORTALTYPE_TO_MIMETYPE_MAPPING: self._record['mediaType'] = \ portaltype_to_mimetype(self._record['mediaType']) self.matched = {} self.fields = {} # Parse the matching fields for field_record in kwargs['_keys'].split(QUERY_FIELD_ITEM_SEPARATOR): if len(field_record) > 0: term, key = field_record.split(QUERY_FIELD_PAIR_SEPARATOR) self.matched.setdefault(term, set()).add(key) self.fields.setdefault(key, set()).add(term) self.match_hits = (self.matched, self.fields) def __repr__(self): s = "<{} id='{}'>".format(self.__class__.__name__, self['id']) return s def __getitem__(self, key): return self._record[key] def __iter__(self): return iter(self._record) def __len__(self): return len(self._record) @property def highlighted_abstract(self): """Highlight the found terms in the abstract text.""" abstract_terms = self.fields.get('abstract', []) if abstract_terms: sql = _read_sql_file('highlighted-abstract') else: sql = _read_sql_file('get-abstract') arguments = {'id': self['id'], 'query': ' & '.join(abstract_terms), } with db_connect() as db_connection: with db_connection.cursor() as cursor: cursor.execute(sql, arguments) hl_abstract = cursor.fetchone() if hl_abstract: return hl_abstract[0] @property def highlighted_fulltext(self): """Highlight the found terms in the fulltext.""" terms = self.fields.get('fulltext', []) if not terms: return None arguments = {'id': self['id'], 'query': ' & '.join(terms), } with db_connect() as db_connection: with db_connection.cursor() as cursor: cursor.execute(_read_sql_file('highlighted-fulltext'), arguments) hl_fulltext = cursor.fetchone()[0] return hl_fulltext class QueryResults(Sequence): """List of search results. A listing of query results as well as hit counts and the parsed query string. The query is necessary to do in-python set operations on the rows. 
""" def __init__(self, rows, query, query_type=DEFAULT_QUERY_TYPE): if query_type not in QUERY_TYPES: raise ValueError("Invalid query type supplied: '{}'" .format(query_type)) self._query = query self._records = [QueryRecord(**r[0]) for r in rows] self.counts = { 'type': self._count_media(), 'subject': self._count_field('subjects'), 'keyword': self._count_field('keywords', max_results=MAX_VALUES_FOR_KEYWORDS), 'authorID': self._count_authors( max_results=MAX_VALUES_FOR_AUTHORS), 'pubYear': self._count_publication_year(), } def __repr__(self): s = "<{} with '{}' results>".format(self.__class__.__name__, len(self)) return s def __getitem__(self, index): return self._records[index] def __len__(self): return len(self._records) @property def auxiliary(self): return {'authors': self._auxiliary_authors, 'types': self._auxiliary_types, } @property def _auxiliary_authors(self): attr_name = '_aux_authors' if hasattr(self, attr_name): return getattr(self, attr_name) # Used to make the dict hashable for a set([]). class hashabledict(dict): def __hash__(self): # Use the unique value 'id' as the hash value. return hash(self['id']) authors = set([]) for rec in self._records: for author in rec['authors']: # The author is in dict format, just use it. authors.add(hashabledict(author)) authors = list(authors) authors.sort(lambda x, y: cmp(y['id'], x['id'])) setattr(self, attr_name, authors) return getattr(self, attr_name) @property def _auxiliary_types(self): # If we ever add types beyond book and page, # we'll want to change this. return [{'id': COLLECTION_MIMETYPE, 'name': 'Book'}, {'id': MODULE_MIMETYPE, 'name': 'Page'}] def _count_field(self, field_name, sorted=True, max_results=None): counts = {} for rec in self._records: for value in rec[field_name]: counts.setdefault(value, 0) counts[value] += 1 if max_results: # limit the number of results we return counts = counts.items() # sort counts by the count with highest count first counts.sort(lambda a, b: cmp(a[1], b[1]), reverse=True) counts = counts[:max_results] if sorted: if isinstance(counts, dict): counts = counts.items() # Sort counts by the name alphabetically counts.sort(lambda a, b: cmp(a[0].lower(), b[0].lower())) else: counts = counts.iteritems() return counts def _count_media(self): counts = { MODULE_MIMETYPE: 0, COLLECTION_MIMETYPE: 0, } for rec in self._records: counts[rec['mediaType']] += 1 return [(COLLECTION_MIMETYPE, counts[COLLECTION_MIMETYPE],), (MODULE_MIMETYPE, counts[MODULE_MIMETYPE],), ] def _count_authors(self, max_results=None): counts = {} uid_author = {} # look up author record by uid for rec in self._records: for author in rec['authors']: uid = author['id'] counts.setdefault(uid, 0) counts[uid] += 1 uid_author.setdefault(uid, author) authors = [] for uid, count in counts.iteritems(): author = uid_author[uid] authors.append(((uid, author,), count)) if max_results: # limit the number of results we return # sort counts by the count with highest count first authors.sort(lambda a, b: cmp(a[1], b[1]), reverse=True) authors = authors[:max_results] def sort_name(a, b): (uid_a, author_a), count_a = a (uid_b, author_b), count_b = b result = cmp(author_a['surname'], author_b['surname']) if result == 0: result = cmp(author_a['firstname'], author_b['firstname']) return result # Sort authors by surname then first name authors.sort(sort_name) authors = [(a[0][0], a[1],) for a in authors] return authors def _count_publication_year(self): counts = {} for rec in self._records: date = rec['pubDate'] if date is None: continue date = 
datetime(*strptime(date, "%Y-%m-%dT%H:%M:%SZ")[:6], tzinfo=FixedOffsetTimezone()) year = unicode(date.astimezone(LOCAL_TZINFO).year) counts.setdefault(year, 0) counts[year] += 1 counts = counts.items() # Sort pubYear in reverse chronological order counts.sort(lambda a, b: cmp(a[0], b[0]), reverse=True) return counts def _transmute_sort(sort_value): """Provide a value translation to the SQL column name.""" try: return SORT_VALUES_MAPPING[sort_value.lower()] except KeyError: raise ValueError("Invalid sort key '{}' provided.".format(sort_value)) def _convert(tup, dictlist): """ :param tup: a list of (key, value) tuples :param dictlist: a list extended with (key, values) pairs grouped from tup :return: dictlist """ di = {} for a, b in tup: di.setdefault(a, []).append(b) for key, val in di.items(): dictlist.append((key, val)) return dictlist def _upper(val_list): """ :param val_list: a list of strings :return: a list of upper-cased strings """ res = [] for ele in val_list: res.append(ele.upper()) return res def _filter_stop_words(raw_query): return [term for term in raw_query if term.lower() not in STOPWORDS] def _build_search(structured_query): """Construct search statement for db execution. Produces the search statement and argument dictionary to be executed by the DBAPI v2 execute method. For example, ``cursor.execute(*_build_search(query))`` :param query: containing terms, filters, and sorts. :type query: Query :returns: the built statement and the arguments used against it :rtype: a two value tuple of a SQL template and a dictionary of arguments to pass into that template """ arguments = {} # get text terms and filter out common words text_terms = [term for ttype, term in structured_query.terms if ttype == 'text'] text_terms_wo_stopwords = [term for term in text_terms if term.lower() not in STOPWORDS] # sql where clauses conditions = {'text_terms': '', 'pubYear': '', 'authorID': '', 'type': '', 'keyword': '', 'subject': '', 'language': '', 'title': '', 'author': '', 'abstract': ''} # if there are other search terms (not type "text") or if the text # terms do not only consist of stopwords, then use the text terms # without stopwords if arguments or text_terms_wo_stopwords: text_terms = text_terms_wo_stopwords arguments.update({'text_terms': ' '.join(text_terms)}) # raise Exception(structured_query.filters, structured_query.terms) if len(text_terms) > 0: conditions['text_terms'] = 'AND module_idx \ @@ plainto_tsquery(%(text_terms)s)' # build fulltext keys fulltext_key = [] for term in text_terms_wo_stopwords: fulltext_key.append(term + '-::-fulltext') arguments.update({'fulltext_key': ';--;'.join(fulltext_key)}) idx = 0 invalid_filters = [] filters = _convert(structured_query.filters, []) while idx < len(filters): keyword = filters[idx][0] value = filters[idx][1] if keyword == 'pubYear': conditions['pubYear'] = 'AND extract(year from cm.revised) = \ %(pubYear)s' arguments.update({'pubYear': value[0]}) elif keyword == 'authorID': conditions['authorID'] = 'AND ARRAY[%(authorID)s] \ <@ cm.authors' arguments.update({'authorID': value[0]}) elif keyword == 'type': value[0] = value[0].lower() conditions['type'] = 'AND cm.portal_type = %(type)s' if value[0] != 'book' and value[0] != 'collection' and \ value[0] != 'page' and value[0] != 'module': invalid_filters.append(idx) value[0] = 'Collection' if (value[0] == 'book' or value[0] == 'collection') else 'Module' arguments.update({'type': value[0]}) elif
keyword == 'keyword': value = _upper(value) conditions['keyword'] = 'AND cm.module_ident = \ ANY(WITH target AS ( \ SELECT lm.module_ident AS id, \ array_agg(UPPER(kw.word)) AS akw \ FROM latest_modules lm, \ modulekeywords mk, \ keywords kw \ WHERE lm.module_ident = mk.module_ident \ AND kw.keywordid = mk.keywordid \ GROUP BY id) \ SELECT target.id FROM target WHERE \ target.akw @> %(keyword)s)' arguments.update({'keyword': value}) elif keyword == 'subject': conditions['subject'] = 'AND cm.module_ident = \ ANY(WITH sub AS ( \ SELECT module_ident AS id, \ array_agg(tag) AS atag \ FROM latest_modules \ NATURAL JOIN \ moduletags NATURAL JOIN \ tags GROUP BY id) \ SELECT sub.id FROM sub WHERE \ sub.atag @> %(subject)s)' arguments.update({'subject': value}) elif keyword == 'language': conditions['language'] = 'AND cm.language = %(language)s' arguments.update({'language': value[0]}) elif keyword == 'title': conditions['title'] = 'AND strip_html(cm.name) ~* \ %(title)s' arguments.update({'title': value[0]}) elif keyword == 'author': conditions['author'] = 'AND cm.module_ident = \ ANY(WITH name AS ( \ SELECT username FROM users u WHERE \ u.first_name||\' \'||u.last_name \ ~* %(author)s) \ SELECT lm.module_ident \ FROM latest_modules lm \ JOIN name n ON ARRAY[n.username] \ <@ lm.authors)' arguments.update({'author': value[0]}) elif keyword == 'abstract': conditions['abstract'] = 'AND cm.module_ident = \ ANY(SELECT lm.module_ident FROM \ latest_modules lm, \ abstracts ab WHERE\ lm.abstractid = ab.abstractid \ AND ab.abstract \ ~* %(abstract)s)' arguments.update({'abstract': value[0]}) else: # Invalid filter! invalid_filters.append(idx) idx += 1 if len(invalid_filters) == len(structured_query.filters) and \ len(structured_query.terms) == 0: # Either query terms are all invalid filters # or we received a null query. # Clear the filter list in this case. structured_query.filters = [] return None, None for invalid_filter_idx in invalid_filters: # Remove invalid filters. del structured_query.filters[invalid_filter_idx] # Add the arguments for sorting. sorts = ['portal_type'] if structured_query.sorts: for sort in structured_query.sorts: # These sort values are not the name of the column used # in the database. stmt = _transmute_sort(sort) sorts.append(stmt) sorts.extend(('weight DESC', 'uuid DESC',)) sorts = ', '.join(sorts) statement = SQL_QUICK_SELECT_WRAPPER.format(conditions['pubYear'], conditions['authorID'], conditions['type'], conditions['keyword'], conditions['subject'], conditions['text_terms'], conditions['language'], conditions['title'], conditions['author'], conditions['abstract'], sorts=sorts) return statement, arguments def search(query, query_type=DEFAULT_QUERY_TYPE): """Search database using parsed query. Executes a database search query from the given ``query`` (a ``Query`` object) and optionally accepts a list of search weights. By default, the search results are ordered by weight. :param query: containing terms, filters, and sorts. :type query: Query :returns: a sequence of records that match the query conditions :rtype: QueryResults (which is a sequence of QueryRecord objects) """ # Build the SQL statement. statement, arguments = _build_search(query) # Execute the SQL. if statement is None and arguments is None: return QueryResults([], [], 'AND') with db_connect() as db_connection: with db_connection.cursor() as cursor: cursor.execute(statement, arguments) search_results = cursor.fetchall() # Wrap the SQL results. return QueryResults(search_results, query, query_type)
Connexions/cnx-archive
cnxarchive/search.py
Python
agpl-3.0
22,499
[ "VisIt" ]
3f3227530465a8999e9df065ec378def06abe585b5d47bb4fb4654117f1e6247
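The `_convert` helper in the record above is easy to misread from its docstring (the documented parameter names do not match the signature). Below is a minimal, self-contained restatement of what it does; the filter tuples are hypothetical examples, not real parsed-query output.

# Standalone sketch of the (keyword, value) grouping done by _convert above.
def group_filters(pairs):
    """Group (keyword, value) tuples into (keyword, [values]) entries."""
    grouped = {}
    for key, value in pairs:
        grouped.setdefault(key, []).append(value)
    return list(grouped.items())

print(group_filters([('subject', 'Science'), ('subject', 'Math'), ('language', 'en')]))
# [('subject', ['Science', 'Math']), ('language', ['en'])]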
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>

# <codecell>

import numpy as np
import pandas as pd
import sys
import os, os.path
sys.path.append('/home/will/PatientPicker/')
import LoadingTools

# <codecell>

redcap = LoadingTools.load_redcap_data().set_index(['Patient ID', 'VisitNum'])

# <codecell>

admit_no_drugs = [('Current-Drug-Use-NO', redcap['Current Drug use'] == 'No'),
                  ('Current-Drug-Use-NEVER', redcap['Current Drug use'] == 'Never'),
                  ('Date-Stopped-Drug-Use', redcap['Date Stopped Drug Use'] < redcap['Date Of Visit']),
                  ('Drug-Use-And-HIV-Status-BEFORE', redcap['Drug Use And HIV Status'] == 'Used after HIV+')]

test_cols = [col for col in redcap.columns if col.startswith('Test-')]
admit_cols = [col for col in redcap.columns if (col.startswith('Admit-') and ('None' not in col))]

ever_test = redcap[test_cols].groupby(level='Patient ID').agg('any')
admit_no_drug_df = pd.concat([redcap[admit_cols], pd.DataFrame(dict(admit_no_drugs))], axis=1)

# <codecell>

tmp = pd.concat(admit_no_drug_df.align(ever_test, axis=0, level='Patient ID'), axis=1)
tmp.iloc[100:105].T

# <codecell>

def fix_num(num):
    if num < 1:
        return -1 / num
    else:
        return num

res = []
tmp['Test-Anything'] = tmp[test_cols].any(axis=1)
ntest_cols = [col for col in tmp.columns if col.startswith('Test-')]
nadmit_cols = [col for col in tmp.columns if not col.startswith('Test-')]
for col in nadmit_cols:
    say_yes = tmp[col]
    yes_frac = tmp[ntest_cols][say_yes].mean()
    no_frac = tmp[ntest_cols][~say_yes].mean()
    odds_r = yes_frac / no_frac
    res.append(odds_r.to_dict())
    res[-1]['Col'] = col

nres = pd.DataFrame(res).set_index('Col').applymap(fix_num)

# <codecell>

print nres

# <codecell>

#nres.to_excel('/home/will/DrugStuff/admit_explanations.xlsx')

# <codecell>

from sklearn.cross_validation import cross_val_score, StratifiedShuffleSplit
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier

X = tmp[nadmit_cols]
y_mat = tmp[ntest_cols]
classifiers = [('Naive-Bayes', GaussianNB()),
               ('Logistic-Regression', LogisticRegression()),
               ('Decision-Tree', DecisionTreeClassifier()),
               ('Adaboost', AdaBoostClassifier()),
               ]
tres = []
for col in ntest_cols:
    if len(y_mat[col].dropna().unique()) < 2:
        continue
    for name, cl in classifiers:
        vals = cross_val_score(cl, X.values,
                               y=y_mat[col].values,
                               cv=StratifiedShuffleSplit(y_mat[col].values),
                               scoring='roc_auc')
        tres.append({'Col': col, 'Predictor': name, 'Accuracy': np.mean(vals)})

tdf = pd.pivot_table(pd.DataFrame(tres), rows='Col', cols='Predictor', values='Accuracy')

# <codecell>

tdf

# <codecell>
JudoWill/ResearchNotebooks
AdmitExplanation.py
Python
mit
2,967
[ "VisIt" ]
3c3527bfbf4bf195ed56c1119d72452321d6b955601f0fd100577466d731ec4a
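The notebook above imports the long-removed sklearn.cross_validation module. Here is a minimal, self-contained sketch of the same cross-validation loop against the current sklearn.model_selection API; the data is a synthetic stand-in for the admit columns and test outcomes.

import numpy as np
from sklearn.model_selection import cross_val_score, StratifiedShuffleSplit
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))                          # stand-in predictors
y = (X[:, 0] + rng.normal(size=200) > 0).astype(int)   # stand-in binary outcome

# In the modern API the splitter is configured up front instead of taking y.
cv = StratifiedShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
scores = cross_val_score(LogisticRegression(), X, y, cv=cv, scoring='roc_auc')
print(scores.mean())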
CONTAINER = [ "Docker", "Rkt", "LXC", "Vagrant", "OpenVZ", "Kubernetes" ] LICENSES = [ "Apache License, 2.0 (Apache-2.0)", "The BSD 3-Clause License", "The BSD 2-Clause License", "GNU General Public License (GPL)", "General Public License (LGPL)", "MIT License (MIT)", "Mozilla Public License 2.0 (MPL-2.0)", "Common Development and Distribution License (CDDL-1.0)", "Eclipse Public License (EPL-1.0)" ] PROGRAMMING_LANGS = [ "ASP", "Assembly", "AutoIt", "Awk", "Bash", "C", "C Shell", "C#", "C++", "Caml", "Ceylon", "Clojure", "CoffeeScript", "Common Lisp", "D", "Dart", "Delphi", "Dylan", "ECMAScript", "Elixir", "Emacs Lisp", "Erlang", "F#", "Falcon", "Fortran", "GNU Octave", "Go", "Groovy", "Haskell", "haXe", "Io", "J#", "Java", "JavaScript", "Julia", "Kotlin", "Lisp", "Lua", "Mathematica", "Objective-C", "OCaml", "Perl", "PHP", "PL-I", "PL-SQL", "PowerShell", "Prolog", "Python", "R", "Racket", "Ruby", "Rust", "Scala", "Scheme", "Smalltalk", "Tcl", "Tex", "Transact-SQL", "TypeScript", "Z shell" ] NOSQL = ( "Aerospike", "AllegroGraph", "Apache Cassandra", "CouchDB", "Couchbase", "Hyperdex", "InfiniteGraph", "MUMPS", "MemcacheDB", "Memcached", "MongoDB", "Neo4J", "Redis", "RethinkDB", "Riak" ) SQL = ( "MariaDB", "MySQL", "PostgreSQL", "Oracle DB", "SQLite", "DB2" ) BACKEND = [ "C#/ASP.NET", "Java", "Java/Tapestry", "Go/Beego", "Python/Bottle", "Python/CherryPy", "Erlang", "Erlang/ChicagoBoss", "Erlang/Cowboy", "Clojure", "Django", "Python/Django", "Python/Flask/Gunicorn", "Elixir/Phoenix", "Node/Express", "Python/Falcon", "Python/Flask", "Go/Gin", "Go/Martini", "Go/Gorilla", "Groovy/Grails", "PHP/Laravel", "Java/Ninja", "PHP/Yii 2", "Pyramid", "Python/Pyramid", "Python/SQLAlchemy", "Python/Twisted", "Python/web2py", "Go/Revel", "Ruby on Rails", "Ruby/Sinatra", "Java/Spring", "PHP/Symfony", "Python/Tornado", "PHP/Zend Framework" ] FRONTEND = [ "JavaScript/Angular 2", "JavaScript/Backbone", "Gulp/Bower/Sass", "JS/HTML/CSS/Canvas/SVG", "JavaScript/Ember", "JavaScript/React", "JavaScript/React/Redux", "JavaScript/React/Redux/Flux", "JS/HTML/CSS", "JavaScript/Highcharts.js", "JS/Twitter Bootstrap", "React/Canvas/SVG", "React/D3.js", 'React/Redux/Babel', "JavaScript/Reflux/Redux", "JS/Sass/Less/Gulp", "Stylus/Sass/Webpack", "JavaScript/Vue", "JavaScript/jQuery", "TypeScript/React", "TypeScript/CSS/HTML", 'Webpack/Grunt/PostCSS' ] OS = [ "Arch", "CentOS", "Debian", "Fedora", "FreeBSD", "Gentoo", "Kali", "Lubuntu", "Manjaro", "Mint", "OS X", "OpenBSD", "PCLinuxOS", "Slackware", "Ubuntu", "Windows 10", "Windows 7", "Windows 8", "Windows 8.1", "Zorin", "elementaryOS", "macOS", "openSUSE" ] CSS_PROPERTIES = { "background-color": "color", "border-color": "color", "color": "color", "display": [ "block", "none", "inline" ], "font-size": "size", "font-style": [ "inherit", "initial", "italic", "normal", "oblique" ], "position": [ "absolute", "fixed", "inherit", "initial", "relative", "static" ], "margin-bottom": "size", "margin-left": "size", "padding-right": "size", "padding-top": "size", "pointer": [ "crosshair", "help", "pointer", "progress" ], "text-align": [ "center", "inherit", "initial", "justify", "left", "right" ], "width": "size" } CSS_SELECTORS = [ ".", "#" ] CSS_SIZE_UNITS = [ "em", "pt", "px" ] HTML_CONTAINER_TAGS = { "a": { "class": "word", "href": "url", "id": "word", "style": "css", "target": [ "_blank", "_parent", "_top" ] }, "div": { "class": "word", "id": "word", "style": "css" }, "p": { "class": "word", "id": "word", "style": "css" }, "span": { "class": "word", "id": "word",
"style": "css" } } HTML_MARKUP_TAGS = [ "b", "em", "i", "small", "strong" ] FOLDERS = ( 'Development', 'Downloads', 'Documents', 'Music', 'Video', 'Work', 'Pictures', 'Desktop', 'Study' ) PROJECT_NAMES = [ "aardonyx", "abelisaurus", "achelousaurus", "achillobator", "acrocanthosaurus", "aegyptosaurus", "afrovenator", "agilisaurus", "alamosaurus", "albertaceratops", "albertosaurus", "alectrosaurus", "alioramus", "allosaurus", "alvarezsaurus", "amargasaurus", "ammosaurus", "ampelosaurus", "amygdalodon", "anatotitan", "anchiceratops", "anchisaurus", "ankylosaurus", "anserimimus", "antarctopelta", "antarctosaurus", "apatosaurus", "aragosaurus", "aralosaurus", "archaeoceratops", "archaeopteryx", "archaeornithomimus", "argentinosaurus", "arrhinoceratops", "atlascopcosaurus", "aucasaurus", "austrosaurus", "avaceratops", "avalonia", "avimimus", "azendohsaurus", "bactrosaurus", "bagaceratops", "bambiraptor", "barapasaurus", "barosaurus", "baryonyx", "becklespinax", "beipiaosaurus", "bellusaurus", "borogovia", "brachiosaurus", "brachyceratops", "bugenasaura", "buitreraptor", "camarasaurus", "camptosaurus", "carnotaurus", "caudipteryx", "cedarpelta", "centrosaurus", "ceratosaurus", "cetiosauriscus", "cetiosaurus", "chaoyangsaurus", "chasmosaurus", "chialingosaurus", "chindesaurus", "chinshakiangosaurus", "chirostenotes", "chubutisaurus", "chungkingosaurus", "citipati", "coelophysis", "coelurus", "coloradisaurus", "compsognathus", "conchoraptor", "confuciusornis", "corythosaurus", "cryolophosaurus", "dacentrurus", "daspletosaurus", "datousaurus", "deinocheirus", "deinonychus", "deltadromeus", "diceratops", "dicraeosaurus", "dilophosaurus", "diplodocus", "dracorex", "dravidosaurus", "dromaeosaurus", "dromiceiomimus", "dryosaurus", "dryptosaurus", "dubreuillosaurus", "edmontonia", "edmontosaurus", "einiosaurus", "elaphrosaurus", "emausaurus", "eolambia", "eoraptor", "eotyrannus", "equijubus", "erketu", "erlikosaurus", "euhelopus", "euoplocephalus", "europasaurus", "euskelosaurus", "eustreptospondylus", "fukuiraptor", "fukuisaurus", "gallimimus", "gargoyleosaurus", "garudimimus", "gasosaurus", "gasparinisaura", "gastonia", "giganotosaurus", "gilmoreosaurus", "giraffatitan", "gobisaurus", "gorgosaurus", "goyocephale", "graciliceratops", "gryposaurus", "guaibasaurus", "guanlong", "hadrosaurus", "hagryphus", "haplocanthosaurus", "harpymimus", "herrerasaurus", "hesperosaurus", "heterodontosaurus", "homalocephale", "huayangosaurus", "hylaeosaurus", "hypacrosaurus", "hypselosaurus", "hypsilophodon", "iguanodon", "indosuchus", "ingenia", "irritator", "isisaurus", "janenschia", "jaxartosaurus", "jingshanosaurus", "jinzhousaurus", "jobaria", "juravenator", "kentrosaurus", "khaan", "kotasaurus", "kritosaurus", "lamaceratops", "lambeosaurus", "lapparentosaurus", "leaellynasaura", "leptoceratops", "lesothosaurus", "lexovisaurus", "liaoceratops", "liaoxiornis", "ligabuesaurus", "liliensternus", "lophorhothon", "lophostropheus", "lufengosaurus", "lurdusaurus", "lycorhinus", "magyarosaurus", "maiasaura", "majungatholus", "malawisaurus", "mamenchisaurus", "mapusaurus", "marshosaurus", "masiakasaurus", "massospondylus", "maxakalisaurus", "megalosaurus", "melanorosaurus", "metriacanthosaurus", "microceratops", "micropachycephalosaurus", "microraptor", "minmi", "monolophosaurus", "mononykus", "mussaurus", "muttaburrasaurus", "nanotyrannus", "nanshiungosaurus", "nemegtosaurus", "neovenator", "neuquenosaurus", "nigersaurus", "nipponosaurus", "noasaurus", "nodosaurus", "nomingia", "nothronychus", "nqwebasaurus", "omeisaurus", 
"ornitholestes", "ornithomimus", "orodromeus", "oryctodromeus", "othnielia", "ouranosaurus", "oviraptor", "rebbachisaurus", "rhabdodon", "rhoetosaurus", "rinchenia", "riojasaurus", "rugops", "saichania", "saltasaurus", "saltopus", "sarcosaurus", "saurolophus", "sauropelta", "saurophaganax", "saurornithoides", "scelidosaurus", "scutellosaurus", "secernosaurus", "segisaurus", "segnosaurus", "seismosaurus", "shamosaurus", "shanag", "shantungosaurus", "shunosaurus", "shuvuuia", "silvisaurus", "sinocalliopteryx", "sinornithosaurus", "sinosauropteryx", "sinraptor", "sinvenator", "zalmoxes", "zephyrosaurus", "zuniceratops", "byzantine", "svengali", "accolade", "acrimony", "angst", "anomaly", "antidote", "baroque", "bona_fide", "bourgeois", "bravado", "brogue", "brusque", "cacophony", "caustic", "charisma", "cloying", "deja-vu", "dichotomy", "elan", "ennui", "epitome", "esoteric", "euphemism", "faux pas", "fiasco", "finagle", "glib", "harbinger", "hedonist", "heresy", "idyllic", "insidious", "junket", "kitsch", "litany", "lurid", "malaise", "malinger", "mantra", "maudlin", "mercenary", "misnomer", "nirvana", "oblivion", "ogle", "ostracize", "panacea", "paradox", "peevish", "propriety", "revel", "rhetoric", "spartan", "stigma", "stoic", "suave", "sycophant", "tirade", "tryst", "untenable", "vicarious", "vile", "waft", "zealous" ]
wikkiewikkie/elizabeth
elizabeth/intd/dev.py
Python
mit
10,967
[ "GULP" ]
55101c4e29e6be65beae71f5302323fdf4dec43a466da446b2e6e4c0d06801d9
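A hypothetical sketch of how a fake-data provider might draw from the constant pools in the record above; the random_stack() helper is illustrative only and is not part of the elizabeth API.

import random

def random_stack():
    # Draw one entry from each pool; SQL and NOSQL are tuples, the others
    # lists, and random.choice accepts both.
    return {
        'backend': random.choice(BACKEND),
        'frontend': random.choice(FRONTEND),
        'database': random.choice(SQL + NOSQL),
        'container': random.choice(CONTAINER),
        'os': random.choice(OS),
    }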
#BEGIN_HEADER
import sys
import os
import glob
import json
#sys.path.insert(0, '/kb/dev_container/modules/genome_util/lib/biokbase/genome_util')
import script_util
#from biokbase.workspace.client import Workspace
#from workspace.client import Workspace
#END_HEADER


class KBaseGenomeUtil:
    '''
    Module Name:
    KBaseGenomeUtil

    Module Description:
    '''

    ######## WARNING FOR GEVENT USERS #######
    # Since asynchronous IO can lead to methods - even the same method -
    # interrupting each other, you must be *very* careful when using global
    # state. A method could easily clobber the state set by another while
    # the latter method is running.
    #########################################
    #BEGIN_CLASS_HEADER
    #END_CLASS_HEADER

    # config contains contents of config file in a hash or None if it couldn't
    # be found
    def __init__(self, config):
        #BEGIN_CONSTRUCTOR
        #END_CONSTRUCTOR
        pass

    def blast_against_genome(self, ctx, params):
        # ctx is the context object
        # return variables are: returnVal
        #BEGIN blast_against_genome
        if len(params['query']) > 5:
            sequence = params['query']
        else:
            # error: the query sequence is too short to search with
            raise ValueError("Query sequence is too short")
        #else:
        #sequence=script_util.get_seq(params['gene_id'])
        #sequence=(params['gene_id'])

        genome_id = 'Bifidobacterium_animalis_subsp._lactis_AD011'
        workspaceid = 'plane83:1436884411390'

        #print "generate input file\n"
        target = open('tmp_seq', 'w')
        target.write(">")
        target.write("input_seq\n")
        target.write(sequence)
        target.close()

        #print "downloading genome object from workspace\n"
        # pass the variables themselves, not their names as strings
        genome = script_util.get_genome(genome_id, workspaceid, ctx['token'])
        #print "finished downloading\n"

        # extract sequences from the genome object
        with open('tmp_data', 'w') as outfile:
            json.dump(genome, outfile)
        res1 = open('tmp_data').read()
        res = json.loads(res1)
        os.remove('tmp_data')

        #print "making dir\n"
        if os.path.exists('blast_db'):
            files = glob.glob('blast_db/*')
            for f in files:
                os.remove(f)
        if not os.path.exists('blast_db'):
            os.makedirs('blast_db')

        target = open('blast_db/tmp_genome_fasta', 'w')
        for gene in res['data']['features']:
            if 'protein_translation' in gene.keys():
                target.write(">")
                target.write(gene['id'])
                target.write("\n")
                target.write(gene['protein_translation'])
                target.write("\n")
        target.close()

        #print "formatdb..\n"
        # format database for blast, then run the search
        os.system("formatdb -i blast_db/tmp_genome_fasta -p T")
        os.system("blastall -p blastp -i tmp_seq -m 9 -o tmp_out -d blast_db/tmp_genome_fasta")
        os.remove('tmp_seq')

        res = script_util.extract_blast_output('tmp_out')
        os.remove('tmp_out')
        returnVal = res
        #END blast_against_genome

        # At some point might do deeper type checking...
        if not isinstance(returnVal, basestring):
            raise ValueError('Method blast_against_genome return value ' +
                             'returnVal is not type basestring as required.')
        # return the results
        return [returnVal]
plane83/genome_util
lib/biokbase/genome_util/KBaseGenomeUtilImpl.py
Python
mit
3,110
[ "BLAST" ]
fc6a3e376f030efa39b7a77901529e345a69c513cf8aa7b568e77189a35afa6a
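The os.system calls in the record above are fragile (no error checking, shell string splicing). Below is a sketch of the same legacy-BLAST invocation via subprocess; the paths and the formatdb/blastall tool names are assumptions carried over from the record.

import subprocess

def run_blastp(query_fasta, db_fasta, out_path):
    # Build the protein BLAST database, then run blastp in tabular mode (-m 9),
    # mirroring the formatdb/blastall calls in the record above.
    subprocess.check_call(['formatdb', '-i', db_fasta, '-p', 'T'])
    subprocess.check_call(['blastall', '-p', 'blastp', '-i', query_fasta,
                           '-d', db_fasta, '-m', '9', '-o', out_path])

# run_blastp('tmp_seq', 'blast_db/tmp_genome_fasta', 'tmp_out')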
import os import struct from io import StringIO import vstruct import vstruct.defs.pe as vs_pe from . import ordlookup IMAGE_FILE_RELOCS_STRIPPED = 0x0001 IMAGE_FILE_EXECUTABLE_IMAGE = 0x0002 IMAGE_FILE_LINE_NUMS_STRIPPED = 0x0004 IMAGE_FILE_LOCAL_SYMS_STRIPED = 0x0008 IMAGE_FILE_AGGRESSIVE_WS_TRIM = 0x00010 IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x00020 # 0x0040 is reserved for future use IMAGE_FILE_BYTES_REVERSED_LO = 0x0080 IMAGE_FILE_32BIT_MACHINE = 0x0100 IMAGE_FILE_DEBUG_STRIPPED = 0x0200 IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP = 0x0400 IMAGE_FILE_NET_RUN_FROM_SWAP = 0x0800 IMAGE_FILE_SYSTEM = 0x1000 IMAGE_FILE_DLL = 0x2000 IMAGE_FILE_UP_SYSTEM_ONLY = 0x4000 IMAGE_FILE_REVERSED_HI = 0x8000 IMAGE_DLLCHARACTERISTICS_RESERVED_1 = 1 IMAGE_DLLCHARACTERISTICS_RESERVED_2 = 2 IMAGE_DLLCHARACTERISTICS_RESERVED_4 = 4 IMAGE_DLLCHARACTERISTICS_RESERVED_8 = 8 IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE = 0x0040 # The DLL can be relocated at load time. IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY = 0x0080 # Code integrity checks are forced. If you set this flag and a section contains only uninitialized data, set the PointerToRawData member of IMAGE_SECTION_HEADER for that section to zero; otherwise, the image will fail to load because the digital signature cannot be verified. IMAGE_DLLCHARACTERISTICS_NX_COMPAT = 0x0100 # The image is compatible with data execution prevention (DEP). IMAGE_DLLCHARACTERISTICS_NO_ISOLATION = 0x0200 # The image is isolation aware, but should not be isolated. IMAGE_DLLCHARACTERISTICS_NO_SEH = 0x0400 # The image does not use structured exception handling (SEH). No handlers can be called in this image. IMAGE_DLLCHARACTERISTICS_NO_BIND = 0x0800 # Do not bind the image. IMAGE_DLLCHARACTERISTICS_RESERVED_1000 = 0x1000 # Reserved IMAGE_DLLCHARACTERISTICS_WDM_DRIVER = 0x2000 # A WDM driver. IMAGE_DLLCHARACTERISTICS_RESERVED_4000 = 0x4000 # Reserved IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE = 0x8000 IMAGE_SUBSYSTEM_UNKNOWN = 0 #Unknown subsystem. IMAGE_SUBSYSTEM_NATIVE = 1 #No subsystem required (device drivers and native system processes). IMAGE_SUBSYSTEM_WINDOWS_GUI = 2 #Windows graphical user interface (GUI) subsystem. IMAGE_SUBSYSTEM_WINDOWS_CUI = 3 #Windows character-mode user interface (CUI) subsystem. IMAGE_SUBSYSTEM_OS2_CUI = 5 #OS/2 CUI subsystem. IMAGE_SUBSYSTEM_POSIX_CUI = 7 #POSIX CUI subsystem. IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 9 #Windows CE system. IMAGE_SUBSYSTEM_EFI_APPLICATION = 10 #Extensible Firmware Interface (EFI) application. IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER = 11 #EFI driver with boot services. IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER = 12 #EFI driver with run-time services. IMAGE_SUBSYSTEM_EFI_ROM = 13 #EFI ROM image. IMAGE_SUBSYSTEM_XBOX = 14 #Xbox system. IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION = 16 #Boot application. 
IMAGE_FILE_MACHINE_I386 = 0x014c IMAGE_FILE_MACHINE_IA64 = 0x0200 IMAGE_FILE_MACHINE_AMD64 = 0x8664 IMAGE_FILE_MACHINE_ARM = 0x1c0 IMAGE_FILE_MACHINE_ARM64 = 0xaa64 IMAGE_FILE_MACHINE_ARMNT = 0x1c4 # ARMv7 or higher thumb mode only IMAGE_FILE_MACHINE_THUMB = 0x1c2 # interworking arm/thumb machine_names = { IMAGE_FILE_MACHINE_I386: 'i386', IMAGE_FILE_MACHINE_IA64: 'ia64', IMAGE_FILE_MACHINE_AMD64: 'amd64', IMAGE_FILE_MACHINE_ARM: 'arm', IMAGE_FILE_MACHINE_ARM64: 'arm64', IMAGE_FILE_MACHINE_ARMNT: 'thumb', IMAGE_FILE_MACHINE_THUMB: 'thumb16', } IMAGE_REL_BASED_ABSOLUTE = 0 IMAGE_REL_BASED_HIGH = 1 IMAGE_REL_BASED_LOW = 2 IMAGE_REL_BASED_HIGHLOW = 3 IMAGE_REL_BASED_HIGHADJ = 4 IMAGE_REL_BASED_MIPS_JMPADDR = 5 IMAGE_REL_BASED_IA64_IMM64 = 9 IMAGE_REL_BASED_DIR64 = 10 IMAGE_DIRECTORY_ENTRY_EXPORT = 0 # Export Directory IMAGE_DIRECTORY_ENTRY_IMPORT = 1 # Import Directory IMAGE_DIRECTORY_ENTRY_RESOURCE = 2 # Resource Directory IMAGE_DIRECTORY_ENTRY_EXCEPTION = 3 # Exception Directory IMAGE_DIRECTORY_ENTRY_SECURITY = 4 # Security Directory IMAGE_DIRECTORY_ENTRY_BASERELOC = 5 # Base Relocation Table IMAGE_DIRECTORY_ENTRY_DEBUG = 6 # Debug Directory IMAGE_DIRECTORY_ENTRY_COPYRIGHT = 7 # (X86 usage) IMAGE_DIRECTORY_ENTRY_ARCHITECTURE = 7 # Architecture Specific Data IMAGE_DIRECTORY_ENTRY_GLOBALPTR = 8 # RVA of GP IMAGE_DIRECTORY_ENTRY_TLS = 9 # TLS Directory IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG = 10 # Load Configuration Directory IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT = 11 # Bound Import Directory in headers IMAGE_DIRECTORY_ENTRY_IAT = 12 # Import Address Table IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT = 13 # Delay Load Import Descriptors IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR = 14 # COM Runtime descriptor IMAGE_DEBUG_TYPE_UNKNOWN = 0 IMAGE_DEBUG_TYPE_COFF = 1 IMAGE_DEBUG_TYPE_CODEVIEW = 2 IMAGE_DEBUG_TYPE_FPO = 3 IMAGE_DEBUG_TYPE_MISC = 4 IMAGE_DEBUG_TYPE_EXCEPTION = 5 IMAGE_DEBUG_TYPE_FIXUP = 6 IMAGE_DEBUG_TYPE_OMAP_TO_SRC = 7 IMAGE_DEBUG_TYPE_OMAP_FROM_SRC = 8 IMAGE_DEBUG_TYPE_BORLAND = 9 IMAGE_DEBUG_TYPE_RESERVED10 = 10 IMAGE_DEBUG_TYPE_CLSID = 11 IMAGE_SCN_CNT_CODE = 0x00000020 IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040 IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080 IMAGE_SCN_LNK_OTHER = 0x00000100 IMAGE_SCN_LNK_INFO = 0x00000200 IMAGE_SCN_LNK_REMOVE = 0x00000800 IMAGE_SCN_LNK_COMDAT = 0x00001000 IMAGE_SCN_MEM_FARDATA = 0x00008000 IMAGE_SCN_MEM_PURGEABLE = 0x00020000 IMAGE_SCN_MEM_16BIT = 0x00020000 IMAGE_SCN_MEM_LOCKED = 0x00040000 IMAGE_SCN_MEM_PRELOAD = 0x00080000 IMAGE_SCN_ALIGN_1BYTES = 0x00100000 IMAGE_SCN_ALIGN_2BYTES = 0x00200000 IMAGE_SCN_ALIGN_4BYTES = 0x00300000 IMAGE_SCN_ALIGN_8BYTES = 0x00400000 IMAGE_SCN_ALIGN_16BYTES = 0x00500000 IMAGE_SCN_ALIGN_32BYTES = 0x00600000 IMAGE_SCN_ALIGN_64BYTES = 0x00700000 IMAGE_SCN_ALIGN_128BYTES = 0x00800000 IMAGE_SCN_ALIGN_256BYTES = 0x00900000 IMAGE_SCN_ALIGN_512BYTES = 0x00A00000 IMAGE_SCN_ALIGN_1024BYTES = 0x00B00000 IMAGE_SCN_ALIGN_2048BYTES = 0x00C00000 IMAGE_SCN_ALIGN_4096BYTES = 0x00D00000 IMAGE_SCN_ALIGN_8192BYTES = 0x00E00000 IMAGE_SCN_ALIGN_MASK = 0x00F00000 IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000 IMAGE_SCN_MEM_DISCARDABLE = 0x02000000 IMAGE_SCN_MEM_NOT_CACHED = 0x04000000 IMAGE_SCN_MEM_NOT_PAGED = 0x08000000 IMAGE_SCN_MEM_SHARED = 0x10000000 IMAGE_SCN_MEM_EXECUTE = 0x20000000 IMAGE_SCN_MEM_READ = 0x40000000 IMAGE_SCN_MEM_WRITE = 0x80000000 # Flags for the UNWIND_INFO flags field from # RUNTIME_FUNCTION defs UNW_FLAG_NHANDLER = 0x0 UNW_FLAG_EHANDLER = 0x1 UNW_FLAG_UHANDLER = 0x2 UNW_FLAG_CHAININFO = 0x4 # Resource Types RT_CURSOR = 1 RT_BITMAP = 2 RT_ICON 
= 3 RT_MENU = 4 RT_DIALOG = 5 RT_STRING = 6 RT_FONTDIR = 7 RT_FONT = 8 RT_ACCELERATOR = 9 RT_RCDATA = 10 RT_MESSAGETABLE = 11 RT_GROUP_CURSOR = 12 RT_GROUP_ICON = 14 RT_VERSION = 16 RT_DLGINCLUDE = 17 RT_PLUGPLAY = 19 RT_VXD = 20 RT_ANICURSOR = 21 RT_ANIICON = 22 RT_HTML = 23 RT_MANIFEST = 24 class VS_VERSIONINFO: ''' A simple (read-only) VS_VERSIONINFO parser ''' def __init__(self, bytes): self._version_info = {} self._fixed_file_info = None self._parseBytes(bytes) def getVersionValue(self, key, default=None): ''' Retrieve a key from the VS_VERSIONINFO data. Example: vs.getVersionValue('FileVersion') ''' return self._version_info.get(key, default) def getVersionKeys(self): ''' Return a list of the keys in this VS_VERSIONINFO struct. Example: for keyname in vs.getVersionKeys(): print(keyname) ''' return self._version_info.keys() def getVersionItems(self): ''' Return dictionary style key,val tuples for the version keys in this VS_VERSIONINFO structure. Example: for vskey,vsdata in vs.getVersionItems(): print(vskey,vsdata) ''' return self._version_info.items() def _parseBytes(self, bytes): offset = 0 mysize, valsize, vstype = struct.unpack('<HHH', bytes[:6]) offset += 6 offset, vinfosig = self._eatStringAndAlign(bytes, offset) if vinfosig != 'VS_VERSION_INFO': Exception('Invalid VS_VERSION_INFO signature!: %s' % repr(vinfosig)) if valsize and valsize >= len(vs_pe.VS_FIXEDFILEINFO()): ffinfo = vs_pe.VS_FIXEDFILEINFO() ffinfo.vsParse(bytes[offset:offset+valsize]) self._fixed_file_info = ffinfo offset += valsize offmod = offset % 4 if offmod: offset += (4 - offmod) xmax = min(mysize, len(bytes)) i = 0 while offset < xmax and i < 2: offset = self._stringFileInfo(bytes, offset) i += 1 def _eatStringAndAlign(self, bytes, offset): ret = b'' blen = len(bytes) while bytes[offset:offset+2] != b'\x00\x00': ret += bytes[offset:offset+2] offset += 2 if offset >= blen: break # Add 2 for the null terminator offset += 2 offmod = offset % 4 if offmod: offset += (4 - offmod) return offset, ret.decode('utf-16le') def _stringFileInfo(self, bytes, offset): xoffset = offset mysize, valsize, valtype = struct.unpack('<HHH', bytes[xoffset:xoffset+6]) xoffset += 6 xoffset, sigstr = self._eatStringAndAlign(bytes, xoffset) #if sigstr not in ('VarFileInfo','StringFileInfo'): #raise Exception('Invalid StringFileInfo Key!: %s' % repr(sigstr)) xmax = offset + mysize if sigstr == 'StringFileInfo': while xoffset < xmax: xoffset = self._stringTable(bytes, xoffset, mysize - (xoffset-offset)) elif sigstr == 'VarFileInfo': while xoffset < xmax: xoffset = self._varTable(bytes, xoffset, mysize - (xoffset-offset)) xmod = xoffset % 4 if xmod: xoffset += (4 - xmod) return xoffset def _varTable(self, bytes, offset, size): xmax = offset + size xoffset = offset mysize, valsize, valtype = struct.unpack('<HHH', bytes[xoffset:xoffset+6]) xoffset += 6 xoffset, varname = self._eatStringAndAlign(bytes, xoffset) if xoffset + 4 > len(bytes): return offset+size varval = struct.unpack('<I', bytes[xoffset:xoffset+4])[0] xoffset += 4 self._version_info[varname] = varval return offset + size def _stringTable(self, bytes, offset, size): xmax = offset + size xoffset = offset mysize, valsize, valtype = struct.unpack('<HHH', bytes[offset:offset+6]) xoffset += 6 xoffset, hexcpage = self._eatStringAndAlign(bytes, xoffset) while xoffset < xmax: xoffset = self._stringData(bytes, xoffset) if xoffset == -1: break xmod = xoffset % 4 if xmod: xoffset += (4 - xmod) return offset + size def _stringData(self, bytes, offset): ''' Parse out a "String" structure... 
''' xoffset = offset mysize, valsize, stype = struct.unpack('<HHH', bytes[offset:offset+6]) if mysize == 0: return -1 xoffset += 6 xoffset, strkey = self._eatStringAndAlign(bytes, xoffset) # valsize is in words... valsize *= 2 value = bytes[xoffset : xoffset + valsize ] # Do utf16le decode if we're "textual data" if stype == 1: value = value.decode('utf-16le','ignore') value = value.split('\x00')[0] self._version_info[strkey] = value # No matter what we parse, believe the headers... return offset + mysize class ResourceDirectory: ''' Resources are sorted into a hierarchy which begins with "type" and then "name/id" which still points to another directory entry which has 1 child (id 1033) with data. ''' def __init__(self, nameid=None): self._rsrc_data = [] self._rsrc_nameid = nameid self._rsrc_subdirs = {} def addRsrcDirectory(self, nameid): r = ResourceDirectory(nameid=nameid) self._rsrc_subdirs[nameid] = r return r def addRsrcData(self, rva, size, langinfo): self._rsrc_data.append( (rva, size, langinfo) ) def getDirById(self, name_id): return self._rsrc_subdirs.get(name_id) def getResourceDef(self, restype, name_id): ''' This should *only* be called on the root node! ''' typedir = self._rsrc_subdirs.get(restype) if typedir is None: return None datadir = typedir._rsrc_subdirs.get(name_id) if datadir is None: return None if len(datadir._rsrc_data) == 0: return None # The first entry in the datadir's data is the one return datadir._rsrc_data[0] def getDataEntries(self): return self._rsrc_data class PE(object): def __init__(self, fd, inmem=False): """ Construct a PE object. use inmem=True if you are using a MemObjFile or other "memory like" image. """ object.__init__(self) self.inmem = inmem self.filesize = None self.max_rva = None if not inmem: fd.seek(0, os.SEEK_END) self.filesize = fd.tell() fd.seek(0) self.fd = fd self.pe32p = False self.psize = 4 self.high_bit_mask = 0x80000000 self.IMAGE_DOS_HEADER = vstruct.getStructure("pe.IMAGE_DOS_HEADER") dosbytes = self.readAtOffset(0, len(self.IMAGE_DOS_HEADER)) self.IMAGE_DOS_HEADER.vsParse(dosbytes) nt = self.readStructAtOffset(self.IMAGE_DOS_HEADER.e_lfanew, "pe.IMAGE_NT_HEADERS") # Parse in a default 32 bit, and then check for 64... if nt.FileHeader.Machine in [ IMAGE_FILE_MACHINE_AMD64, IMAGE_FILE_MACHINE_IA64 ]: nt = self.readStructAtOffset(self.IMAGE_DOS_HEADER.e_lfanew, "pe.IMAGE_NT_HEADERS64") self.pe32p = True self.psize = 8 self.high_bit_mask = 0x8000000000000000 self.IMAGE_NT_HEADERS = nt def __del__(self): try: self.fd.close() except: pass # whatever. we're tearing down anyway def getPdataEntries(self): sec = self.getSectionByName('.pdata') if sec is None: return () ret = [] rbytes = self.readAtRva(sec.VirtualAddress, sec.VirtualSize) while len(rbytes): f = vs_pe.IMAGE_RUNTIME_FUNCTION_ENTRY() f.vsParse(rbytes) rbytes = rbytes[len(f):] ret.append(f) return ret def getDllName(self): ''' Return the "dll name" from the Name field of the IMAGE_EXPORT_DIRECTORY if one is present. If not, return None. ''' if self.IMAGE_EXPORT_DIRECTORY is not None: rawname = self.readAtRva(self.IMAGE_EXPORT_DIRECTORY.Name, 32) return rawname.split(b'\x00')[0].decode('utf-8') return None def getImports(self): """ Return the list of import tuples for this PE. The tuples are in the format (rva, libname, funcname). """ return self.imports def getDelayImports(self): """ Return the list of delay import tuples for this PE. The tuples are in the format (rva, libname, funcname). 
""" return self.delayImports def getExports(self): """ Return the list of exports in this PE. The list contains tuples in the format; (rva, ord, name). """ return self.exports def getForwarders(self): """ [ (rva, name, forwardname), ... ] """ return self.forwarders def getSections(self): return self.sections def rvaToOffset(self, rva): if self.inmem: return rva if rva >= 0 and rva < self.IMAGE_NT_HEADERS.OptionalHeader.SizeOfHeaders: return rva for s in self.sections: sbase = s.VirtualAddress if s.SizeOfRawData + s.PointerToRawData > self.getMaxRva(): # SizeOfRawData can be misleading. ssize = s.VirtualSize else: ssize = max(s.SizeOfRawData, s.VirtualSize) if rva >= sbase and rva < sbase + ssize: return s.PointerToRawData + (rva - sbase) return 0 def offsetToRva(self, offset): if self.inmem: return offset for s in self.sections: sbase = s.PointerToRawData if s.SizeOfRawData + s.PointerToRawData > self.getMaxRva(): # SizeOfRawData can be misleading. ssize = s.VirtualSize else: ssize = max(s.SizeOfRawData, s.VirtualSize) if sbase <= offset and offset < sbase + ssize: return offset - s.PointerToRawData + s.VirtualAddress return 0 def getSectionByName(self, name): for s in self.getSections(): if s.Name.split("\x00", 1)[0] == name: return s return None def readStructAtRva(self, rva, structname, check=False): s = vstruct.getStructure(structname) slen = len(s) if check and not self.checkRva(rva, size=slen): return None bytes = self.readAtRva(rva, len(s)) if not bytes: return None s.vsParse(bytes) return s def readStructAtOffset(self, offset, structname): s = vstruct.getStructure(structname) sbytes = self.readAtOffset(offset, len(s)) if not sbytes: return None s.vsParse(sbytes) return s def getDataDirectory(self, idx): return self.IMAGE_NT_HEADERS.OptionalHeader.DataDirectory[idx] def getResourceDef(self, rtype, name_id): ''' Get the (rva, size, (codepage,langid,sublangid)) tuple for the specified resource type/id combination. Returns None if not found. ''' return self.ResourceRoot.getResourceDef(rtype, name_id) def getResources(self): ''' Get the (rtype, nameid, (rva, size, (codepage,langid,sublangid))) tuples for each resource in the PE. ''' ret = [] for rtype,subdir in self.ResourceRoot._rsrc_subdirs.items(): for nameid, subsubdir in subdir._rsrc_subdirs.items(): ret.append( (rtype, nameid, subsubdir._rsrc_data[0]) ) return ret def readResource(self, rtype, name_id): ''' Return the bytes which define the specified resource. Returns None if not found. ''' rsdef = self.getResourceDef(rtype, name_id) if rsdef is None: return None rsrva, rssize, rscpage = rsdef return self.readAtRva(rsrva, rssize) def getPdbPath(self): ''' Parse and return the Pdb path from the Code View 4.0 data specified by the IMAGE_DEBUG_DIRECTORY strucutre, or None if a pdb path is not present. ''' ddir = self.getDataDirectory(IMAGE_DIRECTORY_ENTRY_DEBUG) drva = ddir.VirtualAddress dsize = ddir.Size d = self.readStructAtRva(drva, 'pe.IMAGE_DEBUG_DIRECTORY', check=True) if d is None: return None if d.Type != IMAGE_DEBUG_TYPE_CODEVIEW: return None if not self.checkRva(d.AddressOfRawData, size=d.SizeOfData): return None cv = vs_pe.CV_INFO_PDB70() cv.vsParse( self.readAtRva(d.AddressOfRawData, d.SizeOfData)) if cv.CvSignature != 0x53445352: return None return cv.PdbFileName def getVS_VERSIONINFO(self): ''' Get a VS_VERSIONINFO object for this PE. 
(returns None if version resource is not found) ''' vbytes = self.readResource(RT_VERSION, 1) if vbytes is None: return None return VS_VERSIONINFO(vbytes) def parseResources(self): self.ResourceRoot = ResourceDirectory() # RP BUG FIX - Binaries can have a .rsrc section it doesn't mean that the .rsrc section contains the resource data we think it does # validate .rsrc == RESOURCE Section by checking data directory entries... dresc = self.getDataDirectory(IMAGE_DIRECTORY_ENTRY_RESOURCE) if not dresc.VirtualAddress: return done = {} rsrc_todo = [ (dresc.VirtualAddress, self.ResourceRoot), ] while len(rsrc_todo): rsrva, rsdirobj = rsrc_todo.pop() rsdir = self.readStructAtRva( rsrva, 'pe.IMAGE_RESOURCE_DIRECTORY', check=True ) if rsdir is None: continue totcount = rsdir.NumberOfIdEntries + rsdir.NumberOfNamedEntries # check if our to do is too many, limit borrowed from pefile if totcount > 4096: continue offset = len(rsdir) for i in range(totcount): dentrva = rsrva + offset dirent = self.readStructAtRva( dentrva, 'pe.IMAGE_RESOURCE_DIRECTORY_ENTRY', check=True ) if dirent is None: break # We use name/id interchangably in the python dict... name_id = None if dirent.Name & 0x80000000: # If high bit is set, it's a string! namerva = dresc.VirtualAddress + (dirent.Name & 0x7fffffff) namelen_bytes = self.readAtRva(namerva, 2) if not namelen_bytes: continue namelen = struct.unpack('<H', namelen_bytes)[0] name_raw = self.readAtRva(namerva + 2, namelen * 2) if not name_raw: continue name_id = name_raw.decode('utf-16le', 'ignore') if not name_id: name_id = dirent.Name else: name_id = dirent.Name # if OffsetToData & IMAGE_RESOURCE_DATA_IS_DIRECTORY then we have another directory if dirent.OffsetToData & 0x80000000: # This points to a subdirectory subdir = rsdirobj.addRsrcDirectory(name_id) doffset = dirent.OffsetToData & 0x7fffffff drva = dresc.VirtualAddress + doffset # XXX - prevent infinite loop by making sure the RVA isnt in our list to visit # and we aren't currently examining it. 
if doffset and rsrva != drva and not done.get(drva): rsrc_todo.append( (drva, subdir) ) done[drva] = 1 else: subdata = self.readStructAtRva( dresc.VirtualAddress + dirent.OffsetToData, 'pe.IMAGE_RESOURCE_DATA_ENTRY') # RP BUG FIX - sanity check the subdata if subdata and self.checkRva(subdata.OffsetToData, size=subdata.Size): # sometimes people are bad and they lie to us try: langid = name_id & 0x3ff sublangid = name_id >> 10 except: langid = None sublangid = None langinfo = (subdata.CodePage, langid, sublangid) rsdirobj.addRsrcData(subdata.OffsetToData, subdata.Size, langinfo) offset += len(dirent) def parseSections(self): self.sections = [] off = self.IMAGE_DOS_HEADER.e_lfanew + len(self.IMAGE_NT_HEADERS) off -= len(self.IMAGE_NT_HEADERS.OptionalHeader.DataDirectory) off += self.IMAGE_NT_HEADERS.OptionalHeader.NumberOfRvaAndSizes * len(vstruct.getStructure("pe.IMAGE_DATA_DIRECTORY")) secsize = len(vstruct.getStructure("pe.IMAGE_SECTION_HEADER")) sbytes = self.readAtOffset(off, secsize * self.IMAGE_NT_HEADERS.FileHeader.NumberOfSections) while sbytes: s = vstruct.getStructure("pe.IMAGE_SECTION_HEADER") s.vsParse(sbytes[:secsize]) self.sections.append(s) sbytes = sbytes[secsize:] def readRvaFormat(self, fmt, rva): size = struct.calcsize(fmt) fbytes = self.readAtRva(rva, size) return struct.unpack(fmt, fbytes) def readAtRva(self, rva, size, shortok=False): offset = self.rvaToOffset(rva) return self.readAtOffset(offset, size, shortok) def readAtOffset(self, offset, size, shortok=False): ret = b"" self.fd.seek(offset) while len(ret) != size: rlen = size - len(ret) x = self.fd.read(rlen) if x == b"": if not shortok: return None return ret ret += x return ret def parseLoadConfig(self): self.IMAGE_LOAD_CONFIG = None cdir = self.getDataDirectory(IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG) rva = cdir.VirtualAddress # RP BUG FIX - validate config directory if self.checkRva(rva, size=cdir.Size): self.IMAGE_LOAD_CONFIG = self.readStructAtRva(rva, "pe.IMAGE_LOAD_CONFIG_DIRECTORY") def readPointerAtOffset(self, off): fmt = "<L" if self.psize == 8: fmt = "<Q" return struct.unpack(fmt, self.readAtOffset(off, self.psize))[0] def readPointerAtRva(self, rva): off = self.rvaToOffset(rva) return self.readPointerAtOffset(off) def getMaxRva(self): ''' Maximum RVA is the largest virtual address that might be observed. ''' if not self.max_rva: max_sec = 0 for sec in self.getSections(): sec_end = sec.VirtualAddress + sec.VirtualSize align = self.IMAGE_NT_HEADERS.OptionalHeader.SectionAlignment if (align > 0): sec_end = align * (int(sec_end / align) + 1) max_sec = max(max_sec, sec_end) self.max_rva = max_sec return self.max_rva def checkRva(self, rva, size=None): ''' Make sure an RVA falls inside the valid mapped range for the file. (also make sure it's not 0...) ''' if rva == 0: return False isize = self.getMaxRva() if rva > isize: #raise Exception('too high! %d > %d' % (rva, isize)) return False if size is not None and (rva + size) > isize: #raise Exception('too big! 
%d > %d' % (rva+size, isize)) return False return True def readStringAtRva(self, rva, maxsize=None): ret = b'' while True: if maxsize and maxsize <= len(ret): break x = self.readAtRva(rva, 1) if x == b'\x00' or x is None: break ret += x rva += 1 return ret def parseImports(self): idir = self.getDataDirectory(IMAGE_DIRECTORY_ENTRY_IMPORT) # RP BUG FIX - invalid IAT entry will point of range of file irva = idir.VirtualAddress x = self.readStructAtRva(irva, 'pe.IMAGE_IMPORT_DIRECTORY', check=True) if x is None: self.imports = [] return self.imports = self.parseImportTable(x, irva, is_imports=True) def parseDelayImports(self): didir = self.getDataDirectory(IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT) # RP BUG FIX - invalid IAT entry will point of range of file irva = didir.VirtualAddress x = self.readStructAtRva(irva, 'pe.IMAGE_DELAY_IMPORT_DIRECTORY', check=True) if x is None: self.delayImports = [] return self.delayImports = self.parseImportTable(x, irva, is_imports=False) def parseImportTable(self, x, irva, is_imports=True): ''' Parse a standard or delayed import table, adding to imports_list. Start with x and irva set to the first entry in the table. ''' imports_list = [] isize = len(x) while True: if is_imports: entry_name = x.Name else: entry_name = x.rvaDLLName if not self.checkRva(entry_name): break # RP BUG FIX - we can't assume that we have 256 bytes to read libname = self.readStringAtRva(entry_name, maxsize=256).decode('utf-8') idx = 0 if is_imports: imp_by_name = x.OriginalFirstThunk if imp_by_name == 0: imp_by_name = x.FirstThunk save_name = x.FirstThunk else: imp_by_name = x.rvaINT if imp_by_name == 0: imp_by_name = x.rvaIAT save_name = x.rvaIAT if not self.checkRva(imp_by_name): break while True: arrayoff = self.psize * idx if self.filesize is not None and arrayoff > self.filesize: return [] # we probably put garbage in the list ibn_rva = self.readPointerAtRva(imp_by_name+arrayoff) if ibn_rva == 0: break if ibn_rva & self.high_bit_mask: funcname = ordlookup.ordLookup(libname, ibn_rva & 0x7fffffff) elif not self.checkRva(ibn_rva): break else: # RP BUG FIX - we can't use this API on this call because we can have binaries that put their import table # right at the end of the file, statically saying the imported function name is 128 will cause use to potentially # over run our read and traceback... diff = self.getMaxRva() - ibn_rva - 2 ibn = vstruct.getStructure("pe.IMAGE_IMPORT_BY_NAME") ibn.vsGetField('Name').vsSetLength(min(diff, 128)) bytes = self.readAtRva(ibn_rva, len(ibn), shortok=True) if not bytes: break try: ibn.vsParse(bytes) except: idx+=1 continue funcname = ibn.Name imports_list.append((save_name + arrayoff, libname, funcname)) idx += 1 irva += isize # RP BUG FIX - if the import table is at the end of the file we can't count on the ending to be null if not self.checkRva(irva, size=isize): break x.vsParse(self.readAtRva(irva, isize)) return imports_list def getRelocations(self): """ Return the list of RVA base-relocations in this PE. 
""" return self.relocations def parseRelocations(self): self.relocations = [] edir = self.getDataDirectory(IMAGE_DIRECTORY_ENTRY_BASERELOC) rva = edir.VirtualAddress rsize = edir.Size # RP BUG FIX - don't watn to read past the end of the file if not self.checkRva(rva): return reloff = self.rvaToOffset(rva) relbytes = self.readAtOffset(reloff, rsize) while relbytes: # bounce if we have less than 8 bytes to unpack if len(relbytes) < 8: return pageva, chunksize = struct.unpack("<II", relbytes[:8]) relcnt = (chunksize - 8) / 2 # if chunksize == 0 bail if not chunksize: return # RP BUG FIX - sometimes the chunksize is invalid we do a quick check to make sure we dont overrun the buffer if chunksize > len(relbytes): return if relcnt < 0: return rels = struct.unpack("<%dH" % relcnt, relbytes[8:chunksize]) for r in rels: rtype = r >> 12 roff = r & 0xfff self.relocations.append((pageva+roff, rtype)) relbytes = relbytes[chunksize:] def getExportName(self): ''' Return the name of this file acording to it's export entry. (if there are no exports, return None) ''' e = self.IMAGE_EXPORT_DIRECTORY if e is None: return None return self.readAtRva(e.Name, 128).split('\x00')[0] def parseExports(self): # Initialize our required locals. self.exports = [] self.forwarders = [] self.IMAGE_EXPORT_DIRECTORY = None edir = self.getDataDirectory(IMAGE_DIRECTORY_ENTRY_EXPORT) poff = self.rvaToOffset(edir.VirtualAddress) if poff == 0: # No exports... return self.IMAGE_EXPORT_DIRECTORY = self.readStructAtOffset(poff, "pe.IMAGE_EXPORT_DIRECTORY") if not self.IMAGE_EXPORT_DIRECTORY: return funcoff = self.rvaToOffset(self.IMAGE_EXPORT_DIRECTORY.AddressOfFunctions) funcsize = 4 * self.IMAGE_EXPORT_DIRECTORY.NumberOfFunctions nameoff = self.rvaToOffset(self.IMAGE_EXPORT_DIRECTORY.AddressOfNames) namesize = 4 * self.IMAGE_EXPORT_DIRECTORY.NumberOfNames ordoff = self.rvaToOffset(self.IMAGE_EXPORT_DIRECTORY.AddressOfOrdinals) ordsize = 2 * self.IMAGE_EXPORT_DIRECTORY.NumberOfNames # RP BUG FIX - sanity check the exports before reading # FH BUG FIX - ordoff and nameoff must both be set (named function exports) # or both be null (unnamed function exports) if not funcoff or funcsize > 0x7FFF or ((ordoff > 0) ^ (nameoff > 0)): self.IMAGE_EXPORT_DIRECTORY = None return if funcsize == 0: self.IMAGE_EXPORT_DIRECTORY = None return funcbytes = self.readAtOffset(funcoff, funcsize) if not funcbytes: self.IMAGE_EXPORT_DIRECTORY = None return funclist = struct.unpack("%dI" % (len(funcbytes) / 4), funcbytes) # named function exports if nameoff and ordoff: namebytes = self.readAtOffset(nameoff, namesize) ordbytes = self.readAtOffset(ordoff, ordsize) namelist = struct.unpack("%dI" % (len(namebytes) / 4), namebytes) ordlist = struct.unpack("%dH" % (len(ordbytes) / 2), ordbytes) for i in range(len(namelist)): ordl = ordlist[i] nameoff = self.rvaToOffset(namelist[i]) if ordl > len(funclist): self.IMAGE_EXPORT_DIRECTORY = None return funcoff = funclist[ordl] ffoff = self.rvaToOffset(funcoff) name = None if nameoff != 0: name = self.readAtOffset(nameoff, 256, shortok=True).split(b"\x00", 1)[0] else: name = b'ord_%.4x' % ordl # RP BUG FIX - Export forwarding range check is done using RVA's if funcoff >= edir.VirtualAddress and funcoff < edir.VirtualAddress + edir.Size: fwdname = self.readAtRva(funcoff, 260, shortok=True).split(b'\x00', 1)[0] self.forwarders.append((funclist[ordl], name.decode('utf-8'), fwdname)) else: self.exports.append((funclist[ordl], ordl, name.decode('utf-8'))) # unnamed function exports else: # sanity check length of array 
containing export functions if len(funclist) != self.IMAGE_EXPORT_DIRECTORY.NumberOfFunctions: self.IMAGE_EXPORT_DIRECTORY = None return for i in range(len(funclist)): funcoff = funclist[i] # The function array will contain X elements, where X equals (highest # ordinal number - lowest ordinal number). For example, a PE with ordinal # exports of 0x10, 0x14, and 0x18 will contain 0x9 elements, with elements # 0x0, 0x4, and 0x8 containing the relative offset of the corresponding # exported function. An element with a value of 0 indicates the element in # the array is a placeholder to preserve the length of the array. if funcoff > 0: ordl = self.IMAGE_EXPORT_DIRECTORY.Base + i self.exports.append((funcoff, ordl, None)) def getSignature(self): ''' Returns the SignatureEntry vstruct if the pe has an embedded certificate. Returns None if the security directory entry is empty or the signature's magic bytes are not set. ''' ds = self.getDataDirectory(IMAGE_DIRECTORY_ENTRY_SECURITY) va = ds.VirtualAddress size = ds.Size if size <= 0: return None bytez = self.readAtOffset(va, size) if not bytez: return None se = vstruct.getStructure('pe.SignatureEntry') se.vsParse(bytez) if se.magic != b"\x00\x02\x02\x00": return None return se def getSignCertInfo(self): sig = self.getSignature() if sig is None: return () # Runtime import these so they are optional dependencies import pyasn1.type.univ import pyasn1.type.namedtype import pyasn1.codec.der.decoder import pyasn1.codec.der.encoder import pyasn1_modules.rfc2315 substrate = sig.pkcs7 contentInfo, rest = pyasn1.codec.der.decoder.decode(substrate, asn1Spec=pyasn1_modules.rfc2315.ContentInfo()) if rest: substrate = substrate[:-len(rest)] contentType = contentInfo.getComponentByName('contentType') contentInfoMap = { (1, 2, 840, 113549, 1, 7, 1): pyasn1_modules.rfc2315.Data(), (1, 2, 840, 113549, 1, 7, 2): pyasn1_modules.rfc2315.SignedData(), (1, 2, 840, 113549, 1, 7, 3): pyasn1_modules.rfc2315.EnvelopedData(), (1, 2, 840, 113549, 1, 7, 4): pyasn1_modules.rfc2315.SignedAndEnvelopedData(), (1, 2, 840, 113549, 1, 7, 5): pyasn1_modules.rfc2315.DigestedData(), (1, 2, 840, 113549, 1, 7, 6): pyasn1_modules.rfc2315.EncryptedData() } seqTypeMap = { (2,5,4,3): 'CN', (2,5,4,7): 'L', (2,5,4,10): 'O', (2,5,4,11): 'OU', (1,2,840,113549,1,9,1): 'E', (2,5,4,6): 'C', (2,5,4,8): 'ST', (2,5,4,9): 'STREET', (2,5,4,12): 'TITLE', (2,5,4,42): 'G', (2,5,4,43): 'I', (2,5,4,4): 'SN', (0,9,2342,19200300,100,1,25): 'DC', } content, _ = pyasn1.codec.der.decoder.decode( contentInfo.getComponentByName('content'), asn1Spec=contentInfoMap[contentType] ) a = content.getComponentByName('certificates') certs = [] for i in a: cbytes = pyasn1.codec.der.encoder.encode( i['certificate'] ) iparts = [] for _, rdnsequence in i["certificate"]["tbsCertificate"]["issuer"].items(): for rdn in rdnsequence: rtype = rdn[0]["type"] rvalue = rdn[0]["value"][2:] iparts.append('%s=%s' % ( seqTypeMap.get( rtype, 'UNK'), rvalue)) issuer = ','.join( iparts ) sparts = [] for _, rdnsequence in i["certificate"]["tbsCertificate"]["subject"].items(): for rdn in rdnsequence: rtype = rdn[0]["type"] rvalue = rdn[0]["value"][2:] sparts.append('%s=%s' % ( seqTypeMap.get( rtype, 'UNK'), rvalue)) subject = ','.join(sparts) serial = int(i["certificate"]["tbsCertificate"]["serialNumber"]) cert = { 'subject':subject, 'issuer':issuer, 'serial':serial, 'bytes':cbytes } certs.append( cert ) return certs def __getattr__(self, name): """ Use a getattr over-ride to allow "on demand" parsing of particular sections.
""" if name == "exports": self.parseExports() return self.exports elif name == "IMAGE_IMPORT_DIRECTORY": self.parseImports() return self.IMAGE_IMPORT_DIRECTORY elif name == "imports": self.parseImports() return self.imports elif name == "IMAGE_DELAY_IMPORT_DIRECTORY": self.parseDelayImports() return self.IMAGE_DELAY_IMPORT_DIRECTORY elif name == "delayImports": self.parseDelayImports() return self.delayImports elif name == "IMAGE_EXPORT_DIRECTORY": self.parseExports() return self.IMAGE_EXPORT_DIRECTORY elif name == "forwarders": self.parseExports() return self.forwarders elif name == "sections": self.parseSections() return self.sections elif name == "ResourceRoot": self.parseResources() return self.ResourceRoot elif name == "relocations": self.parseRelocations() return self.relocations elif name == "IMAGE_LOAD_CONFIG": self.parseLoadConfig() return self.IMAGE_LOAD_CONFIG else: raise AttributeError def peFromMemoryObject(memobj, baseaddr): fd = vstruct.MemObjFile(memobj, baseaddr) return PE(fd, inmem=True) def peFromFileName(fname): """ Utility helper that assures that the file is opened in binary mode which is required for proper functioning. """ # TODO api change to make context handler return PE(open(fname, 'rb')) def peFromBytes(fbytes): fd = StringIO(fbytes) return PE(fd)
bat-serjo/vivisect
PE/__init__.py
Python
apache-2.0
43,419
[ "VisIt" ]
c85b25be66af9963b9c151a59a29c695b2ae7141e4d5a2651abd9a7d841b88e5
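A minimal usage sketch for the parser in the record above, using the module's own peFromFileName, getImports, and getExports entry points; 'sample.exe' is a placeholder path and the import assumes the module is importable as the PE package.

import PE

pe = PE.peFromFileName('sample.exe')  # placeholder path
for rva, libname, funcname in pe.getImports():
    print(hex(rva), libname, funcname)
for rva, ordinal, name in pe.getExports():
    print(hex(rva), ordinal, name)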
#!/usr/bin/env python # Script by Jason Kwong # In silico serotyping for L.monocytogenes import sys import logging import os import click import loguru import pkg_resources from Bio import SeqIO from lissero.scripts.Sample import Samples from lissero.scripts.Blast import Blast from lissero.scripts.Serotype import SerotypeDB from .__init__ import __version__ as version logger = loguru.logger DEFAULT_DB = pkg_resources.resource_filename("lissero", "db") def print_version(ctx, param, value): if not value or ctx.resilient_parsing: return click.echo(f"LisSero {version}") ctx.exit() def is_fasta(filename): """ There are no real FASTA validators out there. This is the best I could come up with. If the file is empty or does not contain any FASTA records the parser will return an empty generator which will return a StopIteration exception when running `next(gen)`. But, there is another case where the file starts with a `>`, and thus the generator works, but it returns an empty record, thus the `len(rec)) > 0. Args: filename: FASTA input name Returns: boolean: true if it looks like a FASTA false otherwise. """ gen = SeqIO.parse(filename, "fasta") try: rec = next(gen) return len(rec) > 0 except StopIteration: return False except Exception as e: logger.error(e) sys.exit(1) @click.command() @click.help_option("-h", "--help") @click.option("-s", "--serotype_db", default=DEFAULT_DB, envvar="LISSERO_DB", show_default=True) @click.option( "--min_id", default=95.0, help="Minimum percent identity to accept a match. [0-100]", show_default=True, ) @click.option( "--min_cov", default=95.0, help="Minimum coverage of the gene to accept a match. [0-100]", show_default=True, ) @click.option("--debug", is_flag=True) @click.option("--logfile", help="Save log to a file instead of printing to stderr", default="") @click.argument("fasta", nargs=-1, type=click.Path(exists=True), required=True) # fix Version Issue #10 @click.option("--version", is_flag=True, callback=print_version, expose_value=False, is_eager=True, help="Show Version Information") def run_lissero(serotype_db, min_id, min_cov, debug, logfile, fasta): """ In silico serogroup prediction for L. monocytogenes. Alleles: lmo1118, lmo0737, ORF2819, ORF2110, Prs References: * Doumith et al. Differentiation of the major Listeria monocytogenes serovars by multiplex PCR. J Clin Microbiol, 2004; 42:8; 3819-22 """ if debug: log_level = logging.DEBUG else: log_level = logging.INFO if logfile != "": logger.remove() logger.add(logfile, level=log_level) else: logger.remove() logger.add(sys.stderr, level=log_level) all_fasta = all([is_fasta(fna) for fna in fasta]) if not all_fasta: logger.error("One or more input files do not appear to be " "valid FASTA.") sys.exit(1) try: path_serodb = os.path.realpath(serotype_db) except TypeError as e: logger.error(f"Please provide a valid path for serotype db path or set correct PATH for LISSERO_DB") sys.exit(1) sero_db = SerotypeDB(path_db=path_serodb, db_type="serotype") sero_db.check_db() blast = Blast() samples = Samples( fasta, blast=blast, sero_db=sero_db, sg_min_id=min_id, sg_min_cov=min_cov ) samples.run_typing() samples.simple_report() if __name__ == "__main__": run_lissero()
MDU-PHL/LisSero
lissero/run_lissero.py
Python
gpl-2.0
3,616
[ "BLAST" ]
8b141be0362217c6bd3ec90befa003e30666d68ab86bb33847fc1d99f73d7c51
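A sketch of exercising the click command in the record above in-process with click's test runner; 'contigs.fasta' is a placeholder input path (the command requires it to exist, so a real file is needed for a passing run).

from click.testing import CliRunner
from lissero.run_lissero import run_lissero

runner = CliRunner()
result = runner.invoke(run_lissero, ['--min_id', '95.0', 'contigs.fasta'])
print(result.exit_code, result.output)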
# Author: Travis Oliphant # 2003 # # Feb. 2010: Updated by Warren Weckesser: # Rewrote much of chirp() # Added sweep_poly() from __future__ import division, print_function, absolute_import import numpy as np from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \ exp, cos, sin, polyval, polyint from scipy._lib.six import string_types __all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly', 'unit_impulse'] def sawtooth(t, width=1): """ Return a periodic sawtooth or triangle waveform. The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1]. Note that this is not band-limited. It produces an infinite number of harmonics, which are aliased back and forth across the frequency spectrum. Parameters ---------- t : array_like Time. width : array_like, optional Width of the rising ramp as a proportion of the total cycle. Default is 1, producing a rising ramp, while 0 produces a falling ramp. `width` = 0.5 produces a triangle wave. If an array, causes wave shape to change over time, and must be the same length as t. Returns ------- y : ndarray Output array containing the sawtooth waveform. Examples -------- A 5 Hz waveform sampled at 500 Hz for 1 second: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> t = np.linspace(0, 1, 500) >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t)) """ t, w = asarray(t), asarray(width) w = asarray(w + (t - t)) t = asarray(t + (w - w)) if t.dtype.char in ['fFdD']: ytype = t.dtype.char else: ytype = 'd' y = zeros(t.shape, ytype) # width must be between 0 and 1 inclusive mask1 = (w > 1) | (w < 0) place(y, mask1, nan) # take t modulo 2*pi tmod = mod(t, 2 * pi) # on the interval 0 to width*2*pi function is # tmod / (pi*w) - 1 mask2 = (1 - mask1) & (tmod < w * 2 * pi) tsub = extract(mask2, tmod) wsub = extract(mask2, w) place(y, mask2, tsub / (pi * wsub) - 1) # on the interval width*2*pi to 2*pi function is # (pi*(w+1)-tmod) / (pi*(1-w)) mask3 = (1 - mask1) & (1 - mask2) tsub = extract(mask3, tmod) wsub = extract(mask3, w) place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub))) return y def square(t, duty=0.5): """ Return a periodic square-wave waveform. The square wave has a period ``2*pi``, has value +1 from 0 to ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in the interval [0,1]. Note that this is not band-limited. It produces an infinite number of harmonics, which are aliased back and forth across the frequency spectrum. Parameters ---------- t : array_like The input time array. duty : array_like, optional Duty cycle. Default is 0.5 (50% duty cycle). If an array, causes wave shape to change over time, and must be the same length as t. Returns ------- y : ndarray Output array containing the square waveform. 
Examples -------- A 5 Hz waveform sampled at 500 Hz for 1 second: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> t = np.linspace(0, 1, 500, endpoint=False) >>> plt.plot(t, signal.square(2 * np.pi * 5 * t)) >>> plt.ylim(-2, 2) A pulse-width modulated sine wave: >>> plt.figure() >>> sig = np.sin(2 * np.pi * t) >>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2) >>> plt.subplot(2, 1, 1) >>> plt.plot(t, sig) >>> plt.subplot(2, 1, 2) >>> plt.plot(t, pwm) >>> plt.ylim(-1.5, 1.5) """ t, w = asarray(t), asarray(duty) w = asarray(w + (t - t)) t = asarray(t + (w - w)) if t.dtype.char in ['fFdD']: ytype = t.dtype.char else: ytype = 'd' y = zeros(t.shape, ytype) # width must be between 0 and 1 inclusive mask1 = (w > 1) | (w < 0) place(y, mask1, nan) # on the interval 0 to duty*2*pi function is 1 tmod = mod(t, 2 * pi) mask2 = (1 - mask1) & (tmod < w * 2 * pi) place(y, mask2, 1) # on the interval duty*2*pi to 2*pi function is # (pi*(w+1)-tmod) / (pi*(1-w)) mask3 = (1 - mask1) & (1 - mask2) place(y, mask3, -1) return y def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False, retenv=False): """ Return a Gaussian modulated sinusoid: ``exp(-a t^2) exp(1j*2*pi*fc*t).`` If `retquad` is True, then return the real and imaginary parts (in-phase and quadrature). If `retenv` is True, then return the envelope (unmodulated signal). Otherwise, return the real part of the modulated sinusoid. Parameters ---------- t : ndarray or the string 'cutoff' Input array. fc : int, optional Center frequency (e.g. Hz). Default is 1000. bw : float, optional Fractional bandwidth in frequency domain of pulse (e.g. Hz). Default is 0.5. bwr : float, optional Reference level at which fractional bandwidth is calculated (dB). Default is -6. tpr : float, optional If `t` is 'cutoff', then the function returns the cutoff time for when the pulse amplitude falls below `tpr` (in dB). Default is -60. retquad : bool, optional If True, return the quadrature (imaginary) as well as the real part of the signal. Default is False. retenv : bool, optional If True, return the envelope of the signal. Default is False. Returns ------- yI : ndarray Real part of signal. Always returned. yQ : ndarray Imaginary part of signal. Only returned if `retquad` is True. yenv : ndarray Envelope of signal. Only returned if `retenv` is True. See Also -------- scipy.signal.morlet Examples -------- Plot real component, imaginary component, and envelope for a 5 Hz pulse, sampled at 100 Hz for 2 seconds: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> t = np.linspace(-1, 1, 2 * 100, endpoint=False) >>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True) >>> plt.plot(t, i, t, q, t, e, '--') """ if fc < 0: raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc) if bw <= 0: raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." 
% bw) if bwr >= 0: raise ValueError("Reference level for bandwidth (bwr=%.2f) must " "be < 0 dB" % bwr) # exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f) ref = pow(10.0, bwr / 20.0) # fdel = fc*bw/2: g(fdel) = ref --- solve this for a # # pi^2/a * fc^2 * bw^2 /4=-log(ref) a = -(pi * fc * bw) ** 2 / (4.0 * log(ref)) if isinstance(t, string_types): if t == 'cutoff': # compute cut_off point # Solve exp(-a tc**2) = tref for tc # tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20) if tpr >= 0: raise ValueError("Reference level for time cutoff must " "be < 0 dB") tref = pow(10.0, tpr / 20.0) return sqrt(-log(tref) / a) else: raise ValueError("If `t` is a string, it must be 'cutoff'") yenv = exp(-a * t * t) yI = yenv * cos(2 * pi * fc * t) yQ = yenv * sin(2 * pi * fc * t) if not retquad and not retenv: return yI if not retquad and retenv: return yI, yenv if retquad and not retenv: return yI, yQ if retquad and retenv: return yI, yQ, yenv def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True): """Frequency-swept cosine generator. In the following, 'Hz' should be interpreted as 'cycles per unit'; there is no requirement here that the unit is one second. The important distinction is that the units of rotation are cycles, not radians. Likewise, `t` could be a measurement of space instead of time. Parameters ---------- t : array_like Times at which to evaluate the waveform. f0 : float Frequency (e.g. Hz) at time t=0. t1 : float Time at which `f1` is specified. f1 : float Frequency (e.g. Hz) of the waveform at time `t1`. method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional Kind of frequency sweep. If not given, `linear` is assumed. See Notes below for more details. phi : float, optional Phase offset, in degrees. Default is 0. vertex_zero : bool, optional This parameter is only used when `method` is 'quadratic'. It determines whether the vertex of the parabola that is the graph of the frequency is at t=0 or t=t1. Returns ------- y : ndarray A numpy array containing the signal evaluated at `t` with the requested time-varying frequency. More precisely, the function returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral (from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below. See Also -------- sweep_poly Notes ----- There are four options for the `method`. The following formulas give the instantaneous frequency (in Hz) of the signal generated by `chirp()`. For convenience, the shorter names shown below may also be used. linear, lin, li: ``f(t) = f0 + (f1 - f0) * t / t1`` quadratic, quad, q: The graph of the frequency f(t) is a parabola through (0, f0) and (t1, f1). By default, the vertex of the parabola is at (0, f0). If `vertex_zero` is False, then the vertex is at (t1, f1). The formula is: if vertex_zero is True: ``f(t) = f0 + (f1 - f0) * t**2 / t1**2`` else: ``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2`` To use a more general quadratic function, or an arbitrary polynomial, use the function `scipy.signal.sweep_poly`. logarithmic, log, lo: ``f(t) = f0 * (f1/f0)**(t/t1)`` f0 and f1 must be nonzero and have the same sign. This signal is also known as a geometric or exponential chirp. hyperbolic, hyp: ``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)`` f0 and f1 must be nonzero. 
Examples -------- The following will be used in the examples: >>> from scipy.signal import chirp, spectrogram >>> import matplotlib.pyplot as plt For the first example, we'll plot the waveform for a linear chirp from 6 Hz to 1 Hz over 10 seconds: >>> t = np.linspace(0, 10, 5001) >>> w = chirp(t, f0=6, f1=1, t1=10, method='linear') >>> plt.plot(t, w) >>> plt.title("Linear Chirp, f(0)=6, f(10)=1") >>> plt.xlabel('t (sec)') >>> plt.show() For the remaining examples, we'll use higher frequency ranges, and demonstrate the result using `scipy.signal.spectrogram`. We'll use a 10 second interval sampled at 8000 Hz. >>> fs = 8000 >>> T = 10 >>> t = np.linspace(0, T, T*fs, endpoint=False) Quadratic chirp from 1500 Hz to 250 Hz over 10 seconds (vertex of the parabolic curve of the frequency is at t=0): >>> w = chirp(t, f0=1500, f1=250, t1=10, method='quadratic') >>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512, ... nfft=2048) >>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r') >>> plt.title('Quadratic Chirp, f(0)=1500, f(10)=250') >>> plt.xlabel('t (sec)') >>> plt.ylabel('Frequency (Hz)') >>> plt.grid() >>> plt.show() Quadratic chirp from 1500 Hz to 250 Hz over 10 seconds (vertex of the parabolic curve of the frequency is at t=10): >>> w = chirp(t, f0=1500, f1=250, t1=10, method='quadratic', ... vertex_zero=False) >>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512, ... nfft=2048) >>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r') >>> plt.title('Quadratic Chirp, f(0)=1500, f(10)=250\\n' + ... '(vertex_zero=False)') >>> plt.xlabel('t (sec)') >>> plt.ylabel('Frequency (Hz)') >>> plt.grid() >>> plt.show() Logarithmic chirp from 1500 Hz to 250 Hz over 10 seconds: >>> w = chirp(t, f0=1500, f1=250, t1=10, method='logarithmic') >>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512, ... nfft=2048) >>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r') >>> plt.title('Logarithmic Chirp, f(0)=1500, f(10)=250') >>> plt.xlabel('t (sec)') >>> plt.ylabel('Frequency (Hz)') >>> plt.grid() >>> plt.show() Hyperbolic chirp from 1500 Hz to 250 Hz over 10 seconds: >>> w = chirp(t, f0=1500, f1=250, t1=10, method='hyperbolic') >>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512, ... nfft=2048) >>> plt.pcolormesh(tt, ff[:513], Sxx[:513], cmap='gray_r') >>> plt.title('Hyperbolic Chirp, f(0)=1500, f(10)=250') >>> plt.xlabel('t (sec)') >>> plt.ylabel('Frequency (Hz)') >>> plt.grid() >>> plt.show() """ # 'phase' is computed in _chirp_phase, to make testing easier. phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero) # Convert phi to radians. phi *= pi / 180 return cos(phase + phi) def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True): """ Calculate the phase used by `chirp` to generate its output. See `chirp` for a description of the arguments. 
""" t = asarray(t) f0 = float(f0) t1 = float(t1) f1 = float(f1) if method in ['linear', 'lin', 'li']: beta = (f1 - f0) / t1 phase = 2 * pi * (f0 * t + 0.5 * beta * t * t) elif method in ['quadratic', 'quad', 'q']: beta = (f1 - f0) / (t1 ** 2) if vertex_zero: phase = 2 * pi * (f0 * t + beta * t ** 3 / 3) else: phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3) elif method in ['logarithmic', 'log', 'lo']: if f0 * f1 <= 0.0: raise ValueError("For a logarithmic chirp, f0 and f1 must be " "nonzero and have the same sign.") if f0 == f1: phase = 2 * pi * f0 * t else: beta = t1 / log(f1 / f0) phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0) elif method in ['hyperbolic', 'hyp']: if f0 == 0 or f1 == 0: raise ValueError("For a hyperbolic chirp, f0 and f1 must be " "nonzero.") if f0 == f1: # Degenerate case: constant frequency. phase = 2 * pi * f0 * t else: # Singular point: the instantaneous frequency blows up # when t == sing. sing = -f1 * t1 / (f0 - f1) phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing)) else: raise ValueError("method must be 'linear', 'quadratic', 'logarithmic'," " or 'hyperbolic', but a value of %r was given." % method) return phase def sweep_poly(t, poly, phi=0): """ Frequency-swept cosine generator, with a time-dependent frequency. This function generates a sinusoidal function whose instantaneous frequency varies with time. The frequency at time `t` is given by the polynomial `poly`. Parameters ---------- t : ndarray Times at which to evaluate the waveform. poly : 1-D array_like or instance of numpy.poly1d The desired frequency expressed as a polynomial. If `poly` is a list or ndarray of length n, then the elements of `poly` are the coefficients of the polynomial, and the instantaneous frequency is ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]`` If `poly` is an instance of numpy.poly1d, then the instantaneous frequency is ``f(t) = poly(t)`` phi : float, optional Phase offset, in degrees, Default: 0. Returns ------- sweep_poly : ndarray A numpy array containing the signal evaluated at `t` with the requested time-varying frequency. More precisely, the function returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral (from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above. See Also -------- chirp Notes ----- .. versionadded:: 0.8.0 If `poly` is a list or ndarray of length `n`, then the elements of `poly` are the coefficients of the polynomial, and the instantaneous frequency is: ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]`` If `poly` is an instance of `numpy.poly1d`, then the instantaneous frequency is: ``f(t) = poly(t)`` Finally, the output `s` is: ``cos(phase + (pi/180)*phi)`` where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``, ``f(t)`` as defined above. Examples -------- Compute the waveform with instantaneous frequency:: f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2 over the interval 0 <= t <= 10. >>> from scipy.signal import sweep_poly >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0]) >>> t = np.linspace(0, 10, 5001) >>> w = sweep_poly(t, p) Plot it: >>> import matplotlib.pyplot as plt >>> plt.subplot(2, 1, 1) >>> plt.plot(t, w) >>> plt.title("Sweep Poly\\nwith frequency " + ... "$f(t) = 0.025t^3 - 0.36t^2 + 1.25t + 2$") >>> plt.subplot(2, 1, 2) >>> plt.plot(t, p(t), 'r', label='f(t)') >>> plt.legend() >>> plt.xlabel('t') >>> plt.tight_layout() >>> plt.show() """ # 'phase' is computed in _sweep_poly_phase, to make testing easier. phase = _sweep_poly_phase(t, poly) # Convert to radians. 
phi *= pi / 180 return cos(phase + phi) def _sweep_poly_phase(t, poly): """ Calculate the phase used by sweep_poly to generate its output. See `sweep_poly` for a description of the arguments. """ # polyint handles lists, ndarrays and instances of poly1d automatically. intpoly = polyint(poly) phase = 2 * pi * polyval(intpoly, t) return phase def unit_impulse(shape, idx=None, dtype=float): """ Unit impulse signal (discrete delta function) or unit basis vector. Parameters ---------- shape : int or tuple of int Number of samples in the output (1-D), or a tuple that represents the shape of the output (N-D). idx : None or int or tuple of int or 'mid', optional Index at which the value is 1. If None, defaults to the 0th element. If ``idx='mid'``, the impulse will be centered at ``shape // 2`` in all dimensions. If an int, the impulse will be at `idx` in all dimensions. dtype : data-type, optional The desired data-type for the array, e.g., ``numpy.int8``. Default is ``numpy.float64``. Returns ------- y : ndarray Output array containing an impulse signal. Notes ----- The 1D case is also known as the Kronecker delta. .. versionadded:: 0.19.0 Examples -------- An impulse at the 0th element (:math:`\\delta[n]`): >>> from scipy import signal >>> signal.unit_impulse(8) array([ 1., 0., 0., 0., 0., 0., 0., 0.]) Impulse offset by 2 samples (:math:`\\delta[n-2]`): >>> signal.unit_impulse(7, 2) array([ 0., 0., 1., 0., 0., 0., 0.]) 2-dimensional impulse, centered: >>> signal.unit_impulse((3, 3), 'mid') array([[ 0., 0., 0.], [ 0., 1., 0.], [ 0., 0., 0.]]) Impulse at (2, 2), using broadcasting: >>> signal.unit_impulse((4, 4), 2) array([[ 0., 0., 0., 0.], [ 0., 0., 0., 0.], [ 0., 0., 1., 0.], [ 0., 0., 0., 0.]]) Plot the impulse response of a 4th-order Butterworth lowpass filter: >>> imp = signal.unit_impulse(100, 'mid') >>> b, a = signal.butter(4, 0.2) >>> response = signal.lfilter(b, a, imp) >>> import matplotlib.pyplot as plt >>> plt.plot(np.arange(-50, 50), imp) >>> plt.plot(np.arange(-50, 50), response) >>> plt.margins(0.1, 0.1) >>> plt.xlabel('Time [samples]') >>> plt.ylabel('Amplitude') >>> plt.grid(True) >>> plt.show() """ out = zeros(shape, dtype) shape = np.atleast_1d(shape) if idx is None: idx = (0,) * len(shape) elif idx == 'mid': idx = tuple(shape // 2) elif not hasattr(idx, "__iter__"): idx = (idx,) * len(shape) out[idx] = 1 return out
gertingold/scipy
scipy/signal/waveforms.py
Python
bsd-3-clause
21,065
[ "Gaussian" ]
b40410f16fce2fdc5de31dc805556f5ef8b82ed9414fb68b9c37622d89f5d737
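Editor's note on the record above: its docstrings document `gausspulse`'s 'cutoff' mode and the linear-chirp formula separately but never combine them. A minimal sketch of both together, assuming only that scipy and numpy are installed:

import numpy as np
from scipy import signal

# Size the time axis from the documented 'cutoff' mode: the returned tc
# is the time at which the envelope has fallen to tpr (-60 dB here).
tc = signal.gausspulse('cutoff', fc=5, bw=0.5, tpr=-60)
t = np.linspace(-tc, tc, 200)
i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)

# Linear sweep, f(t) = f0 + (f1 - f0) * t / t1 per the chirp docstring.
t2 = np.linspace(0, 10, 5001)
w = signal.chirp(t2, f0=6, f1=1, t1=10, method='linear')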
import numpy as np import dolfin from dolfin import * from mpi4py import MPI as pyMPI comm = pyMPI.COMM_WORLD mpi_comm = MPI.comm_world #mark whole boundary, inflow and outflow will overwrite) class Noslip(SubDomain): def inside(self, x, on_boundary): return on_boundary class Left(SubDomain): def inside(self, x, on_boundary): return on_boundary and near(x[0], 0) class Right(SubDomain): def inside(self, x, on_boundary): return on_boundary and near(x[0], 1.0) #Create a unit box mesh n_ele = 6 aspect_ratio = 3 mesh = BoxMesh(comm, Point(0.0, 0.0,0.0), Point(1.0, 1.0, 1.0), n_ele, n_ele, n_ele*aspect_ratio) #read mesh and boundaries from file boundaries = MeshFunction('size_t', mesh, mesh.topology().dim() - 1) mark = {"Internal":0, "wall": 1,"inlet": 2,"outlet": 3 } boundaries.set_all(mark["Internal"]) wall=Noslip() wall.mark(boundaries, mark["wall"]) left = Left() left.mark(boundaries, mark["inlet"]) right = Right() right.mark(boundaries, mark["outlet"]) #read viscosity coefficient from file mu = Constant(0.001) #Define Taylor-Hood element and function space P2 = VectorElement("Lagrange", mesh.ufl_cell(), 2) P1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1) TH = P2 * P1 W = FunctionSpace(mesh, TH) # Define variational problem (u, p) = TrialFunctions(W) (v, q) = TestFunctions(W) ds = dolfin.Measure('ds',domain=mesh,subdomain_data=boundaries) n = dolfin.FacetNormal(mesh) #Define boundary condition p_in = dolfin.Constant(1.0) # pressure inlet p_out = dolfin.Constant(0.0) # pressure outlet noslip = dolfin.Constant([0.0]*mesh.geometry().dim()) # no-slip wall #Boundary conditions # No-slip Dirichlet boundary condition for velocity bc0 = DirichletBC(W.sub(0), noslip, boundaries, mark["wall"]) bcs = [bc0] #Neumann BC gNeumann = - p_in * inner(n, v) * ds(mark["inlet"]) + \ - p_out * inner(n, v) * ds(mark["outlet"]) #Body force f = Constant([0.0]*mesh.geometry().dim()) #Weak form a = mu*inner(grad(u), grad(v))*dx + div(v)*p*dx + q*div(u)*dx # The sign of the pressure has been flipped for symmetric system L= inner(f, v)*dx + gNeumann U = Function(W) solve(a == L, U, bcs) uh, ph = U.split() #Output solution p,u to paraview dolfin.XDMFFile("pressure.xdmf").write_checkpoint(ph, "p") dolfin.XDMFFile("velocity.xdmf").write_checkpoint(uh, "u") flux = [dolfin.assemble(dolfin.dot(uh, n)*ds(i)) for i in range(len(mark))] if comm.Get_rank() == 0: for key, value in mark.items(): print("Flux_%s= %.15lf"%(key,flux[value]))
BradHub/SL-SPH
hdg_test/anistropic/cg_test.py
Python
mit
2,561
[ "ParaView" ]
b94806a950df7354b576384f58ac19b2201621fb482684779d72ac806302d2fe
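Editor's note on the record above: the Stokes program is 3-D and pressure-driven, but the Taylor-Hood construction it uses carries over unchanged to a smaller 2-D form. A sketch of just that construction, assuming legacy FEniCS (dolfin) is importable; the Neumann pressure terms and the solve are omitted and follow exactly as in the record:

from dolfin import (UnitSquareMesh, VectorElement, FiniteElement,
                    FunctionSpace, TrialFunctions, TestFunctions,
                    Constant, DirichletBC, inner, grad, div, dx)

mesh = UnitSquareMesh(16, 16)
P2 = VectorElement("Lagrange", mesh.ufl_cell(), 2)   # velocity space
P1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1)   # pressure space
W = FunctionSpace(mesh, P2 * P1)                     # Taylor-Hood pair

(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
mu = Constant(0.001)
f = Constant((0.0, 0.0))

# Same symmetric weak form as the record (pressure sign flipped).
a = mu*inner(grad(u), grad(v))*dx + div(v)*p*dx + q*div(u)*dx
L = inner(f, v)*dx

# No-slip on top and bottom walls, given as a C++ expression string.
bc = DirichletBC(W.sub(0), Constant((0.0, 0.0)),
                 "on_boundary && (near(x[1], 0.0) || near(x[1], 1.0))")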
import logging import random import tba_config from google.appengine.ext import deferred from google.appengine.api import memcache from consts.client_type import ClientType from consts.notification_type import NotificationType from helpers.notification_sender import NotificationSender from sitevars.notifications_enable import NotificationsEnable class BaseNotification(object): # List of clients this notification type supports (these are default values) # Can be overridden by subclasses to only send to some types _supported_clients = [ClientType.OS_ANDROID, ClientType.WEBHOOK] # Send analytics updates for this notification? # Can be overridden by subclasses if not _track_call = True # GCM Priority for this message, set to "High" for important pushes # Valid types are 'high' and 'normal' # https://developers.google.com/cloud-messaging/concept-options#setting-the-priority-of-a-message _priority = 'normal' # If set to (key, timeout_seconds), won't send multiple notifications _timeout = None """ Class that acts as a basic notification. To send a notification, instantiate one and call this method """ def send(self, keys, push_firebase=True, track_call=True): if self._timeout is not None: key, timeout = self._timeout if memcache.get(key): # Using memcache is a hacky implementation, since it is not guaranteed. logging.info("Notification timeout for: {}".format(key)) return # Currently in timeout. Don't send. else: memcache.set(key, True, timeout) self.keys = keys # dict like {ClientType : [ key ] } ... The list for webhooks is a tuple of (key, secret) deferred.defer(self.render, self._supported_clients, _queue="push-notifications", _url='/_ah/queue/deferred_notification_send') if self._track_call and track_call: num_keys = 0 for v in keys.values(): # Count the number of clients receiving the notification num_keys += len(v) if random.random() < tba_config.GA_RECORD_FRACTION: deferred.defer(self.track_notification, self._type, num_keys, _queue="api-track-call", _url='/_ah/queue/deferred_notification_track_send') """ This method will create platform specific notifications and send them to the platform specified Clients should implement the referenced methods in order to build the notification for each platform """ def render(self, client_types): if not isinstance(client_types, list): # Listify client types, if needed client_types = [client_types] if not self.check_enabled(): # Don't send for NotificationTypes that aren't enabled return for client_type in client_types: if client_type is ClientType.OS_ANDROID and client_type in self.keys: client_render_method = self.render_method(client_type) notification = client_render_method() if len(self.keys[client_type]) > 0: # this is after _render because if it's an update fav/subscription notification, then NotificationSender.send_gcm(notification) # we remove the client id that sent the update so it doesn't get notified redundantly elif client_type == ClientType.WEBHOOK and ClientType.WEBHOOK in self.keys and len(self.keys[ClientType.WEBHOOK]) > 0: notification = self._render_webhook() NotificationSender.send_webhook(notification, self.keys[ClientType.WEBHOOK]) def check_enabled(self): return NotificationsEnable.notifications_enabled() """ Subclasses should override this method and return a dict containing the payload of the notification. 
The dict should have two entries: 'notification_type' (should be one of NotificationType, string) and 'message_data' """ def _build_dict(self): raise NotImplementedError("Subclasses must implement this method to build JSON data to send") @property def _type(self): raise NotImplementedError("Subclasses must implement this message to set its notification type") """ The following methods are default render methods. Often, the way we construct the messages doesn't change, so we abstract it to here. However, if a notification type needs to do something special (e.g. specify a GCM collapse key), then subclasses can override them in order to provide that functionality. """ def _render_android(self): return self._render_gcm(ClientType.OS_ANDROID) def _render_webhook(self): # Note: webhooks use `message_type` instead of the `notification_type` data = self._build_dict() message_type = data.pop('notification_type') data['message_type'] = message_type return data def _render_gcm(self, client_type): from controllers.gcm.gcm import GCMMessage gcm_keys = self.keys[client_type] data = self._build_dict() return GCMMessage(gcm_keys, data, priority=self._priority) def render_method(self, client_type): if client_type == ClientType.OS_ANDROID: return self._render_android elif client_type == ClientType.WEBHOOK: return self._render_webhook else: return self._render_gcm(client_type) # used for deferred analytics call def track_notification(self, notification_type_enum, num_keys): """ For more information about GAnalytics Protocol Parameters, visit https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters """ from sitevars.google_analytics_id import GoogleAnalyticsID google_analytics_id = GoogleAnalyticsID.google_analytics_id() if not google_analytics_id: logging.warning("Missing sitevar: google_analytics.id. Can't track API usage.") else: import uuid cid = uuid.uuid3(uuid.NAMESPACE_X500, str('tba-notification-tracking')) from urllib import urlencode params = urlencode({ 'v': 1, 'tid': google_analytics_id, 'cid': cid, 't': 'event', 'ec': 'notification', 'ea': NotificationType.type_names[notification_type_enum], 'ev': num_keys, 'ni': 1, 'sc': 'end', # forces tracking session to end }) from google.appengine.api import urlfetch analytics_url = 'http://www.google-analytics.com/collect?%s' % params urlfetch.fetch( url=analytics_url, method=urlfetch.GET, deadline=10, )
bdaroz/the-blue-alliance
notifications/base_notification.py
Python
mit
6,868
[ "VisIt" ]
bc82ea67e172f311921da4117f04e572ab6e6626476b878180c6768ee95ba14e
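Editor's note on the record above: the base class spells out its subclass contract (`_build_dict` returning 'notification_type' plus 'message_data', and the `_type` property) without showing a concrete subclass. A minimal hypothetical one; `NotificationType.PING` is assumed purely for illustration and is not taken from the record:

class PingNotification(BaseNotification):
    """Sketch of a concrete notification; not part of the record."""

    _track_call = False  # skip the deferred analytics call

    @property
    def _type(self):
        return NotificationType.PING  # assumed enum member

    def _build_dict(self):
        # The two required entries, per the base class docstring.
        return {
            'notification_type': NotificationType.type_names[self._type],
            'message_data': {'title': 'Ping', 'desc': 'Test message'},
        }

# keys is "dict like {ClientType : [ key ]}", as documented in send().
PingNotification().send({ClientType.OS_ANDROID: ['gcm_key_1']})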
"""StrongConnectivity.py DFS-based algorithm for computing strongly connected components. If G is a graph, then - StronglyConnectedComponents(G) returns a list of its components, each represented as a subgraph of G - Condensation(G) returns a directed acyclic graph, the vertices of which are strongly connected components of G. Each vertex of the condensation is represented as a frozenset of the vertices of G within a single strongly connected component. D. Eppstein, July 2005. """ from .dfs import Searcher class StronglyConnectedComponents(Searcher): """ Generate the strongly connected components of G. G should be represented in such a way that "for v in G" loops through the vertices, and "G[v]" produces a list of the neighbors of v; for instance, G may be a dictionary mapping each vertex to its neighbor set. The result of StronglyConnectedComponents(G) is a sequence of subgraphs of G. """ def __init__(self, G): """Search for strongly connected components of graph G.""" # set up data structures for DFS self._components = [] self._dfsnumber = {} self._activelen = {} self._active = [] self._low = {} self._biglow = len(G) self._graph = G # perform the Depth First Search Searcher.__init__(self, G) # clean up now-useless data structures del self._dfsnumber, self._activelen, self._active, self._low def __iter__(self): """Return iterator for sequence of strongly connected components.""" return iter(self._components) def __len__(self): """How many components are there?""" return len(self._components) def _component(self, vertices): """Make a new SCC.""" vertices = set(vertices) induced = { v: {w for w in self._graph[v] if w in vertices} for v in vertices} self._components.append(induced) def preorder(self, parent, child): """Handle first visit to vertex in DFS search for components.""" if parent == child: self._active = [] self._activelen[child] = len(self._active) self._active.append(child) self._low[child] = self._dfsnumber[child] = len(self._dfsnumber) def backedge(self, source, destination): """Handle non-tree edge in DFS search for components.""" self._low[source] = min(self._low[source], self._low[destination]) def postorder(self, parent, child): """Handle last visit to vertex in DFS search for components.""" if self._low[child] == self._dfsnumber[child]: self._component(self._active[self._activelen[child]:]) for v in self._components[-1]: self._low[v] = self._biglow del self._active[self._activelen[child]:] else: self._low[parent] = min(self._low[parent], self._low[child]) def Condensation(G): """Return a DAG with vertices equal to sets of vertices in SCCs of G.""" components = {} GtoC = {} for C in StronglyConnectedComponents(G): C = frozenset(C) for v in C: GtoC[v] = C components[C] = set() for v in G: for w in G[v]: if GtoC[v] != GtoC[w]: components[GtoC[v]].add(GtoC[w]) return components
jfinkels/PADS
pads/strong_connectivity.py
Python
mit
3,363
[ "VisIt" ]
e4152c46a18656c6b8cac5c370dc074a9d06d5805b251694fcde8bcb0d420bff
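Editor's note on the record above: the module never shows an invocation. A short driver, using the dict-of-sets representation its own docstring suggests:

G = {
    'a': {'b'},
    'b': {'c'},
    'c': {'a', 'd'},   # a, b, c form one strongly connected component
    'd': set(),
}

sccs = StronglyConnectedComponents(G)   # the DFS runs in __init__
print(len(sccs))                        # 2
for component in sccs:
    print(sorted(component))            # each is an induced subgraph dict

# Condensation collapses each SCC to a frozenset vertex in a DAG:
# {frozenset({'a','b','c'}): {frozenset({'d'})}, frozenset({'d'}): set()}
dag = Condensation(G)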
""" View for Courseware Index """ # pylint: disable=attribute-defined-outside-init from datetime import datetime from django.conf import settings from django.contrib.auth.decorators import login_required from django.contrib.auth.models import User from django.core.context_processors import csrf from django.core.urlresolvers import reverse from django.http import Http404 from django.utils.decorators import method_decorator from django.utils.timezone import UTC from django.views.decorators.cache import cache_control from django.views.decorators.csrf import ensure_csrf_cookie from django.views.generic import View from django.shortcuts import redirect from courseware.url_helpers import get_redirect_url_for_global_staff from edxmako.shortcuts import render_to_response, render_to_string import logging import newrelic.agent import urllib from lang_pref import LANGUAGE_KEY from xblock.fragment import Fragment from opaque_keys.edx.keys import CourseKey from openedx.core.lib.gating import api as gating_api from openedx.core.djangoapps.user_api.preferences.api import get_user_preference from shoppingcart.models import CourseRegistrationCode from student.models import CourseEnrollment from student.views import is_course_blocked from student.roles import GlobalStaff from util.views import ensure_valid_course_key from xmodule.modulestore.django import modulestore from xmodule.x_module import STUDENT_VIEW from survey.utils import must_answer_survey from ..access import has_access, _adjust_start_date_for_beta_testers from ..access_utils import in_preview_mode from ..courses import get_studio_url, get_course_with_access from ..entrance_exams import ( course_has_entrance_exam, get_entrance_exam_content, get_entrance_exam_score, user_has_passed_entrance_exam, user_must_complete_entrance_exam, ) from ..exceptions import Redirect from ..masquerade import setup_masquerade from ..model_data import FieldDataCache from ..module_render import toc_for_course, get_module_for_descriptor from .views import get_current_child, registered_for_course log = logging.getLogger("edx.courseware.views.index") TEMPLATE_IMPORTS = {'urllib': urllib} CONTENT_DEPTH = 2 class CoursewareIndex(View): """ View class for the Courseware page. """ @method_decorator(login_required) @method_decorator(ensure_csrf_cookie) @method_decorator(cache_control(no_cache=True, no_store=True, must_revalidate=True)) @method_decorator(ensure_valid_course_key) def get(self, request, course_id, chapter=None, section=None, position=None): """ Displays courseware accordion and associated content. If course, chapter, and section are all specified, renders the page, or returns an error if they are invalid. If section is not specified, displays the accordion opened to the right chapter. If neither chapter or section are specified, displays the user's most recent chapter, or the first chapter if this is the user's first visit. 
Arguments: request: HTTP request course_id (unicode): course id chapter (unicode): chapter url_name section (unicode): section url_name position (unicode): position in module, eg of <sequential> module """ self.course_key = CourseKey.from_string(course_id) self.request = request self.original_chapter_url_name = chapter self.original_section_url_name = section self.chapter_url_name = chapter self.section_url_name = section self.position = position self.chapter, self.section = None, None self.url = request.path try: self._init_new_relic() self._clean_position() with modulestore().bulk_operations(self.course_key): self.course = get_course_with_access(request.user, 'load', self.course_key, depth=CONTENT_DEPTH) self.is_staff = has_access(request.user, 'staff', self.course) self._setup_masquerade_for_effective_user() return self._get() except Redirect as redirect_error: return redirect(redirect_error.url) except UnicodeEncodeError: raise Http404("URL contains Unicode characters") except Http404: # let it propagate raise except Exception: # pylint: disable=broad-except return self._handle_unexpected_error() def _setup_masquerade_for_effective_user(self): """ Setup the masquerade information to allow the request to be processed for the requested effective user. """ self.real_user = self.request.user self.masquerade, self.effective_user = setup_masquerade( self.request, self.course_key, self.is_staff, reset_masquerade_data=True ) # Set the user in the request to the effective user. self.request.user = self.effective_user def _get(self): """ Render the index page. """ self._redirect_if_needed_to_access_course() self._prefetch_and_bind_course() if self.course.has_children_at_depth(CONTENT_DEPTH): self._reset_section_to_exam_if_required() self.chapter = self._find_chapter() self.section = self._find_section() if self.chapter and self.section: self._redirect_if_not_requested_section() self._verify_section_not_gated() self._save_positions() self._prefetch_and_bind_section() return render_to_response('courseware/courseware.html', self._create_courseware_context()) def _redirect_if_not_requested_section(self): """ If the resulting section and chapter are different from what was initially requested, redirect back to the index page, but with an updated URL that includes the correct section and chapter values. We do this so that our analytics events and error logs have the appropriate URLs. """ if ( self.chapter.url_name != self.original_chapter_url_name or (self.original_section_url_name and self.section.url_name != self.original_section_url_name) ): raise Redirect( reverse( 'courseware_section', kwargs={ 'course_id': unicode(self.course_key), 'chapter': self.chapter.url_name, 'section': self.section.url_name, }, ) ) def _init_new_relic(self): """ Initialize metrics for New Relic so we can slice data in New Relic Insights """ newrelic.agent.add_custom_parameter('course_id', unicode(self.course_key)) newrelic.agent.add_custom_parameter('org', unicode(self.course_key.org)) def _clean_position(self): """ Verify that the given position is an integer. If it is not positive, set it to 1. """ if self.position is not None: try: self.position = max(int(self.position), 1) except ValueError: raise Http404(u"Position {} is not an integer!".format(self.position)) def _redirect_if_needed_to_access_course(self): """ Verifies that the user can enter the course. 
""" self._redirect_if_needed_to_pay_for_course() self._redirect_if_needed_to_register() self._redirect_if_needed_for_prereqs() self._redirect_if_needed_for_course_survey() def _redirect_if_needed_to_pay_for_course(self): """ Redirect to dashboard if the course is blocked due to non-payment. """ self.real_user = User.objects.prefetch_related("groups").get(id=self.real_user.id) redeemed_registration_codes = CourseRegistrationCode.objects.filter( course_id=self.course_key, registrationcoderedemption__redeemed_by=self.real_user ) if is_course_blocked(self.request, redeemed_registration_codes, self.course_key): # registration codes may be generated via Bulk Purchase Scenario # we have to check only for the invoice generated registration codes # that their invoice is valid or not log.warning( u'User %s cannot access the course %s because payment has not yet been received', self.real_user, unicode(self.course_key), ) raise Redirect(reverse('dashboard')) def _redirect_if_needed_to_register(self): """ Verify that the user is registered in the course. """ if not registered_for_course(self.course, self.effective_user): log.debug( u'User %s tried to view course %s but is not enrolled', self.effective_user, unicode(self.course.id) ) user_is_global_staff = GlobalStaff().has_user(self.effective_user) user_is_enrolled = CourseEnrollment.is_enrolled(self.effective_user, self.course_key) if user_is_global_staff and not user_is_enrolled: redirect_url = get_redirect_url_for_global_staff(self.course_key, _next=self.url) raise Redirect(redirect_url) raise Redirect(reverse('about_course', args=[unicode(self.course.id)])) def _redirect_if_needed_for_prereqs(self): """ See if all pre-requisites (as per the milestones app feature) have been fulfilled. Note that if the pre-requisite feature flag has been turned off (default) then this check will always pass. """ if not has_access(self.effective_user, 'view_courseware_with_prerequisites', self.course): # Prerequisites have not been fulfilled. # Therefore redirect to the Dashboard. log.info( u'User %d tried to view course %s ' u'without fulfilling prerequisites', self.effective_user.id, unicode(self.course.id)) raise Redirect(reverse('dashboard')) def _redirect_if_needed_for_course_survey(self): """ Check to see if there is a required survey that must be taken before the user can access the course. """ if must_answer_survey(self.course, self.effective_user): raise Redirect(reverse('course_survey', args=[unicode(self.course.id)])) def _reset_section_to_exam_if_required(self): """ Check to see if an Entrance Exam is required for the user. """ if ( course_has_entrance_exam(self.course) and user_must_complete_entrance_exam(self.request, self.effective_user, self.course) ): exam_chapter = get_entrance_exam_content(self.effective_user, self.course) if exam_chapter and exam_chapter.get_children(): exam_section = exam_chapter.get_children()[0] if exam_section: self.chapter_url_name = exam_chapter.url_name self.section_url_name = exam_section.url_name def _verify_section_not_gated(self): """ Verify whether the section is gated and accessible to the user. """ gated_content = gating_api.get_gated_content(self.course, self.effective_user) if gated_content: if unicode(self.section.location) in gated_content: raise Http404 def _get_language_preference(self): """ Returns the preferred language for the actual user making the request. 
""" language_preference = get_user_preference(self.real_user, LANGUAGE_KEY) if not language_preference: language_preference = settings.LANGUAGE_CODE return language_preference def _is_masquerading_as_student(self): """ Returns whether the current request is masquerading as a student. """ return self.masquerade and self.masquerade.role == 'student' def _find_block(self, parent, url_name, block_type, min_depth=None): """ Finds the block in the parent with the specified url_name. If not found, calls get_current_child on the parent. """ child = None if url_name: child = parent.get_child_by(lambda m: m.location.name == url_name) if not child: # User may be trying to access a child that isn't live yet if not self._is_masquerading_as_student(): raise Http404('No {block_type} found with name {url_name}'.format( block_type=block_type, url_name=url_name, )) elif min_depth and not child.has_children_at_depth(min_depth - 1): child = None if not child: child = get_current_child(parent, min_depth=min_depth, requested_child=self.request.GET.get("child")) return child def _find_chapter(self): """ Finds the requested chapter. """ return self._find_block(self.course, self.chapter_url_name, 'chapter', CONTENT_DEPTH - 1) def _find_section(self): """ Finds the requested section. """ if self.chapter: return self._find_block(self.chapter, self.section_url_name, 'section') def _prefetch_and_bind_course(self): """ Prefetches all descendant data for the requested section and sets up the runtime, which binds the request user to the section. """ self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents( self.course_key, self.effective_user, self.course, depth=CONTENT_DEPTH, ) self.course = get_module_for_descriptor( self.effective_user, self.request, self.course, self.field_data_cache, self.course_key, course=self.course, ) def _prefetch_and_bind_section(self): """ Prefetches all descendant data for the requested section and sets up the runtime, which binds the request user to the section. """ # Pre-fetch all descendant data self.section = modulestore().get_item(self.section.location, depth=None) self.field_data_cache.add_descriptor_descendents(self.section, depth=None) # Bind section to user self.section = get_module_for_descriptor( self.effective_user, self.request, self.section, self.field_data_cache, self.course_key, self.position, course=self.course, ) def _save_positions(self): """ Save where we are in the course and chapter. """ save_child_position(self.course, self.chapter_url_name) save_child_position(self.chapter, self.section_url_name) def _create_courseware_context(self): """ Returns and creates the rendering context for the courseware. Also returns the table of contents for the courseware. 
""" courseware_context = { 'csrf': csrf(self.request)['csrf_token'], 'COURSE_TITLE': self.course.display_name_with_default_escaped, 'course': self.course, 'init': '', 'fragment': Fragment(), 'staff_access': self.is_staff, 'studio_url': get_studio_url(self.course, 'course'), 'masquerade': self.masquerade, 'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"), 'bookmarks_api_url': reverse('bookmarks'), 'language_preference': self._get_language_preference(), 'disable_optimizely': True, } table_of_contents = toc_for_course( self.effective_user, self.request, self.course, self.chapter_url_name, self.section_url_name, self.field_data_cache, ) courseware_context['accordion'] = render_accordion(self.request, self.course, table_of_contents['chapters']) # entrance exam data if course_has_entrance_exam(self.course): if getattr(self.chapter, 'is_entrance_exam', False): courseware_context['entrance_exam_current_score'] = get_entrance_exam_score(self.request, self.course) courseware_context['entrance_exam_passed'] = user_has_passed_entrance_exam(self.request, self.course) # staff masquerading data now = datetime.now(UTC()) effective_start = _adjust_start_date_for_beta_testers(self.effective_user, self.course, self.course_key) if not in_preview_mode() and self.is_staff and now < effective_start: # Disable student view button if user is staff and # course is not yet visible to students. courseware_context['disable_student_access'] = True if self.section: # chromeless data if self.section.chrome: chrome = [s.strip() for s in self.section.chrome.lower().split(",")] if 'accordion' not in chrome: courseware_context['disable_accordion'] = True if 'tabs' not in chrome: courseware_context['disable_tabs'] = True # default tab if self.section.default_tab: courseware_context['default_tab'] = self.section.default_tab # section data courseware_context['section_title'] = self.section.display_name_with_default_escaped section_context = self._create_section_context( table_of_contents['previous_of_active_section'], table_of_contents['next_of_active_section'], ) courseware_context['fragment'] = self.section.render(STUDENT_VIEW, section_context) return courseware_context def _create_section_context(self, previous_of_active_section, next_of_active_section): """ Returns and creates the rendering context for the section. """ def _compute_section_url(section_info, requested_child): """ Returns the section URL for the given section_info with the given child parameter. """ return "{url}?child={requested_child}".format( url=reverse( 'courseware_section', args=[unicode(self.course.id), section_info['chapter_url_name'], section_info['url_name']], ), requested_child=requested_child, ) section_context = { 'activate_block_id': self.request.GET.get('activate_block_id'), 'requested_child': self.request.GET.get("child"), } if previous_of_active_section: section_context['prev_url'] = _compute_section_url(previous_of_active_section, 'last') if next_of_active_section: section_context['next_url'] = _compute_section_url(next_of_active_section, 'first') return section_context def _handle_unexpected_error(self): """ Handle unexpected exceptions raised by View. 
""" # In production, don't want to let a 500 out for any reason if settings.DEBUG: raise log.exception( u"Error in index view: user=%s, effective_user=%s, course=%s, chapter=%s section=%s position=%s", self.real_user, self.effective_user, unicode(self.course_key), self.chapter_url_name, self.section_url_name, self.position, ) try: return render_to_response('courseware/courseware-error.html', { 'staff_access': self.is_staff, 'course': self.course }) except: # Let the exception propagate, relying on global config to # at least return a nice error message log.exception("Error while rendering courseware-error page") raise def render_accordion(request, course, table_of_contents): """ Returns the HTML that renders the navigation for the given course. Expects the table_of_contents to have data on each chapter and section, including which ones are active. """ context = dict( [ ('toc', table_of_contents), ('course_id', unicode(course.id)), ('csrf', csrf(request)['csrf_token']), ('due_date_display_format', course.due_date_display_format), ] + TEMPLATE_IMPORTS.items() ) return render_to_string('courseware/accordion.html', context) def save_child_position(seq_module, child_name): """ child_name: url_name of the child """ for position, child in enumerate(seq_module.get_display_items(), start=1): if child.location.name == child_name: # Only save if position changed if position != seq_module.position: seq_module.position = position # Save this new position to the underlying KeyValueStore seq_module.save() def save_positions_recursively_up(user, request, field_data_cache, xmodule, course=None): """ Recurses up the course tree starting from a leaf Saving the position property based on the previous node as it goes """ current_module = xmodule while current_module: parent_location = modulestore().get_parent_location(current_module.location) parent = None if parent_location: parent_descriptor = modulestore().get_item(parent_location) parent = get_module_for_descriptor( user, request, parent_descriptor, field_data_cache, current_module.location.course_key, course=course ) if parent and hasattr(parent, 'position'): save_child_position(parent, current_module.location.name) current_module = parent
waheedahmed/edx-platform
lms/djangoapps/courseware/views/index.py
Python
agpl-3.0
22,383
[ "VisIt" ]
07697068add4b6088833bf6ce540d5b31c944c445d07dc0ae5c359c4993b1cb7
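Editor's note on the record above: one pattern worth pulling out of the view is that every access check signals a redirect by raising the `Redirect` exception, and only the top-level `get()` turns it into an HTTP response. A framework-free sketch of that control flow, with stand-in names for the Django pieces:

class Redirect(Exception):
    def __init__(self, url):
        super(Redirect, self).__init__(url)
        self.url = url

def redirect_if_not_enrolled(enrolled):
    # Deep helpers request redirects without returning responses.
    if not enrolled:
        raise Redirect('/dashboard')

def get(enrolled):
    try:
        redirect_if_not_enrolled(enrolled)
        return 'render courseware'
    except Redirect as redirect_error:
        return 'redirect to ' + redirect_error.url  # single exit point

print(get(False))   # redirect to /dashboard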
# -*- coding: utf-8 -*- """\ Visualizing Parameters in a Modern Neural Network ================================================= """ from __future__ import (absolute_import, unicode_literals, print_function) print(__doc__) __author__ = 'Alex J. Champandard' import sys import time import logging import argparse import itertools import numpy from matplotlib import pyplot as plt from matplotlib.colors import ListedColormap from sklearn.cross_validation import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.datasets import make_moons, make_circles, make_classification # The neural network uses the `sknn` logger to output its information. import logging logging.basicConfig(format="%(message)s", level=logging.WARNING, stream=sys.stdout) from sknn.platform import gpu32 from sknn.backend import pylearn2 from sknn import mlp # All possible parameter options that can be plotted, separately or combined. PARAMETERS = { 'activation': ['Rectifier', 'Tanh', 'Sigmoid', 'Maxout'], 'alpha': [0.001, 0.005, 0.01, 0.05, 0.1, 0.2], 'dropout': [None, 0.25, 0.5, 0.75], 'iterations': [100, 200, 500, 1000], 'output': ['Softmax', 'Linear', 'Gaussian'], 'regularize': [None, 'L1', 'L2', 'dropout'], 'rules': ['sgd', 'momentum', 'nesterov', 'adadelta', 'rmsprop'], 'units': [16, 64, 128, 256], } # Grab command line information from the user. parser = argparse.ArgumentParser() parser.add_argument('-p','--params', nargs='+', help='Parameter to visualize.', choices=PARAMETERS.keys(), required=True) args = parser.parse_args() # Build a list of lists containing all parameter combinations to be tested. params = [] for p in sorted(PARAMETERS): values = PARAMETERS[p] # User requested to test against this parameter? if p in args.params: params.append(values) # Otherwise, use the first item of the list as default. else: params.append(values[:1]) # Build the classifiers for all possible combinations of parameters. names = [] classifiers = [] for (activation, alpha, dropout, iterations, output, regularize, rule, units) in itertools.product(*params): params = {'pieces': 2} if activation == "Maxout" else {} classifiers.append(mlp.Classifier( layers=[mlp.Layer(activation, units=units, **params), mlp.Layer(output)], random_state=1, n_iter=iterations, n_stable=iterations, regularize=regularize, dropout_rate=dropout, learning_rule=rule, learning_rate=alpha),) t = [] for k, v in zip(sorted(PARAMETERS), [activation, alpha, dropout, iterations, output, regularize, rule, units]): if k in args.params: t.append(str(v)) names.append(','.join(t)) # Create randomized datasets for visualizations, on three rows. seed = int(time.time()) X, y = make_classification(n_features=2, n_redundant=0, n_informative=2, random_state=0, n_clusters_per_class=1) rng = numpy.random.RandomState(seed+1) X += 2 * rng.uniform(size=X.shape) linearly_separable = (X, y) datasets = [make_moons(noise=0.3, random_state=seed+2), make_circles(noise=0.2, factor=0.5, random_state=seed+3), linearly_separable] # Create the figure containing plots for each of the classifiers. GRID_RESOLUTION = .02 figure = plt.figure(figsize=(18, 9)) i = 1 for X, y in datasets: # Preprocess dataset, split into training and test part. X = StandardScaler().fit_transform(X) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4) # Prepare coordinates of 2D grid to be visualized. 
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5 y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5 xx, yy = numpy.meshgrid(numpy.arange(x_min, x_max, GRID_RESOLUTION), numpy.arange(y_min, y_max, GRID_RESOLUTION)) # Plot the dataset on its own first. cm = plt.cm.get_cmap("PRGn") cm_bright = ListedColormap(['#FF00FF', '#00FF00']) ax = plt.subplot(len(datasets), len(classifiers) + 1, i) ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright) ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6) ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) i += 1 # Now iterate over every classifier... for name, clf in zip(names, classifiers): ax = plt.subplot(len(datasets), len(classifiers) + 1, i) clf.fit(X_train, y_train) score = clf.score(X_test, y_test) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. Z = clf.predict_proba(numpy.c_[xx.ravel(), yy.ravel()])[:, 1] # Put the result into a color plot Z = Z.reshape(xx.shape) ax.contourf(xx, yy, Z, cmap=cm, alpha=.8) # Plot also the training points ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright) # and testing points ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6) ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) ax.set_title(name) ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'), size=15, horizontalalignment='right', fontweight='bold') i += 1 sys.stdout.write('.'); sys.stdout.flush() sys.stdout.write('\n') figure.subplots_adjust(left=.02, right=.98) plt.show()
gticket/scikit-neuralnetwork
examples/plot_mlp.py
Python
bsd-3-clause
5,613
[ "Gaussian" ]
1a0d0eb7602e74c188bc140cdd1a45ccad2cc6c5637a7b14290479dbb4f0a256
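Editor's note on the record above: the interesting mechanism is the parameter expansion, where every key of PARAMETERS contributes either its full list (if requested on the command line) or only its first value, and `itertools.product` walks the result. A standalone sketch of that expansion with a trimmed-down grid:

import itertools

PARAMETERS = {'activation': ['Rectifier', 'Tanh'], 'units': [16, 64]}
requested = ['activation']          # as if passed via --params

grid = [values if name in requested else values[:1]
        for name, values in sorted(PARAMETERS.items())]

for activation, units in itertools.product(*grid):
    print(activation, units)        # Rectifier 16, then Tanh 16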
from rauth.service import OAuth2Service # Get a real consumer key & secret from: # https://github.com/settings/applications/new github = OAuth2Service( client_id='8ae4946cc5a9af76f6d7', client_secret='48aeb2b3c9226ae2b698eef4d7e6310473ccafa7', name='github', authorize_url='https://github.com/login/oauth/authorize', access_token_url='https://github.com/login/oauth/access_token', base_url='https://api.github.com/') print 'Visit this URL in your browser: ' + github.get_authorize_url() # This is a bit cumbersome, but you need to copy the code=something (just the # `something` part) out of the URL that's redirected to AFTER you login and # authorize the demo application code = raw_input('Enter code parameter (code=something) from URL: ') # create a dictionary for the data we'll post on the get_access_token request data = dict(code=code, redirect_uri='https://github.com/litl/rauth/') # retrieve the authenticated session session = github.get_auth_session(data=data) # make a request using the authenticated session user = session.get('user').json() print 'currently logged in as: ' + user['login']
arifgursel/rauth
examples/github-cli.py
Python
mit
1,136
[ "VisIt" ]
8905ee5995a51247f2d6f36b17d29f8fcb1a1d311e8992333cb93f78de88807f
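Editor's note on the record above: after `get_auth_session`, the returned object behaves like a requests-style session rooted at `base_url`, so further GitHub calls look the same as the `user` call shown. A small follow-up, in the record's own Python 2 style; the `user/repos` endpoint is an assumption, not part of the record:

# List the first few repositories of the authenticated user.
repos = session.get('user/repos').json()
for repo in repos[:5]:
    print 'repo: ' + repo['full_name']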
# $HeadURL$ __RCSID__ = "$Id$" from DIRAC.Core.DISET.private.Transports import PlainTransport, SSLTransport gProtocolDict = { 'dip' : { 'transport' : PlainTransport.PlainTransport, 'sanity' : PlainTransport.checkSanity, 'delegation' : PlainTransport.delegate }, 'dips' : { 'transport' : SSLTransport.SSLTransport, 'sanity' : SSLTransport.checkSanity, 'delegation' : SSLTransport.delegate } } gDefaultProtocol = 'dips'
Sbalbp/DIRAC
Core/DISET/private/Protocols.py
Python
gpl-3.0
636
[ "DIRAC" ]
c06abb881288003de82d4d09df1ca9590adb2e4439f02f0bc79208c64f771b74
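Editor's note on the record above: Protocols.py only declares the registry. A hypothetical lookup helper, in DIRAC's two-space style, showing how a caller might resolve a transport class from it; the fallback to `gDefaultProtocol` is an assumption:

def getTransport(protocol=None):
  """Resolve a transport class from gProtocolDict (illustrative only)."""
  protocol = protocol or gDefaultProtocol
  if protocol not in gProtocolDict:
    raise KeyError("Unknown DISET protocol: %s" % protocol)
  return gProtocolDict[protocol]['transport']

transportClass = getTransport('dips')   # -> SSLTransport.SSLTransport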
######################################################################## # # (C) 2013, James Cammarata <jcammarata@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ######################################################################## from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os.path import sys import yaml import time from collections import defaultdict from jinja2 import Environment import ansible.constants as C from ansible.cli import CLI from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.galaxy import Galaxy from ansible.galaxy.api import GalaxyAPI from ansible.galaxy.role import GalaxyRole from ansible.galaxy.login import GalaxyLogin from ansible.galaxy.token import GalaxyToken from ansible.playbook.role.requirement import RoleRequirement from ansible.utils.unicode import to_unicode try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class GalaxyCLI(CLI): SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup") def __init__(self, args): self.api = None self.galaxy = None super(GalaxyCLI, self).__init__(args) def parse(self): ''' create an options parser for bin/ansible ''' self.parser = CLI.base_parser( usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS), epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) self.set_action() # common self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination') self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.') # specific to actions if self.action == "delete": self.parser.set_usage("usage: %prog delete [options] github_user github_repo") elif self.action == "import": self.parser.set_usage("usage: %prog import [options] github_user github_repo") self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.') self.parser.add_option('--branch', dest='reference', help='The name of a branch to import. 
Defaults to the repository\'s default branch (usually master)') self.parser.add_option('--status', dest='check_status', action='store_true', default=False, help='Check the status of the most recent import request for given github_user/github_repo.') elif self.action == "info": self.parser.set_usage("usage: %prog info [options] role_name[,version]") elif self.action == "init": self.parser.set_usage("usage: %prog init [options] role_name") self.parser.add_option('-p', '--init-path', dest='init_path', default="./", help='The path in which the skeleton role will be created. The default is the current working directory.') elif self.action == "install": self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]") self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, help='Ignore errors and continue with the next specified role.') self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies') self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported') elif self.action == "remove": self.parser.set_usage("usage: %prog remove role1 role2 ...") elif self.action == "list": self.parser.set_usage("usage: %prog list [role_name]") elif self.action == "login": self.parser.set_usage("usage: %prog login [options]") self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.') elif self.action == "search": self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]") self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by') self.parser.add_option('--galaxy-tags', dest='tags', help='list of galaxy tags to filter by') self.parser.add_option('--author', dest='author', help='GitHub username') elif self.action == "setup": self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret") self.parser.add_option('--remove', dest='remove_id', default=None, help='Remove the integration matching the provided ID value. Use --list to see ID values.') self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.') # options that apply to more than one action if self.action in ['init', 'info']: self.parser.add_option( '--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles") if not self.action in ("delete","import","init","login","setup"): # NOTE: while the option type=str, the default is a list, and the # callback will set the value to a list. self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.expand_paths, type=str, default=C.DEFAULT_ROLES_PATH, help='The path to the directory containing your roles. 
The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured)') if self.action in ("init","install"): self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role') self.options, self.args =self.parser.parse_args() display.verbosity = self.options.verbosity self.galaxy = Galaxy(self.options) return True def run(self): super(GalaxyCLI, self).run() self.api = GalaxyAPI(self.galaxy) self.execute() def exit_without_ignore(self, rc=1): """ Exits with the specified return code unless the option --ignore-errors was specified """ if not self.get_opt("ignore_errors", False): raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.') def _display_role_info(self, role_info): text = [u"", u"Role: %s" % to_unicode(role_info['name'])] text.append(u"\tdescription: %s" % role_info.get('description', '')) for k in sorted(role_info.keys()): if k in self.SKIP_INFO_KEYS: continue if isinstance(role_info[k], dict): text += "\t%s: \n" % (k) text.append(u"\t%s:" % (k)) for key in sorted(role_info[k].keys()): if key in self.SKIP_INFO_KEYS: continue text.append(u"\t\t%s: %s" % (key, role_info[k][key])) else: text.append(u"\t%s: %s" % (k, role_info[k])) return u'\n'.join(text) ############################ # execute actions ############################ def execute_init(self): """ Executes the init action, which creates the skeleton framework of a role that complies with the galaxy metadata format. """ init_path = self.get_opt('init_path', './') force = self.get_opt('force', False) offline = self.get_opt('offline', False) role_name = self.args.pop(0).strip() if self.args else None if not role_name: raise AnsibleOptionsError("- no role name specified for init") role_path = os.path.join(init_path, role_name) if os.path.exists(role_path): if os.path.isfile(role_path): raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path) elif not force: raise AnsibleError("- the directory %s already exists." "you can use --force to re-initialize this directory,\n" "however it will reset any main.yml files that may have\n" "been modified there already." 
% role_path) # create default README.md if not os.path.exists(role_path): os.makedirs(role_path) readme_path = os.path.join(role_path, "README.md") f = open(readme_path, "wb") f.write(self.galaxy.default_readme) f.close() # create default .travis.yml travis = Environment().from_string(self.galaxy.default_travis).render() f = open(os.path.join(role_path, '.travis.yml'), 'w') f.write(travis) f.close() for dir in GalaxyRole.ROLE_DIRS: dir_path = os.path.join(init_path, role_name, dir) main_yml_path = os.path.join(dir_path, 'main.yml') # create the directory if it doesn't exist already if not os.path.exists(dir_path): os.makedirs(dir_path) # now create the main.yml file for that directory if dir == "meta": # create a skeleton meta/main.yml with a valid galaxy_info # datastructure in place, plus with all of the available # platforms included (but commented out), the galaxy_tags # list, and the dependencies section platforms = [] if not offline: platforms = self.api.get_list("platforms") or [] # group the list of platforms from the api based # on their names, with the release field being # appended to a list of versions platform_groups = defaultdict(list) for platform in platforms: platform_groups[platform['name']].append(platform['release']) platform_groups[platform['name']].sort() inject = dict( author = 'your name', description = 'your description', company = 'your company (optional)', license = 'license (GPLv2, CC-BY, etc)', issue_tracker_url = 'http://example.com/issue/tracker', min_ansible_version = '1.2', platforms = platform_groups, ) rendered_meta = Environment().from_string(self.galaxy.default_meta).render(inject) f = open(main_yml_path, 'w') f.write(rendered_meta) f.close() pass elif dir == "tests": # create tests/test.yml inject = dict( role_name = role_name ) playbook = Environment().from_string(self.galaxy.default_test).render(inject) f = open(os.path.join(dir_path, 'test.yml'), 'w') f.write(playbook) f.close() # create tests/inventory f = open(os.path.join(dir_path, 'inventory'), 'w') f.write('localhost') f.close() elif dir not in ('files','templates'): # just write a (mostly) empty YAML file for main.yml f = open(main_yml_path, 'w') f.write('---\n# %s file for %s\n' % (dir,role_name)) f.close() display.display("- %s was created successfully" % role_name) def execute_info(self): """ Executes the info action. This action prints out detailed information about an installed role as well as info available from the galaxy API. """ if len(self.args) == 0: # the user needs to specify a role raise AnsibleOptionsError("- you must specify a user/role name") roles_path = self.get_opt("roles_path") data = '' for role in self.args: role_info = {'path': roles_path} gr = GalaxyRole(self.galaxy, role) install_info = gr.install_info if install_info: if 'version' in install_info: install_info['intalled_version'] = install_info['version'] del install_info['version'] role_info.update(install_info) remote_data = False if not self.options.offline: remote_data = self.api.lookup_role_by_name(role, False) if remote_data: role_info.update(remote_data) if gr.metadata: role_info.update(gr.metadata) req = RoleRequirement() role_spec= req.role_yaml_parse({'role': role}) if role_spec: role_info.update(role_spec) data = self._display_role_info(role_info) ### FIXME: This is broken in both 1.9 and 2.0 as # _display_role_info() always returns something if not data: data = u"\n- the role %s was not found" % role self.pager(data) def execute_install(self): """ Executes the installation action. 
The args list contains the roles to be installed, unless -f was specified. The list of roles can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file. """ role_file = self.get_opt("role_file", None) if len(self.args) == 0 and role_file is None: # the user needs to specify one of either --role-file # or specify a single user/role name raise AnsibleOptionsError("- you must specify a user/role name or a roles file") elif len(self.args) == 1 and role_file is not None: # using a role file is mutually exclusive of specifying # the role name on the command line raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both") no_deps = self.get_opt("no_deps", False) force = self.get_opt('force', False) roles_left = [] if role_file: try: f = open(role_file, 'r') if role_file.endswith('.yaml') or role_file.endswith('.yml'): try: required_roles = yaml.safe_load(f.read()) except Exception as e: raise AnsibleError("Unable to load data from the requirements file: %s" % role_file) if required_roles is None: raise AnsibleError("No roles found in file: %s" % role_file) for role in required_roles: role = RoleRequirement.role_yaml_parse(role) display.vvv('found role %s in yaml file' % str(role)) if 'name' not in role and 'scm' not in role: raise AnsibleError("Must specify name or src for role") roles_left.append(GalaxyRole(self.galaxy, **role)) else: display.deprecated("going forward only the yaml format will be supported") # roles listed in a file, one per line for rline in f.readlines(): if rline.startswith("#") or rline.strip() == '': continue display.debug('found role %s in text file' % str(rline)) role = RoleRequirement.role_yaml_parse(rline.strip()) roles_left.append(GalaxyRole(self.galaxy, **role)) f.close() except (IOError, OSError) as e: display.error('Unable to open %s: %s' % (role_file, str(e))) else: # roles were specified directly, so we'll just go out grab them # (and their dependencies, unless the user doesn't want us to). for rname in self.args: role = RoleRequirement.role_yaml_parse(rname.strip()) roles_left.append(GalaxyRole(self.galaxy, **role)) for role in roles_left: display.vvv('Installing role %s ' % role.name) # query the galaxy API for the role data if role.install_info is not None and not force: display.display('- %s is already installed, skipping.' % role.name) continue try: installed = role.install() except AnsibleError as e: display.warning("- %s was NOT installed successfully: %s " % (role.name, str(e))) self.exit_without_ignore() continue # install dependencies, if we want them if not no_deps and installed: role_dependencies = role.metadata.get('dependencies') or [] for dep in role_dependencies: display.debug('Installing dep %s' % dep) dep_req = RoleRequirement() dep_info = dep_req.role_yaml_parse(dep) dep_role = GalaxyRole(self.galaxy, **dep_info) if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None: # we know we can skip this, as it's not going to # be found on galaxy.ansible.com continue if dep_role.install_info is None or force: if dep_role not in roles_left: display.display('- adding dependency: %s' % dep_role.name) roles_left.append(dep_role) else: display.display('- dependency %s already pending installation.' % dep_role.name) else: display.display('- dependency %s is already installed, skipping.' % dep_role.name) if not installed: display.warning("- %s was NOT installed successfully." 
% role.name) self.exit_without_ignore() return 0 def execute_remove(self): """ Executes the remove action. The args list contains the list of roles to be removed. This list can contain more than one role. """ if len(self.args) == 0: raise AnsibleOptionsError('- you must specify at least one role to remove.') for role_name in self.args: role = GalaxyRole(self.galaxy, role_name) try: if role.remove(): display.display('- successfully removed %s' % role_name) else: display.display('- %s is not installed, skipping.' % role_name) except Exception as e: raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e))) return 0 def execute_list(self): """ Executes the list action. The args list can contain zero or one role. If one is specified, only that role will be shown, otherwise all roles in the specified directory will be shown. """ if len(self.args) > 1: raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list") if len(self.args) == 1: # show only the requested role, if it exists name = self.args.pop() gr = GalaxyRole(self.galaxy, name) if gr.metadata: install_info = gr.install_info version = None if install_info: version = install_info.get("version", None) if not version: version = "(unknown version)" # show some more info about single roles here display.display("- %s, %s" % (name, version)) else: display.display("- the role %s was not found" % name) else: # show all valid roles in the roles_path directory roles_path = self.get_opt('roles_path') for path in roles_path: role_path = os.path.expanduser(path) if not os.path.exists(role_path): raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % role_path) elif not os.path.isdir(role_path): raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % role_path) path_files = os.listdir(role_path) for path_file in path_files: gr = GalaxyRole(self.galaxy, path_file) if gr.metadata: install_info = gr.install_info version = None if install_info: version = install_info.get("version", None) if not version: version = "(unknown version)" display.display("- %s, %s" % (path_file, version)) return 0 def execute_search(self): page_size = 1000 search = None if len(self.args): terms = [] for i in range(len(self.args)): terms.append(self.args.pop()) search = '+'.join(terms[::-1]) if not search and not self.options.platforms and not self.options.tags and not self.options.author: raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.") response = self.api.search_roles(search, platforms=self.options.platforms, tags=self.options.tags, author=self.options.author, page_size=page_size) if response['count'] == 0: display.display("No roles match your search.", color=C.COLOR_ERROR) return True data = [u''] if response['count'] > page_size: data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size)) else: data.append(u"Found %d roles matching your search:" % response['count']) max_len = [] for role in response['results']: max_len.append(len(role['username'] + '.'
+ role['name'])) name_len = max(max_len) format_str = u" %%-%ds %%s" % name_len data.append(u'') data.append(format_str % (u"Name", u"Description")) data.append(format_str % (u"----", u"-----------")) for role in response['results']: data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description'])) data = u'\n'.join(data) self.pager(data) return True def execute_login(self): """ Verify the user's identity via GitHub and retrieve an auth token from Galaxy. """ # Authenticate with github and retrieve a token if self.options.token is None: login = GalaxyLogin(self.galaxy) github_token = login.create_github_token() else: github_token = self.options.token galaxy_response = self.api.authenticate(github_token) if self.options.token is None: # Remove the token we created login.remove_github_token() # Store the Galaxy token token = GalaxyToken() token.set(galaxy_response['token']) display.display("Successfully logged into Galaxy as %s" % galaxy_response['username']) return 0 def execute_import(self): """ Import a role into Galaxy """ colors = { 'INFO': 'normal', 'WARNING': C.COLOR_WARN, 'ERROR': C.COLOR_ERROR, 'SUCCESS': C.COLOR_OK, 'FAILED': C.COLOR_ERROR, } if len(self.args) < 2: raise AnsibleError("Expected a github_username and github_repository. Use --help.") github_repo = self.args.pop() github_user = self.args.pop() if self.options.check_status: task = self.api.get_import_task(github_user=github_user, github_repo=github_repo) else: # Submit an import request task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference) if len(task) > 1: # found multiple roles associated with github_user/github_repo display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user,github_repo), color='yellow') display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED) for t in task: display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color=C.COLOR_CHANGED) display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo), color=C.COLOR_CHANGED) return 0 # found a single role as expected display.display("Successfully submitted import request %d" % task[0]['id']) if not self.options.wait: display.display("Role name: %s" % task[0]['summary_fields']['role']['name']) display.display("Repo: %s/%s" % (task[0]['github_user'],task[0]['github_repo'])) if self.options.check_status or self.options.wait: # Get the status of the import msg_list = [] finished = False while not finished: task = self.api.get_import_task(task_id=task[0]['id']) for msg in task[0]['summary_fields']['task_messages']: if msg['id'] not in msg_list: display.display(msg['message_text'], color=colors[msg['message_type']]) msg_list.append(msg['id']) if task[0]['state'] in ['SUCCESS', 'FAILED']: finished = True else: time.sleep(10) return 0 def execute_setup(self): """ Set up an integration from GitHub or Travis """ if self.options.setup_list: # List existing integration secrets secrets = self.api.list_secrets() if len(secrets) == 0: # None found display.display("No integrations found.") return 0 display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK) display.display("---------- ---------- ----------", color=C.COLOR_OK) for secret in secrets: display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'], secret['github_repo']),color=C.COLOR_OK) return 0 if
self.options.remove_id: # Remove a secret self.api.remove_secret(self.options.remove_id) display.display("Secret removed. Integrations using this secret will no longer work.", color=C.COLOR_OK) return 0 if len(self.args) < 4: raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret") secret = self.args.pop() github_repo = self.args.pop() github_user = self.args.pop() source = self.args.pop() resp = self.api.add_secret(source, github_user, github_repo, secret) display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo'])) return 0 def execute_delete(self): """ Delete a role from galaxy.ansible.com """ if len(self.args) < 2: raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo") github_repo = self.args.pop() github_user = self.args.pop() resp = self.api.delete_role(github_user, github_repo) if len(resp['deleted_roles']) > 1: display.display("Deleted the following roles:") display.display("ID User Name") display.display("------ --------------- ----------") for role in resp['deleted_roles']: display.display("%-8s %-15s %s" % (role.id,role.namespace,role.name)) display.display(resp['status']) return True
levenlabs/ansible
lib/ansible/cli/galaxy.py
Python
gpl-3.0
30,101
[ "Galaxy" ]
e28fcc90b896220d65e26a352bc7eb4fc0bfa2881bb8318a2f233b4aebbe8214
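A note on execute_install above: it appends to roles_left while iterating it, which is what lets newly discovered dependencies join the same install pass. A minimal, self-contained sketch of that growing-queue pattern (the role names and the deps map here are hypothetical, standing in for role.metadata['dependencies']):

deps = {"acme.web": ["acme.common"], "acme.common": [], "acme.db": ["acme.common"]}

roles_left = ["acme.web", "acme.db"]
installed = []
for role in roles_left:            # the list grows while we iterate, as in execute_install
    if role in installed:
        continue
    installed.append(role)
    for dep in deps.get(role, []):
        if dep not in roles_left:  # mirrors the "already pending installation" check
            roles_left.append(dep)
print(installed)                   # ['acme.web', 'acme.db', 'acme.common']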
import pytest import UWG import os import math import pprint from test_base import TestBase class TestSimParam(TestBase): def test_uwg_simparam_matlab_init(self): """ Matlab value comparison for simparam initialization in main uwg """ # Initialize UWG from default initialize.uwg for 31 days simulation # Jan 1 00:00 - Feb 1 00:00 # Month,1, # starting month (1-12) # Day,1, # starting day (1-31) # nDay,31, # number of days to run simulation # dtSim,300, # simulation time step (s) # dtWeather,3600, # weather time step (s) self.setup_uwg_integration(uwg_param_file="initialize_singapore.uwg") self.uwg.read_epw() self.uwg.read_input() self.uwg.set_input() # open matlab ref file uwg_matlab_val = self.setup_open_matlab_ref("matlab_simparam","matlab_ref_simparam_init.txt") uwg_python_val = [ self.uwg.simTime.dt, # uwg time simulation time step self.uwg.simTime.timeForcing, # weather data timestep self.uwg.simTime.month, self.uwg.simTime.day, self.uwg.simTime.days, self.uwg.simTime.timePrint, # weather data timestep self.uwg.simTime.timeDay, # how many times weather senses in a day self.uwg.simTime.timeSim, # how many steps in weather data simulation self.uwg.simTime.timeMax, # total seconds in simulation days self.uwg.simTime.nt, # total number of timesteps for uwg simulation self.uwg.simTime.julian, self.uwg.simTime.timeInitial, # sensor data in epw for initial time based on julian day & timesteps self.uwg.simTime.timeFinal, # sensor data in epw for final time based on julian day & timesteps self.uwg.simTime.secDay, # current seconds in day self.uwg.simTime.hourDay ] # matlab ref checking assert len(uwg_matlab_val) == len(uwg_python_val) for i in xrange(len(uwg_matlab_val)): #print uwg_python_val[i], uwg_matlab_val[i] assert uwg_python_val[i] == pytest.approx(uwg_matlab_val[i], abs=1e-15), "error at index={}".format(i) def test_uwg_simparam_matlab_update_date(self): """ Matlab value comparison for simparam UpdateDate function in main uwg """ # Initialize UWG from initialize_simparam.uwg for 150 day simulation # Mar 15 00:00 - Aug 12th, 00:00 # Month,3, # starting month (1-12) # Day,15, # starting day (1-31) # nDay,150, # number of days to run simulation # dtSim,300, # simulation time step (s) # dtWeather,3600, # weather time step (s) uwg_test_param_dir = os.path.join(self.DIR_MATLAB_PATH,"matlab_simparam") self.setup_uwg_integration(uwg_param_file="initialize_simparam.uwg",uwg_param_dir=uwg_test_param_dir) self.uwg.read_epw() self.uwg.read_input() self.uwg.set_input() self.uwg.hvac_autosize() self.uwg.simulate() # open matlab ref file uwg_matlab_val = self.setup_open_matlab_ref("matlab_simparam","matlab_ref_simparam_update_date.txt") uwg_python_val = [ self.uwg.simTime.secDay, self.uwg.simTime.day, self.uwg.simTime.julian, self.uwg.simTime.month, self.uwg.simTime.day, self.uwg.simTime.hourDay, self.uwg.ceil_time_step+1 # Add 1 to keep consistent with matlab list convention ] # matlab ref checking assert len(uwg_matlab_val) == len(uwg_python_val) for i in xrange(len(uwg_matlab_val)): #print uwg_python_val[i], uwg_matlab_val[i] assert uwg_python_val[i] == pytest.approx(uwg_matlab_val[i], abs=1e-15), "error at index={}".format(i) def test_simparam(self): """ Tests simparam.py""" dtSim = 300 # Sim time step dtWeather = 3600 # Weather data time-step MONTH = 7 # Begin month DAY = 30 # Begin day of the month NUM_DAYS = 7 # Number of days of simulation simTime = UWG.SimParam(dtSim,dtWeather,MONTH,DAY,NUM_DAYS) # Simulation Parameters tests assert simTime.timeSim == pytest.approx(168, abs=1e-6) assert
simTime.timeMax == pytest.approx(604800,abs=1e-6) assert simTime.nt == pytest.approx(2017,abs=1e-6) # Test UpdateDate() for < 1 hr for i in xrange(11): #11 * 300 = 3300 seconds = 55min simTime.UpdateDate() assert simTime.secDay == pytest.approx(3300., abs=1e-6) assert simTime.day == pytest.approx(30., abs=1e-6) assert simTime.hourDay == pytest.approx(0., abs=1e-6) # for == 1 hr simTime.UpdateDate() assert simTime.secDay == pytest.approx(3600., abs=1e-6) assert simTime.hourDay == pytest.approx(1., abs=1e-6) # for > 24hr for i in xrange(23 * 12): simTime.UpdateDate() assert simTime.secDay == pytest.approx(0., abs=1e-6) assert simTime.day == pytest.approx(31., abs=1e-6) assert simTime.hourDay == pytest.approx(0., abs=1e-6) # for == 1 month for i in xrange(24 * 12): simTime.UpdateDate() assert simTime.secDay == pytest.approx(0., abs=1e-6) assert simTime.day == pytest.approx(1., abs=1e-6) assert simTime.hourDay == pytest.approx(0., abs=1e-6) assert simTime.month == pytest.approx(8, abs=1e-6) # for + 1 month for i in xrange(24 * 12 * 31): simTime.UpdateDate() assert simTime.secDay == pytest.approx(0., abs=1e-6) assert simTime.day == pytest.approx(1., abs=1e-6) assert simTime.hourDay == pytest.approx(0., abs=1e-6) assert simTime.month == pytest.approx(9, abs=1e-6) if __name__ == "__main__": tsp = TestSimParam() tsp.test_simparam() tsp.test_uwg_simparam_matlab_init() tsp.test_uwg_simparam_matlab_update_date()
saeranv/UWG_Python
tests/test_simparam.py
Python
gpl-3.0
6,096
[ "EPW" ]
33bda6e568f2bc4d046f0009fed06e0089cd2df69b9ffc4cdff8e023c36f4d1b
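The constants asserted in test_simparam above follow directly from the configured inputs; a quick arithmetic check, assuming nt counts fence-post timesteps (number of steps plus one):

dtSim, num_days = 300, 7           # simulation step (s) and run length (days)
timeSim = num_days * 24            # 168 simulated hours
timeMax = num_days * 24 * 3600     # 604800 seconds in the run
nt = timeMax // dtSim + 1          # 2017 timesteps, counting t=0
assert (timeSim, timeMax, nt) == (168, 604800, 2017)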
from matplotlib import pyplot from math import cos, sin, atan class Neuron(): def __init__(self, x, y): self.x = x self.y = y def draw(self, neuron_radius): circle = pyplot.Circle((self.x, self.y), radius=neuron_radius, fill=False) pyplot.gca().add_patch(circle) class Layer(): def __init__(self, network, number_of_neurons, number_of_neurons_in_widest_layer): self.vertical_distance_between_layers = 6 self.horizontal_distance_between_neurons = 2 self.neuron_radius = 0.5 self.number_of_neurons_in_widest_layer = number_of_neurons_in_widest_layer self.previous_layer = self.__get_previous_layer(network) self.y = self.__calculate_layer_y_position() self.neurons = self.__initialise_neurons(number_of_neurons) def __initialise_neurons(self, number_of_neurons): neurons = [] x = self.__calculate_left_margin_so_layer_is_centered(number_of_neurons) for iteration in range(number_of_neurons): neuron = Neuron(x, self.y) neurons.append(neuron) x += self.horizontal_distance_between_neurons return neurons def __calculate_left_margin_so_layer_is_centered(self, number_of_neurons): return self.horizontal_distance_between_neurons * (self.number_of_neurons_in_widest_layer - number_of_neurons) / 2 def __calculate_layer_y_position(self): if self.previous_layer: return self.previous_layer.y + self.vertical_distance_between_layers else: return 0 def __get_previous_layer(self, network): if len(network.layers) > 0: return network.layers[-1] else: return None def __line_between_two_neurons(self, neuron1, neuron2): angle = atan((neuron2.x - neuron1.x) / float(neuron2.y - neuron1.y)) x_adjustment = self.neuron_radius * sin(angle) y_adjustment = self.neuron_radius * cos(angle) line = pyplot.Line2D((neuron1.x - x_adjustment, neuron2.x + x_adjustment), (neuron1.y - y_adjustment, neuron2.y + y_adjustment)) pyplot.gca().add_line(line) def draw(self, layerType=0): for neuron in self.neurons: neuron.draw( self.neuron_radius ) if self.previous_layer: for previous_layer_neuron in self.previous_layer.neurons: self.__line_between_two_neurons(neuron, previous_layer_neuron) # write Text x_text = self.number_of_neurons_in_widest_layer * self.horizontal_distance_between_neurons if layerType == 0: pyplot.text(x_text, self.y, 'Input Layer', fontsize = 12) elif layerType == -1: pyplot.text(x_text, self.y, 'Output Layer', fontsize = 12) else: pyplot.text(x_text, self.y, 'Hidden Layer {}'.format(layerType), fontsize=12) class NeuralNetwork(): def __init__(self, number_of_neurons_in_widest_layer): self.number_of_neurons_in_widest_layer = number_of_neurons_in_widest_layer self.layers = [] self.layertype = 0 def add_layer(self, number_of_neurons ): layer = Layer(self, number_of_neurons, self.number_of_neurons_in_widest_layer) self.layers.append(layer) def draw(self): pyplot.figure() for i in range( len(self.layers) ): layer = self.layers[i] if i == len(self.layers)-1: i = -1 layer.draw( i ) pyplot.axis('scaled') pyplot.axis('off') #pyplot.title( 'Neural Network architecture', fontsize=15 ) pyplot.show() class DrawNN(): def __init__( self, neural_network ): self.neural_network = neural_network def draw( self ): widest_layer = max( self.neural_network ) network = NeuralNetwork( widest_layer ) for l in self.neural_network: network.add_layer(l) network.draw() network = DrawNN( [4,7,7,1] ) network.draw()
brenoarosa/projeto_final
plotters/neural_net.py
Python
mit
4,008
[ "NEURON" ]
57206b9a71d635ee948e4764293e90e3f36fa8ce6da85bd4284094dea1a8108f
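The edge trimming in __line_between_two_neurons above is plain polar geometry: the segment between two neuron centres is shortened by one radius at each end so lines meet the circles rather than their centres. A standalone check of that offset (the coordinates and radius below are arbitrary):

from math import atan, sin, cos, hypot

r = 0.5                                       # neuron_radius
(x1, y1), (x2, y2) = (0.0, 0.0), (2.0, 6.0)   # centres in adjacent layers
angle = atan((x2 - x1) / (y2 - y1))           # layers never share a y, so no zero division
dx, dy = r * sin(angle), r * cos(angle)
assert abs(hypot(dx, dy) - r) < 1e-12         # trimmed endpoint sits one radius from the centre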
# --- # jupyter: # jupytext: # formats: ipynb,.pct.py:percent # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.3.3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %% [markdown] # # Bayesian Gaussian process latent variable model (Bayesian GPLVM) # This notebook shows how to use the Bayesian GPLVM model. This is an unsupervised learning method usually used for dimensionality reduction. For an in-depth overview of GPLVMs, see **[1, 2]**. # %% import gpflow import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from gpflow.utilities import ops, print_summary from gpflow.config import set_default_float, default_float, set_default_summary_fmt from gpflow.ci_utils import ci_niter set_default_float(np.float64) set_default_summary_fmt("notebook") # %matplotlib inline # %% [markdown] # ## Data # We are using the "three phase oil flow" dataset used initially for demonstrating the Generative Topographic mapping from **[3]**. # %% data = np.load("./data/three_phase_oil_flow.npz") # %% [markdown] # Following the GPflow notation we assume this dataset has a shape of `[num_data, output_dim]` # %% Y = tf.convert_to_tensor(data["Y"], dtype=default_float()) # %% [markdown] # Integer in $[0, 2]$ indicating to which class the data point belongs (shape `[num_data,]`). Not used for model fitting, only for plotting afterwards. # %% labels = tf.convert_to_tensor(data["labels"]) # %% print("Number of points: {} and Number of dimensions: {}".format(Y.shape[0], Y.shape[1])) # %% [markdown] # ## Model construction # # We start by initializing the required variables: # %% latent_dim = 2 # number of latent dimensions num_inducing = 20 # number of inducing pts num_data = Y.shape[0] # number of data points # %% [markdown] # Initialize via PCA: # %% X_mean_init = ops.pca_reduce(Y, latent_dim) X_var_init = tf.ones((num_data, latent_dim), dtype=default_float()) # %% [markdown] # Pick inducing inputs randomly from dataset initialization: # %% np.random.seed(1) # for reproducibility inducing_variable = tf.convert_to_tensor( np.random.permutation(X_mean_init.numpy())[:num_inducing], dtype=default_float() ) # %% [markdown] # We construct a Squared Exponential (SE) kernel operating on the two-dimensional latent space. # The `ARD` parameter stands for Automatic Relevance Determination, which in practice means that # we learn a different lengthscale for each of the input dimensions. See [Manipulating kernels](../advanced/kernels.ipynb) for more information. # %% lengthscales = tf.convert_to_tensor([1.0] * latent_dim, dtype=default_float()) kernel = gpflow.kernels.RBF(lengthscales=lengthscales) # %% [markdown] # We have all the necessary ingredients to construct the model. GPflow contains an implementation of the Bayesian GPLVM: # %% gplvm = gpflow.models.BayesianGPLVM( Y, X_data_mean=X_mean_init, X_data_var=X_var_init, kernel=kernel, inducing_variable=inducing_variable, ) # Instead of passing an inducing_variable directly, we can also set the num_inducing_variables argument to an integer, which will randomly pick from the data. # %% [markdown] # We change the default likelihood variance, which is 1, to 0.01. # %% gplvm.likelihood.variance.assign(0.01) # %% [markdown] # Next we optimize the created model. Given that this model has a deterministic evidence lower bound (ELBO), we can use SciPy's BFGS optimizer.
# %% opt = gpflow.optimizers.Scipy() maxiter = ci_niter(1000) _ = opt.minimize( gplvm.training_loss, method="BFGS", variables=gplvm.trainable_variables, options=dict(maxiter=maxiter), ) # %% [markdown] # ## Model analysis # GPflow allows you to inspect the learned model hyperparameters. # %% print_summary(gplvm) # %% [markdown] # ## Plotting vs. Principal Component Analysis (PCA) # The reduction of the dimensionality of the dataset to two dimensions allows us to visualize the learned manifold. # We compare the Bayesian GPLVM's latent space with that of deterministic PCA. # %% X_pca = ops.pca_reduce(Y, latent_dim).numpy() gplvm_X_mean = gplvm.X_data_mean.numpy() f, ax = plt.subplots(1, 2, figsize=(10, 6)) for i in np.unique(labels): ax[0].scatter(X_pca[labels == i, 0], X_pca[labels == i, 1], label=i) ax[1].scatter(gplvm_X_mean[labels == i, 0], gplvm_X_mean[labels == i, 1], label=i) ax[0].set_title("PCA") ax[1].set_title("Bayesian GPLVM") # %% # %% [markdown] # ## References # \[1\] Lawrence, Neil D. 'Gaussian process latent variable models for visualization of high dimensional data'. *Advances in Neural Information Processing Systems*. 2004. # # \[2\] Titsias, Michalis, and Neil D. Lawrence. 'Bayesian Gaussian process latent variable model'. *Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics*. 2010. # # \[3\] Bishop, Christopher M., and Gwilym D. James. 'Analysis of multiphase flows using dual-energy gamma densitometry and neural networks'. *Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment* 327.2-3 (1993): 580-593.
GPflow/GPflow
doc/source/notebooks/basics/GPLVM.pct.py
Python
apache-2.0
5,263
[ "Gaussian" ]
9f870aa3741923abfe6e72296192f5171d1ecaf091cec54521a5e31d3ac3dfb8
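One practical follow-up to the notebook above: because the kernel is ARD, the fitted lengthscales indicate how relevant each latent dimension is (a large lengthscale means the outputs barely vary along that axis). A sketch of that inspection, assuming the optimised gplvm object from the notebook is still in scope:

lengthscales = gplvm.kernel.lengthscales.numpy()  # one value per latent dimension
relevance = 1.0 / lengthscales                    # common ARD heuristic: inverse lengthscale
for dim, rel in enumerate(relevance):
    print("latent dim {}: relevance {:.3f}".format(dim, rel))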
''' Illustrates the impact of viscosity on the asymptotic variances and convergence rate spectra. (Reproduces figure 3 of the article, section 3-b) ''' import numpy as np import matplotlib.pyplot as plt from numpy import pi from DM93 import Uncorrelated, Foar, Soar, Gaussian from DM93 import spVarStationary, analSpVar, convRateAssymp #==================================================================== #===| setup and configuration |====================================== execfile('config.py') # -- viscosity nuFactors = [0, .0001, .001,] # -- Correlations obsCorr = Uncorrelated(grid) fctLc = grid.L/20. fctCorr = Soar(grid, fctLc) #==================================================================== #===| computations |================================================= # -- correlation power spectra r2 = obsCorr.powSpecTh() q2 = fctCorr.powSpecTh() f2Plus = dict() cPlus = dict() for nuF in nuFactors: nu = nuF/dt*(2.*pi*grid.L)**2 # -- asymptotic variance spectra (forecast and analysis respectively) f2Plus[nuF] = spVarStationary(grid, r2, q2, dt=dt, nu=nu)[0] # -- asymptotic convergence rate spectrum cPlus[nuF] = convRateAssymp(grid, r2, q2, dt=dt, nu=nu) #==================================================================== #===| plots |======================================================== fig = plt.figure() axVar = plt.subplot(211) axConv = plt.subplot(212) nuFStr = r'$4\pi^2\nu\Delta t/L^2=$' for nuF in nuFactors: axVar.plot(grid.halfK, f2Plus[nuF], label='%s %.0e'%(nuFStr, nuF)) axConv.plot(grid.halfK, cPlus[nuF], label='%s %.0e'%(nuFStr, nuF)) axVar.set_xscale('log') axVar.set_yscale('log') axVar.set_ylim(bottom=1e-6) axConv.set_xscale('log') axConv.set_yscale('log') axConv.set_ylim(bottom=1e-6) axVar.set_xticks(()) axConv.set_xlabel('wavenumber $k$') axVar.set_title('Asymptotic variance spectra') axConv.set_title('Asymptotic convergence spectra') axVar.legend(loc='best') plt.show()
martndj/DaleyMenard1993
viscosity.py
Python
gpl-3.0
1,987
[ "Gaussian" ]
bd9c8fd560db12fcf7c2341a45c4951e40fed54f95bb0dbad773292674617aa6
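The scaling nu = nuF/dt*(2*pi*L)**2 used above converts the dimensionless factors in nuFactors into a physical viscosity. config.py (loaded via execfile) is not part of the record, so dt and L below are placeholders, shown only to make the arithmetic concrete:

from math import pi

dt, L = 3600.0, 1.0e6          # placeholders: real values come from config.py
for nuF in (0, 1e-4, 1e-3):    # the nuFactors swept in the script
    nu = nuF / dt * (2.0 * pi * L) ** 2
    print("nuF={:.0e} -> nu={:.3e}".format(nuF, nu))  # nu grows quadratically with L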
"Tools for working with parso ASTs." from abc import ABC, abstractmethod import io import sys import parso.python.tree import parso.tree class Visitor(ABC): """AST visitor for parso trees. This supports both simple traversal as well as editing of the tree. """ def walk(self, node): "Walk a parse tree, calling visit for each node." node = self.visit(node) if node is None: return None if isinstance(node, parso.tree.BaseNode): walked = map(self.walk, node.children) node.children = [child for child in walked if child is not None] return node @abstractmethod def visit(self, node): """Called for each node in the walk. This should return a node that will replace the node argument in the AST. This can be the node argument itself, a new node, or None. If None is returned, then the node is removed from the tree. Args: node: The node currently being visited. Returns: A node or `None`. """ def ast_nodes(node): """Iterable of all nodes in a tree. Args: node: The top node in a parso tree to iterate. Yields: All of the nodes in the tree. """ yield node if isinstance(node, parso.tree.BaseNode): for child in node.children: yield from ast_nodes(child) def get_ast(module_path): """Get the AST for the code in a file. Args: module_path: pathlib.Path to the file containing the code. Returns: The parso parse tree for the code in `module_path`. """ with module_path.open(mode="rt", encoding="utf-8") as handle: source = handle.read() return parso.parse(source) def is_none(node): "Determine if a node is the `None` keyword." return isinstance(node, parso.python.tree.Keyword) and node.value == "None" def is_number(node): "Determine if a node is a number." return isinstance(node, parso.python.tree.Number) def dump_node(node): "Generate string version of node." buffer = io.StringIO() write = buffer.write def do_dump(node, indent=""): write("{}{}({}".format(indent, type(node).__name__, node.type)) value = getattr(node, "value", None) if value: value = value.replace("\n", "\\n") write(", '{}'".format(value)) children = getattr(node, "children", None) if children: write(", [\n") for child in children: do_dump(child, indent + " " * 4) write(",\n") write("{}]".format(indent)) write(")") if not indent: write("\n") do_dump(node) return buffer.getvalue()
sixty-north/cosmic-ray
src/cosmic_ray/ast/__init__.py
Python
mit
2,775
[ "VisIt" ]
ba1bd075d85655bf0b337bec4e18e0ed222d882f831f0a920758a8cef4ae7023
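Visitor above is abstract, so using it means subclassing, implementing visit, and returning the (possibly modified) node. A small sketch that renames a variable in place, assuming the record's module is importable as cosmic_ray.ast per its path:

import parso
import parso.python.tree
from cosmic_ray.ast import Visitor

class Renamer(Visitor):
    def visit(self, node):
        if isinstance(node, parso.python.tree.Name) and node.value == "x":
            node.value = "y"   # edit the leaf in place; returning it keeps it in the tree
        return node

tree = parso.parse("x = 1\nprint(x)\n")
print(Renamer().walk(tree).get_code())  # prints the rewritten source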
# $HeadURL: $ """ JobEfficiencyPolicy Policy that calculates the efficiency following the formula: ( completed + done ) / ( completed + done + failed ) If the denominator is smaller than 10, it does not take any decision. """ from DIRAC import S_OK from DIRAC.ResourceStatusSystem.PolicySystem.PolicyBase import PolicyBase __RCSID__ = '$Id: JobEfficiencyPolicy.py 60769 2013-01-18 11:50:36Z ubeda $' class JobEfficiencyPolicy( PolicyBase ): """ The JobEfficiencyPolicy class is a policy that checks the efficiency of the jobs according to what is on JobDB. Evaluates the JobEfficiency results given by the JobCommand.JobCommand """ @staticmethod def _evaluate( commandResult ): """ _evaluate efficiency < 0.5 :: Banned efficiency < 0.9 :: Degraded """ result = { 'Status' : None, 'Reason' : None } if not commandResult[ 'OK' ]: result[ 'Status' ] = 'Error' result[ 'Reason' ] = commandResult[ 'Message' ] return S_OK( result ) commandResult = commandResult[ 'Value' ] if not commandResult: result[ 'Status' ] = 'Unknown' result[ 'Reason' ] = 'No values to take a decision' return S_OK( result ) commandResult = commandResult[ 0 ] if not commandResult: result[ 'Status' ] = 'Unknown' result[ 'Reason' ] = 'No values to take a decision' return S_OK( result ) completed = float( commandResult[ 'Completed' ] ) done = float( commandResult[ 'Done' ] ) failed = float( commandResult[ 'Failed' ] ) total = completed + done + failed # we want a minimum number of jobs to take a decision (at least 10 jobs) if total < 10: result[ 'Status' ] = 'Unknown' result[ 'Reason' ] = 'Not enough jobs to take a decision' return S_OK( result ) efficiency = ( done + completed ) / total if efficiency < 0.5: result[ 'Status' ] = 'Banned' elif efficiency < 0.90: result[ 'Status' ] = 'Degraded' else: result[ 'Status' ] = 'Active' result[ 'Reason' ] = 'Jobs Efficiency of %.2f' % efficiency return S_OK( result ) #............................................................................... #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
Sbalbp/DIRAC
ResourceStatusSystem/Policy/JobEfficiencyPolicy.py
Python
gpl-3.0
2,430
[ "DIRAC" ]
09943d347c5897bf7d4065894b72d6b18fb20c42adca08df961800a2105f36a1
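_evaluate above only needs a DIRAC-style result dictionary, so its thresholds can be exercised without a JobDB behind it. A hedged sketch with a hand-built commandResult (S_OK simply wraps the value in a dict with 'OK': True; the job counts are invented):

from DIRAC import S_OK
from DIRAC.ResourceStatusSystem.Policy.JobEfficiencyPolicy import JobEfficiencyPolicy

# 40 good jobs out of 50 -> efficiency 0.80, which lands in the Degraded band
commandResult = S_OK([{'Completed': 15, 'Done': 25, 'Failed': 10}])
res = JobEfficiencyPolicy._evaluate(commandResult)
print(res['Value']['Status'])  # Degraded
print(res['Value']['Reason'])  # Jobs Efficiency of 0.80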
# Contributed by t0rm3nt0r to the Official L2J Datapack Project. # With some minor cleanup by DrLecter. # Visit http://forum.l2jdp.com for more details. import sys from com.l2scoria.gameserver.model.quest import State from com.l2scoria.gameserver.model.quest import QuestState from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest qn = "18_MeetingWithTheGoldenRam" DONAL = 31314 DAISY = 31315 ABERCROMBIE = 31555 BOX = 7245 class Quest (JQuest) : def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr) def onEvent (self,event,st) : htmltext = event if event == "31314-03.htm" : if st.getPlayer().getLevel() >= 66 : st.set("cond","1") st.setState(STARTED) st.playSound("ItemSound.quest_accept") else : htmltext = "31314-02.htm" st.exitQuest(1) elif event == "31315-02.htm" : st.set("cond","2") htmltext = "31315-02.htm" st.giveItems(BOX,1) elif event == "31555-02.htm" : st.giveItems(57,15000) st.takeItems(BOX,-1) st.addExpAndSp(50000,0) st.unset("cond") st.playSound("ItemSound.quest_finish") st.setState(COMPLETED) return htmltext def onTalk (self,npc,player): npcId = npc.getNpcId() htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>" st = player.getQuestState(qn) if not st : return htmltext id = st.getState() cond = st.getInt("cond") if id == COMPLETED : htmltext = "<html><body>This quest has already been completed.</body></html>" elif id == CREATED and npcId == DONAL : htmltext = "31314-01.htm" elif id == STARTED : if npcId == DONAL : htmltext = "31314-04.htm" elif npcId == DAISY : if cond < 2 : htmltext = "31315-01.htm" else : htmltext = "31315-03.htm" elif npcId == ABERCROMBIE and cond == 2 and st.getQuestItemsCount(BOX): htmltext = "31555-01.htm" return htmltext QUEST = Quest(18, qn, "Meeting With The Golden Ram") CREATED = State('Start', QUEST) STARTED = State('Started', QUEST) COMPLETED = State('Completed', QUEST) QUEST.setInitialState(CREATED) QUEST.addStartNpc(DONAL) QUEST.addTalkId(DONAL) QUEST.addTalkId(DAISY) QUEST.addTalkId(ABERCROMBIE)
zenn1989/scoria-interlude
L2Jscoria-Game/data/scripts/quests/18_MeetingWithTheGoldenRam/__init__.py
Python
gpl-3.0
2,398
[ "VisIt" ]
f764fcec3e6d46c3f4e63aab50c1346f6fe8bf862a308501561c507cd2b29f74
# Copyright (c) 2020, Apple Inc. All rights reserved. # # Use of this source code is governed by a BSD-3-clause license that can be # found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause from ..var import Var def _get_input_vars(op, only_nonconst_vars=False): """ Return type : List[Var] """ input_vars = [] for name, val in op.inputs.items(): if isinstance(val, Var): if only_nonconst_vars: if val.op and val.op.op_type == "const": continue input_vars.append(val) elif isinstance(val, (list, tuple)): for var in val: if not isinstance(var, Var): msg = "unrecognized input type of op='{}', input='{}'" raise ValueError(msg.format(op.name, name)) if only_nonconst_vars: if var.op and var.op.op_type == "const": continue input_vars.append(var) else: msg = "unrecognized input type of op='{}', input='{}'" raise ValueError(msg.format(op.name, name)) return input_vars class DotVisitor(object): """ Generates a dot description of a ssa block """ def __init__(self, annotation=True): self.result = [] self.visited_memo = {} self.highlights = {} self.alternate_labeller = lambda o: o.op_type + ": " + o.name self.annotation = annotation def labeller(self, labeller): self.alternate_labeller = labeller return self def highlight_nodes(self, nodeset, color="yellow"): for i in nodeset: self.highlights[i] = color return self def visit(self, block, op, nodename_prefix=""): """ Append edges connecting parents of op to the op """ if op in self.visited_memo: return self label = self.alternate_labeller(op) self.visited_memo[op] = 1 if op.name in self.highlights and op.name not in [ o.name for o in block.outputs ]: self.result.append( '"' + nodename_prefix + "op: " + op.name + '"' + '[label="' + label + '",fillcolor=%s,style=filled,fontcolor=%s]' % (self.highlights[op.name], "violetred") ) else: self.result.append( '"' + nodename_prefix + "op: " + op.name + '"' + '[label="' + label + '",fontcolor=%s]' % ("violetred") ) for input_var in _get_input_vars(op, only_nonconst_vars=True): if input_var.op is not None: input_name = "op: " + input_var.op.name else: input_name = input_var.name edge = ( '"' + nodename_prefix + input_name + '"' + " -> " + '"' + nodename_prefix + "op: " + op.name + '"' ) self.result.append(edge) if input_var.op is not None: self.visit(block, input_var.op, nodename_prefix) else: self.visit_input_var(input_var, nodename_prefix) return self def visit_input_var(self, var, nodename_prefix=""): label = "input: " + var.name if var.name in self.highlights: self.result.append( '"' + nodename_prefix + var.name + '"' + '[label="' + label + '",fillcolor=%s,style=filled,fontcolor=%s]' % (self.highlights[var.name], "violetred") ) else: self.result.append( '"' + nodename_prefix + var.name + '"' + '[label="' + label + '",fontcolor=%s]' % ("violetred") ) def visit_output_vars(self, block, var, nodename_prefix=""): label = "output: " + var.name if var.name in self.highlights: self.result.append( '"' + nodename_prefix + var.name + '"' + '[label="' + label + '",fillcolor=%s,style=filled,fontcolor=%s]' % (self.highlights[var.name], "violetred") ) else: self.result.append( '"' + nodename_prefix + var.name + '"' + '[label="' + label + '",fontcolor=%s]' % ("violetred") ) parent_op = var.op edge = ( '"' + nodename_prefix + "op: " + parent_op.name + '"' + " -> " + '"' + nodename_prefix + var.name + '"' ) self.result.append(edge) self.visit(block, parent_op, nodename_prefix=nodename_prefix) def visit_all(self, block, nodename_prefix=""): for out_var in block.outputs: self.visit_output_vars(block, out_var, 
nodename_prefix=nodename_prefix) for op in block.operations: if op.op_type != "const": self.visit(block, op, nodename_prefix=nodename_prefix) return self def get_result(self, graphtype="digraph", graph_name="g"): return ( graphtype + " " + graph_name + " {\n\t" + "\n\t".join(str(i) for i in self.result) + ';\n\tlabel="' + graph_name[8:] + '";\n\tfontsize=96;\n}' ) def __str__(self): return self.get_result()
apple/coremltools
coremltools/converters/mil/mil/visitors/dot_visitor.py
Python
bsd-3-clause
6,108
[ "VisIt" ]
80c82c6306824e019d0f689c06c8aeaf24f05ed5eef8e87d2bc12200f5d497c9
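DotVisitor above consumes a MIL block, so the usual flow is to build a small program and render the block of its main function. A sketch assuming coremltools' MIL builder API and the module path from this record:

from coremltools.converters.mil import Builder as mb
from coremltools.converters.mil.mil.visitors.dot_visitor import DotVisitor

@mb.program(input_specs=[mb.TensorSpec(shape=(2, 3))])
def prog(x):
    return mb.relu(x=x)

block = prog.functions["main"]                 # a Function body acts as the block
dot = DotVisitor().visit_all(block).get_result("digraph", "graph_of_main")
print(dot)                                     # paste into Graphviz to render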
# (C) British Crown Copyright 2010 - 2018, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """ A package for handling multi-dimensional data and associated metadata. .. note :: The Iris documentation has further usage information, including a :ref:`user guide <user_guide_index>` which should be the first port of call for new users. The functions in this module provide the main way to load and/or save your data. The :func:`load` function provides a simple way to explore data from the interactive Python prompt. It will convert the source data into :class:`Cubes <iris.cube.Cube>`, and combine those cubes into higher-dimensional cubes where possible. The :func:`load_cube` and :func:`load_cubes` functions are similar to :func:`load`, but they raise an exception if the number of cubes is not what was expected. They are more useful in scripts, where they can provide an early sanity check on incoming data. The :func:`load_raw` function is provided for those occasions where the automatic combination of cubes into higher-dimensional cubes is undesirable. However, it is intended as a tool of last resort! If you experience a problem with the automatic combination process then please raise an issue with the Iris developers. To persist a cube to the file-system, use the :func:`save` function. All the load functions share very similar arguments: * uris: Either a single filename/URI expressed as a string, or an iterable of filenames/URIs. Filenames can contain `~` or `~user` abbreviations, and/or Unix shell-style wildcards (e.g. `*` and `?`). See the standard library function :func:`os.path.expanduser` and module :mod:`fnmatch` for more details. * constraints: Either a single constraint, or an iterable of constraints. Each constraint can be either a string, an instance of :class:`iris.Constraint`, or an instance of :class:`iris.AttributeConstraint`. If the constraint is a string it will be used to match against cube.name(). .. _constraint_egs: For example:: # Load air temperature data. load_cube(uri, 'air_temperature') # Load data with a specific model level number. load_cube(uri, iris.Constraint(model_level_number=1)) # Load data with a specific STASH code. load_cube(uri, iris.AttributeConstraint(STASH='m01s00i004')) * callback: A function to add metadata from the originating field and/or URI which obeys the following rules: 1. Function signature must be: ``(cube, field, filename)``. 2. Modifies the given cube inplace, unless a new cube is returned by the function. 3. If the cube is to be rejected the callback must raise an :class:`iris.exceptions.IgnoreCubeException`. 
For example:: def callback(cube, field, filename): # Extract ID from filenames given as: <prefix>__<exp_id> experiment_id = filename.split('__')[1] experiment_coord = iris.coords.AuxCoord( experiment_id, long_name='experiment_id') cube.add_aux_coord(experiment_coord) """ from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa import six import contextlib import glob import itertools import os.path import threading import iris.config import iris.cube import iris._constraints from iris._deprecation import IrisDeprecation, warn_deprecated import iris.fileformats import iris.io try: import iris_sample_data except ImportError: iris_sample_data = None # Iris revision. __version__ = '2.3.0dev0' # Restrict the names imported when using "from iris import *" __all__ = ['load', 'load_cube', 'load_cubes', 'load_raw', 'save', 'Constraint', 'AttributeConstraint', 'sample_data_path', 'site_configuration', 'Future', 'FUTURE', 'IrisDeprecation'] Constraint = iris._constraints.Constraint AttributeConstraint = iris._constraints.AttributeConstraint class Future(threading.local): """Run-time configuration controller.""" def __init__(self, cell_datetime_objects=True, netcdf_promote=True, netcdf_no_unlimited=True, clip_latitudes=True): """ A container for run-time options controls. To adjust the values simply update the relevant attribute from within your code. For example:: iris.FUTURE.cell_datetime_objects = False If Iris code is executed with multiple threads, note the values of these options are thread-specific. .. deprecated:: 2.0.0 The option `cell_datetime_objects` is deprecated and will be removed in a future release. `cell_datetime_objects` is set to True by default and should not be altered. The option `cell_datetime_objects` controlled whether the :meth:`iris.coords.Coord.cell()` method would return time coordinate values as simple numbers or as time objects with attributes for year, month, day, etc. Cells are now represented as time objects by default, allowing you to express time constraints using a simpler syntax. For example:: # To select all data defined at midday. Constraint(time=lambda cell: cell.point.hour == 12) # To ignore the 29th of February. Constraint(time=lambda cell: cell.point.day != 29 and cell.point.month != 2) For more details, see :ref:`using-time-constraints`. .. deprecated:: 2.0.0 The option `netcdf_promote` is deprecated and will be removed in a future release and the deprecated code paths this option used to toggle have been removed. The option `netcdf_promote` controlled whether the netCDF loader exposed variables that defined reference surfaces for dimensionless vertical coordinates as independent Cubes. .. deprecated:: 2.0.0 The option `netcdf_no_unlimited` is deprecated and will be removed in a future release. The deprecated code paths this option used to toggle have been removed. The option `netcdf_no_unlimited` changed the behaviour of the netCDF saver regarding unlimited dimensions. The netCDF saver now sets no dimensions to unlimited. .. deprecated:: 2.0.0 The option `clip_latitudes` is deprecated and will be removed in a future release. `clip_latitudes` is set to True by default and should not be altered. The option `clip_latitudes` controlled whether the :meth:`iris.coords.Coord.guess_bounds()` method would limit the guessed bounds to [-90, 90] for latitudes. 
""" self.__dict__['cell_datetime_objects'] = cell_datetime_objects self.__dict__['netcdf_promote'] = netcdf_promote self.__dict__['netcdf_no_unlimited'] = netcdf_no_unlimited self.__dict__['clip_latitudes'] = clip_latitudes def __repr__(self): msg = ('Future(cell_datetime_objects={}, netcdf_promote={}, ' 'netcdf_no_unlimited={}, clip_latitudes={})') return msg.format(self.cell_datetime_objects, self.netcdf_promote, self.netcdf_no_unlimited, self.clip_latitudes) deprecated_options = {'cell_datetime_objects': 'warning', 'netcdf_no_unlimited': 'error', 'netcdf_promote': 'error', 'clip_latitudes': 'warning'} def __setattr__(self, name, value): if name in self.deprecated_options: level = self.deprecated_options[name] if level == 'error' and not value: emsg = ("setting the 'Future' property {prop!r} has been " "deprecated to be removed in a future release, and " "deprecated {prop!r} behaviour has been removed. " "Please remove code that sets this property.") raise AttributeError(emsg.format(prop=name)) else: msg = ("setting the 'Future' property {!r} is deprecated " "and will be removed in a future release. " "Please remove code that sets this property.") warn_deprecated(msg.format(name)) if name not in self.__dict__: msg = "'Future' object has no attribute {!r}".format(name) raise AttributeError(msg) self.__dict__[name] = value @contextlib.contextmanager def context(self, **kwargs): """ Return a context manager which allows temporary modification of the option values for the active thread. On entry to the `with` statement, all keyword arguments are applied to the Future object. On exit from the `with` statement, the previous state is restored. For example:: with iris.FUTURE.context(cell_datetime_objects=False): # ... code that expects numbers and not datetimes """ # Save the current context current_state = self.__dict__.copy() # Update the state for name, value in six.iteritems(kwargs): setattr(self, name, value) try: yield finally: # Return the state self.__dict__.clear() self.__dict__.update(current_state) #: Object containing all the Iris run-time options. FUTURE = Future() # Initialise the site configuration dictionary. #: Iris site configuration dictionary. 
site_configuration = {} try: from iris.site_config import update as _update except ImportError: pass else: _update(site_configuration) def _generate_cubes(uris, callback, constraints): """Returns a generator of cubes given the URIs and a callback.""" if isinstance(uris, six.string_types): uris = [uris] # Group collections of uris by their iris handler # Create list of tuples relating schemes to part names uri_tuples = sorted(iris.io.decode_uri(uri) for uri in uris) for scheme, groups in (itertools.groupby(uri_tuples, key=lambda x: x[0])): # Call each scheme handler with the appropriate URIs if scheme == 'file': part_names = [x[1] for x in groups] for cube in iris.io.load_files(part_names, callback, constraints): yield cube elif scheme in ['http', 'https']: urls = [':'.join(x) for x in groups] for cube in iris.io.load_http(urls, callback): yield cube else: raise ValueError('Iris cannot handle the URI scheme: %s' % scheme) def _load_collection(uris, constraints=None, callback=None): try: cubes = _generate_cubes(uris, callback, constraints) result = iris.cube._CubeFilterCollection.from_cubes(cubes, constraints) except EOFError as e: raise iris.exceptions.TranslationError( "The file appears empty or incomplete: {!r}".format(str(e))) return result def load(uris, constraints=None, callback=None): """ Loads any number of Cubes for each constraint. For a full description of the arguments, please see the module documentation for :mod:`iris`. Args: * uris: One or more filenames/URIs. Kwargs: * constraints: One or more constraints. * callback: A modifier/filter function. Returns: An :class:`iris.cube.CubeList`. """ return _load_collection(uris, constraints, callback).merged().cubes() def load_cube(uris, constraint=None, callback=None): """ Loads a single cube. For a full description of the arguments, please see the module documentation for :mod:`iris`. Args: * uris: One or more filenames/URIs. Kwargs: * constraints: A constraint. * callback: A modifier/filter function. Returns: An :class:`iris.cube.Cube`. """ constraints = iris._constraints.list_of_constraints(constraint) if len(constraints) != 1: raise ValueError('only a single constraint is allowed') cubes = _load_collection(uris, constraints, callback) cubes = cubes.merged().cubes() try: cube = cubes.merge_cube() except iris.exceptions.MergeError as e: raise iris.exceptions.ConstraintMismatchError(str(e)) except ValueError: raise iris.exceptions.ConstraintMismatchError('no cubes found') return cube def load_cubes(uris, constraints=None, callback=None): """ Loads exactly one Cube for each constraint. For a full description of the arguments, please see the module documentation for :mod:`iris`. Args: * uris: One or more filenames/URIs. Kwargs: * constraints: One or more constraints. * callback: A modifier/filter function. Returns: An :class:`iris.cube.CubeList`. """ # Merge the incoming cubes collection = _load_collection(uris, constraints, callback).merged() # Make sure we have exactly one merged cube per constraint bad_pairs = [pair for pair in collection.pairs if len(pair) != 1] if bad_pairs: fmt = ' {} -> {} cubes' bits = [fmt.format(pair.constraint, len(pair)) for pair in bad_pairs] msg = '\n' + '\n'.join(bits) raise iris.exceptions.ConstraintMismatchError(msg) return collection.cubes() def load_raw(uris, constraints=None, callback=None): """ Loads non-merged cubes. This function is provided for those occasions where the automatic combination of cubes into higher-dimensional cubes is undesirable. However, it is intended as a tool of last resort! 
If you experience a problem with the automatic combination process then please raise an issue with the Iris developers. For a full description of the arguments, please see the module documentation for :mod:`iris`. Args: * uris: One or more filenames/URIs. Kwargs: * constraints: One or more constraints. * callback: A modifier/filter function. Returns: An :class:`iris.cube.CubeList`. """ from iris.fileformats.um._fast_load import _raw_structured_loading with _raw_structured_loading(): return _load_collection(uris, constraints, callback).cubes() save = iris.io.save def sample_data_path(*path_to_join): """ Given the sample data resource, returns the full path to the file. .. note:: This function is only for locating files in the iris sample data collection (installed separately from iris). It is not needed or appropriate for general file access. """ target = os.path.join(*path_to_join) if os.path.isabs(target): raise ValueError('Absolute paths, such as {!r}, are not supported.\n' 'NB. This function is only for locating files in the ' 'iris sample data collection. It is not needed or ' 'appropriate for general file access.'.format(target)) if iris_sample_data is not None: target = os.path.join(iris_sample_data.path, target) else: raise ImportError("Please install the 'iris-sample-data' package to " "access sample data.") if not glob.glob(target): raise ValueError('Sample data file(s) at {!r} not found.\n' 'NB. This function is only for locating files in the ' 'iris sample data collection. It is not needed or ' 'appropriate for general file access.'.format(target)) return target
dkillick/iris
lib/iris/__init__.py
Python
lgpl-3.0
16,714
[ "NetCDF" ]
01962fa38a9e8c9b611348b065c160dbccbcda834a0a4fbd832ff2e9e4cbbef8
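Tying together the pieces from the module docstring above: a single load_cube call can combine a name constraint, an attribute constraint, and a callback. The path and the callback's rejection rule below are placeholders; the STASH code is the one from the docstring's own example:

import iris
import iris.exceptions

def reject_scalars(cube, field, filename):
    # callbacks may veto a cube by raising IgnoreCubeException
    if cube.ndim == 0:
        raise iris.exceptions.IgnoreCubeException

cube = iris.load_cube(
    "/data/model/*.pp",  # placeholder path
    iris.Constraint("air_temperature") & iris.AttributeConstraint(STASH="m01s00i004"),
    callback=reject_scalars,
)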
#! coding: utf-8 import copy import datetime import inspect import sys from sqlalchemy import exc from sqlalchemy import sql from sqlalchemy import testing from sqlalchemy import util from sqlalchemy.sql import column from sqlalchemy.sql.base import DedupeColumnCollection from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import in_ from sqlalchemy.testing import is_ from sqlalchemy.testing import is_false from sqlalchemy.testing import is_true from sqlalchemy.testing import mock from sqlalchemy.testing import ne_ from sqlalchemy.testing.util import gc_collect from sqlalchemy.testing.util import picklers from sqlalchemy.util import _preloaded from sqlalchemy.util import classproperty from sqlalchemy.util import compat from sqlalchemy.util import get_callable_argspec from sqlalchemy.util import langhelpers from sqlalchemy.util import timezone from sqlalchemy.util import WeakSequence class WeakSequenceTest(fixtures.TestBase): @testing.requires.predictable_gc def test_cleanout_elements(self): class Foo(object): pass f1, f2, f3 = Foo(), Foo(), Foo() w = WeakSequence([f1, f2, f3]) eq_(len(w), 3) eq_(len(w._storage), 3) del f2 gc_collect() eq_(len(w), 2) eq_(len(w._storage), 2) @testing.requires.predictable_gc def test_cleanout_appended(self): class Foo(object): pass f1, f2, f3 = Foo(), Foo(), Foo() w = WeakSequence() w.append(f1) w.append(f2) w.append(f3) eq_(len(w), 3) eq_(len(w._storage), 3) del f2 gc_collect() eq_(len(w), 2) eq_(len(w._storage), 2) class OrderedDictTest(fixtures.TestBase): def test_odict(self): o = util.OrderedDict() o["a"] = 1 o["b"] = 2 o["snack"] = "attack" o["c"] = 3 eq_(list(o.keys()), ["a", "b", "snack", "c"]) eq_(list(o.values()), [1, 2, "attack", 3]) o.pop("snack") eq_(list(o.keys()), ["a", "b", "c"]) eq_(list(o.values()), [1, 2, 3]) try: o.pop("eep") assert False except KeyError: pass eq_(o.pop("eep", "woot"), "woot") try: o.pop("whiff", "bang", "pow") assert False except TypeError: pass eq_(list(o.keys()), ["a", "b", "c"]) eq_(list(o.values()), [1, 2, 3]) o2 = util.OrderedDict(d=4) o2["e"] = 5 eq_(list(o2.keys()), ["d", "e"]) eq_(list(o2.values()), [4, 5]) o.update(o2) eq_(list(o.keys()), ["a", "b", "c", "d", "e"]) eq_(list(o.values()), [1, 2, 3, 4, 5]) o.setdefault("c", "zzz") o.setdefault("f", 6) eq_(list(o.keys()), ["a", "b", "c", "d", "e", "f"]) eq_(list(o.values()), [1, 2, 3, 4, 5, 6]) def test_odict_constructor(self): o = util.OrderedDict( [("name", "jbe"), ("fullname", "jonathan"), ("password", "")] ) eq_(list(o.keys()), ["name", "fullname", "password"]) def test_odict_copy(self): o = util.OrderedDict() o["zzz"] = 1 o["aaa"] = 2 eq_(list(o.keys()), ["zzz", "aaa"]) o2 = o.copy() eq_(list(o2.keys()), list(o.keys())) o3 = copy.copy(o) eq_(list(o3.keys()), list(o.keys())) def test_no_sort_legacy_dictionary(self): d1 = {"c": 1, "b": 2, "a": 3} if testing.requires.python37.enabled: util.sort_dictionary(d1) eq_(list(d1), ["a", "b", "c"]) else: assert_raises(AttributeError, util.sort_dictionary, d1) def test_sort_dictionary(self): o = util.OrderedDict() o["za"] = 1 o["az"] = 2 o["cc"] = 3 eq_( list(o), ["za", "az", "cc"], ) util.sort_dictionary(o) eq_(list(o), ["az", "cc", "za"]) util.sort_dictionary(o, lambda key: key[1]) eq_(list(o), ["za", "cc", "az"]) class OrderedSetTest(fixtures.TestBase): def test_mutators_against_iter(self): # testing a set modified against an iterator 
o = util.OrderedSet([3, 2, 4, 5]) eq_(o.difference(iter([3, 4])), util.OrderedSet([2, 5])) eq_(o.intersection(iter([3, 4, 6])), util.OrderedSet([3, 4])) eq_(o.union(iter([3, 4, 6])), util.OrderedSet([2, 3, 4, 5, 6])) class ImmutableDictTest(fixtures.TestBase): def test_union_no_change(self): d = util.immutabledict({1: 2, 3: 4}) d2 = d.union({}) is_(d2, d) def test_merge_with_no_change(self): d = util.immutabledict({1: 2, 3: 4}) d2 = d.merge_with({}, None) eq_(d2, {1: 2, 3: 4}) is_(d2, d) def test_merge_with_dicts(self): d = util.immutabledict({1: 2, 3: 4}) d2 = d.merge_with({3: 5, 7: 12}, {9: 18, 15: 25}) eq_(d, {1: 2, 3: 4}) eq_(d2, {1: 2, 3: 5, 7: 12, 9: 18, 15: 25}) assert isinstance(d2, util.immutabledict) d3 = d.merge_with({17: 42}) eq_(d3, {1: 2, 3: 4, 17: 42}) def test_merge_with_tuples(self): d = util.immutabledict({1: 2, 3: 4}) d2 = d.merge_with([(3, 5), (7, 12)], [(9, 18), (15, 25)]) eq_(d, {1: 2, 3: 4}) eq_(d2, {1: 2, 3: 5, 7: 12, 9: 18, 15: 25}) def test_union_dictionary(self): d = util.immutabledict({1: 2, 3: 4}) d2 = d.union({3: 5, 7: 12}) assert isinstance(d2, util.immutabledict) eq_(d, {1: 2, 3: 4}) eq_(d2, {1: 2, 3: 5, 7: 12}) def _dont_test_union_kw(self): d = util.immutabledict({"a": "b", "c": "d"}) d2 = d.union(e="f", g="h") assert isinstance(d2, util.immutabledict) eq_(d, {"a": "b", "c": "d"}) eq_(d2, {"a": "b", "c": "d", "e": "f", "g": "h"}) def test_union_tuples(self): d = util.immutabledict({1: 2, 3: 4}) d2 = d.union([(3, 5), (7, 12)]) eq_(d, {1: 2, 3: 4}) eq_(d2, {1: 2, 3: 5, 7: 12}) def test_keys(self): d = util.immutabledict({1: 2, 3: 4}) eq_(set(d.keys()), {1, 3}) def test_values(self): d = util.immutabledict({1: 2, 3: 4}) eq_(set(d.values()), {2, 4}) def test_items(self): d = util.immutabledict({1: 2, 3: 4}) eq_(set(d.items()), {(1, 2), (3, 4)}) def test_contains(self): d = util.immutabledict({1: 2, 3: 4}) assert 1 in d assert "foo" not in d def test_rich_compare(self): d = util.immutabledict({1: 2, 3: 4}) d2 = util.immutabledict({1: 2, 3: 4}) d3 = util.immutabledict({5: 12}) d4 = {5: 12} eq_(d, d2) ne_(d, d3) ne_(d, d4) eq_(d3, d4) def test_serialize(self): d = util.immutabledict({1: 2, 3: 4}) for loads, dumps in picklers(): d2 = loads(dumps(d)) eq_(d2, {1: 2, 3: 4}) assert isinstance(d2, util.immutabledict) class MemoizedAttrTest(fixtures.TestBase): def test_memoized_property(self): val = [20] class Foo(object): @util.memoized_property def bar(self): v = val[0] val[0] += 1 return v ne_(Foo.bar, None) f1 = Foo() assert "bar" not in f1.__dict__ eq_(f1.bar, 20) eq_(f1.bar, 20) eq_(val[0], 21) eq_(f1.__dict__["bar"], 20) def test_memoized_instancemethod(self): val = [20] class Foo(object): @util.memoized_instancemethod def bar(self): v = val[0] val[0] += 1 return v assert inspect.ismethod(Foo().bar) ne_(Foo.bar, None) f1 = Foo() assert "bar" not in f1.__dict__ eq_(f1.bar(), 20) eq_(f1.bar(), 20) eq_(val[0], 21) def test_memoized_slots(self): canary = mock.Mock() class Foob(util.MemoizedSlots): __slots__ = ("foo_bar", "gogo") def _memoized_method_gogo(self): canary.method() return "gogo" def _memoized_attr_foo_bar(self): canary.attr() return "foobar" f1 = Foob() assert_raises(AttributeError, setattr, f1, "bar", "bat") eq_(f1.foo_bar, "foobar") eq_(f1.foo_bar, "foobar") eq_(f1.gogo(), "gogo") eq_(f1.gogo(), "gogo") eq_(canary.mock_calls, [mock.call.attr(), mock.call.method()]) class WrapCallableTest(fixtures.TestBase): def test_wrapping_update_wrapper_fn(self): def my_fancy_default(): """run the fancy default""" return 10 c = util.wrap_callable(lambda: 
            lambda: my_fancy_default, my_fancy_default
        )

        eq_(c.__name__, "my_fancy_default")
        eq_(c.__doc__, "run the fancy default")

    def test_wrapping_update_wrapper_fn_nodocstring(self):
        def my_fancy_default():
            return 10

        c = util.wrap_callable(lambda: my_fancy_default, my_fancy_default)
        eq_(c.__name__, "my_fancy_default")
        eq_(c.__doc__, None)

    def test_wrapping_update_wrapper_cls(self):
        class MyFancyDefault(object):
            """a fancy default"""

            def __call__(self):
                """run the fancy default"""
                return 10

        def_ = MyFancyDefault()
        c = util.wrap_callable(lambda: def_(), def_)
        eq_(c.__name__, "MyFancyDefault")
        eq_(c.__doc__, "run the fancy default")

    def test_wrapping_update_wrapper_cls_noclsdocstring(self):
        class MyFancyDefault(object):
            def __call__(self):
                """run the fancy default"""
                return 10

        def_ = MyFancyDefault()
        c = util.wrap_callable(lambda: def_(), def_)
        eq_(c.__name__, "MyFancyDefault")
        eq_(c.__doc__, "run the fancy default")

    def test_wrapping_update_wrapper_cls_nomethdocstring(self):
        class MyFancyDefault(object):
            """a fancy default"""

            def __call__(self):
                return 10

        def_ = MyFancyDefault()
        c = util.wrap_callable(lambda: def_(), def_)
        eq_(c.__name__, "MyFancyDefault")
        eq_(c.__doc__, "a fancy default")

    def test_wrapping_update_wrapper_cls_noclsdocstring_nomethdocstring(self):
        class MyFancyDefault(object):
            def __call__(self):
                return 10

        def_ = MyFancyDefault()
        c = util.wrap_callable(lambda: def_(), def_)
        eq_(c.__name__, "MyFancyDefault")
        eq_(c.__doc__, None)

    def test_wrapping_update_wrapper_functools_partial(self):
        def my_default(x):
            return x

        import functools

        my_functools_default = functools.partial(my_default, 5)

        c = util.wrap_callable(
            lambda: my_functools_default(), my_functools_default
        )
        eq_(c.__name__, "partial")
        if not compat.pypy:
            # pypy fails this check
            eq_(c.__doc__, my_functools_default.__call__.__doc__)
        eq_(c(), 5)


class ToListTest(fixtures.TestBase):
    def test_from_string(self):
        eq_(util.to_list("xyz"), ["xyz"])

    def test_from_set(self):
        spec = util.to_list(set([1, 2, 3]))
        assert isinstance(spec, list)
        eq_(sorted(spec), [1, 2, 3])

    def test_from_dict(self):
        spec = util.to_list({1: "a", 2: "b", 3: "c"})
        assert isinstance(spec, list)
        eq_(sorted(spec), [1, 2, 3])

    def test_from_tuple(self):
        eq_(util.to_list((1, 2, 3)), [1, 2, 3])

    def test_from_bytes(self):
        eq_(util.to_list(compat.b("abc")), [compat.b("abc")])

        eq_(
            util.to_list([compat.b("abc"), compat.b("def")]),
            [compat.b("abc"), compat.b("def")],
        )


class ColumnCollectionCommon(testing.AssertsCompiledSQL):
    def _assert_collection_integrity(self, coll):
        eq_(coll._colset, set(c for k, c in coll._collection))
        d = {}
        for k, col in coll._collection:
            d.setdefault(k, col)
        d.update({idx: col for idx, (k, col) in enumerate(coll._collection)})
        eq_(coll._index, d)

    def test_keys(self):
        c1, c2, c3 = sql.column("c1"), sql.column("c2"), sql.column("c3")
        c2.key = "foo"
        cc = self._column_collection(
            columns=[("c1", c1), ("foo", c2), ("c3", c3)]
        )
        keys = cc.keys()
        eq_(keys, ["c1", "foo", "c3"])
        ne_(id(keys), id(cc.keys()))

        ci = cc.as_immutable()
        eq_(ci.keys(), ["c1", "foo", "c3"])

    def test_values(self):
        c1, c2, c3 = sql.column("c1"), sql.column("c2"), sql.column("c3")
        c2.key = "foo"
        cc = self._column_collection(
            columns=[("c1", c1), ("foo", c2), ("c3", c3)]
        )
        val = cc.values()
        eq_(val, [c1, c2, c3])
        ne_(id(val), id(cc.values()))

        ci = cc.as_immutable()
        eq_(ci.values(), [c1, c2, c3])

    def test_items(self):
        c1, c2, c3 = sql.column("c1"), sql.column("c2"), sql.column("c3")
        c2.key = "foo"
        cc = self._column_collection(
            columns=[("c1", c1), ("foo", c2), ("c3", c3)]
        )
        items = cc.items()
        eq_(items, [("c1", c1), ("foo", c2), ("c3", c3)])
        ne_(id(items), id(cc.items()))

        ci = cc.as_immutable()
        eq_(ci.items(), [("c1", c1), ("foo", c2), ("c3", c3)])

    def test_key_index_error(self):
        cc = self._column_collection(
            columns=[
                ("col1", sql.column("col1")),
                ("col2", sql.column("col2")),
            ]
        )
        assert_raises(KeyError, lambda: cc["foo"])
        assert_raises(KeyError, lambda: cc[object()])
        assert_raises(IndexError, lambda: cc[5])

    def test_contains_column(self):
        c1, c2, c3 = sql.column("c1"), sql.column("c2"), sql.column("c3")
        cc = self._column_collection(columns=[("c1", c1), ("c2", c2)])

        is_true(cc.contains_column(c1))
        is_false(cc.contains_column(c3))

    def test_contains_column_not_column(self):
        c1, c2, c3 = sql.column("c1"), sql.column("c2"), sql.column("c3")
        cc = self._column_collection(columns=[("c1", c1), ("c2", c2)])

        is_false(cc.contains_column(c3 == 2))

        with testing.expect_raises_message(
            exc.ArgumentError,
            "contains_column cannot be used with string arguments",
        ):
            cc.contains_column("c1")

        with testing.expect_raises_message(
            exc.ArgumentError,
            "contains_column cannot be used with string arguments",
        ):
            cc.contains_column("foo")

    def test_in(self):
        col1 = sql.column("col1")
        cc = self._column_collection(
            columns=[
                ("col1", col1),
                ("col2", sql.column("col2")),
                ("col3", sql.column("col3")),
            ]
        )
        assert "col1" in cc
        assert "col2" in cc

        assert_raises_message(
            exc.ArgumentError,
            "__contains__ requires a string argument",
            lambda: col1 in cc,
        )

    def test_compare(self):
        c1 = sql.column("col1")
        c2 = c1.label("col2")
        c3 = sql.column("col3")
        is_true(
            self._column_collection(
                [("col1", c1), ("col2", c2), ("col3", c3)]
            ).compare(
                self._column_collection(
                    [("col1", c1), ("col2", c2), ("col3", c3)]
                )
            )
        )
        is_false(
            self._column_collection(
                [("col1", c1), ("col2", c2), ("col3", c3)]
            ).compare(self._column_collection([("col1", c1), ("col2", c2)]))
        )

    def test_str(self):
        c1 = sql.column("col1")
        c2 = c1.label("col2")
        c3 = sql.column("col3")
        cc = self._column_collection(
            [("col1", c1), ("col2", c2), ("col3", c3)]
        )
        eq_(str(cc), "%s(%s, %s, %s)" % (type(cc).__name__, c1, c2, c3))
        eq_(repr(cc), object.__repr__(cc))


class ColumnCollectionTest(ColumnCollectionCommon, fixtures.TestBase):
    def _column_collection(self, columns=None):
        return sql.ColumnCollection(columns=columns)

    def test_separate_key_all_cols(self):
        c1, c2 = sql.column("col1"), sql.column("col2")
        cc = self._column_collection([("kcol1", c1), ("kcol2", c2)])
        eq_(cc._all_columns, [c1, c2])

    def test_separate_key_get(self):
        c1, c2 = sql.column("col1"), sql.column("col2")
        cc = self._column_collection([("kcol1", c1), ("kcol2", c2)])

        is_(cc.kcol1, c1)
        is_(cc.kcol2, c2)

    def test_separate_key_in(self):
        cc = self._column_collection(
            columns=[
                ("kcol1", sql.column("col1")),
                ("kcol2", sql.column("col2")),
                ("kcol3", sql.column("col3")),
            ]
        )
        assert "col1" not in cc
        assert "kcol2" in cc

    def test_dupes_add(self):
        c1, c2a, c3, c2b = (
            column("c1"),
            column("c2"),
            column("c3"),
            column("c2"),
        )

        cc = sql.ColumnCollection()

        cc.add(c1)
        cc.add(c2a, "c2")
        cc.add(c3)
        cc.add(c2b)

        eq_(cc._all_columns, [c1, c2a, c3, c2b])
        eq_(list(cc), [c1, c2a, c3, c2b])
        eq_(cc.keys(), ["c1", "c2", "c3", "c2"])

        assert cc.contains_column(c2a)
        assert cc.contains_column(c2b)

        # this is deterministic
        is_(cc["c2"], c2a)

        self._assert_collection_integrity(cc)

        ci = cc.as_immutable()
        eq_(ci._all_columns, [c1, c2a, c3, c2b])
        eq_(list(ci), [c1, c2a, c3, c2b])
        eq_(ci.keys(), ["c1", "c2", "c3", "c2"])

    def test_dupes_construct(self):
        c1, c2a, c3, c2b = (
            column("c1"),
            column("c2"),
            column("c3"),
            column("c2"),
        )

        cc = sql.ColumnCollection(
            columns=[("c1", c1), ("c2", c2a), ("c3", c3), ("c2", c2b)]
        )

        eq_(cc._all_columns, [c1, c2a, c3, c2b])
        eq_(list(cc), [c1, c2a, c3, c2b])
        eq_(cc.keys(), ["c1", "c2", "c3", "c2"])

        assert cc.contains_column(c2a)
        assert cc.contains_column(c2b)

        # this is deterministic
        is_(cc["c2"], c2a)

        self._assert_collection_integrity(cc)

        ci = cc.as_immutable()
        eq_(ci._all_columns, [c1, c2a, c3, c2b])
        eq_(list(ci), [c1, c2a, c3, c2b])
        eq_(ci.keys(), ["c1", "c2", "c3", "c2"])

    def test_identical_dupe_construct(self):
        c1, c2, c3 = (column("c1"), column("c2"), column("c3"))

        cc = sql.ColumnCollection(
            columns=[("c1", c1), ("c2", c2), ("c3", c3), ("c2", c2)]
        )

        eq_(cc._all_columns, [c1, c2, c3, c2])

        # for iter, c2a is replaced by c2b, ordering
        # is maintained in that way.  ideally, iter would be
        # the same as the "_all_columns" collection.
        eq_(list(cc), [c1, c2, c3, c2])

        assert cc.contains_column(c2)

        self._assert_collection_integrity(cc)

        ci = cc.as_immutable()
        eq_(ci._all_columns, [c1, c2, c3, c2])
        eq_(list(ci), [c1, c2, c3, c2])


class DedupeColumnCollectionTest(ColumnCollectionCommon, fixtures.TestBase):
    def _column_collection(self, columns=None):
        return DedupeColumnCollection(columns=columns)

    def test_separate_key_cols(self):
        c1, c2 = sql.column("col1"), sql.column("col2")
        assert_raises_message(
            exc.ArgumentError,
            "DedupeColumnCollection requires columns be under "
            "the same key as their .key",
            self._column_collection,
            [("kcol1", c1), ("kcol2", c2)],
        )

        cc = self._column_collection()
        assert_raises_message(
            exc.ArgumentError,
            "DedupeColumnCollection requires columns be under "
            "the same key as their .key",
            cc.add,
            c1,
            "kcol1",
        )

    def test_pickle_w_mutation(self):
        c1, c2, c3 = sql.column("c1"), sql.column("c2"), sql.column("c3")

        c2.key = "foo"

        cc = self._column_collection(columns=[("c1", c1), ("foo", c2)])
        ci = cc.as_immutable()

        d = {"cc": cc, "ci": ci}

        for loads, dumps in picklers():
            dp = loads(dumps(d))

            cp = dp["cc"]
            cpi = dp["ci"]
            self._assert_collection_integrity(cp)
            self._assert_collection_integrity(cpi)

            assert cp._colset is cpi._colset
            assert cp._index is cpi._index
            assert cp._collection is cpi._collection

            cp.add(c3)

            eq_(cp.keys(), ["c1", "foo", "c3"])
            eq_(cpi.keys(), ["c1", "foo", "c3"])

            assert cp.contains_column(c3)
            assert cpi.contains_column(c3)

    def test_keys_after_replace(self):
        c1, c2, c3 = sql.column("c1"), sql.column("c2"), sql.column("c3")

        c2.key = "foo"

        cc = self._column_collection(
            columns=[("c1", c1), ("foo", c2), ("c3", c3)]
        )
        eq_(cc.keys(), ["c1", "foo", "c3"])

        c4 = sql.column("c3")
        cc.replace(c4)
        eq_(cc.keys(), ["c1", "foo", "c3"])

        self._assert_collection_integrity(cc)

    def test_dupes_add_dedupe(self):
        cc = DedupeColumnCollection()

        c1, c2a, c3, c2b = (
            column("c1"),
            column("c2"),
            column("c3"),
            column("c2"),
        )

        cc.add(c1)
        cc.add(c2a)
        cc.add(c3)
        cc.add(c2b)

        eq_(cc._all_columns, [c1, c2b, c3])
        eq_(list(cc), [c1, c2b, c3])

        assert not cc.contains_column(c2a)
        assert cc.contains_column(c2b)

        self._assert_collection_integrity(cc)

    def test_dupes_construct_dedupe(self):
        c1, c2a, c3, c2b = (
            column("c1"),
            column("c2"),
            column("c3"),
            column("c2"),
        )

        cc = DedupeColumnCollection(
            columns=[("c1", c1), ("c2", c2a), ("c3", c3), ("c2", c2b)]
        )

        eq_(cc._all_columns, [c1, c2b, c3])
        eq_(list(cc), [c1, c2b, c3])

        assert not cc.contains_column(c2a)
        assert cc.contains_column(c2b)

        self._assert_collection_integrity(cc)

    def test_identical_dupe_add_dedupes(self):
        cc = DedupeColumnCollection()

        c1, c2, c3 = (column("c1"), column("c2"), column("c3"))

        cc.add(c1)
        cc.add(c2)
        cc.add(c3)
        cc.add(c2)

        eq_(cc._all_columns, [c1, c2, c3])
        # for iter, c2a is replaced by c2b, ordering
        # is maintained in that way.  ideally, iter would be
        # the same as the "_all_columns" collection.
        eq_(list(cc), [c1, c2, c3])

        assert cc.contains_column(c2)

        self._assert_collection_integrity(cc)

        ci = cc.as_immutable()
        eq_(ci._all_columns, [c1, c2, c3])
        eq_(list(ci), [c1, c2, c3])

    def test_identical_dupe_construct_dedupes(self):
        c1, c2, c3 = (column("c1"), column("c2"), column("c3"))

        cc = DedupeColumnCollection(
            columns=[("c1", c1), ("c2", c2), ("c3", c3), ("c2", c2)]
        )

        eq_(cc._all_columns, [c1, c2, c3])

        # for iter, c2a is replaced by c2b, ordering
        # is maintained in that way.  ideally, iter would be
        # the same as the "_all_columns" collection.
        eq_(list(cc), [c1, c2, c3])

        assert cc.contains_column(c2)

        self._assert_collection_integrity(cc)

        ci = cc.as_immutable()
        eq_(ci._all_columns, [c1, c2, c3])
        eq_(list(ci), [c1, c2, c3])

    def test_replace(self):
        cc = DedupeColumnCollection()
        ci = cc.as_immutable()

        c1, c2a, c3, c2b = (
            column("c1"),
            column("c2"),
            column("c3"),
            column("c2"),
        )

        cc.add(c1)
        cc.add(c2a)
        cc.add(c3)

        cc.replace(c2b)

        eq_(cc._all_columns, [c1, c2b, c3])
        eq_(list(cc), [c1, c2b, c3])
        is_(cc[1], c2b)
        assert not cc.contains_column(c2a)
        assert cc.contains_column(c2b)
        self._assert_collection_integrity(cc)

        eq_(ci._all_columns, [c1, c2b, c3])
        eq_(list(ci), [c1, c2b, c3])
        is_(ci[1], c2b)

    def test_replace_key_matches_name_of_another(self):
        cc = DedupeColumnCollection()
        ci = cc.as_immutable()

        c1, c2a, c3, c2b = (
            column("c1"),
            column("c2"),
            column("c3"),
            column("c4"),
        )
        c2b.key = "c2"

        cc.add(c1)
        cc.add(c2a)
        cc.add(c3)

        cc.replace(c2b)

        eq_(cc._all_columns, [c1, c2b, c3])
        eq_(list(cc), [c1, c2b, c3])
        is_(cc[1], c2b)
        self._assert_collection_integrity(cc)

        assert not cc.contains_column(c2a)
        assert cc.contains_column(c2b)

        eq_(ci._all_columns, [c1, c2b, c3])
        eq_(list(ci), [c1, c2b, c3])
        is_(ci[1], c2b)

    def test_replace_key_matches(self):
        cc = DedupeColumnCollection()
        ci = cc.as_immutable()

        c1, c2a, c3, c2b = (
            column("c1"),
            column("c2"),
            column("c3"),
            column("X"),
        )
        c2b.key = "c2"

        cc.add(c1)
        cc.add(c2a)
        cc.add(c3)

        cc.replace(c2b)

        assert not cc.contains_column(c2a)
        assert cc.contains_column(c2b)
        is_(cc[1], c2b)
        assert_raises(IndexError, lambda: cc[3])
        self._assert_collection_integrity(cc)

        eq_(cc._all_columns, [c1, c2b, c3])
        eq_(list(cc), [c1, c2b, c3])

        eq_(ci._all_columns, [c1, c2b, c3])
        eq_(list(ci), [c1, c2b, c3])
        is_(ci[1], c2b)
        assert_raises(IndexError, lambda: ci[3])

    def test_replace_name_matches(self):
        cc = DedupeColumnCollection()
        ci = cc.as_immutable()

        c1, c2a, c3, c2b = (
            column("c1"),
            column("c2"),
            column("c3"),
            column("c2"),
        )
        c2b.key = "X"

        cc.add(c1)
        cc.add(c2a)
        cc.add(c3)

        cc.replace(c2b)

        assert not cc.contains_column(c2a)
        assert cc.contains_column(c2b)

        eq_(cc._all_columns, [c1, c2b, c3])
        eq_(list(cc), [c1, c2b, c3])
        eq_(len(cc), 3)
        is_(cc[1], c2b)
        self._assert_collection_integrity(cc)

        eq_(ci._all_columns, [c1, c2b, c3])
        eq_(list(ci), [c1, c2b, c3])
        eq_(len(ci), 3)
        is_(ci[1], c2b)

    def test_replace_no_match(self):
        cc = DedupeColumnCollection()
        ci = cc.as_immutable()

        c1, c2, c3, c4 = column("c1"), column("c2"), column("c3"), column("c4")
        c4.key = "X"

        cc.add(c1)
        cc.add(c2)
        cc.add(c3)

        cc.replace(c4)

        assert cc.contains_column(c2)
        assert cc.contains_column(c4)

        eq_(cc._all_columns, [c1, c2, c3, c4])
        eq_(list(cc), [c1, c2, c3, c4])
        is_(cc[3], c4)
        self._assert_collection_integrity(cc)

        eq_(ci._all_columns, [c1, c2, c3, c4])
        eq_(list(ci), [c1, c2, c3, c4])
        is_(ci[3], c4)

    def test_replace_switch_key_name(self):
        c1 = column("id")
        c2 = column("street")
        c3 = column("user_id")

        cc = DedupeColumnCollection(
            columns=[("id", c1), ("street", c2), ("user_id", c3)]
        )

        # for replace col with different key than name, it necessarily
        # removes two columns
        c4 = column("id")
        c4.key = "street"

        cc.replace(c4)

        eq_(list(cc), [c4, c3])
        self._assert_collection_integrity(cc)

    def test_remove(self):
        c1, c2, c3 = column("c1"), column("c2"), column("c3")

        cc = DedupeColumnCollection(
            columns=[("c1", c1), ("c2", c2), ("c3", c3)]
        )
        ci = cc.as_immutable()

        eq_(cc._all_columns, [c1, c2, c3])
        eq_(list(cc), [c1, c2, c3])
        assert cc.contains_column(c2)
        assert "c2" in cc

        eq_(ci._all_columns, [c1, c2, c3])
        eq_(list(ci), [c1, c2, c3])
        assert ci.contains_column(c2)
        assert "c2" in ci

        cc.remove(c2)

        eq_(cc._all_columns, [c1, c3])
        eq_(list(cc), [c1, c3])
        is_(cc[0], c1)
        is_(cc[1], c3)
        assert not cc.contains_column(c2)
        assert "c2" not in cc
        self._assert_collection_integrity(cc)

        eq_(ci._all_columns, [c1, c3])
        eq_(list(ci), [c1, c3])
        is_(ci[0], c1)
        is_(ci[1], c3)
        assert not ci.contains_column(c2)
        assert "c2" not in ci

        assert_raises(IndexError, lambda: ci[2])

    def test_remove_doesnt_change_iteration(self):
        c1, c2, c3, c4, c5 = (
            column("c1"),
            column("c2"),
            column("c3"),
            column("c4"),
            column("c5"),
        )

        cc = DedupeColumnCollection(
            columns=[
                ("c1", c1),
                ("c2", c2),
                ("c3", c3),
                ("c4", c4),
                ("c5", c5),
            ]
        )

        for col in cc:
            if col.name not in ["c1", "c2"]:
                cc.remove(col)

        eq_(cc.keys(), ["c1", "c2"])
        eq_([c.name for c in cc], ["c1", "c2"])
        self._assert_collection_integrity(cc)

    def test_dupes_extend(self):
        cc = DedupeColumnCollection()
        ci = cc.as_immutable()

        c1, c2a, c3, c2b = (
            column("c1"),
            column("c2"),
            column("c3"),
            column("c2"),
        )

        cc.add(c1)
        cc.add(c2a)

        cc.extend([c3, c2b])  # this should remove c2a

        eq_(cc._all_columns, [c1, c2b, c3])
        eq_(list(cc), [c1, c2b, c3])
        is_(cc[1], c2b)
        is_(cc[2], c3)
        assert_raises(IndexError, lambda: cc[3])
        self._assert_collection_integrity(cc)

        assert not cc.contains_column(c2a)
        assert cc.contains_column(c2b)

        eq_(ci._all_columns, [c1, c2b, c3])
        eq_(list(ci), [c1, c2b, c3])
        is_(ci[1], c2b)
        is_(ci[2], c3)
        assert_raises(IndexError, lambda: ci[3])

        assert not ci.contains_column(c2a)
        assert ci.contains_column(c2b)

    def test_extend_existing_maintains_ordering(self):
        cc = DedupeColumnCollection()

        c1, c2, c3, c4, c5 = (
            column("c1"),
            column("c2"),
            column("c3"),
            column("c4"),
            column("c5"),
        )

        cc.extend([c1, c2])
        eq_(cc._all_columns, [c1, c2])
        self._assert_collection_integrity(cc)

        cc.extend([c3])
        eq_(cc._all_columns, [c1, c2, c3])
        self._assert_collection_integrity(cc)

        cc.extend([c4, c2, c5])

        eq_(cc._all_columns, [c1, c2, c3, c4, c5])
        self._assert_collection_integrity(cc)


class LRUTest(fixtures.TestBase):
    def test_lru(self):
        class item(object):
            def __init__(self, id_):
                self.id = id_

            def __str__(self):
                return "item id %d" % self.id

        lru = util.LRUCache(10, threshold=0.2)

        for id_ in range(1, 20):
            lru[id_] = item(id_)

        # first couple of items should be gone
        assert 1 not in lru
        assert 2 not in lru

        # next batch over the threshold of 10 should be present
        for id_ in range(11, 20):
            assert id_ in lru

        lru[12]
        lru[15]
        lru[23] = item(23)
        lru[24] = item(24)
        lru[25] = item(25)
        lru[26] = item(26)
        lru[27] = item(27)

        assert 11 not in lru
        assert 13 not in lru

        for id_ in (25, 24, 23, 14, 12, 19, 18, 17, 16, 15):
            assert id_ in lru

        lru[25]
        i2 = item(25)
        lru[25] = i2
        assert 25 in lru
        assert lru[25] is i2


class ImmutableSubclass(str):
    pass


class FlattenIteratorTest(fixtures.TestBase):
    def test_flatten(self):
        assert list(util.flatten_iterator([[1, 2, 3], [4, 5, 6], 7, 8])) == [
            1,
            2,
            3,
            4,
            5,
            6,
            7,
            8,
        ]

    def test_str_with_iter(self):
        """ensure that a str object with an __iter__ method (like in
        PyPy) is not interpreted as an iterable.

        """

        class IterString(str):
            def __iter__(self):
                return iter(self + "")

        iter_list = [IterString("asdf"), [IterString("x"), IterString("y")]]

        assert list(util.flatten_iterator(iter_list)) == ["asdf", "x", "y"]


class HashOverride(object):
    def __init__(self, value=None):
        self.value = value

    def __hash__(self):
        return hash(self.value)


class NoHash(object):
    def __init__(self, value=None):
        self.value = value

    __hash__ = None


class EqOverride(object):
    def __init__(self, value=None):
        self.value = value

    __hash__ = object.__hash__

    def __eq__(self, other):
        if isinstance(other, EqOverride):
            return self.value == other.value
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, EqOverride):
            return self.value != other.value
        else:
            return True


class HashEqOverride(object):
    def __init__(self, value=None):
        self.value = value

    def __hash__(self):
        return hash(self.value)

    def __eq__(self, other):
        if isinstance(other, EqOverride):
            return self.value == other.value
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, EqOverride):
            return self.value != other.value
        else:
            return True


class IdentitySetTest(fixtures.TestBase):
    obj_type = object

    def assert_eq(self, identityset, expected_iterable):
        expected = sorted([id(o) for o in expected_iterable])
        found = sorted([id(o) for o in identityset])
        eq_(found, expected)

    def test_init(self):
        ids = util.IdentitySet([1, 2, 3, 2, 1])
        self.assert_eq(ids, [1, 2, 3])

        ids = util.IdentitySet(ids)
        self.assert_eq(ids, [1, 2, 3])

        ids = util.IdentitySet()
        self.assert_eq(ids, [])

        ids = util.IdentitySet([])
        self.assert_eq(ids, [])

        ids = util.IdentitySet(ids)
        self.assert_eq(ids, [])

    def test_add(self):
        for type_ in (object, ImmutableSubclass):
            data = [type_(), type_()]
            ids = util.IdentitySet()
            for i in list(range(2)) + list(range(2)):
                ids.add(data[i])
            self.assert_eq(ids, data)

        for type_ in (NoHash, EqOverride, HashOverride, HashEqOverride):
            data = [type_(1), type_(1), type_(2)]
            ids = util.IdentitySet()
            for i in list(range(3)) + list(range(3)):
                ids.add(data[i])
            self.assert_eq(ids, data)

    def test_dunder_sub2(self):
        IdentitySet = util.IdentitySet
        o1, o2, o3 = self.obj_type(), self.obj_type(), self.obj_type()
        ids1 = IdentitySet([o1])
        ids2 = IdentitySet([o1, o2, o3])
        eq_(ids2 - ids1, IdentitySet([o2, o3]))

        ids2 -= ids1
        eq_(ids2, IdentitySet([o2, o3]))

    def test_dunder_eq(self):
        _, _, twin1, twin2, unique1, unique2 = self._create_sets()

        # basic set math
        eq_(twin1 == twin2, True)
        eq_(unique1 == unique2, False)

        # not an IdentitySet
        not_an_identity_set = object()
        eq_(unique1 == not_an_identity_set, False)

    def test_dunder_ne(self):
        _, _, twin1, twin2, unique1, unique2 = self._create_sets()

        # basic set math
        eq_(twin1 != twin2, False)
        eq_(unique1 != unique2, True)

        # not an IdentitySet
        not_an_identity_set = object()
        eq_(unique1 != not_an_identity_set, True)

    def test_dunder_le(self):
        super_, sub_, twin1, twin2, unique1, unique2 = self._create_sets()

        # basic set math
        eq_(sub_ <= super_, True)
        eq_(super_ <= sub_, False)

        # the same sets
        eq_(twin1 <= twin2, True)
        eq_(twin2 <= twin1, True)

        # totally different sets
        eq_(unique1 <= unique2, False)
        eq_(unique2 <= unique1, False)

        # not an IdentitySet
        def should_raise():
            not_an_identity_set = object()
            return unique1 <= not_an_identity_set

        self._assert_unorderable_types(should_raise)

    def test_dunder_lt(self):
        super_, sub_, twin1, twin2, unique1, unique2 = self._create_sets()

        # basic set math
        eq_(sub_ < super_, True)
        eq_(super_ < sub_, False)

        # the same sets
        eq_(twin1 < twin2, False)
        eq_(twin2 < twin1, False)

        # totally different sets
        eq_(unique1 < unique2, False)
        eq_(unique2 < unique1, False)

        # not an IdentitySet
        def should_raise():
            not_an_identity_set = object()
            return unique1 < not_an_identity_set

        self._assert_unorderable_types(should_raise)

    def test_dunder_ge(self):
        super_, sub_, twin1, twin2, unique1, unique2 = self._create_sets()

        # basic set math
        eq_(sub_ >= super_, False)
        eq_(super_ >= sub_, True)

        # the same sets
        eq_(twin1 >= twin2, True)
        eq_(twin2 >= twin1, True)

        # totally different sets
        eq_(unique1 >= unique2, False)
        eq_(unique2 >= unique1, False)

        # not an IdentitySet
        def should_raise():
            not_an_identity_set = object()
            return unique1 >= not_an_identity_set

        self._assert_unorderable_types(should_raise)

    def test_dunder_gt(self):
        super_, sub_, twin1, twin2, unique1, unique2 = self._create_sets()

        # basic set math
        eq_(sub_ > super_, False)
        eq_(super_ > sub_, True)

        # the same sets
        eq_(twin1 > twin2, False)
        eq_(twin2 > twin1, False)

        # totally different sets
        eq_(unique1 > unique2, False)
        eq_(unique2 > unique1, False)

        # not an IdentitySet
        def should_raise():
            not_an_identity_set = object()
            return unique1 > not_an_identity_set

        self._assert_unorderable_types(should_raise)

    def test_issubset(self):
        super_, sub_, twin1, twin2, unique1, unique2 = self._create_sets()

        # basic set math
        eq_(sub_.issubset(super_), True)
        eq_(super_.issubset(sub_), False)

        # the same sets
        eq_(twin1.issubset(twin2), True)
        eq_(twin2.issubset(twin1), True)

        # totally different sets
        eq_(unique1.issubset(unique2), False)
        eq_(unique2.issubset(unique1), False)

        # not an IdentitySet
        not_an_identity_set = object()
        assert_raises(TypeError, unique1.issubset, not_an_identity_set)

    def test_issuperset(self):
        super_, sub_, twin1, twin2, unique1, unique2 = self._create_sets()

        # basic set math
        eq_(sub_.issuperset(super_), False)
        eq_(super_.issuperset(sub_), True)

        # the same sets
        eq_(twin1.issuperset(twin2), True)
        eq_(twin2.issuperset(twin1), True)

        # totally different sets
        eq_(unique1.issuperset(unique2), False)
        eq_(unique2.issuperset(unique1), False)

        # not an IdentitySet
        not_an_identity_set = object()
        assert_raises(TypeError, unique1.issuperset, not_an_identity_set)

    def test_union(self):
        super_, sub_, twin1, twin2, _, _ = self._create_sets()

        # basic set math
        eq_(sub_.union(super_), super_)
        eq_(super_.union(sub_), super_)

        # the same sets
        eq_(twin1.union(twin2), twin1)
        eq_(twin2.union(twin1), twin1)

        # empty sets
        empty = util.IdentitySet([])
        eq_(empty.union(empty), empty)

        # totally different sets
        unique1 = util.IdentitySet([1])
        unique2 = util.IdentitySet([2])
        eq_(unique1.union(unique2), util.IdentitySet([1, 2]))

        # not an IdentitySet
        not_an_identity_set = object()
        assert_raises(TypeError, unique1.union, not_an_identity_set)

    def test_dunder_or(self):
        super_, sub_, twin1, twin2, _, _ = self._create_sets()

        # basic set math
        eq_(sub_ | super_, super_)
        eq_(super_ | sub_, super_)

        # the same sets
        eq_(twin1 | twin2, twin1)
        eq_(twin2 | twin1, twin1)

        # empty sets
        empty = util.IdentitySet([])
        eq_(empty | empty, empty)

        # totally different sets
        unique1 = util.IdentitySet([1])
        unique2 = util.IdentitySet([2])
        eq_(unique1 | unique2, util.IdentitySet([1, 2]))

        # not an IdentitySet
        def should_raise():
            not_an_identity_set = object()
            return unique1 | not_an_identity_set

        assert_raises(TypeError, should_raise)

    def test_update(self):
        pass  # TODO

    def test_dunder_ior(self):
        super_, sub_, _, _, _, _ = self._create_sets()

        # basic set math
        sub_ |= super_
        eq_(sub_, super_)
        super_ |= sub_
        eq_(super_, super_)

        # totally different sets
        unique1 = util.IdentitySet([1])
        unique2 = util.IdentitySet([2])
        unique1 |= unique2
        eq_(unique1, util.IdentitySet([1, 2]))
        eq_(unique2, util.IdentitySet([2]))

        # not an IdentitySet
        def should_raise():
            unique = util.IdentitySet([1])
            not_an_identity_set = object()
            unique |= not_an_identity_set

        assert_raises(TypeError, should_raise)

    def test_difference(self):
        _, _, twin1, twin2, _, _ = self._create_sets()

        # basic set math
        set1 = util.IdentitySet([1, 2, 3])
        set2 = util.IdentitySet([2, 3, 4])
        eq_(set1.difference(set2), util.IdentitySet([1]))
        eq_(set2.difference(set1), util.IdentitySet([4]))

        # empty sets
        empty = util.IdentitySet([])
        eq_(empty.difference(empty), empty)

        # the same sets
        eq_(twin1.difference(twin2), empty)
        eq_(twin2.difference(twin1), empty)

        # totally different sets
        unique1 = util.IdentitySet([1])
        unique2 = util.IdentitySet([2])
        eq_(unique1.difference(unique2), util.IdentitySet([1]))
        eq_(unique2.difference(unique1), util.IdentitySet([2]))

        # not an IdentitySet
        not_an_identity_set = object()
        assert_raises(TypeError, unique1.difference, not_an_identity_set)

    def test_dunder_sub(self):
        _, _, twin1, twin2, _, _ = self._create_sets()

        # basic set math
        set1 = util.IdentitySet([1, 2, 3])
        set2 = util.IdentitySet([2, 3, 4])
        eq_(set1 - set2, util.IdentitySet([1]))
        eq_(set2 - set1, util.IdentitySet([4]))

        # empty sets
        empty = util.IdentitySet([])
        eq_(empty - empty, empty)

        # the same sets
        eq_(twin1 - twin2, empty)
        eq_(twin2 - twin1, empty)

        # totally different sets
        unique1 = util.IdentitySet([1])
        unique2 = util.IdentitySet([2])
        eq_(unique1 - unique2, util.IdentitySet([1]))
        eq_(unique2 - unique1, util.IdentitySet([2]))

        # not an IdentitySet
        def should_raise():
            not_an_identity_set = object()
            unique1 - not_an_identity_set

        assert_raises(TypeError, should_raise)

    def test_difference_update(self):
        pass  # TODO

    def test_dunder_isub(self):
        pass  # TODO

    def test_intersection(self):
        super_, sub_, twin1, twin2, unique1, unique2 = self._create_sets()

        # basic set math
        eq_(sub_.intersection(super_), sub_)
        eq_(super_.intersection(sub_), sub_)

        # the same sets
        eq_(twin1.intersection(twin2), twin1)
        eq_(twin2.intersection(twin1), twin1)

        # empty sets
        empty = util.IdentitySet([])
        eq_(empty.intersection(empty), empty)

        # totally different sets
        eq_(unique1.intersection(unique2), empty)

        # not an IdentitySet
        not_an_identity_set = object()
        assert_raises(TypeError, unique1.intersection, not_an_identity_set)

    def test_dunder_and(self):
        super_, sub_, twin1, twin2, unique1, unique2 = self._create_sets()

        # basic set math
        eq_(sub_ & super_, sub_)
        eq_(super_ & sub_, sub_)

        # the same sets
        eq_(twin1 & twin2, twin1)
        eq_(twin2 & twin1, twin1)

        # empty sets
        empty = util.IdentitySet([])
        eq_(empty & empty, empty)

        # totally different sets
        eq_(unique1 & unique2, empty)

        # not an IdentitySet
        def should_raise():
            not_an_identity_set = object()
            return unique1 & not_an_identity_set

        assert_raises(TypeError, should_raise)

    def test_intersection_update(self):
        pass  # TODO

    def test_dunder_iand(self):
        pass  # TODO

    def test_symmetric_difference(self):
        _, _, twin1, twin2, _, _ = self._create_sets()

        # basic set math
        set1 = util.IdentitySet([1, 2, 3])
        set2 = util.IdentitySet([2, 3, 4])
        eq_(set1.symmetric_difference(set2), util.IdentitySet([1, 4]))
        eq_(set2.symmetric_difference(set1), util.IdentitySet([1, 4]))

        # empty sets
        empty = util.IdentitySet([])
        eq_(empty.symmetric_difference(empty), empty)

        # the same sets
        eq_(twin1.symmetric_difference(twin2), empty)
        eq_(twin2.symmetric_difference(twin1), empty)
        # totally different sets
        unique1 = util.IdentitySet([1])
        unique2 = util.IdentitySet([2])
        eq_(unique1.symmetric_difference(unique2), util.IdentitySet([1, 2]))
        eq_(unique2.symmetric_difference(unique1), util.IdentitySet([1, 2]))

        # not an IdentitySet
        not_an_identity_set = object()
        assert_raises(
            TypeError, unique1.symmetric_difference, not_an_identity_set
        )

    def test_dunder_xor(self):
        _, _, twin1, twin2, _, _ = self._create_sets()

        # basic set math
        set1 = util.IdentitySet([1, 2, 3])
        set2 = util.IdentitySet([2, 3, 4])
        eq_(set1 ^ set2, util.IdentitySet([1, 4]))
        eq_(set2 ^ set1, util.IdentitySet([1, 4]))

        # empty sets
        empty = util.IdentitySet([])
        eq_(empty ^ empty, empty)

        # the same sets
        eq_(twin1 ^ twin2, empty)
        eq_(twin2 ^ twin1, empty)

        # totally different sets
        unique1 = util.IdentitySet([1])
        unique2 = util.IdentitySet([2])
        eq_(unique1 ^ unique2, util.IdentitySet([1, 2]))
        eq_(unique2 ^ unique1, util.IdentitySet([1, 2]))

        # not an IdentitySet
        def should_raise():
            not_an_identity_set = object()
            return unique1 ^ not_an_identity_set

        assert_raises(TypeError, should_raise)

    def test_symmetric_difference_update(self):
        pass  # TODO

    def _create_sets(self):
        o1, o2, o3, o4, o5 = (
            self.obj_type(),
            self.obj_type(),
            self.obj_type(),
            self.obj_type(),
            self.obj_type(),
        )
        super_ = util.IdentitySet([o1, o2, o3])
        sub_ = util.IdentitySet([o2])
        twin1 = util.IdentitySet([o3])
        twin2 = util.IdentitySet([o3])
        unique1 = util.IdentitySet([o4])
        unique2 = util.IdentitySet([o5])
        return super_, sub_, twin1, twin2, unique1, unique2

    def _assert_unorderable_types(self, callable_):
        if util.py3k:
            assert_raises_message(
                TypeError, "not supported between instances of", callable_
            )
        else:
            assert_raises_message(
                TypeError, "cannot compare sets using cmp()", callable_
            )

    def test_basic_sanity(self):
        IdentitySet = util.IdentitySet

        o1, o2, o3 = self.obj_type(), self.obj_type(), self.obj_type()
        ids = IdentitySet([o1])
        ids.discard(o1)
        ids.discard(o1)
        ids.add(o1)
        ids.remove(o1)
        assert_raises(KeyError, ids.remove, o1)

        eq_(ids.copy(), ids)

        # explicit __eq__ and __ne__ tests
        assert ids != None  # noqa
        assert not (ids == None)  # noqa

        ne_(ids, IdentitySet([o1, o2, o3]))
        ids.clear()
        assert o1 not in ids
        ids.add(o2)
        assert o2 in ids
        eq_(ids.pop(), o2)
        ids.add(o1)
        eq_(len(ids), 1)

        isuper = IdentitySet([o1, o2])
        assert ids < isuper
        assert ids.issubset(isuper)
        assert isuper.issuperset(ids)
        assert isuper > ids

        eq_(ids.union(isuper), isuper)
        eq_(ids | isuper, isuper)
        eq_(isuper - ids, IdentitySet([o2]))
        eq_(isuper.difference(ids), IdentitySet([o2]))
        eq_(ids.intersection(isuper), IdentitySet([o1]))
        eq_(ids & isuper, IdentitySet([o1]))
        eq_(ids.symmetric_difference(isuper), IdentitySet([o2]))
        eq_(ids ^ isuper, IdentitySet([o2]))

        ids.update(isuper)
        ids |= isuper
        ids.difference_update(isuper)
        ids -= isuper
        ids.intersection_update(isuper)
        ids &= isuper
        ids.symmetric_difference_update(isuper)
        ids ^= isuper

        ids.update("foobar")
        try:
            ids |= "foobar"
            assert False
        except TypeError:
            assert True

        try:
            s = set([o1, o2])
            s |= ids
            assert False
        except TypeError:
            assert True

        assert_raises(TypeError, util.cmp, ids)
        assert_raises(TypeError, hash, ids)


class NoHashIdentitySetTest(IdentitySetTest):
    obj_type = NoHash


class OrderedIdentitySetTest(fixtures.TestBase):
    def assert_eq(self, identityset, expected_iterable):
        expected = [id(o) for o in expected_iterable]
        found = [id(o) for o in identityset]
        eq_(found, expected)

    def test_add(self):
        elem = object
        s = util.OrderedIdentitySet()
        s.add(elem())
        s.add(elem())

    def test_intersection(self):
        elem = object
        eq_ = self.assert_eq

        a, b, c, d, e, f, g = (
            elem(),
            elem(),
            elem(),
            elem(),
            elem(),
            elem(),
            elem(),
        )

        s1 = util.OrderedIdentitySet([a, b, c])
        s2 = util.OrderedIdentitySet([d, e, f])
        s3 = util.OrderedIdentitySet([a, d, f, g])

        eq_(s1.intersection(s2), [])
        eq_(s1.intersection(s3), [a])
        eq_(s1.union(s2).intersection(s3), [a, d, f])


class DictlikeIteritemsTest(fixtures.TestBase):
    baseline = set([("a", 1), ("b", 2), ("c", 3)])

    def _ok(self, instance):
        iterator = util.dictlike_iteritems(instance)
        eq_(set(iterator), self.baseline)

    def _notok(self, instance):
        assert_raises(TypeError, util.dictlike_iteritems, instance)

    def test_dict(self):
        d = dict(a=1, b=2, c=3)
        self._ok(d)

    def test_subdict(self):
        class subdict(dict):
            pass

        d = subdict(a=1, b=2, c=3)
        self._ok(d)

    if util.py2k:

        def test_UserDict(self):
            import UserDict

            d = UserDict.UserDict(a=1, b=2, c=3)
            self._ok(d)

    def test_object(self):
        self._notok(object())

    if util.py2k:

        def test_duck_1(self):
            class duck1(object):
                def iteritems(duck):
                    return iter(self.baseline)

            self._ok(duck1())

    def test_duck_2(self):
        class duck2(object):
            def items(duck):
                return list(self.baseline)

        self._ok(duck2())

    if util.py2k:

        def test_duck_3(self):
            class duck3(object):
                def iterkeys(duck):
                    return iter(["a", "b", "c"])

                def __getitem__(duck, key):
                    return dict(a=1, b=2, c=3).get(key)

            self._ok(duck3())

    def test_duck_4(self):
        class duck4(object):
            def iterkeys(duck):
                return iter(["a", "b", "c"])

        self._notok(duck4())

    def test_duck_5(self):
        class duck5(object):
            def keys(duck):
                return ["a", "b", "c"]

            def get(duck, key):
                return dict(a=1, b=2, c=3).get(key)

        self._ok(duck5())

    def test_duck_6(self):
        class duck6(object):
            def keys(duck):
                return ["a", "b", "c"]

        self._notok(duck6())


class DuckTypeCollectionTest(fixtures.TestBase):
    def test_sets(self):
        class SetLike(object):
            def add(self):
                pass

        class ForcedSet(list):
            __emulates__ = set

        for type_ in (set, SetLike, ForcedSet):
            eq_(util.duck_type_collection(type_), set)
            instance = type_()
            eq_(util.duck_type_collection(instance), set)

        for type_ in (frozenset,):
            is_(util.duck_type_collection(type_), None)
            instance = type_()
            is_(util.duck_type_collection(instance), None)


class PublicFactoryTest(fixtures.TestBase):
    def _fixture(self):
        class Thingy(object):
            def __init__(self, value):
                "make a thingy"
                self.value = value

            @classmethod
            def foobar(cls, x, y):
                "do the foobar"
                return Thingy(x + y)

        return Thingy

    def test_classmethod(self):
        Thingy = self._fixture()
        foob = langhelpers.public_factory(Thingy.foobar, ".sql.elements.foob")
        eq_(foob(3, 4).value, 7)
        eq_(foob(x=3, y=4).value, 7)
        eq_(foob.__doc__, "do the foobar")
        eq_(foob.__module__, "sqlalchemy.sql.elements")
        assert Thingy.foobar.__doc__.startswith("This function is mirrored;")

    def test_constructor(self):
        Thingy = self._fixture()
        foob = langhelpers.public_factory(Thingy, ".sql.elements.foob")
        eq_(foob(7).value, 7)
        eq_(foob(value=7).value, 7)
        eq_(foob.__doc__, "make a thingy")
        eq_(foob.__module__, "sqlalchemy.sql.elements")
        assert Thingy.__init__.__doc__.startswith(
            "Construct a new :class:`.Thingy` object."
        )


class ArgInspectionTest(fixtures.TestBase):
    def test_get_cls_kwargs(self):
        class A(object):
            def __init__(self, a):
                pass

        class A1(A):
            def __init__(self, a1):
                pass

        class A11(A1):
            def __init__(self, a11, **kw):
                pass

        class B(object):
            def __init__(self, b, **kw):
                pass

        class B1(B):
            def __init__(self, b1, **kw):
                pass

        class B2(B):
            def __init__(self, b2):
                pass

        class AB(A, B):
            def __init__(self, ab):
                pass

        class BA(B, A):
            def __init__(self, ba, **kwargs):
                pass

        class BA1(BA):
            pass

        class CAB(A, B):
            pass

        class CBA(B, A):
            pass

        class CB1A1(B1, A1):
            pass

        class CAB1(A, B1):
            pass

        class CB1A(B1, A):
            pass

        class CB2A(B2, A):
            pass

        class D(object):
            pass

        class BA2(B, A):
            pass

        class A11B1(A11, B1):
            pass

        def test(cls, *expected):
            eq_(set(util.get_cls_kwargs(cls)), set(expected))

        test(A, "a")
        test(A1, "a1")
        test(A11, "a11", "a1")
        test(B, "b")
        test(B1, "b1", "b")
        test(AB, "ab")
        test(BA, "ba", "b", "a")
        test(BA1, "ba", "b", "a")
        test(CAB, "a")
        test(CBA, "b", "a")
        test(CAB1, "a")
        test(CB1A, "b1", "b", "a")
        test(CB2A, "b2")
        test(CB1A1, "a1", "b1", "b")
        test(D)
        test(BA2, "a", "b")
        test(A11B1, "a1", "a11", "b", "b1")

    def test_get_func_kwargs(self):
        def f1():
            pass

        def f2(foo):
            pass

        def f3(*foo):
            pass

        def f4(**foo):
            pass

        def test(fn, *expected):
            eq_(set(util.get_func_kwargs(fn)), set(expected))

        test(f1)
        test(f2, "foo")
        test(f3)
        test(f4)

    def test_callable_argspec_fn(self):
        def foo(x, y, **kw):
            pass

        eq_(
            get_callable_argspec(foo),
            compat.FullArgSpec(["x", "y"], None, "kw", None, [], None, {}),
        )

    def test_callable_argspec_fn_no_self(self):
        def foo(x, y, **kw):
            pass

        eq_(
            get_callable_argspec(foo, no_self=True),
            compat.FullArgSpec(["x", "y"], None, "kw", None, [], None, {}),
        )

    def test_callable_argspec_fn_no_self_but_self(self):
        def foo(self, x, y, **kw):
            pass

        eq_(
            get_callable_argspec(foo, no_self=True),
            compat.FullArgSpec(
                ["self", "x", "y"], None, "kw", None, [], None, {}
            ),
        )

    @testing.requires.cpython
    def test_callable_argspec_py_builtin(self):
        import datetime

        assert_raises(TypeError, get_callable_argspec, datetime.datetime.now)

    @testing.requires.cpython
    def test_callable_argspec_obj_init(self):
        assert_raises(TypeError, get_callable_argspec, object)

    def test_callable_argspec_method(self):
        class Foo(object):
            def foo(self, x, y, **kw):
                pass

        eq_(
            get_callable_argspec(Foo.foo),
            compat.FullArgSpec(
                ["self", "x", "y"], None, "kw", None, [], None, {}
            ),
        )

    def test_callable_argspec_instance_method_no_self(self):
        class Foo(object):
            def foo(self, x, y, **kw):
                pass

        eq_(
            get_callable_argspec(Foo().foo, no_self=True),
            compat.FullArgSpec(["x", "y"], None, "kw", None, [], None, {}),
        )

    def test_callable_argspec_unbound_method_no_self(self):
        class Foo(object):
            def foo(self, x, y, **kw):
                pass

        eq_(
            get_callable_argspec(Foo.foo, no_self=True),
            compat.FullArgSpec(
                ["self", "x", "y"], None, "kw", None, [], None, {}
            ),
        )

    def test_callable_argspec_init(self):
        class Foo(object):
            def __init__(self, x, y):
                pass

        eq_(
            get_callable_argspec(Foo),
            compat.FullArgSpec(
                ["self", "x", "y"], None, None, None, [], None, {}
            ),
        )

    def test_callable_argspec_init_no_self(self):
        class Foo(object):
            def __init__(self, x, y):
                pass

        eq_(
            get_callable_argspec(Foo, no_self=True),
            compat.FullArgSpec(["x", "y"], None, None, None, [], None, {}),
        )

    def test_callable_argspec_call(self):
        class Foo(object):
            def __call__(self, x, y):
                pass

        eq_(
            get_callable_argspec(Foo()),
            compat.FullArgSpec(
                ["self", "x", "y"], None, None, None, [], None, {}
            ),
        )

    def test_callable_argspec_call_no_self(self):
        class Foo(object):
            def __call__(self, x, y):
                pass

        eq_(
            get_callable_argspec(Foo(), no_self=True),
            compat.FullArgSpec(["x", "y"], None, None, None, [], None, {}),
        )

    @testing.requires.cpython
    def test_callable_argspec_partial(self):
        from functools import partial

        def foo(x, y, z, **kw):
            pass

        bar = partial(foo, 5)

        assert_raises(TypeError, get_callable_argspec, bar)

    def test_getargspec_6_tuple(self):
        def foo(x, y, z, **kw):
            pass

        spec = compat.inspect_getfullargspec(foo)

        eq_(
            spec,
            compat.FullArgSpec(
                args=["x", "y", "z"],
                varargs=None,
                varkw="kw",
                defaults=None,
                kwonlyargs=[],
                kwonlydefaults=None,
                annotations={},
            ),
        )


class SymbolTest(fixtures.TestBase):
    def test_basic(self):
        sym1 = util.symbol("foo")
        assert sym1.name == "foo"
        sym2 = util.symbol("foo")

        assert sym1 is sym2
        assert sym1 == sym2

        sym3 = util.symbol("bar")
        assert sym1 is not sym3
        assert sym1 != sym3

    def test_pickle(self):
        sym1 = util.symbol("foo")
        sym2 = util.symbol("foo")

        assert sym1 is sym2

        # default
        s = util.pickle.dumps(sym1)
        util.pickle.loads(s)

        for protocol in 0, 1, 2:
            print(protocol)
            serial = util.pickle.dumps(sym1)
            rt = util.pickle.loads(serial)
            assert rt is sym1
            assert rt is sym2

    def test_bitflags(self):
        sym1 = util.symbol("sym1", canonical=1)
        sym2 = util.symbol("sym2", canonical=2)

        assert sym1 & sym1
        assert not sym1 & sym2
        assert not sym1 & sym1 & sym2

    def test_composites(self):
        sym1 = util.symbol("sym1", canonical=1)
        sym2 = util.symbol("sym2", canonical=2)
        sym3 = util.symbol("sym3", canonical=4)
        sym4 = util.symbol("sym4", canonical=8)

        assert sym1 & (sym2 | sym1 | sym4)
        assert not sym1 & (sym2 | sym3)

        assert not (sym1 | sym2) & (sym3 | sym4)
        assert (sym1 | sym2) & (sym2 | sym4)

    def test_parser(self):
        sym1 = util.symbol("sym1", canonical=1)
        sym2 = util.symbol("sym2", canonical=2)
        sym3 = util.symbol("sym3", canonical=4)
        sym4 = util.symbol("sym4", canonical=8)

        lookup_one = {sym1: [], sym2: [True], sym3: [False], sym4: [None]}
        lookup_two = {sym1: [], sym2: [True], sym3: [False]}
        lookup_three = {sym1: [], sym2: ["symbol2"], sym3: []}

        is_(
            util.symbol.parse_user_argument(
                "sym2", lookup_one, "some_name", resolve_symbol_names=True
            ),
            sym2,
        )

        assert_raises_message(
            exc.ArgumentError,
            "Invalid value for 'some_name': 'sym2'",
            util.symbol.parse_user_argument,
            "sym2",
            lookup_one,
            "some_name",
        )
        is_(
            util.symbol.parse_user_argument(
                True, lookup_one, "some_name", resolve_symbol_names=False
            ),
            sym2,
        )

        is_(
            util.symbol.parse_user_argument(sym2, lookup_one, "some_name"),
            sym2,
        )

        is_(
            util.symbol.parse_user_argument(None, lookup_one, "some_name"),
            sym4,
        )

        is_(
            util.symbol.parse_user_argument(None, lookup_two, "some_name"),
            None,
        )

        is_(
            util.symbol.parse_user_argument(
                "symbol2", lookup_three, "some_name"
            ),
            sym2,
        )

        assert_raises_message(
            exc.ArgumentError,
            "Invalid value for 'some_name': 'foo'",
            util.symbol.parse_user_argument,
            "foo",
            lookup_three,
            "some_name",
        )


class _Py3KFixtures(object):
    def _kw_only_fixture(self):
        pass

    def _kw_plus_posn_fixture(self):
        pass

    def _kw_opt_fixture(self):
        pass


if util.py3k:
    # Python 2 cannot parse keyword-only argument syntax, so the real
    # fixtures are compiled via exec() under Python 3 only and patched
    # onto the class, replacing the no-op placeholders above.
    _locals = {}
    exec(
        """
def _kw_only_fixture(self, a, *, b, c):
    pass

def _kw_plus_posn_fixture(self, a, *args, b, c):
    pass

def _kw_opt_fixture(self, a, *, b, c="c"):
    pass
""",
        _locals,
    )
    for k in _locals:
        setattr(_Py3KFixtures, k, _locals[k])

py3k_fixtures = _Py3KFixtures()


class TestFormatArgspec(_Py3KFixtures, fixtures.TestBase):
    @testing.combinations(
        (
            lambda: None,
            {
                "args": "()",
                "self_arg": None,
                "apply_kw": "()",
                "apply_pos": "()",
                "apply_pos_proxied": "()",
                "apply_kw_proxied": "()",
            },
            True,
        ),
        (
            lambda: None,
            {
                "args": "",
                "self_arg": None,
                "apply_kw": "",
                "apply_pos": "",
                "apply_pos_proxied": "",
                "apply_kw_proxied": "",
            },
            False,
        ),
        (
            lambda self: None,
            {
                "args": "(self)",
                "self_arg": "self",
                "apply_kw": "(self)",
                "apply_pos": "(self)",
                "apply_pos_proxied": "()",
                "apply_kw_proxied": "()",
            },
            True,
        ),
        (
            lambda self: None,
            {
                "args": "self",
                "self_arg": "self",
                "apply_kw": "self",
                "apply_pos": "self",
                "apply_pos_proxied": "",
                "apply_kw_proxied": "",
            },
            False,
        ),
        (
            lambda *a: None,
            {
                "args": "(*a)",
                "self_arg": "a[0]",
                "apply_kw": "(*a)",
                "apply_pos": "(*a)",
                "apply_pos_proxied": "(*a)",
                "apply_kw_proxied": "(*a)",
            },
            True,
        ),
        (
            lambda **kw: None,
            {
                "args": "(**kw)",
                "self_arg": None,
                "apply_kw": "(**kw)",
                "apply_pos": "(**kw)",
                "apply_pos_proxied": "(**kw)",
                "apply_kw_proxied": "(**kw)",
            },
            True,
        ),
        (
            lambda *a, **kw: None,
            {
                "args": "(*a, **kw)",
                "self_arg": "a[0]",
                "apply_kw": "(*a, **kw)",
                "apply_pos": "(*a, **kw)",
                "apply_pos_proxied": "(*a, **kw)",
                "apply_kw_proxied": "(*a, **kw)",
            },
            True,
        ),
        (
            lambda a, *b: None,
            {
                "args": "(a, *b)",
                "self_arg": "a",
                "apply_kw": "(a, *b)",
                "apply_pos": "(a, *b)",
                "apply_pos_proxied": "(*b)",
                "apply_kw_proxied": "(*b)",
            },
            True,
        ),
        (
            lambda a, **b: None,
            {
                "args": "(a, **b)",
                "self_arg": "a",
                "apply_kw": "(a, **b)",
                "apply_pos": "(a, **b)",
                "apply_pos_proxied": "(**b)",
                "apply_kw_proxied": "(**b)",
            },
            True,
        ),
        (
            lambda a, *b, **c: None,
            {
                "args": "(a, *b, **c)",
                "self_arg": "a",
                "apply_kw": "(a, *b, **c)",
                "apply_pos": "(a, *b, **c)",
                "apply_pos_proxied": "(*b, **c)",
                "apply_kw_proxied": "(*b, **c)",
            },
            True,
        ),
        (
            lambda a, b=1, **c: None,
            {
                "args": "(a, b=1, **c)",
                "self_arg": "a",
                "apply_kw": "(a, b=b, **c)",
                "apply_pos": "(a, b, **c)",
                "apply_pos_proxied": "(b, **c)",
                "apply_kw_proxied": "(b=b, **c)",
            },
            True,
        ),
        (
            lambda a=1, b=2: None,
            {
                "args": "(a=1, b=2)",
                "self_arg": "a",
                "apply_kw": "(a=a, b=b)",
                "apply_pos": "(a, b)",
                "apply_pos_proxied": "(b)",
                "apply_kw_proxied": "(b=b)",
            },
            True,
        ),
        (
            lambda a=1, b=2: None,
            {
                "args": "a=1, b=2",
                "self_arg": "a",
                "apply_kw": "a=a, b=b",
                "apply_pos": "a, b",
                "apply_pos_proxied": "b",
                "apply_kw_proxied": "b=b",
            },
            False,
        ),
        (
            py3k_fixtures._kw_only_fixture,
            {
                "args": "self, a, *, b, c",
                "self_arg": "self",
                "apply_pos": "self, a, *, b, c",
                "apply_kw": "self, a, b=b, c=c",
                "apply_pos_proxied": "a, *, b, c",
                "apply_kw_proxied": "a, b=b, c=c",
            },
            False,
            testing.requires.python3,
        ),
        (
            py3k_fixtures._kw_plus_posn_fixture,
            {
                "args": "self, a, *args, b, c",
                "self_arg": "self",
                "apply_pos": "self, a, *args, b, c",
                "apply_kw": "self, a, b=b, c=c, *args",
                "apply_pos_proxied": "a, *args, b, c",
                "apply_kw_proxied": "a, b=b, c=c, *args",
            },
            False,
            testing.requires.python3,
        ),
        (
            py3k_fixtures._kw_opt_fixture,
            {
                "args": "self, a, *, b, c='c'",
                "self_arg": "self",
                "apply_pos": "self, a, *, b, c",
                "apply_kw": "self, a, b=b, c=c",
                "apply_pos_proxied": "a, *, b, c",
                "apply_kw_proxied": "a, b=b, c=c",
            },
            False,
            testing.requires.python3,
        ),
        argnames="fn,wanted,grouped",
    )
    def test_specs(self, fn, wanted, grouped):

        # test direct function
        if grouped is None:
            parsed = util.format_argspec_plus(fn)
        else:
            parsed = util.format_argspec_plus(fn, grouped=grouped)
        eq_(parsed, wanted)

        # test sending fullargspec
        spec = compat.inspect_getfullargspec(fn)
        if grouped is None:
            parsed = util.format_argspec_plus(spec)
        else:
            parsed = util.format_argspec_plus(spec, grouped=grouped)
        eq_(parsed, wanted)

    @testing.requires.cpython
    def test_init_grouped(self):
        object_spec = {
            "args": "(self)",
            "self_arg": "self",
            "apply_pos": "(self)",
            "apply_kw": "(self)",
            "apply_pos_proxied": "()",
            "apply_kw_proxied": "()",
        }
        wrapper_spec = {
            "args": "(self, *args, **kwargs)",
            "self_arg": "self",
            "apply_pos": "(self, *args, **kwargs)",
            "apply_kw": "(self, *args, **kwargs)",
            "apply_pos_proxied": "(*args, **kwargs)",
            "apply_kw_proxied": "(*args, **kwargs)",
        }
        custom_spec = {
            "args": "(slef, a=123)",
            "self_arg": "slef",  # yes, slef
            "apply_pos": "(slef, a)",
            "apply_pos_proxied": "(a)",
            "apply_kw_proxied": "(a=a)",
            "apply_kw": "(slef, a=a)",
        }

        self._test_init(None, object_spec, wrapper_spec, custom_spec)
        self._test_init(True, object_spec, wrapper_spec, custom_spec)

    @testing.requires.cpython
    def test_init_bare(self):
        object_spec = {
            "args": "self",
            "self_arg": "self",
            "apply_pos": "self",
            "apply_kw": "self",
            "apply_pos_proxied": "",
            "apply_kw_proxied": "",
        }
        wrapper_spec = {
            "args": "self, *args, **kwargs",
            "self_arg": "self",
            "apply_pos": "self, *args, **kwargs",
            "apply_kw": "self, *args, **kwargs",
            "apply_pos_proxied": "*args, **kwargs",
            "apply_kw_proxied": "*args, **kwargs",
        }
        custom_spec = {
            "args": "slef, a=123",
            "self_arg": "slef",  # yes, slef
            "apply_pos": "slef, a",
            "apply_kw": "slef, a=a",
            "apply_pos_proxied": "a",
            "apply_kw_proxied": "a=a",
        }

        self._test_init(False, object_spec, wrapper_spec, custom_spec)

    def _test_init(self, grouped, object_spec, wrapper_spec, custom_spec):
        def test(fn, wanted):
            if grouped is None:
                parsed = util.format_argspec_init(fn)
            else:
                parsed = util.format_argspec_init(fn, grouped=grouped)
            eq_(parsed, wanted)

        class Obj(object):
            pass

        test(Obj.__init__, object_spec)

        class Obj(object):
            def __init__(self):
                pass

        test(Obj.__init__, object_spec)

        class Obj(object):
            def __init__(slef, a=123):
                pass

        test(Obj.__init__, custom_spec)

        class Obj(list):
            pass

        test(Obj.__init__, wrapper_spec)

        class Obj(list):
            def __init__(self, *args, **kwargs):
                pass

        test(Obj.__init__, wrapper_spec)

        class Obj(list):
            def __init__(self):
                pass

        test(Obj.__init__, object_spec)

        class Obj(list):
            def __init__(slef, a=123):
                pass

        test(Obj.__init__, custom_spec)


class GenericReprTest(fixtures.TestBase):
    def test_all_positional(self):
        class Foo(object):
            def __init__(self, a, b, c):
                self.a = a
                self.b = b
                self.c = c

        eq_(util.generic_repr(Foo(1, 2, 3)), "Foo(1, 2, 3)")

    def test_positional_plus_kw(self):
        class Foo(object):
            def __init__(self, a, b, c=5, d=4):
                self.a = a
                self.b = b
                self.c = c
                self.d = d

        eq_(util.generic_repr(Foo(1, 2, 3, 6)), "Foo(1, 2, c=3, d=6)")

    def test_kw_defaults(self):
        class Foo(object):
            def __init__(self, a=1, b=2, c=3, d=4):
                self.a = a
                self.b = b
                self.c = c
                self.d = d

        eq_(util.generic_repr(Foo(1, 5, 3, 7)), "Foo(b=5, d=7)")

    def test_multi_kw(self):
        class Foo(object):
            def __init__(self, a, b, c=3, d=4):
                self.a = a
                self.b = b
                self.c = c
                self.d = d

        class Bar(Foo):
            def __init__(self, e, f, g=5, **kw):
                self.e = e
                self.f = f
                self.g = g
                super(Bar, self).__init__(**kw)

        eq_(
            util.generic_repr(
                Bar("e", "f", g=7, a=6, b=5, d=9), to_inspect=[Bar, Foo]
            ),
            "Bar('e', 'f', g=7, a=6, b=5, d=9)",
        )

        eq_(
            util.generic_repr(Bar("e", "f", a=6, b=5), to_inspect=[Bar, Foo]),
            "Bar('e', 'f', a=6, b=5)",
        )

    def test_multi_kw_repeated(self):
        class Foo(object):
            def __init__(self, a=1, b=2):
                self.a = a
                self.b = b

        class Bar(Foo):
            def __init__(self, b=3, c=4, **kw):
                self.c = c
                super(Bar, self).__init__(b=b, **kw)

        eq_(
            util.generic_repr(Bar(a="a", b="b", c="c"), to_inspect=[Bar, Foo]),
            "Bar(b='b', c='c', a='a')",
        )

    def test_discard_vargs(self):
        class Foo(object):
            def __init__(self, a, b, *args):
                self.a = a
                self.b = b
                self.c, self.d = args[0:2]

        eq_(util.generic_repr(Foo(1, 2, 3, 4)), "Foo(1, 2)")

    def test_discard_vargs_kwargs(self):
        class Foo(object):
            def __init__(self, a, b, *args, **kw):
                self.a = a
                self.b = b
                self.c, self.d = args[0:2]

        eq_(util.generic_repr(Foo(1, 2, 3, 4, x=7, y=4)), "Foo(1, 2)")

    def test_significant_vargs(self):
        class Foo(object):
            def __init__(self, a, b, *args):
                self.a = a
                self.b = b
                self.args = args

        eq_(util.generic_repr(Foo(1, 2, 3, 4)), "Foo(1, 2, 3, 4)")

    def test_no_args(self):
        class Foo(object):
            def __init__(self):
                pass

        eq_(util.generic_repr(Foo()), "Foo()")

    def test_no_init(self):
        class Foo(object):
            pass

        eq_(util.generic_repr(Foo()), "Foo()")


class AsInterfaceTest(fixtures.TestBase):
    class Something(object):
        def _ignoreme(self):
            pass

        def foo(self):
            pass

        def bar(self):
            pass

    class Partial(object):
        def bar(self):
            pass

    class Object(object):
        pass

    def test_no_cls_no_methods(self):
        obj = object()
        assert_raises(TypeError, util.as_interface, obj)

    def test_instance(self):
        obj = object()
        assert_raises(TypeError, util.as_interface, obj, cls=self.Something)

        assert_raises(TypeError, util.as_interface, obj, methods=("foo"))

        assert_raises(
            TypeError,
            util.as_interface,
            obj,
            cls=self.Something,
            required=("foo"),
        )

        obj = self.Something()
        eq_(obj, util.as_interface(obj, cls=self.Something))
        eq_(obj, util.as_interface(obj, methods=("foo",)))
        eq_(
            obj,
            util.as_interface(
                obj, cls=self.Something, required=("outofband",)
            ),
        )
        partial = self.Partial()

        slotted = self.Object()
        slotted.bar = lambda self: 123

        for obj in partial, slotted:
            eq_(obj, util.as_interface(obj, cls=self.Something))

            assert_raises(TypeError, util.as_interface, obj, methods=("foo"))

            eq_(obj, util.as_interface(obj, methods=("bar",)))
            eq_(
                obj,
                util.as_interface(obj, cls=self.Something, required=("bar",)),
            )
            assert_raises(
                TypeError,
                util.as_interface,
                obj,
                cls=self.Something,
                required=("foo",),
            )

            assert_raises(
                TypeError,
                util.as_interface,
                obj,
                cls=self.Something,
                required=self.Something,
            )

    def test_dict(self):
        obj = {}
        assert_raises(TypeError, util.as_interface, obj, cls=self.Something)
        assert_raises(TypeError, util.as_interface, obj, methods="foo")
        assert_raises(
            TypeError,
            util.as_interface,
            obj,
            cls=self.Something,
            required="foo",
        )

        def assertAdapted(obj, *methods):
            assert isinstance(obj, type)
            found = set([m for m in dir(obj) if not m.startswith("_")])
            for method in methods:
                assert method in found
                found.remove(method)
            assert not found

        def fn(self):
            return 123

        obj = {"foo": fn, "bar": fn}
        res = util.as_interface(obj, cls=self.Something)
        assertAdapted(res, "foo", "bar")
        res = util.as_interface(
            obj, cls=self.Something, required=self.Something
        )
        assertAdapted(res, "foo", "bar")
        res = util.as_interface(obj, cls=self.Something, required=("foo",))
        assertAdapted(res, "foo", "bar")
        res = util.as_interface(obj, methods=("foo", "bar"))
        assertAdapted(res, "foo", "bar")
        res = util.as_interface(obj, methods=("foo", "bar", "baz"))
        assertAdapted(res, "foo", "bar")
        res = util.as_interface(obj, methods=("foo", "bar"), required=("foo",))
        assertAdapted(res, "foo", "bar")
        assert_raises(TypeError, util.as_interface, obj, methods=("foo",))
        assert_raises(
            TypeError,
            util.as_interface,
            obj,
            methods=("foo", "bar", "baz"),
            required=("baz",),
        )
        obj = {"foo": 123}
        assert_raises(TypeError, util.as_interface, obj, cls=self.Something)


class TestClassHierarchy(fixtures.TestBase):
    def test_object(self):
        eq_(set(util.class_hierarchy(object)), set((object,)))

    def test_single(self):
        class A(object):
            pass

        class B(object):
            pass

        eq_(set(util.class_hierarchy(A)), set((A, object)))
        eq_(set(util.class_hierarchy(B)), set((B, object)))

        class C(A, B):
            pass

        eq_(set(util.class_hierarchy(A)), set((A, B, C, object)))
        eq_(set(util.class_hierarchy(B)), set((A, B, C, object)))

    if util.py2k:

        def test_oldstyle_mixin(self):
            class A(object):
                pass

            class Mixin:
                pass

            class B(A, Mixin):
                pass

            eq_(set(util.class_hierarchy(B)), set((A, B, object)))
            eq_(set(util.class_hierarchy(Mixin)), set())
            eq_(set(util.class_hierarchy(A)), set((A, B, object)))


class ReraiseTest(fixtures.TestBase):
    @testing.requires.python3
    def test_raise_from_cause_same_cause(self):
        class MyException(Exception):
            pass

        def go():
            try:
                raise MyException("exc one")
            except Exception as err:
                util.raise_from_cause(err)

        try:
            go()
            assert False
        except MyException as err:
            is_(err.__cause__, None)

    def test_raise_from_cause_legacy(self):
        class MyException(Exception):
            pass

        class MyOtherException(Exception):
            pass

        me = MyException("exc one")

        def go():
            try:
                raise me
            except Exception:
                util.raise_from_cause(MyOtherException("exc two"))

        try:
            go()
            assert False
        except MyOtherException as moe:
            if testing.requires.python3.enabled:
                is_(moe.__cause__, me)

    def test_raise_from(self):
        class MyException(Exception):
            pass

        class MyOtherException(Exception):
            pass

        me = MyException("exc one")

        def go():
            try:
                raise me
            except Exception as err:
                util.raise_(MyOtherException("exc two"), from_=err)

        try:
            go()
            assert False
        except MyOtherException as moe:
            if testing.requires.python3.enabled:
                is_(moe.__cause__, me)

    @testing.requires.python2
    def test_safe_reraise_py2k_warning(self):
        class MyException(Exception):
            pass

        class MyOtherException(Exception):
            pass

        m1 = MyException("exc one")
        m2 = MyOtherException("exc two")

        def go2():
            raise m2

        def go():
            try:
                raise m1
            except Exception:
                with util.safe_reraise():
                    go2()

        with expect_warnings(
            "An exception has occurred during handling of a previous "
            "exception.  The previous exception "
            "is:.*MyException.*exc one"
        ):
            try:
                go()
                assert False
            except MyOtherException:
                pass


class TestClassProperty(fixtures.TestBase):
    def test_simple(self):
        class A(object):
            something = {"foo": 1}

        class B(A):
            @classproperty
            def something(cls):
                d = dict(super(B, cls).something)
                d.update({"bazz": 2})
                return d

        eq_(B.something, {"foo": 1, "bazz": 2})


class TestProperties(fixtures.TestBase):
    def test_pickle(self):
        data = {"hello": "bla"}
        props = util.Properties(data)

        for loader, dumper in picklers():
            s = dumper(props)
            p = loader(s)

            eq_(props._data, p._data)
            eq_(props.keys(), p.keys())

    def test_keys_in_dir(self):
        data = {"hello": "bla"}
        props = util.Properties(data)
        in_("hello", dir(props))

    def test_pickle_immutableprops(self):
        data = {"hello": "bla"}
        props = util.Properties(data).as_immutable()

        for loader, dumper in picklers():
            s = dumper(props)
            p = loader(s)

            eq_(props._data, p._data)
            eq_(props.keys(), p.keys())

    def test_pickle_orderedprops(self):
        data = {"hello": "bla"}
        props = util.OrderedProperties()
        props.update(data)

        for loader, dumper in picklers():
            s = dumper(props)
            p = loader(s)

            eq_(props._data, p._data)
            eq_(props.keys(), p.keys())


class QuotedTokenParserTest(fixtures.TestBase):
    def _test(self, string, expected):
        eq_(langhelpers.quoted_token_parser(string), expected)

    def test_single(self):
        self._test("name", ["name"])

    def test_dotted(self):
        self._test("schema.name", ["schema", "name"])

    def test_dotted_quoted_left(self):
        self._test('"Schema".name', ["Schema", "name"])

    def test_dotted_quoted_left_w_quote_left_edge(self):
        self._test('"""Schema".name', ['"Schema', "name"])

    def test_dotted_quoted_left_w_quote_right_edge(self):
        self._test('"Schema""".name', ['Schema"', "name"])

    def test_dotted_quoted_left_w_quote_middle(self):
        self._test('"Sch""ema".name', ['Sch"ema', "name"])
    def test_dotted_quoted_right(self):
        self._test('schema."SomeName"', ["schema", "SomeName"])

    def test_dotted_quoted_right_w_quote_left_edge(self):
        self._test('schema."""name"', ["schema", '"name'])

    def test_dotted_quoted_right_w_quote_right_edge(self):
        self._test('schema."name"""', ["schema", 'name"'])

    def test_dotted_quoted_right_w_quote_middle(self):
        self._test('schema."na""me"', ["schema", 'na"me'])

    def test_quoted_single_w_quote_left_edge(self):
        self._test('"""name"', ['"name'])

    def test_quoted_single_w_quote_right_edge(self):
        self._test('"name"""', ['name"'])

    def test_quoted_single_w_quote_middle(self):
        self._test('"na""me"', ['na"me'])

    def test_dotted_quoted_left_w_dot_left_edge(self):
        self._test('".Schema".name', [".Schema", "name"])

    def test_dotted_quoted_left_w_dot_right_edge(self):
        self._test('"Schema.".name', ["Schema.", "name"])

    def test_dotted_quoted_left_w_dot_middle(self):
        self._test('"Sch.ema".name', ["Sch.ema", "name"])

    def test_dotted_quoted_right_w_dot_left_edge(self):
        self._test('schema.".name"', ["schema", ".name"])

    def test_dotted_quoted_right_w_dot_right_edge(self):
        self._test('schema."name."', ["schema", "name."])

    def test_dotted_quoted_right_w_dot_middle(self):
        self._test('schema."na.me"', ["schema", "na.me"])

    def test_quoted_single_w_dot_left_edge(self):
        self._test('".name"', [".name"])

    def test_quoted_single_w_dot_right_edge(self):
        self._test('"name."', ["name."])

    def test_quoted_single_w_dot_middle(self):
        self._test('"na.me"', ["na.me"])


class BackslashReplaceTest(fixtures.TestBase):
    def test_ascii_to_utf8(self):
        eq_(
            compat.decode_backslashreplace(util.b("hello world"), "utf-8"),
            util.u("hello world"),
        )

    def test_utf8_to_utf8(self):
        eq_(
            compat.decode_backslashreplace(
                util.u("some message méil").encode("utf-8"), "utf-8"
            ),
            util.u("some message méil"),
        )

    def test_latin1_to_utf8(self):
        eq_(
            compat.decode_backslashreplace(
                util.u("some message méil").encode("latin-1"), "utf-8"
            ),
            util.u("some message m\\xe9il"),
        )

        eq_(
            compat.decode_backslashreplace(
                util.u("some message méil").encode("latin-1"), "latin-1"
            ),
            util.u("some message méil"),
        )

    def test_cp1251_to_utf8(self):
        message = util.u("some message П").encode("cp1251")
        eq_(message, b"some message \xcf")
        eq_(
            compat.decode_backslashreplace(message, "utf-8"),
            util.u("some message \\xcf"),
        )

        eq_(
            compat.decode_backslashreplace(message, "cp1251"),
            util.u("some message П"),
        )


class TimezoneTest(fixtures.TestBase):
    """test the python 2 backport of the "timezone" class.

    Note under python 3, these tests work against the builtin timezone,
    thereby providing confirmation that the tests are correct.

    """

    @testing.combinations(
        (datetime.timedelta(0), "UTC"),
        (datetime.timedelta(hours=5), "UTC+05:00"),
        (datetime.timedelta(hours=5, minutes=10), "UTC+05:10"),
        (
            datetime.timedelta(hours=5, minutes=10, seconds=27),
            "UTC+05:10:27",
            testing.requires.granular_timezone,
        ),
        (datetime.timedelta(hours=-3, minutes=10), "UTC-02:50"),
        (
            datetime.timedelta(
                hours=5, minutes=10, seconds=27, microseconds=550
            ),
            "UTC+05:10:27.000550",
            testing.requires.granular_timezone,
        ),
    )
    def test_tzname(self, td, expected):
        eq_(timezone(td).tzname(None), expected)

    def test_utcoffset(self):
        eq_(
            timezone(datetime.timedelta(hours=5)).utcoffset(None),
            datetime.timedelta(hours=5),
        )

    def test_fromutc(self):
        tzinfo = timezone(datetime.timedelta(hours=5))
        dt = datetime.datetime(2017, 10, 5, 12, 55, 38, tzinfo=tzinfo)
        eq_(
            dt.astimezone(timezone.utc),
            datetime.datetime(2017, 10, 5, 7, 55, 38, tzinfo=timezone.utc),
        )

        # this is the same as hours=-3
        del_ = datetime.timedelta(days=-1, seconds=75600)
        eq_(
            dt.astimezone(timezone(datetime.timedelta(hours=-3))),
            datetime.datetime(2017, 10, 5, 4, 55, 38, tzinfo=timezone(del_)),
        )

    @testing.requires.python3
    def test_repr_py3k(self):
        eq_(
            repr(timezone(datetime.timedelta(hours=5))),
            "datetime.timezone(%r)" % (datetime.timedelta(hours=5)),
        )

    @testing.requires.python2
    def test_repr_py2k(self):
        eq_(
            repr(timezone(datetime.timedelta(hours=5))),
            "sqlalchemy.util.timezone(%r)" % (datetime.timedelta(hours=5)),
        )


class TestModuleRegistry(fixtures.TestBase):
    def test_modules_are_loaded(self):
        to_restore = []
        for m in ("xml.dom", "wsgiref.simple_server"):
            to_restore.append((m, sys.modules.pop(m, None)))
        try:
            mr = _preloaded._ModuleRegistry()

            ret = mr.preload_module(
                "xml.dom", "wsgiref.simple_server", "sqlalchemy.sql.util"
            )
            o = object()

            is_(ret(o), o)

            is_false(hasattr(mr, "xml_dom"))
            mr.import_prefix("xml")
            is_true("xml.dom" in sys.modules)
            is_(sys.modules["xml.dom"], mr.xml_dom)

            is_true("wsgiref.simple_server" not in sys.modules)

            mr.import_prefix("wsgiref")
            is_true("wsgiref.simple_server" in sys.modules)
            is_(sys.modules["wsgiref.simple_server"], mr.wsgiref_simple_server)

            mr.import_prefix("sqlalchemy")
            is_(sys.modules["sqlalchemy.sql.util"], mr.sql_util)
        finally:
            for name, mod in to_restore:
                if mod is not None:
                    sys.modules[name] = mod


class MethodOverriddenTest(fixtures.TestBase):
    def test_subclass_overrides_cls_given(self):
        class Foo(object):
            def bar(self):
                pass

        class Bar(Foo):
            def bar(self):
                pass

        is_true(util.method_is_overridden(Bar, Foo.bar))

    def test_subclass_overrides(self):
        class Foo(object):
            def bar(self):
                pass

        class Bar(Foo):
            def bar(self):
                pass

        is_true(util.method_is_overridden(Bar(), Foo.bar))

    def test_subclass_overrides_skiplevel(self):
        class Foo(object):
            def bar(self):
                pass

        class Bar(Foo):
            pass

        class Bat(Bar):
            def bar(self):
                pass

        is_true(util.method_is_overridden(Bat(), Foo.bar))

    def test_subclass_overrides_twolevels(self):
        class Foo(object):
            def bar(self):
                pass

        class Bar(Foo):
            def bar(self):
                pass

        class Bat(Bar):
            pass

        is_true(util.method_is_overridden(Bat(), Foo.bar))

    def test_subclass_doesnt_override_cls_given(self):
        class Foo(object):
            def bar(self):
                pass

        class Bar(Foo):
            pass

        is_false(util.method_is_overridden(Bar, Foo.bar))

    def test_subclass_doesnt_override(self):
        class Foo(object):
            def bar(self):
                pass

        class Bar(Foo):
            pass

        is_false(util.method_is_overridden(Bar(), Foo.bar))

    def test_subclass_overrides_multi_mro(self):
        class Base(object):
            pass

        class Foo(object):
            pass

        class Bat(Base):
            def bar(self):
                pass

        class HoHo(Foo, Bat):
            def bar(self):
                pass
is_true(util.method_is_overridden(HoHo(), Bat.bar))
zzzeek/sqlalchemy
test/base/test_utils.py
Python
mit
94,962
[ "MOE" ]
1f053c929b78b168f548f75b1deea9a1334d55dccb1437a0828812d80bb6f2a7
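The MethodOverriddenTest cases above all reduce to a walk of the subject class's MRO. Below is a minimal standalone sketch of that check, assuming nothing beyond the standard library; the helper name method_is_overridden_sketch is hypothetical, and this is an illustration rather than SQLAlchemy's actual implementation.

# Minimal sketch: a method is "overridden" relative to against_method if
# some class earlier in the MRO defines the same attribute name before
# the class that owns against_method is reached.
def method_is_overridden_sketch(instance_or_cls, against_method):
    cls = (instance_or_cls if isinstance(instance_or_cls, type)
           else type(instance_or_cls))
    name = against_method.__name__
    for supercls in cls.__mro__:
        candidate = supercls.__dict__.get(name)
        if candidate is against_method:
            return False      # reached the original definition first
        if candidate is not None:
            return True       # found an earlier (overriding) definition
    return False


class Foo(object):
    def bar(self):
        pass


class Bar(Foo):
    def bar(self):
        pass


assert method_is_overridden_sketch(Bar(), Foo.bar)
assert not method_is_overridden_sketch(Foo(), Foo.bar)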
import sys, os

from CodeGen.visitor import *
from CodeGen.irgenerator import IRGenerator
from CodeGen.compiler import IRCompiler

rtype = " - git-unstable"


def debug(msg):
    if "--enable-debug" in sys.argv:
        print(msg)


if __name__ == "__main__":
    if len(sys.argv) < 2:
        print()
        print("LeFT Compiler" + rtype)
        print("--------------------------")
        print("Usage:")
        print("-in  : Specifies the input file")
        print("-out : Specifies the output file")
        print("-std : Specifies the framework path")
        print("-jit : Run as JIT compiler")
        print()
        print("--no-optimizations : Don't optimize the bytecode")
        print("--emit-llvm        : Don't delete the final IR")
        print("--generate-bitcode : Only generate the bitcode")
        print("--enable-debug     : Dump the generated IR")
        print()
        exit(-1)

    if "-in" in sys.argv:
        filename = sys.argv[sys.argv.index("-in") + 1]
    else:
        filename = sys.argv[1]
    stream = FileStream(filename)

    if "-std" in sys.argv:
        stdleft = sys.argv[sys.argv.index("-std") + 1]
    else:
        stdleft = "/usr/include/left"

    irgenerator = IRGenerator(filename)

    lexer = leftLexer(stream)
    tokenstream = CommonTokenStream(lexer)
    parser = leftParser(tokenstream)
    parsetree = parser.script()

    visitor = LeftVisitor(irgenerator, stdleft)
    instructions = visitor.visit(parsetree)

    generated_ir = str(irgenerator.module)
    debug("\nGenerated IR:")
    debug(generated_ir)

    if "-jit" in sys.argv:
        print("Running program ...")
        ircompiler = IRCompiler()
        mod = ircompiler.compile(str(irgenerator.module))
        ecode = ircompiler.execute(mod)
        print("Done. Exit code: " + str(ecode))
    else:
        if "-out" in sys.argv:
            filename = sys.argv[sys.argv.index("-out") + 1]
        else:
            filename = "a.out"

        with open(filename + ".ll", "w") as output:
            output.write(str(irgenerator.module))

        if "--no-optimizations" not in sys.argv:
            os.system("opt " + filename + ".ll" + " -S -o " + filename + ".ll")
            debug("opt " + filename + ".ll" + " -S -o " + filename + ".ll")

        os.system("llvm-as " + filename + ".ll -o " + filename + ".bc")
        debug("llvm-as " + filename + ".ll -o " + filename + ".bc")

        if "--emit-llvm" not in sys.argv:
            os.system("rm " + filename + ".ll")

        if len(visitor.usedBitcode) != 0:
            link = ""
            for bc in visitor.usedBitcode:
                link += " " + bc
            os.system("llvm-link " + filename + ".bc" + link + " -o " + filename + ".bc")
            debug("llvm-link " + filename + ".bc" + link + " -o " + filename + ".bc")

        if "--no-optimizations" not in sys.argv:
            os.system("opt " + filename + ".bc" + " -o " + filename + ".bc")
            debug("opt " + filename + ".bc" + " -o " + filename + ".bc")

        if "--generate-bitcode" not in sys.argv:
            os.system("llc " + filename + ".bc -o " + filename + ".s")
            debug("llc " + filename + ".bc -o " + filename + ".s")
            os.system("gcc " + filename + ".s -o " + filename)
            debug("gcc " + filename + ".s -o " + filename)
            os.system("rm " + filename + ".s")
            os.system("rm " + filename + ".bc")
LFUnion/left
compiler/leftc.py
Python
mit
3,401
[ "VisIt" ]
0eedd724bc4a1a41ca82a19f0dec53f5b12056722cb8e4d1ee265d7108cb2182
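leftc.py scans sys.argv by hand for each flag. A hedged sketch of the same flag surface expressed with the standard-library argparse follows; the refactor is purely illustrative and not part of the LFUnion/left repo.

import argparse

# Illustrative only: mirrors the leftc.py flag names above with argparse.
parser = argparse.ArgumentParser(prog="leftc", description="LeFT Compiler")
parser.add_argument("-in", dest="infile", help="input file")
parser.add_argument("-out", dest="outfile", default="a.out", help="output file")
parser.add_argument("-std", dest="stdleft", default="/usr/include/left",
                    help="framework path")
parser.add_argument("-jit", action="store_true", help="run as JIT compiler")
parser.add_argument("--no-optimizations", action="store_true")
parser.add_argument("--emit-llvm", action="store_true")
parser.add_argument("--generate-bitcode", action="store_true")
parser.add_argument("--enable-debug", action="store_true")

# Example invocation with a hypothetical argument list:
args = parser.parse_args(["-in", "hello.left", "-jit", "--enable-debug"])
print(args.infile, args.jit, args.enable_debug)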
from __future__ import print_function
import re
import sys

info = """
This script collects the relevant simulation parameters and generates a set
of input options for `fft_test.x` that replicate the execution of vloc_psi
done in the production run. This simplifies the identification of the
optimal fft tasking parameter.

Usage:

    python run_test.py pw_out_file
"""

match_alat = re.compile(r'lattice parameter \(alat\)\s+=\s+([+-]?([0-9]*[.])?[0-9]+)')
match_nbnd = re.compile(r'number of Kohn-Sham states=\s+([+-]?([0-9]*[.])?[0-9]+)')
match_ecutwfc = re.compile(r'kinetic-energy cutoff\s+=\s+([+-]?([0-9]*[.])?[0-9]+)')
match_ecutrho = re.compile(r'charge density cutoff\s+=\s+([+-]?([0-9]*[.])?[0-9]+)')
match_k = re.compile(r'number of k points=\s+(\d+)')

if __name__ == "__main__":
    if len(sys.argv) <= 1:
        print(info)
    else:
        with open(sys.argv[1], 'r') as f:
            data = f.read(30000)

        gamma = False
        maxk = ''

        alat = match_alat.findall(data)
        nbnd = match_nbnd.findall(data)
        ewfc = match_ecutwfc.findall(data)
        erho = match_ecutrho.findall(data)
        if not (alat and nbnd and ewfc and erho):
            print("Could not parse file. Sorry.")
            sys.exit(1)
        alat = alat[0][0]; nbnd = nbnd[0][0]; ewfc = ewfc[0][0]; erho = erho[0][0]

        a1 = []
        a2 = []
        a3 = []
        lines = data.splitlines()
        for i, line in enumerate(lines):
            if 'gamma-point specific algorithms are used' in line:
                gamma = True
            if 'crystal axes' in line:
                a1 = [float(x) * float(alat) for x in lines[i + 1].split()[3:6]]
                a2 = [float(x) * float(alat) for x in lines[i + 2].split()[3:6]]
                a3 = [float(x) * float(alat) for x in lines[i + 3].split()[3:6]]
            if 'number of k points' in line:
                nk = int(match_k.findall(line)[0])
                if '2pi/alat' in lines[i + 1]:
                    # keep the k point with the largest |k|^2
                    nrm2 = 0
                    for k in range(nk):
                        kn, v = re.findall(r"\(([-\d\s\.]*)\)", lines[i + k + 2])
                        v2 = [float(x) ** 2 for x in v.split()]
                        if sum(v2) > nrm2:
                            nrm2 = sum(v2)
                            maxk = v

        print("To analyze performance, run:")
        buf = ("mpirun -np X ./fft_test.x -ntg Y -ecutwfc {ewfc} -ecutrho {erho} " +
               "-av1 {av1} -av2 {av2} -av3 {av3} -nbnd {nbnd} -gamma {gamma}").format(
            ewfc=ewfc, erho=erho,
            av1=' '.join([str(x) for x in a1]),
            av2=' '.join([str(x) for x in a2]),
            av3=' '.join([str(x) for x in a3]),
            nbnd=nbnd, gamma=('.true.' if gamma else '.false.'))
        if maxk:
            buf += " -kmax " + maxk
        print(buf)
QEF/q-e_schrodinger
FFTXlib/gen_test_params.py
Python
gpl-2.0
3,021
[ "CRYSTAL" ]
696ca0f5e783444667618d8a1b5f5f37cbd2fac3ba6a70bc5e6593f3a9270b98
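The parameter extraction above is driven entirely by regular expressions over the pw.x output. A self-contained sketch of one of those matches against a made-up two-line excerpt (the numeric values are invented for illustration):

import re

# Fabricated excerpt in the pw.x output layout that match_alat targets.
sample = """
     lattice parameter (alat)  =      10.2000  a.u.
     kinetic-energy cutoff     =      25.0000  Ry
"""
match_alat = re.compile(r'lattice parameter \(alat\)\s+=\s+([+-]?([0-9]*[.])?[0-9]+)')
# findall returns one tuple per match, one entry per regex group;
# group 0 is the full number.
print(match_alat.findall(sample)[0][0])  # -> '10.2000'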
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

from __future__ import unicode_literals

import unittest2 as unittest
import os
import json

import scipy

from pymatgen.electronic_structure.core import Spin, Orbital, OrbitalType
from pymatgen.electronic_structure.dos import CompleteDos

test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
                        'test_files')


class DosTest(unittest.TestCase):

    def setUp(self):
        with open(os.path.join(test_dir, "complete_dos.json"), "r") as f:
            self.dos = CompleteDos.from_dict(json.load(f))

    def test_get_gap(self):
        dos = self.dos
        self.assertAlmostEqual(dos.get_gap(), 2.0589, 4)
        self.assertEqual(len(dos.energies), 301)
        self.assertAlmostEqual(
            dos.get_interpolated_gap(tol=0.001, abs_tol=False, spin=None)[0],
            2.16815942458015, 7)
        self.assertAlmostEqual(dos.get_cbm_vbm(),
                               (3.8729, 1.8140000000000001))
        self.assertAlmostEqual(dos.get_interpolated_value(9.9)[Spin.up],
                               1.744588888888891, 7)
        self.assertAlmostEqual(dos.get_interpolated_value(9.9)[Spin.down],
                               1.756888888888886, 7)
        self.assertRaises(ValueError, dos.get_interpolated_value, 1000)

    def test_get_smeared_densities(self):
        dos = self.dos
        smeared = dos.get_smeared_densities(0.2)
        dens = dos.densities
        for spin in Spin:
            self.assertAlmostEqual(sum(dens[spin]), sum(smeared[spin]))


class CompleteDosTest(unittest.TestCase):

    def setUp(self):
        with open(os.path.join(test_dir, "complete_dos.json"), "r") as f:
            self.dos = CompleteDos.from_dict(json.load(f))

    def test_get_gap(self):
        dos = self.dos
        self.assertAlmostEqual(dos.get_gap(), 2.0589, 4,
                               "Wrong gap from dos!")
        self.assertEqual(len(dos.energies), 301)
        self.assertAlmostEqual(
            dos.get_interpolated_gap(tol=0.001, abs_tol=False, spin=None)[0],
            2.16815942458015, 7)
        spd_dos = dos.get_spd_dos()
        self.assertEqual(len(spd_dos), 3)
        el_dos = dos.get_element_dos()
        self.assertEqual(len(el_dos), 4)
        sum_spd = (spd_dos[OrbitalType.s] + spd_dos[OrbitalType.p]
                   + spd_dos[OrbitalType.d])
        sum_element = None
        for pdos in el_dos.values():
            if sum_element is None:
                sum_element = pdos
            else:
                sum_element += pdos

        # The sums of the SPD or the element doses should be the same.
        self.assertTrue((abs(sum_spd.energies
                             - sum_element.energies) < 0.0001).all())
        self.assertTrue((abs(sum_spd.densities[Spin.up]
                             - sum_element.densities[Spin.up])
                         < 0.0001).all())
        self.assertTrue((abs(sum_spd.densities[Spin.down]
                             - sum_element.densities[Spin.down])
                         < 0.0001).all())

        site = dos.structure[0]
        self.assertIsNotNone(dos.get_site_dos(site))
        self.assertAlmostEqual(
            sum(dos.get_site_dos(site).get_densities(Spin.up)), 2.0391)
        self.assertAlmostEqual(
            sum(dos.get_site_dos(site).get_densities(Spin.down)),
            2.0331999999999995)
        self.assertIsNotNone(dos.get_site_orbital_dos(site, Orbital.s))
        egt2g = dos.get_site_t2g_eg_resolved_dos(site)
        self.assertAlmostEqual(sum(egt2g["e_g"].get_densities(Spin.up)), 0.0)
        self.assertAlmostEqual(sum(egt2g["t2g"].get_densities(Spin.up)), 0.0)
        egt2g = dos.get_site_t2g_eg_resolved_dos(dos.structure[4])
        self.assertAlmostEqual(sum(egt2g["e_g"].get_densities(Spin.up)),
                               15.004399999999997)
        self.assertAlmostEqual(sum(egt2g["t2g"].get_densities(Spin.up)),
                               22.910399999999999)
        self.assertAlmostEqual(dos.get_cbm_vbm(),
                               (3.8729, 1.8140000000000001))
        self.assertAlmostEqual(dos.get_interpolated_value(9.9)[Spin.up],
                               1.744588888888891, 7)
        self.assertAlmostEqual(dos.get_interpolated_value(9.9)[Spin.down],
                               1.756888888888886, 7)
        self.assertRaises(ValueError, dos.get_interpolated_value, 1000)

    def test_to_from_dict(self):
        d = self.dos.as_dict()
        dos = CompleteDos.from_dict(d)
        el_dos = dos.get_element_dos()
        self.assertEqual(len(el_dos), 4)
        spd_dos = dos.get_spd_dos()
        sum_spd = (spd_dos[OrbitalType.s] + spd_dos[OrbitalType.p]
                   + spd_dos[OrbitalType.d])
        sum_element = None
        for pdos in el_dos.values():
            if sum_element is None:
                sum_element = pdos
            else:
                sum_element += pdos

        # The sums of the SPD or the element doses should be the same.
        self.assertTrue((abs(sum_spd.energies
                             - sum_element.energies) < 0.0001).all())

    def test_str(self):
        self.assertIsNotNone(str(self.dos))


if __name__ == '__main__':
    unittest.main()
aykol/pymatgen
pymatgen/electronic_structure/tests/test_dos.py
Python
mit
5,487
[ "pymatgen" ]
eef4b373cf86093f93968eaaa8bbe97a431746de965e772b96b0036914a3b5b0
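The tests above pin down the public CompleteDos surface. A short usage sketch built only from calls the tests themselves exercise, assuming a complete_dos.json serialized the same way as the test fixture:

import json
from pymatgen.electronic_structure.dos import CompleteDos

# Load a serialized DOS and query it the way the tests do.
with open("complete_dos.json") as f:
    dos = CompleteDos.from_dict(json.load(f))

print(dos.get_gap())        # band gap
print(dos.get_cbm_vbm())    # (cbm, vbm) energies
spd = dos.get_spd_dos()     # projected DOS keyed by OrbitalType (s, p, d)
print(len(spd))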
from __future__ import print_function
from __future__ import division

from past.utils import old_div

import unittest

import numpy as np
import statsmodels.api as sm
from statsmodels.genmod.tests.results.results_glm import InvGauss

from IOHMM import GLM


class PoissonTests(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.data = sm.datasets.cpunish.load()
        cls.X = cls.data.exog
        cls.X[:, 3] = np.log(cls.X[:, 3])
        cls.Y = cls.data.endog

    def test_glm_IRLS(self):
        self.model = GLM(
            solver='IRLS', family=sm.families.Poisson(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0,
            tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y)
        # coefficient
        self.assertEqual(self.model.coef.shape, (7, ))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array((-6.801480e+00, 2.611017e-04, 7.781801e-02,
                      -9.493111e-02, 2.969349e-01, 2.301183e+00,
                      -1.872207e+01)),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        self.assertEqual(self.model.stderr.shape, (7, ))
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            np.array((4.146850e+00, 5.187132e-05, 7.940193e-02,
                      2.291926e-02, 4.375164e-01, 4.283826e-01,
                      4.283961e+00)),
            decimal=2)
        # scale
        self.assertEqual(self.model.dispersion, 1)
        # predict
        np.testing.assert_array_almost_equal(
            self.model.predict(self.X),
            np.array([35.2263655, 8.1965744, 1.3118966, 3.6862982,
                      2.0823003, 1.0650316, 1.9260424, 2.4171405,
                      1.8473219, 2.8643241, 3.1211989, 3.3382067,
                      2.5269969, 0.8972542, 0.9793332, 0.5346209,
                      1.9790936]),
            decimal=3)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.X, self.Y),
            -31.92732869482515, places=3)
        self.assertAlmostEqual(
            self.model.loglike_per_sample(self.X, self.Y).sum(),
            -31.92732869482515, places=3)
        self.assertEqual(
            self.model.loglike_per_sample(self.X, self.Y).shape, (17,))
        # to_json
        json_dict = self.model.to_json('./tests/linear_models/GLM/Poisson/')
        self.assertEqual(json_dict['properties']['solver'], 'IRLS')
        # from_json
        self.model_from_json = GLM.from_json(json_dict)
        np.testing.assert_array_almost_equal(
            self.model.coef, self.model_from_json.coef, decimal=3)
        np.testing.assert_array_almost_equal(
            self.model.stderr, self.model_from_json.stderr, decimal=3)
        self.assertEqual(
            self.model.dispersion, self.model_from_json.dispersion)
        np.testing.assert_array_almost_equal(
            self.model_from_json.predict(self.X),
            np.array([35.2263655, 8.1965744, 1.3118966, 3.6862982,
                      2.0823003, 1.0650316, 1.9260424, 2.4171405,
                      1.8473219, 2.8643241, 3.1211989, 3.3382067,
                      2.5269969, 0.8972542, 0.9793332, 0.5346209,
                      1.9790936]),
            decimal=3)

    def test_glm_regularized(self):
        # there is a bug in sklearn with weights, it can only use list right now
        self.model = GLM(
            solver='auto', family=sm.families.Poisson(),
            fit_intercept=True, est_stderr=True,
            reg_method='elastic_net', alpha=0.01, l1_ratio=0.5,
            tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y, sample_weight=0.5)
        self.assertEqual(self.model.coef.shape, (7, ))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array((2.104e-01, 8.331e-05, -2.736e-02, -1.347e-01,
                      -4.327e-02, 3.241e+00, -4.788e+00)),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        self.assertTrue(self.model.stderr is None)
        # scale
        self.assertEqual(self.model.dispersion, 1)
        # predict
        np.testing.assert_array_almost_equal(
            self.model.predict(self.X),
            np.array([23.949, 10.275, 1.12, 7.302, 2.707, 1.585, 0.776,
                      1.894, 3.242, 8.968, 2.265, 1.735, 1.152, 0.202,
                      2.412, 0.952, 3.488]),
            decimal=3)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.X, self.Y),
            -42.636883391983268, places=3)
        self.assertAlmostEqual(
            self.model.loglike_per_sample(self.X, self.Y).sum(),
            -42.636883391983268, places=3)
        self.assertEqual(
            self.model.loglike_per_sample(self.X, self.Y).shape, (17,))

    def test_glm_sample_weight_all_half(self):
        self.model = GLM(
            solver='IRLS', family=sm.families.Poisson(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0,
            tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y, sample_weight=0.5)
        # coefficient
        self.assertEqual(self.model.coef.shape, (7, ))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array((-6.801480e+00, 2.611017e-04, 7.781801e-02,
                      -9.493111e-02, 2.969349e-01, 2.301183e+00,
                      -1.872207e+01)),
            decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        self.assertEqual(self.model.stderr.shape, (7, ))
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            np.array((5.86e+00, 7.33e-05, 1.12e-01, 3.24e-02,
                      6.19e-01, 6.06e-01, 6.06e+00)),
            decimal=2)
        # scale
        self.assertEqual(self.model.dispersion, 1)
        # predict
        np.testing.assert_array_almost_equal(
            self.model.predict(self.X),
            np.array([35.2263655, 8.1965744, 1.3118966, 3.6862982,
                      2.0823003, 1.0650316, 1.9260424, 2.4171405,
                      1.8473219, 2.8643241, 3.1211989, 3.3382067,
                      2.5269969, 0.8972542, 0.9793332, 0.5346209,
                      1.9790936]),
            decimal=3)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.X, self.Y, sample_weight=0.5),
            old_div(-31.92732869482515, 2.), places=3)
        self.assertAlmostEqual(
            self.model.loglike_per_sample(self.X, self.Y).sum(),
            -31.92732869482515, places=3)
        self.assertEqual(
            self.model.loglike_per_sample(self.X, self.Y).shape, (17,))

    def test_glm_sample_weight_all_zero(self):
        self.model = GLM(
            solver='IRLS', family=sm.families.Poisson(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0,
            tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.assertRaises(ValueError, self.model.fit, self.X, self.Y, 0)

    def test_GLM_sample_weight_half_zero_half_one(self):
        self.model = GLM(
            solver='IRLS', family=sm.families.Poisson(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0,
            tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        len_half = 8
        self.model.fit(self.X, self.Y,
                       sample_weight=np.array([1] * len_half +
                                              [0] * (self.X.shape[0] - len_half)))
        self.model_half = GLM(
            solver='IRLS', family=sm.families.Poisson(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0,
            max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model_half.fit(self.X[:len_half], self.Y[:len_half])
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef, self.model_half.coef, decimal=3)
        # std.err
        np.testing.assert_array_almost_equal(
            self.model.stderr, self.model_half.stderr, decimal=2)
        # scale
        np.testing.assert_array_almost_equal(
            self.model.dispersion, self.model_half.dispersion, decimal=3)

    # corner cases
    def test_glm_one_data_point(self):
        self.model = GLM(
            solver='IRLS', family=sm.families.Poisson(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0,
            tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X[0:1, :], self.Y[0:1, ], sample_weight=0.5)
        # coef
        self.assertEqual(self.model.coef.shape, (7, ))
        # scale
        self.assertEqual(self.model.dispersion, 1)
        # loglike_per_sample
        np.testing.assert_array_almost_equal(self.model.loglike_per_sample(
            self.X[0:1, :], self.Y[0:1, ]),
np.array([-2.72665]), decimal=3) np.testing.assert_array_almost_equal(self.model.loglike_per_sample( np.array(self.X[0:1, :].tolist() * 6), np.array([31, 32, 33, 34, 35, 36])), np.array([-3.154, -3.009, -2.894, -2.81, -2.754, -2.727]), decimal=3) def test_ols_multicolinearty(self): self.model_col = GLM( solver='irls', family=sm.families.Poisson(), fit_intercept=False, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) X = np.hstack([self.X[:, 0:1], 2 * self.X[:, 0:1]]) self.model_col.fit(X, self.Y, sample_weight=0.5) self.model = GLM( solver='IRLS', family=sm.families.Poisson(), fit_intercept=False, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) self.model.fit(self.X[:, 0:1], self.Y, sample_weight=0.5) # coef np.testing.assert_array_almost_equal( self.model_col.coef, np.array([8.000e-06, 1.6000e-05]), decimal=3) # stderr np.testing.assert_array_almost_equal( self.model_col.stderr, np.array([9.09531196e-07, 1.81906239e-06]), decimal=3) # scale np.testing.assert_array_almost_equal( self.model_col.dispersion, self.model.dispersion, decimal=3) # loglike_per_sample np.testing.assert_array_almost_equal( self.model_col.loglike_per_sample(X, self.Y), self.model.loglike_per_sample(self.X[:, 0:1], self.Y), decimal=3) np.testing.assert_array_almost_equal( self.model_col.predict(X), self.model.predict(self.X[:, 0:1]), decimal=3) class GammaTests(unittest.TestCase): @classmethod def setUpClass(cls): cls.data = sm.datasets.scotland.load() cls.X = cls.data.exog cls.Y = cls.data.endog def test_glm_IRLS(self): self.model = GLM( solver='IRLS', family=sm.families.Gamma(), fit_intercept=True, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) self.model.fit(self.X, self.Y) # coefficient self.assertEqual(self.model.coef.shape, (8, )) np.testing.assert_array_almost_equal( self.model.coef, np.array((-1.776527e-02, 4.961768e-05, 2.034423e-03, -7.181429e-05, 1.118520e-04, -1.467515e-07, -5.186831e-04, -2.42717498e-06)), decimal=3) # std.err of coefficient (calibrated by df_resid) self.assertEqual(self.model.stderr.shape, (8, )) np.testing.assert_array_almost_equal( self.model.stderr * np.sqrt(old_div(32., 24.)), np.array((1.147922e-02, 1.621577e-05, 5.320802e-04, 2.711664e-05, 4.057691e-05, 1.236569e-07, 2.402534e-04, 7.460253e-07)), decimal=2) # scale self.assertAlmostEqual(self.model.dispersion * 32. 
/ 24., 0.003584283, places=6) # predict np.testing.assert_array_almost_equal( self.model.predict(self.X), np.array([57.80431482, 53.2733447, 50.56347993, 58.33003783, 70.46562169, 56.88801284, 66.81878401, 66.03410393, 57.92937473, 63.23216907, 53.9914785, 61.28993391, 64.81036393, 63.47546816, 60.69696114, 74.83508176, 56.56991106, 72.01804172, 64.35676519, 52.02445881, 64.24933079, 71.15070332, 45.73479688, 54.93318588, 66.98031261, 52.02479973, 56.18413736, 58.12267471, 67.37947398, 60.49162862, 73.82609217, 69.61515621]), decimal=3) # loglike/_per_sample self.assertAlmostEqual( self.model.loglike(self.X, self.Y), -82.47352, places=2) self.assertAlmostEqual( self.model.loglike_per_sample(self.X, self.Y).sum(), -82.47352, places=2) self.assertEqual( self.model.loglike_per_sample(self.X, self.Y).shape, (32,)) # to_json json_dict = self.model.to_json('./tests/linear_models/GLM/Gamma/') self.assertEqual(json_dict['properties']['solver'], 'IRLS') # from_json self.model_from_json = GLM.from_json(json_dict) np.testing.assert_array_almost_equal( self.model.coef, self.model_from_json.coef, decimal=3) np.testing.assert_array_almost_equal( self.model.stderr, self.model_from_json.stderr, decimal=3) self.assertEqual( self.model.dispersion, self.model_from_json.dispersion) np.testing.assert_array_almost_equal( self.model_from_json.predict(self.X), np.array([57.80431482, 53.2733447, 50.56347993, 58.33003783, 70.46562169, 56.88801284, 66.81878401, 66.03410393, 57.92937473, 63.23216907, 53.9914785, 61.28993391, 64.81036393, 63.47546816, 60.69696114, 74.83508176, 56.56991106, 72.01804172, 64.35676519, 52.02445881, 64.24933079, 71.15070332, 45.73479688, 54.93318588, 66.98031261, 52.02479973, 56.18413736, 58.12267471, 67.37947398, 60.49162862, 73.82609217, 69.61515621]), decimal=3) def test_glm_regularized(self): pass def test_glm_sample_weight_all_half(self): self.model = GLM( solver='IRLS', family=sm.families.Gamma(), fit_intercept=True, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) self.model.fit(self.X, self.Y, sample_weight=0.5) # coefficient self.assertEqual(self.model.coef.shape, (8, )) np.testing.assert_array_almost_equal( self.model.coef, np.array((-1.776527e-02, 4.961768e-05, 2.034423e-03, -7.181429e-05, 1.118520e-04, -1.467515e-07, -5.186831e-04, -2.42717498e-06)), decimal=3) # std.err of coefficient (calibrated by df_resid) self.assertEqual(self.model.stderr.shape, (8, )) np.testing.assert_array_almost_equal( self.model.stderr * np.sqrt(32. / 24. / 2.), np.array((1.147922e-02, 1.621577e-05, 5.320802e-04, 2.711664e-05, 4.057691e-05, 1.236569e-07, 2.402534e-04, 7.460253e-07)), decimal=3) # scale self.assertAlmostEqual(self.model.dispersion * 32. 
/ 24., 0.003584283, places=6) # predict np.testing.assert_array_almost_equal( self.model.predict(self.X), np.array([57.80431482, 53.2733447, 50.56347993, 58.33003783, 70.46562169, 56.88801284, 66.81878401, 66.03410393, 57.92937473, 63.23216907, 53.9914785, 61.28993391, 64.81036393, 63.47546816, 60.69696114, 74.83508176, 56.56991106, 72.01804172, 64.35676519, 52.02445881, 64.24933079, 71.15070332, 45.73479688, 54.93318588, 66.98031261, 52.02479973, 56.18413736, 58.12267471, 67.37947398, 60.49162862, 73.82609217, 69.61515621]), decimal=3) # loglike/_per_sample self.assertAlmostEqual( self.model.loglike(self.X, self.Y, sample_weight=0.5), old_div(-82.47352, 2.), places=2) self.assertAlmostEqual( self.model.loglike_per_sample(self.X, self.Y).sum(), -82.47352, places=2) self.assertEqual( self.model.loglike_per_sample(self.X, self.Y).shape, (32,)) def test_glm_sample_weight_all_zero(self): self.model = GLM( solver='IRLS', family=sm.families.Gamma(), fit_intercept=True, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) self.assertRaises(ValueError, self.model.fit, self.X, self.Y, 0) def test_GLM_sample_weight_half_zero_half_one(self): self.model = GLM( solver='IRLS', family=sm.families.Gamma(), fit_intercept=True, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) len_half = 16 self.model.fit(self.X, self.Y, sample_weight=np.array([1] * len_half + [0] * (self.X.shape[0] - len_half))) self.model_half = GLM( solver='IRLS', family=sm.families.Gamma(), fit_intercept=True, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, max_iter=100, coef=None, stderr=None, dispersion=None) self.model_half.fit(self.X[:len_half], self.Y[:len_half]) # coefficient np.testing.assert_array_almost_equal( self.model.coef, self.model_half.coef, decimal=3) # std.err np.testing.assert_array_almost_equal( self.model.stderr, self.model_half.stderr, decimal=3) # scale np.testing.assert_array_almost_equal( self.model.dispersion, self.model_half.dispersion, decimal=3) # corner cases def test_glm_one_data_point(self): pass def test_ols_multicolinearty(self): self.model_col = GLM( solver='irls', family=sm.families.Gamma(), fit_intercept=False, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) X = np.hstack([self.X[:, 0:1], self.X[:, 0:1]]) self.model_col.fit(X, self.Y, sample_weight=0.5) self.model = GLM( solver='IRLS', family=sm.families.Gamma(), fit_intercept=False, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) self.model.fit(self.X[:, 0:1], self.Y, sample_weight=0.5) # coef np.testing.assert_array_almost_equal( self.model_col.coef, np.array([1.080e-05, 1.080e-05]), decimal=3) # stderr # scale np.testing.assert_array_almost_equal( self.model_col.dispersion, self.model.dispersion, decimal=3) # loglike_per_sample np.testing.assert_array_almost_equal( self.model_col.loglike_per_sample(X, self.Y), self.model.loglike_per_sample(self.X[:, 0:1], self.Y), decimal=3) np.testing.assert_array_almost_equal( self.model_col.predict(X), self.model.predict(self.X[:, 0:1]), decimal=3) class GaussianTests(unittest.TestCase): @classmethod def setUpClass(cls): cls.data = sm.datasets.longley.load() cls.X = cls.data.exog cls.Y = cls.data.endog def test_glm_IRLS(self): self.model = GLM( solver='IRLS', family=sm.families.Gaussian(), fit_intercept=True, est_stderr=True, 
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) self.model.fit(self.X, self.Y) # coefficient self.assertEqual(self.model.coef.shape, (7, )) np.testing.assert_array_almost_equal( self.model.coef, np.array((-3.48225863e+06, 1.50618723e+01, -3.58191793e-02, -2.02022980e+00, -1.03322687e+00, -5.11041057e-02, 1.82915146e+03)), decimal=2) # std.err of coefficient (calibrated by df_resid) self.assertEqual(self.model.stderr.shape, (7, )) np.testing.assert_array_almost_equal( self.model.stderr * np.sqrt(old_div(16., 9.)), np.array((8.90420384e+05, 8.49149258e+01, 3.34910078e-02, 4.88399682e-01, 2.14274163e-01, 2.26073200e-01, 4.55478499e+02)), decimal=3) # scale self.assertAlmostEqual(self.model.dispersion * 16. / 9., 92936.006167311629, places=6) # predict np.testing.assert_array_almost_equal( self.model.predict(self.X), np.array([60055.659970240202, 61216.013942398131, 60124.71283224225, 61597.114621930756, 62911.285409240052, 63888.31121532945, 65153.048956395127, 63774.180356866214, 66004.695227399934, 67401.605905447621, 68186.268927114084, 66552.055042522494, 68810.549973595422, 69649.67130804155, 68989.068486039061, 70757.757825193927]), decimal=3) # loglike/_per_sample self.assertAlmostEqual( self.model.loglike(self.X, self.Y), -109.61743480847952, places=3) self.assertAlmostEqual( self.model.loglike_per_sample(self.X, self.Y).sum(), -109.61743480847952, places=3) self.assertEqual( self.model.loglike_per_sample(self.X, self.Y).shape, (16,)) # to_json json_dict = self.model.to_json('./tests/linear_models/GLM/Gaussian/') self.assertEqual(json_dict['properties']['solver'], 'IRLS') # from_json self.model_from_json = GLM.from_json(json_dict) np.testing.assert_array_almost_equal( self.model.coef, self.model_from_json.coef, decimal=3) np.testing.assert_array_almost_equal( self.model.stderr, self.model_from_json.stderr, decimal=3) self.assertEqual( self.model.dispersion, self.model_from_json.dispersion) np.testing.assert_array_almost_equal( self.model_from_json.predict(self.X), np.array([60055.659970240202, 61216.013942398131, 60124.71283224225, 61597.114621930756, 62911.285409240052, 63888.31121532945, 65153.048956395127, 63774.180356866214, 66004.695227399934, 67401.605905447621, 68186.268927114084, 66552.055042522494, 68810.549973595422, 69649.67130804155, 68989.068486039061, 70757.757825193927]), decimal=3) def test_glm_regularized(self): pass def test_glm_sample_weight_all_half(self): self.model = GLM( solver='IRLS', family=sm.families.Gaussian(), fit_intercept=True, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) self.model.fit(self.X, self.Y, sample_weight=0.5) # coefficient self.assertEqual(self.model.coef.shape, (7, )) np.testing.assert_array_almost_equal( self.model.coef, np.array((-3.48225863e+06, 1.50618723e+01, -3.58191793e-02, -2.02022980e+00, -1.03322687e+00, -5.11041057e-02, 1.82915146e+03)), decimal=2) # std.err of coefficient (calibrated by df_resid) self.assertEqual(self.model.stderr.shape, (7, )) np.testing.assert_array_almost_equal( self.model.stderr * np.sqrt(16. / 9. / 2.), np.array((8.90420384e+05, 8.49149258e+01, 3.34910078e-02, 4.88399682e-01, 2.14274163e-01, 2.26073200e-01, 4.55478499e+02)), decimal=3) # scale self.assertAlmostEqual(self.model.dispersion * 16. 
/ 9., 92936.006167311629, places=6) # predict np.testing.assert_array_almost_equal( self.model.predict(self.X), np.array([60055.659970240202, 61216.013942398131, 60124.71283224225, 61597.114621930756, 62911.285409240052, 63888.31121532945, 65153.048956395127, 63774.180356866214, 66004.695227399934, 67401.605905447621, 68186.268927114084, 66552.055042522494, 68810.549973595422, 69649.67130804155, 68989.068486039061, 70757.757825193927]), decimal=3) # loglike/_per_sample self.assertAlmostEqual( self.model.loglike(self.X, self.Y, sample_weight=0.5), old_div(-109.61743480847952, 2.), places=3) self.assertAlmostEqual( self.model.loglike_per_sample(self.X, self.Y).sum(), -109.61743480847952, places=3) self.assertEqual( self.model.loglike_per_sample(self.X, self.Y).shape, (16,)) def test_glm_sample_weight_all_zero(self): self.model = GLM( solver='IRLS', family=sm.families.Gaussian(), fit_intercept=True, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) self.assertRaises(ValueError, self.model.fit, self.X, self.Y, 0) def test_GLM_sample_weight_half_zero_half_one(self): self.model = GLM( solver='IRLS', family=sm.families.Gaussian(), fit_intercept=True, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) len_half = 8 self.model.fit(self.X, self.Y, sample_weight=np.array([1] * len_half + [0] * (self.X.shape[0] - len_half))) self.model_half = GLM( solver='IRLS', family=sm.families.Gaussian(), fit_intercept=True, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, max_iter=100, coef=None, stderr=None, dispersion=None) self.model_half.fit(self.X[:len_half], self.Y[:len_half]) # coefficient np.testing.assert_array_almost_equal( self.model.coef, self.model_half.coef, decimal=3) # std.err np.testing.assert_array_almost_equal( self.model.stderr, self.model_half.stderr, decimal=3) # scale np.testing.assert_array_almost_equal( self.model.dispersion, self.model_half.dispersion, decimal=3) # corner cases def test_glm_one_data_point(self): pass def test_ols_multicolinearty(self): self.model_col = GLM( solver='irls', family=sm.families.Gaussian(), fit_intercept=False, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) X = np.hstack([self.X[:, 0:1], self.X[:, 0:1]]) self.model_col.fit(X, self.Y, sample_weight=0.5) self.model = GLM( solver='IRLS', family=sm.families.Gaussian(), fit_intercept=False, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) self.model.fit(self.X[:, 0:1], self.Y, sample_weight=0.5) # coef np.testing.assert_array_almost_equal( self.model_col.coef, np.array([319.48, 319.48]), decimal=3) # stderr # scale np.testing.assert_array_almost_equal( self.model_col.dispersion, self.model.dispersion, decimal=3) # loglike_per_sample np.testing.assert_array_almost_equal( self.model_col.loglike_per_sample(X, self.Y), self.model.loglike_per_sample(self.X[:, 0:1], self.Y), decimal=3) np.testing.assert_array_almost_equal( self.model_col.predict(X), self.model.predict(self.X[:, 0:1]), decimal=3) class BinomialTests(unittest.TestCase): @classmethod def setUpClass(cls): cls.data = sm.datasets.star98.load() cls.X = cls.data.exog cls.Y = cls.data.endog def test_glm_IRLS(self): self.model = GLM( solver='IRLS', family=sm.families.Binomial(), fit_intercept=True, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, 
coef=None, stderr=None, dispersion=None) self.model.fit(self.X, self.Y) # coefficient self.assertEqual(self.model.coef.shape, (21, )) np.testing.assert_array_almost_equal( self.model.coef, np.array((2.9588779262, -0.0168150366, 0.0099254766, -0.0187242148, -0.0142385609, 0.2544871730, 0.2406936644, 0.0804086739, -1.9521605027, -0.3340864748, -0.1690221685, 0.0049167021, -0.0035799644, -0.0140765648, -0.0040049918, -0.0039063958, 0.0917143006, 0.0489898381, 0.0080407389, 0.0002220095, -0.0022492486)), decimal=3) # std.err of coefficient (calibrated by df_resid) self.assertEqual(self.model.stderr.shape, (21, )) np.testing.assert_array_almost_equal( self.model.stderr, np.array((1.546712e+00, 4.339467e-04, 6.013714e-04, 7.435499e-04, 4.338655e-04, 2.994576e-02, 5.713824e-02, 1.392359e-02, 3.168109e-01, 6.126411e-02, 3.270139e-02, 1.253877e-03, 2.254633e-04, 1.904573e-03, 4.739838e-04, 9.623650e-04, 1.450923e-02, 7.451666e-03, 1.499497e-03, 2.988794e-05, 3.489838e-04)), decimal=2) # scale self.assertEqual(self.model.dispersion, 1) # predict pred = np.array([0.5833118, 0.75144661, 0.50058272, 0.68534524, 0.32251021, 0.68693601, 0.33299827, 0.65624766, 0.49851481, 0.506736, 0.23954874, 0.86631452, 0.46432936, 0.44171873, 0.66797935, 0.73988491, 0.51966014, 0.42442446, 0.5649369, 0.59251634, 0.34798337, 0.56415024, 0.49974355, 0.3565539, 0.20752309, 0.18269097, 0.44932642, 0.48025128, 0.59965277, 0.58848671, 0.36264203, 0.33333196, 0.74253352, 0.5081886, 0.53421878, 0.56291445, 0.60205239, 0.29174423, 0.2954348, 0.32220414, 0.47977903, 0.23687535, 0.11776464, 0.1557423, 0.27854799, 0.22699533, 0.1819439, 0.32554433, 0.22681989, 0.15785389, 0.15268609, 0.61094772, 0.20743222, 0.51649059, 0.46502006, 0.41031788, 0.59523288, 0.65733285, 0.27835336, 0.2371213, 0.25137045, 0.23953942, 0.27854519, 0.39652413, 0.27023163, 0.61411863, 0.2212025, 0.42005842, 0.55940397, 0.35413774, 0.45724563, 0.57399437, 0.2168918, 0.58308738, 0.17181104, 0.49873249, 0.22832683, 0.14846056, 0.5028073, 0.24513863, 0.48202096, 0.52823155, 0.5086262, 0.46295993, 0.57869402, 0.78363217, 0.21144435, 0.2298366, 0.17954825, 0.32232586, 0.8343015, 0.56217006, 0.47367315, 0.52535649, 0.60350746, 0.43210701, 0.44712008, 0.35858239, 0.2521347, 0.19787004, 0.63256553, 0.51386532, 0.64997027, 0.13402072, 0.81756174, 0.74543642, 0.30825852, 0.23988707, 0.17273125, 0.27880599, 0.17395893, 0.32052828, 0.80467697, 0.18726218, 0.23842081, 0.19020381, 0.85835388, 0.58703615, 0.72415106, 0.64433695, 0.68766653, 0.32923663, 0.16352185, 0.38868816, 0.44980444, 0.74810044, 0.42973792, 0.53762581, 0.72714996, 0.61229484, 0.30267667, 0.24713253, 0.65086008, 0.48957265, 0.54955545, 0.5697156, 0.36406211, 0.48906545, 0.45919413, 0.4930565, 0.39785555, 0.5078719, 0.30159626, 0.28524393, 0.34687707, 0.22522042, 0.52947159, 0.29277287, 0.8585002, 0.60800389, 0.75830521, 0.35648175, 0.69508796, 0.45518355, 0.21567675, 0.39682985, 0.49042948, 0.47615798, 0.60588234, 0.62910299, 0.46005639, 0.71755165, 0.48852156, 0.47940661, 0.60128813, 0.16589699, 0.68512861, 0.46305199, 0.68832227, 0.7006721, 0.56564937, 0.51753941, 0.54261733, 0.56072214, 0.34545715, 0.30226104, 0.3572956, 0.40996287, 0.33517519, 0.36248407, 0.33937041, 0.34140691, 0.2627528, 0.29955161, 0.38581683, 0.24840026, 0.15414272, 0.40415991, 0.53936252, 0.52111887, 0.28060168, 0.45600958, 0.51110589, 0.43757523, 0.46891953, 0.39425249, 0.5834369, 0.55817308, 0.32051259, 0.43567448, 0.34134195, 0.43016545, 0.4885413, 0.28478325, 0.2650776, 0.46784606, 0.46265983, 0.42655938, 0.18972234, 
0.60448491, 0.211896, 0.37886032, 0.50727577, 0.39782309, 0.50427121, 0.35882898, 0.39596807, 0.49160806, 0.35618002, 0.6819922, 0.36871093, 0.43079679, 0.67985516, 0.41270595, 0.68952767, 0.52587734, 0.32042126, 0.39120123, 0.56870985, 0.32962349, 0.32168989, 0.54076251, 0.4592907, 0.48480182, 0.4408386, 0.431178, 0.47078232, 0.55911605, 0.30331618, 0.50310393, 0.65036038, 0.45078895, 0.62354291, 0.56435463, 0.50034281, 0.52693538, 0.57217285, 0.49221472, 0.40707122, 0.44226533, 0.3475959, 0.54746396, 0.86385832, 0.48402233, 0.54313657, 0.61586824, 0.27097185, 0.69717808, 0.52156974, 0.50401189, 0.56724181, 0.6577178, 0.42732047, 0.44808396, 0.65435634, 0.54766225, 0.38160648, 0.49890847, 0.50879037, 0.5875452, 0.45101593, 0.5709704, 0.3175516, 0.39813159, 0.28305688, 0.40521062, 0.30120578, 0.26400428, 0.44205496, 0.40545798, 0.39366599, 0.55288196, 0.14104184, 0.17550155, 0.1949095, 0.40255144, 0.21016822, 0.09712017, 0.63151487, 0.25885514, 0.57323748, 0.61836898, 0.43268601, 0.67008878, 0.75801989, 0.50353406, 0.64222315, 0.29925757, 0.32592036, 0.39634977, 0.39582747, 0.41037006, 0.34174944]) np.testing.assert_array_almost_equal( self.model.predict(self.X), pred, decimal=3) # loglike/_per_sample self.assertAlmostEqual( self.model.loglike(self.X, self.Y), -2998.61255899391, places=3) self.assertAlmostEqual( self.model.loglike_per_sample(self.X, self.Y).sum(), -2998.61255899391, places=3) self.assertEqual( self.model.loglike_per_sample(self.X, self.Y).shape, (303,)) self.assertEqual( self.model.loglike_per_sample(self.X[:5], self.Y[:5]).shape, (5,)) # to_json json_dict = self.model.to_json('./tests/linear_models/GLM/Binomial/') self.assertEqual(json_dict['properties']['solver'], 'IRLS') # from_json self.model_from_json = GLM.from_json(json_dict) np.testing.assert_array_almost_equal( self.model.coef, self.model_from_json.coef, decimal=3) np.testing.assert_array_almost_equal( self.model.stderr, self.model_from_json.stderr, decimal=3) self.assertEqual( self.model.dispersion, self.model_from_json.dispersion) np.testing.assert_array_almost_equal( self.model_from_json.predict(self.X), pred, decimal=3) def test_glm_regularized(self): # not supported by statsmodels pass def test_glm_sample_weight_all_half(self): self.model = GLM( solver='IRLS', family=sm.families.Binomial(), fit_intercept=True, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) self.model.fit(self.X, self.Y, sample_weight=0.5) # coefficient self.assertEqual(self.model.coef.shape, (21, )) np.testing.assert_array_almost_equal( self.model.coef, np.array((2.9588779262, -0.0168150366, 0.0099254766, -0.0187242148, -0.0142385609, 0.2544871730, 0.2406936644, 0.0804086739, -1.9521605027, -0.3340864748, -0.1690221685, 0.0049167021, -0.0035799644, -0.0140765648, -0.0040049918, -0.0039063958, 0.0917143006, 0.0489898381, 0.0080407389, 0.0002220095, -0.0022492486)), decimal=3) # std.err of coefficient (calibrated by df_resid) self.assertEqual(self.model.stderr.shape, (21, )) np.testing.assert_array_almost_equal( old_div(self.model.stderr, np.sqrt(2)), np.array((1.546712e+00, 4.339467e-04, 6.013714e-04, 7.435499e-04, 4.338655e-04, 2.994576e-02, 5.713824e-02, 1.392359e-02, 3.168109e-01, 6.126411e-02, 3.270139e-02, 1.253877e-03, 2.254633e-04, 1.904573e-03, 4.739838e-04, 9.623650e-04, 1.450923e-02, 7.451666e-03, 1.499497e-03, 2.988794e-05, 3.489838e-04)), decimal=2) # scale self.assertEqual(self.model.dispersion, 1) # predict pred = np.array([0.5833118, 0.75144661, 0.50058272, 
0.68534524, 0.32251021, 0.68693601, 0.33299827, 0.65624766, 0.49851481, 0.506736, 0.23954874, 0.86631452, 0.46432936, 0.44171873, 0.66797935, 0.73988491, 0.51966014, 0.42442446, 0.5649369, 0.59251634, 0.34798337, 0.56415024, 0.49974355, 0.3565539, 0.20752309, 0.18269097, 0.44932642, 0.48025128, 0.59965277, 0.58848671, 0.36264203, 0.33333196, 0.74253352, 0.5081886, 0.53421878, 0.56291445, 0.60205239, 0.29174423, 0.2954348, 0.32220414, 0.47977903, 0.23687535, 0.11776464, 0.1557423, 0.27854799, 0.22699533, 0.1819439, 0.32554433, 0.22681989, 0.15785389, 0.15268609, 0.61094772, 0.20743222, 0.51649059, 0.46502006, 0.41031788, 0.59523288, 0.65733285, 0.27835336, 0.2371213, 0.25137045, 0.23953942, 0.27854519, 0.39652413, 0.27023163, 0.61411863, 0.2212025, 0.42005842, 0.55940397, 0.35413774, 0.45724563, 0.57399437, 0.2168918, 0.58308738, 0.17181104, 0.49873249, 0.22832683, 0.14846056, 0.5028073, 0.24513863, 0.48202096, 0.52823155, 0.5086262, 0.46295993, 0.57869402, 0.78363217, 0.21144435, 0.2298366, 0.17954825, 0.32232586, 0.8343015, 0.56217006, 0.47367315, 0.52535649, 0.60350746, 0.43210701, 0.44712008, 0.35858239, 0.2521347, 0.19787004, 0.63256553, 0.51386532, 0.64997027, 0.13402072, 0.81756174, 0.74543642, 0.30825852, 0.23988707, 0.17273125, 0.27880599, 0.17395893, 0.32052828, 0.80467697, 0.18726218, 0.23842081, 0.19020381, 0.85835388, 0.58703615, 0.72415106, 0.64433695, 0.68766653, 0.32923663, 0.16352185, 0.38868816, 0.44980444, 0.74810044, 0.42973792, 0.53762581, 0.72714996, 0.61229484, 0.30267667, 0.24713253, 0.65086008, 0.48957265, 0.54955545, 0.5697156, 0.36406211, 0.48906545, 0.45919413, 0.4930565, 0.39785555, 0.5078719, 0.30159626, 0.28524393, 0.34687707, 0.22522042, 0.52947159, 0.29277287, 0.8585002, 0.60800389, 0.75830521, 0.35648175, 0.69508796, 0.45518355, 0.21567675, 0.39682985, 0.49042948, 0.47615798, 0.60588234, 0.62910299, 0.46005639, 0.71755165, 0.48852156, 0.47940661, 0.60128813, 0.16589699, 0.68512861, 0.46305199, 0.68832227, 0.7006721, 0.56564937, 0.51753941, 0.54261733, 0.56072214, 0.34545715, 0.30226104, 0.3572956, 0.40996287, 0.33517519, 0.36248407, 0.33937041, 0.34140691, 0.2627528, 0.29955161, 0.38581683, 0.24840026, 0.15414272, 0.40415991, 0.53936252, 0.52111887, 0.28060168, 0.45600958, 0.51110589, 0.43757523, 0.46891953, 0.39425249, 0.5834369, 0.55817308, 0.32051259, 0.43567448, 0.34134195, 0.43016545, 0.4885413, 0.28478325, 0.2650776, 0.46784606, 0.46265983, 0.42655938, 0.18972234, 0.60448491, 0.211896, 0.37886032, 0.50727577, 0.39782309, 0.50427121, 0.35882898, 0.39596807, 0.49160806, 0.35618002, 0.6819922, 0.36871093, 0.43079679, 0.67985516, 0.41270595, 0.68952767, 0.52587734, 0.32042126, 0.39120123, 0.56870985, 0.32962349, 0.32168989, 0.54076251, 0.4592907, 0.48480182, 0.4408386, 0.431178, 0.47078232, 0.55911605, 0.30331618, 0.50310393, 0.65036038, 0.45078895, 0.62354291, 0.56435463, 0.50034281, 0.52693538, 0.57217285, 0.49221472, 0.40707122, 0.44226533, 0.3475959, 0.54746396, 0.86385832, 0.48402233, 0.54313657, 0.61586824, 0.27097185, 0.69717808, 0.52156974, 0.50401189, 0.56724181, 0.6577178, 0.42732047, 0.44808396, 0.65435634, 0.54766225, 0.38160648, 0.49890847, 0.50879037, 0.5875452, 0.45101593, 0.5709704, 0.3175516, 0.39813159, 0.28305688, 0.40521062, 0.30120578, 0.26400428, 0.44205496, 0.40545798, 0.39366599, 0.55288196, 0.14104184, 0.17550155, 0.1949095, 0.40255144, 0.21016822, 0.09712017, 0.63151487, 0.25885514, 0.57323748, 0.61836898, 0.43268601, 0.67008878, 0.75801989, 0.50353406, 0.64222315, 0.29925757, 0.32592036, 0.39634977, 0.39582747, 0.41037006, 
0.34174944]) np.testing.assert_array_almost_equal( self.model.predict(self.X), pred, decimal=3) # loglike/_per_sample self.assertAlmostEqual( self.model.loglike(self.X, self.Y, sample_weight=0.5), old_div(-2998.61255899391, 2.), places=3) self.assertAlmostEqual( self.model.loglike_per_sample(self.X, self.Y).sum(), -2998.61255899391, places=3) self.assertEqual( self.model.loglike_per_sample(self.X, self.Y).shape, (303,)) self.assertEqual( self.model.loglike_per_sample(self.X[:5], self.Y[:5]).shape, (5,)) def test_glm_sample_weight_all_zero(self): self.model = GLM( solver='IRLS', family=sm.families.Binomial(), fit_intercept=True, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) self.assertRaises(ValueError, self.model.fit, self.X, self.Y, 0) def test_GLM_sample_weight_half_zero_half_one(self): self.model = GLM( solver='IRLS', family=sm.families.Binomial(), fit_intercept=True, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) len_half = 160 self.model.fit(self.X, self.Y, sample_weight=np.array([1] * len_half + [0] * (self.X.shape[0] - len_half))) self.model_half = GLM( solver='IRLS', family=sm.families.Binomial(), fit_intercept=True, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, max_iter=100, coef=None, stderr=None, dispersion=None) self.model_half.fit(self.X[:len_half], self.Y[:len_half]) # coefficient np.testing.assert_array_almost_equal( self.model.coef, self.model_half.coef, decimal=3) # std.err np.testing.assert_array_almost_equal( self.model.stderr, self.model_half.stderr, decimal=2) # scale np.testing.assert_array_almost_equal( self.model.dispersion, self.model_half.dispersion, decimal=3) # corner cases def test_glm_one_data_point(self): self.model = GLM( solver='IRLS', family=sm.families.Binomial(), fit_intercept=True, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) self.model.fit(self.X[0:1, :], self.Y[0:1, ], sample_weight=0.5) # coef self.assertEqual(self.model.coef.shape, (21, )) # scale self.assertEqual(self.model.dispersion, 1) # loglike_per_sample np.testing.assert_array_almost_equal(self.model.loglike_per_sample( self.X[0:1, :], self.Y[0:1, ]), np.array([-3.565]), decimal=3) np.testing.assert_array_almost_equal(self.model.loglike_per_sample( np.array(self.X[0:1, :].tolist() * 6), np.array([[452., 355.], [510., 235.], [422., 335.], [454., 355.], [452., 355.], [422., 355.]])), np.array([-3.565, -27.641, -3.545, -3.568, -3.565, -4.004]), decimal=3) def test_ols_multicolinearty(self): self.model_col = GLM( solver='irls', family=sm.families.Binomial(), fit_intercept=False, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) X = np.hstack([self.X[:, 0:1], self.X[:, 0:1]]) self.model_col.fit(X, self.Y, sample_weight=0.5) self.model = GLM( solver='IRLS', family=sm.families.Binomial(), fit_intercept=False, est_stderr=True, reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100, coef=None, stderr=None, dispersion=None) self.model.fit(self.X[:, 0:1], self.Y, sample_weight=0.5) # coef np.testing.assert_array_almost_equal( self.model_col.coef, np.array([-0.006, -0.006]), decimal=3) # stderr np.testing.assert_array_almost_equal( self.model_col.stderr, np.array([5.684e-05, 5.684e-05]), decimal=3) # scale np.testing.assert_array_almost_equal( self.model_col.dispersion, self.model.dispersion, decimal=3) 
        # loglike_per_sample
        np.testing.assert_array_almost_equal(
            self.model_col.loglike_per_sample(X, self.Y),
            self.model.loglike_per_sample(self.X[:, 0:1], self.Y), decimal=3)
        np.testing.assert_array_almost_equal(
            self.model_col.predict(X),
            self.model.predict(self.X[:, 0:1]), decimal=3)


class InverseGaussianTests(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        filename = 'tests/linear_models/GLM/InverseGaussian/inv_gaussian.csv'
        data = np.genfromtxt(open(filename, 'rb'), delimiter=",", dtype=float)[1:]
        cls.Y = data[:5000, 0]
        cls.X = data[:5000, 1:]
        cls.res = InvGauss()

    def test_glm_IRLS(self):
        self.model = GLM(
            solver='IRLS', family=sm.families.InverseGaussian(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y)
        # coefficient
        self.assertEqual(self.model.coef.shape, (3, ))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array((1.0359574, 0.4519770, -0.2508288)), decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        self.assertEqual(self.model.stderr.shape, (3, ))
        np.testing.assert_array_almost_equal(
            self.model.stderr * np.sqrt(old_div(5000., 4997.)),
            np.array((0.03429943, 0.03148291, 0.02237211)), decimal=3)
        # scale
        self.assertAlmostEqual(
            self.model.dispersion * 5000. / 4997.,
            0.2867266359127567, places=6)
        # predict
        np.testing.assert_array_almost_equal(
            self.model.predict(self.X),
            self.res.fittedvalues, decimal=3)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.X, self.Y),
            -2525.70955823223, places=1)
        self.assertAlmostEqual(
            self.model.loglike_per_sample(self.X, self.Y).sum(),
            -2525.70955823223, places=1)
        self.assertEqual(
            self.model.loglike_per_sample(self.X, self.Y).shape, (5000,))
        # to_json
        json_dict = self.model.to_json('./tests/linear_models/GLM/InverseGaussian/')
        self.assertEqual(json_dict['properties']['solver'], 'IRLS')
        # from_json
        self.model_from_json = GLM.from_json(json_dict)
        np.testing.assert_array_almost_equal(
            self.model.coef, self.model_from_json.coef, decimal=3)
        np.testing.assert_array_almost_equal(
            self.model.stderr, self.model_from_json.stderr, decimal=3)
        self.assertEqual(
            self.model.dispersion, self.model_from_json.dispersion)
        np.testing.assert_array_almost_equal(
            self.model_from_json.predict(self.X),
            self.res.fittedvalues, decimal=3)

    def test_glm_regularized(self):
        pass

    def test_glm_sample_weight_all_half(self):
        self.model = GLM(
            solver='IRLS', family=sm.families.InverseGaussian(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y, sample_weight=0.5)
        # coefficient
        self.assertEqual(self.model.coef.shape, (3, ))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array((1.0359574, 0.4519770, -0.2508288)), decimal=3)
        # std.err of coefficient (calibrated by df_resid)
        self.assertEqual(self.model.stderr.shape, (3, ))
        np.testing.assert_array_almost_equal(
            self.model.stderr * np.sqrt(5000. / 4997. / 2.),
            np.array((0.03429943, 0.03148291, 0.02237211)), decimal=3)
        # scale
        self.assertAlmostEqual(
            self.model.dispersion * 5000. / 4997.,
            0.2867266359127567, places=6)
        # predict
        np.testing.assert_array_almost_equal(
            self.model.predict(self.X),
            self.res.fittedvalues, decimal=3)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.X, self.Y, sample_weight=0.5),
            old_div(-2525.70955823223, 2.), places=1)
        self.assertAlmostEqual(
            self.model.loglike_per_sample(self.X, self.Y).sum(),
            -2525.70955823223, places=1)
        self.assertEqual(
            self.model.loglike_per_sample(self.X, self.Y).shape, (5000,))

    def test_glm_sample_weight_all_zero(self):
        self.model = GLM(
            solver='IRLS', family=sm.families.InverseGaussian(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.assertRaises(ValueError, self.model.fit, self.X, self.Y, 0)

    def test_GLM_sample_weight_half_zero_half_one(self):
        self.model = GLM(
            solver='IRLS', family=sm.families.InverseGaussian(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        len_half = 2500
        self.model.fit(self.X, self.Y,
                       sample_weight=np.array([1] * len_half +
                                              [0] * (self.X.shape[0] - len_half)))
        self.model_half = GLM(
            solver='IRLS', family=sm.families.InverseGaussian(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model_half.fit(self.X[:len_half], self.Y[:len_half])
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef, self.model_half.coef, decimal=3)
        # std.err
        np.testing.assert_array_almost_equal(
            self.model.stderr, self.model_half.stderr, decimal=3)
        # scale
        np.testing.assert_array_almost_equal(
            self.model.dispersion, self.model_half.dispersion, decimal=3)

    # corner cases
    def test_glm_one_data_point(self):
        pass

    def test_ols_multicolinearty(self):
        self.model_col = GLM(
            solver='irls', family=sm.families.InverseGaussian(),
            fit_intercept=False, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        X = np.hstack([self.X[:, 0:1], self.X[:, 0:1]])
        self.model_col.fit(X, self.Y, sample_weight=0.5)
        self.model = GLM(
            solver='IRLS', family=sm.families.InverseGaussian(),
            fit_intercept=False, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X[:, 0:1], self.Y, sample_weight=0.5)
        # coef
        np.testing.assert_array_almost_equal(
            self.model_col.coef, np.array([0.712, 0.712]), decimal=3)
        # stderr
        # scale
        np.testing.assert_array_almost_equal(
            self.model_col.dispersion, self.model.dispersion, decimal=3)
        # loglike_per_sample
        np.testing.assert_array_almost_equal(
            self.model_col.loglike_per_sample(X, self.Y),
            self.model.loglike_per_sample(self.X[:, 0:1], self.Y), decimal=3)
        np.testing.assert_array_almost_equal(
            self.model_col.predict(X),
            self.model.predict(self.X[:, 0:1]), decimal=3)


class NegativeBinomialTests(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        data = sm.datasets.committee.load()
        data.exog[:, 2] = np.log(data.exog[:, 2])
        interaction = data.exog[:, 2] * data.exog[:, 1]
        data.exog = np.column_stack((data.exog, interaction))
        cls.Y = data.endog
        cls.X = data.exog

    def test_glm_IRLS(self):
        self.model = GLM(
            solver='IRLS', family=sm.families.NegativeBinomial(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y)
        # coefficient
        self.assertEqual(self.model.coef.shape, (7, ))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array([-6.44847076, -0.0268147, 1.25103364, 2.91070663,
                      -0.34799563, 0.00659808, -0.31303026]), decimal=2)
        # std.err of coefficient (calibrated by df_resid)
        self.assertEqual(self.model.stderr.shape, (7, ))
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            np.array([3.21429775e+00, 3.22130435e-02, 7.68090529e-01,
                      1.04436390e+00, 6.73309516e-01, 2.27984343e-03,
                      1.73596557e-01]), decimal=3)
        # scale
        self.assertEqual(self.model.dispersion, 1)
        # predict
        np.testing.assert_array_almost_equal(
            self.model.predict(self.X),
            np.array([12.62019383, 30.18289514, 21.48377849, 496.74068604,
                      103.23024673, 219.94693494, 324.4301163, 110.82526477,
                      112.44244488, 219.86056381, 56.84399998, 61.19840382,
                      114.09290269, 75.29071944, 61.21994387, 21.05130889,
                      42.75939828, 55.56133536, 0.72532053, 18.14664665]),
            decimal=0)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.X, self.Y),
            -101.33286676188968, places=1)
        self.assertAlmostEqual(
            self.model.loglike_per_sample(self.X, self.Y).sum(),
            -101.33286676188968, places=1)
        self.assertEqual(
            self.model.loglike_per_sample(self.X, self.Y).shape, (20,))
        # to_json
        json_dict = self.model.to_json('./tests/linear_models/GLM/NegativeBinomial/')
        self.assertEqual(json_dict['properties']['solver'], 'IRLS')
        # from_json
        self.model_from_json = GLM.from_json(json_dict)
        np.testing.assert_array_almost_equal(
            self.model.coef, self.model_from_json.coef, decimal=3)
        np.testing.assert_array_almost_equal(
            self.model.stderr, self.model_from_json.stderr, decimal=3)
        self.assertEqual(
            self.model.dispersion, self.model_from_json.dispersion)
        np.testing.assert_array_almost_equal(
            self.model_from_json.predict(self.X),
            np.array([12.62019383, 30.18289514, 21.48377849, 496.74068604,
                      103.23024673, 219.94693494, 324.4301163, 110.82526477,
                      112.44244488, 219.86056381, 56.84399998, 61.19840382,
                      114.09290269, 75.29071944, 61.21994387, 21.05130889,
                      42.75939828, 55.56133536, 0.72532053, 18.14664665]),
            decimal=0)

    def test_glm_regularized(self):
        pass

    def test_glm_sample_weight_all_half(self):
        self.model = GLM(
            solver='IRLS', family=sm.families.NegativeBinomial(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X, self.Y, sample_weight=0.5)
        # coefficient
        self.assertEqual(self.model.coef.shape, (7, ))
        np.testing.assert_array_almost_equal(
            self.model.coef,
            np.array([-6.44847076, -0.0268147, 1.25103364, 2.91070663,
                      -0.34799563, 0.00659808, -0.31303026]), decimal=2)
        # std.err of coefficient (calibrated by df_resid)
        self.assertEqual(self.model.stderr.shape, (7, ))
        np.testing.assert_array_almost_equal(
            self.model.stderr,
            np.array([4.54570348e+00, 4.55561229e-02, 1.08624404e+00,
                      1.47695359e+00, 9.52203449e-01, 3.22418550e-03,
                      2.45502605e-01]), decimal=3)
        # scale
        self.assertAlmostEqual(self.model.dispersion, 1, places=4)
        # predict
        np.testing.assert_array_almost_equal(
            self.model.predict(self.X),
            np.array([12.62019383, 30.18289514, 21.48377849, 496.74068604,
                      103.23024673, 219.94693494, 324.4301163, 110.82526477,
                      112.44244488, 219.86056381, 56.84399998, 61.19840382,
                      114.09290269, 75.29071944, 61.21994387, 21.05130889,
                      42.75939828, 55.56133536, 0.72532053, 18.14664665]),
            decimal=0)
        # loglike/_per_sample
        self.assertAlmostEqual(
            self.model.loglike(self.X, self.Y, sample_weight=0.5),
            old_div(-101.33286676188968, 2.), places=1)
        self.assertAlmostEqual(
            self.model.loglike_per_sample(self.X, self.Y).sum(),
            -101.33286676188968, places=1)
        self.assertEqual(
            self.model.loglike_per_sample(self.X, self.Y).shape, (20,))

    def test_glm_sample_weight_all_zero(self):
        self.model = GLM(
            solver='IRLS', family=sm.families.NegativeBinomial(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.assertRaises(ValueError, self.model.fit, self.X, self.Y, 0)

    def test_GLM_sample_weight_half_zero_half_one(self):
        self.model = GLM(
            solver='IRLS', family=sm.families.NegativeBinomial(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        len_half = 10
        self.model.fit(self.X, self.Y,
                       sample_weight=np.array([1] * len_half +
                                              [0] * (self.X.shape[0] - len_half)))
        self.model_half = GLM(
            solver='IRLS', family=sm.families.NegativeBinomial(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model_half.fit(self.X[:len_half], self.Y[:len_half])
        # coefficient
        np.testing.assert_array_almost_equal(
            self.model.coef, self.model_half.coef, decimal=2)
        # std.err
        np.testing.assert_array_almost_equal(
            self.model.stderr, self.model_half.stderr, decimal=3)
        # scale
        np.testing.assert_array_almost_equal(
            self.model.dispersion, self.model_half.dispersion, decimal=3)

    # corner cases
    def test_glm_one_data_point(self):
        pass

    def test_ols_multicolinearty(self):
        self.model_col = GLM(
            solver='irls', family=sm.families.NegativeBinomial(),
            fit_intercept=False, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        X = np.hstack([self.X[:, 0:1], self.X[:, 0:1]])
        self.model_col.fit(X, self.Y, sample_weight=0.5)
        self.model = GLM(
            solver='IRLS', family=sm.families.NegativeBinomial(),
            fit_intercept=False, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
        self.model.fit(self.X[:, 0:1], self.Y, sample_weight=0.5)
        # coef
        np.testing.assert_array_almost_equal(
            self.model_col.coef, np.array([0.059, 0.059]), decimal=3)
        # stderr
        # scale
        np.testing.assert_array_almost_equal(
            self.model_col.dispersion, self.model.dispersion, decimal=3)
        # loglike_per_sample
        np.testing.assert_array_almost_equal(
            self.model_col.loglike_per_sample(X, self.Y),
            self.model.loglike_per_sample(self.X[:, 0:1], self.Y), decimal=3)
        np.testing.assert_array_almost_equal(
            self.model_col.predict(X),
            self.model.predict(self.X[:, 0:1]), decimal=3)
Mogeng/IO-HMM
tests/test_GLM.py
Python
mit
67,433
[ "Gaussian" ]
b30fe330f0b40be631af8ed953f36d4a1bfeac1d49131b9ee9da3ce5b40127e0
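The record above tests IO-HMM's GLM wrapper around statsmodels families. A minimal, hedged usage sketch of the API those tests exercise follows; the import path and the synthetic data are assumptions (the constructor arguments are taken from the tests themselves):

import numpy as np
import statsmodels.api as sm
from IOHMM import GLM  # assumption: the import path may differ (e.g. IOHMM.linear_models)

# Hypothetical positive-valued response suitable for an inverse-Gaussian family.
rng = np.random.RandomState(0)
X = rng.normal(size=(200, 2))
y = np.exp(X.dot(np.array([0.5, -0.25]))) + rng.gamma(1.0, 0.1, size=200)

model = GLM(solver='IRLS', family=sm.families.InverseGaussian(),
            fit_intercept=True, est_stderr=True,
            reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
            coef=None, stderr=None, dispersion=None)
model.fit(X, y)
print(model.coef, model.dispersion)          # fitted coefficients and scale
print(model.loglike_per_sample(X, y).shape)  # (200,): one log-likelihood per row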
# Copyright (c) 2006-2011, 2013-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2011-2014 Google, Inc.
# Copyright (c) 2012 Tim Hatch <tim@timhatch.com>
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Rene Zhang <rz99@cornell.edu>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Steven Myint <hg@stevenmyint.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016 Erik <erik.eriksson@yahoo.com>
# Copyright (c) 2016 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2017 Martin von Gagern <gagern@google.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Natalie Serebryakova <natalie.serebryakova@Natalies-MacBook-Pro.local>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 Carey Metcalfe <carey@cmetcalfe.ca>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Alexander Todorov <atodorov@otb.bg>
# Copyright (c) 2018 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2019, 2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2019 Djailla <bastien.vallet@gmail.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2020 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>

# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE

"""Checks for various exception related errors."""
import builtins
import inspect
import typing

import astroid

from pylint import checkers, interfaces
from pylint.checkers import utils


def _builtin_exceptions():
    def predicate(obj):
        return isinstance(obj, type) and issubclass(obj, BaseException)

    members = inspect.getmembers(builtins, predicate)
    return {exc.__name__ for (_, exc) in members}


def _annotated_unpack_infer(stmt, context=None):
    """
    Recursively generate nodes inferred by the given statement.
    If the inferred value is a list or a tuple, recurse on the elements.
    Returns an iterator which yields tuples in the format
    ('original node', 'inferred node').
    """
    if isinstance(stmt, (astroid.List, astroid.Tuple)):
        for elt in stmt.elts:
            inferred = utils.safe_infer(elt)
            if inferred and inferred is not astroid.Uninferable:
                yield elt, inferred
        return
    for inferred in stmt.infer(context):
        if inferred is astroid.Uninferable:
            continue
        yield stmt, inferred


def _is_raising(body: typing.List) -> bool:
    """Return true if the given statement node raise an exception"""
    for node in body:
        if isinstance(node, astroid.Raise):
            return True
    return False


OVERGENERAL_EXCEPTIONS = ("BaseException", "Exception")
BUILTINS_NAME = builtins.__name__

MSGS = {
    "E0701": (
        "Bad except clauses order (%s)",
        "bad-except-order",
        "Used when except clauses are not in the correct order (from the "
        "more specific to the more generic). If you don't fix the order, "
        "some exceptions may not be caught by the most specific handler.",
    ),
    "E0702": (
        "Raising %s while only classes or instances are allowed",
        "raising-bad-type",
        "Used when something which is neither a class, an instance or a "
        "string is raised (i.e. a `TypeError` will be raised).",
    ),
    "E0703": (
        "Exception context set to something which is not an exception, nor None",
        "bad-exception-context",
        'Used when using the syntax "raise ... from ...", '
        "where the exception context is not an exception, "
        "nor None.",
    ),
    "E0704": (
        "The raise statement is not inside an except clause",
        "misplaced-bare-raise",
        "Used when a bare raise is not used inside an except clause. "
        "This generates an error, since there are no active exceptions "
        "to be reraised. An exception to this rule is represented by "
        "a bare raise inside a finally clause, which might work, as long "
        "as an exception is raised inside the try block, but it is "
        "nevertheless a code smell that must not be relied upon.",
    ),
    "E0710": (
        "Raising a new style class which doesn't inherit from BaseException",
        "raising-non-exception",
        "Used when a new style class which doesn't inherit from "
        "BaseException is raised.",
    ),
    "E0711": (
        "NotImplemented raised - should raise NotImplementedError",
        "notimplemented-raised",
        "Used when NotImplemented is raised instead of NotImplementedError",
    ),
    "E0712": (
        "Catching an exception which doesn't inherit from Exception: %s",
        "catching-non-exception",
        "Used when a class which doesn't inherit from "
        "Exception is used as an exception in an except clause.",
    ),
    "W0702": (
        "No exception type(s) specified",
        "bare-except",
        "Used when an except clause doesn't specify exceptions type to catch.",
    ),
    "W0703": (
        "Catching too general exception %s",
        "broad-except",
        "Used when an except catches a too general exception, "
        "possibly burying unrelated errors.",
    ),
    "W0705": (
        "Catching previously caught exception type %s",
        "duplicate-except",
        "Used when an except catches a type that was already caught by "
        "a previous handler.",
    ),
    "W0706": (
        "The except handler raises immediately",
        "try-except-raise",
        "Used when an except handler uses raise as its first or only "
        "operator. This is useless because it raises back the exception "
        "immediately. Remove the raise operator or the entire "
        "try-except-raise block!",
    ),
    "W0707": (
        "Consider explicitly re-raising using the 'from' keyword",
        "raise-missing-from",
        "Python 3's exception chaining means it shows the traceback of the "
        "current exception, but also the original exception. Not using `raise "
        "from` makes the traceback inaccurate, because the message implies "
        "there is a bug in the exception-handling code itself, which is a "
        "separate situation than wrapping an exception.",
    ),
    "W0711": (
        'Exception to catch is the result of a binary "%s" operation',
        "binary-op-exception",
        "Used when the exception to catch is of the form "
        '"except A or B:". If intending to catch multiple, '
        'rewrite as "except (A, B):"',
    ),
    "W0715": (
        "Exception arguments suggest string formatting might be intended",
        "raising-format-tuple",
        "Used when passing multiple arguments to an exception "
        "constructor, the first of them a string literal containing what "
        "appears to be placeholders intended for formatting",
    ),
    "W0716": (
        "Invalid exception operation. %s",
        "wrong-exception-operation",
        "Used when an operation is done against an exception, but the operation "
        "is not valid for the exception in question. Usually emitted when having "
        "binary operations between exceptions in except handlers.",
    ),
}


class BaseVisitor:
    """Base class for visitors defined in this module."""

    def __init__(self, checker, node):
        self._checker = checker
        self._node = node

    def visit(self, node):
        name = node.__class__.__name__.lower()
        dispatch_meth = getattr(self, "visit_" + name, None)
        if dispatch_meth:
            dispatch_meth(node)
        else:
            self.visit_default(node)

    def visit_default(self, node):  # pylint: disable=unused-argument
        """Default implementation for all the nodes."""


class ExceptionRaiseRefVisitor(BaseVisitor):
    """Visit references (anything that is not an AST leaf)."""

    def visit_name(self, name):
        if name.name == "NotImplemented":
            self._checker.add_message("notimplemented-raised", node=self._node)

    def visit_call(self, call):
        if isinstance(call.func, astroid.Name):
            self.visit_name(call.func)
        if (
            len(call.args) > 1
            and isinstance(call.args[0], astroid.Const)
            and isinstance(call.args[0].value, str)
        ):
            msg = call.args[0].value
            if "%" in msg or ("{" in msg and "}" in msg):
                self._checker.add_message("raising-format-tuple", node=self._node)


class ExceptionRaiseLeafVisitor(BaseVisitor):
    """Visitor for handling leaf kinds of a raise value."""

    def visit_const(self, const):
        if not isinstance(const.value, str):
            # raising-string will be emitted from python3 porting checker.
            self._checker.add_message(
                "raising-bad-type", node=self._node, args=const.value.__class__.__name__
            )

    def visit_instance(self, instance):
        # pylint: disable=protected-access
        cls = instance._proxied
        self.visit_classdef(cls)

    # Exception instances have a particular class type
    visit_exceptioninstance = visit_instance

    def visit_classdef(self, cls):
        if not utils.inherit_from_std_ex(cls) and utils.has_known_bases(cls):
            if cls.newstyle:
                self._checker.add_message("raising-non-exception", node=self._node)

    def visit_tuple(self, _):
        self._checker.add_message("raising-bad-type", node=self._node, args="tuple")

    def visit_default(self, node):
        name = getattr(node, "name", node.__class__.__name__)
        self._checker.add_message("raising-bad-type", node=self._node, args=name)


class ExceptionsChecker(checkers.BaseChecker):
    """Exception related checks."""

    __implements__ = interfaces.IAstroidChecker

    name = "exceptions"
    msgs = MSGS
    priority = -4
    options = (
        (
            "overgeneral-exceptions",
            {
                "default": OVERGENERAL_EXCEPTIONS,
                "type": "csv",
                "metavar": "<comma-separated class names>",
                "help": "Exceptions that will emit a warning "
                'when being caught. Defaults to "%s".'
                % (", ".join(OVERGENERAL_EXCEPTIONS),),
            },
        ),
    )

    def open(self):
        self._builtin_exceptions = _builtin_exceptions()
        super().open()

    @utils.check_messages(
        "misplaced-bare-raise",
        "raising-bad-type",
        "raising-non-exception",
        "notimplemented-raised",
        "bad-exception-context",
        "raising-format-tuple",
        "raise-missing-from",
    )
    def visit_raise(self, node):
        if node.exc is None:
            self._check_misplaced_bare_raise(node)
            return
        if node.cause is None:
            self._check_raise_missing_from(node)
        else:
            self._check_bad_exception_context(node)

        expr = node.exc
        ExceptionRaiseRefVisitor(self, node).visit(expr)

        try:
            inferred_value = expr.inferred()[-1]
        except astroid.InferenceError:
            pass
        else:
            if inferred_value:
                ExceptionRaiseLeafVisitor(self, node).visit(inferred_value)

    def _check_misplaced_bare_raise(self, node):
        # Filter out if it's present in __exit__.
        scope = node.scope()
        if (
            isinstance(scope, astroid.FunctionDef)
            and scope.is_method()
            and scope.name == "__exit__"
        ):
            return

        current = node
        # Stop when a new scope is generated or when the raise
        # statement is found inside a TryFinally.
        ignores = (astroid.ExceptHandler, astroid.FunctionDef)
        while current and not isinstance(current.parent, ignores):
            current = current.parent

        expected = (astroid.ExceptHandler,)
        if not current or not isinstance(current.parent, expected):
            self.add_message("misplaced-bare-raise", node=node)

    def _check_bad_exception_context(self, node: astroid.Raise) -> None:
        """Verify that the exception context is properly set.

        An exception context can be only `None` or an exception.
        """
        cause = utils.safe_infer(node.cause)
        if cause in (astroid.Uninferable, None):
            return

        if isinstance(cause, astroid.Const):
            if cause.value is not None:
                self.add_message("bad-exception-context", node=node)
        elif not isinstance(cause, astroid.ClassDef) and not utils.inherit_from_std_ex(
            cause
        ):
            self.add_message("bad-exception-context", node=node)

    def _check_raise_missing_from(self, node: astroid.Raise) -> None:
        if node.exc is None:
            # This is a plain `raise`, raising the previously-caught exception. No need for a
            # cause.
            return
        # We'd like to check whether we're inside an `except` clause:
        containing_except_node = utils.find_except_wrapper_node_in_scope(node)
        if not containing_except_node:
            return
        # We found a surrounding `except`! We're almost done proving there's a
        # `raise-missing-from` here. The only thing we need to protect against is that maybe
        # the `raise` is raising the exception that was caught, possibly with some shenanigans
        # like `exc.with_traceback(whatever)`. We won't analyze these, we'll just assume
        # there's a violation on two simple cases: `raise SomeException(whatever)` and `raise
        # SomeException`.
        if containing_except_node.name is None:
            # The `except` doesn't have an `as exception:` part, meaning there's no way that
            # the `raise` is raising the same exception.
            self.add_message("raise-missing-from", node=node)
        elif isinstance(node.exc, astroid.Call) and isinstance(
            node.exc.func, astroid.Name
        ):
            # We have a `raise SomeException(whatever)`.
            self.add_message("raise-missing-from", node=node)
        elif (
            isinstance(node.exc, astroid.Name)
            and node.exc.name != containing_except_node.name.name
        ):
            # We have a `raise SomeException`.
            self.add_message("raise-missing-from", node=node)

    def _check_catching_non_exception(self, handler, exc, part):
        if isinstance(exc, astroid.Tuple):
            # Check if it is a tuple of exceptions.
            inferred = [utils.safe_infer(elt) for elt in exc.elts]
            if any(node is astroid.Uninferable for node in inferred):
                # Don't emit if we don't know every component.
                return
            if all(
                node
                and (utils.inherit_from_std_ex(node) or not utils.has_known_bases(node))
                for node in inferred
            ):
                return

        if not isinstance(exc, astroid.ClassDef):
            # Don't emit the warning if the inferred stmt
            # is None, but the exception handler is something else,
            # maybe it was redefined.
            if isinstance(exc, astroid.Const) and exc.value is None:
                if (
                    isinstance(handler.type, astroid.Const)
                    and handler.type.value is None
                ) or handler.type.parent_of(exc):
                    # If the exception handler catches None or
                    # the exception component, which is None, is
                    # defined by the entire exception handler, then
                    # emit a warning.
                    self.add_message(
                        "catching-non-exception",
                        node=handler.type,
                        args=(part.as_string(),),
                    )
            else:
                self.add_message(
                    "catching-non-exception",
                    node=handler.type,
                    args=(part.as_string(),),
                )
            return

        if (
            not utils.inherit_from_std_ex(exc)
            and exc.name not in self._builtin_exceptions
        ):
            if utils.has_known_bases(exc):
                self.add_message(
                    "catching-non-exception", node=handler.type, args=(exc.name,)
                )

    def _check_try_except_raise(self, node):
        def gather_exceptions_from_handler(
            handler,
        ) -> typing.Optional[typing.List[astroid.node_classes.NodeNG]]:
            exceptions: typing.List[astroid.node_classes.NodeNG] = []
            if handler.type:
                exceptions_in_handler = utils.safe_infer(handler.type)
                if isinstance(exceptions_in_handler, astroid.Tuple):
                    exceptions = list(
                        {
                            exception
                            for exception in exceptions_in_handler.elts
                            if isinstance(exception, astroid.Name)
                        }
                    )
                elif exceptions_in_handler:
                    exceptions = [exceptions_in_handler]
                else:
                    # Break when we cannot infer anything reliably.
                    return None
            return exceptions

        bare_raise = False
        handler_having_bare_raise = None
        excs_in_bare_handler = []
        for handler in node.handlers:
            if bare_raise:
                # check that subsequent handler is not parent of handler which had bare raise.
                # since utils.safe_infer can fail for bare except, check it before.
                # also break early if bare except is followed by bare except.
                excs_in_current_handler = gather_exceptions_from_handler(handler)

                if not excs_in_current_handler:
                    break
                if excs_in_bare_handler is None:
                    # It can be `None` when the inference failed
                    break

                for exc_in_current_handler in excs_in_current_handler:
                    inferred_current = utils.safe_infer(exc_in_current_handler)
                    if any(
                        utils.is_subclass_of(utils.safe_infer(e), inferred_current)
                        for e in excs_in_bare_handler
                    ):
                        bare_raise = False
                        break

            # `raise` as the first operator inside the except handler
            if _is_raising([handler.body[0]]):
                # flags when there is a bare raise
                if handler.body[0].exc is None:
                    bare_raise = True
                    handler_having_bare_raise = handler
                    excs_in_bare_handler = gather_exceptions_from_handler(handler)
        else:
            if bare_raise:
                self.add_message("try-except-raise", node=handler_having_bare_raise)

    @utils.check_messages("wrong-exception-operation")
    def visit_binop(self, node):
        if isinstance(node.parent, astroid.ExceptHandler):
            # except (V | A)
            suggestion = "Did you mean '({}, {})' instead?".format(
                node.left.as_string(),
                node.right.as_string(),
            )
            self.add_message("wrong-exception-operation", node=node, args=(suggestion,))

    @utils.check_messages("wrong-exception-operation")
    def visit_compare(self, node):
        if isinstance(node.parent, astroid.ExceptHandler):
            # except (V < A)
            suggestion = "Did you mean '({}, {})' instead?".format(
                node.left.as_string(),
                ", ".join(operand.as_string() for _, operand in node.ops),
            )
            self.add_message("wrong-exception-operation", node=node, args=(suggestion,))

    @utils.check_messages(
        "bare-except",
        "broad-except",
        "try-except-raise",
        "binary-op-exception",
        "bad-except-order",
        "catching-non-exception",
        "duplicate-except",
    )
    def visit_tryexcept(self, node):
        """check for empty except"""
        self._check_try_except_raise(node)
        exceptions_classes = []
        nb_handlers = len(node.handlers)
        for index, handler in enumerate(node.handlers):
            if handler.type is None:
                if not _is_raising(handler.body):
                    self.add_message("bare-except", node=handler)

                # check if an "except:" is followed by some other
                # except
                if index < (nb_handlers - 1):
                    msg = "empty except clause should always appear last"
                    self.add_message("bad-except-order", node=node, args=msg)

            elif isinstance(handler.type, astroid.BoolOp):
                self.add_message(
                    "binary-op-exception", node=handler, args=handler.type.op
                )
            else:
                try:
                    excs = list(_annotated_unpack_infer(handler.type))
                except astroid.InferenceError:
                    continue

                for part, exc in excs:
                    if exc is astroid.Uninferable:
                        continue
                    if isinstance(exc, astroid.Instance) and utils.inherit_from_std_ex(
                        exc
                    ):
                        # pylint: disable=protected-access
                        exc = exc._proxied

                    self._check_catching_non_exception(handler, exc, part)

                    if not isinstance(exc, astroid.ClassDef):
                        continue

                    exc_ancestors = [
                        anc
                        for anc in exc.ancestors()
                        if isinstance(anc, astroid.ClassDef)
                    ]

                    for previous_exc in exceptions_classes:
                        if previous_exc in exc_ancestors:
                            msg = "{} is an ancestor class of {}".format(
                                previous_exc.name,
                                exc.name,
                            )
                            self.add_message(
                                "bad-except-order", node=handler.type, args=msg
                            )
                    if (
                        exc.name in self.config.overgeneral_exceptions
                        and exc.root().name == utils.EXCEPTIONS_MODULE
                        and not _is_raising(handler.body)
                    ):
                        self.add_message(
                            "broad-except", args=exc.name, node=handler.type
                        )

                    if exc in exceptions_classes:
                        self.add_message(
                            "duplicate-except", args=exc.name, node=handler.type
                        )

                exceptions_classes += [exc for _, exc in excs]


def register(linter):
    """required method to auto register this checker"""
    linter.register_checker(ExceptionsChecker(linter))
ruchee/vimrc
vimfiles/bundle/vim-python/submodules/pylint/pylint/checkers/exceptions.py
Python
mit
23,645
[ "VisIt" ]
96a5154e6aa3b6687a3cc101bc120907dbb5268b1f7671b22d863e5e618b5424
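For orientation, a small, hedged example of code that the checker in the record above would flag; the message names come from its MSGS table, and the functions here are hypothetical:

def parse(path):
    try:
        return open(path).read()
    except Exception:   # W0703 broad-except; also E0701 bad-except-order,
        return None     # because the generic handler hides OSError below
    except OSError:
        return ""

def load(path):
    try:
        return parse(path)
    except OSError as err:
        # W0707 raise-missing-from: should be `raise RuntimeError(...) from err`
        raise RuntimeError("load failed")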
""" Given a fasta file and a list of sequence IDs, write the whole file out but with those sequence IDs reverse complemented. """ import os import sys import argparse import gzip def stream_fasta(fastafile): """ Stream a fasta file, one read at a time. Saves memory! :param fastafile: The fasta file to stream :type fastafile: str :return:A single read :rtype:str, str """ try: if fastafile.endswith('.gz'): f = gzip.open(fastafile, 'rb') else: f = open(fastafile, 'r', encoding='utf-8') except IOError as e: sys.stderr.write(str(e) + "\n") sys.stderr.write("Message: \n" + str(e.message) + "\n") sys.exit("Unable to open file " + fastafile) posn = 0 while f: # first line should start with > idline = f.readline() if not idline: break if not idline.startswith('>'): sys.exit("Do not have a fasta file at: {}".format(idline)) idline = idline.strip().replace('>', '', 1) posn = f.tell() line = f.readline() seq = "" while not line.startswith('>'): seq += line.strip() posn = f.tell() line = f.readline() if not line: break f.seek(posn) yield idline, seq def rc(dna): """ Reverse complement a DNA sequence :param dna: The DNA sequence :type dna: str :return: The reverse complement of the DNA sequence :rtype: str """ complements = str.maketrans('acgtrymkbdhvACGTRYMKBDHV', 'tgcayrkmvhdbTGCAYRKMVHDB') rcseq = dna.translate(complements)[::-1] return rcseq if __name__ == '__main__': parser = argparse.ArgumentParser(description='Reverse complement some sequences, and eliminate without a blast hit') parser.add_argument('-f', help='fasta file of sequences', required=True) parser.add_argument('-b', help='blast output file', required=True) parser.add_argument('-o', help='output file', required=True) parser.add_argument('-v', help='verbose output', action='store_true') args = parser.parse_args() torc = set() tokeep = set() # perl -ne '@a=split /\t/; if (!$$s{$$a[0]} && $$a[8] > $$a[9]) {print "$$a[0]\n"} $$s{$$a[0]}=1' seqs.$(PRIMER).crassphage.blastn > seqs.$(PRIMER)_to_rc.txt seen = set() with open(args.b, 'r') as f: for l in f: p = l.strip().split("\t") if p[0] in seen: continue seen.add(p[0]) # so we don't worry about 2nd best hits tokeep.add(p[0]) if int(p[8]) > int(p[9]): torc.add(p[0]) with open(args.o, 'w', encoding='utf-8') as out: for ids, seq in stream_fasta(args.f): name = ids.split(" ")[0] if name in torc: seq = rc(seq) if name in tokeep: out.write(">{}\n{}\n".format(ids, seq))
linsalrob/crAssphage
bin/reverse_complement.py
Python
mit
2,965
[ "BLAST" ]
75e15f789c122f3b1abdf6d687890cf96dc865fb731bb1b521972bf14ca1c2fb
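A hedged sketch of reusing the two helpers from the record above in another script; the module import and the fasta file name are placeholders:

from reverse_complement import rc, stream_fasta  # assumption: script importable as a module

for seqid, seq in stream_fasta('seqs.fasta'):    # placeholder fasta file
    print(seqid, rc(seq)[:30])                   # first 30 bases of each reverse complement

assert rc('ACGTac') == 'gtACGT'  # complement then reverse, preserving case per base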
# -*- coding: utf-8 -*-

############################ Copyrights and license ############################
#
# Copyright 2012 Andrew Bettison <andrewb@zip.com.au>
# Copyright 2012 Dima Kukushkin <dima@kukushkin.me>
# Copyright 2012 Michael Woodworth <mwoodworth@upverter.com>
# Copyright 2012 Petteri Muilu <pmuilu@xena.(none)>
# Copyright 2012 Steve English <steve.english@navetas.com>
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net>
# Copyright 2012 Zearin <zearin@gonk.net>
# Copyright 2013 AKFish <akfish@gmail.com>
# Copyright 2013 Cameron White <cawhite@pdx.edu>
# Copyright 2013 Ed Jackson <ed.jackson@gmail.com>
# Copyright 2013 Jonathan J Hunt <hunt@braincorporation.com>
# Copyright 2013 Mark Roddy <markroddy@gmail.com>
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>
# Copyright 2014 Jimmy Zelinskie <jimmyzelinskie@gmail.com>
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net>
# Copyright 2015 Brian Eugley <Brian.Eugley@capitalone.com>
# Copyright 2015 Daniel Pocock <daniel@pocock.pro>
# Copyright 2015 Jimmy Zelinskie <jimmyzelinskie@gmail.com>
# Copyright 2016 Denis K <f1nal@cgaming.org>
# Copyright 2016 Jared K. Smith <jaredsmith@jaredsmith.net>
# Copyright 2016 Jimmy Zelinskie <jimmy.zelinskie+git@gmail.com>
# Copyright 2016 Mathieu Mitchell <mmitchell@iweb.com>
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com>
# Copyright 2017 Chris McBride <thehighlander@users.noreply.github.com>
# Copyright 2017 Hugo <hugovk@users.noreply.github.com>
# Copyright 2017 Simon <spam@esemi.ru>
# Copyright 2018 Dylan <djstein@ncsu.edu>
# Copyright 2018 Maarten Fonville <mfonville@users.noreply.github.com>
# Copyright 2018 Mike Miller <github@mikeage.net>
# Copyright 2018 R1kk3r <R1kk3r@users.noreply.github.com>
# Copyright 2018 sfdye <tsfdye@gmail.com>
#
# This file is part of PyGithub.
# http://pygithub.readthedocs.io/
#
# PyGithub is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################

import base64
import json
import logging
import mimetypes
import os
import re
import time
import urllib
from io import IOBase

import requests

from . import Consts, GithubException


class RequestsResponse:
    # mimic the httplib response object
    def __init__(self, r):
        self.status = r.status_code
        self.headers = r.headers
        self.text = r.text

    def getheaders(self):
        return self.headers.items()

    def read(self):
        return self.text


class HTTPSRequestsConnectionClass(object):
    # mimic the httplib connection object
    def __init__(
        self, host, port=None, strict=False, timeout=None, retry=None, **kwargs
    ):
        self.port = port if port else 443
        self.host = host
        self.protocol = "https"
        self.timeout = timeout
        self.verify = kwargs.get("verify", True)
        self.session = requests.Session()
        # Code to support retries
        if retry:
            self.retry = retry
            self.adapter = requests.adapters.HTTPAdapter(max_retries=self.retry)
            self.session.mount("https://", self.adapter)

    def request(self, verb, url, input, headers):
        self.verb = verb
        self.url = url
        self.input = input
        self.headers = headers

    def getresponse(self):
        verb = getattr(self.session, self.verb.lower())
        url = "%s://%s:%s%s" % (self.protocol, self.host, self.port, self.url)
        r = verb(
            url,
            headers=self.headers,
            data=self.input,
            timeout=self.timeout,
            verify=self.verify,
            allow_redirects=False,
        )
        return RequestsResponse(r)

    def close(self):
        return


class HTTPRequestsConnectionClass(object):
    # mimic the httplib connection object
    def __init__(
        self, host, port=None, strict=False, timeout=None, retry=None, **kwargs
    ):
        self.port = port if port else 80
        self.host = host
        self.protocol = "http"
        self.timeout = timeout
        self.verify = kwargs.get("verify", True)
        self.session = requests.Session()
        # Code to support retries
        if retry:
            self.retry = retry
            self.adapter = requests.adapters.HTTPAdapter(max_retries=self.retry)
            self.session.mount("http://", self.adapter)

    def request(self, verb, url, input, headers):
        self.verb = verb
        self.url = url
        self.input = input
        self.headers = headers

    def getresponse(self):
        verb = getattr(self.session, self.verb.lower())
        url = "%s://%s:%s%s" % (self.protocol, self.host, self.port, self.url)
        r = verb(
            url,
            headers=self.headers,
            data=self.input,
            timeout=self.timeout,
            verify=self.verify,
            allow_redirects=False,
        )
        return RequestsResponse(r)

    def close(self):
        return


class Requester:
    __httpConnectionClass = HTTPRequestsConnectionClass
    __httpsConnectionClass = HTTPSRequestsConnectionClass
    __connection = None
    __persist = True
    __logger = None

    @classmethod
    def injectConnectionClasses(cls, httpConnectionClass, httpsConnectionClass):
        cls.__persist = False
        cls.__httpConnectionClass = httpConnectionClass
        cls.__httpsConnectionClass = httpsConnectionClass

    @classmethod
    def resetConnectionClasses(cls):
        cls.__persist = True
        cls.__httpConnectionClass = HTTPRequestsConnectionClass
        cls.__httpsConnectionClass = HTTPSRequestsConnectionClass

    @classmethod
    def injectLogger(cls, logger):
        cls.__logger = logger

    @classmethod
    def resetLogger(cls):
        cls.__logger = None

    #############################################################
    # For Debug
    @classmethod
    def setDebugFlag(cls, flag):
        cls.DEBUG_FLAG = flag

    @classmethod
    def setOnCheckMe(cls, onCheckMe):
        cls.ON_CHECK_ME = onCheckMe

    DEBUG_FLAG = False
    DEBUG_FRAME_BUFFER_SIZE = 1024
    DEBUG_HEADER_KEY = "DEBUG_FRAME"
    ON_CHECK_ME = None

    def NEW_DEBUG_FRAME(self, requestHeader):
        """
        Initialize a debug frame with requestHeader
        Frame count is updated and will be attached to respond header
        The structure of a frame: [requestHeader, statusCode, responseHeader, raw_data]
        Some of them may be None
        """
        if self.DEBUG_FLAG:  # pragma no branch (Flag always set in tests)
            new_frame = [requestHeader, None, None, None]
            if (
                self._frameCount < self.DEBUG_FRAME_BUFFER_SIZE - 1
            ):  # pragma no branch (Should be covered)
                self._frameBuffer.append(new_frame)
            else:
                self._frameBuffer[0] = new_frame  # pragma no cover (Should be covered)

            self._frameCount = len(self._frameBuffer) - 1

    def DEBUG_ON_RESPONSE(self, statusCode, responseHeader, data):
        """
        Update current frame with response
        Current frame index will be attached to responseHeader
        """
        if self.DEBUG_FLAG:  # pragma no branch (Flag always set in tests)
            self._frameBuffer[self._frameCount][1:4] = [
                statusCode,
                responseHeader,
                data,
            ]
            responseHeader[self.DEBUG_HEADER_KEY] = self._frameCount

    def check_me(self, obj):
        if (
            self.DEBUG_FLAG and self.ON_CHECK_ME is not None
        ):  # pragma no branch (Flag always set in tests)
            frame = None
            if self.DEBUG_HEADER_KEY in obj._headers:
                frame_index = obj._headers[self.DEBUG_HEADER_KEY]
                frame = self._frameBuffer[frame_index]
            self.ON_CHECK_ME(obj, frame)

    def _initializeDebugFeature(self):
        self._frameCount = 0
        self._frameBuffer = []

    #############################################################

    def __init__(
        self,
        login_or_token,
        password,
        jwt,
        base_url,
        timeout,
        client_id,
        client_secret,
        user_agent,
        per_page,
        verify,
        retry,
    ):
        self._initializeDebugFeature()

        if password is not None:
            login = login_or_token
            self.__authorizationHeader = "Basic " + base64.b64encode(
                (login + ":" + password).encode("utf-8")
            ).decode("utf-8").replace("\n", "")
        elif login_or_token is not None:
            token = login_or_token
            self.__authorizationHeader = "token " + token
        elif jwt is not None:
            self.__authorizationHeader = "Bearer " + jwt
        else:
            self.__authorizationHeader = None

        self.__base_url = base_url
        o = urllib.parse.urlparse(base_url)
        self.__hostname = o.hostname
        self.__port = o.port
        self.__prefix = o.path
        self.__timeout = timeout
        self.__retry = retry  # NOTE: retry can be either int or an urllib3 Retry object
        self.__scheme = o.scheme
        if o.scheme == "https":
            self.__connectionClass = self.__httpsConnectionClass
        elif o.scheme == "http":
            self.__connectionClass = self.__httpConnectionClass
        else:
            assert False, "Unknown URL scheme"
        self.rate_limiting = (-1, -1)
        self.rate_limiting_resettime = 0
        self.FIX_REPO_GET_GIT_REF = True
        self.per_page = per_page

        self.oauth_scopes = None

        self.__clientId = client_id
        self.__clientSecret = client_secret

        assert user_agent is not None, (
            "github now requires a user-agent. "
            "See http://developer.github.com/v3/#user-agent-required"
        )
        self.__userAgent = user_agent
        self.__verify = verify

    def requestJsonAndCheck(self, verb, url, parameters=None, headers=None, input=None):
        return self.__check(
            *self.requestJson(
                verb, url, parameters, headers, input, self.__customConnection(url)
            )
        )

    def requestMultipartAndCheck(
        self, verb, url, parameters=None, headers=None, input=None
    ):
        return self.__check(
            *self.requestMultipart(
                verb, url, parameters, headers, input, self.__customConnection(url)
            )
        )

    def requestBlobAndCheck(self, verb, url, parameters=None, headers=None, input=None):
        return self.__check(
            *self.requestBlob(
                verb, url, parameters, headers, input, self.__customConnection(url)
            )
        )

    def __check(self, status, responseHeaders, output):
        output = self.__structuredFromJson(output)
        if status >= 400:
            raise self.__createException(status, responseHeaders, output)
        return responseHeaders, output

    def __customConnection(self, url):
        cnx = None
        if not url.startswith("/"):
            o = urllib.parse.urlparse(url)
            if (
                o.hostname != self.__hostname
                or (o.port and o.port != self.__port)
                or (
                    o.scheme != self.__scheme
                    and not (o.scheme == "https" and self.__scheme == "http")
                )
            ):  # issue80
                if o.scheme == "http":
                    cnx = self.__httpConnectionClass(
                        o.hostname, o.port, retry=self.__retry
                    )
                elif o.scheme == "https":
                    cnx = self.__httpsConnectionClass(
                        o.hostname, o.port, retry=self.__retry
                    )
        return cnx

    def __createException(self, status, headers, output):
        if status == 401 and output.get("message") == "Bad credentials":
            cls = GithubException.BadCredentialsException
        elif (
            status == 401
            and Consts.headerOTP in headers
            and re.match(r".*required.*", headers[Consts.headerOTP])
        ):
            cls = GithubException.TwoFactorException
        elif status == 403 and output.get("message").startswith(
            "Missing or invalid User Agent string"
        ):
            cls = GithubException.BadUserAgentException
        elif status == 403 and (
            output.get("message").lower().startswith("api rate limit exceeded")
            or output.get("message")
            .lower()
            .endswith("please wait a few minutes before you try again.")
        ):
            cls = GithubException.RateLimitExceededException
        elif status == 404 and output.get("message") == "Not Found":
            cls = GithubException.UnknownObjectException
        else:
            cls = GithubException.GithubException
        return cls(status, output)

    def __structuredFromJson(self, data):
        if len(data) == 0:
            return None
        else:
            if isinstance(data, bytes):
                data = data.decode("utf-8")
            try:
                return json.loads(data)
            except ValueError:
                return {"data": data}

    def requestJson(self, verb, url, parameters=None, headers=None, input=None, cnx=None):
        def encode(input):
            return "application/json", json.dumps(input)

        return self.__requestEncode(cnx, verb, url, parameters, headers, input, encode)

    def requestMultipart(
        self, verb, url, parameters=None, headers=None, input=None, cnx=None
    ):
        def encode(input):
            boundary = "----------------------------3c3ba8b523b2"
            eol = "\r\n"

            encoded_input = ""
            for name, value in input.items():
                encoded_input += "--" + boundary + eol
                encoded_input += (
                    'Content-Disposition: form-data; name="' + name + '"' + eol
                )
                encoded_input += eol
                encoded_input += value + eol
            encoded_input += "--" + boundary + "--" + eol
            return "multipart/form-data; boundary=" + boundary, encoded_input

        return self.__requestEncode(cnx, verb, url, parameters, headers, input, encode)

    def requestBlob(self, verb, url, parameters={}, headers={}, input=None, cnx=None):
        def encode(local_path):
            if "Content-Type" in headers:
                mime_type = headers["Content-Type"]
            else:
                guessed_type = mimetypes.guess_type(input)
                mime_type = (
                    guessed_type[0]
                    if guessed_type[0] is not None
                    else Consts.defaultMediaType
                )
            f = open(local_path, "rb")
            return mime_type, f

        if input:
            headers["Content-Length"] = str(os.path.getsize(input))
        return self.__requestEncode(cnx, verb, url, parameters, headers, input, encode)

    def requestMemoryBlobAndCheck(
        self, verb, url, parameters, headers, file_like, cnx=None
    ):
        # The expected signature of encode means that the argument is ignored.
        def encode(_):
            return headers["Content-Type"], file_like

        if not cnx:
            cnx = self.__customConnection(url)
        return self.__check(
            *self.__requestEncode(
                cnx, verb, url, parameters, headers, file_like, encode
            )
        )

    def __requestEncode(
        self, cnx, verb, url, parameters, requestHeaders, input, encode
    ):
        assert verb in ["HEAD", "GET", "POST", "PATCH", "PUT", "DELETE"]
        if parameters is None:
            parameters = dict()
        if requestHeaders is None:
            requestHeaders = dict()

        self.__authenticate(url, requestHeaders, parameters)
        requestHeaders["User-Agent"] = self.__userAgent

        url = self.__makeAbsoluteUrl(url)
        url = self.__addParametersToUrl(url, parameters)

        encoded_input = None
        if input is not None:
            requestHeaders["Content-Type"], encoded_input = encode(input)

        self.NEW_DEBUG_FRAME(requestHeaders)

        status, responseHeaders, output = self.__requestRaw(
            cnx, verb, url, requestHeaders, encoded_input
        )

        if (
            Consts.headerRateRemaining in responseHeaders
            and Consts.headerRateLimit in responseHeaders
        ):
            self.rate_limiting = (
                int(responseHeaders[Consts.headerRateRemaining]),
                int(responseHeaders[Consts.headerRateLimit]),
            )
        if Consts.headerRateReset in responseHeaders:
            self.rate_limiting_resettime = int(responseHeaders[Consts.headerRateReset])

        if Consts.headerOAuthScopes in responseHeaders:
            self.oauth_scopes = responseHeaders[Consts.headerOAuthScopes].split(", ")

        self.DEBUG_ON_RESPONSE(status, responseHeaders, output)

        return status, responseHeaders, output

    def __requestRaw(self, cnx, verb, url, requestHeaders, input):
        original_cnx = cnx
        if cnx is None:
            cnx = self.__createConnection()
        cnx.request(verb, url, input, requestHeaders)
        response = cnx.getresponse()

        status = response.status
        responseHeaders = dict((k.lower(), v) for k, v in response.getheaders())
        output = response.read()

        cnx.close()
        if input:
            if isinstance(input, IOBase):
                input.close()
        self.__log(verb, url, requestHeaders, input, status, responseHeaders, output)

        if status == 202 and (
            verb == "GET" or verb == "HEAD"
        ):  # only for requests that are considered 'safe' in RFC 2616
            time.sleep(Consts.PROCESSING_202_WAIT_TIME)
            return self.__requestRaw(original_cnx, verb, url, requestHeaders, input)

        if status == 301 and "location" in responseHeaders:
            o = urllib.parse.urlparse(responseHeaders["location"])
            return self.__requestRaw(original_cnx, verb, o.path, requestHeaders, input)

        return status, responseHeaders, output

    def __authenticate(self, url, requestHeaders, parameters):
        if self.__clientId and self.__clientSecret and "client_id=" not in url:
            parameters["client_id"] = self.__clientId
            parameters["client_secret"] = self.__clientSecret
        if self.__authorizationHeader is not None:
            requestHeaders["Authorization"] = self.__authorizationHeader

    def __makeAbsoluteUrl(self, url):
        # URLs generated locally will be relative to __base_url
        # URLs returned from the server will start with __base_url
        if url.startswith("/"):
            url = self.__prefix + url
        else:
            o = urllib.parse.urlparse(url)
            assert o.hostname in [
                self.__hostname,
                "uploads.github.com",
                "status.github.com",
                "github.com",
            ], o.hostname
            assert o.path.startswith((self.__prefix, "/api/"))
            assert o.port == self.__port
            url = o.path
            if o.query != "":
                url += "?" + o.query
        return url

    def __addParametersToUrl(self, url, parameters):
        if len(parameters) == 0:
            return url
        else:
            return url + "?" + urllib.parse.urlencode(parameters)

    def __createConnection(self):
        kwds = {}
        kwds["timeout"] = self.__timeout
        kwds["verify"] = self.__verify
        if self.__persist and self.__connection is not None:
            return self.__connection
        self.__connection = self.__connectionClass(
            self.__hostname, self.__port, retry=self.__retry, **kwds
        )
        return self.__connection

    def __log(self, verb, url, requestHeaders, input, status, responseHeaders, output):
        if self.__logger is None:
            self.__logger = logging.getLogger(__name__)
        if self.__logger.isEnabledFor(logging.DEBUG):
            if "Authorization" in requestHeaders:
                if requestHeaders["Authorization"].startswith("Basic"):
                    requestHeaders[
                        "Authorization"
                    ] = "Basic (login and password removed)"
                elif requestHeaders["Authorization"].startswith("token"):
                    requestHeaders["Authorization"] = "token (oauth token removed)"
                elif requestHeaders["Authorization"].startswith("Bearer"):
                    requestHeaders["Authorization"] = "Bearer (jwt removed)"
                else:  # pragma no cover (Cannot happen, but could if we add an authentication method => be prepared)
                    requestHeaders[
                        "Authorization"
                    ] = "(unknown auth removed)"  # pragma no cover (Cannot happen, but could if we add an authentication method => be prepared)
            self.__logger.debug(
                "%s %s://%s%s %s %s ==> %i %s %s",
                verb,
                self.__scheme,
                self.__hostname,
                url,
                requestHeaders,
                input,
                status,
                responseHeaders,
                output,
            )
Vagab0nd/SiCKRAGE
lib3/github/Requester.py
Python
gpl-3.0
23,546
[ "Brian" ]
c9e38b4a0d66b5525173a935cbe20441d8a26f493903b8156f508e5409a6b7b7
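Requester is internal plumbing; a hedged sketch of reaching it through PyGithub's public entry point follows (the access token is a placeholder):

from github import Github  # public API that constructs a Requester internally

gh = Github("<personal-access-token>", per_page=50)  # placeholder credentials
repo = gh.get_repo("PyGithub/PyGithub")
print(repo.full_name)
print(gh.rate_limiting)  # (remaining, limit) tuple maintained by the Requester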
# -*- coding: utf-8 -*- { '(Recipient)': '(Empfänger)', "'Cancel' will indicate an asset log entry did not occur": "'Abbrechen' zeigt an, dass ein Asset Log Eintrag nicht eingetreten ist", "A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'Eine Position, die den geografischen Bereich für diese Region definiert. Dies kann ein Standort aus der Standorthierarchie, oder ein Gruppenstandort, oder ein Standort mit Grenzbereich sein.', "Acronym of the organization's name, eg. IFRC.": 'Abkürzung des Organisationsnamen, z. B. IFRC.', "Authenticate system's Twitter account": 'Authentifizierung für den Twitter Account des Systems', "Can't import tweepy": 'Tweepy kann nicht importiert werden', "Caution: doesn't respect the framework rules!": 'Achtung: Die Rahmenbedingungen des Frameworks werden nicht beachtet!', "Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "Formatieren Sie die Liste der Attributwerte und die RGB-Wert zur Verwendung dieser als ein JSON-Objekt, z. B.: {Rot: '#FF0000 ', grün: '#00FF00 ', gelb: '#FFFF00 '}", "If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": 'Wenn ausgewählt, wird der Ort dieser Anlage immer aktualisiert, sobald der Standort der Person aktualisiert wird.', "If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'Wenn diese Konfiguration einen Bereich für die Regionenauswahl repräsentiert, geben Sie einen Namen für die Verwendung in der Auswahl. Der Name für eine persönliche Kartenkonfiguration wird mit dem Namen des Benutzers festgelegt.', "If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'Wenn dieses Feld ausgefüllt ist, dann wird ein Benutzer, der diese Organisation definiert, automatisch als Mitarbeiter dieser Organisation zugeordnet sobald er sich anmeldet, ausgenommen die Domäne stimmt nicht mit dem Domänenfeld überein.', "If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'Wenn dies angekreuzt ist, wird es die Basisposition des Benutzers und dadurch gesteuert wo der Benutzer auf der Karte angezeigt wird.', "If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": "Wenn sie das Krankenhaus nicht in der Liste finden, können Sie ein neues hinzufügen, indem sie den Link 'Krankenhaus hinzufügen' anklicken.", "If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": "Wenn sie das Büro nicht in der Liste finden, können Sie ein neues hinzufügen, indem sie den Link 'Büro hinzufügen' anklicken.", "If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": 'Wenn sie die Organisation nicht in der Liste sehen, dann können sie eine neue hinzufügen indem sie auf den Link "Organisation hinzufügen" klicken.', "Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. 
Click the link on the right to go to this page.": 'Anstelle der automatischen Synchronisation von anderen Peers über das Netz, können sie auch über Dateien synchronisieren, was nötig ist, wenn kein Netzwerk vorhanden ist. Sie können diese Seite verwenden um Sync Daten aus Dateien zu importieren and auch um Daten in Form von Sync Dateien zu exportieren. Ein Klick auf den Link rechts bringt Sie zu dieser Seite.', "Level is higher than parent's": 'Die Stufe ist höher als das übergeordnete Element', "Need a 'url' argument!": "Braucht eine 'url' als Argument!", "Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "Optional. Der Name der Geometrie-Spalte. In PostGIS ist der Standardwert 'the_geom'.", "Parent level should be higher than this record's level. Parent level is": 'Übergeordnete Ebene muss höher als dieser Eintrag. Die Stufe seines Eltern Elements ist', "Password fields don't match": 'Kennwortfelder stimmer nicht überein', "Phone number to donate to this organization's relief efforts.": 'Telefonnummer für Spenden an diese Nothilfeorganisation.', "Please come back after sometime if that doesn't help.": 'Wenn das nicht hilft, kommen Sie nach einiger Zeit bitte wieder.', "Quantity in %s's Inventory": "Menge in %s's Bestand", "Select a Room from the list or click 'Create Room'": "Wählen Sie einen Raum aus der Liste oder klicken Sie auf 'Raum hinzufügen'", "Select a person in charge for status 'assigned'": 'Wählen Sie eine verantwortliche Person aus für den Status "zugeordnet"', "Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "Wählen Sie diese Option, wenn alle speziellen administrativen Zuständigkeitsbereiche auf der untersten Hierarchieebene einen übergeordneten Zuständigkeitsbereich brauchen. Beispiel: Wenn 'district' der kleinste Bereich in der Hierarchie ist, dann müssen alle speziellen Bereiche einen 'district' als übergeordnetes Element haben.", "Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'Wählen Sie diese Option, wenn alle speziellen administrativen Zuständigkeitsbereiche einen übergeordneten Zuständigkeitsbereich in der Gebietshierarchie brauchen. Es kann dabei hilfreich sein eine "region" festzulegen, die den betroffenen Bereich repräsentiert.', "Sorry, things didn't get done on time.": 'Leider konnten die Aufgaben nicht rechtzeitig ausgeführt werden.', "Sorry, we couldn't find that page.": 'Leider konnte diese Seite nicht gefunden werden.', "System's Twitter account updated": 'Der Twitter Account des Systems wurde aktualisiert', "The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "Die Spender für dieses Projekt. Mehrere Werte können durch Halten der 'Steuerungstaste' (Strg / Ctrl) ausgewählt werden.", "The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'Die URL der Bilddatei. Wenn Sie keine Grafikdatei hochladen, dann müssen Sie hier eine URL angeben.', "To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. 
Press 'Search' without input to list all persons.": "Um nach einem Namen zu suchen, geben Sie durch Leerzeichen getrennt beliebig den Vor-, Mittel- oder Nachnamen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne eine Eingabe führt zur Auflistung aller Personen.", "To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "Um nach einem Körper zu suchen, geben Sie die Identifikationsmarken-Nummer des Körpers ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Körper.", "To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Für die Suche nach einem Krankenhaus, geben sie entweder den Namen, die ID, den Organisationsnamen oder ein Acronym jeweils getrennt durch Leerzeichen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Krankenhäuser.", "To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Für die Suche nach einem Krankenhaus, geben Sie Namen oder die ID des Krankenhauses getrennt durch Leerzeichen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Krankenhäuser.", "To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "Um einen Ort zu suchen, geben Sie den Namen ein. Sie können % als Wildcard verwenden. Die Auswahl von Drücken 'Suchen' ohne Eingabe führt zur Auflistung aller Orte.", "To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Um nach einer Person zu suchen, geben Sie durch Leerzeichen getrennt beliebig den Vor-, Mittel- oder Nachnamen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne eine Eingabe führt zur Auflistung aller Personen.", "To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": "Für die Suche nach einer Bewertung, geben Sie einen beliebigen Teil der Ticketnummer der Bewertung ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Bewertungen.", "Type the first few characters of one of the Person's names.": 'Geben Sie die ersten paar Zeichen des Namens einer Person ein.', "Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'Laden Sie hier die Grafikdatei hoch. Wenn sie keine Grafikdatei hochladen, dann müssen Sie im Feld eine URL auf eine im Web verfügbare Grafikdatei angeben.', "When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. 
In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": 'Beim Synchronisieren der Daten mit anderen Installationen, können Konflikte auftreten wenn beide (oder mehrere) Parteien die gleichen Daten geändert haben, d. h. widersprüchliche Informationen vorliegen. Das Synchronisationsmodul versucht solche Konflikte automatisch zu beheben, was jedoch in manchen Fällen nicht möglich ist. In solchen Fällen ist es Ihre Aufgabe, diese Konflikte manuell zu beheben; klicken Sie auf den rechten Link, um auf diese Seite zu gelangen.', "You haven't made any calculations": 'Sie haben keine Brechnungen gemacht', "couldn't be parsed so NetworkLinks not followed.": 'konnte nicht interpretiert so dass Netzwerklinks nicht verfolgt werden.', "includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'Enthält ein GroundOverlay oder ScreenOverlay die in OpenLayers noch nicht unterstützt werden, es wird möglicherweise nicht richtig funktionieren.', '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" ist ein optionaler Ausdruck wie "field1=\'newvalue\'\\ ". Sie können die Ergebnisse eines JOINs nicht aktualisieren oder löschen.', '# of International Staff': '# der internationalen Mitarbeiter', '# of National Staff': '# der nationalen Mitarbeiter', '# of Vehicles': '# der Fahrzeuge', '%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\n Wenn der Typ des Requests "%(type)s" ist, geben Sie die %(type)s bitte auf der nächsten Seite ein.', '%(system_name)s - Verify Email': '%(system_name)s - Email überprüfen', '%s rows deleted': '%s gelöschte Zeilen', '%s rows updated': '%s Zeilen aktualisiert', '& then click on the map below to adjust the Lat/Lon fields': '& anschließend klicken Sie auf die Karte weiter unten um die Längen- und Breitengradwerte zu korrigieren', '* Required Fields': '* erforderliche Felder', '0-15 minutes': '0 - 15 Minuten', '1 Assessment': '1 Bewertung', '1 location, shorter time, can contain multiple Tasks': '1 Position, kürzere Zeit, kann mehrere Aufgaben beinhalten', '1-3 days': '1-3 Tage', '15-30 minutes': '15-30 Minuten', '2 different options are provided here currently:': '2 verschiedene Optionen stehen hier derzeit zur Verfügung:', '2x4 Car': 'Fahrzeug mit einer Antriebsachse', '30-60 minutes': '30-60 Minuten', '4-7 days': '4-7 Tage', '4x4 Car': 'Allradfahrzeug', '8-14 days': '8-14 Tage', '3W': 'Wer? Was? Wo?', 'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'Es kann eine Zuordnung eines Symbol zu einer individuellen Position erfolgen, um damit die Symbolisierung der Objektklasse zu überschreiben.', 'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'Ein Referenzdokument wie z. B. eine Datei, URL oder Ansprechpartner zur Überprüfung dieser Daten. 
Sie können die ersten Zeichen eines vorhandenen Dokumentnamens eingeben, um dieses zu referenzieren.', 'A brief description of the group (optional)': 'Eine kurze Beschreibung der Gruppe (optional)', 'A catalog of different Assessment Templates including summary information': 'Ein Katalog von verschiedenen Beurteilungsvorlagen inklusive einer Zusammenfassung', 'A file downloaded from a GPS containing a series of geographic points in XML format.': 'Eine Datei von einem GPS-Gerät, die eine Reihe von geographischen Positionen im XML-Format enthält.', 'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Eine Datei im GPX-Format aus einem GPS-Gerät, deren Zeitstempel genutzt werden können, um sie mit den Zeitstempeln von Fotos zu verknüpfen und diese dann auf einer Karte darzustellen.', 'A library of digital resources, such as photos, documents and reports': 'Eine Bibliothek von digitalen Ressourcen, wie z. B. Fotos, Dokumente und Berichte', 'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': 'Eine Gebietsgruppe kann verwendet werden, um den Bereich eines betroffenen Gebietes zu definieren, falls dieses nicht mit einer vorhandenen administrativen Einheit zusammenfällt.', 'A location group is a set of locations (often, a set of administrative regions representing a combined area).': 'Eine Gebietsgruppe besteht aus mehreren Gebieten (häufig eine Gruppe von Verwaltungsregionen, die einen eigenen Zuständigkeitsbereich bilden).', 'A location group must have at least one member.': 'Eine Gebietsgruppe muss mindestens ein Element beinhalten.', 'ABOUT THIS MODULE': 'ÜBER DIESES MODUL', 'ACCESS DATA': 'ZUGRIFFSDATEN', 'Actioning officer': 'Verantwortliche Person', 'ANY': 'Irgendwelche', 'API is documented here': 'Die API ist hier dokumentiert', 'ATC-20 Rapid Evaluation modified for New Zealand': 'ATC-20 Schnelle Evaluierung - angepasst für Neuseeland', 'Abbreviation': 'Abkürzung', 'Ability to Fill Out Surveys': 'Möglichkeit Umfragen auszufüllen', 'Ability to customize the list of details tracked at a Shelter': 'Möglichkeit die Liste der Detailangaben zu einer Unterkunft anzupassen', 'Ability to customize the list of human resource tracked at a Shelter': 'Möglichkeit die Liste der menschlichen Ressourcen einer Unterkunft anzupassen', 'Ability to customize the list of important facilities needed at a Shelter': 'Möglichkeit die Liste mit den wichtigen Einrichtungen, die in einer Unterkunft benötigt werden, anzupassen', 'Ability to view Results of Completed and/or partially filled out Surveys': 'Möglichkeit, die Ergebnisse von abgeschlossenen und/oder teilweise ausgefüllten Umfragen einzusehen', 'About': 'Über', 'About Us': 'Über uns', 'Accept Push': 'Akzeptiert Push', 'Access denied': 'Zugriff verweigert', 'Access to Shelter': 'Zugang zu Unterkünften', 'Access to education services': 'Zugang zu Ausbildungsdienstleistungen', 'Accessibility of Affected Location': 'Erreichbarkeit der betroffenen Region', 'Account Registered - Please Check Your Email': 'Benutzerkonto registriert - Bitte überprüfen Sie Ihre E-Mail', 'Account SID': 'SID des Accounts', 'Acronym': 'Akronym', 'Actionable by all targeted recipients': 'Bearbeitbar von allen adressierten Empfängern', 'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Bearbeitbar nur von bestimmten Übungsteilnehmern; Übungsidentifikator sollte unter <note>
auftauchen', 'Actioned?': 'Bearbeitet?', 'Actions taken as a result of this request.': 'Als Ergebnis auf diese Anfrage gestartete Aktionen.', 'Actions': 'Aktionen', 'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': 'Aktivieren Sie Ereignisse aus den Szenario-Vorlagen, um die passenden Ressourcen zuzuordnen (Menschen, Anlagen und Einrichtungen).', 'Active Problems': 'Aktive Probleme', 'Active': 'Aktiv', 'Activities matching Assessments': 'Aktivitäten passend zur Beurteilung', 'Activities of boys 13-17yrs before disaster': 'Aktivitäten von Jungen im Alter zwischen 13-17 Jahren vor der Katastrophe', 'Activities of boys 13-17yrs now': 'Aktivitäten von Jungen im Alter zwischen 13-17 Jahren heute', 'Activities of boys <12yrs before disaster': 'Aktivitäten von Jungen unter 12 Jahren vor der Katastrophe', 'Activities of boys <12yrs now': 'Aktivitäten von Jungen unter 12 Jahren heute', 'Activities of children': 'Aktivitäten von Kindern', 'Activities of girls 13-17yrs before disaster': 'Aktivitäten von Mädchen im Alter von 13-17 Jahren vor der Katastrophe', 'Activities of girls 13-17yrs now': 'Aktivitäten von Mädchen im Alter von 13-17 Jahren heute', 'Activities of girls <12yrs before disaster': 'Aktivitäten von Mädchen unter 12 Jahren vor der Katastrophe', 'Activities of girls <12yrs now': 'Aktivitäten von Mädchen unter 12 Jahren heute', 'Activities': 'Aktivitäten', 'Activity Added': 'Aktivität hinzugefügt', 'Activity Deleted': 'Aktivität gelöscht', 'Activity Details': 'Details zur Aktivität', 'Activity Report': 'Bericht zur Aktivität', 'Activity Reports': 'Berichte zu Aktivitäten', 'Activity Type': 'Typ der Aktivität', 'Activity Types': 'Typen von Aktivität', 'Activity Updated': 'Aktivität aktualisiert', 'Activity': 'Aktivität', 'Add Activity Type': 'Aktivitätstyp hinzufügen', 'Add Address': 'Adresse hinzufügen', 'Add Alternative Item': 'Alternativen Artikel hinzufügen', 'Add Assessment Summary': 'Zusammenfassung der Beurteilung hinzufügen', 'Add Assessment': 'Beurteilung hinzufügen', 'Add Asset Log Entry - Change Label': 'Anlage Protokolleintrag hinzufügen - Beschriftung ändern', 'Add Availability': 'Verfügbarkeit hinzufügen', 'Add Baseline Type': 'Basislinien-Typ hinzufügen', 'Add Baseline': 'Basislinie hinzufügen', 'Add Bundle': 'Paket hinzufügen', 'Add Camp Service': 'Camp-Dienst hinzufügen', 'Add Camp Type': 'Camp Typ hinzufügen', 'Add Camp': 'Camp hinzufügen', 'Add Certificate for Course': 'Zertifikat für Kurs hinzufügen', 'Add Certification': 'Zertifizierung hinzufügen', 'Add Competency': 'Qualifikation hinzufügen', 'Add Contact': 'Kontaktperson hinzufügen', 'Add Contact Information': 'Kontaktinformation hinzufügen', 'Add Credential': 'Qualifikation hinzufügen', 'Add Credentials': 'Qualifikationen hinzufügen', 'Add Disaster Victims': 'Katastrophenopfer hinzufügen', 'Add Distribution.': 'Verteilung hinzufügen.', 'Add Donor': 'Spender hinzufügen', 'Add Flood Report': 'Flutbericht hinzufügen', 'Add Group Member': 'Gruppenmitglied hinzufügen', 'Add Human Resource': 'Personal hinzufügen', 'Add Identity': 'Identität hinzufügen', 'Add Image': 'Bild hinzufügen', 'Add Impact Type': 'Auswirkungstyp hinzufügen', 'Add Impact': 'Auswirkung hinzufügen', 'Add Item to Catalog': 'Artikel zum Katalog hinzufügen', 'Add Item to Commitment': 'Eintrag zur Zusage hinzufügen', 'Add Item to Inventory': 'Artikel zum Inventar hinzufügen', 'Add Item to Request': 'Artikel zur Anforderung hinzufügen', 'Add Item to Shipment': 'Artikel der Lieferung
hinzufügen', 'Add Item': 'Artikel hinzufügen', 'Add Job Role': 'Tätigkeit hinzufügen', 'Add Key': 'Schlüssel hinzufügen', 'Add Kit': 'Ausstattung (Kit) hinzufügen', 'Add Layer to this Profile': 'Kartenebene zu diesem Profil hinzufügen', 'Add Level 1 Assessment': 'Stufe 1 Beurteilung hinzufügen', 'Add Level 2 Assessment': 'Stufe 2 Beurteilung hinzufügen', 'Add Log Entry': 'Protokolleintrag hinzufügen', 'Add Member': 'Mitglied hinzufügen', 'Add Membership': 'Mitgliedschaft hinzufügen', 'Add Message': 'Nachricht hinzufügen', 'Add Mission': 'Auftrag hinzufügen', 'Add Mobile Commons Settings': 'Mobile Commons Einstellungen hinzufügen', 'Add Need Type': 'Bedarfstyp hinzufügen', 'Add Need': 'Bedarf hinzufügen', 'Add New Assessment Summary': 'Neue Beurteilungsbeschreibung hinzufügen', 'Add New Baseline Type': 'Einen neuen Grundlinientyp hinzufügen', 'Add New Baseline': 'Eine neue Grundlinie hinzufügen', 'Add New Budget': 'Ein neues Budget hinzufügen', 'Add New Bundle': 'Ein neues Paket hinzufügen', 'Add New Camp Service': 'Neuen Camp Service hinzufügen', 'Add New Camp Type': 'Neuen Camp Typ hinzufügen', 'Add New Camp': 'Neues Camp hinzufügen', 'Add New Cluster Subsector': 'Neuen Cluster Unterbereich hinzufügen', 'Add New Cluster': 'Neuen Cluster hinzufügen', 'Add New Commitment Item': 'Zugesagten Artikel hinzufügen', 'Add New Document': 'Neues Dokument hinzufügen', 'Add New Donor': 'Neuen Spender hinzufügen', 'Add New Entry': 'Neuen Eintrag hinzufügen', 'Add New Event': 'Neues Ereignis hinzufügen', 'Add New Flood Report': 'Neuen Flutbericht hinzufügen', 'Add New Human Resource': 'Neue Human Resource hinzufügen', 'Add New Image': 'Neue Grafik hinzufügen', 'Add New Impact Type': 'Neuen Auswirkungstyp hinzufügen', 'Add New Impact': 'Neue Auswirkung hinzufügen', 'Add New Item to Kit': 'Neuen Artikel zur Ausstattung (Kit) hinzufügen', 'Add New Key': 'Neuen Schlüssel hinzufügen', 'Add New Level 1 Assessment': 'Neue Stufe 1 Beurteilung hinzufügen', 'Add New Level 2 Assessment': 'Neue Stufe 2 Beurteilung hinzufügen', 'Add New Member': 'Neues Mitglied hinzufügen', 'Add New Membership': 'Neue Mitgliedschaft hinzufügen', 'Add New Need Type': 'Neuen Bedarfstyp hinzufügen', 'Add New Need': 'Neuen Bedarf hinzufügen', 'Add New Population Statistic': 'Neue Bevölkerungsstatistik hinzufügen', 'Add New Problem': 'Neues Problem hinzufügen', 'Add New Rapid Assessment': 'Neue Schnell-Beurteilung hinzufügen', 'Add New Received Item': 'Neuen erhaltenen Artikel hinzufügen', 'Add New Record': 'Neuen Datensatz hinzufügen', 'Add New Request Item': 'Neuen Anfrageartikel hinzufügen', 'Add New Request': 'Neue Anfrage hinzufügen', 'Add New River': 'Neuen Fluss hinzufügen', 'Add New Role to User': 'Benutzer eine neue Rolle zuweisen', 'Add New Scenario': 'Neues Szenario hinzufügen', 'Add New Sent Item': 'Neuen gesendeten Artikel hinzufügen', 'Add New Setting': 'Neue Einstellung hinzufügen', 'Add New Solution': 'Neue Lösung hinzufügen', 'Add New Staff Type': 'Neuen Mitarbeitertyp hinzufügen', 'Add New Subsector': 'Neuen Teilbereich hinzufügen', 'Add New Survey Answer': 'Neue Antwort zur Umfrage hinzufügen', 'Add New Survey Question': 'Neue Frage zur Umfrage hinzufügen', 'Add New Survey Series': 'Neue Umfrageserie hinzufügen', 'Add New Survey Template': 'Neue Umfragevorlage hinzufügen', 'Add New Team': 'Neues Team hinzufügen', 'Add New Ticket': 'Neues Ticket hinzufügen', 'Add New Track': 'Neuen Pfad hinzufügen', 'Add New User to Role': 'Neuen Benutzer der Rolle hinzufügen', 'Add New': 'Neu hinzufügen', 'Add Organization Domain':
'Organisationsdomain hinzufügen', 'Add Peer': 'Peer-Zugriffspunkt hinzufügen', 'Add Person': 'Person hinzufügen', 'Add Photo': 'Foto hinzufügen', 'Add PoI': 'PoI hinzufügen', 'Add Population Statistic': 'Neue Bevölkerungsstatistik hinzufügen', 'Add Position': 'Position hinzufügen', 'Add Problem': 'Problem hinzufügen', 'Add Question': 'Frage hinzufügen', 'Add Rapid Assessment': 'Schnell-Beurteilung hinzufügen', 'Add Record': 'Datensatz hinzufügen', 'Add Reference Document': 'Referenzdokument hinzufügen', 'Add Report': 'Bericht hinzufügen', 'Add Request': 'Anfrage hinzufügen', 'Add Section': 'Abschnitt hinzufügen', 'Add Setting': 'Einstellung hinzufügen', 'Add Skill': 'Fähigkeit hinzufügen', 'Add Skill Equivalence': 'Fähigkeitsäquivalenz hinzufügen', 'Add Skill Provision': 'Fähigkeitsbestimmung hinzufügen', 'Add Solution': 'Lösung hinzufügen', 'Add Staff Type': 'Mitarbeitertyp hinzufügen', 'Add Subscription': 'Abonnement hinzufügen', 'Add Subsector': 'Teilbereich hinzufügen', 'Add Survey Answer': 'Umfrageantwort hinzufügen', 'Add Survey Question': 'Umfragefrage hinzufügen', 'Add Survey Series': 'Umfrageserie hinzufügen', 'Add Survey Template': 'Umfragevorlage hinzufügen', 'Add Team Member': 'Teammitglied hinzufügen', 'Add Team': 'Team hinzufügen', 'Add Ticket': 'Ticket hinzufügen', 'Add to Bin': 'Zum Lagerbehälter hinzufügen', 'Add Training': 'Schulung hinzufügen', 'Add Twilio Channel': 'Twilio Kanal hinzufügen', 'Add Twitter Channel': 'Twitter Kanal hinzufügen', 'Add Unit': 'Einheit hinzufügen', 'Add Vehicle': 'Fahrzeug hinzufügen', 'Add Vehicle Type': 'Fahrzeugtyp hinzufügen', 'Add Volunteer Availability': 'Verfügbarkeit von Freiwilligen hinzufügen', 'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Fügen Sie ein Referenzdokument, z. B. eine Datei, URL oder einen Ansprechpartner, zur Überprüfung dieser Daten ein.
Wenn Sie kein Referenzdokument angeben, wird stattdessen Ihre E-Mail-Adresse angezeigt.', 'Add a Volunteer': 'Einen Freiwilligen hinzufügen', 'Add a new certificate to the catalog.': 'Neues Zertifikat zum Katalog hinzufügen.', 'Add a new competency rating to the catalog.': 'Neue Kompetenzeinstufung zum Katalog hinzufügen', 'Add a new course to the catalog.': 'Neuen Kurs zum Katalog hinzufügen', 'Add a new job role to the catalog.': 'Neue Tätigkeit zum Katalog hinzufügen', 'Add a new skill provision to the catalog.': 'Neue Bereitstellung einer Fähigkeit zum Katalog hinzufügen', 'Add a new skill to the catalog.': 'Neue Fähigkeit zum Katalog hinzufügen', 'Add a new skill type to the catalog.': 'Neue Fähigkeitsart zum Katalog hinzufügen.', 'Add new Group': 'Neue Gruppe hinzufügen', 'Add new Individual': 'Neues Individuum hinzufügen', 'Add new project.': 'Neues Projekt hinzufügen.', 'Add staff members': 'Mitarbeiter hinzufügen', 'Add strings manually': 'Texte händisch hinzufügen', 'Add to Bundle': 'Zu Paket hinzufügen', 'Add to budget': 'Zum Budget hinzufügen', 'Add volunteers': 'Freiwillige hinzufügen', 'Add': 'Hinzufügen', 'Add/Edit/Remove Layers': 'Hinzufügen/Bearbeiten/Entfernen von Kartenebenen', 'Added to Group': 'Zur Gruppe hinzugefügt', 'Added to Team': 'Zum Team hinzugefügt', 'Additional Beds / 24hrs': 'Zusätzliche Betten / 24 Std.', 'Address Details': 'Details zur Adresse', 'Address Type': 'Typ der Adresse', 'Address added': 'Adresse hinzugefügt', 'Address deleted': 'Adresse gelöscht', 'Address updated': 'Adresse aktualisiert', 'Address': 'Adresse', 'Addresses': 'Adressen', 'Adequate food and water available': 'Angemessene Nahrung und Wasser verfügbar', 'Adequate': 'Angemessen', 'Adjust Stock Levels': 'Anpassen der Lagerbestände', 'Adjust Stock': 'Anpassen des Bestands', 'Admin': 'Administration', 'Admin Email': 'E-Mail des Administrators', 'Admin Name': 'Name des Administrators', 'Admin Tel': 'Telefonnummer des Administrators', 'Administration': 'Administration', 'Administrative support cost': 'Kosten für administrative Unterstützung', 'Admissions/24hrs': 'Aufnahmen / 24 Std.', 'Adolescent (12-20)': 'Heranwachsende (12-20)', 'Adolescent participating in coping activities': 'Heranwachsende, die an Bewältigungsaktivitäten teilnehmen', 'Adult (21-50)': 'Erwachsene (21-50)', 'Adult ICU': 'Intensivstation für Erwachsene', 'Adult Psychiatric': 'Psychiatrie (Erwachsene)', 'Adult female': 'Erwachsener - weiblich', 'Adult male': 'Erwachsener - männlich', 'Adults in prisons': 'Erwachsene in Gefängnissen', 'Advanced': 'Erweitert', 'Advanced Javascript Layers': 'Erweiterte JavaScript-Kartenebenen', 'Advisory': 'Beratend', 'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'Nach einem Klick auf den Button wird ein Satz von gekoppelten Elementen nacheinander gezeigt.
Bitte wählen Sie diejenige Lösung aus jedem Paar, die Sie gegenüber der anderen bevorzugen.', 'Age': 'Alter', 'Age Group': 'Altersgruppe', 'Age group does not match actual age.': 'Altersgruppe passt nicht zum tatsächlichen Alter.', 'Age group': 'Altersgruppe', 'Aggravating factors': 'Erschwerende Faktoren', 'Aggregate': 'Zusammenstellung', 'Agriculture': 'Landwirtschaft', 'Air Transport Service': 'Lufttransportservice', 'Aircraft Crash': 'Flugzeugabsturz', 'Aircraft Hijacking': 'Flugzeugentführung', 'Aircraft Maximum Size': 'Maximale Größe des Flugzeugs', 'Airports': 'Flughäfen', 'Airport Closure': 'Flughafenschließung', 'Airspace Closure': 'Luftraumsperrung', 'Alcohol': 'Alkohol', 'All Inbound & Outbound Messages are stored here': 'Alle eingehenden und abgehenden Nachrichten werden hier gespeichert', 'All Resources': 'Alle Ressourcen', 'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'Alle von der Sahana Software Foundation bereitgestellten Daten dieser Seite sind unter der Creative Commons Attribution Lizenz lizenziert. Es stammen jedoch nicht alle Daten von hier. Bitte beachten Sie das Quellen-Feld des jeweiligen Eintrags.', 'All': 'Alles', 'All Records': 'Alle Datensätze', 'Allocate Group': 'Gruppe zuweisen', 'Allowed to push': 'Push erlaubt', 'Allows a Budget to be drawn up': 'Ermöglicht es, ein Budget aufzustellen.', 'Allows authorized users to control which layers are available to the situation map.': 'Erlaubt berechtigten Benutzern zu steuern, welche Kartenebenen auf der Lagekarte verfügbar sind.', 'Alternative Item Details': 'Details zum alternativen Artikel', 'Alternative Item added': 'Alternativer Artikel hinzugefügt', 'Alternative Item deleted': 'Alternativer Artikel gelöscht', 'Alternative Item updated': 'Alternativer Artikel aktualisiert', 'Alternative Item': 'Alternativer Artikel', 'Alternative Items': 'Alternative Artikel', 'Alternative places for studying': 'Alternative Orte für das Studium', 'Ambulance Service': 'Ambulanter Krankendienst', 'An Assessment Template can be selected to create a Disaster Assessment. Within a Disaster Assessment, responses can be collected and results can analyzed as tables, charts and maps': 'Es kann eine Beurteilungsvorlage zur Erstellung einer Katastrophenbeurteilung ausgewählt werden.
Innerhalb der Katastrophenbeurteilung können Antworten gesammelt und Ergebnisse in Form von Tabellen, Grafiken und Karten erzeugt werden.', 'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'Ein Aufnahmesystem, ein Warenhausmanagementsystem, Warenlieferungsverfolgung, Versorgungskettenmanagement, Beschaffung und andere Anlagen- und Ressourcenverwaltungsfunktionen.', 'An item which can be used in place of another item': 'Ein Artikel, der anstatt eines anderen Artikels verwendet werden kann', 'Analysis of Completed Surveys': 'Analyse von abgeschlossenen Umfragen', 'Animal Die Off': 'Tiersterben', 'Animal Feed': 'Tierfutter', 'Anthropology': 'Anthropologie', 'Antibiotics available': 'Antibiotika verfügbar', 'Antibiotics needed per 24h': 'Menge an Antibiotika, die pro 24h benötigt wird', 'Apparent Age': 'Offensichtliches Alter', 'Apparent Gender': 'Offensichtliches Geschlecht', 'Application': 'Anwendung', 'Application Deadline': 'Bewerbungsfrist', 'Application Permissions': 'Anwendungsberechtigungen', 'Approve': 'Bestätigen', 'Approved': 'Bestätigt', 'Approver': 'Bestätigende Stelle', 'Arctic Outflow': 'Arktischer Kaltluftausbruch', 'Areas inspected': 'Untersuchte Gebiete', 'Assessment Details': 'Details zur Beurteilung', 'Assessment Reported': 'Beurteilung gemeldet', 'Assessment Summaries': 'Zusammenfassungen der Beurteilung', 'Assessment Summary Details': 'Details zur Zusammenfassung der Beurteilung', 'Assessment Summary added': 'Zusammenfassung der Beurteilung hinzugefügt', 'Assessment Summary deleted': 'Zusammenfassung der Beurteilung gelöscht', 'Assessment Summary updated': 'Zusammenfassung der Beurteilung aktualisiert', 'Assessment added': 'Beurteilung hinzugefügt', 'Assessment admin level': 'Admin Ebene zur Beurteilung', 'Assessment deleted': 'Beurteilung gelöscht', 'Assessment timeline': 'Beurteilungszeitachse', 'Assessment updated': 'Beurteilung aktualisiert', 'Assessment': 'Beurteilung', 'Assessment Templates': 'Beurteilungsvorlagen', 'Assessments Needs vs. Activities': 'Bedarf aus Beurteilungen vs. Aktivitäten', 'Assessments and Activities': 'Beurteilungen und Aktivitäten', 'Assessments': 'Beurteilungen', 'Assessor': 'Beurteilender', 'Asset Details': 'Details zur Anlage', 'Asset Log Details': 'Anlage Protokoll Details', 'Asset Log Empty': 'Anlage Protokoll leer', 'Asset Log Entry Added - Change Label': 'Anlage Protokolleintrag hinzugefügt - Beschriftung ändern', 'Asset Log Entry deleted': 'Anlage Protokolleintrag gelöscht', 'Asset Log Entry updated': 'Anlage Protokolleintrag aktualisiert', 'Asset Management': 'Anlageverwaltung', 'Asset Number': 'Anlagenummer', 'Asset added': 'Anlage hinzugefügt', 'Asset deleted': 'Anlage gelöscht', 'Asset removed': 'Anlage entfernt', 'Asset updated': 'Anlage aktualisiert', 'Asset': 'Anlage', 'Assets are resources which are not consumable but are expected back, so they need tracking.': 'Anlagen sind Ressourcen, die nicht verbrauchbar sind, aber zurück erwartet werden, daher müssen sie nachverfolgt werden.', 'Assets': 'Anlagen', 'Assign Group': 'Gruppe zuordnen', 'Assign Staff': 'Mitarbeiter zuordnen', 'Assign to Org.': 'Der Org.
zuordnen', 'Assign to Organization': 'Der Organisation zuordnen', 'Assign to Person': 'Der Person zuordnen', 'Assign to Site': 'Dem Standort zuordnen', 'Assign': 'Zuordnen', 'Assign ': 'Zuordnung ', 'Assigned By': 'Zugeordnet von', 'Assigned To': 'Zugeordnet zu', 'Assigned to Organization': 'Zur Organisation zugeordnet', 'Assigned to Person': 'Zur Person zugeordnet', 'Assigned to Site': 'Zum Standort zugeordnet', 'Assigned to': 'Zugeordnet zu', 'Assigned': 'Zugeordnet', 'At/Visited Location (not virtual)': 'Am Ort / In Augenschein genommener Ort (nicht virtuell)', 'Attachments': 'Anhänge', 'Attend to information sources as described in <instruction>': 'Informationsquellen beachten, wie in <instruction> beschrieben', 'Attribution': 'Quellenangabe', 'Author': 'Autor', 'Availability': 'Verfügbarkeit', 'Available Alternative Inventories': 'Verfügbare alternative Bestände', 'Available Beds': 'Verfügbare Betten', 'Available Inventories': 'Verfügbare Bestände', 'Available Messages': 'Verfügbare Nachrichten', 'Available Records': 'Verfügbare Datensätze', 'Available databases and tables': 'Verfügbare Datenbanken und Tabellen', 'Available for Location': 'Verfügbar für Ort', 'Available from': 'Verfügbar von', 'Available in Viewer?': 'Verfügbar in Lagedarstellung?', 'Available until': 'Verfügbar bis', 'Avalanche': 'Lawine', 'Average': 'Durchschnitt', 'Avoid the subject event as per the <instruction>': 'Das betreffende Ereignis gemäß <instruction> meiden', 'Awards': 'Auszeichnungen', 'Background Color for Text blocks': 'Hintergrundfarbe für Textblöcke', 'Background Color': 'Hintergrundfarbe', 'Baldness': 'Kahlköpfigkeit', 'Banana': 'Banane', 'Bank/micro finance': 'Bank/Mikro Finanzierung', 'Barge Capacity': 'Frachtschiffkapazitäten', 'Barricades are needed': 'Barrikaden sind erforderlich', 'Base Layer?': 'Basis Kartenebene?', 'Base Location': 'Basis Standort/Region', 'Base Site Set': 'Basisstandort definieren', 'Baseline Data': 'Referenzdaten', 'Baseline Number of Beds': 'Referenzdatum Anzahl von Betten', 'Baseline Type Details': 'Referenzdatumstyp Details', 'Baseline Type added': 'Referenzdatumstyp hinzugefügt', 'Baseline Type deleted': 'Referenzdatumstyp gelöscht', 'Baseline Type updated': 'Referenzdatumstyp aktualisiert', 'Baseline Type': 'Referenzdatumstyp', 'Baseline Types': 'Referenzdatumstypen', 'Baseline added': 'Referenzdatum hinzugefügt', 'Baseline deleted': 'Referenzdatum gelöscht', 'Baseline number of beds of that type in this unit.': 'Referenzdatum Anzahl von Betten dieses Typs in dieser Einheit.', 'Baseline updated': 'Referenzdatum aktualisiert', 'Baselines Details': 'Referenzdaten Details', 'Baselines': 'Referenzdaten', 'Basic Assessment Reported': 'Grundlegende Beurteilung berichtet', 'Basic Assessment': 'Grundlegende Beurteilung', 'Basic Details': 'Grundlegende Details', 'Basic reports on the Shelter and drill-down by region': 'Grundlegende Berichte über Unterkunft und Drill-down nach Region', 'Baud rate to use for your modem - The default is safe for most cases': 'Baudrate für das Modem - der Standardwert ist in den meisten Fällen ausreichend', 'Beam': 'Träger', 'Bed Capacity per Unit': 'Bettenkapazität pro Einheit', 'Bed Capacity': 'Bettenkapazität', 'Bed Type': 'Bett-Typ', 'Bed type already registered': 'Bett-Typ bereits registriert', 'Below ground level': 'Unter dem Erdgeschoss', 'Beneficiaries': 'Begünstigte', 'Beneficiary': 'Begünstigter', 'Beneficiary Type': 'Typ des Begünstigten', 'Biological Hazard': 'Biologische Gefahr', 'Bin': 'Lagerbehälter', 'Biscuits': 'Kekse', 'Blizzard':
'Schneesturm', 'Blood Type (AB0)': 'Blutgruppe (AB0)', 'Blowing Snow': 'Schneetreiben', 'Boat': 'Boot', 'Bodies found': 'Leichen gefunden', 'Bodies recovered': 'Leichen geborgen', 'Body Recovery Request': 'Leichenbergungsanforderung', 'Body Recovery Requests': 'Leichenbergungsanforderungen', 'Body': 'Körper', 'Bomb Explosion': 'Bombenexplosion', 'Bomb Threat': 'Bombendrohung', 'Bomb': 'Bombe', 'Border Color for Text blocks': 'Rahmenfarbe für Textblöcke', 'Both': 'Beides', 'Brand Details': 'Details zur Marke', 'Brand added': 'Marke hinzugefügt', 'Brand deleted': 'Marke gelöscht', 'Brand updated': 'Marke aktualisiert', 'Brand': 'Marke', 'Brands': 'Marken', 'Bricks': 'Ziegelsteine', 'Bridge Closed': 'Brücke gesperrt', 'Bucket': 'Eimer', 'Budget Details': 'Details zum Budget', 'Budget Updated': 'Budget aktualisiert', 'Budget added': 'Budget hinzugefügt', 'Budget deleted': 'Budget gelöscht', 'Budget updated': 'Budget aktualisiert', 'Budget': 'Budget', 'Budgeting Module': 'Budget Modul', 'Buffer': 'Puffer', 'Bug': 'Programmfehler', 'Building Assessments': 'Gebäudebeurteilungen', 'Building Collapsed': 'Gebäude zusammengebrochen', 'Building Name': 'Name des Gebäudes', 'Building Safety Assessments': 'Bewertung Gebäudesicherheit', 'Building Short Name/Business Name': 'Gebäude Kurzname / Firmenname', 'Building or storey leaning': 'Gebäude- oder Stockwerkneigung', 'Built using the Template agreed by a group of NGOs working together as the': 'Erstellt unter Verwendung einer abgestimmten Vorlage einer Gruppe von NGOs unter dem Namen', 'Bulk Uploader': 'Upload von Massendaten', 'Bundle Contents': 'Produktpaket Inhalt', 'Bundle Details': 'Produktpaket Details', 'Bundle Updated': 'Produktpaket aktualisiert', 'Bundle added': 'Produktpaket hinzugefügt', 'Bundle deleted': 'Produktpaket gelöscht', 'Bundle updated': 'Produktpaket aktualisiert', 'Bundle': 'Produktpaket', 'Bundles': 'Produktpakete', 'Burn ICU': 'Intensivstation für Brandverletzte', 'Burn': 'Verbrennung', 'Burned/charred': 'Verbrannt / verkohlt', 'By': 'Nach', 'By Einrichtung': 'Nach Einrichtung', 'By Facility': 'Nach Einrichtung', 'By Inventory': 'Nach Bestand', 'CBA Women': 'Frauen CBA', 'CSS file %s not writable - unable to apply theme!': 'CSS Datei %s nicht beschreibbar - Motiv kann nicht angewendet werden!', 'Calculate': 'Berechnen', 'Camp Coordination/Management': 'Camp Koordinierung / Management', 'Camp Service Details': 'Details zu Camp Leistung', 'Camp Service added': 'Camp Leistung hinzugefügt', 'Camp Service deleted': 'Camp Leistung gelöscht', 'Camp Service updated': 'Leistung des Camps aktualisiert', 'Camp Services': 'Leistungen des Camps', 'Camp Type Details': 'Details zum Camp Typ', 'Camp Type added': 'Camp Typ hinzugefügt', 'Camp Type deleted': 'Camp Typ gelöscht', 'Camp Type updated': 'Camp Typ aktualisiert', 'Camp Type': 'Camp Typ', 'Camp Types and Services': 'Camp Typen und Leistungen', 'Camp Types': 'Camp Typen', 'Camp added': 'Camp hinzugefügt', 'Camp deleted': 'Camp gelöscht', 'Camp updated': 'Camp aktualisiert', 'Camp': 'Camp', 'Campaign ID': 'Kampagnen ID', 'Camps': 'Camps', 'Can only disable 1 record at a time!': 'Ein Datensatz kann nur einzeln deaktiviert werden!', 'Can read PoIs either from an OpenStreetMap file (.osm) or mirror.': 'Kann PoIs entweder aus einer OpenStreetMap-Datei (.osm) oder einem Mirror lesen.', 'Cancel': 'Abbrechen', 'Cancel Log Entry': 'Protokolleintrag abbrechen', 'Cancel Shipment': 'Lieferung stornieren', 'Canceled': 'Abgebrochen', 'Candidate Matches for Body %s': 'Mögliche Übereinstimmungen für Körper %s',
'Canned Fish': 'Fischkonserven', 'Cannot be empty': 'Darf nicht leer sein', 'Cannot disable your own account!': 'Eigenes Konto kann nicht deaktiviert werden.', 'Capacity': 'Maximale Kapazität', 'Capacity (Max Persons)': 'Kapazität (Maximale Zahl von Personen)', 'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Erfassung von Informationen über Opfergruppen einer Katastrophe (Touristen, Fahrgäste, Familien, etc.)', 'Capture Information on each disaster victim': 'Erfassung von Informationen über jedes Opfer einer Katastrophe.', 'Capturing the projects each organization is providing and where': 'Erfassen der Projekte, die von jeder Organisation bereitgestellt werden und wo', 'Cardiology': 'Kardiologie', 'Cargo Pier Depth': 'Wassertiefe Frachtpier', 'Case Number': 'Fallnummer', 'Cases': 'Fälle', 'Cassava': 'Maniok', 'Casual Labor': 'Gelegenheitsarbeit', 'Casualties': 'Todesopfer', 'Catalog Details': 'Details zum Katalog', 'Catalog Item added': 'Katalog Eintrag hinzugefügt', 'Catalog Item deleted': 'Katalog Eintrag gelöscht', 'Catalog Item updated': 'Katalog Eintrag aktualisiert', 'Catalog Items': 'Katalog Einträge', 'Catalog added': 'Katalog hinzugefügt', 'Catalog deleted': 'Katalog gelöscht', 'Catalog updated': 'Katalog aktualisiert', 'Catalog': 'Katalog', 'Catalogs': 'Kataloge', 'Categories': 'Kategorien', 'Category': 'Kategorie', 'Ceilings, light fixtures': 'Decken, Beleuchtungskörper', 'Central point to record details on People': 'Zentrale Personenregistrierungsstelle', 'Certificate Catalog': 'Zertifikatskatalog', 'Certificate Details': 'Details zum Zertifikat', 'Certificate Status': 'Status des Zertifikats', 'Certificate added': 'Zertifikat hinzugefügt', 'Certificate deleted': 'Zertifikat gelöscht', 'Certificate updated': 'Zertifikat aktualisiert', 'Certificate': 'Zertifikat', 'Certificates': 'Zertifikate', 'Certification Details': 'Zertifizierungsdetails', 'Certification added': 'Zertifizierung hinzugefügt', 'Certification deleted': 'Zertifizierung gelöscht', 'Certification updated': 'Zertifizierung aktualisiert', 'Certification': 'Zertifizierung', 'Certifications': 'Zertifizierungen', 'Certifying Organization': 'Zertifizierende Organisation', 'Change Password': 'Passwort ändern', 'Channel': 'Kanal', 'Check-in date': 'Check-In Datum', 'Check-out date': 'Check-Out Datum', 'Check Request': 'Anfrage prüfen', 'Check for errors in the URL, maybe the address was mistyped.': 'Prüfen Sie auf Fehler in der URL, vielleicht wurde die Adresse falsch eingegeben.', 'Check if the URL is pointing to a directory instead of a webpage.': 'Prüfen Sie, ob die URL auf ein Verzeichnis anstelle einer Webseite verweist.', 'Check outbox for the message status': 'Überprüfen Sie den Status der Nachricht im Nachrichtenausgang', 'Check to delete': 'Anwahl zum Löschen', 'Check': 'Prüfen', 'Checked': 'Geprüft', 'Checklist created': 'Prüfliste erstellt', 'Checklist deleted': 'Prüfliste gelöscht', 'Checklist of Operations': 'Checkliste für Operationen', 'Checklist updated': 'Prüfliste aktualisiert', 'Checklist': 'Prüfliste', 'Chemical Hazard': 'Chemische Gefahr', 'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Chemische, biologische, radiologische, nukleare oder hochexplosive Bedrohung oder Angriff', 'Chicken': 'Huhn', 'Child (2-11)': 'Kind (2-11)', 'Child (< 18 yrs)': 'Kind (< 18 Jahre)', 'Child Abduction Emergency': 'Kindesentführungs-Notfall', 'Child headed households (<18 yrs)': 'Kindgeführte Haushalte (<18 Jahre)', 'Child':
'Kind', 'Children (2-5 years)': 'Kinder (2-5 Jahre)', 'Children (5-15 years)': 'Kinder (5-15 Jahre)', 'Children (< 2 years)': 'Kinder (< 2 Jahre)', 'Children in adult prisons': 'Kinder in Gefängnissen für Erwachsene', 'Children in boarding schools': 'Kinder in Internaten', 'Children in homes for disabled children': 'Kinder in Unterkünften für behinderte Kinder', 'Children in juvenile detention': 'Kinder in Jugendstrafheimen', 'Children in orphanages': 'Kinder in Waisenhäusern', 'Children living on their own (without adults)': 'Alleinlebende Kinder (ohne Erwachsene)', 'Children not enrolled in new school': 'Kinder, die nicht in der neuen Schule registriert sind', 'Children orphaned by the disaster': 'Durch die Katastrophe verwaiste Kinder', 'Children separated from their parents/caregivers': 'Von Ihren Eltern/Betreuern getrennte Kinder', 'Children that have been sent to safe places': 'Kinder die an sichere Orte gesendet wurden', 'Children who have disappeared since the disaster': 'Kinder, die seit der Katastrophe verschwunden sind', 'Chinese (Taiwan)': 'Chinesisch (Taiwan)', 'Cholera Treatment Capability': 'Cholera Behandlungsmöglichkeiten', 'Cholera Treatment Center': 'Cholera Behandlungscenter', 'Cholera Treatment': 'Cholera Behandlung', 'Cholera-Treatment-Center': 'Cholera-Behandlung-Center', 'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': 'Wählen Sie eine neue Meldung basierend auf der neuen Bewertung und dem Teamurteil. Schwerwiegende Bedingungen, die das gesamte Gebäude betreffen, sind der Grund für eine UNSICHER-Markierung. Lokalisierte schwere und insgesamt moderate Bedingungen können möglicherweise eine EINGESCHRÄNKTE VERWENDUNG erfordern.
Platzieren Sie das GEPRÜFT-Plakat am Haupteingang. Positionieren Sie alle anderen Schilder an jedem wichtigen Eingang.', 'Church': 'Kirche', 'City': 'Ort/Stadt', 'City / Town / Village': 'Stadt / Ort / Dorf', 'Civil Emergency': 'Ziviler Notfall', 'Cladding, glazing': 'Verkleidung, Verglasung', 'Clear': 'Löschen', 'Clear filter': 'Filter zurücksetzen', 'Click on the link %(url)s to reset your password': 'Klicken Sie auf den Link %(url)s, um Ihr Kennwort zurückzusetzen', 'Click on the link %(url)s to verify your email': 'Klicken Sie auf den Link %(url)s, um Ihre E-Mail-Adresse zu überprüfen', 'Click where you want to open Streetview': 'Klicken Sie dorthin, wo Sie Streetview öffnen möchten', 'Clinical Laboratory': 'Klinisches Labor', 'Clinical Operations': 'Klinikbetrieb', 'Clinical Status': 'Klinischer Status', 'Closed': 'Geschlossen', 'Closed at': 'Geschlossen am', 'Clothing': 'Kleidung', 'Cluster Details': 'Details zum Cluster', 'Cluster Distance': 'Cluster Abstand', 'Cluster Subsector Details': 'Cluster Teilbereich Details', 'Cluster Subsector added': 'Cluster Teilbereich hinzugefügt', 'Cluster Subsector deleted': 'Cluster Teilbereich gelöscht', 'Cluster Subsector updated': 'Cluster Teilbereich aktualisiert', 'Cluster Subsector': 'Cluster Teilsektor', 'Cluster Subsectors': 'Cluster Teilsektoren', 'Cluster Threshold': 'Cluster Schwellwert', 'Cluster added': 'Cluster hinzugefügt', 'Cluster deleted': 'Cluster gelöscht', 'Cluster updated': 'Cluster aktualisiert', 'Cluster': 'Cluster', 'Cluster(s)': 'Cluster', 'Clusters': 'Cluster', 'Cold Wave': 'Kältewelle', 'Collapse, partial collapse, off foundation': 'Eingestürzt, teilweise eingestürzt, vom Fundament verschoben', 'Collective center': 'Sammelunterkunft', 'Color for Underline of Subheadings': 'Farbe der Unterstreichungslinie von untergeordneten Überschriften', 'Color of Buttons when hovering': 'Farbe von Schaltflächen beim Darüberfahren', 'Color of bottom of Buttons when not pressed': 'Farbe der unteren Seite von Schaltflächen, die nicht gedrückt sind', 'Color of bottom of Buttons when pressed': 'Farbe der unteren Seite von Schaltflächen beim Drücken von Tasten', 'Color of dropdown menus': 'Farbe des Dropdown-Menüs', 'Color of selected Input fields': 'Farbe der ausgewählten Eingabefelder', 'Color of selected menu items': 'Farbe ausgewählter Menüpunkte', 'Columns, pilasters, corbels': 'Säulen, Pfeiler, Konsolen', 'Combined Method': 'Kombinierte Methode', 'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Kommen Sie später noch einmal wieder.
Jeder, der diese Seite besucht, hat derzeit wahrscheinlich das gleiche Problem wie Sie.', 'Come back later.': 'Kommen Sie doch später noch einmal wieder.', 'Comments': 'Kommentare', 'Comments permitted?': 'Kommentare zugelassen?', 'Commercial/Offices': 'Kommerziell / Büros', 'Commit Date': 'Datum der Zusage', 'Commit from %s': 'Zusage von %s', 'Commit': 'Zusage', 'Commit Status': 'Status der Zusage', 'Commiting a changed spreadsheet to the database': 'Ein verändertes Spreadsheet in der Datenbank einstellen.', 'Commitment Added': 'Zusage hinzugefügt', 'Commitment Canceled': 'Zusage abgebrochen', 'Commitment Details': 'Details zur Zusage', 'Commitment Item Details': 'Details zum zugesagten Artikel', 'Commitment Item added': 'Zugesagten Artikel hinzugefügt', 'Commitment Item deleted': 'Zugesagten Artikel gelöscht', 'Commitment Item updated': 'Zugesagten Artikel aktualisiert', 'Commitment Items': 'Zugesagte Artikel', 'Commitment Status': 'Status der Zusage', 'Commitment Updated': 'Zusage aktualisiert', 'Commitment': 'Zusage', 'Commitments': 'Zusagen', 'Committed By': 'Zugesagt durch', 'Committed': 'Zugesagt', 'Committed Items': 'Zugesagte Artikel', 'Committed Skills': 'Zugesagte Fähigkeiten', 'Committing Inventory': 'Zusageninventar', 'Communication problems': 'Kommunikationsprobleme', 'Community Health Center': 'Gesundheitszentrum der Gemeinschaft', 'Community Member': 'Mitglied der Gemeinschaft', 'Competencies': 'Kompetenzen', 'Competency Details': 'Details zu den Kompetenzen', 'Competency Rating Catalog': 'Kompetenzbewertungskatalog', 'Competency Rating Details': 'Details zur Kompetenzbewertung', 'Competency Rating added': 'Kompetenzbewertung hinzugefügt', 'Competency Rating deleted': 'Kompetenzbewertung gelöscht', 'Competency Rating updated': 'Kompetenzbewertung aktualisiert', 'Competency Ratings': 'Kompetenzbewertungen', 'Competency added': 'Kompetenz hinzugefügt', 'Competency deleted': 'Kompetenz gelöscht', 'Competency updated': 'Kompetenz aktualisiert', 'Competency': 'Kompetenz', 'Complete': 'Vollständig', 'Completed': 'Beendet', 'Complete Stock Adjustment': 'Anpassen des gesamten Bestandes', 'Completion Question': 'Abschlussfrage', 'Complexion': 'Gesichtsfarbe', 'Compose': 'Verfassen', 'Compromised': 'Gefährdet', 'Concrete frame': 'Betonrahmen', 'Concrete shear wall': 'Betonscherwand', 'Condition': 'Bedingung', 'Conduct a Disaster Assessment': 'Durchführung einer Katastrophenbeurteilung', 'Configuration': 'Konfiguration', 'Configurations': 'Konfigurationen', 'Configure Run-time Settings': 'Laufzeiteinstellungen konfigurieren', 'Confirm Shipment Received': 'Bestätigen der erhaltenen Lieferung', 'Confirmed': 'Bestätigt', 'Confirming Organization': 'Organisation bestätigen', 'Conflict Details': 'Details zum Konflikt', 'Conflict Resolution': 'Konfliktlösung', 'Connection': 'Verbindung', 'Connect Parser': 'Parser verbinden', 'Consignment Note': 'Warenbegleitschein', 'Constraints Only': 'Nur Bedingungen', 'Consumable': 'Verbrauchsartikel', 'Contact Data': 'Kontaktdaten', 'Contact Details': 'Details zum Kontakt', 'Contact Info': 'Kontaktinformationen', 'Contact Information Added': 'Kontaktinformationen hinzugefügt', 'Contact Information Deleted': 'Kontaktinformationen gelöscht', 'Contact Information Updated': 'Kontaktinformationen aktualisiert', 'Contact Information': 'Kontaktinformationen', 'Contact Method': 'Kontaktmethode', 'Contact Name': 'Name des Ansprechpartners', 'Contact Person': 'Kontaktperson', 'Contact Person / Camp Owner': 'Kontaktperson /
Camp-Betreiber', 'Contact Phone': 'Telefonnummer des Kontaktes', 'Contact details': 'Details zum Kontakt', 'Contact information added': 'Kontaktinformationen hinzugefügt', 'Contact information deleted': 'Kontaktinformationen gelöscht', 'Contact information updated': 'Kontaktinformationen aktualisiert', 'Contact Us': 'Kontaktieren Sie uns', 'Contact us': 'Kontaktieren Sie uns', 'Contact': 'Kontakt', 'Contacts': 'Kontakte', 'Content': 'Inhalt', 'Contents': 'Inhalte', 'Content Management': 'Content Management', 'Content Management System': 'Content Management System', 'Contract End Date': 'Ablaufzeit des Vertrags', 'Contributor': 'Mitwirkender', 'Conversion Tool': 'Umrechnungstool', 'Cooking NFIs': 'NFIs zum Kochen', 'Cooking Oil': 'Speiseöl', 'Coordinate Conversion': 'Koordinatentransformation', 'Coping Activities': 'Bewältigungsaktivitäten', 'Copy': 'Kopieren', 'Cost Type': 'Kostentyp', 'Cost per Megabyte': 'Kosten pro Megabyte', 'Cost per Minute': 'Kosten pro Minute', 'Count': 'Anzahl', 'Country of Residence': 'Land des Wohnsitzes', 'Country': 'Land', 'County': 'Bezirk', 'County / District': 'Kreis / Bezirk', 'Course Catalog': 'Katalog der Kurse', 'Course Certificate Details': 'Details zum Kurszertifikat', 'Course Certificate added': 'Kurszertifikat hinzugefügt', 'Course Certificate deleted': 'Kurszertifikat gelöscht', 'Course Certificate updated': 'Kurszertifikat aktualisiert', 'Course Certificates': 'Kurszertifikate', 'Course Details': 'Details zum Kurs', 'Course added': 'Kurs hinzugefügt', 'Course deleted': 'Kurs gelöscht', 'Course updated': 'Kurs aktualisiert', 'Course': 'Kurs', 'Create': 'Anlegen', 'Create & manage Distribution groups to receive Alerts': 'Erstellen und Verwalten von Verteilergruppen, um Warnhinweise zu empfangen', 'Create Activity Report': 'Aktivitätsreport erstellen', 'Create Activity Type': 'Aktivitätstyp erstellen', 'Create Activity': 'Aktivität erstellen', 'Create Airport': 'Flughafen erstellen', 'Create Assessment': 'Beurteilung erstellen', 'Create Asset': 'Anlage erstellen', 'Create Bed Type': 'Bettentyp erstellen', 'Create Brand': 'Marke erstellen', 'Create Budget': 'Budget erstellen', 'Create Bundle': 'Produktpaket erstellen', 'Create Case': 'Fall erstellen', 'Create Catalog Item': 'Katalogeintrag erstellen', 'Create Catalog': 'Katalog erstellen', 'Create Certificate': 'Zertifikat erstellen', 'Create Checklist': 'Prüfliste erstellen', 'Create Cholera Treatment Capability Information': 'Fügen Sie Informationen zur Möglichkeit der Behandlung von Cholerafällen hinzu', 'Create Cluster Subsector': 'Cluster Teilbereich erstellen', 'Create Cluster': 'Cluster erstellen', 'Create Competency Rating': 'Kompetenzbewertung erstellen', 'Create Contact': 'Kontaktperson erstellen', 'Create Course': 'Kurs erstellen', 'Create Dead Body Report': 'Leichenbericht erstellen', 'Create Department': 'Abteilung erstellen', 'Create Event': 'Neues Ereignis erstellen', 'Create Event Type': 'Ereignistyp erstellen', 'Create Facility': 'Einrichtung erstellen', 'Create Facility Type': 'Einrichtungstyp erstellen', 'Create Feature Layer': 'Kartenebene für Objektart erstellen', 'Create Group Entry': 'Gruppeneintrag erstellen', 'Create Group': 'Gruppe erstellen', 'Create Heliport': 'Hubschrauberlandeplatz erstellen', 'Create Hospital': 'Krankenhaus erstellen', 'Create Identification Report': 'Identifizierungsbericht erstellen', 'Create Impact Assessment': 'Folgenabschätzung erstellen', 'Create Incident Report': 'Vorfallbericht erstellen', 'Create Incident Type': 'Vorfalltyp erstellen', 'Create Incident':
'Vorfall erstellen', 'Create Item Category': 'Artikelkategorie erstellen', 'Create Item Pack': 'Artikelgruppe erstellen', 'Create Item': 'Neuen Artikel anlegen', 'Create Job Title': 'Berufsbezeichnung erstellen', 'Create Kit': 'Ausstattung (Kit) anlegen', 'Create Kitting': 'Ausstattung zusammenstellen', 'Create Layer': 'Kartenebene anlegen', 'Create Location': 'Standort anlegen', 'Create Location Hierarchy': 'Standorthierarchie anlegen', 'Create Map Profile': 'Kartenkonfiguration anlegen', 'Create Map Style': 'Kartensymbolisierung erstellen', 'Create Marker': 'Marker/Symbol anlegen', 'Create Member': 'Mitglied erstellen', 'Create Membership Type': 'Mitgliedstyp erstellen', 'Create Mobile Impact Assessment': 'Mobile Folgenabschätzung erstellen', 'Create Office': 'Büro anlegen', 'Create Office Type': 'Bürotyp anlegen', 'Create Organization': 'Organisation anlegen', 'Create Organization Type': 'Organisationstyp anlegen', 'Create Personal Effects': 'Persönliche Habe anlegen', 'Create PoI Type': 'PoI-Typ erstellen', 'Create Point of Interest': 'PoI erstellen', 'Create Post': 'Beitrag erstellen', 'Create Program': 'Programm erstellen', 'Create Project': 'Projekt anlegen', 'Create Projection': 'Kartenprojektion anlegen', 'Create Rapid Assessment': 'Schnell-Beurteilung anlegen', 'Create Report': 'Bericht anlegen', 'Create Repository': 'Repository anlegen', 'Create Request': 'Anfrage anlegen', 'Create Request Template': 'Anfragevorlage anlegen', 'Create Resource': 'Ressource anlegen', 'Create River': 'Neuen Fluss anlegen', 'Create Role': 'Neue Rolle anlegen', 'Create Room': 'Neues Zimmer anlegen', 'Create Seaport': 'Seehafen erstellen', 'Create Scenario': 'Neues Szenario anlegen', 'Create Sector': 'Neuen Bereich anlegen', 'Create Series': 'Serie erstellen', 'Create Service Profile': 'Neues Leistungsprofil anlegen', 'Create Shelter Service': 'Neue Unterkunftsleistung anlegen', 'Create Shelter Type': 'Neue Art der Unterkunft anlegen', 'Create Shelter': 'Neue Unterkunft anlegen', 'Create Skill Type': 'Art der Qualifikation / Fähigkeit anlegen', 'Create Skill': 'Fähigkeiten / Qualifikationen anlegen', 'Create Staff Member': 'Neuen Mitarbeiter anlegen', 'Create Staff Type': 'Mitarbeitertyp erstellen', 'Create Status': 'Neuen Status anlegen', 'Create Supplier': 'Neuen Lieferanten anlegen', 'Create Task': 'Neue Aufgabe anlegen', 'Create Theme': 'Neues Thema anlegen', 'Create User': 'Neuen Benutzer anlegen', 'Create Training Event': 'Neuen Schulungskurs anlegen', 'Create Vehicle': 'Fahrzeug erstellen', 'Create Vehicle Type': 'Fahrzeugtyp erstellen', 'Create Volunteer': 'Neuen Freiwilligen anlegen', 'Create Volunteer Role': 'Freiwilligenrolle erstellen', 'Create Warehouse': 'Neues Warenlager anlegen', 'Create Warehouse Type': 'Warenlagertyp erstellen', 'Create a Person': 'Neue Person anlegen', 'Create a group entry in the registry.': 'Erstellen Sie eine neue Gruppe in der Registry.', 'Create, enter, and manage surveys.': 'Erstellen, Eingabe und Verwaltung von Umfragen.', 'Creation of Surveys': 'Erstellung von Umfragen', 'Credential Details': 'Details zur Qualifikation', 'Credential added': 'Qualifikation hinzugefügt', 'Credential deleted': 'Qualifikation gelöscht', 'Credential updated': 'Qualifikation aktualisiert', 'Credentialling Organization': 'Bescheinigende Organisation', 'Credentials': 'Qualifikationen', 'Credit Card': 'Kreditkarte', 'Crime': 'Kriminalität', 'Criteria': 'Kriterien', 'CTN': 'CTN', 'Currency': 'Währung', 'Current': 'Aktuell', 'Current Address': 'Aktuelle Adresse', 'Current Entries': 'Aktuelle
Einträge', 'Current Group Members': 'Aktuelle Gruppenmitglieder', 'Current Home Address': 'Aktuelle Heimatadresse', 'Current Identities': 'Aktuelle Identitäten', 'Current Location': 'Aktueller Standort', 'Current Log Entries': 'Aktuelle Protokolleinträge', 'Current Memberships': 'Aktuelle Mitgliedschaften', 'Current Needs': 'Aktuelle Bedarfsmeldungen', 'Current Records': 'Aktuelle Datensätze', 'Current Registrations': 'Aktuelle Registrierungen', 'Current Status': 'Aktueller Status', 'Current Team Members': 'Aktuelle Teammitglieder', 'Current Twitter account': 'Aktueller Benutzeraccount bei Twitter', 'Current community priorities': 'Aktuelle Priorisierung in der Community', 'Current general needs': 'Aktueller allgemeiner Bedarf', 'Current greatest needs of vulnerable groups': 'Wichtigste Bedürfnisse der gefährdeten Gruppen', 'Current health problems': 'Derzeitige Gesundheitsprobleme', 'Current number of patients': 'Aktuelle Anzahl von Patienten', 'Current problems, categories': 'Aktuelle Probleme, Kategorien', 'Current problems, details': 'Aktuelle Probleme, Details', 'Current request': 'Aktuelle Anfrage', 'Current response': 'Aktuelle Antwort', 'Current session': 'Aktuelle Sitzung', 'Currently no Certifications registered': 'Derzeit sind keine Zertifizierungen registriert', 'Currently no Competencies registered': 'Derzeit sind keine Kompetenzen registriert', 'Currently no Course Certificates registered': 'Derzeit sind keine Kurszertifikate registriert', 'Currently no Credentials registered': 'Derzeit sind keine Qualifikationen registriert', 'Currently no Missions registered': 'Derzeit sind keine Aufträge registriert', 'Currently no Skill Equivalences registered': 'Derzeit sind keine Fähigkeits-Vergleichbarkeiten registriert', 'Currently no Trainings registered': 'Derzeit keine Schulungen registriert', 'Currently no entries in the catalog': 'Derzeit keine Einträge im Katalog', 'Customs Capacity': 'Zollkapazität', 'Customs Warehousing Storage Capacity': 'Zollwarenlager Kapazität', 'DNA Profile': 'DNA Profil', 'DNA Profiling': 'DNA-Profiling', 'Dam Overflow': 'Dammüberlauf', 'Damage': 'Beschädigung', 'Dangerous Person': 'Gefährliche Person', 'Data uploaded': 'Daten hochgeladen', 'Data': 'Daten', 'Database': 'Datenbank', 'Date & Time': 'Datum und Zeit', 'Date Available': 'Datum verfügbar', 'Date Joined': 'Eintrittsdatum', 'Date Published': 'Datum Veröffentlichung', 'Date Question': 'Datumsfrage', 'Date Received': 'Datum erhalten', 'Date Released': 'Datum der Veröffentlichung', 'Date Requested': 'Datum angefragt', 'Date Required': 'Datum erforderlich', 'Date Needed By': 'Benötigt bis', 'Date Sent': 'Sendedatum', 'Date Taken': 'Aufnahmedatum', 'Date Until': 'Datum bis', 'Date and Time': 'Datum und Zeit', 'Date and time this report relates to.': 'Datum und Uhrzeit, auf die sich dieser Bericht bezieht.', 'Date of Birth': 'Geburtsdatum', 'Date of Latest Information on Beneficiaries Reached': 'Datum der letzten Informationen über erreichte Begünstigte', 'Date of Report': 'Datum des Berichts', 'Date resigned': 'Datum der Kündigung', 'Date': 'Datum', 'Date/Time of Find': 'Datum/Zeit des Fundes', 'Date/Time when found': 'Datum / Uhrzeit, wann festgestellt', 'Date/Time when last seen': 'Datum / Uhrzeit, wann zuletzt gesehen', 'Date/Time': 'Datum/Zeit', 'De-duplicate': 'Deduplizieren', 'De-duplicator': 'Duplikate entfernen', 'Dead Body Details': 'Details zur Leiche', 'Dead Body Reports': 'Leichenberichte', 'Dead Body': 'Leiche', 'Dead body report added': 'Leichenbericht
hinzugefügt', 'Dead body report deleted': 'Leichenbericht gelöscht', 'Dead body report updated': 'Leichenbericht aktualisiert', 'Deaths in the past 24h': 'Tote der letzten 24h', 'Deaths/24hrs': 'Todesfälle/24 Std.', 'Decimal Degrees': 'Dezimalgrade', 'Decision': 'Entscheidung', 'Decomposed': 'Verwest', 'Default Base layer?': 'Standard Hintergrundkartenebene?', 'Default Location': 'Standard Gebiet/Standort', 'Default Height of the map window.': 'Standardhöhe des Kartenfensters.', 'Default Map': 'Standard-Kartenfenster', 'Default Marker': 'Standardsymbol', 'Default Width of the map window.': 'Standardbreite des Kartenfensters.', 'Default map question': 'Standard Kartenfrage', 'Default?': 'Standard?', 'Default synchronization policy': 'Standard-Synchronisationsverfahren', 'Defecation area for animals': 'Kotbereich für Tiere', 'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': 'Definieren Sie Szenarien für die Zuordnung der entsprechenden Ressourcen (Menschen, Anlagen und Einrichtungen).', 'Defines the icon used for display of features on handheld GPS.': 'Definiert das Symbol, welches für die Anzeige der Objekte auf mobilen GPS-Geräten verwendet wird.', 'Defines the icon used for display of features on interactive map & KML exports.': 'Definiert das Symbol, welches für die Anzeige der Objekte auf der interaktiven Karte sowie für die KML-Exporte verwendet wird.', 'Defines the marker used for display & the attributes visible in the popup.': 'Definiert das Symbol, das für die Anzeige und die Attribute im Popup-Fenster verwendet wird.', 'Degrees must be a number between -180 and 180': 'Grad muss eine Zahl zwischen -180 und 180 sein.', 'Delete Alternative Item': 'Alternativen Artikel löschen', 'Delete Assessment Summary': 'Zusammenfassung der Beurteilung löschen', 'Delete Assessment': 'Beurteilung löschen', 'Delete Asset Log Entry': 'Protokolleintrag der Anlage löschen', 'Delete Asset': 'Anlage löschen', 'Delete Baseline Type': 'Typ des Referenzdatums löschen', 'Delete Baseline': 'Referenzdatum löschen', 'Delete Brand': 'Marke löschen', 'Delete Budget': 'Budget löschen', 'Delete Bundle': 'Produktpaket löschen', 'Delete Catalog Item': 'Katalogeintrag löschen', 'Delete Catalog': 'Katalog löschen', 'Delete Certificate': 'Zertifikat löschen', 'Delete Certification': 'Zertifizierung löschen', 'Delete Cluster Subsector': 'Cluster Teilbereich löschen', 'Delete Cluster': 'Cluster löschen', 'Delete Commitment Item': 'Zugesagten Artikel löschen', 'Delete Commitment': 'Zusage löschen', 'Delete Competency Rating': 'Kompetenzbewertung löschen', 'Delete Competency': 'Kompetenz löschen', 'Delete Contact Information': 'Kontaktinformation löschen', 'Delete Course Certificate': 'Kurszertifikat löschen', 'Delete Course': 'Kurs löschen', 'Delete Credential': 'Qualifikation löschen', 'Delete Document': 'Dokument löschen', 'Delete Donor': 'Spender löschen', 'Delete Entry': 'Eintrag löschen', 'Delete Event Type': 'Ereignistyp löschen', 'Delete Facility Type': 'Anlagentyp löschen', 'Delete Feature Layer': 'Objekt-Kartenebene löschen', 'Delete Group': 'Gruppe löschen', 'Delete Hospital': 'Krankenhaus löschen', 'Delete Image': 'Grafik löschen', 'Delete Impact Type': 'Auswirkungstyp löschen', 'Delete Impact': 'Auswirkung löschen', 'Delete Incident Report': 'Vorfallbericht löschen', 'Delete Item Category': 'Artikelkategorie löschen', 'Delete Item Pack': 'Artikelgruppe löschen', 'Delete Item': 'Artikel löschen', 'Delete Job Role': 'Tätigkeit löschen', 'Delete Key': 'Schlüssel löschen',
'Delete Kit': 'Ausstattung (Kit) löschen', 'Delete Layer': 'Ebene löschen', 'Delete Level 1 Assessment': 'Stufe 1 Beurteilung löschen', 'Delete Level 2 Assessment': 'Stufe 2 Beurteilung löschen', 'Delete Location': 'Standort löschen', 'Delete Map Profile': 'Kartenkonfiguration löschen', 'Delete Marker': 'Marker/Symbol löschen', 'Delete Membership': 'Mitgliedschaft löschen', 'Delete Message': 'Nachricht löschen', 'Delete Mission': 'Auftrag löschen', 'Delete Need Type': 'Anforderungstyp löschen', 'Delete Need': 'Anforderung löschen', 'Delete Office': 'Büro löschen', 'Delete Office Type': 'Bürotyp löschen', 'Delete Organization': 'Organisation löschen', 'Delete Organization Type': 'Organisationstyp löschen', 'Delete Peer': 'Peer löschen', 'Delete Person': 'Person löschen', 'Delete Photo': 'Foto löschen', 'Delete Population Statistic': 'Bevölkerungsstatistik löschen', 'Delete Position': 'Position löschen', 'Delete Project': 'Projekt löschen', 'Delete Projection': 'Koordinatensystemprojektion löschen', 'Delete Rapid Assessment': 'Schnell-Beurteilung löschen', 'Delete Received Item': 'Erhaltenen Artikel löschen', 'Delete Received Shipment': 'Erhaltene Lieferung löschen', 'Delete Record': 'Datensatz löschen', 'Delete Report': 'Bericht löschen', 'Delete Request Item': 'Anfrageelement löschen', 'Delete Request': 'Anfrage löschen', 'Delete Resource': 'Ressource löschen', 'Delete Room': 'Raum löschen', 'Delete Scenario': 'Szenario löschen', 'Delete Section': 'Abschnitt löschen', 'Delete Sector': 'Bereich löschen', 'Delete Sent Item': 'Gesendeten Artikel löschen', 'Delete Sent Shipment': 'Gesendete Lieferung löschen', 'Delete Service Profile': 'Service-Profil löschen', 'Delete Setting': 'Einstellung löschen', 'Delete Skill Equivalence': 'Fähigkeits-Vergleichbarkeit löschen', 'Delete Skill Provision': 'Fähigkeits-Bereitstellung löschen', 'Delete Skill Type': 'Typ der Befähigung löschen', 'Delete Skill': 'Befähigung löschen', 'Delete Staff Type': 'Mitarbeitertyp löschen', 'Delete Status': 'Status löschen', 'Delete Subscription': 'Abonnement löschen', 'Delete Subsector': 'Teilbereich löschen', 'Delete Survey Answer': 'Umfrageantwort löschen', 'Delete Survey Question': 'Umfragefrage löschen', 'Delete Survey Series': 'Umfrageserie löschen', 'Delete Survey Template': 'Umfragevorlage löschen', 'Delete Training': 'Schulung löschen', 'Delete Unit': 'Einheit löschen', 'Delete User': 'Benutzer löschen', 'Delete Volunteer': 'Freiwilligen löschen', 'Delete Warehouse': 'Warenlager löschen', 'Delete from Server?': 'Vom Server löschen?', 'Delete': 'Löschen', 'Deliver To': 'Liefern an', 'Delphi Decision Maker': 'Delphi Entscheidungsträger', 'Demographic': 'Demografisch', 'Demonstrations': 'Demonstrationen', 'Dental Examination': 'Zahnärztliche Prüfung', 'Dental Profile': 'Zahnärztliches Profil', 'Department / Unit': 'Abteilung / Einheit', 'Department Catalog': 'Abteilungskatalog', 'Describe the condition of the roads to your hospital.': 'Beschreiben Sie den Zustand der Straßen zu Ihrem Krankenhaus.', "Describe the procedure which this record relates to (e.g. 'medical examination')": 'Beschreiben Sie den Arbeitsablauf, der sich auf diesen Eintrag bezieht (z. B.
"ärztliche Untersuchung")', 'Description of Contacts': 'Beschreibung der Kontakte', 'Description of defecation area': 'Beschreibung des Sanitärbereichs', 'Description of drinking water source': 'Beschreibung der Herkunft des Trinkwassers', 'Description of sanitary water source': 'Beschreibung der Herkunft des Sanitärwassers', 'Description of water source before the disaster': 'Beschreibung der Herkunft des Wassers vor der Katastrophe', 'Description': 'Beschreibung', 'Desire to remain with family': 'Wunsch bei der Familie zu bleiben', 'Destination': 'Ziel', 'Destroyed': 'Zerstört', 'Details field is required!': 'Detailfeld ist erforderlich!', 'Dialysis': 'Dialyse', 'Diaphragms, horizontal bracing': 'Membranen, horizontale Aussteifung', 'Diarrhea': 'Durchfall', 'Dignitary Visit': 'Besuch eines Würdenträgers', 'Direction': 'Richtung', 'Disable': 'Deaktivieren', 'Disabled participating in coping activities': 'Behinderte beteiligen sich an Bewältigungsaktivitäten', 'Disabled': 'Deaktiviert', 'Disabled?': 'Behindert?', 'Disaster Assessments': 'Katastrophenbeurteilungen', 'Disaster Victim Identification': 'Katastrophen Opferidentifikation', 'Disaster Victim Registry': 'Katastrophen Opferverzeichnis', 'Disaster clean-up/repairs': 'Katastrophen Reinigung/Reparaturen', 'Discharge (cusecs)': 'Abfluss (Kubikfuß/s)', 'Discharges/24hrs': 'Entlassungen/24 Std.', 'Discussion Forum on item': 'Diskussionsforum über Eintrag', 'Discussion Forum': 'Diskussionsforum', 'Disease vectors': 'Krankheitsvektoren', 'Dispensary': 'Ambulatorium', 'Displaced Populations': 'Vertriebene Bevölkerungsgruppen', 'Displaced': 'Vertriebene', 'Display Polygons?': 'Anzeige Polygone?', 'Display Routes?': 'Anzeige Routen?', 'Display Tracks?': 'Anzeige Wege?', 'Display Waypoints?': 'Anzeige Wegpunkte?', 'Distance between defecation area and water source': 'Distanz zwischen Sanitärbereich und Wasserquelle', 'Distance from %s:': 'Abstand von %s:', 'Distance(Kms)': 'Distanz (km)', 'Distribution groups': 'Verteilergruppen', 'Distribution': 'Verteilung', 'District': 'Bezirk', 'Rural District / District': 'Landkreis / Kreis', 'Do you really want to delete these records?': 'Sollen diese Datensätze wirklich gelöscht werden?', 'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'Möchten Sie diese erhaltene Lieferung stornieren? Die Artikel werden aus dem Bestand entfernt. Diese Aktion kann NICHT rückgängig gemacht werden!', 'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': 'Möchten Sie diese abgeschickte Sendung wirklich stornieren? Die Artikel werden an den Bestand zurückgegeben. 
Diese Aktion kann NICHT rückgängig gemacht werden!', 'Do you want to receive this shipment?': 'Wollen Sie die Lieferung empfangen?', 'Do you want to send these Committed items?': 'Wollen Sie die zugesagten Artikel schicken?', 'Do you want to send this shipment?': 'Wollen Sie diese Lieferung abschicken?', 'Document Details': 'Details zum Dokument', 'Document Scan': 'Dokumentenscan', 'Document added': 'Dokument hinzugefügt', 'Document deleted': 'Dokument gelöscht', 'Document updated': 'Dokument aktualisiert', 'Documents and Photos': 'Dokumente und Fotos', 'Documents': 'Dokumente', 'Does this facility provide a cholera treatment center?': 'Verfügt diese Einrichtung über ein Behandlungszentrum für Cholera?', 'Doing nothing (no structured activity)': 'Untätig (keine strukturierte Aktivität)', 'Dollars': 'Dollar', 'Domain': 'Domäne', 'Domestic chores': 'Hausarbeit', 'Donated': 'Gespendet', 'Donating Organization': 'Spendende Organisation', 'Donation': 'Spende', 'Donations': 'Spenden', 'Donation Certificate': 'Spendenzertifikat', 'Donations Needed': 'Spenden benötigt', 'Donation Phone #': 'Spenden-Telefon #', 'Donor Details': 'Details zum Spender', 'Donor added': 'Spender hinzugefügt', 'Donor deleted': 'Spender gelöscht', 'Donor updated': 'Spender aktualisiert', 'Donor': 'Spender', 'Donors Report': 'Bericht zu Spendern', 'Donors': 'Spender', 'Door frame': 'Türrahmen', 'Download PDF': 'PDF herunterladen', 'Download Template': 'Vorlage herunterladen', 'Draft': 'Entwurf', 'Drainage': 'Abfluss', 'Drawing up a Budget for Staff & Equipment across various Locations.': 'Aufstellung eines Budgets für Mitarbeiter und Ausrüstung über mehrere Standorte.', 'Drill Down by Group': 'Recherche nach Gruppe', 'Drill Down by Incident': 'Recherche nach Vorfall', 'Drill Down by Shelter': 'Recherche nach Unterkunft', 'Drivers': 'Fahrer', 'Driver Phone Number': 'Telefonnummer des Fahrers', 'Driving License': 'Führerschein', 'Drought': 'Dürre', 'Drop-off Location for Goods?': 'Sammelstelle für Sachspenden?', 'Drugs': 'Drogen', 'Dry Dock': 'Trockendock', 'Dug Well': 'Schachtbrunnen', 'Duplicate?': 'Duplikat?', 'Dust Storm': 'Staubsturm', 'Dwelling': 'Wohnstätte', 'EMS Reason': 'EMS Grund', 'ER Status Reason': 'Status Notaufnahme Grund', 'ER Status': 'Status Notaufnahme', 'Early Recovery': 'Frühe Wiederherstellung', 'Earthquake': 'Erdbeben', 'Edit Activity': 'Aktivität bearbeiten', 'Edit Address': 'Adresse bearbeiten', 'Edit Alternative Item': 'Alternativen Artikel bearbeiten', 'Edit Application': 'Anwendung bearbeiten', 'Edit Assessment Summary': 'Zusammenfassung der Beurteilung bearbeiten', 'Edit Assessment': 'Beurteilung bearbeiten', 'Edit Asset Log Entry': 'Protokolleintrag der Anlage bearbeiten', 'Edit Asset': 'Anlage bearbeiten', 'Edit Baseline Type': 'Bearbeiten des Typs des Referenzdatums', 'Edit Baseline': 'Referenzdatum bearbeiten', 'Edit Brand': 'Marke bearbeiten', 'Edit Budget': 'Budget bearbeiten', 'Edit Bundle': 'Produktpaket bearbeiten', 'Edit Camp Service': 'Camp Leistung bearbeiten', 'Edit Camp Type': 'Camptyp bearbeiten', 'Edit Camp': 'Camp bearbeiten', 'Edit Catalog Item': 'Katalogeintrag bearbeiten', 'Edit Catalog': 'Katalog bearbeiten', 'Edit Certificate': 'Zertifikat bearbeiten', 'Edit Certification': 'Zertifizierung bearbeiten', 'Edit Cluster Subsector': 'Cluster Teilbereich bearbeiten', 'Edit Cluster': 'Cluster bearbeiten', 'Edit Commitment Item': 'Zugesagten Artikel bearbeiten', 'Edit Commitment': 'Zusage bearbeiten', 'Edit Competency Rating': 'Kompetenzbewertung bearbeiten', 
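# NOTE: printf-style placeholders such as %d and %s (e.g. in
# 'Edit Level %d Locations?' below, or 'Distance from %s:' above) are filled in
# at render time and must therefore appear unchanged in the translation.
# Illustrative substitution, assuming an active 'de' translator T:
#   T('Edit Level %d Locations?') % 2
#   # -> 'Bearbeiten von Level 2 Standorten?'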
'Edit Competency': 'Kompetenz bearbeiten', 'Edit Contact Information': 'Kontaktinformation bearbeiten', 'Edit Contact': 'Kontakt bearbeiten', 'Edit Contents': 'Inhalt bearbeiten', 'Edit Course Certificate': 'Kurszertifikat bearbeiten', 'Edit Course': 'Kurs bearbeiten', 'Edit Credential': 'Qualifikation bearbeiten', 'Edit Dead Body Details': 'Leichendetails bearbeiten', 'Edit Description': 'Beschreibung bearbeiten', 'Edit Details': 'Details bearbeiten', 'Edit Disaster Victims': 'Katastrophenopfer bearbeiten', 'Edit Document': 'Dokument bearbeiten', 'Edit Donor': 'Spender bearbeiten', 'Edit Email Settings': 'E-Mail-Einstellungen bearbeiten', 'Edit Entry': 'Eintrag bearbeiten', 'Edit Event': 'Ereignis bearbeiten', 'Edit Event Type': 'Ereignistyp bearbeiten', 'Edit Facility': 'Einrichtung bearbeiten', 'Edit Facility Type': 'Einrichtungstyp bearbeiten', 'Edit Feature Layer': 'Objekt-Layer bearbeiten', 'Edit Flood Report': 'Flutbericht bearbeiten', 'Edit Gateway Settings': 'Gateway-Einstellungen bearbeiten', 'Edit Group': 'Gruppe bearbeiten', 'Edit Hospital': 'Krankenhaus bearbeiten', 'Edit Human Resource': 'Personelle Ressource bearbeiten', 'Edit Identification Report': 'Identifizierungsbericht bearbeiten', 'Edit Identity': 'Identität bearbeiten', 'Edit Image Details': 'Bilddetails bearbeiten', 'Edit Impact Type': 'Typ der Auswirkung bearbeiten', 'Edit Impact': 'Auswirkung bearbeiten', 'Edit Incident Report': 'Vorfallsbericht bearbeiten', 'Edit Inventory Item': 'Artikel des Bestands bearbeiten', 'Edit Item Category': 'Artikelkategorie bearbeiten', 'Edit Item Pack': 'Artikelpaket bearbeiten', 'Edit Item': 'Artikel bearbeiten', 'Edit Job Role': 'Tätigkeit bearbeiten', 'Edit Key': 'Schlüssel bearbeiten', 'Edit Kit': 'Ausstattung (Kit) bearbeiten', 'Edit Layer': 'Kartenebene bearbeiten', 'Edit Level %d Locations?': 'Bearbeiten von Level %d Standorten?', 'Edit Level 1 Assessment': 'Stufe 1 Beurteilung bearbeiten', 'Edit Level 2 Assessment': 'Stufe 2 Beurteilung bearbeiten', 'Edit Location': 'Standort (Position) bearbeiten', 'Edit Log Entry': 'Protokolleintrag bearbeiten', 'Edit Map Profile': 'Kartenkonfiguration bearbeiten', 'Edit Map Services': 'Kartendienste bearbeiten', 'Edit Marker': 'Marker/Symbol bearbeiten', 'Edit Membership': 'Mitgliedschaft bearbeiten', 'Edit Message': 'Nachricht bearbeiten', 'Edit Messaging Settings': 'Messaging-Einstellungen bearbeiten', 'Edit Mission': 'Auftrag bearbeiten', 'Edit Modem Settings': 'Modem-Einstellungen bearbeiten', 'Edit Need Type': 'Bedarfstyp bearbeiten', 'Edit Need': 'Bedarf bearbeiten', 'Edit Office': 'Büro bearbeiten', 'Edit Options': 'Optionen bearbeiten', 'Edit Organization': 'Organisation bearbeiten', 'Edit Parameters': 'Parameter bearbeiten', 'Edit Peer Details': 'Details zu Peer bearbeiten', 'Edit Person Details': 'Details zur Person bearbeiten', 'Edit Personal Effects Details': 'Details zur persönlichen Habe bearbeiten', 'Edit Photo': 'Foto bearbeiten', 'Edit Population Statistic': 'Bevölkerungsstatistik bearbeiten', 'Edit Position': 'Position bearbeiten', 'Edit Problem': 'Problem bearbeiten', 'Edit Project': 'Projekt bearbeiten', 'Edit Projection': 'Kartenprojektion bearbeiten', 'Edit Rapid Assessment': 'Schnell-Beurteilung bearbeiten', 'Edit Received Item': 'Erhaltenen Artikel bearbeiten', 'Edit Received Shipment': 'Erhaltene Lieferung bearbeiten', 'Edit Record': 'Datensatz bearbeiten', 'Edit Registration Details': 'Details zur Registrierung bearbeiten', 'Edit Registration': 'Registrierung bearbeiten', 'Edit Request Item': 'Anfrage zu Artikel 
bearbeiten', 'Edit Request': 'Anfrage bearbeiten', 'Edit Resource': 'Ressource bearbeiten', 'Edit River': 'Fluss bearbeiten', 'Edit Role': 'Rolle bearbeiten', 'Edit Room': 'Raum bearbeiten', 'Edit Scenario': 'Szenario bearbeiten', 'Edit Sector': 'Bereich bearbeiten', 'Edit Sent Item': 'Gesendeten Artikel bearbeiten', 'Edit Setting': 'Einstellung bearbeiten', 'Edit Settings': 'Einstellungen bearbeiten', 'Edit Shelter Service': 'Unterkunft Leistung bearbeiten', 'Edit Shelter Type': 'Typ der Unterkunft bearbeiten', 'Edit Shelter': 'Unterkunft bearbeiten', 'Edit Skill Equivalence': 'Fähigkeits-Vergleichbarkeit bearbeiten', 'Edit Skill Provision': 'Fähigkeits-Bereitstellung bearbeiten', 'Edit Skill Type': 'Typ der Fähigkeit bearbeiten', 'Edit Skill': 'Fähigkeit bearbeiten', 'Edit Solution': 'Lösung bearbeiten', 'Edit Staff Type': 'Typ von Mitarbeitern bearbeiten', 'Edit Subscription': 'Abonnement bearbeiten', 'Edit Subsector': 'Teilbereich bearbeiten', 'Edit Survey Answer': 'Umfrage - Antwort bearbeiten', 'Edit Survey Question': 'Umfrage - Frage bearbeiten', 'Edit Survey Series': 'Umfrage - Serie bearbeiten', 'Edit Survey Template': 'Umfrage Vorlage bearbeiten', 'Edit Task': 'Aufgabe bearbeiten', 'Edit Team': 'Team bearbeiten', 'Edit Theme': 'Thema bearbeiten', 'Edit Themes': 'Themen bearbeiten', 'Edit Ticket': 'Ticket bearbeiten', 'Edit Track': 'Route bearbeiten', 'Edit Training': 'Schulung bearbeiten', 'Edit Tropo Settings': 'Tropo Einstellungen bearbeiten', 'Edit User': 'Benutzer bearbeiten', 'Edit Volunteer Availability': 'Verfügbarkeit von Freiwilligem bearbeiten', 'Edit Volunteer Details': 'Details zu Freiwilligem bearbeiten', 'Edit Warehouse': 'Warenlager bearbeiten', 'Edit current record': 'Aktuellen Datensatz bearbeiten', 'Edit message': 'Nachricht bearbeiten', 'Edit': 'Bearbeiten', 'Editable?': 'Bearbeitbar?', 'Education materials received': 'Ausbildungsmaterialien erhalten', 'Education materials, source': 'Herkunft der Ausbildungsmaterialien', 'Education': 'Ausbildung/Schulung', 'Effects Inventory': 'Bestandsliste der persönlichen Habe', 'Eggs': 'Eier', 'Either a shelter or a location must be specified': 'Es muss entweder eine Unterkunft oder ein Standort angegeben werden', 'Either file upload or document URL required.': 'Es ist entweder ein Datei-Upload oder eine Dokument-URL erforderlich.', 'Either file upload or image URL required.': 'Es ist entweder ein Datei-Upload oder eine Bild-URL erforderlich.', 'Elderly person headed households (>60 yrs)': 'Von älteren Menschen (>60 Jahren) geführte Haushalte', 'Electrical': 'Elektrisch', 'Electrical, gas, sewerage, water, hazmats': 'Elektrik, Gas, Abwasser, Wasser, Gefahrgut', 'Elevated': 'Erhöht', 'Elevation': 'Höhe', 'Elevators': 'Aufzüge', 'Email Address': 'E-Mail-Adresse', 'Email Channels (Inbound)': 'E-Mail Kanäle (eingehend)', 'Email InBox': 'E-Mail Eingang', 'Email Settings': 'E-Mail-Einstellungen', 'Email settings updated': 'E-Mail-Einstellungen aktualisiert', 'Email': 'E-Mail', 'Embalming': 'Einbalsamierung', 'Embassy': 'Botschaft', 'Emergency Capacity Building project': 'Notfall-Kompetenzbildungsprojekt', 'Emergency Contacts': 'Notfallkontakte', 'Emergency Department': 'Notfall-Abteilung', 'Emergency Shelter': 'Notunterkunft', 'Emergency Support Facility': 'Notfall-Unterstützungseinrichtung', 'Emergency Support Service': 'Notfall-Unterstützungsdienst', 'Emergency Telecommunications': 'Notfall-Telekommunikation', 'Enable/Disable Layers': 'Layer aktivieren/deaktivieren', 'Enabled': 'Aktiviert', 'Enabled?': 'Aktiviert?', 'End Date': 'Enddatum', 'End date 
should be after start date': 'Enddatum muss nach dem Startdatum liegen', 'End date': 'Enddatum', 'End of Period': 'Ende des Zeitraums', 'Enter a GPS Coord': 'Geben Sie eine GPS Koordinate ein', 'Enter a name for the spreadsheet you are uploading (mandatory).': 'Geben Sie einen Namen für die Tabelle an, die Sie hochladen (obligatorisch).', 'Enter a new support request.': 'Geben Sie eine neue Unterstützungsanfrage ein.', 'Enter a unique label!': 'Geben Sie eine eindeutige Bezeichnung ein!', 'Enter a valid date before': 'Geben Sie ein gültiges Datum ein vor', 'Enter a valid email': 'Geben Sie eine gültige E-Mail-Adresse ein', 'Enter a valid future date': 'Geben Sie ein gültiges, zukünftiges Datum ein', 'Enter some characters to bring up a list of possible matches': 'Geben Sie einige Zeichen ein um eine Liste möglicher Übereinstimmungen anzuzeigen', 'Enter some characters to bring up a list of possible matches.': 'Geben Sie einige Zeichen ein um eine Liste von möglichen Übereinstimmungen anzuzeigen.', 'Enter tags separated by commas.': 'Geben Sie die Tags mit Komma getrennt ein.', 'Enter the same password as above': 'Wiederholen Sie das Kennwort von oben', 'Entered': 'Eingegeben', 'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Die Eingabe einer Telefonnummer ist freiwillig, sie erlaubt Ihnen aber SMS-Nachrichten zu abonnieren und zu empfangen.', 'Entry deleted': 'Eintrag gelöscht', 'Environment': 'Umgebung/Umwelt', 'Equipment': 'Ausrüstung', 'Error Tickets': 'Fehlertickets', 'Error encountered while applying the theme.': 'Bei der Anwendung des Themas ist ein Fehler aufgetreten.', 'Error in message': 'Fehler in der Nachricht', "Error logs for '%(app)s'": 'Fehlerprotokolle für "%(app)s"', 'Errors': 'Fehler', 'ESRI Shapefile': 'ESRI Shapefile', 'Essential Staff': 'Unverzichtbarer Mitarbeiter', 'Est. Delivery Date': 'Geschätztes Lieferdatum', 'Estimated # of households who are affected by the emergency': 'Geschätzte Anzahl von Haushalten, die vom Notfall betroffen sind', 'Estimated # of people who are affected by the emergency': 'Geschätzte Anzahl von Menschen, die vom Notfall betroffen sind', 'Estimated Overall Building Damage': 'Geschätzter allgemeiner Gebäudeschaden', 'Estimated Population': 'Geschätzte Bevölkerungszahl', 'Estimated total number of people in institutions': 'Geschätzte Gesamtzahl von Menschen in Einrichtungen', 'Estimated Delivery Date': 'Voraus. Liefertermin', 'Euros': 'Euro', 'Evacuating': 'Evakuieren', 'Evacuees Capacity (Day and Night)': 'Kapazität für Evakuierte (Tag und Nacht)', 'Evacuees Capacity (Night only)': 'Kapazität für Evakuierte (nur Nacht)', 'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'Informationen in dieser Nachricht bewerten. 
(Dieser Wert sollte NICHT in öffentlichen Warnanwendungen verwendet werden.)', 'Event Details': 'Details zum Ereignis', 'Event Type': 'Ereignistyp', 'Event Types': 'Ereignistypen', 'Event added': 'Ereignis hinzugefügt', 'Event deleted': 'Ereignis gelöscht', 'Event updated': 'Ereignis aktualisiert', 'Event': 'Ereignis', 'Events': 'Ereignisse', 'Example': 'Beispiel', 'Exceeded': 'Überschritten', 'Excellent': 'Ausgezeichnet', 'Exclude contents': 'Inhalte ausschließen', 'Excreta disposal': 'Entsorgung von Exkrementen', 'Execute a pre-planned activity identified in <instruction>': 'Ausführen einer vorausgeplanten Aktivität, identifiziert in <instruction>', 'Exercise': 'Übung', 'Exercise?': 'Übung?', 'Exercises mean all screens have a watermark & all notifications have a prefix.': 'Übungen bedeuten, dass alle Anzeigen ein Wasserzeichen & alle Benachrichtigungen ein Präfix haben.', 'Existing Placard Type': 'Vorhandener Plakattyp', 'Existing food stocks': 'Vorhandener Lebensmittelvorrat', 'Existing location cannot be converted into a group.': 'Vorhandener Standort kann nicht in eine Gruppe transformiert werden.', 'Exits': 'Ausgänge', 'Experience': 'Erfahrung', 'Expiration Date': 'Ablaufdatum', 'Expiration Report': 'Ablaufbericht', 'Expired?': 'Abgelaufen?', 'Expiring Staff Contracts Report': 'Bericht zu ablaufenden Mitarbeiterverträgen', 'Expiry Date': 'Ablaufdatum', 'Expiry (month)': 'Ablauf (Monat)', 'Expiry (months)': 'Ablauf (Monate)', 'Explosive Hazard': 'Explosionsgefahr', 'Export as': 'Exportieren als', 'Export Data': 'Daten exportieren', 'Export Database as CSV': 'Datenbank als CSV exportieren', 'Export in GPX format': 'Als GPX Format exportieren', 'Export in KML format': 'Als KML Format exportieren', 'Export in OSM format': 'Als OSM Format exportieren', 'Export in PDF format': 'In PDF Format exportieren', 'Export in RSS format': 'In RSS Format exportieren', 'Export in XLS format': 'In XLS Format exportieren', 'Exterior Only': 'Nur Außenbereich', 'Exterior and Interior': 'Außen- und Innenbereich', 'Eye Color': 'Augenfarbe', 'Facebook Channels': 'Facebook Kanäle', 'Facial hair, color': 'Gesichtsbehaarung, Farbe', 'Facial hair, type': 'Gesichtsbehaarung, Art', 'Facial hear, length': 'Gesichtsbehaarung, Länge', 'Facility': 'Einrichtung', 'Facilities': 'Einrichtungen', 'Facility Details': 'Details zur Einrichtung', 'Facility Operations': 'Einrichtungsmanagement', 'Facility Status': 'Status der Einrichtung', 'Facility Type': 'Einrichtungstyp', 'Facility Types': 'Einrichtungstypen', 'Facility added': 'Einrichtung hinzugefügt', 'Facility or Location': 'Einrichtung oder Standort', 'Facility removed': 'Einrichtung entfernt', 'Facility updated': 'Einrichtung aktualisiert', 'Fail': 'Fehlgeschlagen', 'Failed!': 'Fehlgeschlagen!', 'Fair': 'Mäßig', 'Falling Object Hazard': 'Gefahr durch herabstürzende Objekte', 'Families/HH': 'Familien/HH', 'Family tarpaulins received': 'Familien-Planen erhalten', 'Family tarpaulins, source': 'Herkunft der Familien-Planen', 'Family': 'Familie', 'Family/friends': 'Familie/Freunde', 'Farmland/fishing material assistance, Rank': 'Ackerland/Materialhilfe für Fischerei, Rang', 'Fatalities': 'Verstorbene', 'Feature Layer added': 'Objekt-Layer hinzugefügt', 'Feature Layer deleted': 'Objekt-Layer gelöscht', 'Feature Layer updated': 'Objekt-Layer aktualisiert', 'Feature Layers': 'Objekt-Ebenen', 'Feature Namespace': 'Namespace des Objekts', 'Feature Request': 'Objekt-Anfrage', 'Feature Type': 'Objektart', 'Features Include': 'Beinhaltete Objekte', 'Federal 
State': 'Bundesland', 'Feeds': 'Newsfeeds', 'Female headed households': 'Weiblich geführte Haushalte', 'Female': 'Weiblich', 'Few': 'Wenige', 'Field Hospital': 'Feldlazarett', 'Field': 'Feld', 'File': 'Datei', 'Fill in Latitude': 'Geben Sie den Breitengrad ein', 'Fill in Longitude': 'Geben Sie den Längengrad ein', 'Filter Options': 'Filteroptionen', 'Filter by Tag': 'Nach Tag filtern', 'Filter by Location': 'Nach Standort filtern', 'Filter by Organization': 'Nach Organisation filtern', 'Filter by Date': 'Nach Datum filtern', 'Filter Field': 'Filter Feld', 'Filter Tweets by the date they were tweeted on': 'Filtere Tweets nach dem Datum der Sendung', 'Filter Tweets by who tweeted them': 'Filtere Tweets nach sendender Person', 'Filter Value': 'Filter Wert', 'Find Dead Body Report': 'Suche Leichenbericht', 'Find Hospital': 'Krankenhaus finden', 'Find Person Record': 'Personendatensatz finden', 'Find Volunteers': 'Freiwillige finden', 'Find a Person Record': 'Suche einen Personendatensatz', 'Find': 'Suchen', 'Fingerprint': 'Fingerabdruck', 'Fingerprinting': 'Fingerabdrücke machen', 'Fingerprints': 'Fingerabdrücke', 'Finished Jobs': 'Erledigte Jobs', 'Fire suppression and rescue': 'Feuer - Eindämmung und Rettung', 'Fire': 'Feuer', 'First': 'Erste', 'First Name': 'Vorname', 'First name': 'Vorname', 'Fishing': 'Fischerei', 'Flash Flood': 'Sturzflut', 'Flash Freeze': 'Schockfrost', 'Flexible Impact Assessments': 'Flexible Folgenabschätzungen', 'Flood Alerts show water levels in various parts of the country': 'Flut Alarme zeigen Wasserstände in verschiedenen Teilen des Landes', 'Flood Alerts': 'Flut Alarme', 'Flood Depth': 'Fluthöhe', 'Flood Report Details': 'Details zum Flutbericht', 'Flood Report added': 'Flutbericht hinzugefügt', 'Flood Report deleted': 'Flutbericht gelöscht', 'Flood Report updated': 'Flutbericht aktualisiert', 'Flood Report': 'Flutbericht', 'Flood Reports': 'Flutberichte', 'Flood': 'Flut', 'Flow Status': 'Status des Ablaufs', 'Fog': 'Nebel', 'Folder': 'Ordner', 'Food Supply': 'Lebensmittelversorgung', 'Food assistance': 'Lebensmittel Hilfe', 'Food': 'Lebensmittel', 'Footer file %s missing!': 'Fußzeilendatei %s fehlt!', 'Footer': 'Fußzeile', 'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'Für ein Land wäre dies der ISO2-Code, für eine Stadt wäre es der Flughafen-Locode.', 'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'Für jeden Sync-Partner gibt es einen Standard-Sync-Job, der nach einem vordefinierten Zeitintervall ausgeführt wird. Sie können auch mehrere Sync-Jobs festlegen, welche entsprechend Ihren Anforderungen angepasst werden können. Klicken Sie auf den Link rechts um zu beginnen.', 'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'Für erweiterte Sicherheit empfiehlt sich die Eingabe eines Benutzernamens und Passworts. 
Bitte benachrichtigen Sie die Administratoren der anderen Geräte in Ihrem Unternehmen, damit diese die Zugangsdaten unter dem Punkt Synchronization -> Sync-Partner einrichten.', 'For live help from the Sahana community on using this application, go to': 'Für direkte Hilfe von der Sahana Community zur Anwendung dieses Programmes, gehen Sie zu', 'For messages that support alert network internal functions': 'Für Nachrichten, die Netzwerkswarnungen interner Funktionen unterstützen', 'For more details on the Sahana Eden system, see the': 'Weitere Informationen zum Sahana Eden System finden Sie unter', 'For more information, see': 'Weitere Informationen finden Sie unter', 'For': 'Für', 'Forest Fire': 'Waldbrand', 'Formal camp': 'Offizielles Camp', 'Forms': 'Formulare', 'Found': 'Gefunden', 'Foundations': 'Stiftungen', 'Freezing Drizzle': 'Gefrierender Nieselregen', 'Freezing Rain': 'Gefrierender Regen', 'Freezing Spray': 'Gefrierende Gischt', 'French': 'Französisch', 'Friday': 'Freitag', 'From Adress': 'Herkunftsadresse', 'From Address': 'Herkunftsadresse', 'From Facility': 'Von Einrichtung', 'From Inventory': 'Aus dem Bestand', 'From Location': 'Vom Standort', 'From Organization': 'Von der Organisation', 'From': 'Von', 'From ': 'Von ', 'Fulfil. Status': 'Status der Bedarfsdeckung', 'Fulfill Status': 'Status der Bedarfsdeckung', 'Fulfillment Status': 'Auftragserfüllungsstatus', 'Full beard': 'Vollbart', 'Full': 'Vollständig', 'Fullscreen Map': 'Vollbildkarte', 'Functions available': 'Verfügbare Funktionen', 'Funding': 'Finanzierung', 'Funding Organization': 'Finanzierende Organisation', 'Funeral': 'Beerdigung', 'Further Action Recommended': 'Weitere Maßnahmen empfohlen', 'GIS Reports of Shelter': 'GIS-Berichte der Unterkünfte', 'GIS integration to view location details of the Shelter': 'GIS-Integration um Details zum Standort der Unterkunft zu erhalten', "Google Earth's Keyhole Markup Language": "Google Earth's Keyhole Markup Language", 'GPS Marker': 'GPS Markierung/Symbol', 'GPS Track File': 'GPS Track Datei', 'GPS Track': 'GPS Track', 'GPX Track': 'GPX Track', 'GPS eXchange format': 'GPS-Austauschformat', 'Gap Analysis Map': 'Karte zur Lückenanalyse', 'Gap Analysis Report': 'Bericht zur Lückenanalyse', 'Gap Analysis': 'Lückenanalyse', 'Gap Map': 'Lückenkarte', 'Gap Report': 'Bericht über Lücken', 'Gateway Settings': 'Gateway-Einstellungen', 'Gateway settings updated': 'Gateway-Einstellungen aktualisiert', 'Gateway': 'Gateway', 'Gender': 'Geschlecht', 'General Comment': 'Allgemeine Bemerkung', 'General Medical/Surgical': 'Allgemein - Medizinisch/Chirurgisch', 'General emergency and public safety': 'Allgemein - Notfall und öffentliche Sicherheit', 'General information on demographics': 'Allgemein - Informationen zur Demographie', 'General': 'Allgemein', 'Geocode': 'Geocodierung', 'Geocoder Selection': 'Geocoder Auswahl', 'Geometry Name': 'Name der Geometrie', 'Geophysical (inc. landslide)': 'Geophysikalisch (inkl. Erdrutsch)', 'Geotechnical Hazards': 'Geotechnische Gefahren', 'Geotechnical': 'Geotechnisch', 'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Das Modul Geraldo steht innerhalb der aktiven Python-Umgebung nicht zur Verfügung - für die PDF-Ausgabe muss es nachinstalliert werden.', 'German': 'Deutsch', 'Get incoming recovery requests as RSS feed': 'Empfangen von eingehenden Bergungsanforderungen als RSS-Feed', 'Give a brief description of the image, e.g. 
what can be seen where on the picture (optional).': 'Kurze Beschreibung des Bildes, z. B. was wo auf dem Bild zu sehen ist (nicht verpflichtend).', 'Give information about where and when you have seen them': 'Geben Sie Information wo und wann Sie sie gesehen haben', 'Global Messaging Settings': 'Globale Nachrichteneinstellungen', 'Go to Request': 'Zur Anfrage', 'Go': 'Los', 'Goatee': 'Spitzbart', 'Good Condition': 'Guter Zustand', 'Good': 'Gut', 'Goods Received Note': 'Warenempfangsbestätigung', 'Government UID': 'Regierungs-UID', 'Government building': 'Regierungsgebäude', 'Government District': 'Regierungsbezirk', 'Government': 'Regierung', 'Grade': 'Klasse', 'Greek': 'Griechisch', 'Green': 'Grün', 'GRN': 'GRN', 'GRN Number': 'GRN Nummer', 'Ground movement, fissures': 'Untergrundbewegung, Risse', 'Ground movement, settlement, slips': 'Untergrundbewegung, Bodensenkung, Abrutsche', 'Group Description': 'Gruppenbeschreibung', 'Group Details': 'Gruppendetails', 'Group Head': 'Gruppenleiter', 'Group Member added': 'Gruppenmitglied hinzugefügt', 'Group Members': 'Gruppenmitglieder', 'Group Memberships': 'Gruppenzugehörigkeiten', 'Group Name': 'Gruppenname', 'Group Size Day': 'Gruppengröße Tag', 'Group Size Night': 'Gruppengröße Nacht', 'Group Title': 'Gruppentitel', 'Group Type': 'Gruppentyp', 'Group added': 'Gruppe hinzugefügt', 'Group deleted': 'Gruppe gelöscht', 'Group description': 'Gruppenbeschreibung', 'Group updated': 'Gruppe aktualisiert', 'Group': 'Gruppe', 'Grouped by': 'Gruppiert nach', 'Groups removed': 'Gruppen entfernt', 'Groups': 'Gruppen', 'Guest': 'Gast', 'HR Manager': 'Personalmanager', 'Hail': 'Hagel', 'Hair Color': 'Haarfarbe', 'Hair Length': 'Haarlänge', 'Hair Style': 'Haarschnitt', 'Has data from this Reference Document been entered into Sahana?': 'Wurden Daten von diesem Referenzdokument in Sahana eingetragen?', 'Has the Certificate for receipt of the shipment been given to the sender?': 'Wurde das Zertifikat für den Empfang der Lieferung an den Absender übergeben?', 'Has the GRN (Goods Received Note) been completed?': 'Wurde die Warenempfangsmeldung (GRN) ausgefüllt?', 'Hazard Pay': 'Gefahrenzulage', 'Hazardous Material': 'Gefahrgut', 'Hazardous Road Conditions': 'Gefährliche Straßenverhältnisse', 'Header Background': 'Hintergrund der Kopfzeile', 'Header background file %s missing!': 'Hintergrunddatei der Kopfzeile %s fehlt!', 'Headquarters': 'Hauptquartiere', 'Health care assistance, Rank': 'Unterstützung Gesundheitspflege, Rang', 'Health center with beds': 'Gesundheitszentrum mit Betten', 'Health center without beds': 'Gesundheitszentrum ohne Betten', 'Health center': 'Gesundheitszentrum', 'Health services status': 'Status des Gesundheitswesens', 'Health': 'Gesundheit', 'Healthcare Worker': 'Arbeiter im Gesundheitswesen', 'Heat Wave': 'Hitzewelle', 'Heat and Humidity': 'Hitze und Luftfeuchtigkeit', 'Height': 'Höhe', 'Height (cm)': 'Höhe (cm)', 'Height (m)': 'Höhe (m)', 'Heliports': 'Hubschrauberlandeplätze', 'Help': 'Hilfe', 'Help Wanted': 'Hilfe benötigt', 'Helps to monitor status of hospitals': 'Hilft, den Status von Krankenhäusern zu überwachen', 'Helps to report and search for missing persons': 'Hilft beim Melden von und bei der Suche nach vermissten Personen', 'Here are the solution items related to the problem.': 'Hier sind die mit diesem Problem verbundenen Lösungselemente.', 'Heritage Listed': 'Denkmalgeschützt', 'Hide': 'Verstecken', 'Hierarchy': 'Hierarchie', 'Hierarchy Level 0 Name (i.e. Country)': 'Hierarchiestufe 0 Name (d.h. 
Land)', 'Hierarchy Level 1 Name (e.g. State or Province)': 'Hierarchiestufe 1 Name (z. B. Land oder Provinz / Gebiet)', 'Hierarchy Level 2 Name (e.g. District or County)': 'Hierarchiestufe 2 Name (z. B. Bezirk)', 'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Hierarchiestufe 3 Name (z. B. Ort / Stadt / Dorf)', 'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'Hierarchiestufe 4 Name (z.B. Nachbarschaft)', 'Hierarchy Level 5 Name': 'Hierarchiestufe 5 Name', 'High Tide Depth': 'Tiefe bei maximaler Tide', 'High Water': 'Hochwasser', 'High': 'Hoch', 'Highest Priority Open Requests': 'Offene Anfragen höchster Priorität', 'History': 'Geschichte', 'Hit the back button on your browser to try again.': 'Verwenden Sie die Zurück-Schaltfläche Ihres Browsers um es erneut zu versuchen.', 'Holiday Address': 'Urlaubsadresse', 'Home Address': 'Heimatadresse', 'Home Country': 'Land des Wohnsitzes', 'Home Crime': 'Häusliche Kriminalität', 'Home': 'Startseite', 'Hospital Details': 'Details zum Krankenhaus', 'Hospital Status Report': 'Statusbericht zum Krankenhaus', 'Hospital information added': 'Krankenhausinformationen hinzugefügt', 'Hospital information deleted': 'Krankenhausinformationen gelöscht', 'Hospital information updated': 'Krankenhausinformationen aktualisiert', 'Hospital status assessment.': 'Beurteilung des Zustands des Krankenhauses.', 'Hospital': 'Krankenhaus', 'Hospitals': 'Krankenhäuser', 'Hour': 'Stunde', 'Hours': 'Stunden', 'Hours by': 'Stunden gem.', 'Hours by Program Import': 'Stunden gem. Programm Import', 'Hours by Program Report': 'Stunden nach Programmbericht', 'Hours by Role Import': 'Stunden gem. Rollen Import', 'Hours by Role Report': 'Stunden nach Rollenbericht', 'Household kits received': 'Haushaltsbausätze (-kits) erhalten', 'Household kits, source': 'Herkunft der Haushaltbausätze (-kits)', 'How does it work?': 'Wie funktioniert das?', 'How is this person affected by the disaster? (Select all that apply)': 'Wie ist diese Person von der Katastrophe betroffen? 
(Wählen Sie alles Zutreffende aus)', 'How long will the food last?': 'Wie lange werden die Lebensmittel reichen?', 'How many Boys (0-17 yrs) are Dead due to the crisis': 'Wie viele Jungen (0-17 Jahre) sind durch die Krise umgekommen', 'How many Boys (0-17 yrs) are Injured due to the crisis': 'Wie viele Jungen (0-17 Jahre) sind durch die Krise verletzt worden', 'How many Boys (0-17 yrs) are Missing due to the crisis': 'Wie viele Jungen (0-17 Jahre) sind aufgrund der Krise verschollen', 'How many Girls (0-17 yrs) are Dead due to the crisis': 'Wie viele Mädchen (0-17 Jahre) sind durch die Krise umgekommen', 'How many Girls (0-17 yrs) are Injured due to the crisis': 'Wie viele Mädchen (0-17 Jahre) sind durch die Krise verletzt worden', 'How many Girls (0-17 yrs) are Missing due to the crisis': 'Wie viele Mädchen (0-17 Jahre) sind aufgrund der Krise verschollen', 'How many Men (18 yrs+) are Dead due to the crisis': 'Wie viele Männer (18+ Jahre) sind durch die Krise umgekommen', 'How many Men (18 yrs+) are Injured due to the crisis': 'Wie viele Männer (18+ Jahre) wurden wegen der Krise verletzt', 'How many Men (18 yrs+) are Missing due to the crisis': 'Wie viele Männer (18+ Jahre) sind aufgrund der Krise verschollen', 'How many Women (18 yrs+) are Dead due to the crisis': 'Wie viele Frauen (18+ Jahre) sind durch die Krise umgekommen', 'How many Women (18 yrs+) are Injured due to the crisis': 'Wie viele Frauen (18+ Jahre) wurden wegen der Krise verletzt', 'How many Women (18 yrs+) are Missing due to the crisis': 'Wie viele Frauen (18+ Jahre) sind aufgrund der Krise verschollen', 'How many days will the supplies last?': 'Wie viele Tage werden die Waren reichen?', 'How many new cases have been admitted to this facility in the past 24h?': 'Wie viele neue Fälle wurden während der letzten 24 Stunden dieser Einrichtung zugewiesen?', 'How many of the patients with the disease died in the past 24h at this facility?': 'Wie viele der Patienten mit dieser Krankheit sind in den letzten 24 Stunden in dieser Einrichtung gestorben?', 'How many patients with the disease are currently hospitalized at this facility?': 'Wie viele Patienten mit dieser Krankheit sind momentan in dieser Einrichtung in Behandlung?', 'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'Wie viele Details sind sichtbar. Eine hohe Zoom-Stufe bedeutet viele Details, aber keine gute Übersicht. 
Eine niedrige Zoom-Stufe führt zu einer guten Übersicht, es fehlen aber die Details.', 'Hub': 'Zentrum', 'Human Resource Details': 'Details zur Personalressource', 'Human Resource Management': 'Management der Personalressourcen', 'Human Resource added': 'Personalressource hinzugefügt', 'Human Resource removed': 'Personalressource entfernt', 'Human Resource updated': 'Personalressource aktualisiert', 'Human Resource': 'Personalressource', 'Human Resources': 'Personalressourcen', 'Humanitarian NGO': 'Humanitäre NGO', 'Humanitarian Use': 'Humanitäre Zwecke', 'Hurricane Force Wind': 'Wind in Hurrikanstärke', 'Hurricane': 'Wirbelsturm', 'Hygiene kits received': 'Hygienekits erhalten', 'Hygiene kits, source': 'Herkunft der Hygienekits', 'Hygiene practice': 'Hygienepraxis', 'Hygiene problems': 'Hygieneprobleme', 'I am available in the following area(s)': 'Ich stehe in folgenden Bereichen zur Verfügung', 'IATA': 'IATA', 'ICAO': 'ICAO', 'ID Tag Number': 'Identifikations-Etikett-Nummer', 'ID Tag': 'Identifikationsetikett', 'ID type': 'ID-Typ', 'Ice Pressure': 'Eisdruck', 'Iceberg': 'Eisberg', 'Identification Report': 'Identifizierungsbericht', 'Identification Reports': 'Identifizierungsberichte', 'Identification Status': 'Status der Identifizierung', 'Identification': 'Identifizierung', 'Identified as': 'Identifiziert als', 'Identified by': 'Identifiziert durch', 'Identity Details': 'Details zur Identität', 'Identity added': 'Identität hinzugefügt', 'Identity deleted': 'Identität gelöscht', 'Identity updated': 'Identität aktualisiert', 'Identity': 'Identität', 'If a ticket was issued then please provide the Ticket ID.': 'Wenn ein Ticket ausgestellt wurde, bitte die Ticket-ID angeben.', 'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'Wenn ein Benutzer sicherstellt, dass er oder sie eine Email-Adresse in dieser Domäne besitzt, wird das Approver Feld dazu verwendet, um zu bestimmen, ob und von wem weitere Genehmigungen erforderlich sind.', 'If it is a URL leading to HTML, then this will downloaded.': 'Handelt es sich um eine URL zu einer HTML Seite, dann wird diese heruntergeladen.', 'If neither are defined, then the Default Marker is used.': 'Wenn nichts davon definiert wurde, wird der Standard Marker (Symbol) verwendet.', 'If no marker defined then the system default marker is used': 'Wenn keine Markierung (Symbolisierung) definiert ist, dann wird die im System festgelegte Standardmarkierung verwendet', 'If no, specify why': 'Wenn nein, geben Sie bitte einen Grund dafür an', 'If none are selected, then all are searched.': 'Wird keine ausgewählt, werden alle durchsucht.', 'If the location is a geographic area, then state at what level here.': 'Wenn der Ort ein geographisches Gebiet ist, geben Sie bitte eine entsprechende Stufe an.', 'If the request type is "Other", please enter request details here.': 'Wenn der Anfragetyp "Andere" ist, geben Sie bitte hier weitere Details zur Anfrage ein.', 'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'Wenn dieses Feld ausgefüllt ist, dann wird ein Benutzer mit der gleichen Domainadresse automatisch als Mitarbeiter dieser Organisation zugeordnet.', 'If this is set to True then mails will be deleted from the server after downloading.': "Wenn dies auf 'Wahr' gesetzt ist, dann werden die Mails nach dem Herunterladen vom Server gelöscht.", 'If this record should be restricted 
then select which role is required to access the record here.': 'Wenn der Zugriff auf diesen Datensatz beschränkt werden soll, wählen Sie hier die Rolle aus, die für den Zugriff erforderlich ist.', 'If this record should be restricted then select which role(s) are permitted to access the record here.': 'Wenn dieser Eintrag beschränkt werden soll, dann wählen Sie hier aus, welche Rolle(n) für den Zugriff auf den Eintrag berechtigt sind.', 'If yes, specify what and by whom': 'Wenn ja, geben Sie an, was und von wem', 'If yes, which and how': 'Wenn ja, welche und wie', 'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'Wenn Sie kein Referenzdokument angeben, wird stattdessen Ihre Mailadresse angezeigt, damit die Daten verifiziert werden können.', 'If you know what the Geonames ID of this location is then you can enter it here.': 'Wenn Sie die Geonames-ID dieses Standortes kennen, dann können Sie diese hier eingeben.', 'If you know what the OSM ID of this location is then you can enter it here.': 'Wenn Sie die OSM-ID dieses Standortes kennen, dann können Sie diese hier eingeben.', 'If you need to add a new document then you can click here to attach one.': 'Wenn Sie ein neues Dokument hinzufügen wollen, dann können Sie hier klicken, um eines anzufügen.', 'If you want several values, then separate with': 'Wenn Sie mehrere Werte möchten, dann trennen Sie diese mit', 'If you would like to help, then please': 'Wenn Sie helfen möchten, dann bitte', 'Ignore Errors?': 'Fehler ignorieren?', 'Illegal Immigrant': 'Illegaler Einwanderer', 'Image Details': 'Details zum Bild', 'Image Tags': 'Tags für Bild', 'Image Type': 'Typ des Bilds', 'Image Upload': 'Bild hochladen', 'Image added': 'Bild hinzugefügt', 'Image deleted': 'Bild gelöscht', 'Image updated': 'Bild aktualisiert', 'Image': 'Bild', 'Imagery': 'Bilddaten', 'Images': 'Bilder', 'Impact Assessments': 'Folgenabschätzungen', 'Impact Details': 'Details zur Folge/Auswirkung', 'Impact Type Details': 'Details zum Typ der Auswirkung', 'Impact Type added': 'Typ der Auswirkung hinzugefügt', 'Impact Type deleted': 'Typ der Auswirkung gelöscht', 'Impact Type updated': 'Typ der Auswirkung aktualisiert', 'Impact Type': 'Auswirkungsart', 'Impact Types': 'Auswirkungsarten', 'Impact added': 'Auswirkung hinzugefügt', 'Impact deleted': 'Auswirkung gelöscht', 'Impact updated': 'Auswirkung aktualisiert', 'Impacts': 'Auswirkungen', 'Import & Export Data': 'Import & Export von Daten', 'Import Catalog Items': 'Importiere Katalogartikel', 'Import Data': 'Import von Daten', 'Import Event Types': 'Importiere Ereignistypen', 'Import File': 'Datei importieren', 'Import Heliports': 'Hubschrauberlandeplätze importieren', 'Import Incident Types': 'Vorfallstypen importieren', 'Import Locations': 'Gebiete/Standorte importieren', 'Import Projects': 'Projekte importieren', 'Import Staff': 'Mitarbeiter importieren', 'Import Suppliers': 'Lieferanten importieren', 'Import Training Participants': 'Kursteilnehmer importieren', 'Import Users': 'Import von Benutzern', 'Import Volunteers': 'Freiwillige importieren', 'Import Warehouse Stock': 'Warenlagerbestand importieren', 'Import Warehouses': 'Warenlager importieren', 'Import and Export': 'Import und Export', 'Import from CSV': 'Import einer CSV-Datei', 'Import from OpenStreetMap': 'Import aus OpenStreetMap', 'Import from Ushahidi Instance': 'Import aus Ushahidi Instanz', 'Import Hours': 'Import Stundenliste', 'Import if Master': 'Import wenn Master', 'Import multiple tables as 
CSV': 'Mehrere Tabellen als CSV importieren', 'Import Participant List': 'Import Teilnehmerliste', 'Import Template Layout': 'Import Vorlagenlayout', 'Import Templates': 'Import Vorlagen', 'Import': 'Import', 'Important': 'Wichtig', 'Importantly where there are no aid services being provided': 'Bedeutsam, wo keine Hilfsleistungen angeboten werden', 'Importing data from spreadsheets': 'Importieren von Daten aus Tabellendokumenten', 'Improper decontamination': 'Unzureichende Dekontamination', 'Improper handling of dead bodies': 'Unzureichende Behandlung von Leichen', 'In Catalogs': 'In Katalogen', 'In Inventories': 'In den Beständen', 'In Process': 'In Bearbeitung', 'In Progress': 'In Arbeit', 'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Beim Aufbau des Fensters wird die Karte maximiert, um das Fenster auszufüllen, daher ist es nicht notwendig hier einen großen Wert festzulegen.', 'Inbound Mail Settings': 'Eingehende Mail-Einstellungen', 'InBox': 'Eingang', 'Incident Categories': 'Kategorien für Vorfälle', 'Incident Report Details': 'Details zum Vorfall-Bericht', 'Incident Report added': 'Vorfall-Bericht hinzugefügt', 'Incident Report deleted': 'Vorfall-Bericht gelöscht', 'Incident Report updated': 'Vorfall-Bericht aktualisiert', 'Incident Report': 'Vorfall-Bericht', 'Incident Reporting System': 'Vorfall-Berichtsystem', 'Incident Reporting': 'Vorfall-Berichtswesen', 'Incident Reports': 'Vorfall-Berichte', 'Incident': 'Vorfall', 'Incidents': 'Vorfälle', 'Incident Type': 'Vorfallstyp', 'Incident Types': 'Typen von Vorfällen', 'Incident Timeline': 'Zeitleiste der Vorfälle', 'Incoming Shipment canceled': 'Eingehende Sendung abgebrochen', 'Incoming Shipment updated': 'Eingehende Sendung aktualisiert', 'Incoming': 'Eingehend', 'Incomplete': 'Unvollständig', 'Individuals': 'Einzelpersonen', 'Indirect support cost HQ': 'Indirekte Unterstützungskosten Hauptquartier', 'Industrial Crime': 'Industrielle Kriminalität', 'Industrial': 'Industriell', 'Industry Fire': 'Industriefeuer', 'Infant (0-1)': 'Säugling (0-1)', 'Infectious Disease (Hazardous Material)': 'Ansteckende Krankheit (gefährliches Material)', 'Infectious Disease': 'Ansteckende Krankheit', 'Infectious Diseases': 'Infektionskrankheiten', 'Infestation': 'Befall', 'Informal Leader': 'Informeller Leiter', 'Informal camp': 'Informelles Camp', 'Information gaps': 'Informationslücken', 'Infusion catheters available': 'Infusionskatheter verfügbar', 'Infusion catheters need per 24h': 'Benötigte Infusionskatheter pro 24h', 'Infusion catheters needed per 24h': 'Benötigte Infusionskatheter pro 24h', 'Infusions available': 'Infusionen verfügbar', 'Infusions needed per 24h': 'Benötigte Infusionen pro 24h', 'Initials': 'Namenskürzel', 'Inspected': 'Geprüft', 'Inspection Date': 'Prüfdatum', 'Inspection date and time': 'Datum und Uhrzeit der Überprüfung', 'Inspection time': 'Zeit der Überprüfung', 'Inspector ID': 'Prüfer-ID', 'Instant Porridge': 'Hafer Fertigbrei', 'Institution': 'Institution', 'Instructor': 'Ausbilder', 'Insufficient vars: Need module, resource, jresource, instance': 'Unzureichende vars: Benötige module, resource, jresource, instance', 'Insufficient': 'Nicht ausreichend', 'Intake Items': 'Annahme Güter', 'Intergovernmental Organization': 'Zwischenstaatliche Organisation', 'Interior walls, partitions': 'Innenwände, Trennwände', 'Internal Resources': 'Interne Ressourcen', 'Internal Resource': 'Interne Ressource', 'Internal Shipment': 'Interne Lieferung', 'Internal 
State': 'Interner Zustand', 'International NGO': 'Internationale NGO', 'International Organization': 'Internationale Organisation', 'Interview taking place at': 'Ort des Interviews', 'inv Home Page': 'inv Homepage', 'Invalid Query': 'Ungültige Abfrage', 'Invalid request!': 'Ungültige Anfrage!', 'Invalid ticket': 'Ungültiges Ticket', 'Invalid': 'Ungültig', 'Inventories': 'Bestände', 'Inventory': 'Bestand', 'Inventory Item Details': 'Details zu einzelnem Bestandsartikel', 'Inventory Item updated': 'Bestandsartikel aktualisiert', 'Inventory Item': 'Bestandsartikel', 'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Bestandsartikel umfassen sowohl Verbrauchsmaterialien als auch solche, die am Bestimmungsort in Anlagen umgewandelt werden.', 'Inventory Items': 'Bestandsartikel', 'Inventory Management': 'Lagerbestandsverwaltung', 'Inventory of Effects': 'Bestandsliste der persönlichen Habe', 'Is editing level L%d locations allowed?': 'Ist die Bearbeitung von Level L%d Standorten zulässig?', 'Is it safe to collect water?': 'Ist es sicher Wasser zu sammeln?', 'Is this a strict hierarchy?': 'Ist dies eine strenge Hierarchie?', 'Issuing Authority': 'Ausstellende Behörde', 'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Es erfasst nicht nur die Orte wo sie aktiv sind, sondern erfasst auch Informationen über den Umfang der Projekte, die sie im jeweiligen Gebiet durchführen.', 'Item Added to Shipment': 'Artikel der Lieferung hinzugefügt', 'Item Catalog Details': 'Details zum Artikelkatalog', 'Item Categories': 'Artikelkategorien', 'Item Category Details': 'Details zur Artikelkategorie', 'Item Category added': 'Artikelkategorie hinzugefügt', 'Item Category deleted': 'Artikelkategorie gelöscht', 'Item Category updated': 'Artikelkategorie aktualisiert', 'Item Category': 'Artikelkategorie', 'Item Details': 'Details zum Artikel', 'Item Pack Details': 'Details zum Artikelpaket', 'Item Pack added': 'Artikelpaket hinzugefügt', 'Item Pack deleted': 'Artikelpaket gelöscht', 'Item Pack updated': 'Artikelpaket aktualisiert', 'Item Packs': 'Artikelpakete', 'Item Tracking Status': 'Artikel Verfolgungsstatus', 'Item/Description': 'Artikel/Beschreibung', 'Items/Description': 'Artikel/Beschreibung', 'Item added to Inventory': 'Artikel zum Bestand hinzugefügt', 'Item added to shipment': 'Artikel der Lieferung hinzugefügt', 'Item added': 'Artikel hinzugefügt', 'Item already in Bundle!': 'Artikel bereits in Produktpaket!', 'Item already in Kit!': 'Artikel bereits in Ausstattung (Kit)!', 'Item already in budget!': 'Artikel bereits im Budget!', 'Item deleted': 'Artikel gelöscht', 'Item removed from Inventory': 'Artikel aus dem Bestand entfernt', 'Item updated': 'Artikel aktualisiert', 'Item': 'Artikel', 'Items in Category are Vehicles': 'Artikel in dieser Kategorie sind Fahrzeuge', 'Items in Category can be Assets': 'Artikel in dieser Kategorie können Anlagen sein', 'Items': 'Artikel', 'Japanese': 'Japanisch', 'Jerry can': 'Kanister', 'Jew': 'Jude', 'Jewish': 'Jüdisch', 'Job Role Catalog': 'Katalog für Tätigkeiten', 'Job Role Details': 'Details zur Tätigkeit', 'Job Role added': 'Tätigkeit hinzugefügt', 'Job Role deleted': 'Tätigkeit gelöscht', 'Job Role updated': 'Tätigkeit aktualisiert', 'Job Role': 'Tätigkeit', 'Job Roles': 'Tätigkeiten', 'Job Title': 'Berufsbezeichnung', 'Job Title Catalog': 'Katalog der Berufsbezeichnungen', 'Journal Entry Details': 
'Details zum Journaleintrag', 'Journal entry added': 'Journaleintrag hinzugefügt', 'Journal entry deleted': 'Journaleintrag gelöscht', 'Journal entry updated': 'Journaleintrag aktualisiert', 'Key Details': 'Details zum Schlüssel', 'Key added': 'Schlüssel hinzugefügt', 'Key deleted': 'Schlüssel gelöscht', 'Key updated': 'Schlüssel aktualisiert', 'Key': 'Schlüssel', 'Keys': 'Schlüssel', 'Kit Contents': 'Inhalt der Ausstattung (Kit)', 'Kit Details': 'Details zur Ausstattung (Kit)', 'Kit Updated': 'Ausstattung (Kit) aktualisiert', 'Kit added': 'Ausstattung (Kit) hinzugefügt', 'Kit deleted': 'Ausstattung (Kit) gelöscht', 'Kit updated': 'Ausstattung (Kit) aktualisiert', 'Kits': 'Ausstattungen (Kits)', 'Kit': 'Ausstattung (Kit)', 'Kit?': 'Ausstattung (Kit)?', 'Kitting': 'Ausstattung zusammenstellen', 'Known Identities': 'Bekannte Identitäten', 'Known incidents of violence against women/girls': 'Bekannte Fälle von Gewalt gegen Frauen/Mädchen', 'Known incidents of violence since disaster': 'Bekannte Fälle von Gewalt seit der Katastrophe', 'LICENSE': 'LIZENZ', 'Lack of material': 'Mangel an Material', 'Lack of school uniform': 'Fehlende Schuluniformen', 'Lack of supplies at school': 'Fehlende Vorräte an der Schule', 'Lack of transport to school': 'Fehlende Transportmöglichkeiten zur Schule', 'Lactating women': 'Stillende Frauen', 'Lahar': 'Mure', 'Landslide': 'Erdrutsch', 'Language': 'Sprache', 'Last Downloaded': 'Zuletzt heruntergeladen', 'Last Name': 'Nachname', 'Last Pull': 'Letzter Pull', 'Last Push': 'Letzter Push', 'Last known location': 'Letzte bekannte Position', 'Last synchronization time': 'Zeitpunkt der letzten Synchronisierung', 'Last updated by': 'Letzte Aktualisierung durch', 'Last updated on': 'Letzte Aktualisierung am', 'Last updated': 'Letzte Aktualisierung', 'Last': 'Letzte', 'Latest Information': 'Aktuelle Informationen', 'Latitude & Longitude': 'Breitengrad und Längengrad', 'Latitude is North-South (Up-Down).': 'Breitengrad ist Nord-Süd (Oben-Unten).', 'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Der Breitengrad ist Null am Äquator, positiv auf der nördlichen und negativ auf der südlichen Erdhalbkugel.', 'Latitude of Map Center': 'Breitengrad der Kartenmitte', 'Latitude of far northern end of the region of interest.': 'Nördlichster Breitengrad der betroffenen Region', 'Latitude of far southern end of the region of interest.': 'Südlichster Breitengrad der betroffenen Region', 'Latitude should be between': 'Breite muss zwischen', 'Latitude': 'Breitengrad', 'Latrines': 'Latrinen', 'Law enforcement, military, homeland and local/private security': 'Exekutive, Militär, Heimatschutz und lokale/private Sicherheitsdienste', 'Layer Poperties': 'Eigenschaften der Kartenebene', 'Layer added': 'Layer hinzugefügt', 'Layer deleted': 'Layer gelöscht', 'Layer updated': 'Layer aktualisiert', 'Layer': 'Kartenebene', 'Layers updated': 'Kartenebenen aktualisiert', 'Layers': 'Kartenebenen', 'Leader': 'Anführer', 'Lead Implementer': 'Hauptimplementierer', 'Legend Format': 'Format der Legende', 'Legend': 'Legende', 'Length (m)': 'Länge (m)', 'Less Options': 'Weniger Optionen', 'Level of Award': 'Stufe der Auszeichnung', 'Level 1 Assessment Details': 'Stufe 1 Beurteilung - Details', 'Level 1 Assessment added': 'Stufe 1 Beurteilung hinzugefügt', 'Level 1 Assessment deleted': 'Stufe 1 Beurteilung entfernt', 'Level 1 Assessment updated': 'Stufe 1 Beurteilung aktualisiert', 'Level 1 Assessments': 'Stufe 1 Beurteilungen', 'Level 1': 'Stufe 1', 
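# NOTE: entries such as 'Latitude should be between' (above) and 'Longitude
# should be between' (further below) are sentence fragments that the validators
# complete at runtime by appending the allowed range, so the German phrasing
# has to remain readable with text appended; German verb-final word order
# cannot be recovered by plain concatenation. Hypothetical composition (the
# appended range text is an assumption, not taken from the validator code):
#   str(T('Latitude should be between')) + ' -90 und 90'
#   # -> 'Breite muss zwischen -90 und 90'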
'Level 2 Assessment Details': 'Stufe 2 Beurteilung - Details', 'Level 2 Assessment added': 'Stufe 2 Beurteilung hinzugefügt', 'Level 2 Assessment deleted': 'Stufe 2 Beurteilung entfernt', 'Level 2 Assessment updated': 'Stufe 2 Beurteilung aktualisiert', 'Level 2 Assessments': 'Stufe 2 Beurteilungen', 'Level 2 or detailed engineering evaluation recommended': 'Stufe 2 oder detaillierte technische Evaluierung empfohlen', 'Level 2': 'Stufe 2', 'Level': 'Stufe', 'Library support not available for OpenID': 'Bibliotheksunterstützung für OpenID nicht verfügbar', 'License Plate': 'Nummernschild', 'LineString': 'LineString', 'Link to this result': 'Link zu diesem Ergebnis', 'List / Add Baseline Types': 'Arten von Referenzdaten auflisten / hinzufügen', 'List / Add Impact Types': 'Arten von Auswirkungen auflisten / hinzufügen', 'List / Add Services': 'Leistungen auflisten / hinzufügen', 'List / Add Types': 'Typen auflisten / hinzufügen', 'List Activities': 'Aktivitäten auflisten', 'List All Assets': 'Alle Anlagen auflisten', 'List All Catalog Items': 'Auflisten aller Artikel aus dem Katalog', 'List All Commitments': 'Auflisten aller Zusagen', 'List All Entries': 'Alle Einträge auflisten', 'List All Item Categories': 'Auflisten aller Artikelkategorien', 'List All Memberships': 'Alle Mitgliedschaften auflisten', 'List All Organization Approvers & Whitelists': 'Zeige alle Organisationsbestätiger & Whitelists', 'List All Received Shipments': 'Auflisten aller empfangenen Lieferungen', 'List All Records': 'Auflisten aller Datensätze', 'List All Requested Items': 'Auflisten aller angefragten Artikel', 'List All Requests': 'Auflisten aller Anfragen', 'List All Roles': 'Zeige alle Rollen', 'List All Sent Shipments': 'Liste aller gesendeten Lieferungen', 'List All Users': 'Zeige alle Nutzer', 'List All Vehicles': 'Liste aller Fahrzeuge', 'List All': 'Alle auflisten', 'List Alternative Items': 'Liste alternativer Artikel', 'List Assessment Summaries': 'Zusammenfassungen der Beurteilungen auflisten', 'List Assessments': 'Beurteilungen auflisten', 'List Assets': 'Anlagen auflisten', 'List Availability': 'Liste Verfügbarkeit', 'List Baseline Types': 'Liste der Typen von Referenzdaten', 'List Baselines': 'Liste der Referenzdaten', 'List Brands': 'Marken auflisten', 'List Budgets': 'Budgets auflisten', 'List Bundles': 'Produktpakete auflisten', 'List Camp Services': 'Liste der Leistungen im Camp', 'List Camp Types': 'Liste Typen von Camps', 'List Camps': 'Liste Camps', 'List Catalog Items': 'Katalogelemente auflisten', 'List Catalogs': 'Liste Kataloge', 'List Certificates': 'Liste Zertifikate', 'List Certifications': 'Liste Zertifizierungen', 'List Checklists': 'Checklisten auflisten', 'List Cluster Subsectors': 'Cluster Teilbereiche auflisten', 'List Clusters': 'Cluster auflisten', 'List Commitment Items': 'Liste zugesagter Artikel', 'List Commitments': 'Liste Zusagen', 'List Competencies': 'Liste Kompetenzen', 'List Competency Ratings': 'Liste Kompetenzrating', 'List Conflicts': 'Liste Konflikte', 'List Contact Information': 'Liste Kontaktinformationen', 'List Contacts': 'Liste Kontakte', 'List Course Certificates': 'Liste Kurszertifikate', 'List Courses': 'Liste Kurse', 'List Credentials': 'Liste von Qualifikationen', 'List Current': 'Aktuelle Liste', 'List Documents': 'Liste Dokumente', 'List Donors': 'Liste Spender', 'List Events': 'Liste Ereignisse', 'List Facilities': 'Liste Einrichtungen', 'List Feature Layers': 'Liste Objekt-Layer', 'List Flood Reports': 'Liste Flutberichte', 'List Groups': 'Liste Gruppen', 'List 
Groups/View Members': 'Liste Gruppen/Anzeige der Mitglieder', 'List Hospitals': 'Liste Krankenhäuser', 'List Human Resources': 'Liste der personellen Ressourcen', 'List Identities': 'Identitäten auflisten', 'List Images': 'Bilder auflisten', 'List Impact Assessments': 'Folgenabschätzungen auflisten', 'List Impact Types': 'Auswirkungsarten auflisten', 'List Impacts': 'Auswirkungen auflisten', 'List Incident Reports': 'Vorfallberichte auflisten', 'List Item Categories': 'Liste Artikelkategorien', 'List Item Packs': 'Liste der Artikelpakete', 'List Items in Inventory': 'Liste der Artikel im Bestand', 'List Items': 'Liste der Artikel', 'List Job Roles': 'Liste der Tätigkeiten', 'List Keys': 'Schlüssel auflisten', 'List Kits': 'Liste Ausstattungen (Kits)', 'List Layers': 'Liste Layer', 'List Level 1 Assessments': 'Liste Stufe 1 Beurteilungen', 'List Level 1 assessments': 'Liste Stufe 1 Beurteilungen', 'List Level 2 Assessments': 'Liste Stufe 2 Beurteilungen', 'List Level 2 assessments': 'Liste Stufe 2 Beurteilungen', 'List Locations': 'Standorte auflisten', 'List Log Entries': 'Protokolleinträge auflisten', 'List Map Profiles': 'Liste der Kartenkonfigurationen', 'List Markers': 'Marker/Symbole auflisten', 'List Members': 'Mitglieder auflisten', 'List Memberships': 'Mitgliedschaften auflisten', 'List Messages': 'Nachrichten auflisten', 'List Missing Persons': 'Vermisste Personen auflisten', 'List Missions': 'Liste Aufträge', 'List Need Types': 'Bedarfstypen auflisten', 'List Needs': 'Bedarf auflisten', 'List Offices': 'Liste der Büros', 'List Organizations': 'Liste der Organisationen', 'List Peers': 'Liste der Peers', 'List Personal Effects': 'Liste der persönlichen Habe', 'List Persons': 'Liste der Personen', 'List Photos': 'Liste der Bilder', 'List Population Statistics': 'Liste Bevölkerungsstatistiken', 'List Positions': 'Liste der Positionen', 'List Problems': 'Liste der Probleme', 'List Projections': 'Liste der Kartenprojektionen', 'List Projects': 'Liste Projekte', 'List Rapid Assessments': 'Liste Schnell-Beurteilungen', 'List Recurring Requests': 'Liste wiederkehrender Anfragen', 'List Received Items': 'Liste empfangene Artikel', 'List Received Shipments': 'Liste empfangene Lieferungen', 'List Records': 'Liste Datensätze', 'List Registrations': 'Liste Registrierungen', 'List Reports': 'Liste Berichte', 'List Request Items': 'Angefragte Artikel auflisten', 'List Requests': 'Anfragen auflisten', 'List Resources': 'Ressourcen auflisten', 'List Rivers': 'Flüsse auflisten', 'List Roles': 'Rollen auflisten', 'List Rooms': 'Liste Räume', 'List Scenarios': 'Liste Szenarien', 'List Sections': 'Abschnitte auflisten', 'List Sectors': 'Bereiche auflisten', 'List Sent Items': 'Gesendete Artikel auflisten', 'List Sent Shipments': 'Liste verschickte Lieferungen', 'List Service Profiles': 'Leistungsprofile auflisten', 'List Settings': 'Einstellungen auflisten', 'List Shelter Services': 'Leistungen der Unterkunft auflisten', 'List Shelter Types': 'Typen der Unterkunft auflisten', 'List Shelters': 'Unterkünfte auflisten', 'List Site Needs': 'Bedarf des Standorts auflisten', 'List Skill Equivalences': 'Liste Fähigkeits-Vergleichbarkeiten', 'List Skill Provisions': 'Fähigkeits-Bereitstellungen auflisten', 'List Skill Types': 'Liste der Typen von Fähigkeiten', 'List Skills': 'Liste Fähigkeiten', 'List Solutions': 'Liste Lösungen', 'List Staff Types': 'Mitarbeitertypen auflisten', 'List Status': 'Status auflisten', 'List Subscriptions': 'Abonnements anzeigen', 'List Subsectors': 'Teilbereiche auflisten', 'List Support Requests': 
'List Survey Answers': 'Liste Umfrage-Antworten', 'List Survey Questions': 'Liste Umfrage-Fragen', 'List Survey Series': 'Liste Umfrage-Serien', 'List Survey Templates': 'Liste Umfrage-Vorlagen', 'List Tasks': 'Aufgaben auflisten', 'List Teams': 'Teams auflisten', 'List Themes': 'Themen auflisten', 'List Tickets': 'Tickets auflisten', 'List Tracks': 'Tracks auflisten', 'List Trainings': 'Schulungen/Ausbildung auflisten', 'List Units': 'Einheiten auflisten', 'List Users': 'Liste Benutzer', 'List Warehouses': 'Liste Warenlager', 'List all': 'Alle auflisten', 'List available Scenarios': 'Liste verfügbarer Szenarien', 'List of Items': 'Liste der Artikel', 'List of Missing Persons': 'Liste der vermissten Personen', 'List of Peers': 'Liste der Peers', 'List of Reports': 'Liste der Berichte', 'List of Requests': 'Liste der Anfragen', 'List of Spreadsheets uploaded': 'Liste der hochgeladenen Tabellen', 'List of Spreadsheets': 'Liste der Tabellen', 'List of Volunteers for this skill set': 'Liste der Freiwilligen für dieses Fachgebiet', 'List of Volunteers': 'Liste der Freiwilligen', 'List of addresses': 'Liste der Adressen', 'List unidentified': 'Nicht identifizierte Objekte auflisten', 'List': 'Liste', 'List/Add': 'Auflisten/Hinzufügen', 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Liste "Wer macht was & wo". Ermöglicht Hilfsorganisationen, ihre Aktivitäten zu koordinieren', 'Live Help': 'Live-Hilfe', 'Livelihood': 'Lebensgrundlage', 'Load Cleaned Data into Database': 'Bereinigte Daten in die Datenbank laden', 'Load Raw File into Grid': 'Unformatierte Datei ins Grid laden', 'Loading': 'Wird geladen', 'Loading Equipment': 'Be-/Entladeausstattung', 'Local Name': 'Lokaler Name', 'Local Names': 'Lokale Namen', 'Location 1': 'Standort 1', 'Location 2': 'Standort 2', 'Location Detail': 'Details zum Gebiet/Standort', 'Location Details': 'Standortdetails', 'Location Hierarchies': 'Standort-Hierarchien', 'Location Hierarchy Level 0 Name': 'Standort-Hierarchie Level 0 Name', 'Location Hierarchy Level 1 Name': 'Standort-Hierarchie Level 1 Name', 'Location Hierarchy Level 2 Name': 'Standort-Hierarchie Level 2 Name', 'Location Hierarchy Level 3 Name': 'Standort-Hierarchie Level 3 Name', 'Location Hierarchy Level 4 Name': 'Standort-Hierarchie Level 4 Name', 'Location Hierarchy Level 5 Name': 'Standort-Hierarchie Level 5 Name', 'Location added': 'Standort hinzugefügt', 'Location deleted': 'Standort gelöscht', 'Location group cannot be a parent.': 'Standortgruppe kann kein übergeordnetes Element sein.', 'Location group cannot have a parent.': 'Standortgruppe kann kein übergeordnetes Element haben.', 'Location groups can be used in the Regions menu.': 'Standortgruppen können im Gebietsmenü verwendet werden.', 'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': 'Standortgruppen können genutzt werden, um die Anzeige auf der Karte und in den Suchergebnissen auf Objekte zu beschränken, die von Standorten der Gruppe abgedeckt werden.', 'Location updated': 'Standort aktualisiert', 'Location': 'Standort', 'Locations of this level need to have a parent of level': 'Standorte dieser Ebene müssen ein übergeordnetes Element der folgenden Ebene haben', 'Locations': 'Standorte', 'Lockdown': 'Sperrung', 'Log Entry Details': 'Details zum Protokolleintrag', 'Log entry added': 'Protokolleintrag hinzugefügt', 'Log entry deleted': 'Protokolleintrag gelöscht', 'Log entry updated': 'Protokolleintrag aktualisiert', 'Log': 'Protokoll',
'Logged By': 'Protokolliert durch', 'Logged in': 'Angemeldet', 'Login': 'Anmeldung', 'Logistics Management System': 'Logistik-Managementsystem', 'Logistics': 'Logistik', 'Logo file %s missing!': 'Datei mit Logo %s fehlt!', 'Logout': 'Abmelden', 'Long Name': 'Langschriftlicher Name', 'Long Text': 'Langer Text', 'Longitude is West - East (sideways).': 'Die Geographische Länge ist West-Ost (seitlich).', 'Longitude is West-East (sideways).': 'Die Geographische Länge ist West-Ost (seitlich).', 'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Die Geographische Länge ist 0 am Nullmeridian (GMT) und positiv in Richtung Osten (z.B. Großteil Europas und ganz Asien). In Richtung Westen - über den Atlantik und nach Amerika - ist sie negativ.', 'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Die Geographische Länge ist 0 am Nullmeridian (durch Greenwich, Vereinigtes Königreich) und positiv in Richtung Osten (z.B. Großteil Europas und ganz Asien). In Richtung Westen - über den Atlantik und nach Amerika - ist sie negativ.', 'Longitude of Map Center': 'Geographische Länge des Kartenmittelpunktes', 'Longitude of far eastern end of the region of interest.': 'Geographische Länge des östlichen Endes des Interessensgebietes.', 'Longitude of far western end of the region of interest.': 'Geographische Länge des westlichen Endes des Interessensgebietes.', 'Longitude should be between': 'Die Geographische Länge soll in folgendem Bereich liegen', 'Longitude': 'Geographische Länge', 'Looting': 'Plünderung', 'Lost Password': 'Kennwort vergessen', 'Lost': 'Verloren', 'Low': 'Niedrig', 'Low Tide Depth': 'Tiefe bei minimaler Tide', 'Magnetic Storm': 'Magnetischer Sturm', 'Main Facility': 'Haupteinrichtung', 'Major Damage': 'Großer Schaden', 'Major expenses': 'Hauptausgaben', 'Major outward damage': 'Großer nach außen gerichteter Schaden', 'Major': 'Maßgeblich', 'Make Commitment': 'Eine Zusage machen', 'Make New Commitment': 'Neue Zusage machen', 'Make Request': 'Anfrage erstellen', 'Make Supplies Request': 'Anfrage nach Vorräten stellen', 'Make preparations per the <instruction>': 'Vorbereitungen treffen für <instruction>', 'Male': 'Männlich', 'Manage Layers in Catalog': 'Kartenebenen im Katalog verwalten', 'Manage Relief Item Catalogue': 'Katalog der Unterstützungselemente verwalten', 'Manage Users & Roles': 'Benutzer- und Rollenverwaltung', 'Manage Warehouses/Sites': 'Warenlager/Orte verwalten', 'Manage Your Facilities': 'Eigene Einrichtungen verwalten', 'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': 'Verwaltung der Anfragen nach Vorräten, Anlagen, Mitarbeitern oder anderen Ressourcen. Vergleich mit den Beständen, wo Vorräte angefordert werden.', 'Manage requests of hospitals for assistance.': 'Verwaltung der Anfragen von Krankenhäusern nach Unterstützung.', 'Manage volunteers by capturing their skills, availability and allocation': 'Verwaltung der freiwilligen Helfer anhand ihrer Fähigkeiten, Verfügbarkeit und Zuordnung', 'Managing Office': 'Verwaltungsbüro', 'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Verpflichtend. Bei GeoServer ist das der Name des Layers. In den WFS getCapabilities entspricht es dem Namen des FeatureType (ohne Namespace - Teil hinter dem Doppelpunkt!).',
'Mandatory. The URL to access the service.': 'Verpflichtend. Die Zugriffs-URL des Dienstes.', 'Manual Synchronization': 'Manuelle Synchronisation', 'Manual': 'Anleitung', 'Many': 'Viele', 'Map Center Latitude': 'Geographische Breite des Kartenmittelpunkts', 'Map Center Longitude': 'Geographische Länge des Kartenmittelpunkts', 'Map Profile Details': 'Details zur Kartenkonfiguration', 'Map Profile added': 'Kartenkonfiguration hinzugefügt', 'Map Profile deleted': 'Kartenkonfiguration gelöscht', 'Map Profile removed': 'Kartenkonfiguration entfernt', 'Map Profile updated': 'Kartenkonfiguration aktualisiert', 'Map Profile': 'Kartenkonfiguration', 'Map Profiles': 'Kartenkonfigurationen', 'Map Height': 'Höhe des Kartenfensters', 'Map Service Catalog': 'Karten-Service-Katalog', 'Map Settings': 'Karteneinstellungen', 'Map Styles': 'Kartensymbolisierungen', 'Map Viewing Client': 'Kartenviewer', 'Map Width': 'Breite des Kartenfensters', 'Map Zoom': 'Kartenvergrößerung', 'Map of Hospitals': 'Karte der Krankenhäuser', 'Map of Offices': 'Karte der Büros', 'Map of Requests': 'Karte der Anfragen', 'Map of Vehicles': 'Karte der Fahrzeuge', 'Map': 'Karte', 'Marine Security': 'Hafensicherheit', 'Marital Status': 'Familienstand', 'Marker Details': 'Details zum Marker/Symbol', 'Marker added': 'Marker/Symbol hinzugefügt', 'Mark as duplicate': 'Markiere als Duplikat', 'Marker deleted': 'Marker/Symbol gelöscht', 'Marker updated': 'Marker/Symbol aktualisiert', 'Marker': 'Marker/Symbol', 'Markers': 'Marker/Symbole', 'Master Message Log to process incoming reports & requests': 'Haupt-Nachrichtenprotokoll, um eingehende Berichte und Anfragen zu bearbeiten', 'Master Message Log': 'Haupt-Nachrichtenprotokoll', 'Match Percentage': 'Grad der Übereinstimmung', 'Match Requests': 'Passende Anfragen', 'Match percentage indicates the % match between these two records': 'Der Grad der Übereinstimmung gibt die prozentuale Übereinstimmung zwischen zwei Datensätzen an', 'Match?': 'Übereinstimmung?', 'Matching Catalog Items': 'Übereinstimmende Katalogelemente', 'Matching Items': 'Übereinstimmende Artikel', 'Matching Records': 'Übereinstimmende Datensätze', 'Maximum Extent': 'Maximale Ausdehnung', 'Maximum Location Latitude': 'Maximale Geographische Breite des Gebietes', 'Maximum Location Longitude': 'Maximale Geographische Länge des Gebietes', 'Max Height': 'Max. Höhe', 'Medical and public health': 'Medizinische Betreuung und öffentliches Gesundheitswesen', 'Medium': 'Mittel', 'Megabytes per Month': 'Megabytes pro Monat', 'Member removed from Group': 'Mitglied aus Gruppe entfernt', 'Members': 'Mitglieder', 'Membership Details': 'Details zur Mitgliedschaft', 'Membership Fee': 'Mitgliedsbeitrag', 'Membership Paid': 'Mitgliedsbeitrag bezahlt', 'Membership Types': 'Mitgliedschaftstypen', 'Membership updated': 'Mitgliedschaft aktualisiert', 'Membership': 'Mitgliedschaft', 'Memberships': 'Mitgliedschaften', 'Message Details': 'Details zur Nachricht', 'Message Log': 'Nachrichtenprotokoll', 'Message Variable': 'Nachrichtenvariable', 'Message added': 'Nachricht hinzugefügt', 'Message deleted': 'Nachricht gelöscht', 'Message updated': 'Nachricht aktualisiert', 'Message variable': 'Nachrichtenvariable', 'Message': 'Nachricht', 'Messages': 'Nachrichten', 'Messaging settings updated': 'Einstellungen zur Nachrichtenübertragung aktualisiert', 'Messaging': 'Nachrichtenübertragung', 'Measure Length: Click the points along the path & end with a double-click': 'Längenmessung: Punkte entlang eines Verlaufs anklicken und mit Doppelklick abschließen',
'Meteorite': 'Meteorit', 'Meteorological (inc. flood)': 'Meteorologisch (auch Flut)', 'Method used': 'Verwendete Methode', 'Middle Name': 'Zweiter Vorname', 'Migrants or ethnic minorities': 'Migranten oder ethnische Minderheiten', 'Military': 'Militär', 'Military Grid Reference System PDFs': 'Military Grid Reference System PDFs', 'Minimum Location Latitude': 'Minimale Geographische Breite des Gebietes', 'Minimum Location Longitude': 'Minimale Geographische Länge des Gebietes', 'Minimum shift time is 6 hours': 'Die Mindestdienstzeit beträgt 6 Stunden', 'Minor Damage': 'Kleinere Schäden', 'Minor/None': 'Gering / Keine', 'Minorities participating in coping activities': 'Minderheiten beteiligen sich an Bewältigungsaktivitäten / Krisenbewältigungsaktivitäten', 'Minutes must be a number between 0 and 60': 'Minuten muss eine Zahl zwischen 0 und 60 sein', 'Minutes per Month': 'Minuten pro Monat', 'Minutes should be a number greater than 0 and less than 60': 'Minuten muss eine Zahl größer als 0 und kleiner als 60 sein', 'Miscellaneous': 'Verschiedenes', 'Missing Person Details': 'Nähere Angaben zur vermissten Person', 'Missing Person Registry': 'Register der vermissten Personen', 'Missing Person': 'Vermisste Person', 'Missing Persons Registry': 'Register der vermissten Personen', 'Missing Persons Report': 'Bericht über vermisste Personen', 'Missing Persons': 'Vermisste Personen', 'Missing Report': 'Bericht über Vermisste', 'Missing Senior Citizen': 'Vermisster älterer Bürger', 'Missing Vulnerable Person': 'Vermisste gefährdete Person', 'Missing': 'Fehlend', 'Mission Record': 'Auftragsbericht', 'Mission added': 'Auftrag hinzugefügt', 'Mission deleted': 'Auftrag gelöscht', 'Mission updated': 'Auftrag aktualisiert', 'Missions': 'Aufträge', 'Mobile Basic Assessment': 'Mobile Grundlegende Beurteilung', 'Mobile Commons Channels': 'Mobile Commons Kanäle', 'Mobile Phone': 'Mobiltelefon', 'Mobile': 'Handy', 'Mode': 'Modus', 'Model/Type': 'Modell/Typ', 'Modem Settings': 'Modemeinstellungen', 'Modem settings updated': 'Modemeinstellungen aktualisiert', 'Moderate': 'Moderat', 'Modify Information on groups and individuals': 'Anpassen der Information über Gruppen und Einzelpersonen', 'Modifying data in spreadsheet before importing it to the database': 'Anpassen von Daten in der Tabelle vor dem Import in die Datenbank', 'Module provides access to information on current Flood Levels.': 'Modul bietet Zugriff auf Informationen zum aktuellen Flutpegel.', 'Module': 'Modul', 'Monday': 'Montag', 'Monetization Report': 'Monetarisierungsbericht', 'Monitoring Frequency': 'Monitoring-Frequenz', 'Monthly Cost': 'Monatliche Kosten', 'Monthly Salary': 'Monatliches Gehalt', 'Month': 'Monat', 'Monthly': 'Monatlich', 'Months': 'Monate', 'More': 'Mehr', 'More Options': 'Mehr Optionen', 'Morgue Status': 'Status der Leichenhalle', 'Morgue Units Available': 'Leichenhallenplätze verfügbar', 'Mosque': 'Moschee', 'Motorcycle': 'Motorrad', 'Moustache': 'Schnurrbart', 'MultiPolygon': 'MultiPolygon', 'Multiple Matches': 'Mehrere Übereinstimmungen', 'Multiple': 'Mehrere', 'Muslim': 'Muslim', 'Must a location have a parent location?': 'Muss ein Standort einen übergeordneten Standort haben?', 'My Current function': 'Meine aktuelle Funktion', 'My Tasks': 'Meine Aufgaben', 'N/A': 'Nicht zutreffend', 'NO': 'Nein', 'NZSEE Level 1': 'NZSEE Stufe 1', 'NZSEE Level 2': 'NZSEE Stufe 2', 'Name and/or ID': 'Name und/oder ID', 'Name of Award': 'Name der Auszeichnung',
'Name of Driver': 'Name des Fahrers', 'Name of Institute': 'Name der Institution', 'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'Name der Datei (& optionales Unterverzeichnis), die sich in static befindet und die für den Hintergrund des Headers benutzt werden soll.', 'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Name der Datei (& optionales Unterverzeichnis), die sich in static befindet und für das obere linke Bild verwendet werden soll.', 'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Name der Datei (& optionales Unterverzeichnis), die sich in views befindet und für die Fußzeile verwendet werden soll.', 'Name of the person in local language and script (optional).': 'Name der Person in lokaler Sprache und Schreibweise (optional).', 'Name': 'Name', 'Name, Org and/or ID': 'Name, Org und/oder ID', 'Names can be added in multiple languages': 'Namen können in mehreren Sprachen hinzugefügt werden', 'National ID Card': 'Nationaler Identitätsnachweis', 'National NGO': 'Nationale NGO', 'Nationality of the person.': 'Nationalität der Person.', 'Nationality': 'Nationalität', 'Nautical Accident': 'See-Unfall', 'Nautical Hijacking': 'See-Entführung', 'Need Type Details': 'Details zum Bedarfstyp', 'Need Type added': 'Bedarfstyp hinzugefügt', 'Need Type deleted': 'Bedarfstyp gelöscht', 'Need Type updated': 'Bedarfstyp aktualisiert', 'Need Type': 'Bedarfstyp', 'Need Types': 'Bedarfstypen', 'Need added': 'Bedarf hinzugefügt', 'Need deleted': 'Bedarf gelöscht', 'Need to be logged-in to be able to submit assessments': 'Sie müssen eingeloggt sein, um Beurteilungen zu veröffentlichen', 'Need to configure Twitter Authentication': 'Die Twitter-Authentifizierungsdaten müssen konfiguriert sein', 'Need to specify a Budget!': 'Sie müssen ein Budget angeben!', 'Need to specify a Kit!': 'Sie müssen eine Ausstattung (Kit) angeben!', 'Need to specify a Resource!': 'Sie müssen eine Ressource angeben!', 'Need to specify a bundle!': 'Sie müssen ein Produktpaket angeben!', 'Need to specify a group!': 'Sie müssen eine Gruppe angeben!', 'Need to specify a location to search for.': 'Sie müssen ein Gebiet/Position für die Suche angeben.', 'Need to specify a role!': 'Sie müssen eine Rolle definieren!', 'Need to specify a table!': 'Sie müssen einen Tabellennamen angeben!', 'Need to specify a user!': 'Ein Benutzer muss angegeben werden!', 'Need updated': 'Bedarf aktualisiert', 'Needs Details': 'Details zum Bedarf', 'Needs Maintenance': 'Braucht Wartung', 'Needs to reduce vulnerability to violence': 'Handlungsbedarf, um die Anfälligkeit für Gewalt zu verringern', 'Need': 'Bedarf', 'Needs': 'Bedarf', 'Neighborhood': 'Nachbarschaft', 'Neighbouring building hazard': 'Risiko durch benachbarte Gebäude', 'Neonatal ICU': 'Neugeborenen-Intensivstation', 'Neonatology': 'Neonatologie', 'Network': 'Netzwerk', 'Neurology': 'Neurologie', 'New Assessment reported from': 'Neue Beurteilung erstellt durch', 'New Certificate': 'Neues Zertifikat', 'New Checklist': 'Neue Prüfliste', 'New Entry': 'Neuer Eintrag', 'New Event': 'Neues Ereignis', 'New Item Category': 'Neue Kategorie für Artikel', 'New Job Role': 'Neue Tätigkeit', 'New Location Group': 'Neue Standortgruppe', 'New Location': 'Neuer Standort/Gebiet', 'New Peer': 'Neuer Peer', 'New Record': 'Neuer Datensatz', 'New Request': 'Neue Anfrage', 'New Role': 'Neue Rolle', 'New Scenario': 'Neues Szenario', 'New Skill': 'Neue Fähigkeit',
'New Solution Choice': 'Neue Lösungswahl', 'New Staff Member': 'Neuer Mitarbeiter', 'New Stock Count': 'Neue Anzahl des Lagerbestands', 'New Support Request': 'Neue Unterstützungsanfrage', 'New Synchronization Peer': 'Neuer Synchronisations-Peer', 'New Team': 'Neues Team', 'New Training Course': 'Neuer Schulungskurs', 'New Volunteer': 'Neuer Freiwilliger', 'New cases in the past 24h': 'Neue Fälle in den letzten 24h', 'New': 'Neu', 'Next': 'Nächste', 'No': 'Nein', 'No Activities Found': 'Keine Aktivitäten gefunden', 'No Alternative Items currently registered': 'Zurzeit sind keine alternativen Artikel registriert', 'No Assessment Summaries currently registered': 'Zurzeit sind keine Beurteilungszusammenfassungen registriert', 'No Assessments currently registered': 'Zurzeit sind keine Beurteilungen registriert', 'No Assets currently registered in this event': 'Zurzeit sind keine Anlagen zu diesem Ereignis registriert', 'No Assets currently registered in this scenario': 'Zurzeit sind keine Anlagen zu diesem Szenario registriert', 'No Assets currently registered': 'Zurzeit sind keine Anlagen registriert', 'No Baseline Types currently registered': 'Zurzeit sind keine Referenzdatentypen registriert', 'No Baselines currently registered': 'Zurzeit sind keine Referenzdaten registriert', 'No Brands currently registered': 'Zurzeit sind keine Marken registriert', 'No Budgets currently registered': 'Zurzeit sind keine Budgets registriert', 'No Bundles currently registered': 'Zurzeit sind keine Produktpakete registriert', 'No Camp Services currently registered': 'Zurzeit sind keine Camp-Leistungen registriert', 'No Camp Types currently registered': 'Zurzeit sind keine Typen von Camps registriert', 'No Camps currently registered': 'Zurzeit sind keine Camps registriert', 'No Catalog Items currently registered': 'Zurzeit sind keine Katalogeinträge registriert', 'No Catalogs currently registered': 'Zurzeit sind keine Kataloge registriert', 'No Checklist available': 'Zurzeit sind keine Checklisten verfügbar', 'No Cluster Subsectors currently registered': 'Zurzeit sind keine Cluster-Teilbereiche registriert', 'No Clusters currently registered': 'Zurzeit sind keine Cluster registriert', 'No Commitment Items currently registered': 'Zurzeit sind keine zugesagten Artikel registriert', 'No Commitments': 'Zurzeit sind keine Zusagen registriert', 'No Credentials currently set': 'Derzeit keine Berechtigungen hinterlegt', 'No Details currently registered': 'Zurzeit sind keine Details registriert', 'No Documents found': 'Keine Dokumente gefunden', 'No Donors currently registered': 'Zurzeit sind keine Spender registriert', 'No Events currently registered': 'Zurzeit sind keine Ereignisse registriert', 'No Facilities currently registered in this event': 'Für dieses Ereignis ist zurzeit keine Einrichtung registriert', 'No Facilities currently registered in this scenario': 'Für dieses Szenario ist zurzeit keine Einrichtung registriert', 'No Feature Layers currently defined': 'Zurzeit sind keine Objekt-Layer definiert', 'No Flood Reports currently registered': 'Zurzeit sind keine Flutberichte registriert', 'No Groups currently defined': 'Zurzeit sind keine Gruppen definiert', 'No Groups currently registered': 'Zurzeit sind keine Gruppen registriert', 'No Hospitals currently registered': 'Zurzeit sind keine Krankenhäuser registriert', 'No Human Resources currently registered in this event': 'Für dieses Ereignis sind zurzeit keine personellen Ressourcen registriert', 'No Human Resources currently registered in this scenario': 'Für dieses Szenario sind zurzeit keine personellen Ressourcen registriert',
'No Identification Report Available': 'Kein Identifizierungsbericht verfügbar', 'No Identities currently registered': 'Zurzeit sind keine Identitäten registriert', 'No Image': 'Kein Bild', 'No Images currently registered': 'Zurzeit sind keine Bilder registriert', 'No Impact Types currently registered': 'Zurzeit sind keine Auswirkungsarten registriert', 'No Impacts currently registered': 'Zurzeit sind keine Auswirkungen registriert', 'No Incident Reports currently registered': 'Zurzeit sind keine Vorfallberichte registriert', 'No Incoming Shipments': 'Keine eingehenden Lieferungen', 'No Item Categories currently registered': 'Zurzeit sind keine Artikelkategorien registriert', 'No Item Packs currently registered': 'Zurzeit sind keine Artikelpakete registriert', 'No Items currently registered in this Inventory': 'Für diesen Bestand sind zurzeit keine Artikel registriert', 'No Items currently registered': 'Zurzeit sind keine Artikel registriert', 'No Keys currently defined': 'Zurzeit sind keine Schlüssel definiert', 'No Kits currently registered': 'Zurzeit sind keine Ausstattungen (Kits) registriert', 'No Level 1 Assessments currently registered': 'Zurzeit keine Stufe 1 Beurteilungen registriert', 'No Level 2 Assessments currently registered': 'Zurzeit keine Stufe 2 Beurteilungen registriert', 'No Locations currently available': 'Keine Standorte/Gebiete verfügbar', 'No Locations currently registered': 'Zurzeit sind keine Standorte/Gebiete registriert', 'No Map Profiles currently defined': 'Zurzeit sind keine Kartenkonfigurationen definiert', 'No Map Profiles currently registered in this event': 'Für dieses Ereignis sind zurzeit keine Kartenkonfigurationen registriert', 'No Map Profiles currently registered in this scenario': 'Für dieses Szenario sind zurzeit keine Kartenkonfigurationen registriert', 'No Markers currently available': 'Zurzeit sind keine Marker/Symbole verfügbar', 'No Match': 'Keine Übereinstimmung', 'No Matching Catalog Items': 'Keine passenden Katalogelemente', 'No Matching Items': 'Keine passenden Artikel', 'No Matching Records': 'Keine passenden Datensätze', 'No Members currently registered': 'Zurzeit sind keine Mitglieder registriert', 'No Memberships currently defined': 'Zurzeit sind keine Mitgliedschaften definiert', 'No Messages currently in Outbox': 'Zurzeit sind keine Nachrichten im Postausgang', 'No Need Types currently registered': 'Zurzeit sind keine Bedarfstypen registriert', 'No Needs currently registered': 'Zurzeit sind keine Bedarfe registriert', 'No Offices currently registered': 'Zurzeit sind keine Büros registriert', 'No Offices found!': 'Keine Büros gefunden!', 'No Organizations currently registered': 'Zurzeit sind keine Organisationen registriert', 'No options available': 'Keine Optionen verfügbar', 'No People currently registered in this camp': 'Zurzeit sind in diesem Camp keine Personen registriert', 'No People currently registered in this shelter': 'Zurzeit sind in dieser Unterkunft keine Personen registriert', 'No Persons currently registered': 'Zurzeit sind keine Personen registriert', 'No Persons currently reported missing': 'Zurzeit sind keine Personen vermisst gemeldet', 'No Persons found': 'Keine Personen gefunden', 'No Photos found': 'Keine Fotos gefunden', 'No Picture': 'Kein Bild', 'No Population Statistics currently registered': 'Zurzeit sind keine Bevölkerungsstatistiken registriert', 'No Presence Log Entries currently registered': 'Zurzeit gibt es keine Anwesenheitsprotokolleinträge',
'No Problems currently defined': 'Zurzeit sind keine Probleme definiert', 'No Projections currently defined': 'Zurzeit sind keine Kartenprojektionen definiert', 'No Projects currently registered': 'Zurzeit sind keine Projekte registriert', 'No Rapid Assessments currently registered': 'Zurzeit sind keine Schnell-Beurteilungen registriert', 'No Received Items currently registered': 'Zurzeit sind keine erhaltenen Artikel registriert', 'No Received Shipments': 'Keine erhaltenen Lieferungen', 'No Records currently available': 'Zurzeit sind keine Datensätze verfügbar', 'No Request Items currently registered': 'Zurzeit sind keine angefragten Artikel registriert', 'No Requests': 'Keine Anfragen', 'No Rivers currently registered': 'Zurzeit sind keine Flüsse registriert', 'No Roles currently defined': 'Zurzeit sind keine Rollen definiert', 'No Rooms currently registered': 'Zurzeit sind keine Räume registriert', 'No Scenarios currently registered': 'Zurzeit sind keine Szenarien registriert', 'No Sections currently registered': 'Zurzeit sind keine Abschnitte registriert', 'No Sectors currently registered': 'Zurzeit sind keine Bereiche registriert', 'No Sent Items currently registered': 'Zurzeit sind keine gesendeten Artikel registriert', 'No Sent Shipments': 'Keine versandten Lieferungen', 'No Settings currently defined': 'Zurzeit sind keine Einstellungen definiert', 'No Shelter Services currently registered': 'Zurzeit sind keine Unterkunftsleistungen registriert', 'No Shelter Types currently registered': 'Zurzeit sind keine Unterkunftstypen registriert', 'No Shelters currently registered': 'Zurzeit sind keine Unterkünfte registriert', 'No Solutions currently defined': 'Zurzeit sind keine Lösungen definiert', 'No Staff Types currently registered': 'Zurzeit sind keine Mitarbeitertypen registriert', 'No Subscription available': 'Keine Abonnements verfügbar', 'No Subsectors currently registered': 'Zurzeit sind keine Teilbereiche registriert', 'No Support Requests currently registered': 'Zurzeit sind keine Unterstützungsanfragen registriert', 'No Survey Answers currently entered.': 'Zurzeit wurden noch keine Antworten auf Umfragen eingegeben.', 'No Survey Questions currently registered': 'Zurzeit wurden noch keine Umfrage-Fragen registriert',
'No Survey Series currently registered': 'Zurzeit wurden noch keine Umfragenserien registriert', 'No Survey Template currently registered': 'Zurzeit wurde noch keine Umfragen-Vorlage registriert', 'No Tasks with Location Data': 'Keine Aufgaben mit Standortdaten', 'No Teams currently registered': 'Zurzeit wurden noch keine Teams registriert', 'No Themes currently defined': 'Zurzeit wurden noch keine Themen definiert', 'No Tickets currently registered': 'Zurzeit wurden noch keine Tickets registriert', 'No Tracks currently available': 'Zurzeit sind noch keine Tracks verfügbar', 'No Users currently registered': 'Zurzeit wurden noch keine Benutzer registriert', 'No Volunteers currently registered': 'Zurzeit sind noch keine Freiwilligen registriert', 'No Warehouses currently registered': 'Zurzeit sind noch keine Warenlager registriert', 'No access at all': 'Kein Zugriff', 'No access to this record!': 'Kein Zugriff auf diesen Datensatz!', 'No action recommended': 'Keine Aktion empfohlen', 'No conflicts logged': 'Keine Konflikte protokolliert', 'No contact information available': 'Keine Kontaktinformation verfügbar', 'No contacts currently registered': 'Zurzeit sind noch keine Kontakte registriert', 'No data available': 'Keine Daten verfügbar', 'No data in this table - cannot create PDF!': 'Keine Daten in dieser Tabelle - PDF kann nicht erstellt werden!', 'No databases in this application': 'Keine Datenbanken in dieser Anwendung', 'No dead body reports available': 'Keine Leichenberichte verfügbar', 'No entries found': 'Keine Einträge gefunden', 'No entries matching the query': 'Die Abfrage lieferte keine Einträge', 'No entry available': 'Kein Eintrag verfügbar', 'No location known for this person': 'Für diese Person ist kein Gebiet/Standort bekannt', 'No locations found for members of this team': 'Für Mitglieder dieses Teams ist kein Gebiet/Standort bekannt', 'No log entries matching the query': 'Die Abfrage lieferte keine Protokolleinträge', 'No messages in the system': 'Keine Nachrichten im System', 'No peers currently registered': 'Zurzeit sind keine Peers registriert', 'No pending registrations found': 'Keine anstehenden Registrierungen gefunden', 'No pending registrations matching the query': 'Die Abfrage lieferte keine anstehenden Registrierungen', 'No person record found for current user.': 'Kein Personendatensatz für den aktuellen Benutzer gefunden.', 'No problem group defined yet': 'Noch keine Problem-Gruppe definiert', 'No records found': 'Keine Datensätze gefunden', 'No records matching the query': 'Die Abfrage lieferte keine Datensätze', 'No reports available.': 'Keine Berichte verfügbar.', 'No reports currently available': 'Zurzeit sind keine Berichte verfügbar', 'No requests found': 'Keine Anfragen gefunden', 'No resources currently reported': 'Zurzeit sind keine Ressourcen gemeldet', 'No service profile available': 'Kein Leistungsprofil verfügbar', 'No skills currently set': 'Zurzeit sind keine Fähigkeiten festgelegt', 'No staff or volunteers currently registered': 'Zurzeit sind weder Mitarbeiter noch Freiwillige registriert', 'No status information available': 'Keine Statusinformation verfügbar', 'No synchronization': 'Keine Synchronisation', 'No tasks currently registered': 'Zurzeit sind keine Aufgaben registriert', 'No template found!': 'Keine Vorlage gefunden!', 'No units currently registered': 'Zurzeit sind keine Einheiten registriert', 'No volunteer availability registered': 'Zurzeit ist keine Verfügbarkeit von Freiwilligen registriert',
'Non-structural Hazards': 'Nicht-strukturelle Gefahren', 'None (no such record)': 'Nichts (kein entsprechender Datensatz)', 'None': '-', 'Noodles': 'Nudeln', 'Normal Address': 'Normale Adresse', 'Normal Job': 'Normaler Beruf', 'Not Applicable': 'Nicht zutreffend', 'Not Authorised!': 'Nicht berechtigt!', 'Not Possible': 'Nicht möglich', 'Not Set': 'Nicht festgelegt', 'Not Authorized': 'Nicht berechtigt', 'Not installed or incorrectly configured.': 'Nicht installiert oder nicht korrekt konfiguriert.', 'Not yet a Member of any Group': 'Bis jetzt kein Mitglied irgendeiner Gruppe', 'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'Beachten Sie, dass diese Liste nur aktive Freiwillige zeigt. Um alle im System registrierten Personen zu sehen, suchen Sie stattdessen auf diesem Bildschirm', 'Notice to Airmen': 'Hinweis für Flieger', 'Number': 'Anzahl', 'Number of Barges': 'Zahl der Lastschiffe', 'Number of Columns': 'Anzahl der Spalten', 'Number of Patients': 'Anzahl der Patienten', 'Number of People Required': 'Anzahl der benötigten Personen', 'Number of Rows': 'Anzahl der Reihen', 'Number of Tugboats': 'Zahl der Schleppkähne', 'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Anzahl von zusätzlichen Betten dieses Typs, die voraussichtlich in den nächsten 24 Stunden in dieser Einheit zur Verfügung stehen werden.', 'Number of alternative places for studying': 'Anzahl von alternativen Orten zum Studieren', 'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Anzahl von verfügbaren/freien Betten dieses Typs in dieser Einheit zum Zeitpunkt des Berichtes.', 'Number of deaths during the past 24 hours.': 'Anzahl von Toten in den letzten 24 Stunden.', 'Number of discharged patients during the past 24 hours.': 'Anzahl der entlassenen Patienten in den vergangenen 24 Stunden.', 'Number of doctors': 'Anzahl der Ärzte', 'Number of in-patients at the time of reporting.': 'Anzahl der stationären Patienten zum Zeitpunkt der Berichterstellung.', 'Number of newly admitted patients during the past 24 hours.': 'Anzahl der neu aufgenommenen Patienten innerhalb der letzten 24 Stunden.', 'Number of non-medical staff': 'Anzahl des nicht-medizinischen Personals', 'Number of nurses': 'Anzahl der Krankenschwestern', 'Number of private schools': 'Anzahl der privaten Schulen', 'Number of public schools': 'Anzahl der öffentlichen Schulen', 'Number of religious schools': 'Anzahl der religiösen Schulen', 'Number of residential units not habitable': 'Anzahl der nicht bewohnbaren Wohneinheiten', 'Number of residential units': 'Anzahl der Wohneinheiten', 'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Anzahl der freien/verfügbaren Betten in diesem Krankenhaus. Automatisch aktualisiert aus täglichen Berichten.', 'Number of vacant/available units to which victims can be transported immediately.': 'Anzahl der freien/verfügbaren Einheiten, zu denen die Opfer sofort transportiert werden können.', 'Number or Label on the identification tag this person is wearing (if any).': 'Nummer oder Beschriftung auf der Identifikationsmarke, die diese Person trägt (falls vorhanden).', 'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'Nummer oder Code zur Markierung des Fundortes, z. B. Flaggencode, Koordinaten, Standortnummer oder Ähnliches (falls verfügbar)',
'Number': 'Nummer', 'Number/Percentage of affected population that is Female & Aged 0-5': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 0-5 Jahren', 'Number/Percentage of affected population that is Female & Aged 13-17': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 13-17 Jahren', 'Number/Percentage of affected population that is Female & Aged 18-25': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 18-25 Jahren', 'Number/Percentage of affected population that is Female & Aged 26-60': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 26-60 Jahren', 'Number/Percentage of affected population that is Female & Aged 6-12': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 6-12 Jahren', 'Number/Percentage of affected population that is Female & Aged 61+': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung über 61', 'Number/Percentage of affected population that is Male & Aged 0-5': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 0-5 Jahren', 'Number/Percentage of affected population that is Male & Aged 13-17': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 13-17 Jahren', 'Number/Percentage of affected population that is Male & Aged 18-25': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 18-25 Jahren', 'Number/Percentage of affected population that is Male & Aged 26-60': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 26-60 Jahren', 'Number/Percentage of affected population that is Male & Aged 6-12': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 6-12 Jahren', 'Number/Percentage of affected population that is Male & Aged 61+': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung über 61', 'Nursery Beds': 'Neugeborenenbetten', 'Nutrition problems': 'Ernährungsprobleme', 'Nutrition': 'Nahrung', 'Opportunities to Volunteer On-Site?': 'Möglichkeiten für Freiwillige vor Ort?', 'OR Reason': 'OP-Grund', 'OR Status Reason': 'OP-Statusgrund', 'OR Status': 'OP-Status', 'Observer': 'Beobachter', 'Obsolete': 'Veraltet', 'Obstetrics/Gynecology': 'Geburtshilfe/Gynäkologie', 'Office Address': 'Büroadresse', 'Office Details': 'Bürodetails', 'Office Phone': 'Telefon im Büro', 'Office Type': 'Bürotyp', 'Office Types': 'Bürotypen', 'Office added': 'Büro hinzugefügt', 'Office deleted': 'Büro gelöscht', 'Office updated': 'Büro aktualisiert', 'Office': 'Büro', 'Offices & Warehouses': 'Büros & Warenlager', 'Offices': 'Büros', 'Offline Sync (from USB/File Backup)': 'Offline-Synchronisation (von USB/Dateisicherung)', 'Offline Sync': 'Offline-Synchronisation', 'Oil Terminal Depth': 'Tiefe des Ölterminals', 'Older people as primary caregivers of children': 'Ältere Menschen als primäre Pfleger von Kindern', 'Older people in care homes': 'Ältere Menschen in Pflegeheimen', 'Older people participating in coping activities': 'Ältere Menschen, die sich an Krisenbewältigungsaktivitäten beteiligen', 'Older person (>60 yrs)': 'Ältere Person (> 60 Jahre)', 'On by default? (only applicable to Overlays)': 'Standardmäßig an? (gilt nur für Overlays)', 'On by default?': 'Standardmäßig an?', 'One Time Cost': 'Einmalige Kosten', 'One time cost': 'Einmalige Kosten', 'One-time costs': 'Einmalige Kosten', 'One-time': 'Einmalig', 'Oops! Something went wrong...': 'Hoppla! Etwas ging schief...',
'Oops! something went wrong on our side.': 'Hoppla! Etwas ging auf unserer Seite schief.', 'Opacity (1 for opaque, 0 for fully-transparent)': 'Opazität (1 für opaque - undurchsichtig, 0 für vollständig transparent)', 'Opacity': 'Opazität (Undurchsichtigkeit)', 'Open area': 'Offener Bereich', 'Open recent': 'Kürzlich Bearbeitetes öffnen', 'Open': 'Öffnen', 'Opening Times': 'Öffnungszeiten', 'OpenStreetMap Tiles': 'OpenStreetMap Tiles', 'OpenWeatherMap data': 'OpenWeatherMap Daten', 'Operating Rooms': 'Operationssäle', 'Optional link to an Incident which this Assessment was triggered by.': 'Optionaler Link zu einem Vorfall, der diese Beurteilung auslöste.', 'Optional': 'Optional', 'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': 'Optional. Wenn Sie die Darstellung der Objekte auf der Basis von Werten eines Attributs festlegen möchten, wählen Sie das zu verwendende Attribut hier aus.', 'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Optional. Bei GeoServer ist dies die Namespace-URI des Arbeitsbereichs (nicht der Name!). In den WFS getCapabilities ist dies der Teil des FeatureType-Namens vor dem Doppelpunkt (:).', 'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'Optional. Der Name eines Elements, dessen Inhalt die URL einer Bilddatei ist, die in Popups angezeigt werden soll.', 'Optional. The name of an element whose contents should be put into Popups.': 'Optional. Name eines Elements, dessen Inhalt in Dialogfenstern angezeigt wird.', 'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'Optional. Name des Schemas. Bei Geoserver wird das Format http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name verwendet.',
'Options': 'Optionen', 'Organization Details': 'Details zur Organisation', 'Organization Domains': 'Organisationsdomains', 'Organization Registry': 'Organisationsdatenbank', 'Organization Type': 'Organisationstyp', 'Organization Types': 'Organisationstypen', 'Organization added': 'Organisation hinzugefügt', 'Organization deleted': 'Organisation gelöscht', 'Organization updated': 'Organisation aktualisiert', 'Organization': 'Organisation', 'Organizations': 'Organisationen', 'Organization/Supplier': 'Organisation/Anbieter', 'Organized By': 'Organisiert durch', 'Origin of the separated children': 'Herkunft der getrennten Kinder', 'Origin': 'Ursprung', 'Other Address': 'Andere Adresse', 'Other (describe)': 'Andere (näher beschreiben)', 'Other (specify)': 'Sonstige (näher spezifizieren)', 'Other Evidence': 'Anderer Nachweis', 'Other Faucet/Piped Water': 'Andere Wasserrohre/-hähne', 'Other Isolation': 'Andere Isolierung', 'Other Name': 'Sonstiger Name', 'Other activities of boys 13-17yrs before disaster': 'Andere Aktivitäten von Jungen 13-17 Jahre vor der Katastrophe', 'Other activities of boys 13-17yrs': 'Andere Aktivitäten der Jungen 13-17 Jahre', 'Other activities of boys <12yrs before disaster': 'Andere Aktivitäten von Jungen <12 Jahre vor der Katastrophe', 'Other activities of boys <12yrs': 'Andere Aktivitäten von Jungen <12 Jahren', 'Other activities of girls 13-17yrs before disaster': 'Andere Aktivitäten von Mädchen 13-17 Jahre vor der Katastrophe', 'Other activities of girls 13-17yrs': 'Andere Aktivitäten von Mädchen 13-17 Jahre', 'Other activities of girls<12yrs before disaster': 'Andere Aktivitäten von Mädchen <12 Jahre vor der Katastrophe', 'Other activities of girls<12yrs': 'Andere Aktivitäten von Mädchen <12 Jahre', 'Other alternative infant nutrition in use': 'Andere alternative Säuglingsnahrung in Verwendung', 'Other alternative places for study': 'Andere alternative Orte zum Lernen', 'Other assistance needed': 'Andere Unterstützung benötigt', 'Other assistance, Rank': 'Andere Unterstützung, Rang', 'Other current health problems, adults': 'Andere aktuelle gesundheitliche Probleme, Erwachsene', 'Other current health problems, children': 'Andere aktuelle gesundheitliche Probleme, Kinder', 'Other events': 'Sonstige Ereignisse', 'Other factors affecting school attendance': 'Andere Faktoren mit Einfluss auf den Schulbesuch', 'Other major expenses': 'Andere große Ausgaben', 'Other non-food items': 'Andere Non-Food-Artikel', 'Other recommendations': 'Andere Empfehlungen', 'Other residential': 'Andere Bewohner/innen', 'Other school assistance received': 'Andere erhaltene Schulunterstützung', 'Other school assistance, details': 'Andere Schulhilfe, Einzelheiten', 'Other school assistance, source': 'Herkunft anderer Schulhilfen', 'Other settings can only be set by editing a file on the server': 'Andere Einstellungen können nur durch Bearbeiten einer Datei auf dem Server festgelegt werden', 'Other side dishes in stock': 'Andere Speisen auf Lager', 'Other types of water storage containers': 'Andere Arten von Wassertanks', 'Other ways to obtain food': 'Weitere Möglichkeiten, um an Nahrungsmittel zu gelangen', 'Other': 'Sonstige', 'Outbound Mail settings are configured in models/000_config.py.': 'Abgehende Mail-Einstellungen werden in der Datei models/000_config.py konfiguriert.', 'Outbox': 'Postausgang', 'Outgoing SMS Handler': 'SMS-Handler für ausgehende Nachrichten',
'Outgoing SMS handler': 'SMS-Handler für ausgehende Nachrichten', 'Overall Hazards': 'Gefahren insgesamt', 'Overhead falling hazard': 'Gefahr fallender Objekte', 'Overland Flow Flood': 'Überflutung', 'Owned By (Organization/Branch)': 'Gehört (Organisation/Niederlassung)', 'Owned Records': 'Eigene Datensätze', 'Owned Resources': 'Eigene Ressourcen', 'Ownership': 'Eigentum', 'Owning Organization': 'In Eigentum von', 'PIN number': 'PIN-Nummer', 'PIN': 'PIN', 'PL Women': 'PL Frauen', 'Pack': 'Packung', 'Packs': 'Packungen', 'Paid': 'Bezahlt', 'Parameters': 'Parameter', 'Parapets, ornamentation': 'Geländer, Verzierung', 'Parent Office': 'Übergeordnetes Büro', 'Parent needs to be of the correct level': 'Übergeordnetes Element muss auf der richtigen Stufe sein', 'Parent needs to be set for locations of level': 'Ein übergeordnetes Element muss für Gebiete/Standorte dieser Stufe existieren', 'Parent needs to be set': 'Ein übergeordnetes Element muss definiert werden', 'Parent': 'Übergeordnetes Element', 'Parents/Caregivers missing children': 'Eltern/Pfleger vermissen Kinder', 'Parser Connections': 'Parser-Verbindungen', 'Parsers': 'Parser', 'Partial': 'Partiell', 'Participant': 'Teilnehmer', 'Pashto': 'Paschtu', 'Pass': 'Übergeben', 'Passport': 'Reisepass', 'Password': 'Passwort', 'Path': 'Pfad', 'Pathology': 'Pathologie', 'Patients': 'Patienten', 'Payload Height (m)': 'Ladekapazität Höhe (m)', 'Payload Length (m)': 'Ladekapazität Länge (m)', 'Payload Volume (m3)': 'Ladekapazität Volumen (m3)', 'Payload Weight (kg)': 'Ladekapazität Gewicht (kg)', 'Payload Width (m)': 'Ladekapazität Breite (m)', 'Pediatric ICU': 'Kinder-Intensivstation', 'Pediatric Psychiatric': 'Kinderpsychiatrie', 'Pediatrics': 'Kinderheilkunde', 'Peer Details': 'Details zu Peers', 'Peer Registration Details': 'Details zur Peer-Registrierung', 'Peer Registration Request': 'Anfrage zu Peer-Registrierung', 'Peer Registration': 'Peer-Registrierung', 'Peer Type': 'Peer-Typ', 'Peer UID': 'Peer UID', 'Peer added': 'Peer hinzugefügt', 'Peer deleted': 'Peer gelöscht', 'Peer not allowed to push': 'Peer ist nicht für das Pushen von Daten zugelassen', 'Peer registration request added': 'Anfrage zu Peer-Registrierung hinzugefügt', 'Peer registration request deleted': 'Anfrage zu Peer-Registrierung gelöscht', 'Peer registration request updated': 'Anfrage zu Peer-Registrierung aktualisiert', 'Peer updated': 'Peer aktualisiert', 'Peer': 'Peer', 'Pending Requests': 'Anstehende Anfragen', 'Pending': 'Anstehend', 'People Needing Food': 'Menschen, die Nahrungsmittel brauchen', 'People Needing Shelter': 'Menschen, die Unterkünfte brauchen', 'People Needing Water': 'Menschen, die Wasser brauchen', 'People Reservation': 'Gruppe reservieren', 'People Registration': 'Personenregistrierung', 'People Trapped': 'Eingeschlossene Menschen', 'People': 'Menschen', 'Performance Rating': 'Leistungsbeurteilung', 'Permanent Home Address': 'Dauerhafte Heimatadresse', 'Person 1, Person 2 are the potentially duplicate records': 'Person 1 und Person 2 sind möglicherweise Duplikate', 'Person De-duplicator': 'Dubletten in Personen auflösen', 'Person Details': 'Details zur Person', 'Person Registry': 'Personendatenbank', 'Person added to Group': 'Person einer Gruppe hinzugefügt', 'Person added to Team': 'Person einem Team hinzugefügt', 'Person added': 'Person hinzugefügt', 'Person deleted': 'Person gelöscht', 'Person details updated': 'Details zur Person aktualisiert', 'Person interviewed': 'Person befragt', 'Person or OU': 'Person oder Organisationseinheit',
'Person who has actually seen the person/group.': 'Person, die die Person/Gruppe tatsächlich gesehen hat.', 'Person/Group': 'Person/Gruppe', 'Personal Data': 'Persönliche Daten', 'Personal Effects Details': 'Details zur persönlichen Habe', 'Personal Effects': 'Persönliche Habe', 'Personal Map': 'Persönliche Karte', 'Personal Profile': 'Persönliches Profil', 'Personal impact of disaster': 'Persönliche Auswirkung der Katastrophe', 'Persons in institutions': 'Personen in Institutionen', 'Persons with disability (mental)': 'Personen mit Behinderungen (psychischen)', 'Persons with disability (physical)': 'Personen mit Behinderungen (körperlichen)', 'Person': 'Person', 'Persons by Age Group': 'Personen nach Altersgruppen', 'Persons by Gender': 'Personen nach Geschlecht', 'Persons': 'Personen', 'Phone 1': 'Telefon 1', 'Phone 2': 'Telefon 2', 'Phone #': 'Telefon #', 'Phone': 'Telefon', 'Phone/Business': 'Telefon/Geschäftlich', 'Phone/Emergency': 'Telefon/Notfall', 'Phone/Exchange (Switchboard)': 'Telefon/Vermittlung (Zentrale)', 'Photo Details': 'Fotodetails', 'Photo Taken?': 'Foto gemacht?', 'Photo added': 'Foto hinzugefügt', 'Photo deleted': 'Foto gelöscht', 'Photo updated': 'Foto aktualisiert', 'Photo': 'Foto', 'Photograph': 'Fotografie', 'Photos': 'Fotos', 'Physical Description': 'Physische Beschreibung', 'Physical Safety': 'Physische Sicherheit', 'Picture upload and finger print upload facility': 'Einrichtung, um Foto und Fingerabdruck hochzuladen', 'Picture': 'Bild', 'Place of Recovery': 'Ort der Bergung', 'Place on Map': 'Auf Karte platzieren', 'Places for defecation': 'Plätze für Kotablagerung', 'Places the children have been sent to': 'Orte, an die die Kinder geschickt wurden', 'Playing': 'Wiedergabe', 'Please correct all errors.': 'Korrigieren Sie bitte alle Fehler.', 'Please enter a first name': 'Bitte geben Sie den Vornamen ein', 'Please enter a site OR a location': 'Bitte geben Sie eine Stelle oder einen Standort/Gebiet an', 'Please enter the first few letters of the Person/Group for the autocomplete.': 'Bitte geben Sie die ersten Buchstaben der Person/Gruppe ein, um die Autovervollständigung zu starten.', 'Please enter the recipient': 'Bitte geben Sie den Empfänger ein', 'Please fill this!': 'Bitte hier ausfüllen!', 'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': 'Bitte geben Sie die URL der Seite an, auf die Sie sich beziehen, eine Beschreibung dessen, was Sie erwartet haben & was wirklich passiert ist.', 'Please report here where you are:': 'Bitte hier angeben, wo Sie sich befinden:', 'Please select another level': 'Bitte wählen Sie eine andere Ebene', 'Please select': 'Treffen Sie eine Auswahl', 'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': 'Bitte melden Sie sich unter Angabe Ihrer Mobilfunknummer an. Das erlaubt uns, Ihnen Textnachrichten zu senden. Bitte geben Sie die internationale Nummer an (Deutschland: 0049.... - ohne führende 0).', 'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Bitte geben Sie alle Probleme und Hindernisse bei der korrekten Behandlung der Krankheit im Detail an (in Zahlen, falls zutreffend). Sie können auch Vorschläge machen, wie die Situation verbessert werden kann.',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Bitte dieses Feld verwenden, um zusätzliche Informationen zu hinterlegen, einschließlich der Datensatzhistorie, falls dieser aktualisiert wurde.', 'Please use this field to record any additional information, including any Special Needs.': 'Bitte dieses Feld verwenden, um zusätzliche Informationen, einschließlich besonderer Anforderungen, zu hinterlegen.', 'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Bitte dieses Feld verwenden, um zusätzliche Informationen, wie die Ushahidi Vorgangs-ID, zu hinterlegen, einschließlich der Datensatzhistorie, falls dieser aktualisiert wurde.', 'Pledge Support': 'Zusage von Unterstützung', 'PO': 'PO', 'PO Number': 'PO-Nummer', 'PoI Types': 'PoI-Typen', 'POIS': 'PoIs', 'Point': 'Point', 'Points of Interest': 'Points of Interest', 'Poisoning': 'Vergiftung', 'Poisonous Gas': 'Giftgas', 'Police': 'Polizei', 'Pollution and other environmental': 'Verschmutzung und andere Umweltgefahren', 'Polygon reference of the rating unit': 'Polygonale Abgrenzung der Bewertungseinheit', 'Poor': 'Arm', 'Population Statistic Details': 'Details zur Bevölkerungsstatistik', 'Population Statistic added': 'Bevölkerungsstatistik hinzugefügt', 'Population Statistic deleted': 'Bevölkerungsstatistik gelöscht', 'Population Statistic updated': 'Bevölkerungsstatistik aktualisiert', 'Population Statistics': 'Bevölkerungsstatistiken', 'Population and number of households': 'Bevölkerungs- und Haushaltsanzahl', 'Population': 'Belegung', 'Popup Fields': 'Popup-Felder', 'Popup Label': 'Popup-Beschriftung', 'Porridge': 'Haferbrei', 'Port Closure': 'Hafenschließung', 'Port': 'Port', 'Portable App': 'Portable App', 'Position Catalog': 'Standpunktkatalog', 'Position added': 'Standpunkt hinzugefügt', 'Position deleted': 'Standpunkt gelöscht', 'Position updated': 'Standpunkt aktualisiert', 'Positions': 'Positionen', 'Postcode': 'PLZ', 'Posted on': 'Gepostet am', 'Posts can be either full pages, embedded within other pages or part of a series (for use as news items or blog posts)': 'Posts können entweder vollständige Seiten sein, in andere Seiten eingebettet werden oder Teil einer Serie sein (z.B. zur Nutzung als Newseintrag oder Blogpost)', 'Poultry restocking, Rank': 'Geflügelbestand auffüllen, Rang', 'Poultry': 'Geflügel', 'Pounds': 'Pfund', 'Power Failure': 'Stromausfall', 'Power': 'Stromversorgung', 'Powered by Sahana Eden': 'Powered by Sahana Eden', 'Pre-cast connections': 'Betonfertigteil-Verbindungen', 'Preferred Name': 'Bevorzugter Name', 'Pregnant women': 'Schwangere Frauen', 'Preliminary': 'Vorläufig', 'Presence Condition': 'Anwesenheitsbedingung', 'Presence Log': 'Anwesenheitsprotokollierung', 'Presence in the shelter': 'Anwesend in Unterkunft', 'Presence': 'Anwesenheit', 'Previous': 'Vorherige', 'Primary Occupancy': 'Primäre Belegung', 'Priority from 1 to 9. 1 is most preferred.': 'Priorität von 1 bis 9. 1 wird am meisten bevorzugt.',
'Priority': 'Priorität', 'Privacy': 'Datenschutz', 'Private': 'Privat', 'Problem Administration': 'Verwaltung von Problemen', 'Problem Details': 'Problemdetails', 'Problem Group': 'Problemgruppe', 'Problem Title': 'Problemtitel', 'Problem added': 'Problem hinzugefügt', 'Problem connecting to twitter.com - please refresh': 'Verbindungsproblem zu twitter.com - bitte neu laden', 'Problem deleted': 'Problem gelöscht', 'Problem updated': 'Problem aktualisiert', 'Problem': 'Problem', 'Problems': 'Probleme', 'Procedure': 'Vorgehensweise', 'Process Received Shipment': 'Bearbeiten der erhaltenen Lieferung', 'Process Shipment to Send': 'Vorbereiten der Lieferung zum Versenden', 'Procurement & Logistics cost': 'Kosten für Beschaffung & Logistik', 'Profile': 'Profil', 'Profile Details': 'Details zum Profil', 'Profile Picture?': 'Profilbild?', 'Program': 'Programm', 'Programs': 'Programme', 'Proj4js definition': 'Proj4js-Definition', 'Project Details': 'Details zum Projekt', 'Project Name': 'Name des Projekts', 'Project Status': 'Projektstatus', 'Project added': 'Projekt hinzugefügt', 'Project deleted': 'Projekt gelöscht', 'Project has no Lat/Lon': 'Projekt hat keine Geographische Koordinate (lat/lon)', 'Project updated': 'Projekt aktualisiert', 'Project': 'Projekt', 'Projection Details': 'Details zur Kartenprojektion', 'Projection added': 'Kartenprojektion hinzugefügt', 'Projection deleted': 'Kartenprojektion gelöscht', 'Projection updated': 'Kartenprojektion aktualisiert', 'Projection': 'Kartenprojektion', 'Projections': 'Kartenprojektionen', 'Projects': 'Projekte', 'Property reference in the council system': 'Anlage im Behördensystem', 'Proposed': 'Vorgeschlagen', 'Protected resource': 'Geschützte Ressource', 'Protection': 'Schutz', 'Provide Metadata for your media files': 'Stellen Sie Metadaten für Ihre Mediadateien zur Verfügung.', 'Provide an optional sketch of the entire building or damage points. Indicate damage points.': 'Stellen Sie optional eine Skizze des gesamten Gebäudes oder der beschädigten Objekte bereit. Markieren Sie dabei die beschädigten Stellen.', 'Psychiatrics/Adult': 'Psychiatrie/Erwachsene', 'Psychiatrics/Pediatric': 'Psychiatrie/Kinder', 'Public Event': 'Öffentliches Ereignis', 'Public and private transportation': 'Öffentlicher und privater Transport', 'Public assembly': 'Öffentliche Versammlung', 'Public': 'Öffentlich', 'Publish': 'Veröffentlichen', 'Published On': 'Veröffentlicht am', 'Pull tickets from external feed': 'Tickets von externen Feeds laden', 'Purchase Date': 'Kaufdatum', 'Purchase Price': 'Kaufpreis', 'Purchase': 'Kauf', 'Purpose': 'Zweck', 'Push tickets to external system': 'Tickets zum externen System transferieren', 'Pyroclastic Flow': 'Pyroklastischer Strom', 'Pyroclastic Surge': 'Pyroklastische Welle', 'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Python Serial-Modul ist innerhalb der aktiven Python-Umgebung nicht verfügbar - dieses muss installiert werden, um das Modem zu aktivieren.', 'Python needs the ReportLab module installed for PDF export': 'Python benötigt das installierte ReportLab-Modul für den PDF-Export',
'Quantity Committed': 'Menge zugesagt', 'Quantity Fulfilled': 'Menge erfüllt', 'Quantity range': 'Mengenumfang', 'Quantity Received': 'Erhaltene Menge', 'Quantity Returned': 'Zurückgegebene Menge', 'Quantity Sent': 'Gesendete Menge', 'Quantity in Transit': 'Menge in Transit', 'Quantity': 'Menge', 'Quarantine': 'Quarantäne', 'Queries': 'Abfragen', 'Query': 'Abfrage', 'Queryable?': 'Abfragbar?', 'RC frame with masonry infill': 'RC-Rahmen mit Mauerwerksfüllung', 'RECORD A': 'DATENSATZ A', 'RECORD B': 'DATENSATZ B', 'Race': 'Rasse', 'Radio Callsign': 'Funkrufzeichen', 'Radiological Hazard': 'Strahlungsgefahr', 'Radiology': 'Radiologie', 'Railway Accident': 'Eisenbahnunfall', 'Railway Hijacking': 'Eisenbahnentführung', 'Rain Fall': 'Regenfall', 'Rapid Assessment Details': 'Details zur Schnell-Beurteilung', 'Rapid Assessment added': 'Schnell-Beurteilung hinzugefügt', 'Rapid Assessment deleted': 'Schnell-Beurteilung gelöscht', 'Rapid Assessment updated': 'Schnell-Beurteilung aktualisiert', 'Rapid Assessment': 'Schnell-Beurteilung', 'Rapid Assessments & Flexible Impact Assessments': 'Schnell-Beurteilungen & flexible Abschätzungen der Auswirkungen', 'Rapid Assessments': 'Schnell-Beurteilungen', 'Rapid Close Lead': 'Schnell Führung schließen', 'Rapid Data Entry': 'Schnelle Dateneingabe', 'Raw Database access': 'Direkter Datenbankzugriff', 'Receive New Shipment': 'Neue Lieferung erhalten', 'Receive Shipment': 'Lieferung erhalten', 'Receive this shipment?': 'Lieferung erhalten?', 'Receive': 'Erhalten', 'Received By Person': 'Erhalten von einer Person', 'Received By': 'Erhalten von', 'Received Item Details': 'Details zum erhaltenen Artikel', 'Received Item deleted': 'Erhaltener Artikel gelöscht', 'Received Item updated': 'Erhaltener Artikel aktualisiert', 'Received Shipment Details': 'Details zur erhaltenen Lieferung', 'Received Shipment canceled and items removed from Inventory': 'Erhaltene Lieferung abgebrochen und Artikel aus dem Bestand entfernt', 'Received Shipment canceled': 'Erhaltene Lieferung abgebrochen', 'Received Shipment updated': 'Erhaltene Lieferung aktualisiert', 'Received Shipments': 'Erhaltene Lieferungen', 'Received': 'Erhalten', 'Received date': 'Eingangsdatum', 'Received/Incoming Shipments': 'Erhaltene/Eingehende Lieferungen', 'Receiving and Sending Items': 'Erhalten und Versenden von Artikeln', 'Recipient': 'Empfänger', 'Recipients': 'Empfänger', 'Recipient(s)': 'Empfänger', 'Recommendations for Repair and Reconstruction or Demolition': 'Empfehlungen für Reparatur und Wiederherstellung oder Abriss', 'Record Details': 'Details zum Datensatz', 'Record Saved': 'Datensatz gesichert', 'Record added': 'Datensatz hinzugefügt', 'Record any restriction on use or entry': 'Erfassen jeglicher Einschränkungen bei Nutzung oder Zutritt', 'Record deleted': 'Datensatz gelöscht', 'Record last updated': 'Datensatz zuletzt aktualisiert', 'Record not found!': 'Datensatz nicht gefunden!', 'Record not found': 'Datensatz nicht gefunden', 'Record updated': 'Datensatz aktualisiert', 'Record': 'Datensatz', 'Recording and Assigning Assets': 'Aufzeichnen und Zuweisen von Anlagen', 'Records': 'Datensätze', 'Recovery Request added': 'Bergungsanfrage hinzugefügt', 'Recovery Request deleted': 'Bergungsanfrage gelöscht', 'Recovery Request updated': 'Bergungsanfrage aktualisiert', 'Recovery Request': 'Bergungsanfrage', 'Recovery Requests': 'Bergungsanfragen', 'Recovery': 'Bergung', 'Recurring Cost': 'Wiederkehrende Kosten', 'Recurring Request?': 'Wiederkehrende Anfrage?',
Anfrage?', 'Recurring cost': 'Wiederkehrende Kosten', 'Recurring costs': 'Wiederkehrende Kosten', 'Recurring': 'Wiederkehrend', 'Red Cross / Red Crescent': 'Rotes Kreuz / Roter Halbmond', 'Red': 'Rot', 'Reference Document': 'Referenzdokument', 'Refresh Rate (seconds)': 'Aktualisierungsrate (Sekunden)', 'Region': 'Regierungsbezirk', 'Region Location': 'Standort Region', 'Regional': 'Regional', 'Regions': 'Regionen', 'Register Person into this Camp': 'Registrieren der Person in dieses Camp', 'Register Person into this Shelter': 'Registrieren der Person in diese Unterkunft', 'Register Person': 'Registrieren einer Person', 'Register them as a volunteer': 'Als Freiwillige registrieren', 'Register': 'Registrieren', 'Registered People': 'Registrierte Personen', 'Registered users can': 'Registrierte Benutzer können', 'Registration Details': 'Details zur Registrierung', 'Registration added': 'Registrierung hinzugefügt', 'Registration entry deleted': 'Anmeldungseintrag gelöscht', 'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': 'Die Registrierung wartet noch auf die Genehmigung von der Qualifizierenden Stelle (%s) - bitte warten Sie, bis Sie eine Bestätigung erhalten.', 'Registration updated': 'Anmeldung aktualisiert', 'Registration': 'Registrierung', 'Rehabilitation/Long Term Care': 'Rehabilitation/Langfristige Pflege', 'Reinforced masonry': 'Verstärktes Mauerwerk', 'Rejected': 'Zurückgewiesen', 'Relief Team': 'Unterstützungsteam', 'Relief': 'Unterstützung', 'Religious Leader': 'Religiöser Führer', 'Religious': 'Religiös', 'Relocate as instructed in the <instruction>': 'Verlagern wie in der <instruction> angewiesen', 'Remarks': 'Bemerkungen', 'Remove Asset from this event': 'Anlage von diesem Ereignis entfernen', 'Remove Asset from this scenario': 'Anlage von diesem Szenario entfernen', 'Remove Facility from this event': 'Einrichtung von diesem Ereignis entfernen', 'Remove Facility from this scenario': 'Einrichtung von diesem Szenario entfernen', 'Remove Human Resource from this event': 'Personelle Ressource von diesem Ereignis entfernen', 'Remove Human Resource from this scenario': 'Personelle Ressource von diesem Szenario entfernen', 'Remove Incident Type from this event': 'Vorfallstyp von diesem Ereignis entfernen', 'Remove Item from Inventory': 'Artikel aus Bestand entfernen', 'Remove Layer from Profile': 'Löschen der Kartenebene aus dem Profil', 'Remove Map Profile from this event': 'Kartenkonfiguration von diesem Ereignis entfernen', 'Remove Map Profile from this scenario': 'Kartenkonfiguration von diesem Szenario entfernen', 'Remove Person from Group': 'Person aus Gruppe entfernen', 'Remove Person from Team': 'Person aus Team entfernen', 'Remove existing data before import': 'Löschen der existierenden Daten vor dem Import', 'Remove this asset from this event': 'Diese Anlage vom Ereignis entfernen', 'Remove this asset from this scenario': 'Diese Anlage vom Szenario entfernen', 'Remove': 'Entfernen', 'Removed from Group': 'Aus Gruppe entfernt', 'Removed from Team': 'Aus Team entfernt', 'Repacked By': 'Umgepackt von', 'Repair': 'Reparieren', 'Repairs': 'Reparaturen', 'Repaired': 'Repariert', 'Repeat your password': 'Kennwort wiederholen', 'Replace if Master': 'Ersetzen wenn Master', 'Replace if Newer': 'Ersetze, falls neuer', 'Replace': 'Ersetzen', 'Report Another Assessment...': 'Melde andere Beurteilung...', 'Report Details': 'Details zum Bericht', 'Report Options': 'Optionen zum Bericht', 'Report
Types Include': 'Berichtstypen beinhalten', 'Report added': 'Bericht hinzugefügt', 'Report deleted': 'Bericht gelöscht', 'Report my location': 'Meinen Standort melden', 'Report of': 'Bericht von', 'Report the contributing factors for the current EMS status.': 'Melde die beitragenden Faktoren für den aktuellen EMS-Status.', 'Report the contributing factors for the current OR status.': 'Melde die beitragenden Faktoren für den aktuellen OR-Status.', 'Report them as found': 'Als gefunden melden', 'Report them missing': 'Als vermisst melden', 'Report updated': 'Bericht aktualisiert', 'Report': 'Bericht', 'Reporter Name': 'Name des Meldenden', 'Reporter': 'Meldender', 'Reporting on the projects in the region': 'Berichterstattung über die Projekte in der Region', 'Reports': 'Berichte', 'Repositories': 'Repositories', 'REQ': 'Anfrage', 'REQ Number': 'Anfragenummer', 'RSS Channels': 'RSS Kanäle', 'RSS Posts': 'RSS Posts', 'Request Added': 'Anfrage hinzugefügt', 'Request Canceled': 'Anfrage storniert', 'Request Details': 'Details zur Anfrage', 'Request Templates': 'Anfragevorlagen', 'Requested For Facility': 'Angefragt für Einrichtung', 'Request From': 'Anfrage von', 'Request Item Details': 'Details zur Anfrage nach Artikel', 'Request Item added': 'Anfrage nach Artikel hinzugefügt', 'Request Item deleted': 'Anfrage nach Artikel entfernt', 'Request Item from Available Inventory': 'Anfrage nach Artikel aus verfügbarem Bestand', 'Request Item updated': 'Anfrage nach Artikel aktualisiert', 'Request Item': 'Angefragter Artikel', 'Request Items': 'Angefragte Artikel', 'Request Status': 'Anfragestatus', 'Request Type': 'Anfragetyp', 'Request Updated': 'Anfrage aktualisiert', 'Request added': 'Anfrage hinzugefügt', 'Request deleted': 'Anfrage gelöscht', 'Request for Role Upgrade': 'Rollenupgrade anfordern', 'Request updated': 'Anfrage aktualisiert', 'Request': 'Anfrage', 'Requests': 'Anfragen', 'Request, Response & Session': 'Anfrage, Antwort & Sitzung', 'Requested By Facility': 'Angefragt von Einrichtung', 'Requested By': 'Angefragt durch', 'Requested From': 'Angefragt von', 'Requested Items': 'Angefragte Artikel', 'Requested by': 'Angefragt durch', 'Requested on': 'Angefragt am', 'Requested': 'Angefragt', 'Requester': 'Anfragender', 'Requests Management': 'Anfragenverwaltung', 'Required Skills': 'Benötigte Fähigkeiten', 'Requires Login!': 'Anmeldung erforderlich!', 'Rescue and recovery': 'Rettung und Bergung (SAR)', 'Reset Password': 'Kennwort zurücksetzen', 'Reset': 'Zurücksetzen', 'Resolve Conflict': 'Konflikt lösen', 'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Das Verfolgen des Links lässt eine neue Anzeige erscheinen, die hilft, doppelte Einträge aufzulösen und die Datenbank zu aktualisieren.', 'Resolve': 'Auflösen', 'Resource Details': 'Details zur Ressource', 'Resource Inventory': 'Ressourcenbestand', 'Resource Type': 'Ressourcentyp', 'Resource added': 'Ressource hinzugefügt', 'Resource deleted': 'Ressource gelöscht', 'Resource updated': 'Ressource aktualisiert', 'Resource': 'Ressource', 'Resources': 'Ressourcen', 'Respiratory Infections': 'Atemwegsinfektionen', 'Response': 'Antwort', 'Restricted Access': 'Eingeschränkter Zugriff', 'Restricted Use': 'Eingeschränkte Verwendung', 'Result': 'Ergebnis', 'Results': 'Ergebnisse', 'Retail Crime': 'Einzelhandelskriminalität', 'Retrieve Password': 'Kennwort abrufen', 'Return to Request': 'Zurück zur Anfrage', 'Return': 'Zurück', 'Returned From': 'Zurückgegeben von',
'Returned': 'Zurückgegeben', 'Review Incoming Shipment to Receive': 'Überprüfung der eingehenden Lieferung für die Annahme', 'Rice': 'Reis', 'Rich Text?': 'Rich Text?', 'Riot': 'Aufruhr', 'River Details': 'Details zum Fluss', 'River added': 'Fluss hinzugefügt', 'River deleted': 'Fluss gelöscht', 'River updated': 'Fluss aktualisiert', 'River': 'Fluss', 'Rivers': 'Flüsse', 'Road Accident': 'Verkehrsunfall', 'Road Closed': 'Straße gesperrt', 'Road Conditions': 'Zustand der Straßen', 'Road Delay': 'Verkehrsverzögerung', 'Road Hijacking': 'Straßenentführung', 'Road Usage Condition': 'Straßennutzungszustand', 'Role Details': 'Details zur Rolle', 'Role Name': 'Name der Rolle', 'Role Required': 'Erforderliche Rolle', 'Role Updated': 'Rolle aktualisiert', 'Role added': 'Rolle hinzugefügt', 'Role deleted': 'Rolle gelöscht', 'Role updated': 'Rolle aktualisiert', 'Role': 'Rolle', 'Role-based': 'Rollenbasiert', 'Roles Permitted': 'Zulässige Rollen', 'Roles': 'Rollen', 'Roll On Roll Off Berth': 'Fähranlegestelle', 'Roof tile': 'Dachziegel', 'Roofs, floors (vertical load)': 'Dächer, Böden (vertikale Belastung)', 'Room Details': 'Details zum Raum', 'Room added': 'Raum hinzugefügt', 'Room deleted': 'Raum gelöscht', 'Room updated': 'Raum aktualisiert', 'Room': 'Raum', 'Rooms': 'Räume', 'Rows in table': 'Zeilen in der Tabelle', 'Rows selected': 'Ausgewählte Zeilen', 'Run Interval': 'Intervall der Läufe', 'Runway Length (m)': 'Länge der Landebahn (m)', 'Runway Surface': 'Oberfläche der Landebahn', 'Runway Width (m)': 'Breite der Landebahn (m)', 'Running Cost': 'Laufende Kosten', 'SMS Modem Channels': 'SMS Modem Kanäle', 'SMS Outbound Gateways': 'SMS Ausgangsgateways', 'SMS SMTP Channels': 'SMS SMTP Kanäle', 'SMS WebAPI Channels': 'SMS WebAPI Kanäle', 'Safe environment for vulnerable groups': 'Sichere Umgebung für gefährdete Gruppen', 'Safety Assessment Form': 'Formular für Sicherheitsbeurteilung', 'Safety of children and women affected by disaster?': 'Ist die Sicherheit von Kindern und Frauen durch die Katastrophe (resp.
das Unglück) beeinträchtigt?', 'Sahana Blue': 'Sahana Blau', 'Sahana Community Chat': 'Sahana Community Chat', 'Sahana Eden <=> Other': 'Sahana Eden <=> Andere', 'Sahana Eden Humanitarian Management Platform': 'Sahana Eden - OpenSource Management-Plattform für humanitäre Notsituationen', 'Sahana Eden Website': 'Sahana Eden Internetseite', 'Sahana Steel': 'Sahana Stahl', 'Sahana access granted': 'Sahana Zugriff gewährt', 'Salted Fish': 'Gesalzener Fisch', 'Sanitation problems': 'Sanitäre Probleme', 'Satellite': 'Satellit', 'Saturday': 'Samstag', 'Save: Default Lat, Lon & Zoom for the Viewport': 'Speichern: Standardmäßig Länge/Breite und Zoomfaktor', 'Save': 'Speichern', 'Saved.': 'Gespeichert.', 'Saved filters': 'Gespeicherte Filter', 'Saving...': 'Wird gespeichert...', 'Scale of Results': 'Umfang der Ergebnisse', 'Scenario Details': 'Details zum Szenario', 'Scenario added': 'Szenario hinzugefügt', 'Scenario deleted': 'Szenario gelöscht', 'Scenario updated': 'Szenario aktualisiert', 'Scenario': 'Szenario', 'Scenarios': 'Szenarien', 'Schedule': 'Zeitplan', 'School Closure': 'Schulschließung', 'School Lockdown': 'Abriegelung der Schule', 'School Teacher': 'Schullehrer', 'School activities': 'Schulaktivitäten', 'School assistance': 'Schulunterstützung', 'School attendance': 'Schulbesuch', 'School destroyed': 'Schule zerstört', 'School heavily damaged': 'Schule stark beschädigt', 'School tents received': 'Schulzelte erhalten', 'School tents, source': 'Herkunft der Schulzelte', 'School used for other purpose': 'Schule wird für andere Zwecke verwendet', 'School': 'Schule', 'School/studying': 'Schule/lernen', 'Schools': 'Schulen', 'Seaports': 'Seehäfen', 'Search Activities': 'Aktivitäten suchen', 'Search Activity Report': 'Aktivitätsbericht suchen', 'Search Addresses': 'Suche nach Adressen', 'Search All Requested Items': 'Alle angeforderten Artikel durchsuchen', 'Search All Requested Skills': 'Alle angefragten Fähigkeiten durchsuchen', 'Search Alternative Items': 'Suche nach alternativen Artikeln', 'Search Assessment Summaries': 'Suche Beurteilungszusammenfassungen', 'Search Assessments': 'Suche Beurteilungen', 'Search Asset Log': 'Suche Anlageprotokoll', 'Search Assets': 'Suche Anlagen', 'Search Baseline Type': 'Referenzdatumstyp suchen', 'Search Baselines': 'Referenzdatum suchen', 'Search Brands': 'Marken suchen', 'Search Budgets': 'Budgets suchen', 'Search Bundles': 'Produktpakete suchen', 'Search Camp Services': 'Camp-Leistungen suchen', 'Search Camp Types': 'Camp-Typen suchen', 'Search Camps': 'Camps suchen', 'Search Catalog Items': 'Katalogeinträge suchen', 'Search Catalogs': 'Kataloge suchen', 'Search Certificates': 'Zertifikate suchen', 'Search Certifications': 'Zertifizierungen suchen', 'Search Checklists': 'Checklisten suchen', 'Search Cluster Subsectors': 'Cluster-Teilbereiche suchen', 'Search Clusters': 'Cluster suchen', 'Search Commitment Items': 'Zugesagte Artikel suchen', 'Search Commitments': 'Zusagen suchen', 'Search Competencies': 'Kompetenzen suchen', 'Search Competency Ratings': 'Kompetenzeinstufungen suchen', 'Search Contact Information': 'Nach Kontaktinformationen suchen', 'Search Contacts': 'Nach Kontakten suchen', 'Search Course Certificates': 'Suchen nach Kurszertifikaten', 'Search Courses': 'Kurse suchen', 'Search Credentials': 'Qualifikationen suchen', 'Search Documents': 'Dokumente suchen', 'Search Donors': 'Spender suchen', 'Search Entries': 'Einträge suchen', 'Search Events': 'Ereignisse suchen', 'Search Facilities': 'Einrichtungen suchen', 'Search Feature Layers':
'Objekt-Ebenen suchen', 'Search Flood Reports': 'Flutberichte suchen', 'Search Groups': 'Gruppen suchen', 'Search Human Resources': 'Personelle Ressourcen suchen', 'Search Identity': 'Identität suchen', 'Search Images': 'Bilder suchen', 'Search Impact Type': 'Auswirkungstypen suchen', 'Search Impacts': 'Auswirkungen suchen', 'Search Incident Reports': 'Vorfallberichte suchen', 'Search Inventory Items': 'Bestandsartikel suchen', 'Search Inventory items': 'Bestandsartikel suchen', 'Search Item Categories': 'Artikelkategorien suchen', 'Search Item Packs': 'Artikelpakete suchen', 'Search Items': 'Artikel suchen', 'Search Job Roles': 'Tätigkeiten suchen', 'Search Keys': 'Schlüssel suchen', 'Search Kits': 'Ausstattungen (Kits) suchen', 'Search Layers': 'Kartenebenen suchen', 'Search Level 1 Assessments': 'Suche Stufe 1 Beurteilungen', 'Search Level 2 Assessments': 'Suche Stufe 2 Beurteilungen', 'Search Locations': 'Gebiet/Standort suchen', 'Search Log Entry': 'Protokolleintrag suchen', 'Search Map Profiles': 'Kartenkonfiguration suchen', 'Search Markers': 'Marker/Symbol suchen', 'Search Members': 'Mitglied suchen', 'Search Membership': 'Mitgliedschaft suchen', 'Search Missions': 'Aufträge suchen', 'Search Need Type': 'Anforderungstyp suchen', 'Search Needs': 'Anforderungen suchen', 'Search Offices': 'Büros suchen', 'Search Organizations': 'Organisationen suchen', 'Search Peer': 'Peer suchen', 'Search Personal Effects': 'Persönliche Habe suchen', 'Search Persons': 'Personen suchen', 'Search Photos': 'Fotos suchen', 'Search Population Statistics': 'Bevölkerungsstatistiken suchen', 'Search Positions': 'Positionen suchen', 'Search Problems': 'Probleme suchen', 'Search Projections': 'Kartenprojektionen suchen', 'Search Projects': 'Projekte suchen', 'Search Queries': 'Abfragen suchen', 'Search Rapid Assessments': 'Schnell-Beurteilungen suchen', 'Search Received Items': 'Erhaltene Artikel suchen', 'Search Received Shipments': 'Erhaltene Lieferungen suchen', 'Search Records': 'Datensätze suchen', 'Search Registations': 'Registrierungen suchen', 'Search Registration Request': 'Registrierungsanfragen suchen', 'Search Report': 'Berichte suchen', 'Search Request Items': 'Angefragte Artikel suchen', 'Search Request': 'Anfrage suchen', 'Search Requested Items': 'Angefragte Artikel suchen', 'Search Requests': 'Anfragen suchen', 'Search Resources': 'Ressourcen suchen', 'Search Rivers': 'Flüsse suchen', 'Search Roles': 'Rollen suchen', 'Search Rooms': 'Räume suchen', 'Search Scenarios': 'Szenarien suchen', 'Search Sections': 'Abschnitte suchen', 'Search Sectors': 'Bereiche suchen', 'Search Sent Items': 'Gesendete Artikel suchen', 'Search Sent Shipments': 'Gesendete Lieferungen suchen', 'Search Service Profiles': 'Leistungsprofile suchen', 'Search Settings': 'Einstellungen suchen', 'Search Shelter Services': 'Unterkunftsleistungen suchen', 'Search Shelter Types': 'Unterkunftsarten suchen', 'Search Shelters': 'Unterkünfte suchen', 'Search Shipped Items': 'Suche über gelieferte Artikel', 'Search Skill Equivalences': 'Fähigkeits-Vergleichbarkeiten suchen', 'Search Skill Provisions': 'Fähigkeits-Bereitstellungen suchen', 'Search Skill Types': 'Fähigkeitstypen suchen', 'Search Skills': 'Fähigkeiten suchen', 'Search Solutions': 'Lösungen suchen', 'Search Staff Types': 'Mitarbeitertypen suchen', 'Search Staff or Volunteer': 'Suche Mitarbeiter oder Freiwillige', 'Search Status': 'Status suchen', 'Search Subscriptions': 'Abonnement suchen', 'Search Subsectors': 'Teilbereiche suchen', 'Search Support Requests':
'Unterstützungsanfragen suchen', 'Search Tasks': 'Aufgaben suchen', 'Search Teams': 'Teams suchen', 'Search Themes': 'Themen suchen', 'Search Tickets': 'Tickets suchen', 'Search Tracks': 'Tracks suchen', 'Search Training Participants': 'Suche Kursteilnehmer', 'Search Trainings': 'Schulung suchen', 'Search Twitter Tags': 'Twitter-Tags suchen', 'Search Units': 'Einheiten suchen', 'Search Users': 'Benutzer suchen', 'Search Volunteer Availability': 'Verfügbarkeit von Freiwilligen suchen', 'Search Volunteers': 'Freiwillige suchen', 'Search Warehouses': 'Warenlager suchen', 'Search and Edit Group': 'Suchen und Bearbeiten von Gruppen', 'Search and Edit Individual': 'Suchen und Bearbeiten von einzelnen Personen', 'Search by Skills': 'Suche nach Fähigkeiten', 'Search by skills': 'Suche nach Fähigkeiten', 'Search for Staff or Volunteers': 'Suche nach Mitarbeitern oder Freiwilligen', 'Search for a Location by name, including local names.': 'Suchen nach Standortnamen, einschließlich lokaler Namen.', 'Search for a Person': 'Suche nach einer Person', 'Search for a Project': 'Suche nach einem Projekt', 'Search for a shipment by looking for text in any field.': 'Suche nach einer Lieferung (Volltextsuche)', 'Search for a shipment received between these dates': 'Suche nach einer erhaltenen Lieferung im Zeitraum', 'Search for an Organization by name or acronym': 'Suche nach einer Organisation nach Namen oder Abkürzung', 'Search for an Organization by name or acronym.': 'Suche nach einer Organisation nach Name oder Abkürzung.', 'Search for an asset by text.': 'Suche Anlage über Text.', 'Search for an item by category.': 'Suche Artikel nach Kategorie.', 'Search for an item by text.': 'Suche Artikel über Text.', 'Search for asset by country.': 'Suche Anlage nach Ländern.', 'Search for office by country.': 'Suche Büro nach Ländern.', 'Search for office by organization.': 'Suche Büro nach Organisation.', 'Search for office by text.': 'Suche Büro über Text.', 'Search for Persons': 'Suche nach Personen', 'Search for warehouse by country.': 'Suche Warenlager nach Ländern.', 'Search for warehouse by organization.': 'Suche Warenlager nach Organisation.', 'Search for warehouse by text.': 'Suche Warenlager über Text.', 'Search here for a person record in order to:': 'Hier nach einem Personendatensatz suchen, um zu:', 'Search location in Geonames': 'Ortssuche in Geonames', 'Search messages': 'Suche Nachrichten', 'Search': 'Suchen', 'Searching for different groups and individuals': 'Suche nach verschiedenen Gruppen und Einzelpersonen', 'Secondary Server (Optional)': 'Sekundärer Server (optional)', 'Seconds must be a number between 0 and 60': 'Sekunden müssen eine Zahl zwischen 0 und 60 sein', 'Section Details': 'Details zum Abschnitt', 'Section deleted': 'Abschnitt gelöscht', 'Section updated': 'Abschnitt aktualisiert', 'Sections': 'Abschnitte', 'Sector Details': 'Details zum Bereich', 'Sector added': 'Bereich hinzugefügt', 'Sector deleted': 'Bereich gelöscht', 'Sector updated': 'Bereich aktualisiert', 'Sector': 'Bereich', 'Sector(s)': 'Bereich(e)', 'Sectors': 'Bereiche', 'Secure Storage Capacity': 'Sichere Lagerkapazität', 'Security Status': 'Sicherheitsstatus', 'Security problems': 'Sicherheitsprobleme', 'Security': 'Sicherheit', 'See All Entries': 'Siehe alle Einträge', 'See all': 'Alles anzeigen', 'See unassigned recovery requests': 'Siehe nicht zugeordnete Bergungsanfragen.', 'Select': 'Auswahl', 'Select All': 'Alles auswählen', 'Select Items from the Request': 'Wählen Sie Artikel aus der Anfrage', 'Select Items from this
Inventory': 'Wählen Sie Artikel aus diesem Bestand', 'Select Land': 'Land auswählen', 'Select Modules for translation': 'Auswahl der Module zum Übersetzen', 'Select a location': 'Wählen Sie einen Ort aus', 'Select a question from the list': 'Wählen Sie eine Frage aus der Liste aus', 'Select a range for the number of total beds': 'Wählen Sie einen Bereich für die Gesamtanzahl von Betten', 'Select all that apply': 'Wählen Sie alles Zutreffende aus', 'Select an Organization to see a list of offices': 'Wählen Sie eine Organisation aus, um eine Liste der zugehörigen Büros anzuzeigen.', 'Select resources to import': 'Wählen Sie Ressourcen zum Importieren aus', 'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Wählen Sie die Overlays für die Beurteilungen und die zugehörigen Aktivitäten, um die Differenz zu identifizieren.', 'Select the person assigned to this role for this project.': 'Wählen Sie die Person, die mit dieser Rolle dem Projekt zugeordnet werden soll.', 'Select to show this configuration in the Regions menu.': "Auswählen, um diese Konfiguration im Menü 'Regionen' anzuzeigen.", 'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Auswahl, ob ein Modem, Tropo oder eine andere Schnittstelle zum Versand von SMS verwendet werden soll.', 'Send Alerts using Email &/or SMS': 'Senden von Alarmen unter Nutzung von E-Mail und/oder SMS', 'Send Commitment as Shipment': 'Zusage als Lieferung senden', 'Send Message': 'Nachricht senden', 'Send New Shipment': 'Neue Lieferung senden', 'Send Notification': 'Benachrichtigung senden', 'Send Shipment': 'Lieferung senden', 'Send a message to this person': 'Dieser Person eine Nachricht senden', 'Send a message to this team': 'Diesem Team eine Nachricht senden', 'Send from %s': 'Senden von %s', 'Send message': 'Nachricht senden', 'Send new message': 'Neue Nachricht senden', 'Send': 'Senden', 'Sends & Receives Alerts via Email & SMS': 'Schickt & empfängt Benachrichtigungen über E-Mail und SMS', 'Sent By Person': 'Gesendet von einer Person', 'Sent By': 'Gesendet von', 'Sent Emails': 'Gesendete E-Mails', 'Sent Item Details': 'Details zum versendeten Artikel', 'Sent Item deleted': 'Gesendeter Artikel gelöscht', 'Sent Item updated': 'Gesendeter Artikel aktualisiert', 'Sent Posts': 'Gesendete Posts', 'Sent Shipment Details': 'Details zur gesendeten Lieferung', 'Sent Shipment canceled and items returned to Inventory': 'Gesendete Lieferung storniert und Artikel zum Lager zurückgebracht', 'Sent Shipment canceled': 'Gesendete Lieferung storniert', 'Sent Shipment updated': 'Gesendete Lieferung aktualisiert', 'Sent Shipments': 'Gesendete Lieferungen', 'Sent SMS': 'Gesendete SMS', 'Sent date': 'Versanddatum', 'Sent': 'Gesendet', 'Separated children, caregiving arrangements': 'Von Eltern getrennte Kinder, Pflegevereinbarungen', 'Serial Number': 'Seriennummer', 'Series': 'Serie', 'Server': 'Server', 'Service Catalog': 'Leistungskatalog', 'Service Record': 'Leistungseintrag', 'Service or Facility': 'Leistung oder Einrichtung', 'Service profile added': 'Leistungsprofil hinzugefügt', 'Service profile deleted': 'Leistungsprofil gelöscht', 'Service profile updated': 'Leistungsprofil aktualisiert', 'Service': 'Leistung', 'Services Available': 'Verfügbare Leistungen', 'Services': 'Leistungen', 'Set Base Site': 'Basisstandort festlegen', 'Set By': 'Definiert durch', 'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': "Wählen Sie
'Wahr', um Benutzern, die nicht Karten-Admins sind, zu erlauben, dieses Level der Gebietshierarchie zu verändern.", 'Setting Details': 'Details konfigurieren', 'Setting added': 'Einstellung hinzugefügt', 'Setting deleted': 'Einstellungen gelöscht', 'Setting updated': 'Einstellung aktualisiert', 'Settings updated': 'Einstellungen aktualisiert', 'Settings were reset because authenticating with Twitter failed': 'Einstellungen wurden zurückgesetzt, da die Authentifizierung mit Twitter fehlgeschlagen ist', 'Settings which can be configured through the web interface are available here.': 'Die Einstellungen, die über das Webinterface konfiguriert werden können, sind hier verfügbar.', 'Settings': 'Einstellungen', 'Severe': 'Ernsthaft', 'Severity': 'Schweregrad', 'Sex': 'Geschlecht', 'Share a common Marker (unless over-ridden at the Feature level)': 'Definiere einen allgemeinen Marker/Symbol (kann auf Objekt-Ebene überschrieben werden)', 'Shelter & Essential NFIs': 'Unterkünfte & Essentielle NFIs', 'Shelter Details': 'Details zur Unterkunft', 'Shelter Name': 'Name der Unterkunft', 'Shelter Registry': 'Unterkunftsregister', 'Shelter Service Details': 'Details zur Unterkunftsleistung', 'Shelter Service added': 'Unterkunftsleistung hinzugefügt', 'Shelter Service deleted': 'Unterkunftsleistung gelöscht', 'Shelter Service updated': 'Unterkunftsleistung aktualisiert', 'Shelter Service': 'Unterkunftsleistung', 'Shelter Services': 'Unterkunftsleistungen', 'Shelter Settings': 'Eigenschaften der Unterkunft', 'Shelter Type Details': 'Details zum Unterkunftstyp', 'Shelter Type added': 'Unterkunftstyp hinzugefügt', 'Shelter Type deleted': 'Unterkunftstyp gelöscht', 'Shelter Type updated': 'Unterkunftstyp aktualisiert', 'Shelter Type': 'Unterkunftstyp', 'Shelter Types and Services': 'Unterkunftstypen und -leistungen', 'Shelter Types': 'Unterkunftstypen', 'Shelter added': 'Unterkunft hinzugefügt', 'Shelter deleted': 'Unterkunft gelöscht', 'Shelter updated': 'Unterkunft aktualisiert', 'Shelter': 'Unterkunft', 'Shelter/NFI Assistance': 'Unterkunft/NFI-Hilfe', 'Shelters': 'Unterkünfte', 'Shipment Created': 'Lieferung erstellt', 'Shipment Items received by Inventory': 'Lieferungsartikel im Bestand empfangen', 'Shipment Items sent from Inventory': 'Lieferungsartikel aus dem Bestand gesendet', 'Shipment Items': 'Lieferungsartikel', 'Shipment Type': 'Typ der Lieferung', 'Shipment to Send': 'Zu sendende Lieferung', 'Shipments To': 'Lieferungen nach', 'Shipments': 'Lieferungen', 'Shipping cost': 'Lieferkosten', 'Shooting': 'Schießerei', 'Short Assessment': 'Kurzbeurteilung', 'Short Description': 'Kurze Beschreibung', 'Show %(number)s entries': 'Zeige %(number)s Einträge', 'Show Checklist': 'Checkliste anzeigen', 'Show Details': 'Details anzeigen', 'Show Location?': 'Gebiet/Standort anzeigen?', 'Show Map': 'Karte anzeigen', 'Show Region in Menu?': 'Region im Menü anzeigen?', 'Show author picture?': 'Bild des Autors anzeigen?', 'Show on Map': 'Auf Karte anzeigen', 'Show on map': 'Auf Karte anzeigen', 'Show totals': 'Summen anzeigen', 'Show': 'Zeige', 'Showing _START_ to _END_ of _TOTAL_ entries': 'Einträge _START_ bis _END_ von _TOTAL_', 'Showing 0 to 0 of 0 entries': 'Keine Einträge', 'Sign-up as a volunteer': 'Als Freiwilliger anmelden', 'Sign-up for Account': 'Für Benutzerkennung anmelden', 'Sign-up succesful - you should hear from us soon!': 'Registrierung erfolgreich - Sie werden in Kürze von uns hören!', 'Site Administration': 'Administration der Seite', 'Site': 'Standort', 'Site Needs': 'Standortbedarf', 'Add
Site Needs': 'Standortbedarf hinzufügen', 'Edit Site Needs': 'Standortbedarf ändern', 'Delete Site Needs': 'Standortbedarf löschen', 'Site Needs added': 'Standortbedarf hinzugefügt', 'Site Needs updated': 'Standortbedarf aktualisiert', 'Site Needs deleted': 'Standortbedarf gelöscht', 'Situation Awareness & Geospatial Analysis': 'Situationseinschätzung & Räumliche Analyse', 'Sketch': 'Skizze', 'Skill Catalog': 'Fähigkeitskatalog', 'Skill Details': 'Details zur Fähigkeit', 'Skill Equivalence Details': 'Details zur Fähigkeits-Vergleichbarkeit', 'Skill Equivalence added': 'Fähigkeits-Vergleichbarkeit hinzugefügt', 'Skill Equivalence deleted': 'Fähigkeits-Vergleichbarkeit gelöscht', 'Skill Equivalence updated': 'Fähigkeits-Vergleichbarkeit aktualisiert', 'Skill Equivalence': 'Fähigkeits-Vergleichbarkeit', 'Skill Equivalences': 'Fähigkeits-Vergleichbarkeiten', 'Skill Provision Catalog': 'Katalog der Fähigkeits-Bereitstellungen', 'Skill Provision Details': 'Details zur Fähigkeits-Bereitstellung', 'Skill Provision added': 'Fähigkeits-Bereitstellung hinzugefügt', 'Skill Provision deleted': 'Fähigkeits-Bereitstellung gelöscht', 'Skill Provision updated': 'Fähigkeits-Bereitstellung aktualisiert', 'Skill Provision': 'Fähigkeits-Bereitstellung', 'Skill Provisions': 'Fähigkeits-Bereitstellungen', 'Skill Status': 'Fähigkeitsstatus', 'Skill TYpe': 'Art der Fähigkeit', 'Skill Type Catalog': 'Fähigkeitstypen-Katalog', 'Skill Type Details': 'Details zum Fähigkeitstyp', 'Skill Type added': 'Fähigkeitstyp hinzugefügt', 'Skill Type deleted': 'Fähigkeitstyp gelöscht', 'Skill Type updated': 'Fähigkeitstyp aktualisiert', 'Skill Types': 'Fähigkeitstypen', 'Skill added': 'Fähigkeit hinzugefügt', 'Skill deleted': 'Fähigkeit gelöscht', 'Skill updated': 'Fähigkeit aktualisiert', 'Skill': 'Fähigkeit', 'Skills Catalog': 'Fähigkeitskatalog', 'Skills Management': 'Fähigkeiten-Management', 'Skills': 'Fähigkeiten', 'Skype ID': 'Skype ID', 'Slope failure, debris': 'Hangrutschung, Schutt', 'Small Trade': 'Kleiner Handel', 'Smoke': 'Rauch', 'Snapshot Report': 'Bericht zur aktuellen Lage', 'Snapshot': 'Momentaufnahme', 'Snow Fall': 'Schneefall', 'Snow Squall': 'Schneeböe', 'Soil bulging, liquefaction': 'Boden aufgequollen, Verflüssigung', 'Solid waste': 'Feste Abfälle', 'Solution Details': 'Details zur Lösung', 'Solution Item': 'Lösungselement', 'Solution added': 'Lösung hinzugefügt', 'Solution deleted': 'Lösung gelöscht', 'Solution updated': 'Lösung aktualisiert', 'Solution': 'Lösung', 'Solutions': 'Lösungen', 'Some': 'Einige', 'Sorry that location appears to be outside the area of the Parent.': 'Entschuldigung, diese Position scheint außerhalb des Bereichs des übergeordneten Elements zu liegen.', 'Sorry that location appears to be outside the area supported by this deployment.': 'Entschuldigung, diese Position scheint außerhalb des Bereichs zu liegen, der von dieser Anwendung unterstützt wird.', 'Sorry, I could not understand your request': 'Entschuldigung, leider konnte ich Ihre Anfrage nicht verstehen', 'Sorry, only users with the MapAdmin role are allowed to create location groups.': 'Entschuldigung, nur Benutzer mit der Kartenadministrator-Rolle sind berechtigt Gruppen von Standorten/Gebieten zu erstellen.', 'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Entschuldigung, nur Benutzer mit der Kartenadministrator-Rolle sind berechtigt diese Standorte/Gebiete zu bearbeiten', 'Sorry, something went wrong.': 'Entschuldigung, leider ist etwas schief gelaufen.', 'Sorry, that page is forbidden for some reason.':
'Entschuldigung, der Besuch dieser Seite ist aus einem bestimmten Grund nicht zulässig.', 'Sorry, that service is temporary unavailable.': 'Entschuldigung, leider steht dieser Service vorübergehend nicht zur Verfügung.', 'Sorry, there are no addresses to display': 'Entschuldigung, leider sind keine Adressen vorhanden, die angezeigt werden könnten.', 'Source ID': 'Quellen-ID', 'Source Time': 'Zeit der Quelle', 'Source': 'Quelle', 'Sources of income': 'Einkommensquellen', 'Space Debris': 'Weltraumschrott', 'Spanish': 'Spanisch', 'Special Ice': 'Besonderes Eis', 'Special Marine': 'Spezielles Wasserfahrzeug', 'Specialized Hospital': 'Spezialisiertes Krankenhaus', 'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Bestimmter Bereich (z.B. Gebäude/Raum) innerhalb eines Ortes, in dem diese Person/Gruppe gesehen wurde.', 'Specific locations need to have a parent of level': 'Bestimmte Orte benötigen ein übergeordnetes Element der Stufe', 'Specify a descriptive title for the image.': 'Geben Sie einen beschreibenden Titel für das Bild an.', 'Specify the bed type of this unit.': 'Geben Sie den Bettentyp für diese Einheit an.', 'Specify the number of available sets': 'Geben Sie die Anzahl der verfügbaren Sätze an', 'Specify the number of available units (adult doses)': 'Geben Sie die Anzahl der verfügbaren Einheiten ein (Dosis für Erwachsene)', 'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Geben Sie die Anzahl der verfügbaren Einheiten (in Liter) von Ringer-Lactat oder gleichwertige Lösungen ein', 'Specify the number of sets needed per 24h': 'Geben Sie die Anzahl der erforderlichen Sätze pro 24h ein', 'Specify the number of units (Erwachsenendosen) needed per 24h': 'Geben Sie die Anzahl der Einheiten ein (Dosis für Erwachsene), die pro 24h benötigt werden.', 'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Geben Sie die Anzahl der Einheiten (in Liter) von Ringer-Lactat oder gleichwertigen Lösungen ein, die man pro 24h braucht.', 'Spherical Mercator?': 'Sphärischer Mercator?', 'Spreadsheet Importer': 'Import von Tabellendokumenten', 'Spreadsheet uploaded': 'Tabellendokument hochgeladen', 'Squall': 'Böe', 'Staff & Volunteers': 'Mitarbeiter & Freiwillige', 'Staff & Volunteers (Combined)': 'Mitarbeiter & Freiwillige (kombiniert)', 'Staff ID': 'Mitarbeiter-ID', 'Staff Management': 'Mitarbeitermanagement', 'Staff Member Details': 'Details zum Mitarbeiter', 'Staff Member added': 'Mitarbeiter hinzugefügt', 'Staff Members': 'Mitarbeiter', 'Staff Record': 'Mitarbeiterakte', 'Staff Report': 'Mitarbeiterbericht', 'Staff Type Details': 'Details zum Mitarbeitertyp', 'Staff Type added': 'Mitarbeitertyp hinzugefügt', 'Staff Type deleted': 'Mitarbeitertyp gelöscht', 'Staff Type updated': 'Mitarbeitertyp aktualisiert', 'Staff Types': 'Mitarbeitertypen', 'Staff and Volunteers': 'Mitarbeiter und Freiwillige', 'Staff & Volunteers (combined)': 'Mitarbeiter & Freiwillige (kombiniert)', 'Staff member added': 'Mitarbeiter hinzugefügt', 'Staff present and caring for residents': 'Mitarbeiter sind anwesend und versorgen die Bewohner.', 'Staff': 'Mitarbeiter', 'Staffing': 'Mitarbeiterausstattung', 'Stairs': 'Treppen', 'Start Date': 'Startdatum', 'Start date': 'Startdatum', 'Start of Period': 'Beginn einer Periode', 'State': 'Bundesland', 'State / Province': 'Staat / Bundesland', 'State /Province': 'Staat / Bundesland', 'Stationery': 'Büromaterial', 'Status Report': 'Statusbericht',
'Status Reports': 'Statusberichte', 'Status Updated': 'Status aktualisiert', 'Status added': 'Status hinzugefügt', 'Status deleted': 'Status gelöscht', 'Status of clinical operation of the facility.': 'Status des klinischen Betriebs dieser Einrichtung.', 'Status of general operation of the facility.': 'Status des allgemeinen Betriebs dieser Einrichtung.', 'Status of morgue capacity.': 'Status der Leichenhallenkapazität.', 'Status of operations of the emergency department of this hospital.': 'Status des Betriebs der Notaufnahme dieses Krankenhauses.', 'Status of security procedures/access restrictions in the hospital.': 'Status von Sicherheitsverfahren/Zugriffsbeschränkung in diesem Krankenhaus.', 'Status of the operating rooms of this hospital.': 'Status der Operationssäle dieses Krankenhauses.', 'Status updated': 'Status aktualisiert', 'Status': 'Status', 'Steel frame': 'Stahlrahmen', 'Stock': 'Bestand', 'Stock Counts': 'Bestandszahlen', 'Stock in Warehouse': 'Bestand im Warenlager', 'Stolen': 'Gestohlen', 'Store spreadsheets in the Eden database': 'Speichere Tabellendokumente in der Eden-Datenbank', 'Storeys at and above ground level': 'Stockwerke auf und über der Erdoberfläche', 'Storm Force Wind': 'Wind in Sturmstärke', 'Storm Surge': 'Sturmflut', 'Stowaway': 'Blinder Passagier', 'Street Address': 'Adresse', 'Strong Wind': 'Starker Wind', 'Structural Hazards': 'Strukturelle Gefahren', 'Structural': 'Strukturell', 'Styles': 'Styles/Symbolisierungen', 'Style Field': 'Style-Feld', 'Style Values': 'Style-Werte', 'Sub-type': 'Unterart', 'Subject': 'Betreff', 'Submission successful - please wait': 'Absenden erfolgreich - bitte warten', 'Submission successful - please wait...': 'Absenden erfolgreich - bitte warten ...', 'Submit New (full form)': 'Neuen Eintrag absenden (vollständiges Formular)', 'Submit New (triage)': 'Neuen Eintrag absenden (Auswahl)', 'Submit New': 'Neuen Eintrag absenden', 'Submit a request for recovery': 'Registrieren einer Bergungsanfrage', 'Submit new Level 1 assessment (full form)': 'Absenden einer neuen Stufe 1 Beurteilung (vollständiges Formular)', 'Submit new Level 1 assessment (triage)': 'Absenden einer neuen Stufe 1 Beurteilung (Auswahl)', 'Submit new Level 2 assessment': 'Absenden einer neuen Stufe 2 Beurteilung', 'Submit': 'Abschicken', 'Subscription Details': 'Details zum Abo', 'Subscription added': 'Abo hinzugefügt', 'Subscription deleted': 'Abo gelöscht', 'Subscription updated': 'Abo aktualisiert', 'Subscriptions': 'Abonnements', 'Subsector Details': 'Details zum Teilbereich', 'Subsector added': 'Teilbereich hinzugefügt', 'Subsector deleted': 'Teilbereich gelöscht', 'Subsector updated': 'Teilbereich aktualisiert', 'Subsector': 'Teilbereich', 'Subsectors': 'Teilbereiche', 'Subsistence Cost': 'Verpflegungskosten', 'Suburb': 'Vorort', 'Suggest not changing this field unless you know what you are doing.': 'Bitte ändern Sie dieses Feld nur, wenn Sie genau wissen, was Sie tun.', 'Summary by Administration Level': 'Zusammenfassung nach Verwaltungsstufe', 'Summary of Incoming Supplies': 'Zusammenfassung der eingehenden Vorräte', 'Summary of Releases': 'Zusammenfassung der Releases', 'Summary': 'Zusammenfassung', 'Sunday': 'Sonntag', 'Supplier/Donor': 'Lieferant/Spender', 'Suppliers': 'Lieferanten', 'Supply Chain Management': 'Versorgungsketten-Management', 'Support Request': 'Unterstützungsanforderung', 'Support Requests': 'Unterstützungsanforderungen', 'Supports the decision making of large groups of Crisis Management Experts by
helping the groups create ranked list.': 'Unterstützt den Entscheidungsprozess von großen Gruppen von Krisenmanagementexperten, indem man den Gruppen ermöglicht, Prioritätenlisten aufzustellen.', 'Surgery': 'Chirurgie', 'Survey Answer Details': 'Details zur Umfrage-Antwort', 'Survey Answer added': 'Umfrage-Antwort hinzugefügt', 'Survey Answer deleted': 'Umfrage-Antwort gelöscht', 'Survey Answer updated': 'Umfrage-Antwort aktualisiert', 'Survey Answer': 'Umfrage-Antwort', 'Survey Module': 'Umfrage-Modul', 'Survey Name': 'Name der Umfrage', 'Survey Question Details': 'Details zur Umfrage-Frage', 'Survey Question Display Name': 'Angezeigter Name der Umfrage-Frage', 'Survey Question added': 'Umfrage-Frage hinzugefügt', 'Survey Question deleted': 'Umfrage-Frage gelöscht', 'Survey Question updated': 'Umfrage-Frage aktualisiert', 'Survey Question': 'Umfrage-Frage', 'Survey Series Details': 'Details zur Umfragenserie', 'Survey Series Name': 'Angezeigter Name der Umfrageserie', 'Survey Series added': 'Umfrageserie hinzugefügt', 'Survey Series deleted': 'Umfrageserie gelöscht', 'Survey Series updated': 'Umfrageserie aktualisiert', 'Survey Series': 'Umfrageserien', 'Survey Template Details': 'Details zur Umfragenvorlage', 'Survey Template added': 'Umfragenvorlage hinzugefügt', 'Survey Template deleted': 'Umfragenvorlage gelöscht', 'Survey Template updated': 'Umfragevorlage aktualisiert', 'Survey Template': 'Umfragenvorlage', 'Survey Templates': 'Umfragenvorlagen', 'Surveys': 'Umfragen', 'Switch to 3D': 'In Google Earth anzeigen', 'Symbology': 'Symbolisierung', 'Sync Conflicts': 'Synchronisierungskonflikte', 'Sync History': 'Synchronisierungshistorie', 'Sync Now': 'Jetzt synchronisieren', 'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'Partner für die Synchronisation sind Instanzen oder Peers (SahanaEden, SahanaAgasti, Ushahidi, etc.), mit denen die aktuelle Instanz synchronisiert werden soll. Ein Klick auf den Link rechts bringt Sie zur Seite, auf der Sie diese hinzufügen, suchen und ändern können.', 'Sync Partners': 'Partner für die Synchronisation', 'Sync Pools': 'Synchronisierungspools', 'Sync Schedule': 'Synchronisierungszeitplan', 'Sync Settings': 'Synchronisierungseinstellungen', 'Sync process already started on': 'Sync-Prozess bereits gestartet am', 'Synchronisation': 'Synchronisierung', 'Synchronization Conflicts': 'Synchronisierungskonflikte', 'Synchronization Details': 'Synchronisierung - Details', 'Synchronization History': 'Synchronisierungshistorie', 'Synchronization Peers': 'Synchronisierung von Peers', 'Synchronization Settings': 'Synchronisierungseinstellungen', 'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Die Synchronisation erlaubt es Ihnen, Daten mit anderen zu teilen und Ihre eigene Datenbank mit aktuellen Daten anderer Peers zu aktualisieren.
Diese Seite informiert Sie darüber, wie Sie die Synchronisationsfunktionen von Sahana Eden verwenden.', 'Synchronization not configured.': 'Synchronisierung nicht konfiguriert.', 'Synchronization settings updated': 'Synchronisierungseinstellungen wurden aktualisiert', 'Synchronization': 'Synchronisierung', 'Syncronisation History': 'Synchronisierungshistorie', 'Table': 'Tabelle', 'Tags': 'Tags', 'Take shelter in place or per <instruction>': 'Unterkunft aufsuchen oder gemäß <instruction>', 'Task Details': 'Details zur Aufgabe', 'Task List': 'Aufgabenliste', 'Task Status': 'Aufgabenstatus', 'Task added': 'Aufgabe hinzugefügt', 'Task deleted': 'Aufgabe gelöscht', 'Task updated': 'Aufgabe aktualisiert', 'Tasks': 'Aufgaben', 'Team Description': 'Teambeschreibung', 'Team Details': 'Details zum Team', 'Team Id': 'Team-ID', 'Team Leader': 'Teamleiter', 'Team Member added': 'Teammitglied hinzugefügt', 'Team Members': 'Teammitglieder', 'Team Name': 'Name des Teams', 'Team Type': 'Typ des Teams', 'Team added': 'Team hinzugefügt', 'Team deleted': 'Team gelöscht', 'Team updated': 'Team aktualisiert', 'Technical testing only, all recipients disregard': 'Diese Benachrichtigung ist ein technischer Test, bitte ignorieren', 'Telecommunications': 'Telekommunikation', 'Telephone': 'Telefon', 'Telephony': 'Telefonie', 'Temp folder %s not writable - unable to apply theme!': 'Temporärer Ordner %s nicht beschreibbar - Layout (theme) kann nicht angewandt werden!', 'Template Name': 'Name der Vorlage', 'Template file %s not readable - unable to apply theme!': 'Template-Datei %s nicht lesbar - Layout (theme) kann nicht angewandt werden!', 'Templates': 'Vorlagen', 'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'Begriff für die 5. Ebene der Verwaltungshierarchie eines Landes (z.B. ein Wahl- oder Postleitzahlenbereich). Diese Stufe wird nicht oft verwendet.', 'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'Begriff für die 4. Ebene der Verwaltungshierarchie eines Landes (z.B. Dorf, Stadtteil).', 'Term for the primary within-country administrative division (e.g. State or Province).': 'Begriff für die 1. Ebene der Verwaltungshierarchie eines Landes (z. B. Staat oder Bundesland).', 'Term for the secondary within-country administrative division (e.g. District or County).': 'Begriff für die 2. Ebene der Verwaltungshierarchie eines Landes (z. B. Regierungsbezirk oder Landkreis).', 'Term for the third-level within-country administrative division (e.g. City or Town).': 'Begriff für die 3. Ebene der Verwaltungshierarchie eines Landes (z. B. Ort oder Stadt).', 'Term for the top-level administrative division (i.e. Country).': 'Begriff für die Verwaltungseinheit der höchsten Ebene (d. h. Land).', 'Test Results': 'Testergebnisse', 'Territorial Authority': 'Territoriale Behörde', 'Terrorism': 'Terrorismus', 'Tertiary Server (Optional)': 'Tertiärer Server (Optional)', 'Text Color for Text blocks': 'Textfarbe für Textblöcke', 'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': 'Danke für die Validierung Ihrer E-Mail. Ihr Benutzeraccount wurde vom Systemadministrator noch nicht genehmigt (%s).
Sie erhalten eine Benachrichtigung per E-Mail, wenn Ihr Account aktiviert wurde.', 'Thanks for your assistance': 'Danke für Ihre Hilfe', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': 'Die "query" ist eine Bedingung wie "db.table1.field1==\'value\'". Etwas wie "db.table1.field1 == db.table2.field2" führt zu einem SQL JOIN.', 'The Area which this Site is located within.': 'Der Bereich, in dem sich dieser Ort befindet.', 'The Assessments module allows field workers to send in assessments.': 'Das Beurteilungsmodul erlaubt es Außendienstmitarbeitern, ihre Beurteilungen einzusenden.', 'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyze': 'Das Beurteilungsmodul speichert Beurteilungsvorlagen und erlaubt, Antworten auf Beurteilungen spezieller Ereignisse zu sammeln und auszuwerten', 'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyzed': 'Das Beurteilungsmodul speichert Beurteilungsvorlagen und erlaubt es, Antworten zu speziellen Ereignissen zu sammeln und zu analysieren', 'The Author of this Document (optional)': 'Der Autor dieses Dokumentes (optional)', 'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'Das Gebäudebeurteilungsmodul erlaubt es, die Sicherheit eines Gebäudes zu beurteilen, z. B. nach einem Erdbeben.', 'The Camp this Request is from': 'Das Camp, von dem diese Anfrage stammt', 'The Camp this person is checking into.': 'Das Camp, in das diese Person eincheckt.', 'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Die aktuelle Position der Person/Gruppe, welche ungenau (für die Berichterstellung) oder genau (zur Anzeige auf einer Karte) sein kann. Geben Sie einige Zeichen ein, um aus verfügbaren Standorten auszuwählen.', 'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'Die E-Mail-Adresse, an welche die Genehmigungsanfragen gesendet werden (normalerweise ist das eine Gruppen-Mail, keine Adresse einer Einzelperson). Wenn das Feld leer ist, dann werden Anforderungen automatisch genehmigt, wenn die Domänennamen übereinstimmen.', 'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'Das Vorfall-Berichtssystem ermöglicht es der Allgemeinheit, Vorfälle zu melden und diese verfolgen zu lassen.', 'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Der Herkunftsort der Person kann ungenau (für die Berichterstellung) oder genau (zur Anzeige auf einer Karte) sein. Geben Sie einige Zeichen ein, um aus verfügbaren Standorten auszuwählen.', 'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Der Ort, zu dem die Person gehen wird, welcher ungenau (für Berichte) oder genau (für die Darstellung auf einer Karte) sein kann.
Geben Sie einige Zeichen ein, um aus verfügbaren Standorten auszuwählen.', 'The Media Library provides a catalog of digital media.': 'Das Medienverzeichnis bietet einen Katalog digitaler Medien.', 'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'Das Nachrichtenmodul ist der Hauptknotenpunkt der Kommunikation des Sahana Systems. Es wird verwendet, um Warnungen und/oder andere Nachrichten mit Hilfe von SMS & E-Mail an unterschiedliche Gruppen und Einzelpersonen vor, während und nach einem Katastrophenfall zu schicken.', 'The Organization Registry keeps track of all the relief organizations working in the area.': 'Das Organisationsregister gibt einen Überblick über alle Hilfsorganisationen, die in der Region arbeiten.', 'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'Das Projektüberwachungsmodul ermöglicht die Erstellung von Aktivitäten um Lücken in Anforderungsbewertungen zu füllen.', 'The Role this person plays within this hospital.': 'Die Rolle, die diese Person in diesem Krankenhaus übernimmt.', 'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'Das Unterkunftsregister protokolliert alle Unterkünfte und speichert allgemeine Details. Es arbeitet mit anderen Modulen zusammen, um Menschen die sich in einer Unterkunft befinden, sowie die dort zur Verfügung stehenden Leistungen etc. zu dokumentieren.', 'The Shelter this Request is from': 'Die Unterkunft, aus der diese Anforderung stammt', 'The Shelter this person is checking into.': 'Die Unterkunft, in die diese Person eincheckt.', 'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'Die URL zur "GetCapabilities"-Seite eines Web Map Service (WMS), dessen Kartenebenen über die Anzeige verfügbar sein sollen.', 'The URL of your web gateway without the post parameters': 'Die URL Ihres Web-Gateways ohne die POST-Parameter.', 'The URL to access the service.': 'Die URL für den Zugriff auf den Service.', 'The Unique Identifier (UUID) as assigned to this facility by the government.': 'Die eindeutige Kennung (UUID) die dieser Einrichtung von der Regierung zugeordnet wurde.', 'The asset must be assigned to a site OR location.': 'Die Anlage muss einem Standort oder einem Gelände zugeordnet werden.', 'The attribute which is used for the title of popups.': 'Das Attribut, welches für den Titel von Dialogfenstern verwendet wird.', 'The attribute within the KML which is used for the title of popups.': 'Das Attribut in der KML, das für den Titel der Dialogfenster verwendet wird.', 'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'Die Attribute innerhalb der KML, die für den Inhalt der Dialogfenster verwendet werden sollen.
(Verwenden Sie ein Leerzeichen zwischen Attributen)', 'The body height (crown to heel) in cm.': 'Die Körpergröße (Kopf bis Fuß) in cm.', 'The country the person usually lives in.': 'Das Land, in dem die Person normalerweise lebt.', 'The default Organization for whom this person is acting.': 'Die Standardorganisation, für die diese Person agiert.', 'The default Organization for whom you are acting.': 'Die Standardorganisation, für die Sie agieren.', 'The duplicate record will be deleted': 'Der doppelte Datensatz wird gelöscht.', 'The first or only name of the person (mandatory).': 'Der erste oder einzige Name der Person (verpflichtend).', 'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'Das Format der URL ist http://your/web/map/service?service=WMS&request=GetCapabilities wobei your/web/map/service für den Pfad der URL zum WMS steht.', 'The language you wish the site to be displayed in.': 'Die Sprache in der die Seite angezeigt werden soll.', 'The list of Brands are maintained by the Administrators.': 'Die Liste der Marken wird von den Administratoren verwaltet.', 'The list of Catalogs are maintained by the Administrators.': 'Die Liste der Kataloge wird vom Administrator verwaltet.', 'The map will be displayed initially with this latitude at the center.': 'Die Karte wird zunächst auf diese Geographische Breite zentriert.', 'The map will be displayed initially with this longitude at the center.': 'Die Karte wird zunächst auf diese Geographische Länge zentriert.', 'The minimum number of features to form a cluster.': 'Die minimale Anzahl von Objekten, die einen Cluster bilden.', 'The name to be used when calling for or directly addressing the person (optional).': 'Der zu verwendende Name beim Anfragen oder direkten Ansprechen der Person (optional).', 'The next screen will allow you to detail the number of people here & their needs.': 'Der nächste Bildschirm erlaubt es, nähere Angaben zur Anzahl der Menschen hier & ihren Bedürfnissen zu machen.', 'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': 'Die Anzahl der Maßeinheiten eines alternativen Artikels, welche einer Maßeinheit dieses Artikels entspricht', 'The number of pixels apart that features need to be before they are clustered.': 'Der Abstand in Pixeln, den Objekte voneinander haben müssen, damit sie nicht in Clustern zusammengefasst dargestellt werden.', 'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'Die Anzahl der Teilbilder rund um den sichtbaren Kartenausschnitt, die heruntergeladen werden.
Null bedeutet, dass die erste Seite schneller geladen wird, höhere Zahlen bedeuten, dass nachfolgendes Schwenken schneller ist.', 'The person at the location who is reporting this incident (optional)': 'Die Person vor Ort, welche den Vorfall meldet (optional)', 'The post variable containing the phone number': 'Der POST-Parameter, der die Telefonnummer beinhaltet', 'The post variable on the URL used for sending messages': 'Der POST-Parameter, der die Nachricht beinhaltet', 'The post variables other than the ones containing the message and the phone number': 'Die POST-Parameter, die weder die Nachricht noch die Telefonnummer beinhalten', 'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'Der serielle Anschluss, mit dem das Modem verbunden ist - /dev/ttyUSB0 etc. unter Linux und com1, com2 etc. unter Windows', 'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'Der Server hat keine rechtzeitige Antwort von einem anderen Server erhalten, um die Anfrage des Clients beantworten zu können.', 'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'Der Server hat eine ungültige Antwort von einem anderen Server erhalten, auf den er zugegriffen hat, um die Anfrage des Browsers zu erfüllen.', 'The site where this position is based.': 'Das Gelände, auf dem diese Position angesiedelt ist.', 'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'Die für Einrichtungen zuständigen Mitarbeiter können Hilfe anfordern. Bezüglich dieser Anfragen können Zusagen gemacht werden; die Anfragen bleiben jedoch so lange offen, bis der Anforderer bestätigt, dass die Anfrage erfüllt ist.', 'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'Das genannte Ereignis stellt keine Bedrohung oder Sorge mehr dar und jede nachfolgende Aktion ist unter <instruction> beschrieben.', 'The time at which the Event started.': 'Die Zeit, zu der das Ereignis startete.', 'The token associated with this application on': 'Das Token, das mit dieser Anwendung verbunden ist, auf', 'The unique identifier which identifies this instance to other instances.': 'Die eindeutige Kennung (UUID), die diese Instanz bei der Kommunikation mit anderen Instanzen identifiziert.', "The volunteer's role": "Rolle des Freiwilligen", 'The way in which an item is normally distributed': 'Die Art, in der ein Artikel normalerweise verteilt wird.', 'The weight in kg.': 'Das Gewicht in kg.', 'The': 'Das', 'Thematic Mapping': 'Thematische Kartendarstellung', 'Theme Details': 'Details zum Thema', 'Theme added': 'Thema hinzugefügt', 'Theme deleted': 'Thema gelöscht', 'Theme updated': 'Thema aktualisiert', 'Theme': 'Thema', 'Themes': 'Themen', 'There are errors': 'Es sind Fehler aufgetreten', 'There are insufficient items in the Inventory to send this shipment': 'Es sind nicht genügend Artikel im Bestand, um diese Lieferung abzusenden.', 'There are multiple records at this location': 'An dieser Stelle gibt es mehrere Datensätze', 'There is no address for this person yet. Add new address.': 'Für diese Person gibt es noch keine Adresse.
Fügen Sie eine neue Adresse hinzu.', 'These are settings for Inbound Mail.': 'Dies sind Einstellungen für eingehende Mail.', 'These are the Incident Categories visible to normal End-Users': 'Dies sind die für alle Endbenutzer sichtbaren Kategorien von Vorfällen', 'These need to be added in Decimal Degrees.': 'Diese müssen in Dezimalgrad hinzugefügt werden.', 'They': 'Sie', 'This Group has no Members yet': 'Diese Gruppe hat noch keine Mitglieder', 'This Team has no Members yet': 'Dieses Team hat noch keine Mitglieder', 'This appears to be a duplicate of': 'Dies scheint ein Duplikat zu sein von', 'This file already exists on the server as': 'Diese Datei existiert bereits auf dem Server als', 'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': "Dies ist zulässig, wenn sich die Stufe noch im Aufbau befindet. Um unbeabsichtigte Änderungen zu verhindern, nachdem dieses Level abgeschlossen ist, kann dies auf 'False' gesetzt werden.", 'This is the way to transfer data between machines as it maintains referential integrity.': 'Auf diese Weise werden Daten zwischen Maschinen übertragen, um die referenzielle Integrität aufrechtzuerhalten.', 'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'Auf diese Weise werden Daten zwischen Maschinen übertragen, um die referenzielle Integrität aufrechtzuerhalten. Doppelte Daten sollten vorher manuell entfernt werden!', 'This level is not open for editing.': 'Diese Stufe ist nicht zum Bearbeiten freigegeben.', 'This might be due to a temporary overloading or maintenance of the server.': 'Dies wurde möglicherweise durch eine vorübergehende Überlastung oder Wartung des Servers ausgelöst.', 'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': 'Dieses Modul ermöglicht es, Bestandsartikel zwischen Beständen verschiedener Einrichtungen anzufragen und zu liefern.', 'This module allows the editing of page content using a web browser.': 'Dieses Modul ermöglicht das Editieren der Webseite unter Verwendung des Browsers.', 'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Mit diesem Modul können Sie Szenarien sowohl für Übungen als auch für Ereignisse planen. Sie können geeignete Ressourcen (Menschen, Anlagen & Einrichtungen) zuordnen, damit diese leicht mobilisiert werden können.', 'This page shows you logs of past syncs. Click on the link below to go to this page.': 'Diese Seite zeigt Ihnen die Protokolle von vorherigen Syncs.
Klicken Sie auf den Link unten, um auf diese Seite zu gelangen.', 'This screen allows you to upload a collection of photos to the server.': 'Diese Seite ermöglicht Ihnen, eine Sammlung von Fotos auf den Server hochzuladen.', 'This setting can only be controlled by the Administrator.': 'Diese Einstellung kann nur vom Systemverwalter vorgenommen werden.', 'This shipment has already been received.': 'Diese Lieferung wurde bereits empfangen.', 'This shipment has already been sent.': 'Diese Lieferung wurde bereits abgeschickt.', 'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'Diese Lieferung wurde noch nicht empfangen - sie ist NICHT abgebrochen worden, weil sie immer noch bearbeitet werden kann.', 'This shipment has not been sent - it has NOT been canceled because it can still be edited.': 'Diese Lieferung wurde noch nicht abgeschickt - sie ist NICHT abgebrochen worden, weil sie immer noch bearbeitet werden kann.', 'This shipment will be confirmed as received.': 'Der Empfang dieser Lieferung wird bestätigt.', 'Thunderstorm': 'Gewitter', 'Thursday': 'Donnerstag', 'Ticket Details': 'Details zum Ticket', 'Ticket ID': 'Ticket-ID', 'Ticket added': 'Ticket hinzugefügt', 'Ticket deleted': 'Ticket gelöscht', 'Ticket updated': 'Ticket aktualisiert', 'Ticketing Module': 'Ticket Modul', 'Tile Mapping Service': 'TileMapService', 'Tilt-up concrete': 'Betonfertigteilbauweise', 'Timber frame': 'Holzrahmen', 'Timeline Report': 'Bericht zum Zeitplan', 'Timeline': 'Zeitplan', 'Time Out': 'Ausgangszeit', 'Time Question': 'Zeit Frage', 'Title': 'Titel', 'Title to show for the Web Map Service panel in the Tools panel.': 'Titel, mit dem die WebMapService-Leiste in der Werkzeugleiste angezeigt wird', 'To Location': 'Zum Standort', 'To Organization': 'Zur Organisation', 'To Person': 'Zu Händen von', 'To begin the sync process, click the button on the right =>': 'Zum Starten der Synchronisierung klicken Sie auf die Schaltfläche auf der rechten Seite =>', 'To begin the sync process, click this button =>': 'Um den Synchronisierungsprozess zu starten, klicken Sie auf diese Schaltfläche =>', 'To create a personal map configuration, click': 'Um eine persönliche Kartenkonfiguration zu erstellen, klicken Sie auf', 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'Zum Bearbeiten von OpenStreetMap müssen Sie die Einstellungen in models/000_config.py anpassen', 'To move the Timeline: use the mouse scroll wheel, the arrow keys or grab and drag the Timeline.': "Um die Zeitachse zu verschieben, nutzen Sie bitte das Mausrad, die Pfeiltasten oder verschieben Sie sie per Drag'n Drop", 'To search by job title, enter any portion of the title. You may use % as wildcard.': 'Um nach einer Jobbezeichnung zu suchen, geben Sie einen beliebigen Teil des Namens ein.
Sie können % als Wildcard verwenden.', 'To variable': 'zu variieren', 'To': 'Bis', 'To Address': 'Empfängeradresse', 'Tools': 'Arbeitsmittel', 'Tornado': 'Wirbelsturm', 'Total # of Target Beneficiaries': 'Gesamtzahl der Nutznießer', 'Total # of households of site visited': 'Gesamtzahl der Haushalte des besuchten Geländes', 'Total Beds': 'Betten insgesamt', 'Total Beneficiaries': 'Gesamtzahl Nutznießer', 'Total Budget': 'Gesamtbudget', 'Total Capacity (Night)': 'Gesamtkapazität (Nacht)', 'Total Cost per Megabyte': 'Gesamtkosten pro Megabyte', 'Total Cost per Minute': 'Gesamtkosten pro Minute', 'Total Cost': 'Gesamtkosten', 'Total Monthly Cost': 'Gesamte monatliche Kosten', 'Total Monthly': 'Insgesamt Monatlich', 'Total One-time Costs': 'Summe einmaliger Kosten', 'Total Persons': 'Gesamtzahl an Personen', 'Total Records: %(numrows)s':'Gesamtzahl an Datensätzen %(numrows)s', 'Total Recurring Costs': 'Gesamte wiederkehrende Kosten', 'Total Unit Cost': 'Gesamtstückkosten', 'Total Units': 'Summe Einheiten', 'Total Value': 'Gesamtwert', 'Total Volume (m3)': 'Gesamtvolumen (m3)', 'Total Weight (kg)': 'Gesamtgewicht (kg)', 'Total gross floor area (square meters)': 'Gesamtgröße der Fläche (Quadratmeter)', 'Total number of beds in this hospital. Automatically updated from daily reports.': 'Gesamtzahl der Betten in diesem Krankenhaus. Automatisch aktualisiert über die täglichen Berichte.', 'Total number of houses in the area': 'Gesamtzahl der Häuser im Gebiet', 'Total number of schools in affected area': 'Gesamtzahl der Schulen im betroffenen Gebiet', 'Total population of site visited': 'Gesamtzahl der Bevölkerung des besuchten Gebietes', 'Total': 'Summe', 'Tourist Group': 'Touristengruppe', 'Town': 'Stadt', 'Town / Municipality': 'Ort / Stadtbezirk', 'Traces internally displaced people (IDPs) and their needs': 'Verfolgung von Binnenflüchtlingen (IDP) und deren Bedürfnisse', 'Tracing': 'Verfolgung', 'Track Details': 'Details zum Track', 'Track deleted': 'Track gelöscht', 'Track updated': 'Track aktualisiert', 'Track uploaded': 'Track hochgeladen', 'Track with this Person?': 'Diese Person verfolgen?', 'Track': 'Track', 'Tracking of Projects, Activities and Tasks': 'Verfolgen von Projekten, Aktivitäten und Aufgaben', 'Tracking of basic information on the location, facilities and size of the Shelters': 'Verfolgung von Basisinformationen über Ort, Einrichtungen und Größe von Unterkünften', 'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Verfolgung der Position, Verteilung, Kapazität und Aufteilung der Opfer auf Unterkünfte', 'Tracks': 'Verfolgungen', 'Traffic Report': 'Datenverkehrsbericht', 'Training Course Catalog': 'Schulungskurs-Katalog', 'Training Details': 'Details zur Schulung', 'Training Event': 'Schulungskurs', 'Training Events': 'Schulungskurse', 'Training Facility': 'Schulungseinrichtung', 'Training Report': 'Schulungsbericht', 'Training added': 'Schulung hinzugefügt', 'Training deleted': 'Schulung gelöscht', 'Training updated': 'Schulung aktualisiert', 'Training': 'Schulung', 'Trainings': 'Weiterbildungen / Übungen', 'Transition Effect': 'Übergangseffekt', 'Transit Status': 'Transitstatus', 'Translation': 'Übersetzung', 'Transportation assistance, Rank': 'Transport-Unterstützung, Rank', 'Trauma Center': 'Trauma Zentrum', 'Travel Cost': 'Reisekosten', 'Tropical Storm': 'Tropischer Sturm', 'Tropo Messaging Token': 'Tropo Nachrichten Token', 'Tropo Settings': 'Tropo Einstellungen', 'Tropo settings updated': 'Tropo Einstellungen aktualisiert', 'Truck': 
'Lastwagen', 'Try checking the URL for errors, maybe it was mistyped.': 'Untersuchen Sie die URL auf Fehler, vielleicht war sie falsch geschrieben.', 'Try hitting refresh/reload button or trying the URL from the address bar again.': "Versuchen Sie den Knopf 'Aktualisieren/Erneut Laden' oder versuchen Sie nochmals die URL aus der Adresszeile.", 'Try refreshing the page or hitting the back button on your browser.': "Versuchen Sie die Seite zu aktualisieren oder den 'Zurück'-Knopf im Browser zu nutzen.", 'Tuesday': 'Dienstag', 'Tugboat Capacity': 'Schleppkahnkapazitäten', 'Tweeted by': 'Getwittert von', 'Tweeted on': 'Getwittert auf', 'Twilio Channels': 'Twilio Kanäle', 'Twitter Channels': 'Twitter Kanäle', 'Twitter ID or #hashtag': 'Twitter-ID oder #hashtag', 'Twitter InBox': 'Twitter Eingang', 'Twitter Search': 'Twitter Suche', 'Twitter Search Results': 'Twitter Suchergebnisse', 'Twitter Settings': 'Einstellungen für Twitter', 'Type of Construction': 'Bautyp', 'Type of water source before the disaster': 'Typ der Wasserquelle vor der Katastrophe', 'Type': 'Typ', 'Types': 'Typen', 'UN': 'UN', 'Un-Repairable': 'Nicht zu reparieren', 'Unable to parse CSV file!': 'CSV Datei kann nicht analysiert werden!', 'Understaffed': 'Unterbesetzt', 'Unidentified': 'Nicht identifiziert', 'Unit Cost': 'Kosten für Einheit', 'Unit Value': 'Einheitswert', 'Unit added': 'Einheit hinzugefügt', 'Unit deleted': 'Einheit gelöscht', 'Unit of Measure': 'Maßeinheit', 'Unit updated': 'Einheit aktualisiert', 'Unit': 'Einheit', 'Units': 'Einheiten', 'Unknown Peer': 'Unbekannter Peer', 'Unknown type of facility': 'Unbekannter Einrichtungstyp', 'Unknown': 'unbekannt', 'Unmark as duplicate': 'Duplikatsmeldung zurückziehen', 'Unreinforced masonry': 'Nicht verstärktes Mauerwerk', 'Unresolved Conflicts': 'Ungelöste Konflikte', 'Unsafe': 'Unsicher', 'Unselect to disable the modem': 'Abwählen um das Modem zu deaktivieren', 'Unsent': 'Nicht gesendet', 'Unsupported data format!': 'Nicht unterstütztes Datenformat!', 'Unsupported method!': 'Nicht unterstützte Methode!', 'Update Activity Report': 'Aktivitätsbericht aktualisieren', 'Update Cholera Treatment Capability Information': 'Aktualisieren der Informationen zu den Cholera Behandlungsmöglichkeiten', 'Update Request': 'Anfrage Aktualisieren', 'Update Service Profile': 'Leistungsprofil aktualisieren', 'Update Status': 'Status aktualisieren', 'Update Task Status': 'Status der Aufgabe aktualisieren', 'Update Unit': 'Enheit Aktualisieren', 'Update if Master': 'Aktualisiere wenn Master', 'Update if Newer': 'Aktualisiere falls neuer', 'Update your current ordered list': 'Aktualisieren Sie ihre aktuell bestellte Liste', 'Update': 'Aktualisierung', 'Updated By': 'Aktualisiert von', 'Upload Photos': 'Fotos hochladen', 'Upload Spreadsheet': 'Tabellendokument hochladen', 'Upload Track': 'Verfolgung hochladen', 'Upload a Spreadsheet': 'Ein Tabellendokument hochladen', 'Upload a file formatted according to the Template.': 'Laden Sie eine entsprechend der Vorlage formatierte Datei hoch.', 'Upload an Assessment Template import file': 'Upload einer Beurteilungsvorlage', 'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': 'Grafikdatei hochladen (bmp, gif, jpeg-oder png), max. 
300x300 Pixel!', 'Upload an image file here.': 'Laden Sie hier die Grafikdatei hoch.', 'Upload an image, such as a photo': 'Laden Sie eine Grafikdatei hoch, wie beispielsweise ein Foto', 'Uploaded Image': 'Hochgeladenes Bild', 'Upload translated files': 'Übersetzte Dateien hochladen', 'Upon Request': 'Auf Anfrage', 'Urban Fire': 'Siedlungsfeuer', 'Urban area': 'Stadtgebiet / Ballungsgebiet', 'Urgent': 'Dringend', 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Verwende (...)&(...) für UND, (...)|(...) für ODER und ~(...) für NICHT, um komplexere Abfragen zu erstellen.', 'Use Geocoder for address lookups?': "Verwendung von 'Geocoder' für Adressenüberprüfung?", 'Use deg, min, sec':'Nutze Grad, Minuten, Sekunden', 'Use decimal':'Nutze Dezimalgrad', 'Use default': 'Standardwert verwenden', 'Use for Login?': 'Für Login verwenden?', 'Use these links to download data that is currently in the database.': 'Verwenden Sie diese Links, um Daten herunterzuladen, die derzeit in der Datenbank liegen.', 'Used by IRS & Assess': 'Verwendet vom IRS & Assess', 'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Verwendet in onHover Tooltip & Cluster Popups, um verschiedene Typen zu unterscheiden.', 'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Verwendet, um den onHover Tooltip zu erstellen & das 1. Feld wird ebenfalls im Cluster Dialogfeld benutzt, um zwischen verschiedenen Datensätzen zu unterscheiden.', 'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Wird zur Überprüfung genutzt, dass die eingegebene Geographische Breite für den Ort sinnvoll ist. Kann verwendet werden, um Ressourcen zu filtern, die Standorte haben.', 'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Wird zur Überprüfung genutzt, dass die eingegebene Geographische Länge für den Ort sinnvoll ist.
Kann verwendet werden, um Ressourcen zu filtern, die Standorte haben.', 'Used to import data from spreadsheets into the database': 'Dient dazu, Daten aus Tabellendokumenten in die Datenbank zu übertragen.', 'Used within Inventory Management, Request Management and Asset Management': 'Verwendung bei der Bestands-, Anfrage- und Anlagenverwaltung', 'User Account has been Disabled': 'Das Benutzerkonto wurde deaktiviert', 'User Details': 'Details zum Benutzer', 'User Management': 'Benutzerverwaltung', 'User Profile': 'Benutzerprofil', 'User Requests': 'Benutzeranfragen', 'User Updated': 'Benutzer aktualisiert', 'User added': 'Benutzer hinzugefügt', 'User already has this role': 'Der Benutzer hat bereits diese Rolle', 'User deleted': 'Benutzer gelöscht', 'User updated': 'Benutzer aktualisiert', 'User': 'Benutzer', 'Username': 'Benutzername', 'Users removed': 'Benutzer entfernt', 'Users': 'Benutzer', 'Uses the REST Query Format defined in': 'Verwendet das REST-Abfrageformat, das definiert ist in', 'Utilities': 'Dienstprogramme', 'Utility, telecommunication, other non-transport infrastructure': 'Dienstprogramm, Telekommunikation, andere nicht-Verkehrsinfrastruktur', 'Utilization Report': 'Verwendungsbericht', 'Value per Pack': 'Wert pro Paket', 'Value': 'Wert', 'Various Reporting functionalities': 'Verschiedene Funktionalitäten für das Berichtswesen', 'Vehicle Categories': 'Fahrzeugkategorien', 'Vehicle Crime': 'Fahrzeugkriminalität', 'Vehicle Height (m)': 'Höhe des Fahrzeugs (m)', 'Vehicle Management': 'Fahrzeugmanagement', 'Vehicle Plate Number': 'Fahrzeugnummernschild', 'Vehicle Type': 'Fahrzeugtyp', 'Vehicle Types': 'Fahrzeugtypen', 'Vehicle Weight (kg)': 'Gewicht des Fahrzeugs (kg)', 'Vehicle': 'Fahrzeug', 'Vehicles': 'Fahrzeuge', 'Vehicles are assets with some extra details.': 'Fahrzeuge sind Anlagen, die mit einigen speziellen Funktionen ausgestattet sind.', 'Venue': 'Örtlichkeit', 'Verification Status': 'Prüfstatus', 'Verified?': 'Geprüft?', 'Verify password': 'Passwort bestätigen', 'Very Good': 'Sehr gut', 'Very High': 'Sehr hoch', 'Vessel Max Length': 'Wasserfahrzeug maximale Länge', 'View Alerts received using either Email or SMS': 'Empfangene Warnungen über E-Mail oder SMS', 'View All': 'Alles anzeigen', 'View Error Tickets': 'Fehler Tickets ansehen', 'View Fullscreen Map': 'Vollbild Karte anzeigen', 'View Image': 'Bild anzeigen', 'View Items': 'Artikel anzeigen', 'View On Map': 'Auf Karte anzeigen', 'View Outbox': 'Postausgang anzeigen', 'View Picture': 'Bild anzeigen', 'View Settings': 'Einstellungen anzeigen', 'View Test Result Reports': 'Zeige Berichte der Testergebnisse', 'View Tickets': 'Tickets anzeigen', 'View Translation Percentage': 'Zeige Übersetzungsstatistik', 'View and/or update their details': 'Anzeige und/oder Aktualisieren Ihrer Detailinformationen', 'View as Pages': 'Anzeige als Seiten', 'View or update the status of a hospital.': 'Anzeige oder Aktualisieren des Status eines Krankenhauses.', 'View pending requests and pledge support.': 'Anstehende Anforderungen anzeigen und Unterstützung zusagen.', 'View the hospitals on a map.': 'Krankenhäuser auf einer Karte anzeigen', 'View/Edit the Database directly': 'Die Datenbank direkt anzeigen/bearbeiten', 'Village Leader': 'Dorfvorsteher', 'Village / Suburb': 'Ortschaft / Vorort', 'Village': 'Dorf', 'Visible?': 'Sichtbar?', 'Visual Recognition': 'Visuelle Erkennung', 'Volcanic Ash Cloud': 'Wolke vulkanischer Asche', 'Volcanic Event': 'Vulkanisches Ereignis', 'Volume (m3)': 'Volumen (m3)', 'Volunteer Availability': 'Verfügbarkeit von
Freiwilligen', 'Volunteer Details': 'Details zu Freiwilligen', 'Volunteer Information': 'Freiwilligeninformation', 'Volunteer Management': 'Management von Freiwilligen', 'Volunteer Project': 'Freiwilligen Projekt', 'Volunteer Record': 'Freiwilligen Datensatz', 'Volunteer Report': 'Freiwilligen Bericht', 'Volunteer Request': 'Freiwilligen Anforderung', 'Volunteer Role': 'Rolle des Freiwilligen', 'Volunteer Role Catalog': 'Rollenkatalog für Freiwillige', 'Volunteer added': 'Freiwilliger hinzugefügt', 'Volunteer availability added': 'Freiwilligen Verfügbarkeit hinzugefügt', 'Volunteer availability deleted': 'Freiwilligen Verfügbarkeit geöscht', 'Volunteer availability updated': 'Freiwilligen Verfügbarkeit aktualisiert', 'Volunteer deleted': 'Freiwilliger gelöscht', 'Volunteer details updated': 'Details zu Freiwilligen aktualisiert', 'Volunteers were notified!': 'Freiwillige wurden unterrichtet!', 'Volunteers': 'Freiwillige', 'Vote': 'Abstimmung', 'Votes': 'Abstimmungen', 'WASH': 'WASH', 'Walking Only': 'Nur laufen', 'Wall or other structural damage': 'Wand oder andere Gebäudeschäden', 'Warehouse Details': 'Details zu Warenlager', 'Warehouse Stock': 'Warenlagerbestand', 'Warehousing Storage Capacity': 'Warenlager Ablagekapazität', 'Warehouse Type': 'Warenlagertyp', 'Warehouse Types': 'Warenlagertypen', 'Warehouse added': 'Warenlager hinzugefügt', 'Warehouse deleted': 'Warenlager gelöscht', 'Warehouse updated': 'Warenlager aktualisiert', 'Warehouse': 'Warenlager', 'Warehouses': 'Warenlager', 'Water Sanitation Hygiene': 'Wasser Abwasserentsorgung Hygiene', 'Water collection': 'Wassersammlung', 'Water gallon': 'Wasser Gallonen', 'Water storage containers in households': 'Wasser-Behälter in Haushalten', 'Water supply': 'Wasserversorgung', 'Waybill Number': 'Frachtbriefnummer', 'WB': 'Frachtbriefnr.', 'Web Feature Service': 'WebFeatureService', 'Web Map Service': 'WebMapService', 'Web Map Service Browser Name': 'WebMapService Browser Name', 'Web Map Service Browser URL': 'WebMapService Browser URL', 'Website': 'Webseite', 'Wednesday': 'Mittwoch', 'Weight (kg)': 'Gewicht (kg)', 'Weight': 'Gewicht', 'Welcome to the Sahana Portal at': 'Willkommen beim Sahana Portal', 'Well-Known Text': 'WellKnownText (OGC-WKT)', 'What the Items will be used for': 'Beabsichtigte Verwendung der Artikel', 'Wheat': 'Weizen', 'When reports were entered': 'Wann die Berichte eingegeben wurden', 'Whiskers': 'Barthaare', 'Who is doing what and where': 'Wer macht was und wo', 'Who usually collects water for the family?': 'Wer sammelt normalerweise Wasser für die Familie?', 'Width': 'Breite', 'Width (m)': 'Breite (m)', 'Wild Fire': 'Wildfeuer', 'Wind Chill': 'Kälte vom Wind', 'Window frame': 'Fensterrahmen', 'Winter Storm': 'Wintersturm', 'Women of Child Bearing Age': 'Frauen im gebärfähigen Alter', 'Women participating in coping activities': 'Frauen die sich an den Hilfsaktivitäten beteiligen', 'Women who are Pregnant or in Labour': 'Frauen die schwanger sind oder in den Wehen', 'Womens Focus Groups': 'Focus Gruppen für Frauen', 'Wooden plank': 'Hölzerne Planke', 'Wooden poles': 'Holzmasten', 'Working hours end': 'Arbeitszeit Ende', 'Working hours start': 'Arbeitszeit Beginn', 'Working or other to provide money/food': 'Arbeiten oder etwas anderes um Geld/Lebensmittel zur Verfügung zu stellen.', 'XYZ Tiles': 'XYZ Tiles', 'X-Ray': 'Röntgen', 'YES': 'JA', 'Year built': 'Baujahr', 'Year of Manufacture': 'Herstellungsjahr', 'Year': 'Jahr', 'Yellow': 'Gelb', 'Yes': 'ja', 'You are a recovery team?': 'Sind Sie ein Bergungsteam?', 'You 
are attempting to delete your own account - are you sure you want to proceed?': 'Sie versuchen, Ihr eigenes Konto zu löschen - sind Sie sicher, dass Sie fortfahren möchten?', 'You are currently reported missing!': 'Sie sind derzeit als vermisst gemeldet!', 'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'Sie können die Konfiguration des Synchronisierungsmoduls unter Einstellungen anpassen. Diese Konfiguration enthält Ihre UUID (unique identification number), Synchronisierungszeitpläne, Beacon-Service usw. Klicken Sie auf den folgenden Link, um zu den Einstellungen für die Synchronisierung zu gelangen.', 'You can click on the map below to select the Lat/Lon fields': 'Sie können auf die Karte unten klicken, um die Felder für Geographische Länge und Breite auszuwählen.', 'You can select the Draw tool': 'Sie können das Zeichenwerkzeug auswählen', 'You can set the modem settings for SMS here.': 'Sie können die Modemeinstellungen für SMS hier festlegen.', 'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'Sie können das Konvertierungsprogramm verwenden, um GPS-Koordinaten oder Grad/Minuten/Sekunden umzuwandeln.', 'You do not have permission for any facility to make a commitment.': 'Sie haben keine Berechtigung, für irgendeine Einrichtung eine Zusage zu machen.', 'You do not have permission for any facility to make a request.': 'Sie haben keine Berechtigung, für irgendeine Einrichtung eine Anfrage zu starten.', 'You do not have permission for any site to add an inventory item.': 'Sie haben keine Berechtigung, für irgendein Gelände einen Bestandsartikel hinzuzufügen.', 'You do not have permission for any site to receive a shipment.': 'Sie haben keine Berechtigung, für irgendein Gelände eine Lieferung anzunehmen.', 'You do not have permission for any site to send a shipment.': 'Sie haben keine Berechtigung, für irgendein Gelände eine Lieferung abzusenden.', 'You do not have permission to cancel this received shipment.': 'Sie haben keine Berechtigung, diese erhaltene Lieferung zu stornieren.', 'You do not have permission to cancel this sent shipment.': 'Sie haben keine Berechtigung, diese gesendete Lieferung zu stornieren.', 'You do not have permission to make this commitment.': 'Sie haben keine Berechtigung, diese Zusage zu machen.', 'You do not have permission to receive this shipment.': 'Sie haben keine Berechtigung, diese Lieferung entgegenzunehmen.', 'You do not have permission to send a shipment from this site.': 'Sie haben keine Berechtigung, Lieferungen von diesem Gelände zu senden.', 'You do not have permission to send this shipment.': 'Sie haben keine Berechtigung, diese Lieferung zu senden.', 'You have a personal map configuration. To change your personal configuration, click': 'Sie haben eine persönliche Kartenkonfiguration.
Um Ihre persönliche Konfiguration zu ändern, klicken Sie hier', 'You have found a dead body?': 'Sie haben eine Leiche gefunden?', 'You must be logged in to register volunteers.': 'Sie müssen angemeldet sein, um Freiwillige zu registrieren.', 'You must be logged in to report persons missing or found.': 'Sie müssen angemeldet sein, um fehlende oder gefundene Personen zu melden.', 'You must provide a series id to proceed.': 'Sie müssen eine Serien-ID angeben, um fortzufahren.', 'You should edit Twitter settings in models/000_config.py': 'Sie sollten die Twitter Einstellungen unter models/000_config.py bearbeiten', 'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Ihre aktuelle, geordnete Liste der Lösungselemente wird unten angezeigt. Sie können sie durch erneutes Abstimmen verändern.', 'Your post was added successfully.': 'Der Eintrag wurde erfolgreich hinzugefügt.', 'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Ihr System verfügt über eine eindeutige ID (UUID), die andere Computer nutzen können, um Sie zu identifizieren. Um Ihre UUID anzuzeigen, gehen Sie zu Synchronisierung -> Sync-Einstellungen. Sie können auch andere Einstellungen auf dieser Seite einsehen.', 'Zero Hour': 'Stunde null', 'Zinc roof': 'Zinkdach', 'Zoom Levels': 'Zoomebenen', 'Zoom in': 'Hineinzoomen', 'Zoom to Current Location': 'Auf aktuelles Gebiet/Standort fokussieren', 'Zoom to maximum map extent': 'Auf maximale Kartenausdehnung fokussieren', 'Zoom': 'Zoomen', 'active': 'aktiv', 'added': 'hinzugefügt', 'all records': 'Alle Datensätze', 'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'Ermöglicht, ein Budget zu entwickeln, basierend auf Mitarbeiter- und Gerätekosten, einschließlich aller administrativen Gemeinkosten.', 'allows for creation and management of surveys to assess the damage following a natural disaster.': 'Ermöglicht die Erstellung und Verwaltung von Umfragen zur Beurteilung von Schäden nach einer Naturkatastrophe.', 'an individual/team to do in 1-2 days': 'Ein Aufwand von 1-2 Tagen für eine Einzelperson/ein Team', 'assigned': 'zugewiesen', 'average': 'Durchschnitt', 'black': 'schwarz', 'blue': 'blau', 'brown': 'braun', 'business_damaged': 'Business_beschädigt', 'by': 'durch', 'can be used to extract data from spreadsheets and put them into database tables.': 'Kann verwendet werden, um Daten von einer Tabelle zu extrahieren und diese in Datenbanktabellen einzutragen.', 'check all': 'Alles markieren', 'click for more details': 'hier klicken, um mehr Details zu erhalten', 'consider': 'Berücksichtigen', 'curly': 'lockig', 'currently registered': 'derzeitig registriert', 'daily': 'täglich', 'dark': 'dunkel', 'data uploaded': 'hochgeladene Daten', 'database %s select': 'Datenbank %s ausgewählt', 'database': 'Datenbank', 'deceased': 'Verstorbene', 'delete all checked': 'Alle Ausgewählten löschen', 'deleted': 'gelöscht', 'design': 'Design', 'diseased': 'erkrankt', 'displaced': 'vertrieben', 'divorced': 'geschieden', 'done!': 'fertig!', 'duplicate': 'Dublette', 'eg.
gas, electricity, water': 'zum Beispiel Gas, Strom, Wasser', 'enclosed area': 'eingeschlossener Bereich', 'export as csv file': 'Exportieren als CSV-Datei', 'fat': 'fett', 'feedback': 'Rückmeldung', 'female': 'weiblich', 'flush latrine with septic tank': 'Spültoilette mit Klärgrube', 'food_sources': 'lebensmittel_quellen', 'forehead': 'Stirn', 'found': 'gefunden', 'from Twitter': 'aus Twitter', 'green': 'Grün', 'grey': 'grau', 'here': 'hier', 'high': 'hoch', 'hourly': 'stündlich', 'households': 'Haushalte', 'identified': 'identifiziert', 'ignore': 'ignorieren', 'in Deg Min Sec format': 'im Format Grad Minuten Sekunden', 'inactive': 'inaktiv', 'injured': 'verletzt', 'insert new %s': 'neue %s hinzufügen', 'insert new': 'neu einfügen', 'invalid request': 'Ungültige Anfrage', 'invalid': 'ungültig', 'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'ist ein zentrales Online-Verzeichnis, in dem Informationen zu allen Opfern und Familien der Katastrophe gespeichert werden können, insbesondere identifizierte Verluste, Evakuierte, Flüchtlinge, Heimatlose. Informationen wie Name, Alter, Kontaktnummer, Ausweisnummer, Vertriebenen-Ort und andere Details werden erfasst. Fotos und Fingerabdrücke der Leute können auf das System hochgeladen werden. Personen können zum Zweck der Effizienz und Einfachheit auch in Gruppen zusammengefasst werden.', 'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'ist so konzipiert, dass es aus mehreren Untermodulen besteht. Diese arbeiten zusammen, um Organisationen komplexe Funktionalitäten zur Unterstützung von Hilfen und Durchführung von Projekten zur Verfügung zu stellen.
Dies beinhaltet ein Aufnahmesystem, ein Warenlager Management System, Produkt-Tracking, Versorgungsketten-Management, Fahrzeugbestand Management, Beschaffungswesen, Finanz-Tracking und andere Bestands- und Resource Management Einsatzmöglichkeiten.', 'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'Überwacht alle eingehenden Tickets, so dass diese entsprechend eingestuft & an die entsprechende Stelle zur Bearbeitung geleitet werden können.', 'latrines': 'Toiletten', 'leave empty to detach account': 'Leerlassen, um das Konto zu entfernen/aufzuheben.', 'legend URL': 'URL zur Legende', 'light': 'hell', 'login': 'Anmeldung', 'long': 'lang', 'long>12cm': 'lang > 12cm', 'low': 'niedrig', 'male': 'männlich', 'manual': 'manuell', 'married': 'verheiratet', 'medium': 'mittel', 'medium<12cm': 'mittel < 12 cm', 'meters': 'Meter', 'missing': 'fehlend', 'module allows the site administrator to configure various options.': 'Modul, das dem Seitenadministrator ermöglicht, verschiedene Optionen zu konfigurieren.', 'module helps monitoring the status of hospitals.': 'Modul, das hilft, den Status von Krankenhäusern zu überwachen.', 'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'Modul, das gemeinschaftlich einen Mechanismus bietet, einen GIS-gestützten Überblick über die sich entwickelnde Lage zu erhalten.', 'more': 'mehr', 'n/a': 'nicht zutreffend', 'negroid': 'Negroid', 'never': 'nie', 'new record inserted': 'Neuen Datensatz eingefügt', 'new': 'neu', 'next 100 rows': 'Nächste 100 Zeilen', 'no': 'Nein', 'none': 'nichts', 'not accessible - no cached version available!': 'Nicht verfügbar - keine zwischengespeicherte Version verfügbar!', 'not accessible - using cached version from': 'Nicht verfügbar - benutze zwischengespeicherte Version von', 'not specified': 'nicht angegeben', 'obsolete': 'obsolet', 'on': 'ein', 'once': 'einmal', 'open defecation': 'Verrichtung der Bedürfnisse im Freien', 'or import from csv file': 'oder aus CSV-Datei importieren', 'other': 'Sonstige', 'over one hour': 'über eine Stunde', 'or drop here': "oder hier per Drag'n Drop ablegen", 'people': 'Personen', 'piece': 'Stück', 'pit latrine': 'Grubenlatrine', 'pit': 'Grube', 'postponed': 'zurückgestellt', 'preliminary template or draft, not actionable in its current form': 'vorläufige Vorlage oder Entwurf, nicht umsetzbar in seiner jetzigen Form', 'previous 100 rows': 'Vorherige 100 Zeilen', 'record does not exist': 'Datensatz ist nicht vorhanden', 'record id': 'Datensatz ID', 'red': 'rot', 'reports successfully imported.': 'Berichte erfolgreich importiert.', 'representation of the Polygon/Line.': 'Darstellung der Fläche/Linie.', 'retired': 'Außer Dienst', 'river': 'Fluss', 'see comment': 'siehe Kommentar', 'selected': 'ausgewählt', 'separated from family': 'von Familie getrennt', 'separated': 'getrennt', 'shaved': 'rasiert', 'short': 'kurz', 'short<6cm': 'kurz < 6cm', 'sides': 'Seiten', 'sign-up now': 'Jetzt Registrieren', 'single': 'ledig', 'slim': 'dünn', 'specify': 'genauer beschreiben', 'staff members': 'Mitarbeiter', 'staff': 'Personal', 'state location': 'Beschaffenheit des Standorts', 'state': 'Zustand', 'straight': 'gerade', 'suffered financial losses': 'Finanzielle Verluste erlitten', 'table': 'Tabelle', 'tall': 'groß', 'this': 'Dieses', 'to access the system': 'um auf das System zuzugreifen', 'tonsure': 'Tonsur', 'total': 'Summe', 'tweepy module not available within the
running Python - this needs installing for non-Tropo Twitter support!': 'Tweepy-Modul ist in der laufenden Python-Umgebung nicht verfügbar - es muss für die Twitter-Unterstützung ohne Tropo installiert werden!', 'unable to parse csv file': 'CSV Datei kann nicht analysiert werden', 'uncheck all': 'Alles deselektieren', 'unidentified': 'nicht identifiziert', 'unknown': 'unbekannt', 'unspecified': 'unspezifiziert', 'unverified': 'ungeprüft', 'updated': 'aktualisiert', 'updates only': 'nur Aktualisierungen', 'verified': 'verifiziert', 'volunteer': 'Freiwilliger', 'volunteers': 'Freiwillige', 'wavy': 'wellig', 'weekly': 'wöchentlich', 'white': 'weiß', 'wider area, longer term, usually contain multiple Activities': 'Größerer Bereich, längere Sicht, enthält normalerweise mehrere Aktivitäten', 'widowed': 'verwitwet', 'within human habitat': 'Im menschlichen Lebensraum', 'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt-Modul ist in der laufenden Python-Umgebung nicht verfügbar - es muss für die XLS-Ausgabe installiert werden!' }
anurag-ks/eden
languages/de.py
Python
mit
283,796
[ "VisIt" ]
21fb46317c7c45d9cd575f09ebf1b14a3f71fa42c0879a6388b0762909718153
# Illustration of Various Kernels
#----------------------------------
#
# This function will illustrate how to
# implement various kernels in Tensorflow.
#
# Linear Kernel:
# K(x1, x2) = t(x1) * x2
#
# Gaussian Kernel (RBF):
# K(x1, x2) = exp(-gamma * abs(x1 - x2)^2)

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
from tensorflow.python.framework import ops
ops.reset_default_graph()

# Create graph
sess = tf.Session()

# Generate non-linear data
(x_vals, y_vals) = datasets.make_circles(n_samples=350, factor=.5, noise=.1)
y_vals = np.array([1 if y==1 else -1 for y in y_vals])
class1_x = [x[0] for i,x in enumerate(x_vals) if y_vals[i]==1]
class1_y = [x[1] for i,x in enumerate(x_vals) if y_vals[i]==1]
class2_x = [x[0] for i,x in enumerate(x_vals) if y_vals[i]==-1]
class2_y = [x[1] for i,x in enumerate(x_vals) if y_vals[i]==-1]

# Declare batch size
batch_size = 350

# Initialize placeholders
x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
prediction_grid = tf.placeholder(shape=[None, 2], dtype=tf.float32)

# Create variables for svm
b = tf.Variable(tf.random_normal(shape=[1,batch_size]))

# Apply kernel
# Linear Kernel
# my_kernel = tf.matmul(x_data, tf.transpose(x_data))

# Gaussian (RBF) kernel
gamma = tf.constant(-50.0)
dist = tf.reduce_sum(tf.square(x_data), 1)
dist = tf.reshape(dist, [-1,1])
sq_dists = tf.add(tf.sub(dist, tf.mul(2., tf.matmul(x_data, tf.transpose(x_data)))), tf.transpose(dist))
my_kernel = tf.exp(tf.mul(gamma, tf.abs(sq_dists)))

# Compute SVM Model
model_output = tf.matmul(b, my_kernel)
first_term = tf.reduce_sum(b)
b_vec_cross = tf.matmul(tf.transpose(b), b)
y_target_cross = tf.matmul(y_target, tf.transpose(y_target))
second_term = tf.reduce_sum(tf.mul(my_kernel, tf.mul(b_vec_cross, y_target_cross)))
loss = tf.neg(tf.sub(first_term, second_term))

# Create Prediction Kernel
# Linear prediction kernel
# my_kernel = tf.matmul(x_data, tf.transpose(prediction_grid))

# Gaussian (RBF) prediction kernel
rA = tf.reshape(tf.reduce_sum(tf.square(x_data), 1),[-1,1])
rB = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1),[-1,1])
pred_sq_dist = tf.add(tf.sub(rA, tf.mul(2., tf.matmul(x_data, tf.transpose(prediction_grid)))), tf.transpose(rB))
pred_kernel = tf.exp(tf.mul(gamma, tf.abs(pred_sq_dist)))

prediction_output = tf.matmul(tf.mul(tf.transpose(y_target),b), pred_kernel)
prediction = tf.sign(prediction_output-tf.reduce_mean(prediction_output))
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.squeeze(prediction), tf.squeeze(y_target)), tf.float32))

# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.002)
train_step = my_opt.minimize(loss)

# Initialize variables
init = tf.initialize_all_variables()
sess.run(init)

# Training loop
loss_vec = []
batch_accuracy = []
for i in range(1000):
    rand_index = np.random.choice(len(x_vals), size=batch_size)
    rand_x = x_vals[rand_index]
    rand_y = np.transpose([y_vals[rand_index]])
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})

    temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec.append(temp_loss)

    acc_temp = sess.run(accuracy, feed_dict={x_data: rand_x,
                                             y_target: rand_y,
                                             prediction_grid: rand_x})
    batch_accuracy.append(acc_temp)

    if (i+1)%250==0:
        print('Step #' + str(i+1))
        print('Loss = ' + str(temp_loss))

# Create a mesh to plot points in
x_min, x_max = x_vals[:, 0].min() - 1, x_vals[:, 0].max() + 1
y_min, y_max = x_vals[:, 1].min() - 1, x_vals[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02),
                     np.arange(y_min, y_max, 0.02))
grid_points = np.c_[xx.ravel(), yy.ravel()]
[grid_predictions] = sess.run(prediction, feed_dict={x_data: rand_x,
                                                     y_target: rand_y,
                                                     prediction_grid: grid_points})
grid_predictions = grid_predictions.reshape(xx.shape)

# Plot points and grid
plt.contourf(xx, yy, grid_predictions, cmap=plt.cm.Paired, alpha=0.8)
plt.plot(class1_x, class1_y, 'ro', label='Class 1')
plt.plot(class2_x, class2_y, 'kx', label='Class -1')
plt.title('Gaussian SVM Results')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(loc='lower right')
plt.ylim([-1.5, 1.5])
plt.xlim([-1.5, 1.5])
plt.show()

# Plot batch accuracy
plt.plot(batch_accuracy, 'k-', label='Accuracy')
plt.title('Batch Accuracy')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()

# Plot loss over time
plt.plot(loss_vec, 'k-')
plt.title('Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.show()

# sess.run(prediction_output, feed_dict={x_data: rand_x, y_target: rand_y, prediction_grid: grid_points})
# sess.run(pred_kernel, feed_dict={x_data: rand_x, y_target: rand_y, prediction_grid: grid_points})
# sess.run(model_output, feed_dict={x_data: rand_x, y_target: rand_y})
# sess.run(second_term, feed_dict={x_data: rand_x, y_target: rand_y})
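# Editor's addition (a hedged sketch, not part of the original recipe): the
# RBF kernels above rely on the pairwise-distance identity
#   ||xi - xj||^2 = ||xi||^2 - 2 * xi.xj + ||xj||^2,
# which is what the sq_dists / pred_sq_dist expressions compute. A quick
# NumPy check of that identity, using only numpy (already imported above):
pts = np.random.rand(5, 2)
sq_norms = np.sum(pts**2, axis=1).reshape(-1, 1)
vectorized = sq_norms - 2.0 * pts.dot(pts.T) + sq_norms.T
naive = np.array([[np.sum((a - b)**2) for b in pts] for a in pts])
assert np.allclose(vectorized, naive)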
benjaminoh1/tensorflowcookbook
Chapter 04/svm_kernels.py
Python
mit
5,157
[ "Gaussian" ]
baa620774c404847e15a356844fe357e0b75716c202e5367aef51f2d41d41a1a
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2016, Brian Coca <bcoca@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}

DOCUMENTATION = '''
module: systemd
author:
    - Ansible Core Team
version_added: "2.2"
short_description: Manage services
description:
    - Controls systemd services on remote hosts.
options:
    name:
        description:
            - Name of the service. When using in a chroot environment you always need to specify the full name i.e. (crond.service).
        aliases: [ service, unit ]
    state:
        description:
            - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
              C(restarted) will always bounce the service. C(reloaded) will always reload.
        choices: [ reloaded, restarted, started, stopped ]
    enabled:
        description:
            - Whether the service should start on boot. B(At least one of state and enabled are required.)
        type: bool
    force:
        description:
            - Whether to override existing symlinks.
        type: bool
        version_added: 2.6
    masked:
        description:
            - Whether the unit should be masked or not, a masked unit is impossible to start.
        type: bool
    daemon_reload:
        description:
            - Run daemon-reload before doing any other operations, to make sure systemd has read any changes.
            - When set to C(yes), runs daemon-reload even if the module does not start or stop anything.
        type: bool
        default: no
        aliases: [ daemon-reload ]
    daemon_reexec:
        description:
            - Run daemon_reexec command before doing any other operations, the systemd manager will serialize the manager state.
        type: bool
        default: no
        aliases: [ daemon-reexec ]
        version_added: "2.8"
    user:
        description:
            - (deprecated) run ``systemctl`` talking to the service manager of the calling user, rather than the service manager
              of the system.
            - This option is deprecated and will eventually be removed in 2.11. The ``scope`` option should be used instead.
        type: bool
        default: no
    scope:
        description:
            - run systemctl within a given service manager scope, either as the default system scope (system),
              the current user's scope (user), or the scope of all users (global).
            - "For systemd to work with 'user', the executing user must have its own instance of dbus started (systemd requirement).
              The user dbus process is normally started during normal login, but not during the run of Ansible tasks.
              Otherwise you will probably get a 'Failed to connect to bus: no such file or directory' error."
        choices: [ system, user, global ]
        version_added: "2.7"
    no_block:
        description:
            - Do not synchronously wait for the requested operation to finish.
              Enqueued job will continue without Ansible blocking on its completion.
        type: bool
        default: no
        version_added: "2.3"
notes:
    - Since 2.4, one of the following options is required 'state', 'enabled', 'masked', 'daemon_reload', ('daemon_reexec' since 2.8),
      and all except 'daemon_reload' (and 'daemon_reexec' since 2.8) also require 'name'.
    - Before 2.4 you always required 'name'.
    - Globs are not supported in name, i.e ``postgres*.service``.
requirements:
    - A system managed by systemd.
'''

EXAMPLES = '''
- name: Make sure a service is running
  systemd:
    state: started
    name: httpd

- name: stop service cron on debian, if running
  systemd:
    name: cron
    state: stopped

- name: restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
  systemd:
    state: restarted
    daemon_reload: yes
    name: crond

- name: reload service httpd, in all cases
  systemd:
    name: httpd
    state: reloaded

- name: enable service httpd and ensure it is not masked
  systemd:
    name: httpd
    enabled: yes
    masked: no

- name: enable a timer for dnf-automatic
  systemd:
    name: dnf-automatic.timer
    state: started
    enabled: yes

- name: just force systemd to reread configs (2.4 and above)
  systemd:
    daemon_reload: yes

- name: just force systemd to re-execute itself (2.8 and above)
  systemd:
    daemon_reexec: yes
'''

RETURN = '''
status:
    description: A dictionary with the key=value pairs returned from `systemctl show`
    returned: success
    type: complex
    contains: {
        "ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
        "ActiveEnterTimestampMonotonic": "8135942",
        "ActiveExitTimestampMonotonic": "0",
        "ActiveState": "active",
        "After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
        "AllowIsolate": "no",
        "Before": "shutdown.target multi-user.target",
        "BlockIOAccounting": "no",
        "BlockIOWeight": "1000",
        "CPUAccounting": "no",
        "CPUSchedulingPolicy": "0",
        "CPUSchedulingPriority": "0",
        "CPUSchedulingResetOnFork": "no",
        "CPUShares": "1024",
        "CanIsolate": "no",
        "CanReload": "yes",
        "CanStart": "yes",
        "CanStop": "yes",
        "CapabilityBoundingSet": "18446744073709551615",
        "ConditionResult": "yes",
        "ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
        "ConditionTimestampMonotonic": "7902742",
        "Conflicts": "shutdown.target",
        "ControlGroup": "/system.slice/crond.service",
        "ControlPID": "0",
        "DefaultDependencies": "yes",
        "Delegate": "no",
        "Description": "Command Scheduler",
        "DevicePolicy": "auto",
        "EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
        "ExecMainCode": "0",
        "ExecMainExitTimestampMonotonic": "0",
        "ExecMainPID": "595",
        "ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
        "ExecMainStartTimestampMonotonic": "8134990",
        "ExecMainStatus": "0",
        "ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
        "ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
        "FragmentPath": "/usr/lib/systemd/system/crond.service",
        "GuessMainPID": "yes",
        "IOScheduling": "0",
        "Id": "crond.service",
        "IgnoreOnIsolate": "no",
        "IgnoreOnSnapshot": "no",
        "IgnoreSIGPIPE": "yes",
        "InactiveEnterTimestampMonotonic": "0",
        "InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
        "InactiveExitTimestampMonotonic": "8135942",
        "JobTimeoutUSec": "0",
        "KillMode": "process",
        "KillSignal": "15",
        "LimitAS": "18446744073709551615",
        "LimitCORE": "18446744073709551615",
        "LimitCPU": "18446744073709551615",
        "LimitDATA": "18446744073709551615",
        "LimitFSIZE": "18446744073709551615",
        "LimitLOCKS": "18446744073709551615",
        "LimitMEMLOCK": "65536",
        "LimitMSGQUEUE": "819200",
        "LimitNICE": "0",
        "LimitNOFILE": "4096",
        "LimitNPROC": "3902",
        "LimitRSS": "18446744073709551615",
        "LimitRTPRIO": "0",
        "LimitRTTIME": "18446744073709551615",
        "LimitSIGPENDING": "3902",
        "LimitSTACK": "18446744073709551615",
        "LoadState": "loaded",
        "MainPID": "595",
        "MemoryAccounting": "no",
        "MemoryLimit": "18446744073709551615",
        "MountFlags": "0",
        "Names": "crond.service",
        "NeedDaemonReload": "no",
        "Nice": "0",
        "NoNewPrivileges": "no",
        "NonBlocking": "no",
        "NotifyAccess": "none",
        "OOMScoreAdjust": "0",
        "OnFailureIsolate": "no",
        "PermissionsStartOnly": "no",
        "PrivateNetwork": "no",
        "PrivateTmp": "no",
        "RefuseManualStart": "no",
        "RefuseManualStop": "no",
        "RemainAfterExit": "no",
        "Requires": "basic.target",
        "Restart": "no",
        "RestartUSec": "100ms",
        "Result": "success",
        "RootDirectoryStartOnly": "no",
        "SameProcessGroup": "no",
        "SecureBits": "0",
        "SendSIGHUP": "no",
        "SendSIGKILL": "yes",
        "Slice": "system.slice",
        "StandardError": "inherit",
        "StandardInput": "null",
        "StandardOutput": "journal",
        "StartLimitAction": "none",
        "StartLimitBurst": "5",
        "StartLimitInterval": "10000000",
        "StatusErrno": "0",
        "StopWhenUnneeded": "no",
        "SubState": "running",
        "SyslogLevelPrefix": "yes",
        "SyslogPriority": "30",
        "TTYReset": "no",
        "TTYVHangup": "no",
        "TTYVTDisallocate": "no",
        "TimeoutStartUSec": "1min 30s",
        "TimeoutStopUSec": "1min 30s",
        "TimerSlackNSec": "50000",
        "Transient": "no",
        "Type": "simple",
        "UMask": "0022",
        "UnitFileState": "enabled",
        "WantedBy": "multi-user.target",
        "Wants": "system.slice",
        "WatchdogTimestampMonotonic": "0",
        "WatchdogUSec": "0",
    }
'''  # NOQA

import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.facts.system.chroot import is_chroot
from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing
from ansible.module_utils._text import to_native


def is_running_service(service_status):
    return service_status['ActiveState'] in set(['active', 'activating'])


def request_was_ignored(out):
    return '=' not in out and 'ignoring request' in out


def parse_systemctl_show(lines):
    # The output of 'systemctl show' can contain values that span multiple lines. At first glance it
    # appears that such values are always surrounded by {}, so the previous version of this code
    # assumed that any value starting with { was a multi-line value; it would then consume lines
    # until it saw a line that ended with }. However, it is possible to have a single-line value
    # that starts with { but does not end with } (this could happen in the value for Description=,
    # for example), and the previous version of this code would then consume all remaining lines as
    # part of that value. Cryptically, this would lead to Ansible reporting that the service file
    # couldn't be found.
    #
    # To avoid this issue, the following code only accepts multi-line values for keys whose names
    # start with Exec (e.g., ExecStart=), since these are the only keys whose values are known to
    # span multiple lines.
    parsed = {}
    multival = []
    k = None
    for line in lines:
        if k is None:
            if '=' in line:
                k, v = line.split('=', 1)
                if k.startswith('Exec') and v.lstrip().startswith('{'):
                    if not v.rstrip().endswith('}'):
                        multival.append(v)
                        continue
                parsed[k] = v.strip()
                k = None
        else:
            multival.append(line)
            if line.rstrip().endswith('}'):
                parsed[k] = '\n'.join(multival).strip()
                multival = []
                k = None
    return parsed


# ===========================================
# Main control flow

def main():
    # initialize
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', aliases=['service', 'unit']),
            state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped']),
            enabled=dict(type='bool'),
            force=dict(type='bool'),
            masked=dict(type='bool'),
            daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
            daemon_reexec=dict(type='bool', default=False, aliases=['daemon-reexec']),
            user=dict(type='bool'),
            scope=dict(type='str', choices=['system', 'user', 'global']),
            no_block=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        required_one_of=[['state', 'enabled', 'masked', 'daemon_reload', 'daemon_reexec']],
        required_by=dict(
            state=('name', ),
            enabled=('name', ),
            masked=('name', ),
        ),
        mutually_exclusive=[['scope', 'user']],
    )

    unit = module.params['name']
    for globpattern in (r"*", r"?", r"["):
        if globpattern in unit:
            module.fail_json(msg="This module does not currently support using glob patterns, found '%s' in service name: %s" % (globpattern, unit))

    systemctl = module.get_bin_path('systemctl', True)

    if os.getenv('XDG_RUNTIME_DIR') is None:
        os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid()

    ''' Set CLI options depending on params '''
    if module.params['user'] is not None:
        # handle user deprecation, mutually exclusive with scope
        module.deprecate("The 'user' option is being replaced by 'scope'", version='2.11')
        if module.params['user']:
            module.params['scope'] = 'user'
        else:
            module.params['scope'] = 'system'

    # if scope is 'system' or None, we can ignore as there is no extra switch.
    # The other choices match the corresponding switch
    if module.params['scope'] not in (None, 'system'):
        systemctl += " --%s" % module.params['scope']

    if module.params['no_block']:
        systemctl += " --no-block"

    if module.params['force']:
        systemctl += " --force"

    rc = 0
    out = err = ''
    result = dict(
        name=unit,
        changed=False,
        status=dict(),
    )

    # Run daemon-reload first, if requested
    if module.params['daemon_reload'] and not module.check_mode:
        (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))

    # Run daemon-reexec
    if module.params['daemon_reexec'] and not module.check_mode:
        (rc, out, err) = module.run_command("%s daemon-reexec" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reexec: %s' % (rc, err))

    if unit:
        found = False
        is_initd = sysv_exists(unit)
        is_systemd = False

        # check service data, cannot error out on rc as it changes across versions, assume not found
        (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))

        if request_was_ignored(out) or request_was_ignored(err):
            # fallback list-unit-files as show does not work on some systems (chroot)
            # not used as primary as it skips some services (like those using init.d) and requires .service/etc notation
            (rc, out, err) = module.run_command("%s list-unit-files '%s'" % (systemctl, unit))
            if rc == 0:
                is_systemd = True

        elif rc == 0:
            # load return of systemctl show into dictionary for easy access and return
            if out:
                result['status'] = parse_systemctl_show(to_native(out).split('\n'))

                is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'

                is_masked = 'LoadState' in result['status'] and result['status']['LoadState'] == 'masked'

                # Check for loading error
                if is_systemd and not is_masked and 'LoadError' in result['status']:
                    module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
        else:
            # Check for systemctl command
            module.run_command(systemctl, check_rc=True)

        # Does service exist?
        found = is_systemd or is_initd
        if is_initd and not is_systemd:
            module.warn('The service (%s) is actually an init script but the system is managed by systemd' % unit)

        # mask/unmask the service, if requested, can operate on services before they are installed
        if module.params['masked'] is not None:
            # state is not masked unless systemd affirms otherwise
            masked = ('LoadState' in result['status'] and result['status']['LoadState'] == 'masked')

            if masked != module.params['masked']:
                result['changed'] = True
                if module.params['masked']:
                    action = 'mask'
                else:
                    action = 'unmask'

                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        # some versions of system CAN mask/unmask non existing services, we only fail on missing if they don't
                        fail_if_missing(module, found, unit, msg='host')

        # Enable/disable service startup at boot if requested
        if module.params['enabled'] is not None:

            if module.params['enabled']:
                action = 'enable'
            else:
                action = 'disable'

            fail_if_missing(module, found, unit, msg='host')

            # do we need to enable the service?
            enabled = False
            (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))

            # check systemctl result or if it is a init script
            if rc == 0:
                enabled = True
            elif rc == 1:
                # if not a user or global user service and both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
                if module.params['scope'] in (None, 'system') and \
                        not module.params['user'] and \
                        is_initd and \
                        not out.strip().endswith('disabled') and \
                        sysv_is_enabled(unit):
                    enabled = True

            # default to current state
            result['enabled'] = enabled

            # Change enable/disable if needed
            if enabled != module.params['enabled']:
                result['changed'] = True
                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))

                result['enabled'] = not enabled

        # set service state if requested
        if module.params['state'] is not None:
            fail_if_missing(module, found, unit, msg="host")

            # default to desired state
            result['state'] = module.params['state']

            # What is current service state?
            if 'ActiveState' in result['status']:
                action = None
                if module.params['state'] == 'started':
                    if not is_running_service(result['status']):
                        action = 'start'
                elif module.params['state'] == 'stopped':
                    if is_running_service(result['status']):
                        action = 'stop'
                else:
                    if not is_running_service(result['status']):
                        action = 'start'
                    else:
                        action = module.params['state'][:-2]  # remove 'ed' from restarted/reloaded
                    result['state'] = 'started'

                if action:
                    result['changed'] = True
                    if not module.check_mode:
                        (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                        if rc != 0:
                            module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
            # check for chroot
            elif is_chroot(module):
                module.warn("Target is a chroot. This can lead to false positives or prevent the init system tools from working.")
            else:
                # this should not happen?
                module.fail_json(msg="Service is in unknown state", status=result['status'])

    module.exit_json(**result)


if __name__ == '__main__':
    main()
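# Editor's sketch (hedged, not part of the module): parse_systemctl_show()
# expects the line-split output of `systemctl show <unit>` and only accepts
# multi-line values for Exec* keys. A hypothetical round-trip:
#
#   parse_systemctl_show(['Id=crond.service',
#                         'ExecReload={ path=/bin/kill ;',
#                         'argv[]=/bin/kill -HUP $MAINPID }'])
#   # -> {'Id': 'crond.service',
#   #     'ExecReload': '{ path=/bin/kill ;\nargv[]=/bin/kill -HUP $MAINPID }'}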
rosmo/ansible
lib/ansible/modules/system/systemd.py
Python
gpl-3.0
21,285
[ "Brian" ]
d4ba32067a582de74fa598ff3de398c0075e6460c63ed5e5280045af7beed0f0
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Wgrib2(CMakePackage):
    """The wgrib2 package functionality for interacting with, reading,
    writing, and manipulating GRIB2 files."""

    homepage = "https://www.cpc.ncep.noaa.gov/products/wesley/wgrib2"
    url = "https://github.com/NOAA-EMC/NCEPLIBS-wgrib2/archive/refs/tags/v2.0.8-cmake-v6.tar.gz"

    maintainers = ['t-brown', 'kgerheiser', 'Hang-Lei-NOAA', 'edwardhartnett']

    version('2.0.8-cmake-v6', sha256='745cd008b4ce0245ea44247733e57e2b9ec6c5205d171d457e18d0ff8f87172d')

    depends_on('ip2')
    depends_on('jasper')
    depends_on('libpng')
    depends_on('netcdf-c')
    depends_on('netcdf-fortran')
    depends_on('sp')

    def cmake_args(self):
        args = ['-DUSE_IPOLATES=3', '-DUSE_SPECTRAL=BOOL:ON']

        return args
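# Editor's note (hedged, not part of the recipe): with this file placed in a
# Spack package repository, the build would typically be triggered from the
# CLI, e.g.:
#
#   spack install wgrib2@2.0.8-cmake-v6
#
# The cmake_args() above then translate into -DUSE_IPOLATES=3 and
# -DUSE_SPECTRAL=BOOL:ON on the generated CMake configure line.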
LLNL/spack
var/spack/repos/builtin/packages/wgrib2/package.py
Python
lgpl-2.1
987
[ "NetCDF" ]
f95f3c62085f31f06b8efef7a0555fa9bd7d56e884204d255deb0c0fff5c2f87
import pysam
import re
import sys

# matches an N<x>D<y>N<z> run in a CIGAR string (a deletion flanked by two
# reference skips), e.g. '10N5D10N'
regex = re.compile('[0-9]+N[0-9]+D[0-9]+N')
sj = ''

infile = pysam.AlignmentFile(sys.argv[1], mode="rb")#, threads=4)
outfile = pysam.AlignmentFile(sys.argv[2], mode="wb", template=infile)#, threads=4)
#
for s in infile:
    s2 = s
    # replace the "unavailable" sentinel mapping quality 255 with 60
    if s2.has_tag('MQ'):
        MQtag = s2.get_tag('MQ')
        if MQtag==255:
            s2.set_tag( 'MQ', 60 )
            s2.mapping_quality=60
    # collapse N...D...N runs in the mate-CIGAR (MC) tag into a single N
    if s2.has_tag('MC'):#try:
        ctmp = s2.get_tag('MC')
        rctmp = regex.search(ctmp)
        while rctmp!=None:
            ctmpshort = ctmp[rctmp.start():rctmp.end()]
            indsshort = [i for i in range(len(ctmpshort)) if ctmpshort[i].isalpha()]
            ndn = int( ctmpshort[0:indsshort[0]] ) + int( ctmpshort[(indsshort[0]+1):indsshort[1]]) + int( ctmpshort[(indsshort[1]+1):indsshort[2]] )
            ctmp = ctmp[0:rctmp.start()]+str(ndn)+"N"+ctmp[rctmp.end():len(ctmp)]
            rctmp = regex.search(ctmp)
        s2.set_tag( 'MC', ctmp )
    outfile.write(s2)

infile.close()
outfile.close()
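# Editor's sketch (hedged, not part of the script): the MC-tag rewrite above
# collapses each N<x>D<y>N<z> run into a single reference skip of length
# x + y + z, e.g. a mate CIGAR of '30M10N5D10N20M' becomes '30M25N20M'
# (10 + 5 + 10 = 25).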
IARCbioinfo/mutect-nf
bin/correctNDN.py
Python
gpl-3.0
1,041
[ "pysam" ]
8f7e90dd27da8eeeedd61d96e59de82e0c736d426796ba4c2135d4ac540a77e2
""" Reward function prior distributions """ from __future__ import division import six import scipy.stats import numpy as np from abc import abstractmethod, ABCMeta from ...base import Model __all__ = [ 'RewardPriorBase', 'UniformRewardPrior', 'GaussianRewardPrior', ] class RewardPriorBase(six.with_metaclass(ABCMeta, Model)): """ Reward prior distribution API The reward prior summarizes information about the reward distribution that is available before running the algorithm, i.e. all the relevant domain knowledge. .. note:: These distributions are multivariate, i.e. reward samples are vectors or equivalently functions over :math:`\mathcal{S}`, or more generally :math:`\mathcal{S} \\times \mathcal{A}` or subsets of these. """ def __init__(self, dim): if 0 > dim: raise ValueError('Reward space dimension must be positive') self._dim = dim @abstractmethod def pdf(self, r): """ Estimate the probability of the reward under the prior .. math:: p(r \in A) = \int_A f d\mu for any :math:`A \in \mathcal{A}`, given some measurable space :math:`(\mathcal{X}, \mathcal{A})` and a measure :math:`\mu`. """ raise NotImplementedError('Abstract method') @abstractmethod def log_p(self, r): """ Estimate the log probability of the reward under the prior """ raise NotImplementedError('Abstract method') @abstractmethod def sample(self): """ Generate a sample from the reward prior distribution .. math:: r \sim f_{\\theta} """ raise NotImplementedError('Abstract method') class UniformRewardPrior(RewardPriorBase): """ Uniform reward prior distribution Suitable to task in which there is no clear insight into the nature of the reward function. .. math:: p(r(s, a) = x) = \\text{Uni}(a, b) """ def __init__(self, dim=1, rmin=0.0, rmax=1.0): super(UniformRewardPrior, self).__init__(dim) if rmax < rmin: raise ValueError('Dist rmax cannot be less than rmin') self._dist = scipy.stats.uniform(loc=rmin, scale=2 * (rmax - rmin)) def pdf(self, r): """ Estimate the probability of the reward under the prior """ return np.prod([self._dist.pdf(x) for x in r]) def log_p(self, r): """ Estimate the log probability of the reward under the prior """ return np.sum(self._dist.logpdf(x) for x in r) def sample(self): """ Generate a sample from the reward prior distribution """ return self._dist.rvs(size=self._dim) class GaussianRewardPrior(RewardPriorBase): """ Gaussian reward prior distribution Suitable for many real world tasks with parsimonious reward structures, where most states have negligible rewards [RamBIRL07]_. .. math:: p(r(s, a) = x) = \\frac{1}{\sqrt{2\pi}\sigma} \exp\left(-\\frac{x^2}{2\sigma^2}\\right) .. [RamBIRL07] Deepak Ramachandran and Eyal Amir, "Bayesian inverse reinforcement learning," IJCAI, 2007 """ def __init__(self, dim=1, mean=0.0, sigma=0.5): super(GaussianRewardPrior, self).__init__(dim) self._dist = scipy.stats.norm(loc=mean, scale=sigma) def pdf(self, r): """ Estimate the probability of the reward under the prior """ return np.prod([self._dist.pdf(x) for x in r]) def log_p(self, r): """ Estimate the log probability of the reward under the prior """ return np.sum(self._dist.logpdf(x) for x in r) def sample(self): """ Generate a sample from the reward prior distribution .. math:: r \sim \mathcal{N}(\mathbf{\mu}, \mathbf{\sigma}) """ return self._dist.rvs(size=self._dim)
makokal/funzo
funzo/irl/birl/priors.py
Python
mit
3,847
[ "Gaussian" ]
950db96e4aad15986b63e9ab3aeef88498b73476a6c5d13b9aff469b546a47dd
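The component-wise density arithmetic in GaussianRewardPrior can be verified with scipy alone, without importing funzo; the dimension and parameters below are arbitrary.

import numpy as np
import scipy.stats

dim, mean, sigma = 3, 0.0, 0.5
dist = scipy.stats.norm(loc=mean, scale=sigma)

r = dist.rvs(size=dim)                       # sample(): one reward vector
pdf = np.prod([dist.pdf(x) for x in r])      # pdf(): product over components
log_p = np.sum([dist.logpdf(x) for x in r])  # log_p(): sum of component logs

assert np.isclose(np.log(pdf), log_p)        # the two agree in log space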
# Copyright (C) 2019 The ESPResSo project # # This file is part of ESPResSo. # # ESPResSo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest as ut from espressomd import has_features, code_info class Features(ut.TestCase): def test_has_features(self): for feature in code_info.features(): self.assertTrue(has_features(feature)) for feature in code_info.all_features() - set(code_info.features()): self.assertFalse(has_features(feature)) with self.assertRaises(RuntimeError) as _: has_features("NotAFeature") if __name__ == '__main__': ut.main()
mkuron/espresso
testsuite/python/features.py
Python
gpl-3.0
1,200
[ "ESPResSo" ]
9f12eb04e022fa1a34f4d5d7e182708950644de92ebd6887fe9f0755dfe869a3
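The invariant this test encodes (compiled features are a subset of all known features, and unknown names raise) can be sketched with plain sets; the feature names below are hypothetical stand-ins for code_info.features() and code_info.all_features().

compiled = {"LENNARD_JONES", "ELECTROSTATICS"}
known = {"LENNARD_JONES", "ELECTROSTATICS", "DIPOLES", "LB_GPU"}

def has_features(name):
    # Mirrors the checked behaviour: True only for compiled features,
    # an error for names that are not features at all.
    if name not in known:
        raise RuntimeError("unknown feature: %s" % name)
    return name in compiled

assert all(has_features(f) for f in compiled)
assert not any(has_features(f) for f in known - compiled)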
import core.modules import core.modules.module_registry from core.modules.vistrails_module import Module, ModuleError import numpy import scipy import scipy.ndimage from Array import * from Matrix import * class ArrayImaging(object): my_namespace = 'numpy|imaging' class ExtractRGBAChannel(ArrayImaging, Module): """ Extract a single color channel from an array representing an RGBA type image. This will return a 2D array with the single channel specified as the scalar elements """ def compute(self): im = self.get_input("Image").get_array() chan = self.get_input("Channel") ar = im[:,:,chan] out = NDArray() out.set_array(ar) self.set_output("Output Array", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, namespace=cls.my_namespace) reg.add_input_port(cls, "Image", (NDArray, 'Image Array')) reg.add_input_port(cls, "Channel", (basic.Integer, 'Channel')) reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array')) class GaussianGradientMagnitude(ArrayImaging, Module): """ Calculate the Gradient Magnitude of an input NDArray using gaussian derivatives. The standard-deviation of the Gaussian filter are given for each axis as a sequence or as a single number, in which case the filter will be isotropic. """ def compute(self): im = self.get_input("Image") sigma = self.get_input_list("Sigmas") if len(sigma) <= 1: sigma = sigma[0] der = scipy.ndimage.gaussian_gradient_magnitude(im.get_array(), sigma) out = NDArray() out.set_array(der) self.set_output("Output Array", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, namespace=cls.my_namespace) reg.add_input_port(cls, "Image", (NDArray, 'Image Array')) reg.add_input_port(cls, "Sigmas", (basic.Float, 'Standard Deviations')) reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array')) class JointHistogram(ArrayImaging, Module): """ Calculate the Joint Histogram of 2 inputs. The inputs can be of arbitrary dimension, but must be equivalently sized. """ def compute(self): in_x = self.get_input("Array One").get_array() in_y = self.get_input("Array Two").get_array() size_x = self.get_input("Bins X") size_y = self.get_input("Bins Y") take_log = True if self.has_input("Log10"): take_log = self.get_input("Log10") out_ar = numpy.zeros((size_x, size_y)) min_x = in_x.min() max_x = in_x.max() - min_x min_y = in_y.min() max_y = in_y.max() - min_y in_x = in_x.flatten() in_y = in_y.flatten() for i in xrange(in_x.size): x_cor = int(((in_x[i] - min_x)/max_x) * (size_x - 1)) y_cor = int(((in_y[i] - min_y)/max_y) * (size_y - 1)) out_ar[x_cor,y_cor] += 1.0 if take_log: out_ar = out_ar + 1.0 out_ar = scipy.log(out_ar) out = NDArray() out_ar = out_ar.transpose() out_ar = out_ar[::-1] out.set_array(out_ar) self.set_output("Joint Histogram", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, namespace=cls.my_namespace) reg.add_input_port(cls, "Array One", (NDArray, 'X Axis Input')) reg.add_input_port(cls, "Array Two", (NDArray, 'Y Axis Input')) reg.add_input_port(cls, "Log10", (basic.Boolean, 'Use Log of Histogram'), True) reg.add_input_port(cls, "Bins X", (basic.Integer, 'Number of X Bins')) reg.add_input_port(cls, "Bins Y", (basic.Integer, 'Number of Y Bins')) reg.add_output_port(cls, "Joint Histogram", (NDArray, 'Joint Histogram')) class GaussianSmooth(ArrayImaging, Module): """ Smooth the Input array with a multi-dimensional gaussian kernel. The standard-deviation of the Gaussian filter are given for each axis as a sequence or as a single number, in which case the filter will be isotropic. 
""" def compute(self): im = self.get_input("Input Array") sigma = self.get_input_list("Sigmas") if len(sigma) <= 1: sigma = sigma[0] der = scipy.ndimage.gaussian_filter(im.get_array(), sigma) out = NDArray() out.set_array(der) self.set_output("Output Array", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, namespace=cls.my_namespace) reg.add_input_port(cls, "Input Array", (NDArray, 'Image Array')) reg.add_input_port(cls, "Sigmas", (basic.Float, 'Standard Deviations')) reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array')) class MedianFilter(ArrayImaging, Module): """ Smooth the Input array with a multi-dimensional median filter. """ def compute(self): im = self.get_input("Input Array") k_size = self.get_input("Size") der = scipy.ndimage.median_filter(im.get_array(), size=k_size) out = NDArray() out.set_array(der) self.set_output("Output Array", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, namespace=cls.my_namespace) reg.add_input_port(cls, "Input Array", (NDArray, 'Image Array')) reg.add_input_port(cls, "Size", (basic.Integer, 'Kernel Size')) reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array')) class ImageDifference(ArrayImaging, Module): """ Calculate the difference between two input images. """ def compute(self): im = self.get_input("Input 1") im2 = self.get_input("Input 2") da_ar = im.get_array() - im2.get_array() da_ar = numpy.abs(da_ar) out = NDArray() out.set_array(da_ar) self.set_output("Output", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, namespace=cls.my_namespace) reg.add_input_port(cls, "Input 1", (NDArray, 'Image Array')) reg.add_input_port(cls, "Input 2", (NDArray, 'Image Array')) reg.add_output_port(cls, "Output", (NDArray, 'Output Array')) class ImageNormalize(ArrayImaging, Module): """ Move the range of the image to [0,1] """ def compute(self): im = self.get_input("Input") im_max = im.get_array().max() im_ar = im.get_array() / im_max out = NDArray() out.set_array(im_ar) self.set_output("Output", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, namespace=cls.my_namespace) reg.add_input_port(cls, "Input", (NDArray, 'Image Array')) reg.add_output_port(cls, "Output", (NDArray, 'Output Array')) class SobelGradientMagnitude(ArrayImaging, Module): """ Use n-dimensional sobel kernels to compute the gradient magnitude of an image """ def compute(self): im = self.get_input("Input").get_array() mag = numpy.zeros(im.shape) for i in xrange(im.ndim): kern = scipy.ndimage.sobel(im, axis=i) mag += kern*kern out = NDArray() out.set_array(numpy.sqrt(mag)) self.set_output("Output", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, namespace=cls.my_namespace) reg.add_input_port(cls, "Input", (NDArray, 'Image Array')) reg.add_output_port(cls, "Output", (NDArray, 'Output Array'))
Nikea/VisTrails
contrib/NumSciPy/Imaging.py
Python
bsd-3-clause
7,502
[ "Gaussian" ]
644ba2cbdec420f27aa54f66acffe98c06319f88301819502bf6f0cd644fe3fd
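The operations these modules wrap can be exercised directly on a synthetic array. A standalone numpy/scipy sketch, including a full min-max rescale to [0, 1] that also covers arrays whose minimum is not zero:

import numpy as np
import scipy.ndimage

img = np.random.default_rng(0).normal(size=(64, 64))  # synthetic image

smooth = scipy.ndimage.gaussian_filter(img, sigma=2.0)          # GaussianSmooth
grad = scipy.ndimage.gaussian_gradient_magnitude(img, sigma=2.0)  # gradient magnitude
median = scipy.ndimage.median_filter(img, size=3)               # MedianFilter

# Min-max rescale to [0, 1]; valid even when img.min() is negative.
norm = (img - img.min()) / (img.max() - img.min())

# Joint histogram of two images via numpy, cf. the explicit loop above.
hist, _, _ = np.histogram2d(img.ravel(), smooth.ravel(), bins=(64, 64))
log_hist = np.log(hist + 1.0)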
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class RAgimicrorna(RPackage): """Processing and Analysis of Agilent microRNA data.""" homepage = "https://www.bioconductor.org/packages/AgiMicroRna/" git = "https://git.bioconductor.org/packages/AgiMicroRna.git" version('2.26.0', commit='6dd74bae47986f2a23d03e3f1f9f78f701dd8053') depends_on('r@3.4.0:3.4.9', when='@2.26.0') depends_on('r-affycoretools', type=('build', 'run')) depends_on('r-preprocesscore', type=('build', 'run')) depends_on('r-affy', type=('build', 'run')) depends_on('r-limma', type=('build', 'run')) depends_on('r-biobase', type=('build', 'run'))
mfherbst/spack
var/spack/repos/builtin/packages/r-agimicrorna/package.py
Python
lgpl-2.1
1,873
[ "Bioconductor" ]
7ae8ae5dbd81bb5984e30d745b3cd8ff220168b2e7d66ecbf29c7f5ce1b3170e
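Once a recipe like this sits in a Spack repo, the declared version is installed from the command line with: spack install r-agimicrorna@2.26.0. The same Bioconductor pattern generalizes; in the skeleton below the package name, homepage, git URL, and commit are placeholders, not real values.

from spack import *

class RExamplepkg(RPackage):
    """Hypothetical Bioconductor package following the pattern above."""

    homepage = "https://www.bioconductor.org/packages/ExamplePkg/"
    git      = "https://git.bioconductor.org/packages/ExamplePkg.git"

    # Bioconductor packages are pinned to a commit rather than a tarball.
    version('1.0.0', commit='0000000000000000000000000000000000000000')

    depends_on('r-biobase', type=('build', 'run'))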
# Authors: Denis A. Engemann <denis.engemann@gmail.com> # # License: BSD (3-clause) # Parts of this code are taken from scikit-learn import pytest import numpy as np from numpy.testing import assert_almost_equal from scipy import stats from scipy import linalg from mne.preprocessing.infomax_ import infomax from mne.utils import requires_sklearn, run_tests_if_main def center_and_norm(x, axis=-1): """Center and norm x in place. Parameters ---------- x: ndarray Array with an axis of observations (statistical units) measured on random variables. axis: int, optional Axis along which the mean and variance are calculated. """ x = np.rollaxis(x, axis) x -= x.mean(axis=0) x /= x.std(axis=0) @requires_sklearn def test_infomax_blowup(): """Test the infomax algorithm blowup condition.""" # scipy.stats uses the global RNG: np.random.seed(0) n_samples = 100 # Generate two sources: s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1 s2 = stats.t.rvs(1, size=n_samples) s = np.c_[s1, s2].T center_and_norm(s) s1, s2 = s # Mixing angle phi = 0.6 mixing = np.array([[np.cos(phi), np.sin(phi)], # noqa: E241 [np.sin(phi), -np.cos(phi)]]) m = np.dot(mixing, s) center_and_norm(m) X = _get_pca().fit_transform(m.T) k_ = infomax(X, extended=True, l_rate=0.1) s_ = np.dot(k_, X.T) center_and_norm(s_) s1_, s2_ = s_ # Check to see if the sources have been estimated # in the wrong order if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)): s2_, s1_ = s_ s1_ *= np.sign(np.dot(s1_, s1)) s2_ *= np.sign(np.dot(s2_, s2)) # Check that we have estimated the original sources assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2) assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2) @requires_sklearn def test_infomax_simple(): """Test the infomax algorithm on very simple data.""" rng = np.random.RandomState(0) # scipy.stats uses the global RNG: np.random.seed(0) n_samples = 500 # Generate two sources: s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1 s2 = stats.t.rvs(1, size=n_samples) s = np.c_[s1, s2].T center_and_norm(s) s1, s2 = s # Mixing angle phi = 0.6 mixing = np.array([[np.cos(phi), np.sin(phi)], # noqa: E241 [np.sin(phi), -np.cos(phi)]]) for add_noise in (False, True): m = np.dot(mixing, s) if add_noise: m += 0.1 * rng.randn(2, n_samples) center_and_norm(m) algos = [True, False] for algo in algos: X = _get_pca().fit_transform(m.T) k_ = infomax(X, extended=algo) s_ = np.dot(k_, X.T) center_and_norm(s_) s1_, s2_ = s_ # Check to see if the sources have been estimated # in the wrong order if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)): s2_, s1_ = s_ s1_ *= np.sign(np.dot(s1_, s1)) s2_ *= np.sign(np.dot(s2_, s2)) # Check that we have estimated the original sources if not add_noise: assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2) assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2) else: assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1) assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1) def test_infomax_weights_ini(): """Test the infomax algorithm w/initial weights matrix.""" X = np.random.random((3, 100)) weights = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64) w1 = infomax(X, max_iter=0, weights=weights, extended=True) w2 = infomax(X, max_iter=0, weights=weights, extended=False) assert_almost_equal(w1, weights) assert_almost_equal(w2, weights) @requires_sklearn def test_non_square_infomax(): """Test non-square infomax.""" rng = np.random.RandomState(0) n_samples = 200 # Generate two sources: t = np.linspace(0, 100, n_samples) s1 = 
np.sin(t) s2 = np.ceil(np.sin(np.pi * t)) s = np.c_[s1, s2].T center_and_norm(s) s1, s2 = s # Mixing matrix n_observed = 6 mixing = rng.randn(n_observed, 2) for add_noise in (False, True): m = np.dot(mixing, s) if add_noise: m += 0.1 * rng.randn(n_observed, n_samples) center_and_norm(m) m = m.T m = _get_pca(rng).fit_transform(m) # we need extended since input signals are sub-gaussian unmixing_ = infomax(m, random_state=rng, extended=True) s_ = np.dot(unmixing_, m.T) # Check that the mixing model described in the docstring holds: mixing_ = linalg.pinv(unmixing_.T) assert_almost_equal(m, s_.T.dot(mixing_)) center_and_norm(s_) s1_, s2_ = s_ # Check to see if the sources have been estimated # in the wrong order if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)): s2_, s1_ = s_ s1_ *= np.sign(np.dot(s1_, s1)) s2_ *= np.sign(np.dot(s2_, s2)) # Check that we have estimated the original sources if not add_noise: assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2) assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2) @pytest.mark.parametrize("return_n_iter", [True, False]) def test_infomax_n_iter(return_n_iter): """Test the return_n_iter kwarg.""" X = np.random.random((3, 100)) max_iter = 1 r = infomax(X, max_iter=max_iter, extended=True, return_n_iter=return_n_iter) if return_n_iter: assert isinstance(r, tuple) assert r[1] == max_iter else: assert isinstance(r, np.ndarray) def _get_pca(rng=None): from sklearn.decomposition import PCA return PCA(n_components=2, whiten=True, svd_solver='randomized', random_state=rng) run_tests_if_main()
olafhauk/mne-python
mne/preprocessing/tests/test_infomax.py
Python
bsd-3-clause
6,115
[ "Gaussian" ]
ed6587ac33394ade73a2a6cede0e22a80d6c1b0c63df74afae39ac1c24a82d6a
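A condensed version of the recovery check these tests perform, assuming mne is installed. The tests whiten with PCA first; skipping that keeps the sketch short but makes the separation only approximate.

import numpy as np
from mne.preprocessing.infomax_ import infomax

rng = np.random.RandomState(0)
t = np.linspace(0, 100, 500)
s = np.c_[np.sin(t), np.sign(np.sin(3 * t))].T   # two sub-gaussian sources
m = np.dot(rng.randn(2, 2), s)                   # mixed observations, shape (2, 500)

# extended=True as in the tests, since the sources are sub-gaussian.
unmixing = infomax(m.T, extended=True, random_state=rng)
recovered = np.dot(unmixing, m)                  # rows ~ sources, up to sign/order/scale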
#!/usr/bin/env python import logging import capytaine as cpt from capytaine.ui.vtk.animation import Animation logging.basicConfig(level=logging.INFO, format="%(levelname)s:\t%(message)s") # Generate the mesh of a sphere full_sphere = cpt.Sphere( radius=3, center=(0, 0, 0), # Size and positions ntheta=20, nphi=20, # Fineness of the mesh ) full_sphere.add_translation_dof(name="Heave") # Keep only the immersed part of the mesh sphere = full_sphere.keep_immersed_part(inplace=False) sphere.add_translation_dof(name="Heave") # Set up and solve problem solver = cpt.BEMSolver() diffraction_problem = cpt.DiffractionProblem(body=sphere, wave_direction=0.0, omega=2.0) diffraction_result = solver.solve(diffraction_problem) radiation_problem = cpt.RadiationProblem(body=sphere, radiating_dof="Heave", omega=2.0) radiation_result = solver.solve(radiation_problem) # Define a mesh of the free surface and compute the free surface elevation free_surface = cpt.FreeSurface(x_range=(-50, 50), y_range=(-50, 50), nx=150, ny=150) diffraction_elevation_at_faces = solver.get_free_surface_elevation(diffraction_result, free_surface) radiation_elevation_at_faces = solver.get_free_surface_elevation(radiation_result, free_surface) # Add incoming waves diffraction_elevation_at_faces = diffraction_elevation_at_faces + free_surface.incoming_waves(diffraction_result) # Run the animations animation = Animation(loop_duration=diffraction_result.period) animation.add_body(full_sphere, faces_motion=None) animation.add_free_surface(free_surface, faces_elevation=0.5*diffraction_elevation_at_faces) animation.run(camera_position=(-30, -30, 30)) # The camera is oriented towards (0, 0, 0) by default. # animation.save("path/to/the/video/file.ogv", camera_position=(-30, -30, 30)) animation = Animation(loop_duration=radiation_result.period) animation.add_body(full_sphere, faces_motion=full_sphere.dofs["Heave"]) animation.add_free_surface(free_surface, faces_elevation=3.0*radiation_elevation_at_faces) animation.run(camera_position=(-30, -30, 30)) # animation.save("path/to/the/video/file.ogv", camera_position=(-30, -30, 30))
mancellin/capytaine
docs/user_manual/examples/animate_free_surface.py
Python
gpl-3.0
2,141
[ "VTK" ]
1aef5f19939b4109c8581d80d058bbadfb61df046c0333b30603b40a5c249a24
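The same solver handles a frequency sweep with no extra setup. A short sketch reusing sphere, solver, and cpt from the script above; the frequency grid is arbitrary.

import numpy as np

sweep_results = [
    solver.solve(cpt.RadiationProblem(body=sphere, radiating_dof="Heave", omega=w))
    for w in np.linspace(0.5, 4.0, 8)
]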
# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function from ._exception import OverrideError # Adapted from http://stackoverflow.com/a/8313042/579416 def overrides(interface_class): """Decorator for class-level members. Used to indicate that a member is being overridden from a specific parent class. If the member does not have a docstring, it will pull one from the parent class. When chaining decorators, this should be first as it is relatively nondestructive. Parameters ---------- interface_class : class The class which has a member overridden by the decorated member. Returns ------- function The function is not changed or replaced. Raises ------ OverrideError If the `interface_class` does not possess a member of the same name as the decorated member. """ def overrider(method): if method.__name__ not in dir(interface_class): raise OverrideError("%r is not present in parent class: %r." % (method.__name__, interface_class.__name__)) if method.__doc__ is None: method.__doc__ = getattr(interface_class, method.__name__).__doc__ return method return overrider class classproperty(property): """Decorator for class-level properties. Supports read access only. The property will be read-only within an instance. However, the property can always be redefined on the class, since Python classes are mutable. Parameters ---------- func : function Method to make a class property. Returns ------- property Decorated method. Raises ------ AttributeError If the property is set on an instance. """ def __init__(self, func): name = func.__name__ doc = func.__doc__ super(classproperty, self).__init__(classmethod(func)) self.__name__ = name self.__doc__ = doc def __get__(self, cls, owner): return self.fget.__get__(None, owner)() def __set__(self, obj, value): raise AttributeError("can't set attribute")
jensreeder/scikit-bio
skbio/util/_decorator.py
Python
bsd-3-clause
2,517
[ "scikit-bio" ]
2a1d1b6dd2610690a24daf3bf985913fcf206711b2b8e261bbbecd4a2cdabacf
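A small usage sketch for the two decorators, assuming the module is importable under the path given in this row's metadata (skbio.util._decorator).

from skbio.util._decorator import overrides, classproperty

class Base(object):
    def describe(self):
        """Return a short description."""
        return 'base'

class Child(Base):
    @overrides(Base)
    def describe(self):            # no docstring: pulled from Base.describe
        return 'child'

class WithConstant(object):
    @classproperty
    def answer(cls):
        return 42

assert Child().describe() == 'child'
assert Child.describe.__doc__ == 'Return a short description.'
assert WithConstant.answer == 42   # readable on the class, read-only on instances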
from flask import Flask, render_template, session, request, redirect import random app = Flask(__name__) app.secret_key = 'my_secret_key' @app.route('/') def index(): if 'gold' not in session: session['gold'] = 0 if 'activities' not in session: session['activities'] = [] return render_template('index.html') @app.route('/process', methods = ['POST']) def process(): buildings = { 'farm':random.randint(5,10), 'casino':random.randint(-50,50), 'cave':random.randint(0,30), 'house':random.randint(0,5) } if request.form['building'] in buildings: print(buildings[request.form['building']]) return redirect('/') if __name__ == '__main__': app.run(debug = True) """ Explain line 24! The dict's building key comes from the form on the index page; the value comes from the random number generated for that building. """ """ Takes the value from the form with a name of "building". Then finds a random number at the index of that value in the dictionary of buildings. """
jiobert/python
Apatira_Authman/Assignments/flaskolympics/olympics7/server.py
Python
mit
1,018
[ "CASINO" ]
c8020bbd6c59ce8614bc1d9144be36405460dbfefd7084400133f73f6ca3c2a9
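The handler above prints the rolled amount but never banks it. One hypothetical way to fold the roll into the session, sketched as an assumption rather than the app's actual behaviour; the /earn route and message format are invented.

@app.route('/earn', methods=['POST'])
def earn():
    # Hypothetical variant of /process that credits the roll and logs it.
    buildings = {
        'farm': random.randint(5, 10),
        'casino': random.randint(-50, 50),
        'cave': random.randint(0, 30),
        'house': random.randint(0, 5),
    }
    building = request.form['building']
    if building in buildings:
        amount = buildings[building]
        session['gold'] = session['gold'] + amount
        # Reassign (rather than append in place) so Flask marks the
        # session as modified and persists the change.
        session['activities'] = session['activities'] + [
            'Got {} gold from the {}'.format(amount, building)]
    return redirect('/')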
### Effective medium theory calculator. ### # encoding: utf-8 # Written by: Rasmus E. Christiansen, s072162 # Version 1.02 # Imported Packages import sys import numpy from math import sqrt,exp from ase.data import atomic_numbers, chemical_symbols from ase.units import Bohr from asap3 import FullNeighborList # Gobal parameters parameters = { # E0 s0 V0 eta2 kappa lambda n0 Lattice type # eV bohr eV bohr^-1 bohr^-1 bohr^-1 bohr^-3 'H': (-2.21, 0.71, 2.132, 1.652, 2.790, 1.892, 0.00547, 'dimer'), 'Al': (-3.28, 3.00, 1.493, 1.240, 2.000, 1.169, 0.00700, 'fcc'), 'Cu': (-3.51, 2.67, 2.476, 1.652, 2.740, 1.906, 0.00910, 'fcc'), 'Ag': (-2.96, 3.01, 2.132, 1.652, 2.790, 1.892, 0.00547, 'fcc'), 'Au': (-3.80, 3.00, 2.321, 1.674, 2.873, 2.182, 0.00703, 'fcc'), 'Ni': (-4.44, 2.60, 3.673, 1.669, 2.757, 1.948, 0.01030, 'fcc'), 'Pd': (-3.90, 2.87, 2.773, 1.818, 3.107, 2.155, 0.00688, 'fcc'), 'Pt': (-5.85, 2.90, 4.067, 1.812, 3.145, 2.192, 0.00802, 'fcc'), 'C': (-1.97, 1.18, 0.132, 3.652, 5.790, 2.892, 0.01322, 'dimer'), 'N': (-4.97, 1.18, 0.132, 2.652, 3.790, 2.892, 0.01222, 'dimer'), 'O': (-2.97, 1.25, 2.132, 3.652, 5.790, 4.892, 0.00850, 'dimer')} beta = 1.809 # Calculated from the following equation # beta = ((16 * Pi) / 3)^(1/3) / 2^(1/2) # The "largest" element possibly supported by the calculator (this is determined by the list chemical_symbols). NumElle = len(chemical_symbols) class EMT: """ This class is an implementation of the revised edition of the Effective Medium Theory approach of calculating the energy of a given FCC crystal system. The functional form of the equations used can be found in ******* """ def __init__(self,Params=None,ZeroPoint=1): """ Initializes the EMT object. The input Params is used to specify userdefined parameters and the input Zeropoint is used to specify whether the potential energy measured realtive to E0 = Ecoh (ZeroPoint = 1) or E0 = 0 (ZeroPoint = 0). """ # Secures that the calculator is initialized correctly the first time it is used. self.energy = None self.ZeroPoint = ZeroPoint # If no parameters have been specified when creating the EMT object the default parameters are used. if Params is None: self.parameters = parameters else: self.parameters = Params def initialize(self, atoms): """ Method which initializes the EMT calculator by defining all needed values used in the calculations. """ # A list, Z, of the element type for each atom in the system is defined: self.Z = atoms.get_atomic_numbers() # The number of atoms are calculated: self.N = len(self.Z) # Lists of the values for eta2, kappa, Seq, E0, V0, n0 and L (lambda) for the element types Z are # defined: # The "largest" element number is corrected with regards to the method of counting in python. 
self.eta2 = numpy.zeros(NumElle) self.kappa = numpy.zeros(NumElle) self.Seq = numpy.zeros(NumElle) self.E0 = numpy.zeros(NumElle) self.V0 = numpy.zeros(NumElle) self.L = numpy.zeros(NumElle) self.n0 = numpy.zeros(NumElle) for i in range(NumElle): if chemical_symbols[i] in self.parameters: self.eta2[i] = self.parameters[chemical_symbols[i]][3] / Bohr self.kappa[i] = self.parameters[chemical_symbols[i]][4] / Bohr self.Seq[i] = self.parameters[chemical_symbols[i]][1] * Bohr self.E0[i] = self.parameters[chemical_symbols[i]][0] self.V0[i] = self.parameters[chemical_symbols[i]][2] self.L[i] = self.parameters[chemical_symbols[i]][5] / Bohr self.n0[i] = self.parameters[chemical_symbols[i]][6] / (Bohr**3) # Calculation of the X*X-arrays: # r_cut; X*X-array of vaules for the cutoff length for the atompair of type (Z,Z') # sigmaaRCUT; X*X-array of values for sigmaa evaluated in r_cut[Z,Z'] # sigmabRCUT; X*X-array of values for sigmab evaluated in r_cut[Z,Z'] # dsigmaadrRCUT; X*X-array of values for the deriviative of sigmaa evaluated in r_cut[Z,Z'] # dsigmabdrRCUT; X*X-array of values for the deriviative of sigmab evaluated in r_cut[Z,Z'] # chi; X*X-array of values for chi for the atompair of type (Z,Z') # The cutoff distances are calculated using that the lattice constant, a0 = sqrt(2)*beta*s0: self.r_cut = numpy.zeros([NumElle,NumElle]) for i in range(NumElle): for j in range(NumElle): # Check to see if the two elements considered are defined or not. Only calculating an # r_cut if both are. r_cut[Z,Z'] is calculated as the cutoff distance for the the one of # elements Z,Z' which has the largest a0. if self.Seq[i] and self.Seq[j] != 0: self.r_cut[i,j] = (1./2. * (sqrt(3. / 2.) + sqrt(4. / 2.)) * (sqrt(2) * beta) * max(self.Seq[i],self.Seq[j])) ### Calculations for sigmaaRCUT, sigmabRCUT, d_sigmaadrRCUT and d_sigmabdrRCUT ### self.dsigmaadrRCUT = numpy.zeros([NumElle,NumElle]) self.dsigmabdrRCUT = numpy.zeros([NumElle,NumElle]) self.sigmaaRCUT = numpy.zeros([NumElle,NumElle]) self.sigmabRCUT = numpy.zeros([NumElle,NumElle]) for i in range(NumElle): for j in range(NumElle): # Check to see if r_cut[i,j] is defined for this pair of elements. r_cut[i,j] == 0 means # that it is not defined. if self.r_cut[i,j] != 0: self.sigmaaRCUT[i,j] = (numpy.exp(self.eta2[j] * (-self.r_cut[i,j] + self.Seq[j] * beta))) self.sigmabRCUT[i,j] = (numpy.exp(self.kappa[j] * (-self.r_cut[i,j] / beta + self.Seq[j]))) self.dsigmaadrRCUT[i,j] = -self.eta2[j] * self.sigmaaRCUT[i,j] self.dsigmabdrRCUT[i,j] = -self.kappa[j] / beta * self.sigmabRCUT[i,j] ### Calculations for chi[Z,Z'] ### self.chi = numpy.zeros([NumElle,NumElle]) for i in range(NumElle): for j in range(NumElle): # Check to see if the elements i,j are defined. if self.n0[i] and self.n0[j] != 0: self.chi[i,j] = self.n0[i] / self.n0[j] ### Calculations of gamma1 and gamma2 ### # Four (3 x NumElle)-arrays for lambda_1,2 (named L_1,2) and sigmaa_1,2 are calculated with the distance, # r_ij = (beta * Seq, sqrt(2) * beta * Seq, sqrt(3) * beta * Seq) for all supportet elements. # where Z = Z'. # The NumberNearestNeighbours variable is set to the number of nearest neighbors included in the model. 
NumberNearestNeighbours = 3 # arrays for lambda and sigmaa are initialized L_1_Z = numpy.zeros([NumberNearestNeighbours,NumElle]) L_2_Z = numpy.zeros([NumberNearestNeighbours,NumElle]) sigmaa_Z = numpy.zeros([NumberNearestNeighbours,NumElle]) sigmab_Z = numpy.zeros([NumberNearestNeighbours,NumElle]) # The values for each are calculated for each neighbour distance for i in range(NumberNearestNeighbours): L_1_Z[i] = (self.dsigmaadrRCUT[range(NumElle),range(NumElle)] * ((sqrt(1 + i) * beta * self.Seq) - self.r_cut[range(NumElle),range(NumElle)]) + self.sigmaaRCUT[range(NumElle),range(NumElle)]) L_2_Z[i] = (self.dsigmabdrRCUT[range(NumElle),range(NumElle)] * ((sqrt(1 + i) * beta * self.Seq) - self.r_cut[range(NumElle),range(NumElle)]) + self.sigmabRCUT[range(NumElle),range(NumElle)]) sigmaa_Z[i] = numpy.exp(self.eta2 * (-(sqrt(1 + i) * beta * self.Seq) + self.Seq * beta)) sigmab_Z[i] = numpy.exp(self.kappa * (-(sqrt(1 + i) * self.Seq) + self.Seq)) # The factor (self.Seq/self.Seq) is an array of zeros and ones and is only used to secure that only # the elements which are actually defined in "parameters" gives a gamma_1,2 different from zero. self.gamma1 = ((self.Seq/self.Seq) * (12 * (sigmaa_Z[0] - L_1_Z[0]) + 6 * (sigmaa_Z[1] - L_1_Z[1]) + 24 * (sigmaa_Z[2] - L_1_Z[2]) )) self.gamma2 = ((self.Seq/self.Seq) * (12 * (sigmab_Z[0] - L_2_Z[0]) + 6 * (sigmab_Z[1] - L_2_Z[1]) + 24 * (sigmab_Z[2] - L_2_Z[2]) )) ### Construction of a Full Neighborlist for the system of atoms, self.nbList = FullNeighborList(self.r_cut.max(),atoms) ### Initialization of the variables holding the forces on and energy of the atoms ### self.forces = None self.energy = None def NeighborList_rcutReduced(self,i): """ Method which makes sure that only the neighboratoms within the correct cutoff for the involved element types Z,Z' are included in the calculations by modifying the output of the FullNeighborList function. """ # Relavant data about the neighbor atom, j, for atom i which can possible give a contribution are # selected (other_j,r_ij,rsq) = self.nbList.get_neighbors(i) # The neighbor atoms which will actually give a contribution to the energy, based on the individual # cutoff distances between atom i of type Z and atom j of type Z', are selected. # The neighbor atoms which fullfill the condition are chosen keep = numpy.sqrt(rsq) <= self.r_cut[self.Z[i],self.Z[other_j]] # The lists of data about the neighbor atoms are updated if len(keep) != 0: return (other_j[keep],r_ij[keep],rsq[keep]) else: # nbList returned empty lists, but we cannot index a shape (0,3) array (r_ij) # with an empty list (bug in numpy?). return (other_j,r_ij,rsq) def update(self, atoms): """ This method is called by the atoms object to which the calculator is attached, it secures that the energy (and/or force) of the system is recalculated if this is required. """ need_calc = False if (self.energy is None or len(self.Z) != len(atoms) or (self.Z != atoms.get_atomic_numbers()).any()): # The calculator is initialized with regards to the atoms object. self.initialize(atoms) need_calc = True elif (self.positions != atoms.get_positions()).any(): # The atoms object has not changed enough for the calculator to need a reinitialization but a # new calculation of the value for the energies are still needed. 
need_calc = True if need_calc: self.positions = atoms.get_positions() self.nbList.check_and_update(atoms) self.energy = self.calculate_Energy() self.forces = self.calculate_Force() # Returns the energy of the atoms (the method calculates the energy first if need be) def get_potential_energy(self, atoms): self.update(atoms) return self.energy # Returns the forces on the atoms (the method calculates the forces first if need be) def get_forces(self, atoms): self.update(atoms) return self.forces.copy() def get_stress(self, atoms): raise NotImplementedError('No stresses implemented') ########## ENERGY Calculations ########## ### sigma_1,2 ### def calculate_sigma12(self): """ Calculates and returns sigma_1 and sigma_2. """ # The N-arrays for sigma_1,2 are initialized sigma_1 = numpy.zeros(self.N) sigma_2 = numpy.zeros(self.N) for i in range(self.N): # The numbers of the neighbor atoms, the relative position vectors and length of the position # vectors squared between atom i and the neighbor atoms j are defined in three arrays. (other_j,r_ij,rsq) = self.NeighborList_rcutReduced(i) # The values for the linear subtraction functions evaluated at norm(r_ij,2) for all the atom # pairs, [i,other_j], are calculated. L_1_i = (self.dsigmaadrRCUT[self.Z[i],self.Z[other_j]] * (numpy.sqrt(rsq) - self.r_cut[self.Z[i],self.Z[other_j]]) + self.sigmaaRCUT[self.Z[i],self.Z[other_j]]) L_2_i = (self.dsigmabdrRCUT[self.Z[i],self.Z[other_j]] * (numpy.sqrt(rsq) - self.r_cut[self.Z[i],self.Z[other_j]]) + self.sigmabRCUT[self.Z[i],self.Z[other_j]]) # sigmaa_i and sigmab_i are evaluated at norm(r_ij,2) for all the atom pairs, [i,other_j]. sigmaa_i = (numpy.exp(self.eta2[self.Z[other_j]] * (-numpy.sqrt(rsq) + self.Seq[self.Z[other_j]] * beta))) sigmab_i = (numpy.exp(self.kappa[self.Z[other_j]] * (-numpy.sqrt(rsq) / beta + self.Seq[self.Z[other_j]]))) # The values of sigma_1_i and sigma_2_i are calculated for # atom i, where max(a,b) is introduced in order to # secure a non-zero minimum value for sigma_1 so the # following calculations won't result in an error. sigma_1[i] = max( pow(10,-9) , (self.chi[self.Z[i],self.Z[other_j]] * (sigmaa_i - L_1_i)).sum() ) sigma_2[i] = (self.chi[self.Z[i],self.Z[other_j]] * (sigmab_i - L_2_i)).sum() return (sigma_1,sigma_2) ### s_i ### def calculate_s(self,sigma_1): """ Calculates and returns an N-array containing the neutrality sphere radii, s, for the atoms of the system.""" return self.Seq[self.Z] - numpy.log(sigma_1 / self.gamma1[self.Z]) / (beta * self.eta2[self.Z]) ### E_tot ### def calculate_Energy_function(self,s,sigma_2): """ Calculates and returns the total energy of the system using s and sigma_2. """ # Calculation of the N-array containing the cohesive energy for each of the N atoms. E_c = ( self.E0[self.Z] * (self.L[self.Z] * (s - self.Seq[self.Z]) + 1) * numpy.exp(-self.L[self.Z] * (s - self.Seq[self.Z])) ) - self.E0[self.Z] * self.ZeroPoint # Calculation of the N-array containing the atomic sphere correction energy for each of the N atoms. E_as = (6 * ( self.V0[self.Z] * numpy.exp(-self.kappa[self.Z] * (s - self.Seq[self.Z])) -self.V0[self.Z] * sigma_2 / self.gamma2[self.Z] ) ) # Calculation of the total energy return (E_c + E_as).sum() ### Final Energy Calculator ### def calculate_Energy(self): """ Calculates and returns the energy of the atoms in the atoms object to which the EMT calculator is attached.
The calculations are done using the following methods, also defined in EMT.py: calculate_sigma12(self), calculate_s(self,sigma_1), calculate_Energy_function(self,s,sigma_2). """ (sigma_1,sigma_2) = self.calculate_sigma12() s = self.calculate_s(sigma_1) # The total energy is calculated and returned return self.calculate_Energy_function(s,sigma_2) ########## FORCE Calculations ########## ### dsdsigma_1 ### def calculate_dsdsigma_1(self,sigma_1): """ Calculates and returns dsdsigma_1 using sigma_1. """ # An N-array containing the derivative of the neutrality sphere radii, s, with regards to sigma_1 for # the atoms of the system is calculated. dsdsigma_1 = -1 / (beta * self.eta2[self.Z] * sigma_1) return dsdsigma_1 ### dE_cds, dE_asds, dE_asdsigma_2 ### def calculate_Deriviative_of_Energy(self,s): """ Calculates and returns the derivatives of E_c and E_as with regards to s and sigma_2. """ # Calculation of the N-array containing the derivative of the cohesive energy with regards to s for # each of the N atoms. dE_cds = -( self.E0[self.Z] * self.L[self.Z] * self.L[self.Z] * numpy.exp(-self.L[self.Z] * (s - self.Seq[self.Z])) * (s - self.Seq[self.Z]) ) # Calculation of the N-array containing the derivative of the atomic sphere correction energy with # regards to s for each of the N atoms. dE_asds = -6 * self.kappa[self.Z] * self.V0[self.Z] * numpy.exp(-self.kappa[self.Z] * (s - self.Seq[self.Z])) # Calculation of the N-array containing the derivative of the atomic sphere correction energy with # regards to sigma_2 for each of the N atoms. dE_asdsigma_2 = -6 * self.V0[self.Z] / (self.gamma2[self.Z]) return (dE_cds,dE_asds,dE_asdsigma_2) ### F_kalpha ### def calculate_Force_function(self,dE_cds,dE_asds,dE_asdsigma_2,dsdsigma_1): """ Calculates the force on all k atoms in the three directions {x,y,z} represented by alpha. """ # An array for the force is initialized F = numpy.zeros([self.N,3]) for k in range(self.N): # The atoms interacting with atom k are selected. (other_i,r_ki,rsq) = self.NeighborList_rcutReduced(k) # The values for dr_ijdr_kalpha are calculated for the relevant atoms, k and other_i.
dr_kidr_kalpha = r_ki / numpy.sqrt(rsq)[:,numpy.newaxis] ## The force on the k'th atom caused by the other_i atoms' interactions with k is calculated ## # The values for dsigmaa_idr_ij and dsigmab_idr_ij are calculated with regards to the k'th atom sigmaa_k = numpy.exp(self.eta2[self.Z[other_i]] * (-numpy.sqrt(rsq) + self.Seq[self.Z[other_i]] * beta)) sigmab_k = numpy.exp(self.kappa[self.Z[other_i]] * (-numpy.sqrt(rsq) / beta + self.Seq[self.Z[other_i]])) dsigmaa_kdr_ki = (-self.eta2[self.Z[other_i]] * sigmaa_k)[:,numpy.newaxis] dsigmab_kdr_ki = (-self.kappa[self.Z[other_i]] / beta * sigmab_k)[:,numpy.newaxis] # Values for dL_1idr_ij and dL_2idr_ij are calculated with regards to the k'th atom dL_1kdr_ki = self.dsigmaadrRCUT[self.Z[k],self.Z[other_i]][:,numpy.newaxis] dL_2kdr_ki = self.dsigmabdrRCUT[self.Z[k],self.Z[other_i]][:,numpy.newaxis] # First the values of dsigma1_kdr_kalpha and dsigma2_kdr_kalpha are calculated for the k'th atom dsigma1_kdr_kalpha = ( self.chi[self.Z[k],self.Z[other_i]][:,numpy.newaxis] * (dsigmaa_kdr_ki - dL_1kdr_ki) * dr_kidr_kalpha ).sum(axis=0) dsigma2_kdr_kalpha = ( self.chi[self.Z[k],self.Z[other_i]][:,numpy.newaxis] * (dsigmab_kdr_ki - dL_2kdr_ki) * dr_kidr_kalpha ).sum(axis=0) """ CHECK, TO BE REMOVED LATER """ assert len(dsigma1_kdr_kalpha) == 3 """ CHECK, TO BE REMOVED LATER """ # The contribution to the force on atom k from the k'th atom's interaction with the other_i atoms is # calculated F[k] = (dE_cds[k] * dsdsigma_1[k] * dsigma1_kdr_kalpha + dE_asds[k] * dsdsigma_1[k] * dsigma1_kdr_kalpha + dE_asdsigma_2[k] * dsigma2_kdr_kalpha) # The values for dsigmaa_idr_ij and dsigmab_idr_ij are calculated with regards to the atoms other_i # where j = k for all other_i (thus we only need one value of dsigmaa_idr_ij). sigmaa_i = numpy.exp(self.eta2[self.Z[k]] * (-numpy.sqrt(rsq) + self.Seq[self.Z[k]] * beta)) sigmab_i = numpy.exp(self.kappa[self.Z[k]] * (-numpy.sqrt(rsq) / beta + self.Seq[self.Z[k]])) dsigmaa_idr_ik = (-self.eta2[self.Z[k]] * sigmaa_i)[:,numpy.newaxis] dsigmab_idr_ik = (-self.kappa[self.Z[k]] / beta * sigmab_i)[:,numpy.newaxis] # Values for dL_1idr_ij and dL_2idr_ij are calculated with regards to the atoms other_i # where j = k for all other_i. dL_1idr_ik = self.dsigmaadrRCUT[self.Z[other_i],self.Z[k]][:,numpy.newaxis] dL_2idr_ik = self.dsigmabdrRCUT[self.Z[other_i],self.Z[k]][:,numpy.newaxis] # First the values of dsigma1_idr_kalpha and dsigma2_idr_kalpha are calculated with regards to the atoms # other_i, where j is only the atom k for each other_i (thus the sum only has one element per other_i, # which results in the calculations leading to an [other_i,3]-array).
dsigma1_idr_kalpha = (self.chi[self.Z[other_i],self.Z[k]][:,numpy.newaxis] * (dsigmaa_idr_ik - dL_1idr_ik) * (dr_kidr_kalpha) ) dsigma2_idr_kalpha = (self.chi[self.Z[other_i],self.Z[k]][:,numpy.newaxis] * (dsigmab_idr_ik - dL_2idr_ik) * (dr_kidr_kalpha) ) # The contribution to the force on atom k from the other_i atoms' interaction with the k'th atom is now # calculated F[k] += (dE_cds[other_i][:,numpy.newaxis] * dsdsigma_1[other_i][:,numpy.newaxis] * dsigma1_idr_kalpha + dE_asds[other_i][:,numpy.newaxis] * dsdsigma_1[other_i][:,numpy.newaxis] * dsigma1_idr_kalpha + dE_asdsigma_2[other_i][:,numpy.newaxis] * dsigma2_idr_kalpha).sum(axis=0) """ CHECK, TO BE REMOVED LATER """ assert len(F[k]) == 3 """ CHECK, TO BE REMOVED LATER """ return F ### Final Force Calculator ### def calculate_Force(self): """ Calculates and returns the force acting on each of the atoms in the atoms object to which the EMT calculator is attached. These calculations are done using the following methods, also defined in EMT.py: calculate_sigma12(self), calculate_s(self,sigma_1), calculate_dsdsigma_1 (self,sigma_1), calculate_Deriviative_of_Energy(self,s) and calculate_Force_function (self,dE_cds,dE_asds,dE_asdsigma_2,dsdsigma_1) """ (sigma_1,sigma_2) = self.calculate_sigma12() s = self.calculate_s(sigma_1) dsdsigma_1 = self.calculate_dsdsigma_1(sigma_1) (dE_cds,dE_asds,dE_asdsigma_2) = self.calculate_Deriviative_of_Energy(s) # The force is calculated and returned return self.calculate_Force_function(dE_cds,dE_asds,dE_asdsigma_2,dsdsigma_1)
auag92/n2dm
Asap-3.8.4/Python/asap3/EMT2011_py.py
Python
mit
23,251
[ "ASE", "CRYSTAL" ]
a2c404ff3eee8b4adbbfce612b8e88f10a2a5e24232e59bb7ffc474b718d852e
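A hypothetical driver for the calculator above; the import path follows this row's file location (asap3/EMT2011_py.py) and is an assumption, as is having ASE and Asap installed.

from ase.lattice.cubic import FaceCenteredCubic
from asap3.EMT2011_py import EMT

atoms = FaceCenteredCubic(symbol='Cu', size=(3, 3, 3))
atoms.set_calculator(EMT(ZeroPoint=1))  # energies relative to Ecoh, per the docstring

print(atoms.get_potential_energy())     # total energy in eV
print(atoms.get_forces().shape)         # -> (len(atoms), 3)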
""" Code to help with managing a TVTK data set in Pythonic ways. """ # Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in> # Copyright (c) 2008, Enthought, Inc. # License: BSD Style. from enthought.traits.api import (HasTraits, Instance, Array, Str, Property, Dict) from enthought.tvtk.api import tvtk from enthought.tvtk.array_handler import array2vtk ###################################################################### # Utility functions. ###################################################################### def get_array_type(arr): """Returns if the array is a scalar ('scalars'), vector ('vectors') or tensor ('tensors'). It looks at the number of components to decide. If it has a wierd number of components it returns the empty string. """ n = arr.number_of_components ret = {1: 'scalars', 3: 'vectors', 4: 'scalars', 9:'tensors'} return ret.get(n) or '' def get_attribute_list(data): """ Gets scalar, vector and tensor information from the given data (either cell or point data). """ attr = {'scalars':[], 'vectors':[], 'tensors':[]} if data is not None: n = data.number_of_arrays for i in range(n): name = data.get_array_name(i) t = get_array_type(data.get_array(i)) if len(t) > 0 and name is not None: attr[t].extend([name]) def _mk_first(lst, value): """Makes the specified `value` the first item in `lst`.""" lst.remove(value) lst.insert(0, value) attr1 = attr.copy() for a in attr: v = getattr(data, a) if v is not None: name = v.name if name is not None: try: _mk_first(attr[a], v.name) except ValueError: # Sometimes we have a multi-component scalar. attr1[a].insert(0, name) return attr1 def get_all_attributes(obj): """Gets the scalar, vector and tensor attributes that are available in the given VTK data object. """ point_attr = get_attribute_list(obj.point_data) cell_attr = get_attribute_list(obj.cell_data) return point_attr, cell_attr ################################################################################ # `DatasetManager` class. ################################################################################ class DatasetManager(HasTraits): # The TVTK dataset we manage. dataset = Instance(tvtk.DataSet) # Our output, this is the dataset modified by us with different # active arrays. output = Property(Instance(tvtk.DataSet)) # The point scalars for the dataset. You may manipulate the arrays # in-place. However adding new keys in this dict will not set the # data in the `dataset` for that you must explicitly call # `add_array`. point_scalars = Dict(Str, Array) # Point vectors. point_vectors = Dict(Str, Array) # Point tensors. point_tensors = Dict(Str, Array) # The cell scalars for the dataset. cell_scalars = Dict(Str, Array) cell_vectors = Dict(Str, Array) cell_tensors = Dict(Str, Array) # This filter allows us to change the attributes of the data # object and will ensure that the pipeline is properly taken care # of. Directly setting the array in the VTK object will not do # this. _assign_attribute = Instance(tvtk.AssignAttribute, args=(), allow_none=False) ###################################################################### # Public interface. ###################################################################### def add_array(self, array, name, category='point'): """ Add an array to the dataset to specified category ('point' or 'cell'). """ assert len(array.shape) <= 2, "Only 2D arrays can be added." 
data = getattr(self.dataset, '%s_data'%category) if len(array.shape) == 2: assert array.shape[1] in [1, 3, 4, 9], \ "Only Nxm arrays where (m in [1,3,4,9]) are supported" va = tvtk.to_tvtk(array2vtk(array)) va.name = name data.add_array(va) mapping = {1:'scalars', 3: 'vectors', 4: 'scalars', 9: 'tensors'} dict = getattr(self, '%s_%s'%(category, mapping[array.shape[1]])) dict[name] = array else: va = tvtk.to_tvtk(array2vtk(array)) va.name = name data.add_array(va) dict = getattr(self, '%s_scalars'%(category)) dict[name] = array def remove_array(self, name, category='point'): """Remove an array by its name and optional category (point and cell). Returns the removed array. """ type = self._find_array(name, category) data = getattr(self.dataset, '%s_data'%category) data.remove_array(name) d = getattr(self, '%s_%s'%(category, type)) return d.pop(name) def rename_array(self, name1, name2, category='point'): """Rename a particular array from `name1` to `name2`. """ type = self._find_array(name1, category) data = getattr(self.dataset, '%s_data'%category) arr = data.get_array(name1) arr.name = name2 d = getattr(self, '%s_%s'%(category, type)) d[name2] = d.pop(name1) def activate(self, name, category='point'): """Make the specified array the active one. """ type = self._find_array(name, category) self._activate_data_array(type, category, name) def update(self): """Update the dataset when the arrays are changed. """ self.dataset.modified() self._assign_attribute.update() ###################################################################### # Non-public interface. ###################################################################### def _dataset_changed(self, value): self._setup_data() self._assign_attribute.input = value def _get_output(self): return self._assign_attribute.output def _setup_data(self): """Updates the arrays from what is available in the input data. """ input = self.dataset pnt_attr, cell_attr = get_all_attributes(input) self._setup_data_arrays(cell_attr, 'cell') self._setup_data_arrays(pnt_attr, 'point') def _setup_data_arrays(self, attributes, d_type): """Given the dict of the attributes from the `get_all_attributes` function and the data type (point/cell) data this will setup the object and the data. """ attrs = ['scalars', 'vectors', 'tensors'] aa = self._assign_attribute input = self.dataset data = getattr(input, '%s_data'%d_type) for attr in attrs: values = attributes[attr] # Get the arrays from VTK, create numpy arrays and setup our # traits. arrays = {} for name in values: va = data.get_array(name) npa = va.to_array() # Now test if changes to the numpy array are reflected # in the VTK array, if they are we are set, else we # have to set the VTK array back to the numpy array. if len(npa.shape) > 1: old = npa[0,0] npa[0][0] = old - 1 if abs(va[0][0] - npa[0,0]) > 1e-8: va.from_array(npa) npa[0][0] = old else: old = npa[0] npa[0] = old - 1 if abs(va[0] - npa[0]) > 1e-8: va.from_array(npa) npa[0] = old arrays[name] = npa setattr(self, '%s_%s'%(d_type, attr), arrays) def _activate_data_array(self, data_type, category, name): """Activate (or deactivate) a particular array. Given the nature of the data (scalars, vectors etc.) and the type of data (cell or points) it activates the array given by its name. Parameters: ----------- data_type: one of 'scalars', 'vectors', 'tensors' category: one of 'cell', 'point'. name: string of array name to activate. 
""" input = self.dataset data = None data = getattr(input, category + '_data') method = getattr(data, 'set_active_%s'%data_type) if len(name) == 0: # If the value is empty then we deactivate that attribute. method(None) else: aa = self._assign_attribute method(name) aa.assign(name, data_type.upper(), category.upper() +'_DATA') aa.update() def _find_array(self, name, category='point'): """Return information on which kind of attribute contains the specified named array in a particular category.""" types = ['scalars', 'vectors', 'tensors'] for type in types: attr = '%s_%s'%(category, type) d = getattr(self, attr) if name in d.keys(): return type raise KeyError('No %s array named %s available in dataset' %(category, name))
olivierverdier/sfepy
sfepy/postprocess/dataset_manager.py
Python
bsd-3-clause
9,486
[ "VTK" ]
c696c9de36d5bbe5bdc489b1a5eb8a17255b320ba66700820ed2cd7baa70d724
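A usage sketch for DatasetManager, assuming the old enthought.* TVTK namespace used by this module is available; the point data is made up.

import numpy
from enthought.tvtk.api import tvtk

pd = tvtk.PolyData(points=numpy.random.random((4, 3)))

dm = DatasetManager(dataset=pd)
dm.add_array(numpy.arange(4.0), 'temperature', category='point')
dm.activate('temperature', category='point')   # set the active point scalars
dm.point_scalars['temperature'] *= 2.0         # edit the numpy view in place...
dm.update()                                    # ...then push the change downstream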
# -*- coding: utf-8 -*- import itertools import functools import os import re import urllib import logging import pymongo import datetime import urlparse from collections import OrderedDict import warnings import pytz from flask import request from django.core.urlresolvers import reverse from modularodm import Q from modularodm import fields from modularodm.validators import MaxLengthValidator from modularodm.exceptions import NoResultsFound from modularodm.exceptions import ValidationTypeError from modularodm.exceptions import ValidationValueError from api.base.utils import absolute_reverse from framework import status from framework.mongo import ObjectId from framework.mongo import StoredObject from framework.addons import AddonModelMixin from framework.auth import get_user, User, Auth from framework.auth import signals as auth_signals from framework.exceptions import PermissionsError from framework.guid.model import GuidStoredObject from framework.auth.utils import privacy_info_handle from framework.analytics import tasks as piwik_tasks from framework.mongo.utils import to_mongo, to_mongo_key, unique_on from framework.analytics import ( get_basic_counters, increment_user_activity_counters ) from framework.sentry import log_exception from framework.transactions.context import TokuTransaction from framework.utils import iso8601format from website import language, mails, settings, tokens from website.util import web_url_for from website.util import api_url_for from website.util import sanitize from website.exceptions import ( NodeStateError, InvalidSanctionApprovalToken, InvalidSanctionRejectionToken, ) from website.citations.utils import datetime_to_csl from website.identifiers.model import IdentifierMixin from website.util.permissions import expand_permissions from website.util.permissions import CREATOR_PERMISSIONS, DEFAULT_CONTRIBUTOR_PERMISSIONS, ADMIN from website.project.metadata.schemas import OSF_META_SCHEMAS from website.project import signals as project_signals logger = logging.getLogger(__name__) VIEW_PROJECT_URL_TEMPLATE = settings.DOMAIN + '{node_id}/' def has_anonymous_link(node, auth): """check if the node is anonymous to the user :param Node node: Node which the user wants to visit :param str link: any view-only link in the current url :return bool anonymous: Whether the node is anonymous to the user or not """ view_only_link = auth.private_key or request.args.get('view_only', '').strip('/') if not view_only_link: return False if node.is_public: return False return any( link.anonymous for link in node.private_links_active if link.key == view_only_link ) class MetaSchema(StoredObject): _id = fields.StringField(default=lambda: str(ObjectId())) name = fields.StringField() schema = fields.DictionaryField() category = fields.StringField() # Version of the Knockout metadata renderer to use (e.g. if data binds # change) metadata_version = fields.IntegerField() # Version of the schema to use (e.g. if questions, responses change) schema_version = fields.IntegerField() def ensure_schemas(clear=True): """Import meta-data schemas from JSON to database, optionally clearing database first. 
:param clear: Clear schema database before import """ if clear: try: MetaSchema.remove() except AttributeError: if not settings.DEBUG_MODE: raise for schema in OSF_META_SCHEMAS: try: MetaSchema.find_one( Q('name', 'eq', schema['name']) & Q('schema_version', 'eq', schema['schema_version']) ) except: schema['name'] = schema['name'].replace(' ', '_') schema_obj = MetaSchema(**schema) schema_obj.save() class MetaData(GuidStoredObject): _id = fields.StringField(primary=True) target = fields.AbstractForeignField(backref='metadata') data = fields.DictionaryField() date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow) date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow) def validate_comment_reports(value, *args, **kwargs): for key, val in value.iteritems(): if not User.load(key): raise ValidationValueError('Keys must be user IDs') if not isinstance(val, dict): raise ValidationTypeError('Values must be dictionaries') if 'category' not in val or 'text' not in val: raise ValidationValueError( 'Values must include `category` and `text` keys' ) class Comment(GuidStoredObject): _id = fields.StringField(primary=True) user = fields.ForeignField('user', required=True, backref='commented') node = fields.ForeignField('node', required=True, backref='comment_owner') target = fields.AbstractForeignField(required=True, backref='commented') date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow) date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow) modified = fields.BooleanField() is_deleted = fields.BooleanField(default=False) content = fields.StringField() # Dictionary field mapping user IDs to dictionaries of report details: # { # 'icpnw': {'category': 'hate', 'message': 'offensive'}, # 'cdi38': {'category': 'spam', 'message': 'godwins law'}, # } reports = fields.DictionaryField(validate=validate_comment_reports) @classmethod def create(cls, auth, **kwargs): comment = cls(**kwargs) comment.save() comment.node.add_log( NodeLog.COMMENT_ADDED, { 'project': comment.node.parent_id, 'node': comment.node._id, 'user': comment.user._id, 'comment': comment._id, }, auth=auth, save=False, ) comment.node.save() return comment def edit(self, content, auth, save=False): self.content = content self.modified = True self.node.add_log( NodeLog.COMMENT_UPDATED, { 'project': self.node.parent_id, 'node': self.node._id, 'user': self.user._id, 'comment': self._id, }, auth=auth, save=False, ) if save: self.save() def delete(self, auth, save=False): self.is_deleted = True self.node.add_log( NodeLog.COMMENT_REMOVED, { 'project': self.node.parent_id, 'node': self.node._id, 'user': self.user._id, 'comment': self._id, }, auth=auth, save=False, ) if save: self.save() def undelete(self, auth, save=False): self.is_deleted = False self.node.add_log( NodeLog.COMMENT_ADDED, { 'project': self.node.parent_id, 'node': self.node._id, 'user': self.user._id, 'comment': self._id, }, auth=auth, save=False, ) if save: self.save() def report_abuse(self, user, save=False, **kwargs): """Report that a comment is abuse. :param User user: User submitting the report :param bool save: Save changes :param dict kwargs: Report details :raises: ValueError if the user submitting abuse is the same as the user who posted the comment """ if user == self.user: raise ValueError self.reports[user._id] = kwargs if save: self.save() def unreport_abuse(self, user, save=False): """Revoke report of abuse. 
:param User user: User who submitted the report :param bool save: Save changes :raises: ValueError if user has not reported comment as abuse """ try: self.reports.pop(user._id) except KeyError: raise ValueError('User has not reported comment as abuse') if save: self.save() @unique_on(['params.node', '_id']) class NodeLog(StoredObject): _id = fields.StringField(primary=True, default=lambda: str(ObjectId())) date = fields.DateTimeField(default=datetime.datetime.utcnow, index=True) action = fields.StringField(index=True) params = fields.DictionaryField() should_hide = fields.BooleanField(default=False) was_connected_to = fields.ForeignField('node', list=True) user = fields.ForeignField('user', index=True) foreign_user = fields.StringField() DATE_FORMAT = '%m/%d/%Y %H:%M UTC' # Log action constants -- NOTE: templates stored in log_templates.mako CREATED_FROM = 'created_from' PROJECT_CREATED = 'project_created' PROJECT_REGISTERED = 'project_registered' PROJECT_DELETED = 'project_deleted' NODE_CREATED = 'node_created' NODE_FORKED = 'node_forked' NODE_REMOVED = 'node_removed' POINTER_CREATED = 'pointer_created' POINTER_FORKED = 'pointer_forked' POINTER_REMOVED = 'pointer_removed' WIKI_UPDATED = 'wiki_updated' WIKI_DELETED = 'wiki_deleted' WIKI_RENAMED = 'wiki_renamed' MADE_WIKI_PUBLIC = 'made_wiki_public' MADE_WIKI_PRIVATE = 'made_wiki_private' CONTRIB_ADDED = 'contributor_added' CONTRIB_REMOVED = 'contributor_removed' CONTRIB_REORDERED = 'contributors_reordered' PERMISSIONS_UPDATED = 'permissions_updated' MADE_PRIVATE = 'made_private' MADE_PUBLIC = 'made_public' TAG_ADDED = 'tag_added' TAG_REMOVED = 'tag_removed' EDITED_TITLE = 'edit_title' EDITED_DESCRIPTION = 'edit_description' UPDATED_FIELDS = 'updated_fields' FILE_MOVED = 'addon_file_moved' FILE_COPIED = 'addon_file_copied' FILE_RENAMED = 'addon_file_renamed' FOLDER_CREATED = 'folder_created' FILE_ADDED = 'file_added' FILE_UPDATED = 'file_updated' FILE_REMOVED = 'file_removed' FILE_RESTORED = 'file_restored' ADDON_ADDED = 'addon_added' ADDON_REMOVED = 'addon_removed' COMMENT_ADDED = 'comment_added' COMMENT_REMOVED = 'comment_removed' COMMENT_UPDATED = 'comment_updated' MADE_CONTRIBUTOR_VISIBLE = 'made_contributor_visible' MADE_CONTRIBUTOR_INVISIBLE = 'made_contributor_invisible' EXTERNAL_IDS_ADDED = 'external_ids_added' EMBARGO_APPROVED = 'embargo_approved' EMBARGO_CANCELLED = 'embargo_cancelled' EMBARGO_COMPLETED = 'embargo_completed' EMBARGO_INITIATED = 'embargo_initiated' RETRACTION_APPROVED = 'retraction_approved' RETRACTION_CANCELLED = 'retraction_cancelled' RETRACTION_INITIATED = 'retraction_initiated' REGISTRATION_APPROVAL_CANCELLED = 'registration_cancelled' REGISTRATION_APPROVAL_INITIATED = 'registration_initiated' REGISTRATION_APPROVAL_APPROVED = 'registration_approved' def __repr__(self): return ('<NodeLog({self.action!r}, params={self.params!r}) ' 'with id {self._id!r}>').format(self=self) @property def node(self): """Return the :class:`Node` associated with this log.""" return ( Node.load(self.params.get('node')) or Node.load(self.params.get('project')) ) @property def tz_date(self): '''Return the timezone-aware date. ''' # Date should always be defined, but a few logs in production are # missing dates; return None and log error if date missing if self.date: return self.date.replace(tzinfo=pytz.UTC) logger.error('Date missing on NodeLog {}'.format(self._primary_key)) @property def formatted_date(self): '''Return the timezone-aware, ISO-formatted string representation of this log's date. 
''' if self.tz_date: return self.tz_date.isoformat() def resolve_node(self, node): """A single `NodeLog` record may be attached to multiple `Node` records (parents, forks, registrations, etc.), so the node that the log refers to may not be the same as the node the user is viewing. Use `resolve_node` to determine the relevant node to use for permission checks. :param Node node: Node being viewed """ if self.node == node or self.node in node.nodes: return self.node if node.is_fork_of(self.node) or node.is_registration_of(self.node): return node for child in node.nodes: if child.is_fork_of(self.node) or node.is_registration_of(self.node): return child return False def can_view(self, node, auth): node_to_check = self.resolve_node(node) if node_to_check: return node_to_check.can_view(auth) return False def _render_log_contributor(self, contributor, anonymous=False): user = User.load(contributor) if not user: # Handle legacy non-registered users, which were # represented as a dict if isinstance(contributor, dict): if 'nr_name' in contributor: return { 'fullname': contributor['nr_name'], 'registered': False, } return None if self.node: fullname = user.display_full_name(node=self.node) else: fullname = user.fullname return { 'id': privacy_info_handle(user._primary_key, anonymous), 'fullname': privacy_info_handle(fullname, anonymous, name=True), 'registered': user.is_registered, } class Tag(StoredObject): _id = fields.StringField(primary=True, validate=MaxLengthValidator(128)) def __repr__(self): return '<Tag() with id {self._id!r}>'.format(self=self) @property def url(self): return '/search/?tags={}'.format(self._id) class Pointer(StoredObject): """A link to a Node. The Pointer delegates all but a few methods to its contained Node. Forking and registration are overridden such that the link is cloned, but its contained Node is not. """ #: Whether this is a pointer or not primary = False _id = fields.StringField() node = fields.ForeignField('node', backref='_pointed') _meta = {'optimistic': True} def _clone(self): if self.node: clone = self.clone() clone.node = self.node clone.save() return clone def fork_node(self, *args, **kwargs): return self._clone() def register_node(self, *args, **kwargs): return self._clone() def use_as_template(self, *args, **kwargs): return self._clone() def resolve(self): return self.node def __getattr__(self, item): """Delegate attribute access to the node being pointed to.""" # Prevent backref lookups from being overriden by proxied node try: return super(Pointer, self).__getattr__(item) except AttributeError: pass if self.node: return getattr(self.node, item) raise AttributeError( 'Pointer object has no attribute {0}'.format( item ) ) def get_pointer_parent(pointer): """Given a `Pointer` object, return its parent node. """ # The `parent_node` property of the `Pointer` schema refers to the parents # of the pointed-at `Node`, not the parents of the `Pointer`; use the # back-reference syntax to find the parents of the `Pointer`. parent_refs = pointer.node__parent assert len(parent_refs) == 1, 'Pointer must have exactly one parent.' return parent_refs[0] def validate_category(value): """Validator for Node#category. Makes sure that the value is one of the categories defined in CATEGORY_MAP. """ if value not in Node.CATEGORY_MAP.keys(): raise ValidationValueError('Invalid value for category.') return True def validate_title(value): """Validator for Node#title. Makes sure that the value exists and is not above 200 characters. 
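For example (illustrative): ``validate_title('My Project')`` returns ``True``,
while a blank title or one longer than 200 characters raises
``ValidationValueError``.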
""" if value is None or not value.strip(): raise ValidationValueError('Title cannot be blank.') value = sanitize.strip_html(value) if value is None or not value.strip(): raise ValidationValueError('Invalid title.') if len(value) > 200: raise ValidationValueError('Title cannot exceed 200 characters.') return True def validate_user(value): if value != {}: user_id = value.iterkeys().next() if User.find(Q('_id', 'eq', user_id)).count() != 1: raise ValidationValueError('User does not exist.') return True class NodeUpdateError(Exception): def __init__(self, reason, key, *args, **kwargs): super(NodeUpdateError, self).__init__(*args, **kwargs) self.key = key self.reason = reason class Node(GuidStoredObject, AddonModelMixin, IdentifierMixin): #: Whether this is a pointer or not primary = True __indices__ = [{ 'unique': False, 'key_or_list': [ ('tags.$', pymongo.ASCENDING), ('is_public', pymongo.ASCENDING), ('is_deleted', pymongo.ASCENDING), ] }] # Node fields that trigger an update to Solr on save SOLR_UPDATE_FIELDS = { 'title', 'category', 'description', 'visible_contributor_ids', 'tags', 'is_fork', 'is_registration', 'retraction', 'embargo', 'is_public', 'is_deleted', 'wiki_pages_current', 'is_retracted', } # Maps category identifier => Human-readable representation for use in # titles, menus, etc. # Use an OrderedDict so that menu items show in the correct order CATEGORY_MAP = OrderedDict([ ('', 'Uncategorized'), ('project', 'Project'), ('hypothesis', 'Hypothesis'), ('methods and measures', 'Methods and Measures'), ('procedure', 'Procedure'), ('instrumentation', 'Instrumentation'), ('data', 'Data'), ('analysis', 'Analysis'), ('communication', 'Communication'), ('other', 'Other'), ]) # Fields that are writable by Node.update WRITABLE_WHITELIST = [ 'title', 'description', 'category', 'is_public', ] # Named constants PRIVATE = 'private' PUBLIC = 'public' _id = fields.StringField(primary=True) date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow, index=True) # Privacy is_public = fields.BooleanField(default=False, index=True) # User mappings permissions = fields.DictionaryField() visible_contributor_ids = fields.StringField(list=True) # Project Organization is_dashboard = fields.BooleanField(default=False, index=True) is_folder = fields.BooleanField(default=False, index=True) # Expanded: Dictionary field mapping user IDs to expand state of this node: # { # 'icpnw': True, # 'cdi38': False, # } expanded = fields.DictionaryField(default={}, validate=validate_user) is_deleted = fields.BooleanField(default=False, index=True) deleted_date = fields.DateTimeField(index=True) is_registration = fields.BooleanField(default=False, index=True) registered_date = fields.DateTimeField(index=True) registered_user = fields.ForeignField('user', backref='registered') registered_schema = fields.ForeignField('metaschema', backref='registered') registered_meta = fields.DictionaryField() registration_approval = fields.ForeignField('registrationapproval') retraction = fields.ForeignField('retraction') embargo = fields.ForeignField('embargo') is_fork = fields.BooleanField(default=False, index=True) forked_date = fields.DateTimeField(index=True) title = fields.StringField(validate=validate_title) description = fields.StringField() category = fields.StringField(validate=validate_category, index=True) # One of 'public', 'private' # TODO: Add validator comment_level = fields.StringField(default='private') wiki_pages_current = fields.DictionaryField() wiki_pages_versions = fields.DictionaryField() # 
Dictionary field mapping node wiki page to sharejs private uuid. # {<page_name>: <sharejs_id>} wiki_private_uuids = fields.DictionaryField() file_guid_to_share_uuids = fields.DictionaryField() creator = fields.ForeignField('user', backref='created') contributors = fields.ForeignField('user', list=True, backref='contributed') users_watching_node = fields.ForeignField('user', list=True, backref='watched') logs = fields.ForeignField('nodelog', list=True, backref='logged') tags = fields.ForeignField('tag', list=True, backref='tagged') # Tags for internal use system_tags = fields.StringField(list=True) nodes = fields.AbstractForeignField(list=True, backref='parent') forked_from = fields.ForeignField('node', backref='forked', index=True) registered_from = fields.ForeignField('node', backref='registrations', index=True) # The node (if any) used as a template for this node's creation template_node = fields.ForeignField('node', backref='template_node', index=True) piwik_site_id = fields.StringField() # Dictionary field mapping user id to a list of nodes in node.nodes which the user has subscriptions for # {<User.id>: [<Node._id>, <Node2._id>, ...] } child_node_subscriptions = fields.DictionaryField(default=dict) _meta = { 'optimistic': True, } def __init__(self, *args, **kwargs): tags = kwargs.pop('tags', []) super(Node, self).__init__(*args, **kwargs) # Ensure when Node is created with tags through API, tags are added to Tag if tags: for tag in tags: self.add_tag(tag, Auth(self.creator), save=False, log=False) if kwargs.get('_is_loaded', False): return if self.creator: self.contributors.append(self.creator) self.set_visible(self.creator, visible=True, log=False) # Add default creator permissions for permission in CREATOR_PERMISSIONS: self.add_permission(self.creator, permission, save=False) def __repr__(self): return ('<Node(title={self.title!r}, category={self.category!r}) ' 'with _id {self._id!r}>').format(self=self) # For Django compatibility @property def pk(self): return self._id @property def category_display(self): """The human-readable representation of this node's category.""" return self.CATEGORY_MAP[self.category] # We need the following 2 properties in order to serialize related links in NodeRegistrationSerializer @property def registered_user_id(self): """The ID of the user who registered this node if this is a registration, else None. """ if self.registered_user: return self.registered_user._id return None @property def registered_from_id(self): """The ID of the user who registered this node if this is a registration, else None. 
""" if self.registered_from: return self.registered_from._id return None @property def sanction(self): sanction = self.registration_approval or self.embargo or self.retraction if sanction: return sanction elif self.parent_node: return self.parent_node.sanction else: return None @property def is_pending_registration(self): if not self.is_registration: return False if self.registration_approval is None: if self.parent_node: return self.parent_node.is_pending_registration return False return self.registration_approval.pending_approval @property def is_registration_approved(self): if self.registration_approval is None: if self.parent_node: return self.parent_node.is_registration_approved return False return self.registration_approval.is_approved @property def is_retracted(self): if self.retraction is None: if self.parent_node: return self.parent_node.is_retracted return False return self.retraction.is_approved @property def is_pending_retraction(self): if self.retraction is None: if self.parent_node: return self.parent_node.is_pending_retraction return False return self.retraction.pending_approval @property def embargo_end_date(self): if self.embargo is None: if self.parent_node: return self.parent_node.embargo_end_date return False return self.embargo.embargo_end_date @property def is_pending_embargo(self): if self.embargo is None: if self.parent_node: return self.parent_node.is_pending_embargo return False return self.embargo.pending_approval @property def is_pending_embargo_for_existing_registration(self): """ Returns True if Node has an Embargo pending approval for an existing registrations. This is used specifically to ensure registrations pre-dating the Embargo feature do not get deleted if their respective Embargo request is rejected. """ if self.embargo is None: if self.parent_node: return self.parent_node.is_pending_embargo_for_existing_registration return False return self.embargo.pending_registration @property def private_links(self): return self.privatelink__shared @property def private_links_active(self): return [x for x in self.private_links if not x.is_deleted] @property def private_link_keys_active(self): return [x.key for x in self.private_links if not x.is_deleted] @property def private_link_keys_deleted(self): return [x.key for x in self.private_links if x.is_deleted] def path_above(self, auth): parents = self.parents return '/' + '/'.join([p.title if p.can_view(auth) else '-- private project --' for p in reversed(parents)]) @property def ids_above(self): parents = self.parents return {p._id for p in parents} @property def nodes_active(self): return [x for x in self.nodes if not x.is_deleted] def can_edit(self, auth=None, user=None): """Return if a user is authorized to edit this node. Must specify one of (`auth`, `user`). :param Auth auth: Auth object to check :param User user: User object to check :returns: Whether user has permission to edit this node. 
""" if not auth and not user: raise ValueError('Must pass either `auth` or `user`') if auth and user: raise ValueError('Cannot pass both `auth` and `user`') user = user or auth.user if auth: is_api_node = auth.api_node == self else: is_api_node = False return ( (user and self.has_permission(user, 'write')) or is_api_node ) def active_contributors(self, include=lambda n: True): for contrib in self.contributors: if contrib.is_active and include(contrib): yield contrib def is_admin_parent(self, user): if self.has_permission(user, 'admin', check_parent=False): return True if self.parent_node: return self.parent_node.is_admin_parent(user) return False def can_view(self, auth): if not auth and not self.is_public: return False return ( self.is_public or (auth.user and self.has_permission(auth.user, 'read')) or auth.private_key in self.private_link_keys_active or self.is_admin_parent(auth.user) ) def is_expanded(self, user=None): """Return if a user is has expanded the folder in the dashboard view. Must specify one of (`auth`, `user`). :param User user: User object to check :returns: Boolean if the folder is expanded. """ if user._id in self.expanded: return self.expanded[user._id] else: return False def expand(self, user=None): self.expanded[user._id] = True self.save() def collapse(self, user=None): self.expanded[user._id] = False self.save() def is_derived_from(self, other, attr): derived_from = getattr(self, attr) while True: if derived_from is None: return False if derived_from == other: return True derived_from = getattr(derived_from, attr) def is_fork_of(self, other): return self.is_derived_from(other, 'forked_from') def is_registration_of(self, other): return self.is_derived_from(other, 'registered_from') @property def forks(self): """List of forks of this node""" return list(self.node__forked.find(Q('is_deleted', 'eq', False) & Q('is_registration', 'ne', True))) def add_permission(self, user, permission, save=False): """Grant permission to a user. :param str permission: Permission to grant :param bool save: Save changes :raises: ValueError if user already has permission """ if user._id not in self.permissions: self.permissions[user._id] = [permission] else: if permission in self.permissions[user._id]: raise ValueError('User already has permission {0}'.format(permission)) self.permissions[user._id].append(permission) if save: self.save() def remove_permission(self, user, permission, save=False): """Revoke permission from a user. :param User user: User to revoke permission from :param str permission: Permission to revoke :param bool save: Save changes :raises: ValueError if user does not have permission """ try: self.permissions[user._id].remove(permission) except (KeyError, ValueError): raise ValueError('User does not have permission {0}'.format(permission)) if save: self.save() def clear_permission(self, user, save=False): """Clear all permissions for a user. :param User user: User to revoke permission from :param bool save: Save changes :raises: ValueError if user not in permissions """ try: self.permissions.pop(user._id) except KeyError: raise ValueError( 'User {0} not in permissions list for node {1}'.format( user._id, self._id, ) ) if save: self.save() def set_permissions(self, user, permissions, save=False): self.permissions[user._id] = permissions if save: self.save() def has_permission(self, user, permission, check_parent=True): """Check whether user has permission. 
:param User user: User to test :param str permission: Required permission :returns: User has required permission """ if user is None: logger.warn('User is ``None``.') return False if permission in self.permissions.get(user._id, []): return True if permission == 'read' and check_parent: return self.is_admin_parent(user) return False def has_permission_on_children(self, user, permission): """Checks if the given user has a given permission on any child nodes that are not registrations or deleted """ if self.has_permission(user, permission): return True for node in self.nodes: if not node.primary or node.is_deleted: continue if node.has_permission_on_children(user, permission): return True return False def has_addon_on_children(self, addon): """Checks if a given node has a specific addon on child nodes that are not registrations or deleted """ if self.has_addon(addon): return True for node in self.nodes: if not node.primary or node.is_deleted: continue if node.has_addon_on_children(addon): return True return False def get_permissions(self, user): """Get list of permissions for user. :param User user: User to check :returns: List of permissions :raises: ValueError if user not found in permissions """ return self.permissions.get(user._id, []) def adjust_permissions(self): for key in self.permissions.keys(): if key not in self.contributors: self.permissions.pop(key) @property def visible_contributors(self): return [ User.load(_id) for _id in self.visible_contributor_ids ] @property def parents(self): if self.parent_node: return [self.parent_node] + self.parent_node.parents return [] @property def admin_contributor_ids(self, contributors=None): contributor_ids = self.contributors._to_primary_keys() admin_ids = set() for parent in self.parents: admins = [ user for user, perms in parent.permissions.iteritems() if 'admin' in perms ] admin_ids.update(set(admins).difference(contributor_ids)) return admin_ids @property def admin_contributors(self): return sorted( [User.load(_id) for _id in self.admin_contributor_ids], key=lambda user: user.family_name, ) def get_visible(self, user): if not self.is_contributor(user): raise ValueError(u'User {0} not in contributors'.format(user)) return user._id in self.visible_contributor_ids def update_visible_ids(self, save=False): """Update the order of `visible_contributor_ids`. Updating on making a contributor visible is more efficient than recomputing order on accessing `visible_contributors`. 
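Example (illustrative)::

    node.update_visible_ids(save=True)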
""" self.visible_contributor_ids = [ contributor._id for contributor in self.contributors if contributor._id in self.visible_contributor_ids ] if save: self.save() def set_visible(self, user, visible, log=True, auth=None, save=False): if not self.is_contributor(user): raise ValueError(u'User {0} not in contributors'.format(user)) if visible and user._id not in self.visible_contributor_ids: self.visible_contributor_ids.append(user._id) self.update_visible_ids(save=False) elif not visible and user._id in self.visible_contributor_ids: if len(self.visible_contributor_ids) == 1: raise ValueError('Must have at least one visible contributor') self.visible_contributor_ids.remove(user._id) else: return message = ( NodeLog.MADE_CONTRIBUTOR_VISIBLE if visible else NodeLog.MADE_CONTRIBUTOR_INVISIBLE ) if log: self.add_log( message, params={ 'parent': self.parent_id, 'node': self._id, 'contributors': [user._id], }, auth=auth, save=False, ) if save: self.save() def can_comment(self, auth): if self.comment_level == 'public': return auth.logged_in and ( self.is_public or (auth.user and self.has_permission(auth.user, 'read')) ) return self.is_contributor(auth.user) def update(self, fields, auth=None, save=True): """Update the node with the given fields. :param dict fields: Dictionary of field_name:value pairs. :param Auth auth: Auth object for the user making the update. :param bool save: Whether to save after updating the object. """ if self.is_registration: raise NodeUpdateError(reason="Registered content cannot be updated") if not fields: # Bail out early if there are no fields to update return False values = {} for key, value in fields.iteritems(): if key not in self.WRITABLE_WHITELIST: continue # Title and description have special methods for logging purposes if key == 'title': self.set_title(title=value, auth=auth, save=False) elif key == 'description': self.set_description(description=value, auth=auth, save=False) elif key == 'is_public': self.set_privacy( Node.PUBLIC if value else Node.PRIVATE, auth=auth, log=True, save=False ) else: with warnings.catch_warnings(): try: # This is in place because historically projects and components # live on different ElasticSearch indexes, and at the time of Node.save # there is no reliable way to check what the old Node.category # value was. 
When the category changes it is possible to have duplicate/dead # search entries, so always delete the ES doc on category change # TODO: consolidate Node indexes into a single index, refactor search if key == 'category': self.delete_search_entry() ############### old_value = getattr(self, key) if old_value != value: values[key] = { 'old': old_value, 'new': value, } setattr(self, key, value) except AttributeError: raise NodeUpdateError(reason="Invalid value for attribute '{0}'".format(key), key=key) except warnings.Warning: raise NodeUpdateError(reason="Attribute '{0}' doesn't exist on the Node class".format(key), key=key) if save: updated = self.save() else: updated = [] for key in values: values[key]['new'] = getattr(self, key) if values: self.add_log( NodeLog.UPDATED_FIELDS, params={ 'node': self._id, 'updated_fields': { key: { 'old': values[key]['old'], 'new': values[key]['new'] } for key in values } }, auth=auth) return updated def save(self, *args, **kwargs): update_piwik = kwargs.pop('update_piwik', True) self.adjust_permissions() first_save = not self._is_loaded if first_save and self.is_dashboard: existing_dashboards = self.creator.node__contributed.find( Q('is_dashboard', 'eq', True) ) if existing_dashboards.count() > 0: raise NodeStateError("Only one dashboard allowed per user.") is_original = not self.is_registration and not self.is_fork if 'suppress_log' in kwargs.keys(): suppress_log = kwargs['suppress_log'] del kwargs['suppress_log'] else: suppress_log = False saved_fields = super(Node, self).save(*args, **kwargs) if first_save and is_original and not suppress_log: # TODO: This logic also exists in self.use_as_template() for addon in settings.ADDONS_AVAILABLE: if 'node' in addon.added_default: self.add_addon(addon.short_name, auth=None, log=False) # Define log fields for non-component project log_action = NodeLog.PROJECT_CREATED log_params = { 'node': self._primary_key, } if getattr(self, 'parent', None): # Append log to parent self.parent.nodes.append(self) self.parent.save() log_params.update({'parent_node': self.parent._primary_key}) # Add log with appropriate fields self.add_log( log_action, params=log_params, auth=Auth(user=self.creator), log_date=self.date_created, save=True, ) # Only update Solr if at least one stored field has changed; private nodes # are only updated when their privacy setting has just changed need_update = bool(self.SOLR_UPDATE_FIELDS.intersection(saved_fields)) if not self.is_public: if first_save or 'is_public' not in saved_fields: need_update = False if self.is_folder or self.archiving: need_update = False if need_update: self.update_search() # This method checks what has changed. if settings.PIWIK_HOST and update_piwik: piwik_tasks.update_node(self._id, saved_fields) # Return expected value for StoredObject::save return saved_fields ###################################### # Methods that return a new instance # ###################################### def use_as_template(self, auth, changes=None, top_level=True): """Create a new project, using an existing project as a template. :param auth: The user to be assigned as creator :param changes: A dictionary of changes, keyed by node id, which override the attributes of the template project or its children. :return: The `Node` instance created. """ changes = changes or dict() # build the dict of attributes to change for the new node try: attributes = changes[self._id] # TODO: explicitly define attributes which may be changed.
except (AttributeError, KeyError): attributes = dict() new = self.clone() # clear permissions, which are not cleared by the clone method new.permissions = {} new.visible_contributor_ids = [] # Clear quasi-foreign fields new.wiki_pages_current = {} new.wiki_pages_versions = {} new.wiki_private_uuids = {} new.file_guid_to_share_uuids = {} # set attributes which may be overridden by `changes` new.is_public = False new.description = None # apply `changes` for attr, val in attributes.iteritems(): setattr(new, attr, val) # set attributes which may NOT be overridden by `changes` new.creator = auth.user new.template_node = self new.add_contributor(contributor=auth.user, permissions=CREATOR_PERMISSIONS, log=False, save=False) new.is_fork = False new.is_registration = False new.piwik_site_id = None # If that title hasn't been changed, apply the default prefix (once) if (new.title == self.title and top_level and language.TEMPLATED_FROM_PREFIX not in new.title): new.title = ''.join((language.TEMPLATED_FROM_PREFIX, new.title, )) # Slight hack - date_created is a read-only field. new._fields['date_created'].__set__( new, datetime.datetime.utcnow(), safe=True ) new.save(suppress_log=True) # Log the creation new.add_log( NodeLog.CREATED_FROM, params={ 'node': new._primary_key, 'template_node': { 'id': self._primary_key, 'url': self.url, }, }, auth=auth, log_date=new.date_created, save=False, ) # add mandatory addons # TODO: This logic also exists in self.save() for addon in settings.ADDONS_AVAILABLE: if 'node' in addon.added_default: new.add_addon(addon.short_name, auth=None, log=False) # deal with the children of the node, if any new.nodes = [ x.use_as_template(auth, changes, top_level=False) for x in self.nodes if x.can_view(auth) ] new.save() return new ############ # Pointers # ############ def add_pointer(self, node, auth, save=True): """Add a pointer to a node. :param Node node: Node to add :param Auth auth: Consolidated authorization :param bool save: Save changes :return: Created pointer """ # Fail if node already in nodes / pointers. Note: cast node and node # to primary keys to test for conflicts with both nodes and pointers # contained in `self.nodes`. if node._id in self.node_ids: raise ValueError( 'Pointer to node {0} already in list'.format(node._id) ) if self.is_registration: raise NodeStateError('Cannot add a pointer to a registration') # If a folder, prevent more than one pointer to that folder. This will prevent infinite loops on the Dashboard. # Also, no pointers to the dashboard project, which could cause loops as well. already_pointed = node.pointed if node.is_folder and len(already_pointed) > 0: raise ValueError( 'Pointer to folder {0} already exists. Only one pointer to any given folder allowed'.format(node._id) ) if node.is_dashboard: raise ValueError( 'Pointer to dashboard ({0}) not allowed.'.format(node._id) ) # Append pointer pointer = Pointer(node=node) pointer.save() self.nodes.append(pointer) # Add log self.add_log( action=NodeLog.POINTER_CREATED, params={ 'parent_node': self.parent_id, 'node': self._primary_key, 'pointer': { 'id': pointer.node._id, 'url': pointer.node.url, 'title': pointer.node.title, 'category': pointer.node.category, }, }, auth=auth, save=False, ) # Optionally save changes if save: self.save() return pointer def rm_pointer(self, pointer, auth): """Remove a pointer. 
:param Pointer pointer: Pointer to remove :param Auth auth: Consolidated authorization """ if pointer not in self.nodes: raise ValueError('Node link does not belong to the requested node.') # Remove `Pointer` object; will also remove self from `nodes` list of # parent node Pointer.remove_one(pointer) # Add log self.add_log( action=NodeLog.POINTER_REMOVED, params={ 'parent_node': self.parent_id, 'node': self._primary_key, 'pointer': { 'id': pointer.node._id, 'url': pointer.node.url, 'title': pointer.node.title, 'category': pointer.node.category, }, }, auth=auth, save=False, ) @property def node_ids(self): return [ node._id if node.primary else node.node._id for node in self.nodes ] @property def nodes_primary(self): return [ node for node in self.nodes if node.primary ] def node_and_primary_descendants(self): """Return an iterator for a node and all of its primary (non-pointer) descendants. :param node Node: target Node """ return itertools.chain([self], self.get_descendants_recursive(lambda n: n.primary)) @property def depth(self): return len(self.parents) def next_descendants(self, auth, condition=lambda auth, node: True): """ Recursively find the first set of descedants under a given node that meet a given condition returns a list of [(node, [children]), ...] """ ret = [] for node in self.nodes: if condition(auth, node): # base case ret.append((node, [])) else: ret.append((node, node.next_descendants(auth, condition))) ret = [item for item in ret if item[1] or condition(auth, item[0])] # prune empty branches return ret def get_descendants_recursive(self, include=lambda n: True): for node in self.nodes: if include(node): yield node if node.primary: for descendant in node.get_descendants_recursive(include): if include(descendant): yield descendant def get_aggregate_logs_queryset(self, auth): ids = [self._id] + [n._id for n in self.get_descendants_recursive() if n.can_view(auth)] query = Q('__backrefs.logged.node.logs', 'in', ids) return NodeLog.find(query).sort('-_id') @property def nodes_pointer(self): return [ node for node in self.nodes if not node.primary ] @property def has_pointers_recursive(self): """Recursively checks whether the current node or any of its nodes contains a pointer. """ if self.nodes_pointer: return True for node in self.nodes_primary: if node.has_pointers_recursive: return True return False @property def pointed(self): return getattr(self, '_pointed', []) def pointing_at(self, pointed_node_id): """This node is pointed at another node. :param Node pointed_node_id: The node id of the node being pointed at. :return: pointer_id """ for pointer in self.nodes_pointer: node_id = pointer.node._id if node_id == pointed_node_id: return pointer._id return None def get_points(self, folders=False, deleted=False, resolve=True): ret = [] for each in self.pointed: pointer_node = get_pointer_parent(each) if not folders and pointer_node.is_folder: continue if not deleted and pointer_node.is_deleted: continue if resolve: ret.append(pointer_node) else: ret.append(each) return ret def resolve(self): return self def fork_pointer(self, pointer, auth, save=True): """Replace a pointer with a fork. If the pointer points to a project, fork the project and replace the pointer with a new pointer pointing to the fork. If the pointer points to a component, fork the component and add it to the current node. 
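Example (illustrative)::

    forked_child = node.fork_pointer(pointer, auth=auth)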
:param Pointer pointer: :param Auth auth: :param bool save: :return: Forked node """ # Fail if pointer not contained in `nodes` try: index = self.nodes.index(pointer) except ValueError: raise ValueError('Pointer {0} not in list'.format(pointer._id)) # Get pointed node node = pointer.node # Fork into current node and replace pointer with forked component forked = node.fork_node(auth) if forked is None: raise ValueError('Could not fork node') self.nodes[index] = forked # Add log self.add_log( NodeLog.POINTER_FORKED, params={ 'parent_node': self.parent_id, 'node': self._primary_key, 'pointer': { 'id': pointer.node._id, 'url': pointer.node.url, 'title': pointer.node.title, 'category': pointer.node.category, }, }, auth=auth, save=False, ) # Optionally save changes if save: self.save() # Garbage-collect pointer. Note: Must save current node before # removing pointer, else remove will fail when trying to remove # backref from self to pointer. Pointer.remove_one(pointer) # Return forked content return forked def get_recent_logs(self, n=10): """Return a list of the n most recent logs, in reverse chronological order. :param int n: Number of logs to retrieve """ # reversed() returns an iterator, which cannot be sliced; take the last # n logs, then reverse them return list(self.logs)[-n:][::-1] @property def date_modified(self): '''The most recent datetime when this node was modified, based on the logs. ''' try: return self.logs[-1].date except IndexError: return self.date_created def set_title(self, title, auth, save=False): """Set the title of this Node and log it. :param str title: The new title. :param auth: All the auth information including user, API key. """ # Called so validation does not have to wait until save. validate_title(title) original_title = self.title new_title = sanitize.strip_html(title) # Title hasn't changed after sanitization, bail out if original_title == new_title: return False self.title = new_title self.add_log( action=NodeLog.EDITED_TITLE, params={ 'parent_node': self.parent_id, 'node': self._primary_key, 'title_new': self.title, 'title_original': original_title, }, auth=auth, save=False, ) if save: self.save() return None def set_description(self, description, auth, save=False): """Set the description and log the event. :param str description: The new description :param auth: All the auth information including user, API key. :param bool save: Save self after updating.
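Example (illustrative)::

    node.set_description('A short summary of the project', auth=auth, save=True)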
""" original = self.description new_description = sanitize.strip_html(description) if original == new_description: return False self.description = new_description self.add_log( action=NodeLog.EDITED_DESCRIPTION, params={ 'parent_node': self.parent_id, 'node': self._primary_key, 'description_new': self.description, 'description_original': original }, auth=auth, save=False, ) if save: self.save() return None def update_search(self): from website import search try: search.search.update_node(self) except search.exceptions.SearchUnavailableError as e: logger.exception(e) log_exception() @classmethod def bulk_update_search(cls, nodes): from website import search try: serialize = functools.partial(search.search.update_node, bulk=True) search.search.bulk_update_nodes(serialize, nodes) except search.exceptions.SearchUnavailableError as e: logger.exception(e) log_exception() def delete_search_entry(self): from website import search try: search.search.delete_node(self) except search.exceptions.SearchUnavailableError as e: logger.exception(e) log_exception() def delete_registration_tree(self, save=False): self.is_deleted = True if not getattr(self.embargo, 'for_existing_registration', False): self.registered_from = None if save: self.save() self.update_search() for child in self.nodes_primary: child.delete_registration_tree(save=save) def remove_node(self, auth, date=None): """Marks a node as deleted. TODO: Call a hook on addons Adds a log to the parent node if applicable :param auth: an instance of :class:`Auth`. :param date: Date node was removed :type date: `datetime.datetime` or `None` """ # TODO: rename "date" param - it's shadowing a global if self.is_dashboard: raise NodeStateError("Dashboards may not be deleted.") if not self.can_edit(auth): raise PermissionsError('{0!r} does not have permission to modify this {1}'.format(auth.user, self.category or 'node')) #if this is a folder, remove all the folders that this is pointing at. if self.is_folder: for pointed in self.nodes_pointer: if pointed.node.is_folder: pointed.node.remove_node(auth=auth) if [x for x in self.nodes_primary if not x.is_deleted]: raise NodeStateError("Any child components must be deleted prior to deleting this project.") # After delete callback for addon in self.get_addons(): message = addon.after_delete(self, auth.user) if message: status.push_status_message(message, kind='info', trust=False) log_date = date or datetime.datetime.utcnow() # Add log to parent if self.node__parent: self.node__parent[0].add_log( NodeLog.NODE_REMOVED, params={ 'project': self._primary_key, }, auth=auth, log_date=log_date, save=True, ) else: self.add_log( NodeLog.PROJECT_DELETED, params={ 'project': self._primary_key, }, auth=auth, log_date=log_date, save=True, ) self.is_deleted = True self.deleted_date = date self.save() auth_signals.node_deleted.send(self) return True def fork_node(self, auth, title='Fork of '): """Recursively fork a node. 
:param Auth auth: Consolidated authorization :param str title: Optional text to prepend to forked title :return: Forked node """ user = auth.user # Non-contributors can't fork private nodes if not (self.is_public or self.has_permission(user, 'read')): raise PermissionsError('{0!r} does not have permission to fork node {1!r}'.format(user, self._id)) when = datetime.datetime.utcnow() original = self.load(self._primary_key) if original.is_deleted: raise NodeStateError('Cannot fork deleted node.') # Note: Cloning a node copies its `wiki_pages_current` and # `wiki_pages_versions` fields, but does not clone the underlying # database objects to which these dictionaries refer. This means that # the cloned node must pass itself to its wiki objects to build the # correct URLs to that content. forked = original.clone() forked.logs = self.logs forked.tags = self.tags # Recursively fork child nodes for node_contained in original.nodes: if not node_contained.is_deleted: forked_node = None try: # Catch the potential PermissionsError above forked_node = node_contained.fork_node(auth=auth, title='') except PermissionsError: pass # If this exception is thrown, omit the node from the result set if forked_node is not None: forked.nodes.append(forked_node) forked.title = title + forked.title forked.is_fork = True forked.is_registration = False forked.forked_date = when forked.forked_from = original forked.creator = user forked.piwik_site_id = None # Forks default to private status forked.is_public = False # Clear permissions before adding users forked.permissions = {} forked.visible_contributor_ids = [] forked.add_contributor( contributor=user, permissions=CREATOR_PERMISSIONS, log=False, save=False ) forked.add_log( action=NodeLog.NODE_FORKED, params={ 'parent_node': original.parent_id, 'node': original._primary_key, 'registration': forked._primary_key, }, auth=auth, log_date=when, save=False, ) forked.save() # After fork callback for addon in original.get_addons(): _, message = addon.after_fork(original, forked, user) if message: status.push_status_message(message, kind='info', trust=True) return forked def register_node(self, schema, auth, template, data, parent=None): """Make a frozen copy of a node. :param schema: Schema object :param auth: All the auth information including user, API key. :param template: Template name :param data: Form data :param parent Node: parent registration of registration to be created """ # NOTE: Admins can register child nodes even if they don't have write access to them if not self.can_edit(auth=auth) and not self.is_admin_parent(user=auth.user): raise PermissionsError( 'User {} does not have permission ' 'to register this node'.format(auth.user._id) ) if self.is_folder: raise NodeStateError("Folders may not be registered") template = urllib.unquote_plus(template) template = to_mongo(template) when = datetime.datetime.utcnow() original = self.load(self._primary_key) # Note: Cloning a node copies its `wiki_pages_current` and # `wiki_pages_versions` fields, but does not clone the underlying # database objects to which these dictionaries refer. This means that # the cloned node must pass itself to its wiki objects to build the # correct URLs to that content.
if original.is_deleted: raise NodeStateError('Cannot register deleted node.') registered = original.clone() registered.is_registration = True registered.registered_date = when registered.registered_user = auth.user registered.registered_schema = schema registered.registered_from = original if not registered.registered_meta: registered.registered_meta = {} registered.registered_meta[template] = data registered.contributors = self.contributors registered.forked_from = self.forked_from registered.creator = self.creator registered.logs = self.logs registered.tags = self.tags registered.piwik_site_id = None registered.save() if parent: registered.parent_node = parent # After register callback for addon in original.get_addons(): _, message = addon.after_register(original, registered, auth.user) if message: status.push_status_message(message, kind='info', trust=False) for node_contained in original.nodes: if not node_contained.is_deleted: child_registration = node_contained.register_node( schema, auth, template, data, parent=registered ) if child_registration and not child_registration.primary: registered.nodes.append(child_registration) registered.save() if settings.ENABLE_ARCHIVER: project_signals.after_create_registration.send(self, dst=registered, user=auth.user) return registered def remove_tag(self, tag, auth, save=True): if tag in self.tags: self.tags.remove(tag) self.add_log( action=NodeLog.TAG_REMOVED, params={ 'parent_node': self.parent_id, 'node': self._primary_key, 'tag': tag, }, auth=auth, save=False, ) if save: self.save() def add_tag(self, tag, auth, save=True, log=True): if tag not in self.tags: new_tag = Tag.load(tag) if not new_tag: new_tag = Tag(_id=tag) new_tag.save() self.tags.append(new_tag) if log: self.add_log( action=NodeLog.TAG_ADDED, params={ 'parent_node': self.parent_id, 'node': self._primary_key, 'tag': tag, }, auth=auth, save=False, ) if save: self.save() def add_log(self, action, params, auth, foreign_user=None, log_date=None, save=True): user = auth.user if auth else None params['node'] = params.get('node') or params.get('project') log = NodeLog( action=action, user=user, foreign_user=foreign_user, params=params, ) if log_date: log.date = log_date log.save() self.logs.append(log) if save: self.save() if user: increment_user_activity_counters(user._primary_key, action, log.date) return log @property def url(self): return '/{}/'.format(self._primary_key) def web_url_for(self, view_name, _absolute=False, _guid=False, *args, **kwargs): return web_url_for(view_name, pid=self._primary_key, _absolute=_absolute, _guid=_guid, *args, **kwargs) def api_url_for(self, view_name, _absolute=False, *args, **kwargs): return api_url_for(view_name, pid=self._primary_key, _absolute=_absolute, *args, **kwargs) @property def absolute_url(self): if not self.url: logger.error('Node {0} has a parent that is not a project'.format(self._id)) return None return urlparse.urljoin(settings.DOMAIN, self.url) @property def display_absolute_url(self): url = self.absolute_url if url is not None: return re.sub(r'https?:', '', url).strip('/') @property def api_v2_url(self): return reverse('nodes:node-detail', kwargs={'node_id': self._id}) @property def absolute_api_v2_url(self): return absolute_reverse('nodes:node-detail', kwargs={'node_id': self._id}) # used by django and DRF def get_absolute_url(self): return self.absolute_api_v2_url @property def api_url(self): if not self.url: logger.error('Node {0} has a parent that is not a project'.format(self._id)) return None return 
'/api/v1{0}'.format(self.deep_url) @property def deep_url(self): return '/project/{}/'.format(self._primary_key) @property def csl(self): # formats node information into CSL format for citation parsing """a dict in CSL-JSON schema For details on this schema, see: https://github.com/citation-style-language/schema#csl-json-schema """ csl = { 'id': self._id, 'title': sanitize.unescape_entities(self.title), 'author': [ contributor.csl_name # method in auth/model.py which parses the names of authors for contributor in self.visible_contributors ], 'publisher': 'Open Science Framework', 'type': 'webpage', 'URL': self.display_absolute_url, } doi = self.get_identifier_value('doi') if doi: csl['DOI'] = doi if self.logs: csl['issued'] = datetime_to_csl(self.logs[-1].date) return csl def author_list(self, and_delim='&'): author_names = [ author.biblio_name for author in self.visible_contributors if author ] if len(author_names) < 2: return ' {0} '.format(and_delim).join(author_names) if len(author_names) > 7: author_names = author_names[:7] author_names.append('et al.') return ', '.join(author_names) return u'{0}, {1} {2}'.format( ', '.join(author_names[:-1]), and_delim, author_names[-1] ) @property def templated_list(self): return [ x for x in self.node__template_node if not x.is_deleted ] @property def parent_node(self): """The parent node, if it exists, otherwise ``None``. Note: this property is named `parent_node` rather than `parent` to avoid a conflict with the `parent` back-reference created by the `nodes` field on this schema. """ try: if not self.node__parent[0].is_deleted: return self.node__parent[0] except IndexError: pass return None @parent_node.setter def parent_node(self, parent): parent.nodes.append(self) parent.save() @property def root(self): if self.parent_node: return self.parent_node.root else: return self @property def archiving(self): job = self.archive_job return job and not job.done and not job.archive_tree_finished() @property def archive_job(self): return self.archivejob__active[0] if self.archivejob__active else None @property def registrations(self): return self.node__registrations.find(Q('archiving', 'eq', False)) @property def watch_url(self): return os.path.join(self.api_url, "watch/") @property def parent_id(self): if self.node__parent: return self.node__parent[0]._primary_key return None @property def forked_from_id(self): if self.forked_from: return self.forked_from._id return None @property def project_or_component(self): return 'project' if self.category == 'project' else 'component' def is_contributor(self, user): return ( user is not None and ( user._id in self.contributors ) ) def add_addon(self, addon_name, auth, log=True, *args, **kwargs): """Add an add-on to the node. Do nothing if the addon is already enabled. :param str addon_name: Name of add-on :param Auth auth: Consolidated authorization object :param bool log: Add a log after adding the add-on :return: A boolean, whether the addon was added """ ret = AddonModelMixin.add_addon(self, addon_name, auth=auth, *args, **kwargs) if ret and log: config = settings.ADDONS_AVAILABLE_DICT[addon_name] self.add_log( action=NodeLog.ADDON_ADDED, params={ 'project': self.parent_id, 'node': self._primary_key, 'addon': config.full_name, }, auth=auth, save=False, ) self.save() # TODO: here, or outside the conditional? @mambocab return ret def delete_addon(self, addon_name, auth, _force=False): """Delete an add-on from the node. 
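Example (illustrative; 'wiki' stands in for any available add-on short name)::

    node.add_addon('wiki', auth=auth)
    node.delete_addon('wiki', auth=auth)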
:param str addon_name: Name of add-on :param Auth auth: Consolidated authorization object :param bool _force: For migration testing ONLY. Do not set to True in the application, or else projects will be allowed to delete mandatory add-ons! :return bool: Add-on was deleted """ ret = super(Node, self).delete_addon(addon_name, auth, _force) if ret: config = settings.ADDONS_AVAILABLE_DICT[addon_name] self.add_log( action=NodeLog.ADDON_REMOVED, params={ 'project': self.parent_id, 'node': self._primary_key, 'addon': config.full_name, }, auth=auth, save=False, ) self.save() # TODO: save here or outside the conditional? @mambocab return ret def callback(self, callback, recursive=False, *args, **kwargs): """Invoke callbacks of attached add-ons and collect messages. :param str callback: Name of callback method to invoke :param bool recursive: Apply callback recursively over nodes :return list: List of callback messages """ messages = [] for addon in self.get_addons(): method = getattr(addon, callback) message = method(self, *args, **kwargs) if message: messages.append(message) if recursive: for child in self.nodes: if not child.is_deleted: messages.extend( child.callback( callback, recursive, *args, **kwargs ) ) return messages def replace_contributor(self, old, new): for i, contrib in enumerate(self.contributors): if contrib._primary_key == old._primary_key: self.contributors[i] = new # Remove unclaimed record for the project if self._primary_key in old.unclaimed_records: del old.unclaimed_records[self._primary_key] old.save() for permission in self.get_permissions(old): self.add_permission(new, permission) self.permissions.pop(old._id) if old._id in self.visible_contributor_ids: self.visible_contributor_ids[self.visible_contributor_ids.index(old._id)] = new._id return True return False def remove_contributor(self, contributor, auth, log=True): """Remove a contributor from this node. :param contributor: User object, the contributor to be removed :param auth: All the auth information including user, API key. 
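Example (illustrative)::

    node.remove_contributor(departing_user, auth=auth)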
""" # remove unclaimed record if necessary if self._primary_key in contributor.unclaimed_records: del contributor.unclaimed_records[self._primary_key] self.contributors.remove(contributor._id) self.clear_permission(contributor) if contributor._id in self.visible_contributor_ids: self.visible_contributor_ids.remove(contributor._id) if not self.visible_contributor_ids: return False # Node must have at least one registered admin user # TODO: Move to validator or helper admins = [ user for user in self.contributors if self.has_permission(user, 'admin') and user.is_registered ] if not admins: return False # Clear permissions for removed user self.permissions.pop(contributor._id, None) # After remove callback for addon in self.get_addons(): message = addon.after_remove_contributor(self, contributor, auth) if message: status.push_status_message(message, kind='info', trust=True) if log: self.add_log( action=NodeLog.CONTRIB_REMOVED, params={ 'project': self.parent_id, 'node': self._primary_key, 'contributors': [contributor._id], }, auth=auth, save=False, ) self.save() #send signal to remove this user from project subscriptions auth_signals.contributor_removed.send(contributor, node=self) return True def remove_contributors(self, contributors, auth=None, log=True, save=False): results = [] removed = [] for contrib in contributors: outcome = self.remove_contributor( contributor=contrib, auth=auth, log=False, ) results.append(outcome) removed.append(contrib._id) if log: self.add_log( action=NodeLog.CONTRIB_REMOVED, params={ 'project': self.parent_id, 'node': self._primary_key, 'contributors': removed, }, auth=auth, save=False, ) if save: self.save() if False in results: return False return True def update_contributor(self, user, permission, visible, auth, save=False): """ TODO: this method should be updated as a replacement for the main loop of Node#manage_contributors. Right now there are redundancies, but to avoid major feature creep this will not be included as this time. Also checks to make sure unique admin is not removing own admin privilege. """ if not self.has_permission(auth.user, ADMIN): raise PermissionsError("Only admins can modify contributor permissions") permissions = expand_permissions(permission) or DEFAULT_CONTRIBUTOR_PERMISSIONS admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active] if not len(admins) > 1: # has only one admin admin = admins[0] if admin == user and ADMIN not in permissions: raise NodeStateError('{} is the only admin.'.format(user.fullname)) if user not in self.contributors: raise ValueError( 'User {0} not in contributors'.format(user.fullname) ) if permission: permissions = expand_permissions(permission) if set(permissions) != set(self.get_permissions(user)): self.set_permissions(user, permissions, save=save) permissions_changed = { user._id: permissions } self.add_log( action=NodeLog.PERMISSIONS_UPDATED, params={ 'project': self.parent_id, 'node': self._id, 'contributors': permissions_changed, }, auth=auth, save=save ) with TokuTransaction(): if ['read'] in permissions_changed.values(): project_signals.write_permissions_revoked.send(self) if visible is not None: self.set_visible(user, visible, auth=auth, save=save) self.update_visible_ids() def manage_contributors(self, user_dicts, auth, save=False): """Reorder and remove contributors. 
:param list user_dicts: Ordered list of contributors represented as dictionaries of the form: {'id': <id>, 'permission': <One of 'read', 'write', 'admin'>, 'visible': bool} :param Auth auth: Consolidated authentication information :param bool save: Save changes :raises: ValueError if any users in `users` not in contributors or if no admin contributors remaining """ with TokuTransaction(): users = [] user_ids = [] permissions_changed = {} visibility_removed = [] to_retain = [] to_remove = [] for user_dict in user_dicts: user = User.load(user_dict['id']) if user is None: raise ValueError('User not found') if user not in self.contributors: raise ValueError( 'User {0} not in contributors'.format(user.fullname) ) permissions = expand_permissions(user_dict['permission']) if set(permissions) != set(self.get_permissions(user)): self.set_permissions(user, permissions, save=False) permissions_changed[user._id] = permissions # visible must be added before removed to ensure they are validated properly if user_dict['visible']: self.set_visible(user, visible=True, auth=auth) else: visibility_removed.append(user) users.append(user) user_ids.append(user_dict['id']) for user in visibility_removed: self.set_visible(user, visible=False, auth=auth) for user in self.contributors: if user._id in user_ids: to_retain.append(user) else: to_remove.append(user) # TODO: Move to validator or helper @jmcarp admins = [ user for user in users if self.has_permission(user, 'admin') and user.is_registered ] if users is None or not admins: raise ValueError( 'Must have at least one registered admin contributor' ) if to_retain != users: self.add_log( action=NodeLog.CONTRIB_REORDERED, params={ 'project': self.parent_id, 'node': self._id, 'contributors': [ user._id for user in users ], }, auth=auth, save=False, ) if to_remove: self.remove_contributors(to_remove, auth=auth, save=False) self.contributors = users if permissions_changed: self.add_log( action=NodeLog.PERMISSIONS_UPDATED, params={ 'project': self.parent_id, 'node': self._id, 'contributors': permissions_changed, }, auth=auth, save=False, ) # Update list of visible IDs self.update_visible_ids() if save: self.save() with TokuTransaction(): if to_remove or permissions_changed and ['read'] in permissions_changed.values(): project_signals.write_permissions_revoked.send(self) def add_contributor(self, contributor, permissions=None, visible=True, auth=None, log=True, save=False): """Add a contributor to the project. 
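Example (illustrative)::

    node.add_contributor(new_user, permissions=['read', 'write'], auth=auth, save=True)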
:param User contributor: The contributor to be added :param list permissions: Permissions to grant to the contributor :param bool visible: Contributor is visible in project dashboard :param Auth auth: All the auth information including user, API key :param bool log: Add log to self :param bool save: Save after adding contributor :returns: Whether contributor was added """ MAX_RECENT_LENGTH = 15 # If user is merged into another account, use master account contrib_to_add = contributor.merged_by if contributor.is_merged else contributor if contrib_to_add not in self.contributors: self.contributors.append(contrib_to_add) if visible: self.set_visible(contrib_to_add, visible=True, log=False) # Add default contributor permissions permissions = permissions or DEFAULT_CONTRIBUTOR_PERMISSIONS for permission in permissions: self.add_permission(contrib_to_add, permission, save=False) # Add contributor to recently added list for user if auth is not None: user = auth.user if contrib_to_add in user.recently_added: user.recently_added.remove(contrib_to_add) user.recently_added.insert(0, contrib_to_add) while len(user.recently_added) > MAX_RECENT_LENGTH: user.recently_added.pop() if log: self.add_log( action=NodeLog.CONTRIB_ADDED, params={ 'project': self.parent_id, 'node': self._primary_key, 'contributors': [contrib_to_add._primary_key], }, auth=auth, save=False, ) if save: self.save() project_signals.contributor_added.send(self, contributor=contributor) return True #Permissions must be overridden if changed when contributor is added to parent he/she is already on a child of. elif contrib_to_add in self.contributors and permissions is not None: self.set_permissions(contrib_to_add, permissions) if save: self.save() return False else: return False def add_contributors(self, contributors, auth=None, log=True, save=False): """Add multiple contributors :param list contributors: A list of dictionaries of the form: { 'user': <User object>, 'permissions': <Permissions list, e.g. ['read', 'write']>, 'visible': <Boolean indicating whether or not user is a bibliographic contributor> } :param auth: All the auth information including user, API key. :param log: Add log to self :param save: Save after adding contributor """ for contrib in contributors: self.add_contributor( contributor=contrib['user'], permissions=contrib['permissions'], visible=contrib['visible'], auth=auth, log=False, save=False, ) if log and contributors: self.add_log( action=NodeLog.CONTRIB_ADDED, params={ 'project': self.parent_id, 'node': self._primary_key, 'contributors': [ contrib['user']._id for contrib in contributors ], }, auth=auth, save=False, ) if save: self.save() def add_unregistered_contributor(self, fullname, email, auth, permissions=None, save=False): """Add a non-registered contributor to the project. :param str fullname: The full name of the person. :param str email: The email address of the person. :param Auth auth: Auth object for the user adding the contributor. :returns: The added contributor :raises: DuplicateEmailError if user with given email is already in the database. """ # Create a new user record contributor = User.create_unregistered(fullname=fullname, email=email) contributor.add_unclaimed_record(node=self, referrer=auth.user, given_name=fullname, email=email) try: contributor.save() except ValidationValueError: # User with same email already exists contributor = get_user(email=email) # Unregistered users may have multiple unclaimed records, so # only raise error if user is registered. 
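# A registered user, or one who is already a contributor, is a genuine
# duplicate, so re-raise the ValidationValueError from above: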
if contributor.is_registered or self.is_contributor(contributor): raise contributor.add_unclaimed_record(node=self, referrer=auth.user, given_name=fullname, email=email) contributor.save() self.add_contributor( contributor, permissions=permissions, auth=auth, log=True, save=False, ) self.save() return contributor def set_privacy(self, permissions, auth=None, log=True, save=True, meeting_creation=False): """Set the permissions for this node. Also, based on meeting_creation, queues an email to the user about abilities of public projects. :param permissions: A string, either 'public' or 'private' :param auth: All the auth information including user, API key. :param bool log: Whether to add a NodeLog for the privacy change. :param bool meeting_creation: Whether this was created due to a meetings email. """ if auth and not self.has_permission(auth.user, ADMIN): raise PermissionsError('Must be an admin to change privacy settings.') if permissions == 'public' and not self.is_public: if self.is_registration: if self.is_pending_embargo: raise NodeStateError("A registration with an unapproved embargo cannot be made public.") if self.embargo_end_date and not self.is_pending_embargo: self.embargo.state = Embargo.REJECTED self.embargo.save() self.is_public = True elif permissions == 'private' and self.is_public: if self.is_registration and not self.is_pending_embargo: raise NodeStateError("Public registrations must be retracted, not made private.") else: self.is_public = False else: return False # After set permissions callback for addon in self.get_addons(): message = addon.after_set_privacy(self, permissions) if message: status.push_status_message(message, kind='info', trust=False) if log: action = NodeLog.MADE_PUBLIC if permissions == 'public' else NodeLog.MADE_PRIVATE self.add_log( action=action, params={ 'project': self.parent_id, 'node': self._primary_key, }, auth=auth, save=False, ) if save: self.save() if auth and permissions == 'public': project_signals.privacy_set_public.send(auth.user, node=self, meeting_creation=meeting_creation) return True def admin_public_wiki(self, user): return ( self.has_addon('wiki') and self.has_permission(user, 'admin') and self.is_public ) def include_wiki_settings(self, user): """Check if node meets requirements to make publicly editable.""" return ( self.admin_public_wiki(user) or any( each.admin_public_wiki(user) for each in self.get_descendants_recursive() ) ) # TODO: Move to wiki add-on def get_wiki_page(self, name=None, version=None, id=None): from website.addons.wiki.model import NodeWikiPage if name: name = (name or '').strip() key = to_mongo_key(name) try: if version and (isinstance(version, int) or version.isdigit()): id = self.wiki_pages_versions[key][int(version) - 1] elif version == 'previous': id = self.wiki_pages_versions[key][-2] elif version == 'current' or version is None: id = self.wiki_pages_current[key] else: return None except (KeyError, IndexError): return None return NodeWikiPage.load(id) # TODO: Move to wiki add-on def update_node_wiki(self, name, content, auth): """Update the node's wiki page with new content. :param name: A string, the page's name, e.g. ``"home"``. :param content: A string, the posted content. :param auth: All the auth information including user, API key.
""" from website.addons.wiki.model import NodeWikiPage name = (name or '').strip() key = to_mongo_key(name) if key not in self.wiki_pages_current: if key in self.wiki_pages_versions: version = len(self.wiki_pages_versions[key]) + 1 else: version = 1 else: current = NodeWikiPage.load(self.wiki_pages_current[key]) current.is_current = False version = current.version + 1 current.save() new_page = NodeWikiPage( page_name=name, version=version, user=auth.user, is_current=True, node=self, content=content ) new_page.save() # check if the wiki page already exists in versions (existed once and is now deleted) if key not in self.wiki_pages_versions: self.wiki_pages_versions[key] = [] self.wiki_pages_versions[key].append(new_page._primary_key) self.wiki_pages_current[key] = new_page._primary_key self.add_log( action=NodeLog.WIKI_UPDATED, params={ 'project': self.parent_id, 'node': self._primary_key, 'page': new_page.page_name, 'page_id': new_page._primary_key, 'version': new_page.version, }, auth=auth, log_date=new_page.date, save=False, ) self.save() # TODO: Move to wiki add-on def rename_node_wiki(self, name, new_name, auth): """Rename the node's wiki page with new name. :param name: A string, the page's name, e.g. ``"My Page"``. :param new_name: A string, the new page's name, e.g. ``"My Renamed Page"``. :param auth: All the auth information including user, API key. """ # TODO: Fix circular imports from website.addons.wiki.exceptions import ( PageCannotRenameError, PageConflictError, PageNotFoundError, ) name = (name or '').strip() key = to_mongo_key(name) new_name = (new_name or '').strip() new_key = to_mongo_key(new_name) page = self.get_wiki_page(name) if key == 'home': raise PageCannotRenameError('Cannot rename wiki home page') if not page: raise PageNotFoundError('Wiki page not found') if (new_key in self.wiki_pages_current and key != new_key) or new_key == 'home': raise PageConflictError( 'Page already exists with name {0}'.format( new_name, ) ) # rename the page first in case we hit a validation exception. old_name = page.page_name page.rename(new_name) # TODO: merge historical records like update (prevents log breaks) # transfer the old page versions/current keys to the new name. if key != new_key: self.wiki_pages_versions[new_key] = self.wiki_pages_versions[key] del self.wiki_pages_versions[key] self.wiki_pages_current[new_key] = self.wiki_pages_current[key] del self.wiki_pages_current[key] if key in self.wiki_private_uuids: self.wiki_private_uuids[new_key] = self.wiki_private_uuids[key] del self.wiki_private_uuids[key] self.add_log( action=NodeLog.WIKI_RENAMED, params={ 'project': self.parent_id, 'node': self._primary_key, 'page': page.page_name, 'page_id': page._primary_key, 'old_page': old_name, 'version': page.version, }, auth=auth, save=False, ) self.save() def delete_node_wiki(self, name, auth): name = (name or '').strip() key = to_mongo_key(name) page = self.get_wiki_page(key) del self.wiki_pages_current[key] self.add_log( action=NodeLog.WIKI_DELETED, params={ 'project': self.parent_id, 'node': self._primary_key, 'page': page.page_name, 'page_id': page._primary_key, }, auth=auth, save=False, ) self.save() def get_stats(self, detailed=False): if detailed: raise NotImplementedError( 'Detailed stats exist, but are not yet implemented.' 
) else: return get_basic_counters('node:%s' % self._primary_key) # TODO: Deprecate this; it duplicates much of what serialize_project already # does def serialize(self, auth=None): """Dictionary representation of node that is nested within a NodeLog's representation. """ # TODO: incomplete implementation return { 'id': str(self._primary_key), 'category': self.category_display, 'node_type': self.project_or_component, 'url': self.url, # TODO: Titles shouldn't contain escaped HTML in the first place 'title': sanitize.unescape_entities(self.title), 'path': self.path_above(auth), 'api_url': self.api_url, 'is_public': self.is_public, 'is_registration': self.is_registration, } def _initiate_retraction(self, user, justification=None): """Initiates the retraction process for a registration :param user: User who initiated the retraction :param justification: Justification, if given, for retraction """ retraction = Retraction( initiated_by=user, justification=justification or None, # make empty strings None state=Retraction.UNAPPROVED ) retraction.save() # Save retraction so it has a primary key self.retraction = retraction self.save() # Set foreign field reference Node.retraction admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active] for admin in admins: retraction.add_authorizer(admin) retraction.save() # Save retraction approval state return retraction def retract_registration(self, user, justification=None, save=True): """Retract public registration. Instantiate new Retraction object and associate it with the respective registration. """ if not self.is_registration or (not self.is_public and not (self.embargo_end_date or self.is_pending_embargo)): raise NodeStateError('Only public or embargoed registrations may be retracted.') if self.root is not self: raise NodeStateError('Retraction of non-parent registrations is not permitted.') retraction = self._initiate_retraction(user, justification) self.registered_from.add_log( action=NodeLog.RETRACTION_INITIATED, params={ 'node': self._id, 'retraction_id': retraction._id, }, auth=Auth(user), ) self.retraction = retraction if save: self.save() def _is_embargo_date_valid(self, end_date): today = datetime.datetime.utcnow() if (end_date - today) >= settings.EMBARGO_END_DATE_MIN: if (end_date - today) <= settings.EMBARGO_END_DATE_MAX: return True return False def _initiate_embargo(self, user, end_date, for_existing_registration=False): """Initiates the retraction process for a registration :param user: User who initiated the retraction :param end_date: Date when the registration should be made public """ embargo = Embargo( initiated_by=user, end_date=datetime.datetime.combine(end_date, datetime.datetime.min.time()), for_existing_registration=for_existing_registration ) embargo.save() # Save embargo so it has a primary key self.embargo = embargo self.save() # Set foreign field reference Node.embargo admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active] for admin in admins: embargo.add_authorizer(admin) embargo.save() # Save embargo's approval_state return embargo def embargo_registration(self, user, end_date, for_existing_registration=False): """Enter registration into an embargo period at end of which, it will be made public :param user: User initiating the embargo :param end_date: Date when the registration should be made public :raises: NodeStateError if Node is not a registration :raises: PermissionsError if user is not an admin for the 
Node :raises: ValidationValueError if end_date is not within time constraints """ if not self.is_registration: raise NodeStateError('Only registrations may be embargoed') if not self.has_permission(user, 'admin'): raise PermissionsError('Only admins may embargo a registration') if not self._is_embargo_date_valid(end_date): raise ValidationValueError('Embargo end date must be more than one day in the future') embargo = self._initiate_embargo(user, end_date, for_existing_registration=for_existing_registration) self.registered_from.add_log( action=NodeLog.EMBARGO_INITIATED, params={ 'node': self._id, 'embargo_id': embargo._id, }, auth=Auth(user), save=True, ) if self.is_public: self.set_privacy('private', Auth(user)) def _initiate_approval(self, user): end_date = datetime.datetime.now() + settings.REGISTRATION_APPROVAL_TIME approval = RegistrationApproval( initiated_by=user, end_date=end_date, ) approval.save() # Save approval so it has a primary key self.registration_approval = approval self.save() # Set foreign field reference Node.registration_approval admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active] for admin in admins: approval.add_authorizer(admin) approval.save() # Save approval's approval_state return approval def require_approval(self, user): if not self.is_registration: raise NodeStateError('Only registrations can require registration approval') if not self.has_permission(user, 'admin'): raise PermissionsError('Only admins can initiate a registration approval') approval = self._initiate_approval(user) self.registered_from.add_log( action=NodeLog.REGISTRATION_APPROVAL_INITIATED, params={ 'node': self._id, 'registration_approval_id': approval._id, }, auth=Auth(user), save=True, ) # TODO make private? @Node.subscribe('before_save') def validate_permissions(schema, instance): """Ensure that user IDs in `contributors` and `permissions` match. """ node = instance contributor_ids = set([user._id for user in node.contributors]) permission_ids = set(node.permissions.keys()) mismatched_contributors = contributor_ids.difference(permission_ids) if mismatched_contributors: raise ValidationValueError( 'Contributors {0} missing from `permissions` on node {1}'.format( ', '.join(mismatched_contributors), node._id, ) ) mismatched_permissions = permission_ids.difference(contributor_ids) if mismatched_permissions: raise ValidationValueError( 'Permission keys {0} missing from `contributors` on node {1}'.format( ', '.join(mismatched_contributors), node._id, ) ) @Node.subscribe('before_save') def validate_visible_contributors(schema, instance): """Ensure that user IDs in `contributors` and `visible_contributor_ids` match. 
""" node = instance for user_id in node.visible_contributor_ids: if user_id not in node.contributors: raise ValidationValueError( ('User {0} is in `visible_contributor_ids` but not in ' '`contributors` on node {1}').format( user_id, node._id, ) ) class WatchConfig(StoredObject): _id = fields.StringField(primary=True, default=lambda: str(ObjectId())) node = fields.ForeignField('Node', backref='watched') digest = fields.BooleanField(default=False) immediate = fields.BooleanField(default=False) def __repr__(self): return '<WatchConfig(node="{self.node}")>'.format(self=self) class PrivateLink(StoredObject): _id = fields.StringField(primary=True, default=lambda: str(ObjectId())) date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow) key = fields.StringField(required=True) name = fields.StringField() is_deleted = fields.BooleanField(default=False) anonymous = fields.BooleanField(default=False) nodes = fields.ForeignField('node', list=True, backref='shared') creator = fields.ForeignField('user', backref='created') @property def node_ids(self): node_ids = [node._id for node in self.nodes] return node_ids def node_scale(self, node): # node may be None if previous node's parent is deleted if node is None or node.parent_id not in self.node_ids: return -40 else: offset = 20 if node.parent_node is not None else 0 return offset + self.node_scale(node.parent_node) def to_json(self): return { "id": self._id, "date_created": iso8601format(self.date_created), "key": self.key, "name": sanitize.unescape_entities(self.name), "creator": {'fullname': self.creator.fullname, 'url': self.creator.profile_url}, "nodes": [{'title': x.title, 'url': x.url, 'scale': str(self.node_scale(x)) + 'px', 'category': x.category} for x in self.nodes if not x.is_deleted], "anonymous": self.anonymous } class Sanction(StoredObject): """Sanction object is a generic way to track approval states""" abstract = True UNAPPROVED = 'unapproved' APPROVED = 'approved' REJECTED = 'rejected' DISPLAY_NAME = 'Sanction' # SHORT_NAME must correspond with the associated foreign field to query against, # e.g. Node.find_one(Q(sanction.SHORT_NAME, 'eq', sanction)) SHORT_NAME = 'sanction' APPROVAL_NOT_AUTHORIZED_MESSAGE = 'This user is not authorized to approve this {DISPLAY_NAME}' APPROVAL_INVALID_TOKEN_MESSAGE = 'Invalid approval token provided for this {DISPLAY_NAME}.' REJECTION_NOT_AUTHORIZED_MESSAEGE = 'This user is not authorized to reject this {DISPLAY_NAME}' REJECTION_INVALID_TOKEN_MESSAGE = 'Invalid rejection token provided for this {DISPLAY_NAME}.' 
_id = fields.StringField(primary=True, default=lambda: str(ObjectId())) initiation_date = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow) end_date = fields.DateTimeField(default=None) # Sanction subclasses must have an initiated_by field # initiated_by = fields.ForeignField('user', backref='initiated') # Expanded: Dictionary field mapping admin IDs their approval status and relevant tokens: # { # 'b3k97': { # 'has_approved': False, # 'approval_token': 'Pew7wj1Puf7DENUPFPnXSwa1rf3xPN', # 'rejection_token': 'TwozClTFOic2PYxHDStby94bCQMwJy'} # } approval_state = fields.DictionaryField() # One of 'unapproved', 'approved', or 'rejected' state = fields.StringField(default='unapproved') def __repr__(self): return '<Sanction(end_date={self.end_date}) with _id {self._id}>'.format(self=self) @property def pending_approval(self): return self.state == Sanction.UNAPPROVED @property def is_approved(self): return self.state == Sanction.APPROVED @property def is_rejected(self): return self.state == Sanction.REJECTED def _validate_authorizer(self, user): return True def add_authorizer(self, user, approved=False, save=False): valid = self._validate_authorizer(user) if valid and user._id not in self.approval_state: self.approval_state[user._id] = { 'has_approved': approved, 'approval_token': tokens.encode( { 'user_id': user._id, 'sanction_id': self._id, 'action': 'approve_{}'.format(self.SHORT_NAME) } ), 'rejection_token': tokens.encode( { 'user_id': user._id, 'sanction_id': self._id, 'action': 'reject_{}'.format(self.SHORT_NAME) } ), } if save: self.save() return True return False def remove_authorizer(self, user): if user._id not in self.approval_state: return False del self.approval_state[user._id] self.save() return True def _on_approve(self, user, token): if all(authorizer['has_approved'] for authorizer in self.approval_state.values()): self.state = Sanction.APPROVED self._on_complete(user) def _on_reject(self, user, token): """Early termination of a Sanction""" raise NotImplementedError('Sanction subclasses must implement an #_on_reject method') def _on_complete(self, user): """When a Sanction has unanimous approval""" raise NotImplementedError('Sanction subclasses must implement an #_on_complete method') def approve(self, user, token): """Add user to approval list if user is admin and token verifies.""" try: if self.approval_state[user._id]['approval_token'] != token: raise InvalidSanctionApprovalToken(self.APPROVAL_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME)) except KeyError: raise PermissionsError(self.APPROVAL_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME)) self.approval_state[user._id]['has_approved'] = True self._on_approve(user, token) def reject(self, user, token): """Cancels sanction if user is admin and token verifies.""" try: if self.approval_state[user._id]['rejection_token'] != token: raise InvalidSanctionRejectionToken(self.REJECTION_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME)) except KeyError: raise PermissionsError(self.REJECTION_NOT_AUTHORIZED_MESSAEGE.format(DISPLAY_NAME=self.DISPLAY_NAME)) self.state = Sanction.REJECTED self._on_reject(user, token) def forcibly_reject(self): self.state = Sanction.REJECTED def _notify_authorizer(self, user): pass def _notify_non_authorizer(self, user): pass def ask(self, group): for contrib in group: if contrib._id in self.approval_state: self._notify_authorizer(contrib) else: self._notify_non_authorizer(contrib) class EmailApprovableSanction(Sanction): AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = 
None NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None VIEW_URL_TEMPLATE = '' APPROVE_URL_TEMPLATE = '' REJECT_URL_TEMPLATE = '' # Store a persistant copy of urls for use when needed outside of a request context. # This field gets automagically updated whenever models approval_state is modified # and the model is saved # { # 'abcde': { # 'approve': [APPROVAL_URL], # 'reject': [REJECT_URL], # } # } stashed_urls = fields.DictionaryField(default=dict) @staticmethod def _format_or_empty(template, context): if context: return template.format(**context) return '' def _view_url(self, user_id): return self._format_or_empty(self.VIEW_URL_TEMPLATE, self._view_url_context(user_id)) def _view_url_context(self, user_id): return None def _approval_url(self, user_id): return self._format_or_empty(self.APPROVE_URL_TEMPLATE, self._approval_url_context(user_id)) def _approval_url_context(self, user_id): return None def _rejection_url(self, user_id): return self._format_or_empty(self.REJECT_URL_TEMPLATE, self._rejection_url_context(user_id)) def _rejection_url_context(self, user_id): return None def _send_approval_request_email(self, user, template, context): mails.send_mail( user.username, template, user=user, **context ) def _email_template_context(self, user, is_authorizer=False): return {} def _notify_authorizer(self, authorizer): context = self._email_template_context(authorizer, is_authorizer=True) if self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE: self._send_approval_request_email(authorizer, self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context) else: raise NotImplementedError def _notify_non_authorizer(self, user): context = self._email_template_context(user) if self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE: self._send_approval_request_email(user, self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context) else: raise NotImplementedError def add_authorizer(self, user, **kwargs): super(EmailApprovableSanction, self).add_authorizer(user, **kwargs) self.stashed_urls[user._id] = { 'view': self._view_url(user._id), 'approve': self._approval_url(user._id), 'reject': self._rejection_url(user._id) } self.save() class Embargo(EmailApprovableSanction): """Embargo object for registrations waiting to go public.""" COMPLETED = 'completed' DISPLAY_NAME = 'Embargo' SHORT_NAME = 'embargo' AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_ADMIN NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_NON_ADMIN VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}' REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}' initiated_by = fields.ForeignField('user', backref='embargoed') for_existing_registration = fields.BooleanField(default=False) @property def is_completed(self): return self.state == self.COMPLETED @property def embargo_end_date(self): if self.state == self.APPROVED: return self.end_date return False # NOTE(hrybacki): Old, private registrations are grandfathered and do not # require to be made public or embargoed. This field differentiates them # from new registrations entering into an embargo field which should not # show up in any search related fields. 
@property def pending_registration(self): return not self.for_existing_registration and self.pending_approval def __repr__(self): parent_registration = None try: parent_registration = Node.find_one(Q('embargo', 'eq', self)) except NoResultsFound: pass return ('<Embargo(parent_registration={0}, initiated_by={1}, ' 'end_date={2}) with _id {3}>').format( parent_registration, self.initiated_by, self.end_date, self._id ) def _view_url_context(self, user_id): registration = Node.find_one(Q('embargo', 'eq', self)) return { 'node_id': registration._id } def _approval_url_context(self, user_id): approval_token = self.approval_state.get(user_id, {}).get('approval_token') if approval_token: registration = Node.find_one(Q('embargo', 'eq', self)) return { 'node_id': registration._id, 'token': approval_token, } def _rejection_url_context(self, user_id): rejection_token = self.approval_state.get(user_id, {}).get('rejection_token') if rejection_token: registration = Node.find_one(Q('embargo', 'eq', self)) return { 'node_id': registration._id, 'token': rejection_token, } def _email_template_context(self, user, is_authorizer=False, urls=None): urls = urls or self.stashed_urls.get(user._id, {}) registration_link = urls.get('view', self._view_url(user._id)) if is_authorizer: approval_link = urls.get('approve', '') disapproval_link = urls.get('reject', '') approval_time_span = settings.EMBARGO_PENDING_TIME.days * 24 registration = Node.find_one(Q('embargo', 'eq', self)) return { 'is_initiator': self.initiated_by == user, 'initiated_by': self.initiated_by.fullname, 'approval_link': approval_link, 'project_name': registration.title, 'disapproval_link': disapproval_link, 'registration_link': registration_link, 'embargo_end_date': self.end_date, 'approval_time_span': approval_time_span, } else: return { 'initiated_by': self.initiated_by.fullname, 'registration_link': registration_link, 'embargo_end_date': self.end_date, } def _validate_authorizer(self, user): registration = Node.find_one(Q('embargo', 'eq', self)) return registration.has_permission(user, ADMIN) def _on_reject(self, user, token): parent_registration = Node.find_one(Q('embargo', 'eq', self)) parent_registration.registered_from.add_log( action=NodeLog.EMBARGO_CANCELLED, params={ 'node': parent_registration._id, 'embargo_id': self._id, }, auth=Auth(user), ) # Remove backref to parent project if embargo was for a new registration if not self.for_existing_registration: parent_registration.delete_registration_tree(save=True) parent_registration.registered_from = None # Delete parent registration if it was created at the time the embargo was initiated if not self.for_existing_registration: parent_registration.is_deleted = True parent_registration.save() def disapprove_embargo(self, user, token): """Cancels retraction if user is admin and token verifies.""" self.reject(user, token) def _on_complete(self, user): parent_registration = Node.find_one(Q('embargo', 'eq', self)) parent_registration.registered_from.add_log( action=NodeLog.EMBARGO_APPROVED, params={ 'node': parent_registration._id, 'embargo_id': self._id, }, auth=Auth(self.initiated_by), ) self.save() def approve_embargo(self, user, token): """Add user to approval list if user is admin and token verifies.""" self.approve(user, token) class Retraction(EmailApprovableSanction): """Retraction object for public registrations.""" DISPLAY_NAME = 'Retraction' SHORT_NAME = 'retraction' AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_ADMIN NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = 
mails.PENDING_RETRACTION_NON_ADMIN VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}' REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}' initiated_by = fields.ForeignField('user', backref='initiated') justification = fields.StringField(default=None, validate=MaxLengthValidator(2048)) def __repr__(self): parent_registration = None try: parent_registration = Node.find_one(Q('retraction', 'eq', self)) except NoResultsFound: pass return ('<Retraction(parent_registration={0}, initiated_by={1}) ' 'with _id {2}>').format( parent_registration, self.initiated_by, self._id ) def _view_url_context(self, user_id): registration = Node.find_one(Q('retraction', 'eq', self)) return { 'node_id': registration._id } def _approval_url_context(self, user_id): approval_token = self.approval_state.get(user_id, {}).get('approval_token') if approval_token: registration = Node.find_one(Q('retraction', 'eq', self)) return { 'node_id': registration._id, 'token': approval_token, } def _rejection_url_context(self, user_id): rejection_token = self.approval_state.get(user_id, {}).get('rejection_token') if rejection_token: registration = Node.find_one(Q('retraction', 'eq', self)) return { 'node_id': registration._id, 'token': rejection_token, } def _email_template_context(self, user, is_authorizer=False, urls=None): urls = urls or self.stashed_urls.get(user._id, {}) registration_link = urls.get('view', self._view_url(user._id)) if is_authorizer: approval_link = urls.get('approve', '') disapproval_link = urls.get('reject', '') approval_time_span = settings.RETRACTION_PENDING_TIME.days * 24 registration = Node.find_one(Q('retraction', 'eq', self)) return { 'is_initiator': self.initiated_by == user, 'initiated_by': self.initiated_by.fullname, 'project_name': registration.title, 'registration_link': registration_link, 'approval_link': approval_link, 'disapproval_link': disapproval_link, 'approval_time_span': approval_time_span, } else: return { 'initiated_by': self.initiated_by.fullname, 'registration_link': registration_link, } def _on_reject(self, user, token): parent_registration = Node.find_one(Q('retraction', 'eq', self)) parent_registration.registered_from.add_log( action=NodeLog.RETRACTION_CANCELLED, params={ 'node': parent_registration._id, 'retraction_id': self._id, }, auth=Auth(user), save=True, ) def _on_complete(self, user): parent_registration = Node.find_one(Q('retraction', 'eq', self)) parent_registration.registered_from.add_log( action=NodeLog.RETRACTION_APPROVED, params={ 'node': parent_registration._id, 'retraction_id': self._id, }, auth=Auth(self.initiated_by), ) # Remove any embargoes associated with the registration if parent_registration.embargo_end_date or parent_registration.is_pending_embargo: parent_registration.embargo.state = self.REJECTED parent_registration.registered_from.add_log( action=NodeLog.EMBARGO_CANCELLED, params={ 'node': parent_registration._id, 'embargo_id': parent_registration.embargo._id, }, auth=Auth(self.initiated_by), ) parent_registration.embargo.save() # Ensure retracted registration is public if not parent_registration.is_public: parent_registration.set_privacy('public') parent_registration.update_search() # Retraction status is inherited from the root project, so we # need to recursively update search for every descendant node # so that retracted subrojects/components don't appear in search for node in parent_registration.get_descendants_recursive(): node.update_search() self.save() 
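# NOTE: approve_retraction() and disapprove_retraction() below are thin
# wrappers around Sanction.approve() and Sanction.reject(); the
# retraction-specific work happens in _on_complete() and _on_reject() above,
# which the Sanction base class invokes once the token checks pass.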
def approve_retraction(self, user, token): self.approve(user, token) def disapprove_retraction(self, user, token): self.reject(user, token) class RegistrationApproval(EmailApprovableSanction): DISPLAY_NAME = 'Approval' SHORT_NAME = 'registration_approval' AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_ADMIN NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_NON_ADMIN VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}' REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}' initiated_by = fields.ForeignField('user', backref='registration_approved') def _view_url_context(self, user_id): registration = Node.find_one(Q('registration_approval', 'eq', self)) return { 'node_id': registration._id } def _approval_url_context(self, user_id): approval_token = self.approval_state.get(user_id, {}).get('approval_token') if approval_token: registration = Node.find_one(Q('registration_approval', 'eq', self)) return { 'node_id': registration._id, 'token': approval_token, } def _rejection_url_context(self, user_id): rejection_token = self.approval_state.get(user_id, {}).get('rejection_token') if rejection_token: registration = Node.find_one(Q('registration_approval', 'eq', self)) return { 'node_id': registration._id, 'token': rejection_token, } def _email_template_context(self, user, is_authorizer=False, urls=None): urls = urls or self.stashed_urls.get(user._id, {}) registration_link = urls.get('view', self._view_url(user._id)) if is_authorizer: approval_link = urls.get('approve', '') disapproval_link = urls.get('reject', '') approval_time_span = settings.REGISTRATION_APPROVAL_TIME.days * 24 registration = Node.find_one(Q('registration_approval', 'eq', self)) return { 'is_initiator': self.initiated_by == user, 'initiated_by': self.initiated_by.fullname, 'registration_link': registration_link, 'approval_link': approval_link, 'disapproval_link': disapproval_link, 'approval_time_span': approval_time_span, 'project_name': registration.title, } else: return { 'initiated_by': self.initiated_by.fullname, 'registration_link': registration_link, } def _add_success_logs(self, node, user): src = node.registered_from src.add_log( action=NodeLog.PROJECT_REGISTERED, params={ 'parent_node': src.parent_id, 'node': src._primary_key, 'registration': node._primary_key, }, auth=Auth(user), save=False ) src.save() def _on_complete(self, user): self.state = Sanction.APPROVED register = Node.find_one(Q('registration_approval', 'eq', self)) registered_from = register.registered_from auth = Auth(self.initiated_by) register.set_privacy('public', auth, log=False) for child in register.get_descendants_recursive(lambda n: n.primary): child.set_privacy('public', auth, log=False) # Accounts for system actions where no `User` performs the final approval auth = Auth(user) if user else None registered_from.add_log( action=NodeLog.REGISTRATION_APPROVAL_APPROVED, params={ 'node': registered_from._id, 'registration_approval_id': self._id, }, auth=auth, ) for node in register.root.node_and_primary_descendants(): self._add_success_logs(node, user) node.update_search() # update search if public self.save() def _on_reject(self, user, token): register = Node.find_one(Q('registration_approval', 'eq', self)) registered_from = register.registered_from register.delete_registration_tree(save=True) registered_from.add_log( action=NodeLog.REGISTRATION_APPROVAL_CANCELLED, params={ 'node': register._id, 'registration_approval_id': self._id, 
}, auth=Auth(user), )
njantrania/osf.io
website/project/model.py
Python
apache-2.0
132,206
[ "VisIt" ]
1490ee7972ad2d83f9436c93cabfff918617586e674fc80886ddfbe73163e20e
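# --- Illustrative sketch (not part of the original file) --------------------
# A minimal, self-contained rendition of the token-based approval pattern the
# Sanction class above implements, with the model layer stripped out.
# `MiniSanction` and the use of `secrets.token_hex` are assumptions made for
# the sketch: the real code issues tokens via a `tokens.encode` helper that
# encodes a payload rather than returning a random string.
import secrets

class MiniSanction(object):
    UNAPPROVED, APPROVED, REJECTED = 'unapproved', 'approved', 'rejected'

    def __init__(self):
        self.state = self.UNAPPROVED
        self.approval_state = {}  # user_id -> {'has_approved', tokens}

    def add_authorizer(self, user_id):
        self.approval_state[user_id] = {
            'has_approved': False,
            'approval_token': secrets.token_hex(15),
            'rejection_token': secrets.token_hex(15),
        }

    def approve(self, user_id, token):
        record = self.approval_state.get(user_id)
        if record is None:
            raise PermissionError('user is not an authorizer')
        if record['approval_token'] != token:
            raise ValueError('invalid approval token')
        record['has_approved'] = True
        # as in Sanction._on_approve: unanimous approval completes the sanction
        if all(r['has_approved'] for r in self.approval_state.values()):
            self.state = self.APPROVED

    def reject(self, user_id, token):
        record = self.approval_state.get(user_id)
        if record is None or record['rejection_token'] != token:
            raise ValueError('invalid rejection token')
        self.state = self.REJECTED  # a single valid rejection terminates it
# -----------------------------------------------------------------------------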
# -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 ## ## Copyright (C) 2012 Async Open Source <http://www.async.com.br> ## All rights reserved ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, write to the Free Software ## Foundation, Inc., or visit: http://www.gnu.org/. ## ## Author(s): Stoq Team <stoq-devel@async.com.br> ## from stoqlib.api import api from stoqlib.domain.commission import Commission from stoqlib.domain.person import Person from stoqlib.gui.search.commissionsearch import CommissionSearch from stoqlib.gui.search.searchfilters import DateSearchFilter from stoqlib.gui.test.uitestutils import GUITest from stoqlib.lib.dateutils import localdatetime, localdate class TestCommissionSearch(GUITest): def test_search(self): self.clean_domain([Commission]) person = self.store.find(Person, name=u'Deivis Alexandre Junior').one() salesperson = person.sales_person sale = self.create_sale() sale.identifier = 74521 sale.open_date = localdatetime(2012, 1, 1) sale.confirm_date = localdatetime(2012, 1, 10) sale.salesperson = salesperson payment = self.create_payment() payment.paid_date = localdatetime(2012, 1, 15) Commission(sale=sale, payment=payment, store=self.store) person = self.store.find(Person, name=u'Maria Aparecida Ardana').one() salesperson = person.sales_person sale = self.create_sale() sale.identifier = 85412 sale.open_date = localdatetime(2012, 2, 2) sale.confirm_date = localdatetime(2012, 2, 10) sale.salesperson = salesperson payment = self.create_payment() payment.paid_date = localdatetime(2012, 2, 15) Commission(sale=sale, payment=payment, store=self.store) # First check for columns getting the confirm date of the sale api.sysparam.set_bool(self.store, 'SALE_PAY_COMMISSION_WHEN_CONFIRMED', True) search = CommissionSearch(self.store) search._date_filter.select(data=DateSearchFilter.Type.USER_INTERVAL) search._date_filter.start_date.update(localdate(2010, 1, 1)) search._date_filter.end_date.update(localdate(2012, 2, 15)) search.search.refresh() self.check_search(search, 'commission-confirmed-no-filter') search.set_searchbar_search_string('dei') search.search.refresh() self.check_search(search, 'commission-confirmed-string-filter') search.set_searchbar_search_string('') search._salesperson_filter.set_state(salesperson.id) search.search.refresh() self.check_search(search, 'commission-confirmed-salesperson-filter') # Then check for columns getting the paid date of the payment api.sysparam.set_bool(self.store, 'SALE_PAY_COMMISSION_WHEN_CONFIRMED', False) search = CommissionSearch(self.store) search._date_filter.select(data=DateSearchFilter.Type.USER_INTERVAL) search._date_filter.start_date.update(localdate(2010, 1, 1)) search._date_filter.end_date.update(localdate(2012, 2, 15)) search.search.refresh() self.check_search(search, 'commission-paid-no-filter') search.set_searchbar_search_string('dei') search.search.refresh() self.check_search(search, 'commission-paid-string-filter') search.set_searchbar_search_string('') 
search._salesperson_filter.set_state(salesperson.id) search.search.refresh() self.check_search(search, 'commission-paid-salesperson-filter')
tiagocardosos/stoq
stoqlib/gui/test/test_commissionsearch.py
Python
gpl-2.0
4,304
[ "VisIt" ]
b66c6ba66c34287e70f596df41a82b0bd3457ea442a28d5678301c4d1823228c
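# Note on the test above: the SALE_PAY_COMMISSION_WHEN_CONFIRMED parameter
# decides which date the commission columns report on -- the sale's confirm
# date when True, the payment's paid date when False -- so the same three
# filters (date interval, search string, salesperson) are exercised twice,
# once against each family of expected results ('commission-confirmed-*'
# and 'commission-paid-*', presumably stored snapshots used by check_search).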
# This file is part of PyEMMA. # # Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER) # # PyEMMA is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. ''' Test feature reader and Tica by checking the properties of the ICs. cov(ic_i,ic_j) = delta_ij and cov(ic_i,ic_j,tau) = lambda_i delta_ij @author: Fabian Paul ''' from __future__ import absolute_import from __future__ import print_function import os import tempfile import unittest from nose.plugins.attrib import attr import mdtraj from pyemma.coordinates.api import tica from pyemma.coordinates.data.feature_reader import FeatureReader from pyemma.util.contexts import numpy_random_seed from logging import getLogger from six.moves import range import numpy as np log = getLogger('pyemma.'+'TestFeatureReaderAndTICAProjection') def random_invertible(n, eps=0.01): 'generate real random invertible matrix' m = np.random.randn(n, n) u, s, v = np.linalg.svd(m) s = np.maximum(s, eps) return u.dot(np.diag(s)).dot(v) @attr(slow=True) class TestFeatureReaderAndTICAProjection(unittest.TestCase): @classmethod def setUpClass(cls): with numpy_random_seed(52): c = super(TestFeatureReaderAndTICAProjection, cls).setUpClass() cls.dim = 99 # dimension (must be divisible by 3) N = 5000 # length of single trajectory # 500000 # 50000 N_trajs = 10 # number of trajectories A = random_invertible(cls.dim) # mixing matrix # tica will approximate its inverse with the projection matrix mean = np.random.randn(cls.dim) # create topology file cls.temppdb = tempfile.mktemp('.pdb') with open(cls.temppdb, 'w') as f: for i in range(cls.dim // 3): print(('ATOM %5d C ACE A 1 28.490 31.600 33.379 0.00 1.00' % i), file=f) t = np.arange(0, N) cls.trajnames = [] # list of xtc file names for i in range(N_trajs): # set up data white = np.random.randn(N, cls.dim) brown = np.cumsum(white, axis=0) correlated = np.dot(brown, A) data = correlated + mean xyz = data.reshape((N, cls.dim // 3, 3)) # create trajectory file traj = mdtraj.load(cls.temppdb) traj.xyz = xyz traj.time = t tempfname = tempfile.mktemp('.xtc') traj.save(tempfname) cls.trajnames.append(tempfname) @classmethod def tearDownClass(cls): for fname in cls.trajnames: os.unlink(fname) os.unlink(cls.temppdb) super(TestFeatureReaderAndTICAProjection, cls).tearDownClass() def test_covariances_and_eigenvalues(self): reader = FeatureReader(self.trajnames, self.temppdb) for tau in [1, 10, 100, 1000, 2000]: trans = tica(lag=tau, dim=self.dim, kinetic_map=False) trans.data_producer = reader log.info('number of trajectories reported by tica %d' % trans.number_of_trajectories()) trans.parametrize() data = trans.get_output() log.info('max. 
eigenvalue: %f' % np.max(trans.eigenvalues)) self.assertTrue(np.all(trans.eigenvalues <= 1.0)) # check ICs check = tica(data=data, lag=tau, dim=self.dim) np.testing.assert_allclose(np.eye(self.dim), check.cov, atol=1e-8) np.testing.assert_allclose(check.mean, 0.0, atol=1e-8) ic_cov_tau = np.zeros((self.dim, self.dim)) ic_cov_tau[np.diag_indices(self.dim)] = trans.eigenvalues np.testing.assert_allclose(ic_cov_tau, check.cov_tau, atol=1e-8) def test_partial_fit(self): from pyemma.coordinates import source reader = source(self.trajnames, top=self.temppdb) reader_output = reader.get_output() params = {'lag': 10, 'kinetic_map': False, 'dim': self.dim} tica_obj = tica(**params) tica_obj.partial_fit(reader_output[0]) assert not tica_obj._estimated # access eigenvectors to force diagonalization tica_obj.eigenvectors assert tica_obj._estimated tica_obj.partial_fit(reader_output[1]) assert not tica_obj._estimated tica_obj.eigenvalues assert tica_obj._estimated for traj in reader_output[2:]: tica_obj.partial_fit(traj) # reference ref = tica(reader, **params) np.testing.assert_allclose(tica_obj.cov, ref.cov, atol=1e-15) np.testing.assert_allclose(tica_obj.cov_tau, ref.cov_tau, atol=1e-15) np.testing.assert_allclose(tica_obj.eigenvalues, ref.eigenvalues, atol=1e-15) # we do not test eigenvectors here, since the system is very metastable and # we have multiple eigenvalues very close to one. if __name__ == "__main__": unittest.main()
gph82/PyEMMA
pyemma/coordinates/tests/test_featurereader_and_tica_projection.py
Python
lgpl-3.0
5,619
[ "MDTraj" ]
7a9fdd44082e91398d9307f084ea54dcd85e74faa4304dbdd13ccf172a1be633
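# --- Illustrative sketch (not part of the original file) --------------------
# A self-contained numpy/scipy rendition of the property the TICA test above
# asserts: after projection, the ICs have identity covariance at lag 0, and
# their lag-tau autocovariances equal the TICA eigenvalues. A toy AR(1)
# process stands in for the xtc trajectories; this illustrates the underlying
# generalized eigenvalue problem, not PyEMMA's implementation.
import numpy as np
from scipy.linalg import eigh

np.random.seed(0)
N, dim, tau = 200000, 3, 10
a = np.array([0.99, 0.9, 0.5])          # per-dimension relaxation speeds
x = np.zeros((N, dim))
for t in range(1, N):                   # stable AR(1) dynamics
    x[t] = a * x[t - 1] + np.random.randn(dim)
x -= x.mean(axis=0)

c0 = x.T.dot(x) / N                     # instantaneous covariance
ct = x[:-tau].T.dot(x[tau:]) / (N - tau)
ct = 0.5 * (ct + ct.T)                  # symmetrized lag-tau covariance

# generalized eigenvalue problem: C_tau v = lambda C_0 v
eigvals, eigvecs = eigh(ct, c0)
ics = x.dot(eigvecs)

print(np.cov(ics.T))                    # ~ identity matrix
print(eigvals)                          # ~ a**tau, the IC autocorrelations
# -----------------------------------------------------------------------------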
"""Regression testing framework This module will search for scripts in the same directory named XYZtest.py. Each such script should be a test suite that tests a module through PyUnit. (As of Python 2.1, PyUnit is included in the standard library as 'unittest'.) This script will aggregate all found test suites into one big test suite and run them all at once. This program is part of "Dive Into Python", a free Python book for experienced programmers. Visit http://diveintopython.org/ for the latest version. """ __author__ = "Mark Pilgrim (f8dy@diveintopython.org)" __version__ = "$Revision: 1.1 $" __date__ = "$Date: 2003/09/03 17:50:48 $" __copyright__ = "Copyright (c) 2001 Mark Pilgrim" __license__ = "Python" import sys, os, re, unittest def regressionTest(): path = os.path.abspath(os.path.dirname(sys.argv[0])) files = os.listdir(path) test = re.compile("test.py$", re.IGNORECASE) files = filter(test.search, files) filenameToModuleName = lambda f: os.path.splitext(f)[0] moduleNames = map(filenameToModuleName, files) modules = map(__import__, moduleNames) load = unittest.defaultTestLoader.loadTestsFromModule return unittest.TestSuite(map(load, modules)) if __name__ == "__main__": unittest.main(defaultTest="regressionTest")
jhjguxin/PyCDC
Karrigell-2.3.5/test/regression.py
Python
gpl-3.0
1,292
[ "VisIt" ]
d1850acdba91158b1a74706dc98b84da44c50466f3a58456c44f5897e39a3446
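# --- Illustrative sketch (not part of the original file) --------------------
# For comparison, a sketch of what regression.py above does using the test
# discovery added to unittest in Python 2.7+; the pattern string is an
# assumption matching the "XYZtest.py" convention described in the docstring.
import os
import sys
import unittest

def regression_test():
    path = os.path.abspath(os.path.dirname(sys.argv[0]))
    return unittest.defaultTestLoader.discover(path, pattern='*test.py')

if __name__ == '__main__':
    unittest.TextTestRunner().run(regression_test())
# -----------------------------------------------------------------------------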
#!/usr/bin/env python from mpl_toolkits.mplot3d import Axes3D import matplotlib.cm as cm from sklearn.metrics import silhouette_score, silhouette_samples from sklearn.cluster import KMeans, DBSCAN import msmexplorer as msme import mdtraj import argparse from matplotlib import pyplot as plt import numpy as np import seaborn as sns parser = argparse.ArgumentParser(prog='cluster_ligands.py', formatter_class=argparse.RawDescriptionHelpFormatter, description='''version1''') parser.add_argument("Trajectories", help="""An indefinite amount of AMBER trajectories""", nargs="+") parser.add_argument('-p', '--prmtop', type=str, required=True) parser.add_argument('-st', '--stride', type=int, required=False, default=1) parser.add_argument('-l', '--ligand_selection', type=str, required=True) parser.add_argument('-o', '--out_file', type=str, required=False, default='cluster_ligands') def figure_dims(width_pt, factor=0.45): """ I copied this from here: https://www.archer.ac.uk/training/course-material/2014/07/SciPython_Cranfield/Slides/L04_matplotlib.pdf """ WIDTH = width_pt # Figure width in pt (usually from LaTeX) FACTOR = factor # Fraction of the width you'd like the figure to occupy widthpt = WIDTH * FACTOR inperpt = 1.0 / 72.27 golden_ratio = (np.sqrt(5) - 1.0) / 2.0 # because it looks good widthin = widthpt * inperpt heightin = widthin * golden_ratio figdims = [widthin, heightin] # Dimensions as list return figdims def plot_com_matrix(com_matrix): """ Plots the projections of X, Y and Z of a center of mass position matrix Parameters ---------- com_matrix: np.array of shape (frames, 3) Returns ------- fig: plt.figure """ fig, axes = plt.subplots(3, 3, figsize=(15, 15)) correspondance = { 0: 'x', 1: 'y', 2: 'z' } # axes is a np.array of shape (3, 3) for i in range(3): for j in range(3): ax = axes[i][j] if i >= j: # we'll plot only the lower half if i == j: sns.kdeplot(com_matrix[:, i], ax=ax) ax.set(xlabel='%s (nm)' % correspondance[i], ylabel='Density') else: ax.set(xlabel='%s (nm)' % correspondance[j], ylabel='%s (nm)' % correspondance[i]) msme.plot_free_energy( com_matrix, obs=(j, i), ax=ax, shade=True, n_levels=5, vmin=-1e-12, cmap='viridis' ) else: ax.set_axis_off() return fig def plot_points_labels_3D(matrix, clusterer, ax=None): if ax is None: # Plot alone fig = plt.figure(figsize=figure_dims(2500)) ax = plt.subplot(111, projection='3d') # Make background of 3d plot white ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0)) ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0)) ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0)) if hasattr(clusterer, 'n_clusters'): n_clusters = clusterer.n_clusters else: # DBSCAN has no n_clusters attribute n_clusters = len(set(clusterer.labels_)) - (1 if -1 in clusterer.labels_ else 0) print('Estimated n of clusters {}'.format(n_clusters)) colors = cm.spectral(clusterer.labels_.astype(float) / n_clusters) ax.scatter(matrix[:, 0], matrix[:, 1], matrix[:, 2], marker='.', s=10, lw=0, alpha=0.6, c=colors) # Labeling the clusters if hasattr(clusterer, 'cluster_centers_'): centers = clusterer.cluster_centers_ # Plot the label of the cluster at its center on top of a white round dot for i, c in enumerate(centers): color = cm.spectral(float(i) / n_clusters) ax.scatter(c[0], c[1], c[2], marker='o', alpha=1, s=200, color='white', lw=1, zorder=20) ax.scatter(c[0], c[1], c[2], marker='$%d$' % i, alpha=1, s=100, color=color, zorder=20) ax.set_xlabel("x (nm)") ax.set_ylabel("y (nm)") ax.set_zlabel("z (nm)") ax.set_title("{} clusters".format(n_clusters)) return ax def 
report_clusters(com_matrix, n_cluster_list=[2, 3, 4, 5, 6]): """ Performs K means clustering on a center of mass position matrix. Reports the results in two plots for each number of clusters: 1st Plot: Silhouette score summary 2nd Plot: 3D representation of the center of mass positions and the cluster labels that have been assigned. Parameters ---------- com_matrix: np.array of shape (frames, 3) Returns ------- fig_list: list of plt.figures, of length n_cluster_list """ fig_list = [] min_silhouette_value = 0 for n_clusters in n_cluster_list: # Create a figure which will have two axis (1 row, 2 columns) fig = plt.figure(figsize=figure_dims(2500)) ax1 = plt.subplot(121) # Plot on the left will be simple 2D silhouette ax2 = plt.subplot(122, projection='3d') # 3D plot on the right clusterer = KMeans(n_clusters=n_clusters) cluster_labels = clusterer.fit_predict(com_matrix) silhouette_avg = silhouette_score(com_matrix, cluster_labels) sample_silhouette_values = silhouette_samples(com_matrix, cluster_labels) # Silhouette values can go between -1 and 1, but here let's set the minimum # to whatever minimal value we have min_silhouette_value = min(min_silhouette_value, sample_silhouette_values.min()) # Set axis values of first plot ax1.set_ylim([0, len(com_matrix) + (n_clusters + 1) * 10]) ax1.set_xlim([min_silhouette_value, 1]) y_lower = 10 for i in range(n_clusters): # Aggregate the silhouette scores for samples belonging to # cluster i, and sort them ith_cluster_silhouette_values = \ sample_silhouette_values[cluster_labels == i] ith_cluster_silhouette_values.sort() size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i color = cm.spectral(float(i) / n_clusters) ax1.fill_betweenx(np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color, edgecolor=color, alpha=0.7) # Label the silhouette plots with their cluster numbers at the middle ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i)) # Compute the new y_lower for next plot y_lower = y_upper + 10 # 10 for the 0 samples ax1.set_title("Silhouette plot") ax1.set_xlabel("Silhouette coefficient") ax1.set_ylabel("Cluster label") # The vertical line for average silhouette score of all the values ax1.axvline(x=silhouette_avg, color="red", linestyle="--") ax1.annotate('Avg. 
silhouette: %.2f' % silhouette_avg, xy=(0.75, 0.95), xycoords='axes fraction') ax1.set_yticks([]) # Clear the yaxis labels / ticks ax2 = plot_points_labels_3D(com_matrix, clusterer, ax=ax2) fig_list.append((fig, n_clusters)) return fig_list def plot_3d_time(com_matrix, time): fig = plt.figure(figsize=figure_dims(2500)) ax = plt.subplot(111, projection='3d') # Set background color of 3D axis to white ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0)) ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0)) ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0)) ax.set_xlabel("x (nm)") ax.set_ylabel("y (nm)") ax.set_zlabel("z (nm)") p = ax.scatter(com_matrix[:, 0], com_matrix[:, 1], com_matrix[:, 2], c=time, cmap='viridis') cbar = fig.colorbar(p) cbar.set_label('Time (ns)') return fig, ax if __name__ == '__main__': plt.style.use(['seaborn-talk', 'seaborn-whitegrid']) args = parser.parse_args() print(args) top = mdtraj.load_prmtop(args.prmtop) traj = mdtraj.load([t for t in args.Trajectories], top=args.prmtop, stride=args.stride) print('{} frames have been loaded'.format(traj.n_frames)) # Center all coordinates, makes center of geometry of the system (0, 0, 0) traj.center_coordinates() # Superpose trajectory onto first frame # This is exactly like the 'hold selection steady' command in Chimera traj.superpose(traj, 0) # Create separate trajectory for the ligand ligand_indices = top.select(args.ligand_selection) lig_traj = traj.atom_slice(ligand_indices) center_mass_ligand = mdtraj.compute_center_of_mass(lig_traj) # Plot projections f = plot_com_matrix(center_mass_ligand) f.savefig('{}.pdf'.format(args.out_file)) # Plot clustering with Kmeans fig_list = report_clusters(center_mass_ligand) for fig in fig_list: f, n_clusters = fig[0], fig[1] f.savefig('{}_{}clusters_kmeans.pdf'.format(args.out_file, n_clusters)) # plot 3d alone time = np.linspace(0, traj.timestep * traj.n_frames / 1000, num=traj.n_frames) f, _ = plot_3d_time(center_mass_ligand, time) f.savefig('{}_3d.pdf'.format(args.out_file)) plt.close() # DBSCAN max_score = 0 for eps in np.arange(start=0.5, stop=5, step=0.5): for min_samples in [2, 5, 10, 20, 50, 100]: try: db = DBSCAN(eps, min_samples) db.fit_predict(center_mass_ligand) new_score = silhouette_score(center_mass_ligand, db.labels_) print('{} eps\t{} minsamp\t{} sil'.format(eps, min_samples, silhouette_score(center_mass_ligand, db.labels_))) if new_score > max_score: max_score = new_score ax = plot_points_labels_3D(center_mass_ligand, db) f = plt.gcf() f.savefig('DBSCAN{}{}.pdf'.format(eps, min_samples)) except: continue
jeiros/Scripts
AnalysisMDTraj/cluster_ligands.py
Python
mit
10,303
[ "Amber", "MDTraj" ]
a7bacec62a2fef9dd32df79ff6e7cf25a19cefa60dd54d79b748226f2394870d
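# --- Illustrative sketch (not part of the original file) --------------------
# The model-selection idea inside report_clusters() above, reduced to its
# core: score candidate k values by mean silhouette on 3-D "center of mass"
# points. make_blobs stands in for a real MDTraj COM matrix; this sketch
# omits all of the script's plotting.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import silhouette_score

com, _ = make_blobs(n_samples=500, centers=3, n_features=3, random_state=0)
for k in (2, 3, 4, 5, 6):
    labels = KMeans(n_clusters=k, random_state=0).fit_predict(com)
    print(k, silhouette_score(com, labels))   # highest mean score wins
# -----------------------------------------------------------------------------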
""" VTK Animations Usage: vtkanim <file> --type=<type> Options: --type=<type> type that of grid (curvi, ugrid) """ import logging import numpy as np from mayavi.sources.vtk_data_source import VTKDataSource from mayavi.modules.outline import Outline from mayavi.modules.surface import Surface from mayavi.modules.vectors import Vectors from mayavi.filters.cell_to_point_data import CellToPointData from mayavi.filters.threshold import Threshold from mayavi.scripts import mayavi2 from tvtk.api import tvtk import mayavi.mlab as mlab mlab.options.offscreen = True import docopt import sources # custom filter from .filters import DataSetTriangleFilter import pipes logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) @mayavi2.standalone def main(): arguments = docopt.docopt(__doc__, version='0.1') if arguments['--type'] == 'curvi': import updates_d3d as updates else: import updates_fm as updates logger.info("%s", arguments) # load the source generator source_generator = getattr(sources, arguments['--type'] + '_from_file') # generate the vtksources grids = list(source_generator(arguments['<file>'])) pipe_list = list(pipes.pipes[arguments['--type']]) print arguments['--type'] update_list = list(updates.updates[arguments['--type']]) # # Setup the scen scene = mayavi.new_scene() scene.scene.background = (0.182, 0.182, 0.182) # scene.scene.camera.position = [58166.583048915491, 485959.0247321708, 16043.572309725338] # scene.scene.camera.focal_point = [69999.998654336974, 450767.75405880535, -1590.5715721476552] # scene.scene.camera.view_angle = 30.0 # scene.scene.camera.view_up = [0.17119034045790907, -0.39476891515875417, 0.90269118249725122] # scene.scene.camera.clipping_range = [138.29111167241902, 138291.11167241901] # scene.scene.camera.compute_view_plane_normal() camera = scene.scene.camera #camera.position = [35000, 525000, 10000] #camera.focal_point = [60000, 450000, 0] #camera.view_angle = 30 #camera.view_up = [0.17119034045790907, -0.39476891515875417, 0.90269118249725122] # bathymetry grid, waterlevel grid (different z's) # ug_bathy, ug_waterlevel = list(ugs) # wrap the sources in vtk for grid, pipe in zip(grids, pipe_list): vtkgrid = VTKDataSource(data=grid) # add the grid to the scen mayavi.add_source(vtkgrid) pipe(mayavi) scene.scene.isometric_view() mlab.view(azimuth=90, elevation=70, distance=50000) #camera.compute_view_plane_normal() scene.render() for t in range(75): logger.info("rendering %s", t) for grid, update in zip(grids, update_list): update(arguments['<file>'], grid, t) scene.render() mlab.savefig('test%03d.png' % (t, )) import sys sys.exit(0) """ import mayavi.mlab recorder = mayavi.mlab.start_recording() """
openearth/vtkanim
vtkanim/commands.py
Python
gpl-3.0
2,949
[ "Mayavi", "VTK" ]
c68ad631902e9b41c6132065e0f6bb35afb8d42b9b091c0642d5e0af01f2b4e3
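# --- Illustrative sketch (not part of the original file) --------------------
# The offscreen render/update/save loop that commands.py above builds around
# its VTK grid sources, reduced to a stand-alone form. The sine surface is a
# stand-in for the real data; per-frame mutation of a VTK grid would replace
# the mlab.clf()/mlab.surf() pair.
import numpy as np
from mayavi import mlab

mlab.options.offscreen = True
x, y = np.mgrid[-3:3:50j, -3:3:50j]
for t in range(10):
    mlab.clf()
    mlab.surf(x, y, np.sin(x + 0.3 * t) * np.cos(y))
    mlab.view(azimuth=90, elevation=70)
    mlab.savefig('frame%03d.png' % t)
# -----------------------------------------------------------------------------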
# Custom library import data_reader # Standard libraries import datetime import os import sys import time # Third-party libraries import numpy as np import tensorflow as tf # Short form of Boolean value T, F = True, False class optimise_hyperparameters : def __init__(self, use_single_U = True, U = 5, U1 = 5, U2 = 20, name_output_file_by_date_first = True): self.U = U self.U1 = U1 self.U2 = U2 self.use_single_U = use_single_U self.name_output_file_by_date_first = name_output_file_by_date_first if use_single_U : print 'Training using U = %d' % U else : print 'Training using U = %d and U = %d.' % (U1,U2) # System size # number of spin in each of the cube dimension self.n_x = 4 n_x = self.n_x # number of imaginary time dimension self.L = 200 L = self.L # Volume of tesseract self.V4d = L*(n_x)**3 # Maximum number of data files to be used for training and testing. Max_nfile = 100 # Offset to the file index (to load) File_index_offset = 0 if use_single_U : # Input labelled and shuffled filename for training and performing classification # with labels. filename = './N%dx%dx%d_L%d_U%d_Mu0_T_shuffled' % (n_x,n_x,n_x,L,U) + '_%.2d.dat' # Input raw filename for performing classification without labels. rawdata_filename = './N%dx%dx%d_L%d_U%d_Mu0_T' % (n_x,n_x,n_x,L,U) + '%s.HSF.stream' else : # Input labelled and shuffled filename for training and performing classification # with labels. if U1 < U2 : filename = './N%dx%dx%d_L%d_U%d+U%d_Mu0_T_shuffled' % (n_x,n_x,n_x,L,U1,U2) + '_%.2d.dat' else : filename = './N%dx%dx%d_L%d_U%d+U%d_Mu0_T_shuffled' % (n_x,n_x,n_x,L,U2,U1) + '_%.2d.dat' # Load data if use_single_U : # Get temperature and save them to a file. os.system("ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau.dat" %(n_x,n_x,n_x,L,U,n_x,n_x,n_x,L,U)) self.dtau = np.genfromtxt("dtau.dat") os.remove("dtau.dat") # Array of shuffled file's file number else : # Get temperature and save them to a file. os.system("ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau1.dat" %(n_x,n_x,n_x,L,U1,n_x,n_x,n_x,L,U1)) dtau1 = np.genfromtxt("dtau1.dat") # Get temperature and save them to a file. os.system("ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau2.dat" %(n_x,n_x,n_x,L,U2,n_x,n_x,n_x,L,U2)) dtau2 = np.genfromtxt("dtau2.dat") self.dtau = np.hstack((dtau1,dtau2)) os.remove("dtau1.dat") os.remove("dtau2.dat") # Array of shuffled file's file number filenumber = np.arange(1+File_index_offset,len(self.dtau)+1,1) if len(filenumber) > Max_nfile : filenumber = filenumber[:Max_nfile] # Provide file information to the data_reader module. HSF = data_reader.insert_file_info(filename,filenumber, load_test_data_only=False) # Load and categorize data into either training data, test data, validation data, or # all of them. If validation data is needed, set include_validation_data to (T) # in the insert_file_info() module above.
self.HSF = HSF.categorize_data() def insert_hyperparameters(self, n_feature_map1, n_feature_map2, n_feature_map3, n_fully_connected_neuron, continue_training_using_trained_model = False, filename_trained_model = "./model.ckpt" ): HSF = self.HSF # System size # number of spin in each of the cube dimension n_x = self.n_x # number of imaginary time dimension L = self.L # Volume of tesseract V4d = self.V4d U = self.U U1 = self.U1 U2 = self.U2 dtau = self.dtau name_output_file_by_date_first = self.name_output_file_by_date_first # Code name of the neural network NNetwork = 'CNN0f' sess = tf.InteractiveSession() use_single_U = self.use_single_U print "(Conv + ReLu) 1: %d, (Conv + ReLu) 2: %d, (Conv + ReLu) 3: %d, fc 1: %d" %(n_feature_map1, n_feature_map2, n_feature_map3, n_fully_connected_neuron) # Number of training epoch epochs = 500 # Size of training batch batch_size = 100 # Threshold of difference between train_accuracy and test_accuracy delta_accuracy_threshold = 0.025 # Threshold for stopping training when overtraining is encountered. overtraining_threshold = 10 # Initialize best test accuracy. The minimum test_accuracy for model and measurements # to be saved. best_test_accuracy = 0.5 # String of current date and time dt = datetime.datetime.now() year, month, day, hour, minute = '%.2d' % dt.year, '%.2d' % dt.month, '%.2d' % dt.day, '%.2d' % dt.hour, '%.2d' % dt.minute start_date_time = '%s%s%s-%s%s' % (year, month, day, hour, minute) if name_output_file_by_date_first == False : if use_single_U : # Output model filename filename_weight_bias = "./model_U%d_" % U + NNetwork + "_CR1%d_CR2%d_CR3%d_fc%d" % (n_feature_map1, n_feature_map2, n_feature_map3, n_fully_connected_neuron) + "_test_acc_%.1f_" + start_date_time + ".ckpt" # Output of training measurements filename filename_measure = "./measurements_U%d_" % U + NNetwork + "_CR1%d_CR2%d_CR3%d_fc%d" % (n_feature_map1, n_feature_map2, n_feature_map3, n_fully_connected_neuron) + "_test_acc_%.1f_" + start_date_time + ".dat" # Output of classification result with labels filename_result = "./result_U%d_" % U + NNetwork + "_CR1%d_CR2%d_CR3%d_fc%d" % (n_feature_map1, n_feature_map2, n_feature_map3, n_fully_connected_neuron) + "_test_acc_%.1f_" + start_date_time + ".dat" else : # Output model filename filename_weight_bias = "./model_U%d+U%d_" % (U1,U2) + NNetwork + "_CR1%d_CR2%d_CR3%d_fc%d" % (n_feature_map1, n_feature_map2, n_feature_map3, n_fully_connected_neuron) + "_test_acc_%.1f_" + start_date_time + ".ckpt" # Output of training measurements filename filename_measure = "./measurements_U%d+U%d_" % (U1,U2) + NNetwork + "_CR1%d_CR2%d_CR3%d_fc%d" % (n_feature_map1, n_feature_map2, n_feature_map3, n_fully_connected_neuron) + "_test_acc_%.1f_" + start_date_time + ".dat" # Output of classification result with labels filename_result = "./result_U%d+U%d_" % (U1,U2) + NNetwork + "_CR1%d_CR2%d_CR3%d_fc%d" % (n_feature_map1, n_feature_map2, n_feature_map3, n_fully_connected_neuron) + "_test_acc_%.1f_" + start_date_time + ".dat" else : if use_single_U : # Output model filename filename_weight_bias = "./" + start_date_time + "_model_U%d_" % U + NNetwork + "_CR1%d_CR2%d_CR3%d_fc%d" % (n_feature_map1, n_feature_map2, n_feature_map3, n_fully_connected_neuron) + "_test_acc_%.1f.ckpt" # Output of training measurements filename filename_measure = "./" + start_date_time + "_measurements_U%d_" % U + NNetwork + "_CR1%d_CR2%d_CR3%d_fc%d" % (n_feature_map1, n_feature_map2, n_feature_map3, n_fully_connected_neuron) + "_test_acc_%.1f.dat" # Output of classification 
result with labels filename_result = "./" + start_date_time + "_result_U%d_" % U + NNetwork + "_CR1%d_CR2%d_CR3%d_fc%d" % (n_feature_map1, n_feature_map2, n_feature_map3, n_fully_connected_neuron) + "_test_acc_%.1f.dat" else : # Output model filename filename_weight_bias = "./" + start_date_time + "_model_U%d+U%d_" % (U1,U2) + NNetwork + "_CR1%d_CR2%d_CR3%d_fc%d" % (n_feature_map1, n_feature_map2, n_feature_map3, n_fully_connected_neuron) + "_test_acc_%.1f.ckpt" # Output of training measurements filename filename_measure = "./" + start_date_time + "_measurements_U%d+U%d_" % (U1,U2) + NNetwork + "_CR1%d_CR2%d_CR3%d_fc%d" % (n_feature_map1, n_feature_map2, n_feature_map3, n_fully_connected_neuron) + "_test_acc_%.1f.dat" # Output of classification result with labels filename_result = "./" + start_date_time + "_result_U%d+U%d_" % (U1,U2) + NNetwork + "_CR1%d_CR2%d_CR3%d_fc%d" % (n_feature_map1, n_feature_map2, n_feature_map3, n_fully_connected_neuron) + "_test_acc_%.1f.dat" # Neural network architecture settings ----------------------------------------------- n_output_neuron = 2 # Spatial filter size: filter depth, height, and width filter_d = 2 filter_h = filter_d filter_w = filter_d # Adaptive learning rate is used. As the training goes on, the learning rate is # lowered progressively using an exponential decay function. # Optimizer initial learning rate eta0 = 1e-3 # decay rate decay_rate = 0.925 n_train_data = len(HSF.train.labels) while np.modf(float(n_train_data)/batch_size)[0] > 0.0 : print 'Warning! Number of data / batch size must be an integer.' print 'Number of data: %d' % n_train_data print 'Batch size : %d' % batch_size batch_size = int(input('Input new batch size: ')) # Number of training cycles per training epoch iteration_per_epoch=n_train_data/batch_size print 'Number of training data: %d' % n_train_data # x is a 2D-tensor and None means that a dimension can be of any length, # but in this case, it corresponds to the batch size. To start building # the computation graph, we'll create nodes for input images and target # output classes. The target output classes y_ will consist of a 2D # tensor, where each row is a one-hot (one-hot refers to a group of # bits among which only one is (1), the opposite is called one-cold) 2 # -dimensional vector indicating which class the # corresponding HSF data belongs to. x = tf.placeholder(tf.float32, [None, n_x*n_x*n_x * L]) y_ = tf.placeholder(tf.float32, [None, n_output_neuron]) # To prevent 0 gradients and break symmetry, one should generally # initialize weights with a small amount of noise for symmetry breaking. # To avoid "dead neurons" when using ReLU neurons, it is also a good # practice to initialize them with a slightly positive initial bias. def weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) def conv3d(x, W, pad='VALID'): # The convolution uses a stride of one and is zero padded so that # the output is the same size as the input. # tf.nn.conv3d(input, filter, strides, padding, name=None) return tf.nn.conv3d(x, W, strides = [1,1,1,1,1], padding=pad) def max_pool_2x2x2(x, pad='SAME'): # Max pooling over 2x2x2 blocks.
# tf.nn.max_pool3d(input, ksize, strides, padding, name=None) # input : shape [batch, depth, rows, cols, channels] # ksize : The size of the max pool window for each dimension of the # input tensor # strides : The stride of the sliding window for each dimension of # the input tensor. return tf.nn.max_pool3d(x, ksize=[1,2,2,2,1], strides=[1,2,2,2,1], padding=pad) # Feature extraction layer ----------------------------------------------------------- # First Convolution Layer # The convolution will compute n features for each mxmxm block. Its weight # tensor will have a shape of [filter_depth, filter_height, filter_width, # in_channels, out_channels]. W_conv1 = weight_variable([filter_d,filter_h,filter_w,L,n_feature_map1]) b_conv1 = bias_variable([n_feature_map1]) # To apply the layer, first reshape x to a 5D tensor, with the second to # fourth dimensions corresponding to the spatial depth, height, and width, # and the final dimension corresponding to the number of channels. x_image = tf.reshape(x, [-1,n_x,n_x,n_x,L]) # Then convolve x_image with the weight tensor, add the bias, apply the # ReLU function. Zero padding is used in conv3d, i.e. padding = 'SAME', # the output size : n_feature_map1 x n_x x n_x x n_x h_conv1 = tf.nn.relu(conv3d(x_image, W_conv1, pad='SAME') + b_conv1) # Second Convolution Layer W_conv2 = weight_variable([filter_d,filter_h,filter_w,n_feature_map1,n_feature_map2]) b_conv2 = bias_variable([n_feature_map2]) h_conv2 = tf.nn.relu(conv3d(h_conv1, W_conv2, pad='SAME') + b_conv2) # Third Convolution Layer W_conv3 = weight_variable([filter_d,filter_h,filter_w,n_feature_map2,n_feature_map3]) b_conv3 = bias_variable([n_feature_map3]) h_conv3 = tf.nn.relu(conv3d(h_conv2, W_conv3, pad='SAME') + b_conv3) # Classification layer --------------------------------------------------------------- # Fully-connected Layer # Now add a fully-connected layer with n_fully_connected_neuron neurons to # allow processing on the entire image. The tensor from the previous layer # is reshaped into a batch of vectors, multiplied by a weight matrix, added # to a bias, and passed through a ReLU. W_fc1 = weight_variable([n_feature_map3*(n_x)**3, n_fully_connected_neuron]) b_fc1 = bias_variable([n_fully_connected_neuron]) h_conv3_flat = tf.reshape(h_conv3, [-1, n_feature_map3*(n_x)**3]) h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1) # Dropout # To reduce overfitting, dropout will be applied before the readout layer. # We'll create a placeholder for the probability that a neuron's output is # kept during dropout. This allows us to turn dropout on during training, and # turn it off during testing. TensorFlow's tf.nn.dropout op automatically # handles scaling neuron outputs in addition to masking them, so dropout just # works without any additional scaling. keep_prob = tf.placeholder(tf.float32) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) # Readout layer # Finally, a softmax regression layer is added.
W_fc2 = weight_variable([n_fully_connected_neuron,n_output_neuron]) b_fc2 = bias_variable([n_output_neuron]) y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2) # Train and Evaluate the Model cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1])) # tf.train.exponential_decay(learning_rate, global_step, decay_steps, decay_rate, staircase=False) # Use adaptive learning rate global_step = tf.Variable(0, trainable=False) eta = tf.train.exponential_decay(eta0, global_step*batch_size, n_train_data, decay_rate) train_step = tf.train.AdamOptimizer(eta).minimize(cross_entropy, global_step=global_step) correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Before Variables can be used within a session, they must be initialized # using that session. sess.run(tf.initialize_all_variables()) # Training --------------------------------------------------------------------------- # Training the model can be accomplished by repeatedly running train_step. Each # training iteration load n training examples. Then, the train_step operation can be # run using feed_dict to replace the placeholder tensors x and y_ with the training # examples. Note: any tensor in the computation graph can be replcaed using feed_dict. start_time = time.time() # Check if the trained model checkpoint file is located in the current file directory # before restoring. if continue_training_using_trained_model : skip = False file_exist = os.path.isfile(filename_trained_model) while (not(file_exist) and not(skip)) : print '%s is not found in the current directory.' % filename_trained_model.replace('./','') skip = raw_input('Select T to start training from scratch or F to enter the filename of another trained model: ') while skip not in ['T','F']: skip = raw_input('Select T or F: ') if skip == 'T' : skip = True else : skip = False if skip : file_exist = False else : filename_trained_model = raw_input('Input trained model filename: ') while not(os.path.isfile(filename_trained_model)) : print '%s is not found in the current directory.'% filename_trained_model.replace('./','') filename_trained_model = raw_input('Input trained model filename: ') filename_trained_model = './' + filename_trained_model if os.path.isfile(filename_trained_model) : skip = True if file_exist : print 'Continue training using %s.' % filename_trained_model.replace('./','') saver = tf.train.Saver([W_conv1, b_conv1, W_conv2, b_conv2, W_conv3, b_conv3, W_fc1, b_fc1, W_fc2, b_fc2]) # Restore trained model. save_path = saver.restore(sess, filename_trained_model) # Calculate the number of data to collect for the whole training cycle. ndata_collect_per_epoch = round(float(n_train_data)/batch_size/100) if ndata_collect_per_epoch > 1 : ndata_collect = int(ndata_collect_per_epoch*epochs) else : ndata_collect = int(epochs) # Initialise data table. # First column : Training epochs # Second column: Training accuracy # Third column : Testing accuracy # Fourth column: Cost Table_measure = np.zeros(( ndata_collect, 4)) # Initialise the counter for number of data collected. n = 0 fractional_epoch = batch_size*100/float(n_train_data) print 'Total number of training epochs: %.1f' % (ndata_collect*fractional_epoch) # Initialise counter for checking overtraining/ overfitting. 
n_overtraining_counter = 0 m = 0 Overtraining = False slow_learning = False best_epoch = 0 file_save_counter = 0 for j in range(epochs): # Break out of the training epoch loop if overtraining is encountered. if Overtraining : break if best_test_accuracy <= 0.6 and j >= 4 : slow_learning = True print 'Slow learning. Exiting...' break for i in range(iteration_per_epoch): batch = HSF.train.next_batch(batch_size) if i%100 == 0: train_accuracy = accuracy.eval(feed_dict={ x: batch[0], y_: batch[1], keep_prob: 1.0}) test_accuracy = accuracy.eval(feed_dict={ x: HSF.test.images, y_: HSF.test.labels, keep_prob: 1.0}) Cost = cross_entropy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0}) print '%.2fs, epoch %.2f, training accuracy %g, test accuracy %g, cost %g' % (time.time()-start_time,(n+1)*fractional_epoch, train_accuracy, test_accuracy, Cost) Table_measure[n,0] = n*fractional_epoch Table_measure[n,1] = train_accuracy Table_measure[n,2] = test_accuracy Table_measure[n,3] = Cost # To avoid multiple training, the model is saved when the difference between testing # accuracy and training accuracy doesn't exceed a set value (it is set to 0.05 here) # and if the current testing accuracy is higher than the previous. delta_accuracy = train_accuracy - test_accuracy if (test_accuracy > best_test_accuracy) and (delta_accuracy <= delta_accuracy_threshold) and (delta_accuracy > 0) : # Update the best test accuracy best_test_accuracy = test_accuracy # Save the best model thus far if the above two criteria are met. print 'Saving model %s and measurements %s.' % ((filename_weight_bias%(best_test_accuracy*100)).replace('./',''), (filename_measure%(best_test_accuracy*100)).replace('./','')) saver = tf.train.Saver([W_conv1, b_conv1, W_conv2, b_conv2, W_conv3, b_conv3, W_fc1, b_fc1, W_fc2, b_fc2]) best_epoch = (n+1)*fractional_epoch if file_save_counter == 0 : filename_weight_bias_tmp = (filename_weight_bias%(best_test_accuracy*100)) filename_measure_tmp = (filename_measure%(best_test_accuracy*100)) save_path = saver.save(sess, filename_weight_bias_tmp ) np.savetxt( filename_measure_tmp, Table_measure[:n+1,:]) check_model = tf.reduce_mean(W_conv1).eval() file_save_counter += 1 else : os.remove( filename_weight_bias_tmp ) os.remove( filename_weight_bias_tmp + '.meta' ) os.remove( filename_measure_tmp ) filename_weight_bias_tmp = (filename_weight_bias%(best_test_accuracy*100)) filename_measure_tmp = (filename_measure%(best_test_accuracy*100)) save_path = saver.save(sess, filename_weight_bias_tmp ) np.savetxt( filename_measure_tmp, Table_measure[:n+1,:]) check_model = tf.reduce_mean(W_conv1).eval() file_save_counter += 1 # Check for overtraining/ overfitting. If so, stop training and break out of the # training iteration per epoch loop. if train_accuracy > test_accuracy : if m == 0 : # If training accuracy is greater than test accuracy on first account, set # counter to 1. n_overtraining_counter = 1 m = n elif (m+1) == n : # If training accuracy is greater than test accuracy consecutively, increase # counter. n_overtraining_counter += 1 m = n elif (m+1) < n : # Reset counter otherwise. n_overtraining_counter = 0 m = 0 if n_overtraining_counter >= overtraining_threshold or np.isnan(Cost): print 'Overtraining encountered. Stopping training.' Table_measure = Table_measure[:n+1,:] Overtraining = True break else : Overtraining = False n += 1 train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) if test_accuracy <= 0.55 and n*fractional_epoch >= 12 : slow_learning = True print 'Slow learning. 
Exiting...' break if not(Overtraining) : # Final check to save the best model. train_accuracy = accuracy.eval(feed_dict={ x: batch[0], y_: batch[1], keep_prob: 1.0}) test_accuracy = accuracy.eval(feed_dict={ x: HSF.test.images, y_: HSF.test.labels, keep_prob: 1.0}) Cost = cross_entropy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0}) delta_accuracy = abs(train_accuracy - test_accuracy) if test_accuracy > best_test_accuracy : if (delta_accuracy <= delta_accuracy_threshold) and delta_accuracy > 0 : # Update the best test accuracy best_test_accuracy = test_accuracy print 'Saving model and measurements...' saver = tf.train.Saver([W_conv1, b_conv1, W_conv2, b_conv2, W_conv3, b_conv3, W_fc1, b_fc1, W_fc2, b_fc2]) check_model = tf.reduce_mean(W_conv1).eval() best_epoch = ndata_collect*fractional_epoch os.remove( filename_weight_bias_tmp ) os.remove( filename_weight_bias_tmp + '.meta' ) os.remove( filename_measure_tmp ) filename_weight_bias_tmp = (filename_weight_bias%(best_test_accuracy*100)) filename_measure_tmp = (filename_measure%(best_test_accuracy*100)) save_path = saver.save(sess, filename_weight_bias_tmp ) np.savetxt(filename_measure_tmp, Table_measure) print '%.2fs, epoch %.2f, training accuracy %g, test accuracy %g, cost %g' % (time.time()-start_time,(n+1)*fractional_epoch, train_accuracy, test_accuracy, Cost) else : os.remove( filename_measure_tmp ) filename_measure_tmp = (filename_measure%(best_test_accuracy*100)) print 'Saving measurements %s.' % (filename_measure%(best_test_accuracy*100)).replace('./','') # Save the measurements: # First column : Training epochs # Second column: Training accuracy # Third column : Testing accuracy # Fourth column: Cost np.savetxt(filename_measure_tmp, Table_measure[:n+1,:]) if best_epoch == 0 : print 'Training model is not saved as saving criteria are not met. Classification will not be performed.' saver.restore(sess, filename_trained_model) model_saving_criteria_not_met = True else : print 'Best training epoch: %g' % best_epoch print 'Model saved in file: ', save_path # To proceed, load the best (saved) model instead of the last training model. filename_trained_model = (filename_weight_bias%(best_test_accuracy*100)) saver.restore(sess, filename_trained_model) # Check if the saved model and the restored model are the same. if check_model != tf.reduce_mean(W_conv1).eval() : print 'Warning! Best training model and the restored model are incompatible. Exiting...' sys.exit() model_saving_criteria_not_met = False # Classification --------------------------------------------------------------------- if not(slow_learning) and not(model_saving_criteria_not_met): print 'Performing classification using %s.'
% filename_trained_model.replace('./','') # First column : Temperature # Second column: Average classified output of the second neuron # Third column : Average classified output of the first neuron # Fourth column: Classification accuracy # Fifth column : Number of data used Table = np.zeros(( len(dtau), 5)) Table[:,0] = dtau for i in range(len(HSF.test.temps)) : # Output of neural net vs temperature Table[HSF.test.temps[i],1] += np.argmax(y_conv.eval(feed_dict={x: HSF.test.images[i,:].reshape(1,V4d), keep_prob: 1.0})) # Accuracy vs temperature Table[HSF.test.temps[i],3] += accuracy.eval(feed_dict={x: HSF.test.images[i,:].reshape(1,V4d), y_: HSF.test.labels[i,:].reshape(1,n_output_neuron), keep_prob: 1.0}) Table[HSF.test.temps[i],-1] += 1 # Normalize the output of the second neuron Table[:,1] = Table[:,1]/Table[:,-1].astype('float') # Normalized output of the first neuron Table[:,2] = 1.0-Table[:,1] # Normalize the classification accuracy Table[:,3] = Table[:,3]/Table[:,-1].astype('float') np.savetxt((filename_result%(best_test_accuracy*100)), Table) print 'Result saved as %s.' % (filename_result%(best_test_accuracy*100))
kchng/Quantum_machine_learning
TF_HSF_CNN0f_core.py
Python
apache-2.0
28,818
[ "NEURON" ]
c8607ef443ca5d23263dd396e77cf44aaa07a381bf1acf3a3ad88302dfdcbc38
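The trainer above anneals its Adam step size with tf.train.exponential_decay, wired so the rate shrinks by decay_rate once per full pass over the training set. Below is a minimal sketch of just that schedule in the same TF 1.x graph style; the constants and the stand-in loss are illustrative, not values taken from the entry:

# Hedged sketch of the adaptive learning-rate wiring used by the trainer.
import tensorflow as tf

eta0, decay_rate = 1e-3, 0.925          # initial rate, per-epoch decay
batch_size, n_train_data = 100, 10000   # illustrative sizes

global_step = tf.Variable(0, trainable=False)
# eta = eta0 * decay_rate ** (global_step * batch_size / n_train_data)
eta = tf.train.exponential_decay(eta0, global_step * batch_size,
                                 n_train_data, decay_rate)
loss = tf.reduce_sum(tf.square(tf.Variable([1.0, 2.0])))  # stand-in loss
train_step = tf.train.AdamOptimizer(eta).minimize(loss, global_step=global_step)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        _, lr = sess.run([train_step, eta])
        print('learning rate: %g' % lr)

Passing global_step to minimize() is what advances the schedule: each training step increments the counter, so the effective rate decays smoothly without any manual bookkeeping in the training loop.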
# sql/expression.py # Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Defines the base components of SQL expression trees. All components are derived from a common base class :class:`.ClauseElement`. Common behaviors are organized based on class hierarchies, in some cases via mixins. All object construction from this package occurs via functions which in some cases will construct composite :class:`.ClauseElement` structures together, and in other cases simply return a single :class:`.ClauseElement` constructed directly. The function interface affords a more "DSL-ish" feel to constructing SQL expressions and also allows future class reorganizations. Even though classes are not constructed directly from the outside, most classes which have additional public methods are considered to be public (i.e. have no leading underscore). Other classes which are "semi-public" are marked with a single leading underscore; these classes usually have few or no public methods and are less guaranteed to stay the same in future releases. """ import itertools, re from operator import attrgetter from sqlalchemy import util, exc from sqlalchemy.sql import operators from sqlalchemy.sql.operators import Operators, ColumnOperators from sqlalchemy.sql.visitors import Visitable, cloned_traverse import operator functions = util.importlater("sqlalchemy.sql", "functions") sqlutil = util.importlater("sqlalchemy.sql", "util") sqltypes = util.importlater("sqlalchemy", "types") default = util.importlater("sqlalchemy.engine", "default") __all__ = [ 'Alias', 'ClauseElement', 'ColumnCollection', 'ColumnElement', 'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join', 'Select', 'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc', 'between', 'bindparam', 'case', 'cast', 'column', 'delete', 'desc', 'distinct', 'except_', 'except_all', 'exists', 'extract', 'func', 'modifier', 'collate', 'insert', 'intersect', 'intersect_all', 'join', 'label', 'literal', 'literal_column', 'not_', 'null', 'nullsfirst', 'nullslast', 'or_', 'outparam', 'outerjoin', 'over', 'select', 'subquery', 'table', 'text', 'tuple_', 'type_coerce', 'union', 'union_all', 'update', ] PARSE_AUTOCOMMIT = util.symbol('PARSE_AUTOCOMMIT') def nullsfirst(column): """Return a NULLS FIRST ``ORDER BY`` clause element. e.g.:: someselect.order_by(desc(table1.mycol).nullsfirst()) produces:: ORDER BY mycol DESC NULLS FIRST """ return _UnaryExpression(column, modifier=operators.nullsfirst_op) def nullslast(column): """Return a NULLS LAST ``ORDER BY`` clause element. e.g.:: someselect.order_by(desc(table1.mycol).nullslast()) produces:: ORDER BY mycol DESC NULLS LAST """ return _UnaryExpression(column, modifier=operators.nullslast_op) def desc(column): """Return a descending ``ORDER BY`` clause element. e.g.:: someselect.order_by(desc(table1.mycol)) produces:: ORDER BY mycol DESC """ return _UnaryExpression(column, modifier=operators.desc_op) def asc(column): """Return an ascending ``ORDER BY`` clause element. e.g.:: someselect.order_by(asc(table1.mycol)) produces:: ORDER BY mycol ASC """ return _UnaryExpression(column, modifier=operators.asc_op) def outerjoin(left, right, onclause=None): """Return an ``OUTER JOIN`` clause element. The returned object is an instance of :class:`.Join`. Similar functionality is also available via the :meth:`~.FromClause.outerjoin()` method on any :class:`.FromClause`. 
:param left: The left side of the join. :param right: The right side of the join. :param onclause: Optional criterion for the ``ON`` clause, is derived from foreign key relationships established between left and right otherwise. To chain joins together, use the :meth:`.FromClause.join` or :meth:`.FromClause.outerjoin` methods on the resulting :class:`.Join` object. """ return Join(left, right, onclause, isouter=True) def join(left, right, onclause=None, isouter=False): """Return a ``JOIN`` clause element (regular inner join). The returned object is an instance of :class:`.Join`. Similar functionality is also available via the :meth:`~.FromClause.join()` method on any :class:`.FromClause`. :param left: The left side of the join. :param right: The right side of the join. :param onclause: Optional criterion for the ``ON`` clause, is derived from foreign key relationships established between left and right otherwise. To chain joins together, use the :meth:`.FromClause.join` or :meth:`.FromClause.outerjoin` methods on the resulting :class:`.Join` object. """ return Join(left, right, onclause, isouter) def select(columns=None, whereclause=None, from_obj=[], **kwargs): """Returns a ``SELECT`` clause element. Similar functionality is also available via the :func:`select()` method on any :class:`.FromClause`. The returned object is an instance of :class:`.Select`. All arguments which accept :class:`.ClauseElement` arguments also accept string arguments, which will be converted as appropriate into either :func:`text()` or :func:`literal_column()` constructs. :param columns: A list of :class:`.ClauseElement` objects, typically :class:`.ColumnElement` objects or subclasses, which will form the columns clause of the resulting statement. For all members which are instances of :class:`.Selectable`, the individual :class:`.ColumnElement` members of the :class:`.Selectable` will be added individually to the columns clause. For example, specifying a :class:`~sqlalchemy.schema.Table` instance will result in all the contained :class:`~sqlalchemy.schema.Column` objects within to be added to the columns clause. This argument is not present on the form of :func:`select()` available on :class:`~sqlalchemy.schema.Table`. :param whereclause: A :class:`.ClauseElement` expression which will be used to form the ``WHERE`` clause. :param from_obj: A list of :class:`.ClauseElement` objects which will be added to the ``FROM`` clause of the resulting statement. Note that "from" objects are automatically located within the columns and whereclause ClauseElements. Use this parameter to explicitly specify "from" objects which are not automatically locatable. This could include :class:`~sqlalchemy.schema.Table` objects that aren't otherwise present, or :class:`.Join` objects whose presence will supercede that of the :class:`~sqlalchemy.schema.Table` objects already located in the other clauses. :param autocommit: Deprecated. Use .execution_options(autocommit=<True|False>) to set the autocommit option. :param bind=None: an :class:`~.base.Engine` or :class:`~.base.Connection` instance to which the resulting :class:`.Select` object will be bound. The :class:`.Select` object will otherwise automatically bind to whatever :class:`~.base.Connectable` instances can be located within its contained :class:`.ClauseElement` members. :param correlate=True: indicates that this :class:`.Select` object should have its contained :class:`.FromClause` elements "correlated" to an enclosing :class:`.Select` object. 
This means that any :class:`.ClauseElement` instance within the "froms" collection of this :class:`.Select` which is also present in the "froms" collection of an enclosing select will not be rendered in the ``FROM`` clause of this select statement. :param distinct=False: when ``True``, applies a ``DISTINCT`` qualifier to the columns clause of the resulting statement. The boolean argument may also be a column expression or list of column expressions - this is a special calling form which is understood by the Postgresql dialect to render the ``DISTINCT ON (<columns>)`` syntax. ``distinct`` is also available via the :meth:`~.Select.distinct` generative method. .. note:: The ``distinct`` keyword's acceptance of a string argument for usage with MySQL is deprecated. Use the ``prefixes`` argument or :meth:`~.Select.prefix_with`. :param for_update=False: when ``True``, applies ``FOR UPDATE`` to the end of the resulting statement. Certain database dialects also support alternate values for this parameter, for example mysql supports "read" which translates to ``LOCK IN SHARE MODE``, and oracle supports "nowait" which translates to ``FOR UPDATE NOWAIT``. :param group_by: a list of :class:`.ClauseElement` objects which will comprise the ``GROUP BY`` clause of the resulting select. :param having: a :class:`.ClauseElement` that will comprise the ``HAVING`` clause of the resulting select when ``GROUP BY`` is used. :param limit=None: a numerical value which usually compiles to a ``LIMIT`` expression in the resulting select. Databases that don't support ``LIMIT`` will attempt to provide similar functionality. :param offset=None: a numeric value which usually compiles to an ``OFFSET`` expression in the resulting select. Databases that don't support ``OFFSET`` will attempt to provide similar functionality. :param order_by: a scalar or list of :class:`.ClauseElement` objects which will comprise the ``ORDER BY`` clause of the resulting select. :param prefixes: a list of strings or :class:`.ClauseElement` objects to include directly after the SELECT keyword in the generated statement, for dialect-specific query features. ``prefixes`` is also available via the :meth:`~.Select.prefix_with` generative method. :param use_labels=False: when ``True``, the statement will be generated using labels for each column in the columns clause, which qualify each column with its parent table's (or aliases) name so that name conflicts between columns in different tables don't occur. The format of the label is <tablename>_<column>. The "c" collection of the resulting :class:`.Select` object will use these names as well for targeting column members. use_labels is also available via the :meth:`~._SelectBase.apply_labels` generative method. """ return Select(columns, whereclause=whereclause, from_obj=from_obj, **kwargs) def subquery(alias, *args, **kwargs): """Return an :class:`.Alias` object derived from a :class:`.Select`. name alias name \*args, \**kwargs all other arguments are delivered to the :func:`select` function. """ return Select(*args, **kwargs).alias(alias) def insert(table, values=None, inline=False, **kwargs): """Return an :class:`.Insert` clause element. Similar functionality is available via the :func:`insert()` method on :class:`~sqlalchemy.schema.Table`. :param table: The table to be inserted into. :param values: A dictionary which specifies the column specifications of the ``INSERT``, and is optional. 
If left as None, the column specifications are determined from the bind parameters used during the compile phase of the ``INSERT`` statement. If the bind parameters also are None during the compile phase, then the column specifications will be generated from the full list of table columns. Note that the :meth:`~Insert.values()` generative method may also be used for this. :param prefixes: A list of modifier keywords to be inserted between INSERT and INTO. Alternatively, the :meth:`~Insert.prefix_with` generative method may be used. :param inline: if True, SQL defaults will be compiled 'inline' into the statement and not pre-executed. If both `values` and compile-time bind parameters are present, the compile-time bind parameters override the information specified within `values` on a per-key basis. The keys within `values` can be either :class:`~sqlalchemy.schema.Column` objects or their string identifiers. Each key may reference one of: * a literal data value (i.e. string, number, etc.); * a Column object; * a SELECT statement. If a ``SELECT`` statement is specified which references this ``INSERT`` statement's table, the statement will be correlated against the ``INSERT`` statement. """ return Insert(table, values, inline=inline, **kwargs) def update(table, whereclause=None, values=None, inline=False, **kwargs): """Return an :class:`.Update` clause element. Similar functionality is available via the :func:`update()` method on :class:`~sqlalchemy.schema.Table`. :param table: The table to be updated. :param whereclause: A :class:`.ClauseElement` describing the ``WHERE`` condition of the ``UPDATE`` statement. Note that the :meth:`~Update.where()` generative method may also be used for this. :param values: A dictionary which specifies the ``SET`` conditions of the ``UPDATE``, and is optional. If left as None, the ``SET`` conditions are determined from the bind parameters used during the compile phase of the ``UPDATE`` statement. If the bind parameters also are None during the compile phase, then the ``SET`` conditions will be generated from the full list of table columns. Note that the :meth:`~Update.values()` generative method may also be used for this. :param inline: if True, SQL defaults will be compiled 'inline' into the statement and not pre-executed. If both `values` and compile-time bind parameters are present, the compile-time bind parameters override the information specified within `values` on a per-key basis. The keys within `values` can be either :class:`~sqlalchemy.schema.Column` objects or their string identifiers. Each key may reference one of: * a literal data value (i.e. string, number, etc.); * a Column object; * a SELECT statement. If a ``SELECT`` statement is specified which references this ``UPDATE`` statement's table, the statement will be correlated against the ``UPDATE`` statement. """ return Update( table, whereclause=whereclause, values=values, inline=inline, **kwargs) def delete(table, whereclause = None, **kwargs): """Return a :class:`.Delete` clause element. Similar functionality is available via the :func:`delete()` method on :class:`~sqlalchemy.schema.Table`. :param table: The table to be updated. :param whereclause: A :class:`.ClauseElement` describing the ``WHERE`` condition of the ``UPDATE`` statement. Note that the :meth:`~Delete.where()` generative method may be used instead. """ return Delete(table, whereclause, **kwargs) def and_(*clauses): """Join a list of clauses together using the ``AND`` operator. 
The ``&`` operator is also overloaded on all :class:`_CompareMixin` subclasses to produce the same result. """ if len(clauses) == 1: return clauses[0] return BooleanClauseList(operator=operators.and_, *clauses) def or_(*clauses): """Join a list of clauses together using the ``OR`` operator. The ``|`` operator is also overloaded on all :class:`_CompareMixin` subclasses to produce the same result. """ if len(clauses) == 1: return clauses[0] return BooleanClauseList(operator=operators.or_, *clauses) def not_(clause): """Return a negation of the given clause, i.e. ``NOT(clause)``. The ``~`` operator is also overloaded on all :class:`_CompareMixin` subclasses to produce the same result. """ return operators.inv(_literal_as_binds(clause)) def distinct(expr): """Return a ``DISTINCT`` clause. e.g.:: distinct(a) renders:: DISTINCT a """ expr = _literal_as_binds(expr) return _UnaryExpression(expr, operator=operators.distinct_op, type_=expr.type) def between(ctest, cleft, cright): """Return a ``BETWEEN`` predicate clause. Equivalent of SQL ``clausetest BETWEEN clauseleft AND clauseright``. The :func:`between()` method on all :class:`_CompareMixin` subclasses provides similar functionality. """ ctest = _literal_as_binds(ctest) return ctest.between(cleft, cright) def case(whens, value=None, else_=None): """Produce a ``CASE`` statement. whens A sequence of pairs, or alternatively a dict, to be translated into "WHEN / THEN" clauses. value Optional for simple case statements, produces a column expression as in "CASE <expr> WHEN ..." else\_ Optional as well, for case defaults produces the "ELSE" portion of the "CASE" statement. The expressions used for THEN and ELSE, when specified as strings, will be interpreted as bound values. To specify textual SQL expressions for these, use the :func:`literal_column` construct. The expressions used for the WHEN criterion may only be literal strings when "value" is present, i.e. CASE table.somecol WHEN "x" THEN "y". Otherwise, literal strings are not accepted in this position, and either the text(<string>) or literal(<string>) constructs must be used to interpret raw string values. Usage examples:: case([(orderline.c.qty > 100, item.c.specialprice), (orderline.c.qty > 10, item.c.bulkprice) ], else_=item.c.regularprice) case(value=emp.c.type, whens={ 'engineer': emp.c.salary * 1.1, 'manager': emp.c.salary * 3, }) Using :func:`literal_column()`, to allow for databases that do not support bind parameters in the ``then`` clause. The type can be specified which determines the type of the :func:`case()` construct overall:: case([(orderline.c.qty > 100, literal_column("'greaterthan100'", String)), (orderline.c.qty > 10, literal_column("'greaterthan10'", String)) ], else_=literal_column("'lethan10'", String)) """ return _Case(whens, value=value, else_=else_) def cast(clause, totype, **kwargs): """Return a ``CAST`` function. Equivalent of SQL ``CAST(clause AS totype)``. Use with a :class:`~sqlalchemy.types.TypeEngine` subclass, i.e:: cast(table.c.unit_price * table.c.qty, Numeric(10,4)) or:: cast(table.c.timestamp, DATE) """ return _Cast(clause, totype, **kwargs) def extract(field, expr): """Return the clause ``extract(field FROM expr)``.""" return _Extract(field, expr) def collate(expression, collation): """Return the clause ``expression COLLATE collation``. 
e.g.:: collate(mycolumn, 'utf8_bin') produces:: mycolumn COLLATE utf8_bin """ expr = _literal_as_binds(expression) return _BinaryExpression( expr, _literal_as_text(collation), operators.collate, type_=expr.type) def exists(*args, **kwargs): """Return an ``EXISTS`` clause as applied to a :class:`.Select` object. Calling styles are of the following forms:: # use on an existing select() s = select([table.c.col1]).where(table.c.col2==5) s = exists(s) # construct a select() at once exists(['*'], **select_arguments).where(criterion) # columns argument is optional, generates "EXISTS (SELECT *)" # by default. exists().where(table.c.col2==5) """ return _Exists(*args, **kwargs) def union(*selects, **kwargs): """Return a ``UNION`` of multiple selectables. The returned object is an instance of :class:`.CompoundSelect`. A similar :func:`union()` method is available on all :class:`.FromClause` subclasses. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.UNION, *selects, **kwargs) def union_all(*selects, **kwargs): """Return a ``UNION ALL`` of multiple selectables. The returned object is an instance of :class:`.CompoundSelect`. A similar :func:`union_all()` method is available on all :class:`.FromClause` subclasses. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.UNION_ALL, *selects, **kwargs) def except_(*selects, **kwargs): """Return an ``EXCEPT`` of multiple selectables. The returned object is an instance of :class:`.CompoundSelect`. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.EXCEPT, *selects, **kwargs) def except_all(*selects, **kwargs): """Return an ``EXCEPT ALL`` of multiple selectables. The returned object is an instance of :class:`.CompoundSelect`. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.EXCEPT_ALL, *selects, **kwargs) def intersect(*selects, **kwargs): """Return an ``INTERSECT`` of multiple selectables. The returned object is an instance of :class:`.CompoundSelect`. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.INTERSECT, *selects, **kwargs) def intersect_all(*selects, **kwargs): """Return an ``INTERSECT ALL`` of multiple selectables. The returned object is an instance of :class:`.CompoundSelect`. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.INTERSECT_ALL, *selects, **kwargs) def alias(selectable, name=None): """Return an :class:`.Alias` object. An :class:`.Alias` represents any :class:`.FromClause` with an alternate name assigned within SQL, typically using the ``AS`` clause when generated, e.g. ``SELECT * FROM table AS aliasname``. Similar functionality is available via the :meth:`~.FromClause.alias` method available on all :class:`.FromClause` subclasses. When an :class:`.Alias` is created from a :class:`.Table` object, this has the effect of the table being rendered as ``tablename AS aliasname`` in a SELECT statement. 
For :func:`.select` objects, the effect is that of creating a named subquery, i.e. ``(select ...) AS aliasname``. The ``name`` parameter is optional, and provides the name to use in the rendered SQL. If blank, an "anonymous" name will be deterministically generated at compile time. Deterministic means the name is guaranteed to be unique against other constructs used in the same statement, and will also be the same name for each successive compilation of the same statement object. :param selectable: any :class:`.FromClause` subclass, such as a table, select statement, etc. :param name: string name to be assigned as the alias. If ``None``, a name will be deterministically generated at compile time. """ return Alias(selectable, name=name) def literal(value, type_=None): """Return a literal clause, bound to a bind parameter. Literal clauses are created automatically when non- :class:`.ClauseElement` objects (such as strings, ints, dates, etc.) are used in a comparison operation with a :class:`_CompareMixin` subclass, such as a :class:`~sqlalchemy.schema.Column` object. Use this function to force the generation of a literal clause, which will be created as a :class:`_BindParamClause` with a bound value. :param value: the value to be bound. Can be any Python object supported by the underlying DB-API, or is translatable via the given type argument. :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which will provide bind-parameter translation for this literal. """ return _BindParamClause(None, value, type_=type_, unique=True) def tuple_(*expr): """Return a SQL tuple. Main usage is to produce a composite IN construct:: tuple_(table.c.col1, table.c.col2).in_( [(1, 2), (5, 12), (10, 19)] ) """ return _Tuple(*expr) def type_coerce(expr, type_): """Coerce the given expression into the given type, on the Python side only. :func:`.type_coerce` is roughly similar to :func:.`cast`, except no "CAST" expression is rendered - the given type is only applied towards expression typing and against received result values. e.g.:: from sqlalchemy.types import TypeDecorator import uuid class AsGuid(TypeDecorator): impl = String def process_bind_param(self, value, dialect): if value is not None: return str(value) else: return None def process_result_value(self, value, dialect): if value is not None: return uuid.UUID(value) else: return None conn.execute( select([type_coerce(mytable.c.ident, AsGuid)]).\\ where( type_coerce(mytable.c.ident, AsGuid) == uuid.uuid3(uuid.NAMESPACE_URL, 'bar') ) ) """ if hasattr(expr, '__clause_expr__'): return type_coerce(expr.__clause_expr__()) elif not isinstance(expr, Visitable): if expr is None: return null() else: return literal(expr, type_=type_) else: return _Label(None, expr, type_=type_) def label(name, obj): """Return a :class:`_Label` object for the given :class:`.ColumnElement`. A label changes the name of an element in the columns clause of a ``SELECT`` statement, typically via the ``AS`` SQL keyword. This functionality is more conveniently available via the :func:`label()` method on :class:`.ColumnElement`. name label name obj a :class:`.ColumnElement`. """ return _Label(name, obj) def column(text, type_=None): """Return a textual column clause, as would be in the columns clause of a ``SELECT`` statement. The object returned is an instance of :class:`.ColumnClause`, which represents the "syntactical" portion of the schema-level :class:`~sqlalchemy.schema.Column` object. 
It is often used directly within :func:`~.expression.select` constructs or with lightweight :func:`~.expression.table` constructs. Note that the :func:`~.expression.column` function is not part of the ``sqlalchemy`` namespace. It must be imported from the ``sql`` package:: from sqlalchemy.sql import table, column :param text: the name of the column. Quoting rules will be applied to the clause like any other column name. For textual column constructs that are not to be quoted, use the :func:`literal_column` function. :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` object which will provide result-set translation for this column. See :class:`.ColumnClause` for further examples. """ return ColumnClause(text, type_=type_) def literal_column(text, type_=None): """Return a textual column expression, as would be in the columns clause of a ``SELECT`` statement. The object returned supports further expressions in the same way as any other column object, including comparison, math and string operations. The type\_ parameter is important to determine proper expression behavior (such as, '+' means string concatenation or numerical addition based on the type). :param text: the text of the expression; can be any SQL expression. Quoting rules will not be applied. To specify a column-name expression which should be subject to quoting rules, use the :func:`column` function. :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` object which will provide result-set translation and additional expression semantics for this column. If left as None the type will be NullType. """ return ColumnClause(text, type_=type_, is_literal=True) def table(name, *columns): """Represent a textual table clause. The object returned is an instance of :class:`.TableClause`, which represents the "syntactical" portion of the schema-level :class:`~.schema.Table` object. It may be used to construct lightweight table constructs. Note that the :func:`~.expression.table` function is not part of the ``sqlalchemy`` namespace. It must be imported from the ``sql`` package:: from sqlalchemy.sql import table, column :param name: Name of the table. :param columns: A collection of :func:`~.expression.column` constructs. See :class:`.TableClause` for further examples. """ return TableClause(name, *columns) def bindparam(key, value=None, type_=None, unique=False, required=False, callable_=None): """Create a bind parameter clause with the given key. :param key: the key for this bind param. Will be used in the generated SQL statement for dialects that use named parameters. This value may be modified when part of a compilation operation, if other :class:`_BindParamClause` objects exist with the same key, or if its length is too long and truncation is required. :param value: Initial value for this bind param. This value may be overridden by the dictionary of parameters sent to statement compilation/execution. :param callable\_: A callable function that takes the place of "value". The function will be called at statement execution time to determine the ultimate value. Used for scenarios where the actual bind value cannot be determined at the point at which the clause construct is created, but embedded bind values are still desirable. :param type\_: A ``TypeEngine`` object that will be used to pre-process the value corresponding to this :class:`_BindParamClause` at execution time. 
:param unique: if True, the key name of this BindParamClause will be modified if another :class:`_BindParamClause` of the same name already has been located within the containing :class:`.ClauseElement`. :param required: a value is required at execution time. """ if isinstance(key, ColumnClause): return _BindParamClause(key.name, value, type_=key.type, callable_=callable_, unique=unique, required=required) else: return _BindParamClause(key, value, type_=type_, callable_=callable_, unique=unique, required=required) def outparam(key, type_=None): """Create an 'OUT' parameter for usage in functions (stored procedures), for databases which support them. The ``outparam`` can be used like a regular function parameter. The "output" value will be available from the :class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters`` attribute, which returns a dictionary containing the values. """ return _BindParamClause( key, None, type_=type_, unique=False, isoutparam=True) def text(text, bind=None, *args, **kwargs): """Create a SQL construct that is represented by a literal string. E.g.:: t = text("SELECT * FROM users") result = connection.execute(t) The advantages :func:`text` provides over a plain string are backend-neutral support for bind parameters, per-statement execution options, as well as bind parameter and result-column typing behavior, allowing SQLAlchemy type constructs to play a role when executing a statement that is specified literally. Bind parameters are specified by name, using the format ``:name``. E.g.:: t = text("SELECT * FROM users WHERE id=:user_id") result = connection.execute(t, user_id=12) To invoke SQLAlchemy typing logic for bind parameters, the ``bindparams`` list allows specification of :func:`bindparam` constructs which specify the type for a given name:: t = text("SELECT id FROM users WHERE updated_at>:updated", bindparams=[bindparam('updated', DateTime())] ) Typing during result row processing is also an important concern. Result column types are specified using the ``typemap`` dictionary, where the keys match the names of columns. These names are taken from what the DBAPI returns as ``cursor.description``:: t = text("SELECT id, name FROM users", typemap={ 'id':Integer, 'name':Unicode } ) The :func:`text` construct is used internally for most cases when a literal string is specified for part of a larger query, such as within :func:`select()`, :func:`update()`, :func:`insert()` or :func:`delete()`. In those cases, the same bind parameter syntax is applied:: s = select([users.c.id, users.c.name]).where("id=:user_id") result = connection.execute(s, user_id=12) Using :func:`text` explicitly usually implies the construction of a full, standalone statement. As such, SQLAlchemy refers to it as an :class:`.Executable` object, and it supports the :meth:`Executable.execution_options` method. For example, a :func:`text` construct that should be subject to "autocommit" can be set explicitly so using the ``autocommit`` option:: t = text("EXEC my_procedural_thing()").\\ execution_options(autocommit=True) Note that SQLAlchemy's usual "autocommit" behavior applies to :func:`text` constructs - that is, statements which begin with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``, or a variety of other phrases specific to certain backends, will be eligible for autocommit if no transaction is in progress. :param text: the text of the SQL statement to be created. use ``:<param>`` to specify bind parameters; they will be compiled to their engine-specific format. 
:param autocommit: Deprecated. Use .execution_options(autocommit=<True|False>) to set the autocommit option. :param bind: an optional connection or engine to be used for this text query. :param bindparams: a list of :func:`bindparam()` instances which can be used to define the types and/or initial values for the bind parameters within the textual statement; the keynames of the bindparams must match those within the text of the statement. The types will be used for pre-processing on bind values. :param typemap: a dictionary mapping the names of columns represented in the columns clause of a ``SELECT`` statement to type objects, which will be used to perform post-processing on columns within the result set. This argument applies to any expression that returns result sets. """ return _TextClause(text, bind=bind, *args, **kwargs) def over(func, partition_by=None, order_by=None): """Produce an OVER clause against a function. Used against aggregate or so-called "window" functions, for database backends that support window functions. E.g.:: from sqlalchemy import over over(func.row_number(), order_by='x') Would produce "ROW_NUMBER() OVER(ORDER BY x)". :param func: a :class:`.FunctionElement` construct, typically generated by :attr:`~.expression.func`. :param partition_by: a column element or string, or a list of such, that will be used as the PARTITION BY clause of the OVER construct. :param order_by: a column element or string, or a list of such, that will be used as the ORDER BY clause of the OVER construct. This function is also available from the :attr:`~.expression.func` construct itself via the :meth:`.FunctionElement.over` method. New in 0.7. """ return _Over(func, partition_by=partition_by, order_by=order_by) def null(): """Return a :class:`_Null` object, which compiles to ``NULL``. """ return _Null() def true(): """Return a :class:`_True` object, which compiles to ``true``, or the boolean equivalent for the target dialect. """ return _True() def false(): """Return a :class:`_False` object, which compiles to ``false``, or the boolean equivalent for the target dialect. """ return _False() class _FunctionGenerator(object): """Generate :class:`.Function` objects based on getattr calls.""" def __init__(self, **opts): self.__names = [] self.opts = opts def __getattr__(self, name): # passthru __ attributes; fixes pydoc if name.startswith('__'): try: return self.__dict__[name] except KeyError: raise AttributeError(name) elif name.endswith('_'): name = name[0:-1] f = _FunctionGenerator(**self.opts) f.__names = list(self.__names) + [name] return f def __call__(self, *c, **kwargs): o = self.opts.copy() o.update(kwargs) if len(self.__names) == 1: func = getattr(functions, self.__names[-1].lower(), None) if func is not None and \ isinstance(func, type) and \ issubclass(func, Function): return func(*c, **o) return Function(self.__names[-1], packagenames=self.__names[0:-1], *c, **o) # "func" global - i.e. func.count() func = _FunctionGenerator() """Generate SQL function expressions. ``func`` is a special object instance which generates SQL functions based on name-based attributes, e.g.:: >>> print func.count(1) count(:param_1) The element is a column-oriented SQL element like any other, and is used in that way:: >>> print select([func.count(table.c.id)]) SELECT count(sometable.id) FROM sometable Any name can be given to ``func``. If the function name is unknown to SQLAlchemy, it will be rendered exactly as is. 
For common SQL functions which SQLAlchemy is aware of, the name may be interpreted as a *generic function* which will be compiled appropriately to the target database:: >>> print func.current_timestamp() CURRENT_TIMESTAMP To call functions which are present in dot-separated packages, specify them in the same manner:: >>> print func.stats.yield_curve(5, 10) stats.yield_curve(:yield_curve_1, :yield_curve_2) SQLAlchemy can be made aware of the return type of functions to enable type-specific lexical and result-based behavior. For example, to ensure that a string-based function returns a Unicode value and is similarly treated as a string in expressions, specify :class:`~sqlalchemy.types.Unicode` as the type: >>> print func.my_string(u'hi', type_=Unicode) + ' ' + \ ... func.my_string(u'there', type_=Unicode) my_string(:my_string_1) || :my_string_2 || my_string(:my_string_3) The object returned by a ``func`` call is an instance of :class:`.Function`. This object meets the "column" interface, including comparison and labeling functions. The object can also be passed the :meth:`~.Connectable.execute` method of a :class:`.Connection` or :class:`.Engine`, where it will be wrapped inside of a SELECT statement first:: print connection.execute(func.current_timestamp()).scalar() A function can also be "bound" to a :class:`.Engine` or :class:`.Connection` using the ``bind`` keyword argument, providing an execute() as well as a scalar() method:: myfunc = func.current_timestamp(bind=some_engine) print myfunc.scalar() Functions which are interpreted as "generic" functions know how to calculate their return type automatically. For a listing of known generic functions, see :ref:`generic_functions`. """ # "modifier" global - i.e. modifier.distinct # TODO: use UnaryExpression for this instead ? modifier = _FunctionGenerator(group=False) class _generated_label(unicode): """A unicode subclass used to identify dynamically generated names.""" def _escape_for_generated(x): if isinstance(x, _generated_label): return x else: return x.replace('%', '%%') def _string_or_unprintable(element): if isinstance(element, basestring): return element else: try: return str(element) except: return "unprintable element %r" % element def _clone(element): return element._clone() def _expand_cloned(elements): """expand the given set of ClauseElements to be the set of all 'cloned' predecessors. """ return itertools.chain(*[x._cloned_set for x in elements]) def _select_iterables(elements): """expand tables into individual columns in the given list of column expressions. """ return itertools.chain(*[c._select_iterable for c in elements]) def _cloned_intersection(a, b): """return the intersection of sets a and b, counting any overlap between 'cloned' predecessors. The returned set is in terms of the enties present within 'a'. 
""" all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b)) return set(elem for elem in a if all_overlap.intersection(elem._cloned_set)) def _is_literal(element): return not isinstance(element, Visitable) and \ not hasattr(element, '__clause_element__') def _from_objects(*elements): return itertools.chain(*[element._from_objects for element in elements]) def _labeled(element): if not hasattr(element, 'name'): return element.label(None) else: return element def _column_as_key(element): if isinstance(element, basestring): return element if hasattr(element, '__clause_element__'): element = element.__clause_element__() return element.key def _literal_as_text(element): if isinstance(element, Visitable): return element elif hasattr(element, '__clause_element__'): return element.__clause_element__() elif isinstance(element, basestring): return _TextClause(unicode(element)) elif isinstance(element, (util.NoneType, bool)): return _const_expr(element) else: raise exc.ArgumentError( "SQL expression object or string expected." ) def _const_expr(element): if element is None: return null() elif element is False: return false() elif element is True: return true() else: raise exc.ArgumentError( "Expected None, False, or True" ) def _clause_element_as_expr(element): if hasattr(element, '__clause_element__'): return element.__clause_element__() else: return element def _literal_as_column(element): if isinstance(element, Visitable): return element elif hasattr(element, '__clause_element__'): return element.__clause_element__() else: return literal_column(str(element)) def _literal_as_binds(element, name=None, type_=None): if hasattr(element, '__clause_element__'): return element.__clause_element__() elif not isinstance(element, Visitable): if element is None: return null() else: return _BindParamClause(name, element, type_=type_, unique=True) else: return element def _type_from_args(args): for a in args: if not isinstance(a.type, sqltypes.NullType): return a.type else: return sqltypes.NullType def _no_literals(element): if hasattr(element, '__clause_element__'): return element.__clause_element__() elif not isinstance(element, Visitable): raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' " "function to indicate a SQL expression " "literal, or 'literal()' to indicate a " "bound value." % element) else: return element def _only_column_elements(element, name): if hasattr(element, '__clause_element__'): element = element.__clause_element__() if not isinstance(element, ColumnElement): raise exc.ArgumentError("Column-based expression object expected for argument '%s'; " "got: '%s', type %s" % (name, element, type(element))) return element def _corresponding_column_or_error(fromclause, column, require_embedded=False): c = fromclause.corresponding_column(column, require_embedded=require_embedded) if c is None: raise exc.InvalidRequestError( "Given column '%s', attached to table '%s', " "failed to locate a corresponding column from table '%s'" % (column, getattr(column, 'table', None),fromclause.description) ) return c @util.decorator def _generative(fn, *args, **kw): """Mark a method as generative.""" self = args[0]._generate() fn(self, *args[1:], **kw) return self def is_column(col): """True if ``col`` is an instance of :class:`.ColumnElement`.""" return isinstance(col, ColumnElement) class ClauseElement(Visitable): """Base class for elements of a programmatically constructed SQL expression. 
""" __visit_name__ = 'clause' _annotations = {} supports_execution = False _from_objects = [] bind = None def _clone(self): """Create a shallow copy of this ClauseElement. This method may be used by a generative API. Its also used as part of the "deep" copy afforded by a traversal that combines the _copy_internals() method. """ c = self.__class__.__new__(self.__class__) c.__dict__ = self.__dict__.copy() c.__dict__.pop('_cloned_set', None) # this is a marker that helps to "equate" clauses to each other # when a Select returns its list of FROM clauses. the cloning # process leaves around a lot of remnants of the previous clause # typically in the form of column expressions still attached to the # old table. c._is_clone_of = self return c @property def _constructor(self): """return the 'constructor' for this ClauseElement. This is for the purposes for creating a new object of this type. Usually, its just the element's __class__. However, the "Annotated" version of the object overrides to return the class of its proxied element. """ return self.__class__ @util.memoized_property def _cloned_set(self): """Return the set consisting all cloned anscestors of this ClauseElement. Includes this ClauseElement. This accessor tends to be used for FromClause objects to identify 'equivalent' FROM clauses, regardless of transformative operations. """ s = util.column_set() f = self while f is not None: s.add(f) f = getattr(f, '_is_clone_of', None) return s def __getstate__(self): d = self.__dict__.copy() d.pop('_is_clone_of', None) return d if util.jython: def __hash__(self): """Return a distinct hash code. ClauseElements may have special equality comparisons which makes us rely on them having unique hash codes for use in hash-based collections. Stock __hash__ doesn't guarantee unique values on platforms with moving GCs. """ return id(self) def _annotate(self, values): """return a copy of this ClauseElement with the given annotations dictionary. """ return sqlutil.Annotated(self, values) def _deannotate(self): """return a copy of this ClauseElement with an empty annotations dictionary. """ return self._clone() def unique_params(self, *optionaldict, **kwargs): """Return a copy with :func:`bindparam()` elments replaced. Same functionality as ``params()``, except adds `unique=True` to affected bind parameters so that multiple statements can be used. """ return self._params(True, optionaldict, kwargs) def params(self, *optionaldict, **kwargs): """Return a copy with :func:`bindparam()` elments replaced. Returns a copy of this ClauseElement with :func:`bindparam()` elements replaced with values taken from the given dictionary:: >>> clause = column('x') + bindparam('foo') >>> print clause.compile().params {'foo':None} >>> print clause.params({'foo':7}).compile().params {'foo':7} """ return self._params(False, optionaldict, kwargs) def _params(self, unique, optionaldict, kwargs): if len(optionaldict) == 1: kwargs.update(optionaldict[0]) elif len(optionaldict) > 1: raise exc.ArgumentError( "params() takes zero or one positional dictionary argument") def visit_bindparam(bind): if bind.key in kwargs: bind.value = kwargs[bind.key] if unique: bind._convert_to_unique() return cloned_traverse(self, {}, {'bindparam':visit_bindparam}) def compare(self, other, **kw): """Compare this ClauseElement to the given ClauseElement. Subclasses should override the default behavior, which is a straight identity comparison. \**kw are arguments consumed by subclass compare() methods and may be used to modify the criteria for comparison. 
        (see :class:`.ColumnElement`)

        """
        return self is other

    def _copy_internals(self, clone=_clone):
        """Reassign internal elements to be clones of themselves.

        Called during a copy-and-traverse operation on newly
        shallow-copied elements to create a deep copy.

        """
        pass

    def get_children(self, **kwargs):
        """Return immediate child elements of this :class:`.ClauseElement`.

        This is used for visit traversal.

        \**kwargs may contain flags that change the collection that is
        returned, for example to return a subset of items in order to
        cut down on larger traversals, or to return child items from a
        different context (such as schema-level collections instead of
        clause-level).

        """
        return []

    def self_group(self, against=None):
        """Apply a 'grouping' to this :class:`.ClauseElement`.

        This method is overridden by subclasses to return a
        "grouping" construct, i.e. parentheses.   In particular
        it's used by "binary" expressions to provide a grouping
        around themselves when placed into a larger expression,
        as well as by :func:`.select` constructs when placed into
        the FROM clause of another :func:`.select`.  (Note that
        subqueries should normally be created using the
        :func:`.Select.alias` method, as many platforms require
        nested SELECT statements to be named).

        As expressions are composed together, the application of
        :meth:`self_group` is automatic - end-user code should never
        need to use this method directly.  Note that SQLAlchemy's
        clause constructs take operator precedence into account -
        so parentheses might not be needed, for example, in
        an expression like ``x OR (y AND z)`` - AND takes precedence
        over OR.

        The base :meth:`self_group` method of :class:`.ClauseElement`
        just returns self.
        """
        return self

    @util.deprecated('0.7',
                     'Only SQL expressions which subclass '
                     ':class:`.Executable` may provide the '
                     ':func:`.execute` method.')
    def execute(self, *multiparams, **params):
        """Compile and execute this :class:`.ClauseElement`.

        """
        e = self.bind
        if e is None:
            label = getattr(self, 'description', self.__class__.__name__)
            msg = ('This %s does not support direct execution.' % label)
            raise exc.UnboundExecutionError(msg)
        return e._execute_clauseelement(self, multiparams, params)

    @util.deprecated('0.7',
                     'Only SQL expressions which subclass '
                     ':class:`.Executable` may provide the '
                     ':func:`.scalar` method.')
    def scalar(self, *multiparams, **params):
        """Compile and execute this :class:`.ClauseElement`, returning
        the result's scalar representation.

        """
        return self.execute(*multiparams, **params).scalar()

    def compile(self, bind=None, dialect=None, **kw):
        """Compile this SQL expression.

        The return value is a :class:`~sqlalchemy.engine.Compiled` object.
        Calling ``str()`` or ``unicode()`` on the returned value will yield a
        string representation of the result. The
        :class:`~sqlalchemy.engine.Compiled` object also can return a
        dictionary of bind parameter names and values
        using the ``params`` accessor.

        :param bind: An ``Engine`` or ``Connection`` from which a
            ``Compiled`` will be acquired. This argument takes precedence over
            this :class:`.ClauseElement`'s bound engine, if any.

        :param column_keys: Used for INSERT and UPDATE statements, a list of
            column names which should be present in the VALUES clause of the
            compiled statement. If ``None``, all columns from the target table
            object are rendered.

        :param dialect: A ``Dialect`` instance from which a ``Compiled``
            will be acquired. This argument takes precedence over the `bind`
            argument as well as this :class:`.ClauseElement`'s bound engine, if
            any.
:param inline: Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement's VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key `Column`. """ if not dialect: if bind: dialect = bind.dialect elif self.bind: dialect = self.bind.dialect bind = self.bind else: dialect = default.DefaultDialect() return self._compiler(dialect, bind=bind, **kw) def _compiler(self, dialect, **kw): """Return a compiler appropriate for this ClauseElement, given a Dialect.""" return dialect.statement_compiler(dialect, self, **kw) def __str__(self): # Py3K #return unicode(self.compile()) # Py2K return unicode(self.compile()).encode('ascii', 'backslashreplace') # end Py2K def __and__(self, other): return and_(self, other) def __or__(self, other): return or_(self, other) def __invert__(self): return self._negate() def __nonzero__(self): raise TypeError("Boolean value of this clause is not defined") def _negate(self): if hasattr(self, 'negation_clause'): return self.negation_clause else: return _UnaryExpression( self.self_group(against=operators.inv), operator=operators.inv, negate=None) def __repr__(self): friendly = getattr(self, 'description', None) if friendly is None: return object.__repr__(self) else: return '<%s.%s at 0x%x; %s>' % ( self.__module__, self.__class__.__name__, id(self), friendly) class _Immutable(object): """mark a ClauseElement as 'immutable' when expressions are cloned.""" def unique_params(self, *optionaldict, **kwargs): raise NotImplementedError("Immutable objects do not support copying") def params(self, *optionaldict, **kwargs): raise NotImplementedError("Immutable objects do not support copying") def _clone(self): return self class _CompareMixin(ColumnOperators): """Defines comparison and math operations for :class:`.ClauseElement` instances. See :class:`.ColumnOperators` and :class:`.Operators` for descriptions of all operations. 
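
    For example, the equality operator produces a new
    :class:`_BinaryExpression` construct (a sketch; output shown is
    illustrative)::

        >>> from sqlalchemy.sql.expression import column
        >>> print column('x') == 5
        x = :x_1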
""" def __compare(self, op, obj, negate=None, reverse=False, **kwargs ): if obj is None or isinstance(obj, _Null): if op == operators.eq: return _BinaryExpression(self, null(), operators.is_, negate=operators.isnot) elif op == operators.ne: return _BinaryExpression(self, null(), operators.isnot, negate=operators.is_) else: raise exc.ArgumentError("Only '='/'!=' operators can " "be used with NULL") else: obj = self._check_literal(op, obj) if reverse: return _BinaryExpression(obj, self, op, type_=sqltypes.BOOLEANTYPE, negate=negate, modifiers=kwargs) else: return _BinaryExpression(self, obj, op, type_=sqltypes.BOOLEANTYPE, negate=negate, modifiers=kwargs) def __operate(self, op, obj, reverse=False): obj = self._check_literal(op, obj) if reverse: left, right = obj, self else: left, right = self, obj if left.type is None: op, result_type = sqltypes.NULLTYPE._adapt_expression(op, right.type) elif right.type is None: op, result_type = left.type._adapt_expression(op, sqltypes.NULLTYPE) else: op, result_type = left.type._adapt_expression(op, right.type) return _BinaryExpression(left, right, op, type_=result_type) # a mapping of operators with the method they use, along with their negated # operator for comparison operators operators = { operators.add : (__operate,), operators.mul : (__operate,), operators.sub : (__operate,), # Py2K operators.div : (__operate,), # end Py2K operators.mod : (__operate,), operators.truediv : (__operate,), operators.lt : (__compare, operators.ge), operators.le : (__compare, operators.gt), operators.ne : (__compare, operators.eq), operators.gt : (__compare, operators.le), operators.ge : (__compare, operators.lt), operators.eq : (__compare, operators.ne), operators.like_op : (__compare, operators.notlike_op), operators.ilike_op : (__compare, operators.notilike_op), } def operate(self, op, *other, **kwargs): o = _CompareMixin.operators[op] return o[0](self, op, other[0], *o[1:], **kwargs) def reverse_operate(self, op, other, **kwargs): o = _CompareMixin.operators[op] return o[0](self, op, other, reverse=True, *o[1:], **kwargs) def in_(self, other): """See :meth:`.ColumnOperators.in_`.""" return self._in_impl(operators.in_op, operators.notin_op, other) def _in_impl(self, op, negate_op, seq_or_selectable): seq_or_selectable = _clause_element_as_expr(seq_or_selectable) if isinstance(seq_or_selectable, _ScalarSelect): return self.__compare(op, seq_or_selectable, negate=negate_op) elif isinstance(seq_or_selectable, _SelectBase): # TODO: if we ever want to support (x, y, z) IN (select x, # y, z from table), we would need a multi-column version of # as_scalar() to produce a multi- column selectable that # does not export itself as a FROM clause return self.__compare(op, seq_or_selectable.as_scalar(), negate=negate_op) elif isinstance(seq_or_selectable, (Selectable, _TextClause)): return self.__compare(op, seq_or_selectable, negate=negate_op) # Handle non selectable arguments as sequences args = [] for o in seq_or_selectable: if not _is_literal(o): if not isinstance(o, _CompareMixin): raise exc.InvalidRequestError('in() function accept' 's either a list of non-selectable values, ' 'or a selectable: %r' % o) else: o = self._bind_param(op, o) args.append(o) if len(args) == 0: # Special case handling for empty IN's, behave like # comparison against zero row selectable. We use != to # build the contradiction as it handles NULL values # appropriately, i.e. "not (x IN ())" should not return NULL # values for x. util.warn('The IN-predicate on "%s" was invoked with an ' 'empty sequence. 
This results in a '
                      'contradiction, which nonetheless can be '
                      'expensive to evaluate.  Consider alternative '
                      'strategies for improved performance.' % self)
            return self != self

        return self.__compare(op,
                              ClauseList(*args).self_group(against=op),
                              negate=negate_op)

    def __neg__(self):
        """See :meth:`.ColumnOperators.__neg__`."""
        return _UnaryExpression(self, operator=operators.neg)

    def startswith(self, other, escape=None):
        """See :meth:`.ColumnOperators.startswith`."""
        # use __radd__ to force string concat behavior
        return self.__compare(
            operators.like_op,
            literal_column("'%'", type_=sqltypes.String).__radd__(
                                self._check_literal(operators.like_op, other)
                            ),
            escape=escape)

    def endswith(self, other, escape=None):
        """See :meth:`.ColumnOperators.endswith`."""
        return self.__compare(
            operators.like_op,
            literal_column("'%'", type_=sqltypes.String) +
                self._check_literal(operators.like_op, other),
            escape=escape)

    def contains(self, other, escape=None):
        """See :meth:`.ColumnOperators.contains`."""
        return self.__compare(
            operators.like_op,
            literal_column("'%'", type_=sqltypes.String) +
                self._check_literal(operators.like_op, other) +
                literal_column("'%'", type_=sqltypes.String),
            escape=escape)

    def match(self, other):
        """See :meth:`.ColumnOperators.match`."""
        return self.__compare(operators.match_op,
                              self._check_literal(operators.match_op,
                                                  other))

    def label(self, name):
        """Produce a column label, i.e. ``<columnname> AS <name>``.

        This is a shortcut to the :func:`~.expression.label` function.

        If 'name' is None, an anonymous label name will be generated.

        """
        return _Label(name, self, self.type)

    def desc(self):
        """See :meth:`.ColumnOperators.desc`."""
        return desc(self)

    def asc(self):
        """See :meth:`.ColumnOperators.asc`."""
        return asc(self)

    def nullsfirst(self):
        """See :meth:`.ColumnOperators.nullsfirst`."""
        return nullsfirst(self)

    def nullslast(self):
        """See :meth:`.ColumnOperators.nullslast`."""
        return nullslast(self)

    def distinct(self):
        """See :meth:`.ColumnOperators.distinct`."""
        return _UnaryExpression(self,
                                operator=operators.distinct_op,
                                type_=self.type)

    def between(self, cleft, cright):
        """See :meth:`.ColumnOperators.between`."""
        return _BinaryExpression(
                self,
                ClauseList(
                    self._check_literal(operators.and_, cleft),
                    self._check_literal(operators.and_, cright),
                    operator=operators.and_,
                    group=False),
                operators.between_op)

    def collate(self, collation):
        """See :meth:`.ColumnOperators.collate`."""

        return collate(self, collation)

    def op(self, operator):
        """See :meth:`.ColumnOperators.op`."""

        return lambda other: self.__operate(operator, other)

    def _bind_param(self, operator, obj):
        return _BindParamClause(None, obj,
                                    _compared_to_operator=operator,
                                    _compared_to_type=self.type, unique=True)

    def _check_literal(self, operator, other):
        if isinstance(other, _BindParamClause) and \
            isinstance(other.type, sqltypes.NullType):
            # TODO: perhaps we should not mutate the incoming bindparam()
            # here and instead make a copy of it.  this might
            # be the only place that we're mutating an incoming construct.
            other.type = self.type
            return other
        elif hasattr(other, '__clause_element__'):
            return other.__clause_element__()
        elif not isinstance(other, ClauseElement):
            return self._bind_param(operator, other)
        elif isinstance(other, (_SelectBase, Alias)):
            return other.as_scalar()
        else:
            return other

class ColumnElement(ClauseElement, _CompareMixin):
    """Represent an element that is usable within the
    "column clause" portion of a ``SELECT`` statement.
This includes columns associated with tables, aliases, and
    subqueries, expressions, function calls, SQL keywords such as
    ``NULL``, literals, etc.  :class:`.ColumnElement` is the ultimate base
    class for all such elements.

    :class:`.ColumnElement` supports the ability to be a *proxy* element,
    which indicates that the :class:`.ColumnElement` may be associated with
    a :class:`.Selectable` which was derived from another :class:`.Selectable`.
    An example of a "derived" :class:`.Selectable` is an :class:`.Alias` of a
    :class:`~sqlalchemy.schema.Table`.

    A :class:`.ColumnElement`, by subclassing the :class:`_CompareMixin` mixin
    class, provides the ability to generate new :class:`.ClauseElement`
    objects using Python expressions.  See the :class:`_CompareMixin`
    docstring for more details.

    """

    __visit_name__ = 'column'
    primary_key = False
    foreign_keys = []
    quote = None
    _label = None

    @property
    def _select_iterable(self):
        return (self, )

    @util.memoized_property
    def base_columns(self):
        return util.column_set(c for c in self.proxy_set
                                     if not hasattr(c, 'proxies'))

    @util.memoized_property
    def proxy_set(self):
        s = util.column_set([self])
        if hasattr(self, 'proxies'):
            for c in self.proxies:
                s.update(c.proxy_set)
        return s

    def shares_lineage(self, othercolumn):
        """Return True if the given :class:`.ColumnElement`
        has a common ancestor to this :class:`.ColumnElement`."""

        return bool(self.proxy_set.intersection(othercolumn.proxy_set))

    def _make_proxy(self, selectable, name=None):
        """Create a new :class:`.ColumnElement` representing this
        :class:`.ColumnElement` as it appears in the select list of a
        descending selectable.

        """
        if name is None:
            name = self.anon_label
            # TODO: may want to change this to anon_label,
            # or some value that is more useful than the
            # compiled form of the expression
            key = str(self)
        else:
            key = name

        co = ColumnClause(name, selectable, type_=getattr(self,
                'type', None))
        co.proxies = [self]
        selectable._columns[key] = co
        return co

    def compare(self, other, use_proxies=False, equivalents=None, **kw):
        """Compare this ColumnElement to another.

        Special arguments understood:

        :param use_proxies: when True, consider two columns that
          share a common base column as equivalent (i.e. shares_lineage())

        :param equivalents: a dictionary of columns as keys mapped to sets
          of columns. If the given "other" column is present in this
          dictionary, if any of the columns in the corresponding set() pass
          the comparison test, the result is True. This is used to expand the
          comparison to other columns that may be known to be equivalent to
          this one via foreign key or other criterion.

        """
        to_compare = (other, )
        if equivalents and other in equivalents:
            to_compare = equivalents[other].union(to_compare)

        for oth in to_compare:
            if use_proxies and self.shares_lineage(oth):
                return True
            elif oth is self:
                return True
        else:
            return False

    @util.memoized_property
    def anon_label(self):
        """provides a constant 'anonymous label' for this ColumnElement.

        This is a label() expression which will be named at compile time.
        The same label() is returned each time anon_label is called so
        that expressions can reference anon_label multiple times, producing
        the same label name at compile time.

        The compiler uses this function automatically at compile time
        for expressions that are known to be 'unnamed' like binary
        expressions and function calls.

        """
        return _generated_label('%%(%d %s)s' % (id(self),
                                getattr(self, 'name', 'anon')))

class ColumnCollection(util.OrderedProperties):
    """An ordered dictionary that stores a list of ColumnElement
    instances.
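
    Access is available by string key as well as by attribute (a sketch;
    ``t`` here is assumed to be a :class:`~sqlalchemy.schema.Table` with a
    column named ``id``)::

        >>> t.columns['id'] is t.columns.id
        True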
    Overrides the ``__eq__()`` method to produce SQL clauses between
    sets of correlated columns.

    """

    def __init__(self, *cols):
        super(ColumnCollection, self).__init__()
        self._data.update((c.key, c) for c in cols)
        self.__dict__['_all_cols'] = util.column_set(self)

    def __str__(self):
        return repr([str(c) for c in self])

    def replace(self, column):
        """add the given column to this collection, removing unaliased
        versions of this column as well as existing columns with the
        same key.

        e.g.::

            t = Table('sometable', metadata, Column('col1', Integer))
            t.columns.replace(Column('col1', Integer, key='columnone'))

        will remove the original 'col1' from the collection, and add
        the new column under the name 'columnone'.

        Used by schema.Column to override columns during table reflection.

        """
        if column.name in self and column.key != column.name:
            other = self[column.name]
            if other.name == other.key:
                del self._data[other.name]
                self._all_cols.remove(other)
        if column.key in self._data:
            self._all_cols.remove(self._data[column.key])
        self._all_cols.add(column)
        self._data[column.key] = column

    def add(self, column):
        """Add a column to this collection.

        The key attribute of the column will be used as the hash key
        for this dictionary.

        """
        self[column.key] = column

    def __delitem__(self, key):
        raise NotImplementedError()

    def __setattr__(self, key, object):
        raise NotImplementedError()

    def __setitem__(self, key, value):
        if key in self:

            # this warning is primarily to catch select() statements
            # which have conflicting column names in their exported
            # columns collection

            existing = self[key]
            if not existing.shares_lineage(value):
                util.warn('Column %r on table %r being replaced by '
                          'another column with the same key.  Consider '
                          'use_labels for select() statements.' % (key,
                          getattr(existing, 'table', None)))
            self._all_cols.remove(existing)
        self._all_cols.add(value)
        self._data[key] = value

    def clear(self):
        self._data.clear()
        self._all_cols.clear()

    def remove(self, column):
        del self._data[column.key]
        self._all_cols.remove(column)

    def update(self, value):
        self._data.update(value)
        self._all_cols.clear()
        self._all_cols.update(self._data.values())

    def extend(self, iter):
        self.update((c.key, c) for c in iter)

    __hash__ = None

    def __eq__(self, other):
        l = []
        for c in other:
            for local in self:
                if c.shares_lineage(local):
                    l.append(c==local)
        return and_(*l)

    def __contains__(self, other):
        if not isinstance(other, basestring):
            raise exc.ArgumentError("__contains__ requires a string argument")
        return util.OrderedProperties.__contains__(self, other)

    def __setstate__(self, state):
        self.__dict__['_data'] = state['_data']
        self.__dict__['_all_cols'] = util.column_set(self._data.values())

    def contains_column(self, col):
        # this has to be done via set() membership
        return col in self._all_cols

    def as_immutable(self):
        return ImmutableColumnCollection(self._data, self._all_cols)

class ImmutableColumnCollection(util.ImmutableProperties, ColumnCollection):
    def __init__(self, data, colset):
        util.ImmutableProperties.__init__(self, data)
        self.__dict__['_all_cols'] = colset

    extend = remove = util.ImmutableProperties._immutable

class ColumnSet(util.ordered_column_set):
    def contains_column(self, col):
        return col in self

    def extend(self, cols):
        for col in cols:
            self.add(col)

    def __add__(self, other):
        return list(self) + list(other)

    def __eq__(self, other):
        l = []
        for c in other:
            for local in self:
                if c.shares_lineage(local):
                    l.append(c==local)
        return and_(*l)

    def __hash__(self):
        return hash(tuple(x for x in self))

class Selectable(ClauseElement):
    """mark a class as being
selectable""" __visit_name__ = 'selectable' class FromClause(Selectable): """Represent an element that can be used within the ``FROM`` clause of a ``SELECT`` statement. """ __visit_name__ = 'fromclause' named_with_column = False _hide_froms = [] quote = None schema = None def count(self, whereclause=None, **params): """return a SELECT COUNT generated against this :class:`.FromClause`.""" if self.primary_key: col = list(self.primary_key)[0] else: col = list(self.columns)[0] return select( [func.count(col).label('tbl_row_count')], whereclause, from_obj=[self], **params) def select(self, whereclause=None, **params): """return a SELECT of this :class:`.FromClause`.""" return select([self], whereclause, **params) def join(self, right, onclause=None, isouter=False): """return a join of this :class:`.FromClause` against another :class:`.FromClause`.""" return Join(self, right, onclause, isouter) def outerjoin(self, right, onclause=None): """return an outer join of this :class:`.FromClause` against another :class:`.FromClause`.""" return Join(self, right, onclause, True) def alias(self, name=None): """return an alias of this :class:`.FromClause`. This is shorthand for calling:: from sqlalchemy import alias a = alias(self, name=name) See :func:`~.expression.alias` for details. """ return Alias(self, name) def is_derived_from(self, fromclause): """Return True if this FromClause is 'derived' from the given FromClause. An example would be an Alias of a Table is derived from that Table. """ return fromclause in self._cloned_set def replace_selectable(self, old, alias): """replace all occurrences of FromClause 'old' with the given Alias object, returning a copy of this :class:`.FromClause`. """ return sqlutil.ClauseAdapter(alias).traverse(self) def correspond_on_equivalents(self, column, equivalents): """Return corresponding_column for the given column, or if None search for a match in the given dictionary. """ col = self.corresponding_column(column, require_embedded=True) if col is None and col in equivalents: for equiv in equivalents[col]: nc = self.corresponding_column(equiv, require_embedded=True) if nc: return nc return col def corresponding_column(self, column, require_embedded=False): """Given a :class:`.ColumnElement`, return the exported :class:`.ColumnElement` object from this :class:`.Selectable` which corresponds to that original :class:`~sqlalchemy.schema.Column` via a common anscestor column. :param column: the target :class:`.ColumnElement` to be matched :param require_embedded: only return corresponding columns for the given :class:`.ColumnElement`, if the given :class:`.ColumnElement` is actually present within a sub-element of this :class:`.FromClause`. Normally the column will match if it merely shares a common anscestor with one of the exported columns of this :class:`.FromClause`. """ # dont dig around if the column is locally present if self.c.contains_column(column): return column col, intersect = None, None target_set = column.proxy_set cols = self.c for c in cols: i = target_set.intersection(itertools.chain(*[p._cloned_set for p in c.proxy_set])) if i and (not require_embedded or c.proxy_set.issuperset(target_set)): if col is None: # no corresponding column yet, pick this one. col, intersect = c, i elif len(i) > len(intersect): # 'c' has a larger field of correspondence than # 'col'. i.e. selectable.c.a1_x->a1.c.x->table.c.x # matches a1.c.x->table.c.x better than # selectable.c.x->table.c.x does. 
                    col, intersect = c, i
                elif i == intersect:

                    # they have the same field of correspondence. see
                    # which proxy_set has fewer columns in it, which
                    # indicates a closer relationship with the root
                    # column. Also take into account the "weight"
                    # attribute which CompoundSelect() uses to give
                    # higher precedence to columns based on vertical
                    # position in the compound statement, and discard
                    # columns that have no reference to the target
                    # column (also occurs with CompoundSelect)

                    col_distance = util.reduce(operator.add,
                            [sc._annotations.get('weight', 1) for sc in
                            col.proxy_set if sc.shares_lineage(column)])
                    c_distance = util.reduce(operator.add,
                            [sc._annotations.get('weight', 1) for sc in
                            c.proxy_set if sc.shares_lineage(column)])
                    if c_distance < col_distance:
                        col, intersect = c, i
        return col

    @property
    def description(self):
        """a brief description of this FromClause.

        Used primarily for error message formatting.

        """
        return getattr(self, 'name', self.__class__.__name__ + " object")

    def _reset_exported(self):
        """delete memoized collections when a FromClause is cloned."""

        for name in 'primary_key', '_columns', 'columns', \
                'foreign_keys', 'locate_all_froms':
            self.__dict__.pop(name, None)

    @util.memoized_property
    def columns(self):
        """Return the collection of Column objects contained by this
        FromClause."""

        if '_columns' not in self.__dict__:
            self._init_collections()
            self._populate_column_collection()
        return self._columns.as_immutable()

    @util.memoized_property
    def primary_key(self):
        """Return the collection of Column objects which comprise the
        primary key of this FromClause."""

        self._init_collections()
        self._populate_column_collection()
        return self.primary_key

    @util.memoized_property
    def foreign_keys(self):
        """Return the collection of ForeignKey objects which this
        FromClause references."""

        self._init_collections()
        self._populate_column_collection()
        return self.foreign_keys

    c = property(attrgetter('columns'))
    _select_iterable = property(attrgetter('columns'))

    def _init_collections(self):
        assert '_columns' not in self.__dict__
        assert 'primary_key' not in self.__dict__
        assert 'foreign_keys' not in self.__dict__

        self._columns = ColumnCollection()
        self.primary_key = ColumnSet()
        self.foreign_keys = set()

    def _populate_column_collection(self):
        pass

class _BindParamClause(ColumnElement):
    """Represent a bind parameter.

    Public constructor is the :func:`bindparam()` function.

    """

    __visit_name__ = 'bindparam'
    quote = None

    def __init__(self, key, value, type_=None, unique=False,
                            callable_=None,
                            isoutparam=False, required=False,
                            _compared_to_operator=None,
                            _compared_to_type=None):
        """Construct a _BindParamClause.

        :param key:
          the key for this bind param.  Will be used in the generated
          SQL statement for dialects that use named parameters.  This
          value may be modified when part of a compilation operation,
          if other :class:`_BindParamClause` objects exist with the same
          key, or if its length is too long and truncation is
          required.

        :param value:
          Initial value for this bind param.  This value may be
          overridden by the dictionary of parameters sent to statement
          compilation/execution.

        :param callable\_:
          A callable function that takes the place of "value".  The function
          will be called at statement execution time to determine the
          ultimate value.   Used for scenarios where the actual bind
          value cannot be determined at the point at which the clause
          construct is created, but embedded bind values are still desirable.
:param type\_: A ``TypeEngine`` object that will be used to pre-process the value corresponding to this :class:`_BindParamClause` at execution time. :param unique: if True, the key name of this BindParamClause will be modified if another :class:`_BindParamClause` of the same name already has been located within the containing :class:`.ClauseElement`. :param required: a value is required at execution time. :param isoutparam: if True, the parameter should be treated like a stored procedure "OUT" parameter. """ if unique: self.key = _generated_label('%%(%d %s)s' % (id(self), key or 'param')) else: self.key = key or _generated_label('%%(%d param)s' % id(self)) self._orig_key = key or 'param' self.unique = unique self.value = value self.callable = callable_ self.isoutparam = isoutparam self.required = required if type_ is None: if _compared_to_type is not None: self.type = \ _compared_to_type._coerce_compared_value( _compared_to_operator, value) else: self.type = sqltypes._type_map.get(type(value), sqltypes.NULLTYPE) elif isinstance(type_, type): self.type = type_() else: self.type = type_ def _clone(self): c = ClauseElement._clone(self) if self.unique: c.key = _generated_label('%%(%d %s)s' % (id(c), c._orig_key or 'param')) return c def _convert_to_unique(self): if not self.unique: self.unique = True self.key = _generated_label('%%(%d %s)s' % (id(self), self._orig_key or 'param')) def compare(self, other, **kw): """Compare this :class:`_BindParamClause` to the given clause.""" return isinstance(other, _BindParamClause) \ and self.type._compare_type_affinity(other.type) \ and self.value == other.value def __getstate__(self): """execute a deferred value for serialization purposes.""" d = self.__dict__.copy() v = self.value if self.callable: v = self.callable() d['callable'] = None d['value'] = v return d def __repr__(self): return '_BindParamClause(%r, %r, type_=%r)' % (self.key, self.value, self.type) class _TypeClause(ClauseElement): """Handle a type keyword in a SQL statement. Used by the ``Case`` statement. """ __visit_name__ = 'typeclause' def __init__(self, type): self.type = type class _Generative(object): """Allow a ClauseElement to generate itself via the @_generative decorator. """ def _generate(self): s = self.__class__.__new__(self.__class__) s.__dict__ = self.__dict__.copy() return s class Executable(_Generative): """Mark a ClauseElement as supporting execution. :class:`.Executable` is a superclass for all "statement" types of objects, including :func:`select`, :func:`delete`, :func:`update`, :func:`insert`, :func:`text`. """ supports_execution = True _execution_options = util.immutabledict() _bind = None @_generative def execution_options(self, **kw): """ Set non-SQL options for the statement which take effect during execution. Execution options can be set on a per-statement or per :class:`.Connection` basis. Additionally, the :class:`.Engine` and ORM :class:`~.orm.query.Query` objects provide access to execution options which they in turn configure upon connections. The :meth:`execution_options` method is generative. A new instance of this statement is returned that contains the options:: statement = select([table.c.x, table.c.y]) statement = statement.execution_options(autocommit=True) Note that only a subset of possible execution options can be applied to a statement - these include "autocommit" and "stream_results", but not "isolation_level" or "compiled_cache". See :meth:`.Connection.execution_options` for a full list of possible options. 
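
        For example, to request streaming of results on backends that support
        it, using the "stream_results" option mentioned above (a sketch)::

            statement = statement.execution_options(stream_results=True)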
        See also:

            :meth:`.Connection.execution_options()`

            :meth:`.Query.execution_options()`

        """
        if 'isolation_level' in kw:
            raise exc.ArgumentError(
                "'isolation_level' execution option may only be specified "
                "on Connection.execution_options(), or "
                "per-engine using the isolation_level "
                "argument to create_engine()."
            )
        if 'compiled_cache' in kw:
            raise exc.ArgumentError(
                "'compiled_cache' execution option may only be specified "
                "on Connection.execution_options(), not per statement."
            )
        self._execution_options = self._execution_options.union(kw)

    def execute(self, *multiparams, **params):
        """Compile and execute this :class:`.Executable`."""

        e = self.bind
        if e is None:
            label = getattr(self, 'description', self.__class__.__name__)
            msg = ('This %s is not directly bound to a Connection or Engine. '
                   'Use the .execute() method of a Connection or Engine '
                   'to execute this construct.' % label)
            raise exc.UnboundExecutionError(msg)
        return e._execute_clauseelement(self, multiparams, params)

    def scalar(self, *multiparams, **params):
        """Compile and execute this :class:`.Executable`, returning the
        result's scalar representation.

        """
        return self.execute(*multiparams, **params).scalar()

    @property
    def bind(self):
        """Returns the :class:`.Engine` or :class:`.Connection` to
        which this :class:`.Executable` is bound, or None if none found.

        This is a traversal which checks locally, then
        checks among the "from" clauses of associated objects
        until a bound engine or connection is found.

        """
        if self._bind is not None:
            return self._bind

        for f in _from_objects(self):
            if f is self:
                continue
            engine = f.bind
            if engine is not None:
                return engine
        else:
            return None

# legacy, some outside users may be calling this
_Executable = Executable

class _TextClause(Executable, ClauseElement):
    """Represent a literal SQL text fragment.

    Public constructor is the :func:`text()` function.

    """

    __visit_name__ = 'textclause'

    _bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
    _execution_options = \
        Executable._execution_options.union({'autocommit'
            : PARSE_AUTOCOMMIT})

    @property
    def _select_iterable(self):
        return (self,)

    _hide_froms = []

    def __init__(
        self,
        text='',
        bind=None,
        bindparams=None,
        typemap=None,
        autocommit=None,
        ):
        self._bind = bind
        self.bindparams = {}
        self.typemap = typemap
        if autocommit is not None:
            util.warn_deprecated('autocommit on text() is deprecated.  '
                                 'Use .execution_options(autocommit=Tru'
                                 'e)')
            self._execution_options = \
                self._execution_options.union({'autocommit'
                    : autocommit})
        if typemap is not None:
            for key in typemap.keys():
                typemap[key] = sqltypes.to_instance(typemap[key])

        def repl(m):
            self.bindparams[m.group(1)] = bindparam(m.group(1))
            return ':%s' % m.group(1)

        # scan the string and search for bind parameter names, add them
        # to the list of bindparams

        self.text = self._bind_params_regex.sub(repl, text)
        if bindparams is not None:
            for b in bindparams:
                self.bindparams[b.key] = b

    @property
    def type(self):
        if self.typemap is not None and len(self.typemap) == 1:
            return list(self.typemap)[0]
        else:
            return sqltypes.NULLTYPE

    def self_group(self, against=None):
        if against is operators.in_op:
            return _Grouping(self)
        else:
            return self

    def _copy_internals(self, clone=_clone):
        self.bindparams = dict((b.key, clone(b))
                               for b in self.bindparams.values())

    def get_children(self, **kwargs):
        return self.bindparams.values()

class _Null(ColumnElement):
    """Represent the NULL keyword in a SQL statement.

    Public constructor is the :func:`null()` function.
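
    Comparing an expression to ``null()`` using ``==`` or ``!=`` renders the
    ``IS NULL`` / ``IS NOT NULL`` operators rather than ``=`` (a sketch;
    output shown is illustrative)::

        >>> from sqlalchemy.sql.expression import column, null
        >>> print column('x') == null()
        x IS NULL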
""" __visit_name__ = 'null' def __init__(self): self.type = sqltypes.NULLTYPE class _False(ColumnElement): """Represent the ``false`` keyword in a SQL statement. Public constructor is the :func:`false()` function. """ __visit_name__ = 'false' def __init__(self): self.type = sqltypes.BOOLEANTYPE class _True(ColumnElement): """Represent the ``true`` keyword in a SQL statement. Public constructor is the :func:`true()` function. """ __visit_name__ = 'true' def __init__(self): self.type = sqltypes.BOOLEANTYPE class ClauseList(ClauseElement): """Describe a list of clauses, separated by an operator. By default, is comma-separated, such as a column listing. """ __visit_name__ = 'clauselist' def __init__(self, *clauses, **kwargs): self.operator = kwargs.pop('operator', operators.comma_op) self.group = kwargs.pop('group', True) self.group_contents = kwargs.pop('group_contents', True) if self.group_contents: self.clauses = [ _literal_as_text(clause).self_group(against=self.operator) for clause in clauses if clause is not None] else: self.clauses = [ _literal_as_text(clause) for clause in clauses if clause is not None] @util.memoized_property def type(self): if self.clauses: return self.clauses[0].type else: return sqltypes.NULLTYPE def __iter__(self): return iter(self.clauses) def __len__(self): return len(self.clauses) @property def _select_iterable(self): return iter(self) def append(self, clause): # TODO: not sure if i like the 'group_contents' flag. need to # define the difference between a ClauseList of ClauseLists, # and a "flattened" ClauseList of ClauseLists. flatten() # method ? if self.group_contents: self.clauses.append(_literal_as_text(clause).\ self_group(against=self.operator)) else: self.clauses.append(_literal_as_text(clause)) def _copy_internals(self, clone=_clone): self.clauses = [clone(clause) for clause in self.clauses] def get_children(self, **kwargs): return self.clauses @property def _from_objects(self): return list(itertools.chain(*[c._from_objects for c in self.clauses])) def self_group(self, against=None): if self.group and operators.is_precedent(self.operator, against): return _Grouping(self) else: return self def compare(self, other, **kw): """Compare this :class:`.ClauseList` to the given :class:`.ClauseList`, including a comparison of all the clause items. 
""" if not isinstance(other, ClauseList) and len(self.clauses) == 1: return self.clauses[0].compare(other, **kw) elif isinstance(other, ClauseList) and \ len(self.clauses) == len(other.clauses): for i in range(0, len(self.clauses)): if not self.clauses[i].compare(other.clauses[i], **kw): return False else: return self.operator == other.operator else: return False class BooleanClauseList(ClauseList, ColumnElement): __visit_name__ = 'clauselist' def __init__(self, *clauses, **kwargs): super(BooleanClauseList, self).__init__(*clauses, **kwargs) self.type = sqltypes.to_instance(kwargs.get('type_', sqltypes.Boolean)) @property def _select_iterable(self): return (self, ) class _Tuple(ClauseList, ColumnElement): def __init__(self, *clauses, **kw): clauses = [_literal_as_binds(c) for c in clauses] super(_Tuple, self).__init__(*clauses, **kw) self.type = _type_from_args(clauses) @property def _select_iterable(self): return (self, ) def _bind_param(self, operator, obj): return _Tuple(*[ _BindParamClause(None, o, _compared_to_operator=operator, _compared_to_type=self.type, unique=True) for o in obj ]).self_group() class _Case(ColumnElement): __visit_name__ = 'case' def __init__(self, whens, value=None, else_=None): try: whens = util.dictlike_iteritems(whens) except TypeError: pass if value is not None: whenlist = [ (_literal_as_binds(c).self_group(), _literal_as_binds(r)) for (c, r) in whens ] else: whenlist = [ (_no_literals(c).self_group(), _literal_as_binds(r)) for (c, r) in whens ] if whenlist: type_ = list(whenlist[-1])[-1].type else: type_ = None if value is None: self.value = None else: self.value = _literal_as_binds(value) self.type = type_ self.whens = whenlist if else_ is not None: self.else_ = _literal_as_binds(else_) else: self.else_ = None def _copy_internals(self, clone=_clone): if self.value is not None: self.value = clone(self.value) self.whens = [(clone(x), clone(y)) for x, y in self.whens] if self.else_ is not None: self.else_ = clone(self.else_) def get_children(self, **kwargs): if self.value is not None: yield self.value for x, y in self.whens: yield x yield y if self.else_ is not None: yield self.else_ @property def _from_objects(self): return list(itertools.chain(*[x._from_objects for x in self.get_children()])) class FunctionElement(Executable, ColumnElement, FromClause): """Base for SQL function-oriented constructs.""" packagenames = () def __init__(self, *clauses, **kwargs): """Construct a :class:`.FunctionElement`. """ args = [_literal_as_binds(c, self.name) for c in clauses] self.clause_expr = ClauseList( operator=operators.comma_op, group_contents=True, *args).\ self_group() @property def columns(self): """Fulfill the 'columns' contrct of :class:`.ColumnElement`. Returns a single-element list consisting of this object. """ return [self] @util.memoized_property def clauses(self): """Return the underlying :class:`.ClauseList` which contains the arguments for this :class:`.FunctionElement`. """ return self.clause_expr.element def over(self, partition_by=None, order_by=None): """Produce an OVER clause against this function. Used against aggregate or so-called "window" functions, for database backends that support window functions. The expression:: func.row_number().over(order_by='x') is shorthand for:: from sqlalchemy import over over(func.row_number(), order_by='x') See :func:`~.expression.over` for a full description. New in 0.7. 
""" return over(self, partition_by=partition_by, order_by=order_by) @property def _from_objects(self): return self.clauses._from_objects def get_children(self, **kwargs): return self.clause_expr, def _copy_internals(self, clone=_clone): self.clause_expr = clone(self.clause_expr) self._reset_exported() util.reset_memoized(self, 'clauses') def select(self): """Produce a :func:`~.expression.select` construct against this :class:`.FunctionElement`. This is shorthand for:: s = select([function_element]) """ s = select([self]) if self._execution_options: s = s.execution_options(**self._execution_options) return s def scalar(self): """Execute this :class:`.FunctionElement` against an embedded 'bind' and return a scalar value. This first calls :meth:`~.FunctionElement.select` to produce a SELECT construct. Note that :class:`.FunctionElement` can be passed to the :meth:`.Connectable.scalar` method of :class:`.Connection` or :class:`.Engine`. """ return self.select().execute().scalar() def execute(self): """Execute this :class:`.FunctionElement` against an embedded 'bind'. This first calls :meth:`~.FunctionElement.select` to produce a SELECT construct. Note that :class:`.FunctionElement` can be passed to the :meth:`.Connectable.execute` method of :class:`.Connection` or :class:`.Engine`. """ return self.select().execute() def _bind_param(self, operator, obj): return _BindParamClause(None, obj, _compared_to_operator=operator, _compared_to_type=self.type, unique=True) class Function(FunctionElement): """Describe a named SQL function. See the superclass :class:`.FunctionElement` for a description of public methods. """ __visit_name__ = 'function' def __init__(self, name, *clauses, **kw): """Construct a :class:`.Function`. The :attr:`.func` construct is normally used to construct new :class:`.Function` instances. 
""" self.packagenames = kw.pop('packagenames', None) or [] self.name = name self._bind = kw.get('bind', None) self.type = sqltypes.to_instance(kw.get('type_', None)) FunctionElement.__init__(self, *clauses, **kw) def _bind_param(self, operator, obj): return _BindParamClause(self.name, obj, _compared_to_operator=operator, _compared_to_type=self.type, unique=True) class _Cast(ColumnElement): __visit_name__ = 'cast' def __init__(self, clause, totype, **kwargs): self.type = sqltypes.to_instance(totype) self.clause = _literal_as_binds(clause, None) self.typeclause = _TypeClause(self.type) def _copy_internals(self, clone=_clone): self.clause = clone(self.clause) self.typeclause = clone(self.typeclause) def get_children(self, **kwargs): return self.clause, self.typeclause @property def _from_objects(self): return self.clause._from_objects class _Extract(ColumnElement): __visit_name__ = 'extract' def __init__(self, field, expr, **kwargs): self.type = sqltypes.Integer() self.field = field self.expr = _literal_as_binds(expr, None) def _copy_internals(self, clone=_clone): self.expr = clone(self.expr) def get_children(self, **kwargs): return self.expr, @property def _from_objects(self): return self.expr._from_objects class _UnaryExpression(ColumnElement): __visit_name__ = 'unary' def __init__(self, element, operator=None, modifier=None, type_=None, negate=None): self.operator = operator self.modifier = modifier self.element = _literal_as_text(element).\ self_group(against=self.operator or self.modifier) self.type = sqltypes.to_instance(type_) self.negate = negate @property def _from_objects(self): return self.element._from_objects def _copy_internals(self, clone=_clone): self.element = clone(self.element) def get_children(self, **kwargs): return self.element, def compare(self, other, **kw): """Compare this :class:`_UnaryExpression` against the given :class:`.ClauseElement`.""" return ( isinstance(other, _UnaryExpression) and self.operator == other.operator and self.modifier == other.modifier and self.element.compare(other.element, **kw) ) def _negate(self): if self.negate is not None: return _UnaryExpression( self.element, operator=self.negate, negate=self.operator, modifier=self.modifier, type_=self.type) else: return super(_UnaryExpression, self)._negate() def self_group(self, against=None): if self.operator and operators.is_precedent(self.operator, against): return _Grouping(self) else: return self class _BinaryExpression(ColumnElement): """Represent an expression that is ``LEFT <operator> RIGHT``.""" __visit_name__ = 'binary' def __init__(self, left, right, operator, type_=None, negate=None, modifiers=None): self.left = _literal_as_text(left).self_group(against=operator) self.right = _literal_as_text(right).self_group(against=operator) self.operator = operator self.type = sqltypes.to_instance(type_) self.negate = negate if modifiers is None: self.modifiers = {} else: self.modifiers = modifiers def __nonzero__(self): try: return self.operator(hash(self.left), hash(self.right)) except: raise TypeError("Boolean value of this clause is not defined") @property def _from_objects(self): return self.left._from_objects + self.right._from_objects def _copy_internals(self, clone=_clone): self.left = clone(self.left) self.right = clone(self.right) def get_children(self, **kwargs): return self.left, self.right def compare(self, other, **kw): """Compare this :class:`_BinaryExpression` against the given :class:`_BinaryExpression`.""" return ( isinstance(other, _BinaryExpression) and self.operator == 
other.operator and ( self.left.compare(other.left, **kw) and self.right.compare(other.right, **kw) or ( operators.is_commutative(self.operator) and self.left.compare(other.right, **kw) and self.right.compare(other.left, **kw) ) ) ) def self_group(self, against=None): if operators.is_precedent(self.operator, against): return _Grouping(self) else: return self def _negate(self): if self.negate is not None: return _BinaryExpression( self.left, self.right, self.negate, negate=self.operator, type_=sqltypes.BOOLEANTYPE, modifiers=self.modifiers) else: return super(_BinaryExpression, self)._negate() class _Exists(_UnaryExpression): __visit_name__ = _UnaryExpression.__visit_name__ _from_objects = [] def __init__(self, *args, **kwargs): if args and isinstance(args[0], (_SelectBase, _ScalarSelect)): s = args[0] else: if not args: args = ([literal_column('*')],) s = select(*args, **kwargs).as_scalar().self_group() _UnaryExpression.__init__(self, s, operator=operators.exists, type_=sqltypes.Boolean) def select(self, whereclause=None, **params): return select([self], whereclause, **params) def correlate(self, fromclause): e = self._clone() e.element = self.element.correlate(fromclause).self_group() return e def select_from(self, clause): """return a new exists() construct with the given expression set as its FROM clause. """ e = self._clone() e.element = self.element.select_from(clause).self_group() return e def where(self, clause): """return a new exists() construct with the given expression added to its WHERE clause, joined to the existing clause via AND, if any. """ e = self._clone() e.element = self.element.where(clause).self_group() return e class Join(FromClause): """represent a ``JOIN`` construct between two :class:`.FromClause` elements. The public constructor function for :class:`.Join` is the module-level :func:`join()` function, as well as the :func:`join()` method available off all :class:`.FromClause` subclasses. """ __visit_name__ = 'join' def __init__(self, left, right, onclause=None, isouter=False): """Construct a new :class:`.Join`. The usual entrypoint here is the :func:`~.expression.join` function or the :meth:`.FromClause.join` method of any :class:`.FromClause` object. 
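
        E.g. (a sketch, assuming :class:`~sqlalchemy.schema.Table` objects
        ``users`` and ``addresses``)::

            j = users.join(addresses,
                           users.c.id == addresses.c.user_id)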
""" self.left = _literal_as_text(left) self.right = _literal_as_text(right).self_group() if onclause is None: self.onclause = self._match_primaries(self.left, self.right) else: self.onclause = onclause self.isouter = isouter self.__folded_equivalents = None @property def description(self): return "Join object on %s(%d) and %s(%d)" % ( self.left.description, id(self.left), self.right.description, id(self.right)) def is_derived_from(self, fromclause): return fromclause is self or \ self.left.is_derived_from(fromclause) or\ self.right.is_derived_from(fromclause) def self_group(self, against=None): return _FromGrouping(self) def _populate_column_collection(self): columns = [c for c in self.left.columns] + \ [c for c in self.right.columns] self.primary_key.extend(sqlutil.reduce_columns( (c for c in columns if c.primary_key), self.onclause)) self._columns.update((col._label, col) for col in columns) self.foreign_keys.update(itertools.chain( *[col.foreign_keys for col in columns])) def _copy_internals(self, clone=_clone): self._reset_exported() self.left = clone(self.left) self.right = clone(self.right) self.onclause = clone(self.onclause) self.__folded_equivalents = None def get_children(self, **kwargs): return self.left, self.right, self.onclause def _match_primaries(self, left, right): if isinstance(left, Join): left_right = left.right else: left_right = None return sqlutil.join_condition(left, right, a_subset=left_right) def select(self, whereclause=None, fold_equivalents=False, **kwargs): """Create a :class:`.Select` from this :class:`.Join`. The equivalent long-hand form, given a :class:`.Join` object ``j``, is:: from sqlalchemy import select j = select([j.left, j.right], **kw).\\ where(whereclause).\\ select_from(j) :param whereclause: the WHERE criterion that will be sent to the :func:`select()` function :param fold_equivalents: based on the join criterion of this :class:`.Join`, do not include repeat column names in the column list of the resulting select, for columns that are calculated to be "equivalent" based on the join criterion of this :class:`.Join`. This will recursively apply to any joins directly nested by this one as well. :param \**kwargs: all other kwargs are sent to the underlying :func:`select()` function. """ if fold_equivalents: collist = sqlutil.folded_equivalents(self) else: collist = [self.left, self.right] return select(collist, whereclause, from_obj=[self], **kwargs) @property def bind(self): return self.left.bind or self.right.bind def alias(self, name=None): """return an alias of this :class:`.Join`. Used against a :class:`.Join` object, :meth:`~.Join.alias` calls the :meth:`~.Join.select` method first so that a subquery against a :func:`.select` construct is generated. the :func:`~expression.select` construct also has the ``correlate`` flag set to ``False`` and will not auto-correlate inside an enclosing :func:`~expression.select` construct. The equivalent long-hand form, given a :class:`.Join` object ``j``, is:: from sqlalchemy import select, alias j = alias( select([j.left, j.right]).\\ select_from(j).\\ with_labels(True).\\ correlate(False), name=name ) See :func:`~.expression.alias` for further details on aliases. 
""" return self.select(use_labels=True, correlate=False).alias(name) @property def _hide_froms(self): return itertools.chain(*[_from_objects(x.left, x.right) for x in self._cloned_set]) @property def _from_objects(self): return [self] + \ self.onclause._from_objects + \ self.left._from_objects + \ self.right._from_objects class Alias(FromClause): """Represents an table or selectable alias (AS). Represents an alias, as typically applied to any table or sub-select within a SQL statement using the ``AS`` keyword (or without the keyword on certain databases such as Oracle). This object is constructed from the :func:`~.expression.alias` module level function as well as the :meth:`.FromClause.alias` method available on all :class:`.FromClause` subclasses. """ __visit_name__ = 'alias' named_with_column = True def __init__(self, selectable, name=None): baseselectable = selectable while isinstance(baseselectable, Alias): baseselectable = baseselectable.element self.original = baseselectable self.supports_execution = baseselectable.supports_execution if self.supports_execution: self._execution_options = baseselectable._execution_options self.element = selectable if name is None: if self.original.named_with_column: name = getattr(self.original, 'name', None) name = _generated_label('%%(%d %s)s' % (id(self), name or 'anon')) self.name = name @property def description(self): # Py3K #return self.name # Py2K return self.name.encode('ascii', 'backslashreplace') # end Py2K def as_scalar(self): try: return self.element.as_scalar() except AttributeError: raise AttributeError("Element %s does not support " "'as_scalar()'" % self.element) def is_derived_from(self, fromclause): if fromclause in self._cloned_set: return True return self.element.is_derived_from(fromclause) def _populate_column_collection(self): for col in self.element.columns: col._make_proxy(self) def _copy_internals(self, clone=_clone): self._reset_exported() self.element = _clone(self.element) baseselectable = self.element while isinstance(baseselectable, Alias): baseselectable = baseselectable.element self.original = baseselectable def get_children(self, column_collections=True, aliased_selectables=True, **kwargs): if column_collections: for c in self.c: yield c if aliased_selectables: yield self.element @property def _from_objects(self): return [self] @property def bind(self): return self.element.bind class _Grouping(ColumnElement): """Represent a grouping within a column expression""" __visit_name__ = 'grouping' def __init__(self, element): self.element = element self.type = getattr(element, 'type', None) @property def _label(self): return getattr(self.element, '_label', None) or self.anon_label def _copy_internals(self, clone=_clone): self.element = clone(self.element) def get_children(self, **kwargs): return self.element, @property def _from_objects(self): return self.element._from_objects def __getattr__(self, attr): return getattr(self.element, attr) def __getstate__(self): return {'element':self.element, 'type':self.type} def __setstate__(self, state): self.element = state['element'] self.type = state['type'] class _FromGrouping(FromClause): """Represent a grouping of a FROM clause""" __visit_name__ = 'grouping' def __init__(self, element): self.element = element def _init_collections(self): pass @property def columns(self): return self.element.columns @property def primary_key(self): return self.element.primary_key @property def foreign_keys(self): # this could be # self.element.foreign_keys # see SelectableTest.test_join_condition 
return set() @property def _hide_froms(self): return self.element._hide_froms def get_children(self, **kwargs): return self.element, def _copy_internals(self, clone=_clone): self.element = clone(self.element) @property def _from_objects(self): return self.element._from_objects def __getattr__(self, attr): return getattr(self.element, attr) def __getstate__(self): return {'element':self.element} def __setstate__(self, state): self.element = state['element'] class _Over(ColumnElement): """Represent an OVER clause. This is a special operator against a so-called "window" function, as well as any aggregate function, which produces results relative to the result set itself. It's supported only by certain database backends. """ __visit_name__ = 'over' order_by = None partition_by = None def __init__(self, func, partition_by=None, order_by=None): self.func = func if order_by is not None: self.order_by = ClauseList(*util.to_list(order_by)) if partition_by is not None: self.partition_by = ClauseList(*util.to_list(partition_by)) @util.memoized_property def type(self): return self.func.type def get_children(self, **kwargs): return [c for c in (self.func, self.partition_by, self.order_by) if c is not None] def _copy_internals(self, clone=_clone): self.func = clone(self.func) if self.partition_by is not None: self.partition_by = clone(self.partition_by) if self.order_by is not None: self.order_by = clone(self.order_by) @property def _from_objects(self): return list(itertools.chain( *[c._from_objects for c in (self.func, self.partition_by, self.order_by) if c is not None] )) class _Label(ColumnElement): """Represents a column label (AS). Represent a label, as typically applied to any column-level element using the ``AS`` sql keyword. This object is constructed from the :func:`label()` module level function as well as the :func:`label()` method available on all :class:`.ColumnElement` subclasses. """ __visit_name__ = 'label' def __init__(self, name, element, type_=None): while isinstance(element, _Label): element = element.element self.name = self.key = self._label = name \ or _generated_label('%%(%d %s)s' % (id(self), getattr(element, 'name', 'anon'))) self._element = element self._type = type_ self.quote = element.quote self.proxies = [element] @util.memoized_property def type(self): return sqltypes.to_instance( self._type or getattr(self._element, 'type', None) ) @util.memoized_property def element(self): return self._element.self_group(against=operators.as_) def self_group(self, against=None): sub_element = self._element.self_group(against=against) if sub_element is not self._element: return _Label(self.name, sub_element, type_=self._type) else: return self._element @property def primary_key(self): return self.element.primary_key @property def foreign_keys(self): return self.element.foreign_keys def get_children(self, **kwargs): return self.element, def _copy_internals(self, clone=_clone): self.element = clone(self.element) @property def _from_objects(self): return self.element._from_objects def _make_proxy(self, selectable, name = None): e = self.element._make_proxy(selectable, name=name or self.name) e.proxies.append(self) return e class ColumnClause(_Immutable, ColumnElement): """Represents a generic column expression from any textual string. This includes columns associated with tables, aliases and select statements, but also any arbitrary text. May or may not be bound to an underlying :class:`.Selectable`. 
:class:`.ColumnClause` is constructed by itself typically via the :func:`~.expression.column` function. It may be placed directly into constructs such as :func:`.select` constructs:: from sqlalchemy.sql import column, select c1, c2 = column("c1"), column("c2") s = select([c1, c2]).where(c1==5) There is also a variant on :func:`~.expression.column` known as :func:`~.expression.literal_column` - the difference is that in the latter case, the string value is assumed to be an exact expression, rather than a column name, so that no quoting rules or similar are applied:: from sqlalchemy.sql import literal_column, select s = select([literal_column("5 + 7")]) :class:`.ColumnClause` can also be used in a table-like fashion by combining the :func:`~.expression.column` function with the :func:`~.expression.table` function, to produce a "lightweight" form of table metadata:: from sqlalchemy.sql import table, column user = table("user", column("id"), column("name"), column("description"), ) The above construct can be created in an ad-hoc fashion and is not associated with any :class:`.schema.MetaData`, unlike it's more full fledged :class:`.schema.Table` counterpart. :param text: the text of the element. :param selectable: parent selectable. :param type: :class:`.types.TypeEngine` object which can associate this :class:`.ColumnClause` with a type. :param is_literal: if True, the :class:`.ColumnClause` is assumed to be an exact expression that will be delivered to the output with no quoting rules applied regardless of case sensitive settings. the :func:`literal_column()` function is usually used to create such a :class:`.ColumnClause`. """ __visit_name__ = 'column' onupdate = default = server_default = server_onupdate = None def __init__(self, text, selectable=None, type_=None, is_literal=False): self.key = self.name = text self.table = selectable self.type = sqltypes.to_instance(type_) self.is_literal = is_literal @util.memoized_property def _from_objects(self): if self.table is not None: return [self.table] else: return [] @util.memoized_property def description(self): # Py3K #return self.name # Py2K return self.name.encode('ascii', 'backslashreplace') # end Py2K @util.memoized_property def _label(self): if self.is_literal: return None elif self.table is not None and self.table.named_with_column: if getattr(self.table, 'schema', None): label = self.table.schema.replace('.', '_') + "_" + \ _escape_for_generated(self.table.name) + "_" + \ _escape_for_generated(self.name) else: label = _escape_for_generated(self.table.name) + "_" + \ _escape_for_generated(self.name) # ensure the label name doesn't conflict with that # of an existing column if label in self.table.c: _label = label counter = 1 while _label in self.table.c: _label = label + "_" + str(counter) counter += 1 label = _label return _generated_label(label) else: return self.name def label(self, name): # currently, anonymous labels don't occur for # ColumnClause. The use at the moment # is that they do not generate nicely for # is_literal clauses. We would like to change # this so that label(None) acts as would be expected. # See [ticket:2168]. 
if name is None: return self else: return super(ColumnClause, self).label(name) def _bind_param(self, operator, obj): return _BindParamClause(self.name, obj, _compared_to_operator=operator, _compared_to_type=self.type, unique=True) def _make_proxy(self, selectable, name=None, attach=True): # propagate the "is_literal" flag only if we are keeping our name, # otherwise its considered to be a label is_literal = self.is_literal and (name is None or name == self.name) c = self._constructor( name or self.name, selectable=selectable, type_=self.type, is_literal=is_literal ) c.proxies = [self] if attach: selectable._columns[c.name] = c return c class TableClause(_Immutable, FromClause): """Represents a minimal "table" construct. The constructor for :class:`.TableClause` is the :func:`~.expression.table` function. This produces a lightweight table object that has only a name and a collection of columns, which are typically produced by the :func:`~.expression.column` function:: from sqlalchemy.sql import table, column user = table("user", column("id"), column("name"), column("description"), ) The :class:`.TableClause` construct serves as the base for the more commonly used :class:`~.schema.Table` object, providing the usual set of :class:`~.expression.FromClause` services including the ``.c.`` collection and statement generation methods. It does **not** provide all the additional schema-level services of :class:`~.schema.Table`, including constraints, references to other tables, or support for :class:`.MetaData`-level services. It's useful on its own as an ad-hoc construct used to generate quick SQL statements when a more fully fledged :class:`~.schema.Table` is not on hand. """ __visit_name__ = 'table' named_with_column = True def __init__(self, name, *columns): super(TableClause, self).__init__() self.name = self.fullname = name self._columns = ColumnCollection() self.primary_key = ColumnSet() self.foreign_keys = set() for c in columns: self.append_column(c) def _init_collections(self): pass @util.memoized_property def description(self): # Py3K #return self.name # Py2K return self.name.encode('ascii', 'backslashreplace') # end Py2K def append_column(self, c): self._columns[c.name] = c c.table = self def get_children(self, column_collections=True, **kwargs): if column_collections: return [c for c in self.c] else: return [] def count(self, whereclause=None, **params): """return a SELECT COUNT generated against this :class:`.TableClause`.""" if self.primary_key: col = list(self.primary_key)[0] else: col = list(self.columns)[0] return select( [func.count(col).label('tbl_row_count')], whereclause, from_obj=[self], **params) def insert(self, values=None, inline=False, **kwargs): """Generate an :func:`insert()` construct.""" return insert(self, values=values, inline=inline, **kwargs) def update(self, whereclause=None, values=None, inline=False, **kwargs): """Generate an :func:`update()` construct.""" return update(self, whereclause=whereclause, values=values, inline=inline, **kwargs) def delete(self, whereclause=None, **kwargs): """Generate a :func:`delete()` construct.""" return delete(self, whereclause, **kwargs) @property def _from_objects(self): return [self] class _SelectBase(Executable, FromClause): """Base class for :class:`.Select` and ``CompoundSelects``.""" _order_by_clause = ClauseList() _group_by_clause = ClauseList() _limit = None _offset = None def __init__(self, use_labels=False, for_update=False, limit=None, offset=None, order_by=None, group_by=None, bind=None, autocommit=None): 
self.use_labels = use_labels self.for_update = for_update if autocommit is not None: util.warn_deprecated('autocommit on select() is ' 'deprecated. Use .execution_options(a' 'utocommit=True)') self._execution_options = \ self._execution_options.union({'autocommit' : autocommit}) if limit is not None: self._limit = util.asint(limit) if offset is not None: self._offset = util.asint(offset) self._bind = bind if order_by is not None: self._order_by_clause = ClauseList(*util.to_list(order_by)) if group_by is not None: self._group_by_clause = ClauseList(*util.to_list(group_by)) def as_scalar(self): """return a 'scalar' representation of this selectable, which can be used as a column expression. Typically, a select statement which has only one column in its columns clause is eligible to be used as a scalar expression. The returned object is an instance of :class:`_ScalarSelect`. """ return _ScalarSelect(self) @_generative def apply_labels(self): """return a new selectable with the 'use_labels' flag set to True. This will result in column expressions being generated using labels against their table name, such as "SELECT somecolumn AS tablename_somecolumn". This allows selectables which contain multiple FROM clauses to produce a unique set of column names regardless of name conflicts among the individual FROM clauses. """ self.use_labels = True def label(self, name): """return a 'scalar' representation of this selectable, embedded as a subquery with a label. See also ``as_scalar()``. """ return self.as_scalar().label(name) @_generative @util.deprecated('0.6', message=":func:`.autocommit` is deprecated. Use " ":func:`.Executable.execution_options` with the " "'autocommit' flag.") def autocommit(self): """return a new selectable with the 'autocommit' flag set to True.""" self._execution_options = \ self._execution_options.union({'autocommit': True}) def _generate(self): """Override the default _generate() method to also clear out exported collections.""" s = self.__class__.__new__(self.__class__) s.__dict__ = self.__dict__.copy() s._reset_exported() return s @_generative def limit(self, limit): """return a new selectable with the given LIMIT criterion applied.""" self._limit = util.asint(limit) @_generative def offset(self, offset): """return a new selectable with the given OFFSET criterion applied.""" self._offset = util.asint(offset) @_generative def order_by(self, *clauses): """return a new selectable with the given list of ORDER BY criterion applied. The criterion will be appended to any pre-existing ORDER BY criterion. """ self.append_order_by(*clauses) @_generative def group_by(self, *clauses): """return a new selectable with the given list of GROUP BY criterion applied. The criterion will be appended to any pre-existing GROUP BY criterion. """ self.append_group_by(*clauses) def append_order_by(self, *clauses): """Append the given ORDER BY criterion applied to this selectable. The criterion will be appended to any pre-existing ORDER BY criterion. """ if len(clauses) == 1 and clauses[0] is None: self._order_by_clause = ClauseList() else: if getattr(self, '_order_by_clause', None) is not None: clauses = list(self._order_by_clause) + list(clauses) self._order_by_clause = ClauseList(*clauses) def append_group_by(self, *clauses): """Append the given GROUP BY criterion applied to this selectable. The criterion will be appended to any pre-existing GROUP BY criterion. 
""" if len(clauses) == 1 and clauses[0] is None: self._group_by_clause = ClauseList() else: if getattr(self, '_group_by_clause', None) is not None: clauses = list(self._group_by_clause) + list(clauses) self._group_by_clause = ClauseList(*clauses) @property def _from_objects(self): return [self] class _ScalarSelect(_Grouping): _from_objects = [] def __init__(self, element): self.element = element self.type = element._scalar_type() @property def columns(self): raise exc.InvalidRequestError('Scalar Select expression has no ' 'columns; use this object directly within a ' 'column-level expression.') c = columns def self_group(self, **kwargs): return self def _make_proxy(self, selectable, name): return list(self.inner_columns)[0]._make_proxy(selectable, name) class CompoundSelect(_SelectBase): """Forms the basis of ``UNION``, ``UNION ALL``, and other SELECT-based set operations.""" __visit_name__ = 'compound_select' UNION = util.symbol('UNION') UNION_ALL = util.symbol('UNION ALL') EXCEPT = util.symbol('EXCEPT') EXCEPT_ALL = util.symbol('EXCEPT ALL') INTERSECT = util.symbol('INTERSECT') INTERSECT_ALL = util.symbol('INTERSECT ALL') def __init__(self, keyword, *selects, **kwargs): self._should_correlate = kwargs.pop('correlate', False) self.keyword = keyword self.selects = [] numcols = None # some DBs do not like ORDER BY in the inner queries of a UNION, etc. for n, s in enumerate(selects): s = _clause_element_as_expr(s) if not numcols: numcols = len(s.c) elif len(s.c) != numcols: raise exc.ArgumentError('All selectables passed to ' 'CompoundSelect must have identical numbers of ' 'columns; select #%d has %d columns, select ' '#%d has %d' % (1, len(self.selects[0].c), n + 1, len(s.c))) self.selects.append(s.self_group(self)) _SelectBase.__init__(self, **kwargs) def _scalar_type(self): return self.selects[0]._scalar_type() def self_group(self, against=None): return _FromGrouping(self) def is_derived_from(self, fromclause): for s in self.selects: if s.is_derived_from(fromclause): return True return False def _populate_column_collection(self): for cols in zip(*[s.c for s in self.selects]): # this is a slightly hacky thing - the union exports a # column that resembles just that of the *first* selectable. # to get at a "composite" column, particularly foreign keys, # you have to dig through the proxies collection which we # generate below. We may want to improve upon this, such as # perhaps _make_proxy can accept a list of other columns # that are "shared" - schema.column can then copy all the # ForeignKeys in. this would allow the union() to have all # those fks too. 
proxy = cols[0]._make_proxy(self, name=self.use_labels and cols[0]._label or None) # hand-construct the "proxies" collection to include all # derived columns place a 'weight' annotation corresponding # to how low in the list of select()s the column occurs, so # that the corresponding_column() operation can resolve # conflicts proxy.proxies = [c._annotate({'weight': i + 1}) for (i, c) in enumerate(cols)] def _copy_internals(self, clone=_clone): self._reset_exported() self.selects = [clone(s) for s in self.selects] if hasattr(self, '_col_map'): del self._col_map for attr in ('_order_by_clause', '_group_by_clause'): if getattr(self, attr) is not None: setattr(self, attr, clone(getattr(self, attr))) def get_children(self, column_collections=True, **kwargs): return (column_collections and list(self.c) or []) \ + [self._order_by_clause, self._group_by_clause] \ + list(self.selects) def bind(self): if self._bind: return self._bind for s in self.selects: e = s.bind if e: return e else: return None def _set_bind(self, bind): self._bind = bind bind = property(bind, _set_bind) class Select(_SelectBase): """Represents a ``SELECT`` statement. Select statements support appendable clauses, as well as the ability to execute themselves and return a result set. """ __visit_name__ = 'select' _prefixes = () _hints = util.immutabledict() _distinct = False def __init__(self, columns, whereclause=None, from_obj=None, distinct=False, having=None, correlate=True, prefixes=None, **kwargs): """Construct a Select object. The public constructor for Select is the :func:`select` function; see that function for argument descriptions. Additional generative and mutator methods are available on the :class:`_SelectBase` superclass. """ self._should_correlate = correlate if distinct is not False: if isinstance(distinct, basestring): util.warn_deprecated( "A string argument passed to the 'distinct' " "keyword argument of 'select()' is deprecated " "- please use 'prefixes' or 'prefix_with()' " "to specify additional prefixes") if prefixes: prefixes = util.to_list(prefixes) + [distinct] else: prefixes = [distinct] elif distinct is True: self._distinct = True else: self._distinct = [ _literal_as_text(e) for e in util.to_list(distinct) ] self._correlate = set() self._froms = util.OrderedSet() try: cols_present = bool(columns) except TypeError: raise exc.ArgumentError("columns argument to select() must " "be a Python list or other iterable") if cols_present: self._raw_columns = [] for c in columns: c = _literal_as_column(c) if isinstance(c, _ScalarSelect): c = c.self_group(against=operators.comma_op) self._raw_columns.append(c) self._froms.update(_from_objects(*self._raw_columns)) else: self._raw_columns = [] if whereclause is not None: self._whereclause = _literal_as_text(whereclause) self._froms.update(_from_objects(self._whereclause)) else: self._whereclause = None if from_obj is not None: for f in util.to_list(from_obj): if _is_literal(f): self._froms.add(_TextClause(f)) else: self._froms.add(f) if having is not None: self._having = _literal_as_text(having) else: self._having = None if prefixes: self._prefixes = tuple([_literal_as_text(p) for p in prefixes]) _SelectBase.__init__(self, **kwargs) def _get_display_froms(self, existing_froms=None): """Return the full list of 'from' clauses to be displayed. Takes into account a set of existing froms which may be rendered in the FROM clause of enclosing selects; this Select may want to leave those absent if it is automatically correlating. 
""" froms = self._froms toremove = itertools.chain(*[f._hide_froms for f in froms]) if toremove: froms = froms.difference(toremove) if len(froms) > 1 or self._correlate: if self._correlate: froms = froms.difference(_cloned_intersection(froms, self._correlate)) if self._should_correlate and existing_froms: froms = froms.difference(_cloned_intersection(froms, existing_froms)) if not len(froms): raise exc.InvalidRequestError("Select statement '%s" "' returned no FROM clauses due to " "auto-correlation; specify " "correlate(<tables>) to control " "correlation manually." % self) return froms def _scalar_type(self): elem = self._raw_columns[0] cols = list(elem._select_iterable) return cols[0].type @property def froms(self): """Return the displayed list of FromClause elements.""" return self._get_display_froms() @_generative def with_hint(self, selectable, text, dialect_name='*'): """Add an indexing hint for the given selectable to this :class:`.Select`. The text of the hint is rendered in the appropriate location for the database backend in use, relative to the given :class:`.Table` or :class:`.Alias` passed as the *selectable* argument. The dialect implementation typically uses Python string substitution syntax with the token ``%(name)s`` to render the name of the table or alias. E.g. when using Oracle, the following:: select([mytable]).\\ with_hint(mytable, "+ index(%(name)s ix_mytable)") Would render SQL as:: select /*+ index(mytable ix_mytable) */ ... from mytable The ``dialect_name`` option will limit the rendering of a particular hint to a particular backend. Such as, to add hints for both Oracle and Sybase simultaneously:: select([mytable]).\\ with_hint(mytable, "+ index(%(name)s ix_mytable)", 'oracle').\\ with_hint(mytable, "WITH INDEX ix_mytable", 'sybase') """ self._hints = self._hints.union({(selectable, dialect_name):text}) @property def type(self): raise exc.InvalidRequestError("Select objects don't have a type. " "Call as_scalar() on this Select object " "to return a 'scalar' version of this Select.") @util.memoized_instancemethod def locate_all_froms(self): """return a Set of all FromClause elements referenced by this Select. This set is a superset of that returned by the ``froms`` property, which is specifically for those FromClause elements that would actually be rendered. """ return self._froms.union(_from_objects(*list(self._froms))) @property def inner_columns(self): """an iterator of all ColumnElement expressions which would be rendered into the columns clause of the resulting SELECT statement. 
""" return _select_iterables(self._raw_columns) def is_derived_from(self, fromclause): if self in fromclause._cloned_set: return True for f in self.locate_all_froms(): if f.is_derived_from(fromclause): return True return False def _copy_internals(self, clone=_clone): self._reset_exported() from_cloned = dict((f, clone(f)) for f in self._froms.union(self._correlate)) self._froms = util.OrderedSet(from_cloned[f] for f in self._froms) self._correlate = set(from_cloned[f] for f in self._correlate) self._raw_columns = [clone(c) for c in self._raw_columns] for attr in '_whereclause', '_having', '_order_by_clause', \ '_group_by_clause': if getattr(self, attr) is not None: setattr(self, attr, clone(getattr(self, attr))) def get_children(self, column_collections=True, **kwargs): """return child elements as per the ClauseElement specification.""" return (column_collections and list(self.columns) or []) + \ self._raw_columns + list(self._froms) + \ [x for x in (self._whereclause, self._having, self._order_by_clause, self._group_by_clause) if x is not None] @_generative def column(self, column): """return a new select() construct with the given column expression added to its columns clause. """ column = _literal_as_column(column) if isinstance(column, _ScalarSelect): column = column.self_group(against=operators.comma_op) self._raw_columns = self._raw_columns + [column] self._froms = self._froms.union(_from_objects(column)) @_generative def with_only_columns(self, columns): """return a new select() construct with its columns clause replaced with the given columns. """ self._raw_columns = [ isinstance(c, _ScalarSelect) and c.self_group(against=operators.comma_op) or c for c in [_literal_as_column(c) for c in columns] ] @_generative def where(self, whereclause): """return a new select() construct with the given expression added to its WHERE clause, joined to the existing clause via AND, if any. """ self.append_whereclause(whereclause) @_generative def having(self, having): """return a new select() construct with the given expression added to its HAVING clause, joined to the existing clause via AND, if any. """ self.append_having(having) @_generative def distinct(self, *expr): """Return a new select() construct which will apply DISTINCT to its columns clause. :param \*expr: optional column expressions. When present, the Postgresql dialect will render a ``DISTINCT ON (<expressions>>)`` construct. """ if expr: expr = [_literal_as_text(e) for e in expr] if isinstance(self._distinct, list): self._distinct = self._distinct + expr else: self._distinct = expr else: self._distinct = True @_generative def prefix_with(self, *expr): """return a new select() construct which will apply the given expressions, typically strings, to the start of its columns clause, not using any commas. In particular is useful for MySQL keywords. e.g.:: select(['a', 'b']).prefix_with('HIGH_PRIORITY', 'SQL_SMALL_RESULT', 'ALL') Would render:: SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL a, b """ expr = tuple(_literal_as_text(e) for e in expr) self._prefixes = self._prefixes + expr @_generative def select_from(self, fromclause): """return a new select() construct with the given FROM expression applied to its list of FROM objects. """ fromclause = _literal_as_text(fromclause) self._froms = self._froms.union([fromclause]) @_generative def correlate(self, *fromclauses): """return a new select() construct which will correlate the given FROM clauses to that of an enclosing select(), if a match is found. 
By "match", the given fromclause must be present in this select's list of FROM objects and also present in an enclosing select's list of FROM objects. Calling this method turns off the select's default behavior of "auto-correlation". Normally, select() auto-correlates all of its FROM clauses to those of an embedded select when compiled. If the fromclause is None, correlation is disabled for the returned select(). """ self._should_correlate = False if fromclauses == (None,): self._correlate = set() else: self._correlate = self._correlate.union(fromclauses) def append_correlation(self, fromclause): """append the given correlation expression to this select() construct.""" self._should_correlate = False self._correlate = self._correlate.union([fromclause]) def append_column(self, column): """append the given column expression to the columns clause of this select() construct. """ column = _literal_as_column(column) if isinstance(column, _ScalarSelect): column = column.self_group(against=operators.comma_op) self._raw_columns = self._raw_columns + [column] self._froms = self._froms.union(_from_objects(column)) self._reset_exported() def append_prefix(self, clause): """append the given columns clause prefix expression to this select() construct. """ clause = _literal_as_text(clause) self._prefixes = self._prefixes + (clause,) def append_whereclause(self, whereclause): """append the given expression to this select() construct's WHERE criterion. The expression will be joined to existing WHERE criterion via AND. """ whereclause = _literal_as_text(whereclause) self._froms = self._froms.union(_from_objects(whereclause)) if self._whereclause is not None: self._whereclause = and_(self._whereclause, whereclause) else: self._whereclause = whereclause def append_having(self, having): """append the given expression to this select() construct's HAVING criterion. The expression will be joined to existing HAVING criterion via AND. """ if self._having is not None: self._having = and_(self._having, _literal_as_text(having)) else: self._having = _literal_as_text(having) def append_from(self, fromclause): """append the given FromClause expression to this select() construct's FROM clause. """ if _is_literal(fromclause): fromclause = _TextClause(fromclause) self._froms = self._froms.union([fromclause]) def _populate_column_collection(self): for c in self.inner_columns: if hasattr(c, '_make_proxy'): c._make_proxy(self, name=self.use_labels and c._label or None) def self_group(self, against=None): """return a 'grouping' construct as per the ClauseElement specification. This produces an element that can be embedded in an expression. Note that this method is called automatically as needed when constructing expressions. """ if isinstance(against, CompoundSelect): return self return _FromGrouping(self) def union(self, other, **kwargs): """return a SQL UNION of this select() construct against the given selectable.""" return union(self, other, **kwargs) def union_all(self, other, **kwargs): """return a SQL UNION ALL of this select() construct against the given selectable. """ return union_all(self, other, **kwargs) def except_(self, other, **kwargs): """return a SQL EXCEPT of this select() construct against the given selectable.""" return except_(self, other, **kwargs) def except_all(self, other, **kwargs): """return a SQL EXCEPT ALL of this select() construct against the given selectable. 
""" return except_all(self, other, **kwargs) def intersect(self, other, **kwargs): """return a SQL INTERSECT of this select() construct against the given selectable. """ return intersect(self, other, **kwargs) def intersect_all(self, other, **kwargs): """return a SQL INTERSECT ALL of this select() construct against the given selectable. """ return intersect_all(self, other, **kwargs) def bind(self): if self._bind: return self._bind if not self._froms: for c in self._raw_columns: e = c.bind if e: self._bind = e return e else: e = list(self._froms)[0].bind if e: self._bind = e return e return None def _set_bind(self, bind): self._bind = bind bind = property(bind, _set_bind) class UpdateBase(Executable, ClauseElement): """Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements.""" __visit_name__ = 'update_base' _execution_options = \ Executable._execution_options.union({'autocommit': True}) kwargs = util.immutabledict() def _process_colparams(self, parameters): if isinstance(parameters, (list, tuple)): pp = {} for i, c in enumerate(self.table.c): pp[c.key] = parameters[i] return pp else: return parameters def params(self, *arg, **kw): raise NotImplementedError( "params() is not supported for INSERT/UPDATE/DELETE statements." " To set the values for an INSERT or UPDATE statement, use" " stmt.values(**parameters).") def bind(self): return self._bind or self.table.bind def _set_bind(self, bind): self._bind = bind bind = property(bind, _set_bind) _returning_re = re.compile(r'(?:firebird|postgres(?:ql)?)_returning') def _process_deprecated_kw(self, kwargs): for k in list(kwargs): m = self._returning_re.match(k) if m: self._returning = kwargs.pop(k) util.warn_deprecated( "The %r argument is deprecated. Please " "use statement.returning(col1, col2, ...)" % k ) return kwargs @_generative def returning(self, *cols): """Add a RETURNING or equivalent clause to this statement. The given list of columns represent columns within the table that is the target of the INSERT, UPDATE, or DELETE. Each element can be any column expression. :class:`~sqlalchemy.schema.Table` objects will be expanded into their individual columns. Upon compilation, a RETURNING clause, or database equivalent, will be rendered within the statement. For INSERT and UPDATE, the values are the newly inserted/updated values. For DELETE, the values are those of the rows which were deleted. Upon execution, the values of the columns to be returned are made available via the result set and can be iterated using ``fetchone()`` and similar. For DBAPIs which do not natively support returning values (i.e. cx_oracle), SQLAlchemy will approximate this behavior at the result level so that a reasonable amount of behavioral neutrality is provided. Note that not all databases/DBAPIs support RETURNING. For those backends with no support, an exception is raised upon compilation and/or execution. For those who do support it, the functionality across backends varies greatly, including restrictions on executemany() and other statements which return multiple rows. Please read the documentation notes for the database in use in order to determine the availability of RETURNING. 
""" self._returning = cols class ValuesBase(UpdateBase): """Supplies support for :meth:`.ValuesBase.values` to INSERT and UPDATE constructs.""" __visit_name__ = 'values_base' def __init__(self, table, values): self.table = table self.parameters = self._process_colparams(values) @_generative def values(self, *args, **kwargs): """specify the VALUES clause for an INSERT statement, or the SET clause for an UPDATE. \**kwargs key=<somevalue> arguments \*args A single dictionary can be sent as the first positional argument. This allows non-string based keys, such as Column objects, to be used. """ if args: v = args[0] else: v = {} if self.parameters is None: self.parameters = self._process_colparams(v) self.parameters.update(kwargs) else: self.parameters = self.parameters.copy() self.parameters.update(self._process_colparams(v)) self.parameters.update(kwargs) class Insert(ValuesBase): """Represent an INSERT construct. The :class:`.Insert` object is created using the :func:`insert()` function. """ __visit_name__ = 'insert' _prefixes = () def __init__(self, table, values=None, inline=False, bind=None, prefixes=None, returning=None, **kwargs): ValuesBase.__init__(self, table, values) self._bind = bind self.select = None self.inline = inline self._returning = returning if prefixes: self._prefixes = tuple([_literal_as_text(p) for p in prefixes]) if kwargs: self.kwargs = self._process_deprecated_kw(kwargs) def get_children(self, **kwargs): if self.select is not None: return self.select, else: return () def _copy_internals(self, clone=_clone): # TODO: coverage self.parameters = self.parameters.copy() @_generative def prefix_with(self, clause): """Add a word or expression between INSERT and INTO. Generative. If multiple prefixes are supplied, they will be separated with spaces. """ clause = _literal_as_text(clause) self._prefixes = self._prefixes + (clause,) class Update(ValuesBase): """Represent an Update construct. The :class:`.Update` object is created using the :func:`update()` function. """ __visit_name__ = 'update' def __init__(self, table, whereclause, values=None, inline=False, bind=None, returning=None, **kwargs): ValuesBase.__init__(self, table, values) self._bind = bind self._returning = returning if whereclause is not None: self._whereclause = _literal_as_text(whereclause) else: self._whereclause = None self.inline = inline if kwargs: self.kwargs = self._process_deprecated_kw(kwargs) def get_children(self, **kwargs): if self._whereclause is not None: return self._whereclause, else: return () def _copy_internals(self, clone=_clone): # TODO: coverage self._whereclause = clone(self._whereclause) self.parameters = self.parameters.copy() @_generative def where(self, whereclause): """return a new update() construct with the given expression added to its WHERE clause, joined to the existing clause via AND, if any. """ if self._whereclause is not None: self._whereclause = and_(self._whereclause, _literal_as_text(whereclause)) else: self._whereclause = _literal_as_text(whereclause) class Delete(UpdateBase): """Represent a DELETE construct. The :class:`.Delete` object is created using the :func:`delete()` function. 
""" __visit_name__ = 'delete' def __init__(self, table, whereclause, bind=None, returning =None, **kwargs): self._bind = bind self.table = table self._returning = returning if whereclause is not None: self._whereclause = _literal_as_text(whereclause) else: self._whereclause = None if kwargs: self.kwargs = self._process_deprecated_kw(kwargs) def get_children(self, **kwargs): if self._whereclause is not None: return self._whereclause, else: return () @_generative def where(self, whereclause): """Add the given WHERE clause to a newly returned delete construct.""" if self._whereclause is not None: self._whereclause = and_(self._whereclause, _literal_as_text(whereclause)) else: self._whereclause = _literal_as_text(whereclause) def _copy_internals(self, clone=_clone): # TODO: coverage self._whereclause = clone(self._whereclause) class _IdentifiedClause(Executable, ClauseElement): __visit_name__ = 'identified' _execution_options = \ Executable._execution_options.union({'autocommit': False}) quote = None def __init__(self, ident): self.ident = ident class SavepointClause(_IdentifiedClause): __visit_name__ = 'savepoint' class RollbackToSavepointClause(_IdentifiedClause): __visit_name__ = 'rollback_to_savepoint' class ReleaseSavepointClause(_IdentifiedClause): __visit_name__ = 'release_savepoint'
eunchong/build
third_party/sqlalchemy_0_7_1/sqlalchemy/sql/expression.py
Python
bsd-3-clause
168783
[ "VisIt" ]
fad1797685569affcbb7e3e38ef4a25b992d6b3917a7ea4d81b401c18f48fa49
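A minimal usage sketch of the constructs defined in the expression.py record above (Join built via FromClause.join(), Join.alias(), and the exists() construct). This is not part of the original file: the table and column names are hypothetical, and it assumes only the SQLAlchemy 0.7.x public API that the record itself exports.

# Hypothetical tables built with the lightweight table()/column() constructs.
from sqlalchemy.sql import table, column, select, exists

users = table('users', column('id'), column('name'))
addresses = table('addresses', column('id'), column('user_id'))

# FromClause.join() produces the Join construct defined above; Join.select()
# then wraps it in a SELECT with the join as its FROM clause.
j = users.join(addresses, users.c.id == addresses.c.user_id)
stmt = j.select().where(users.c.name == 'ed')

# Join.alias() goes through select(...).correlate(False).alias(), producing
# a named, non-correlating subquery against the join.
user_addresses = j.alias(name='user_addresses')

# _Exists.where() AND-joins criteria onto the inner SELECT of the EXISTS.
stmt2 = select([users.c.name]).where(
    exists().where(addresses.c.user_id == users.c.id))

print(stmt)  # renders something like: SELECT ... FROM users JOIN addresses ON users.id = addresses.user_id WHERE ...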
#!/usr/bin/python

from multiprocessing import Pool
import time
import os
import sys
import argparse
from Bio.Blast.Applications import NcbiblastxCommandline

# Copyright(C) 2014 David Ream
# Released under GPL version 3 licence. http://www.gnu.org/licenses/lgpl.html
# Do not remove this comment

# This exists to make the main function easier to read. It contains code to run the argument parser, and does nothing else.
def parser_code():

    parser = argparse.ArgumentParser(description='Run BLAST on a set of searchable databases using a query that is specified by the user. Results will be stored by the accession number of the database. Currently this script will only accept protein queries, but I will update to automatically run on all types of genes, as most of the information needed for this behavior exists.')

    parser.add_argument("-d", "--database_folder", dest="database_folder", metavar="DIRECTORY", default='./db/',
                help="Folder containing all BLAST searchable databases to be used by the program.")

    parser.add_argument("-o", "--outfolder", dest="outfolder", metavar="DIRECTORY", default='./blast_result/',
                help="Folder where the BLAST results will be stored.")

    parser.add_argument("-f", "--filter", dest="filter", metavar="FILE", default='NONE',
                help="File restricting which accession numbers this script will process. If no file is provided, filtering is not performed.")

    parser.add_argument("-n", "--num_proc", dest="num_proc", metavar="INT", default = os.sysconf("SC_NPROCESSORS_CONF"), type=int,
                help="Number of processors that you want this script to run on. The default is every CPU that the system has.")

    # Fix this option: ultimately it should be a folder that reads in an operon file with the name of operon(s) and a list of gene names and types.
    # The program will then take two files, protein and nucleic acid queries, and run them. (This would require that there are two separate, and complementary,
    # blast databases within this folder. My desire is that it would be two subfolders, 'protein/' and 'rna/', which house these sets of data.)
    parser.add_argument("-q", "--query", dest="query", default='./operon_query_files/protein_matches.fa', metavar="FILE",
                help="A file that contains the BLAST query for every gene of interest in the dataset.")

    parser.add_argument("-e", "--eval", dest="eval", default='1e-10', metavar="FLOAT", type=float,
                help="eval for the BLAST search.")

    return parser.parse_args()


def check_options(parsed_args):
    # section of code that checks the database entry
    if os.path.isdir(parsed_args.database_folder):
        database_folder = parsed_args.database_folder
    else:
        print "The folder %s does not exist." % parsed_args.database_folder
        sys.exit()

    # if the directory that the user specifies does not exist, then the program makes it for them
    if not os.path.isdir(parsed_args.outfolder):
        os.makedirs(parsed_args.outfolder)
    outfolder = parsed_args.outfolder

    # check the filter file ('NONE' is the sentinel meaning no filtering)
    if parsed_args.filter == 'NONE' or os.path.exists(parsed_args.filter):
        filter_file = parsed_args.filter
    else:
        print "The file %s does not exist." % parsed_args.filter
        sys.exit()

    # section of code that deals with determining the number of CPU cores that will be used by the program
    if parsed_args.num_proc > os.sysconf("SC_NPROCESSORS_CONF"):
        num_proc = os.sysconf("SC_NPROCESSORS_CONF")
    elif parsed_args.num_proc < 1:
        num_proc = 1
    else:
        num_proc = int(parsed_args.num_proc)

    # check the query file
    if os.path.exists(parsed_args.query):
        query_file = parsed_args.query
    else:
        print "The file %s does not exist." % parsed_args.query
        sys.exit()

    e_val = parsed_args.eval

    return database_folder, outfolder, filter_file, num_proc, query_file, e_val


# this function will return all of the files that are in a directory. os.walk is recursive traversal.
def returnRecursiveDirFiles(root_dir):
    result = []
    for path, dir_name, flist in os.walk(root_dir):
        for f in flist:
            fname = os.path.join(path, f)
            if os.path.isfile(fname):
                result.append(fname)
    return result


# This code right now only deals with protein, but I will add functionality later for nucleotides.
# Just moving the project along here, but this is a critical flaw moving forward.
def do_parallel_blast(arg_tuple):
    db, query_file, blast_result_folder, num_processors, eval_threshold = arg_tuple
    out_file = "%s%s.txt" % (blast_result_folder, db.split('/')[-1].split('.')[0])
    # earlier variants of the command, kept for reference:
    #cmd = "blastall -p tblastn -a %i -i %s -d %s -e %s -o %s -m 9" % (num_processors, query_file, db, eval_threshold, out_file)
    #cmd = "blastall -p tblastn -a %i -i %s -d %s -e %s -o %s -m 9" % (1, query_file, db, eval_threshold, out_file)
    #cmd = "blastall -p blastp -a %i -i %s -d %s -e %s -o %s -m 9" % (1, query_file, db, eval_threshold, out_file)
    cmd = "blastall -p blastp -a %i -i %s -d %s -e %s -o %s -m 8" % (1, query_file, db, eval_threshold, out_file)
    os.system(cmd)


def parallel_blast(database_folder, outfolder, filter_file, num_proc, query_file, e_val):
    # you kinda have to trust me here, but having blast run on as many threads per CPU as you have total processors is fastest
    # I have no idea why this is... ugh.
    unfiltered_db_list = [i for i in returnRecursiveDirFiles(database_folder) if i.split('/')[-1].split('.')[-1] == 'ffc']
    if filter_file == 'NONE':  # 'NONE' is the sentinel set by check_options when no filter file is given
        db_list = unfiltered_db_list
    else:
        filter_list = [i.strip() for i in open(filter_file).readlines()]
        db_list = [i for i in unfiltered_db_list if i.split('/')[-1].split('.')[0] in filter_list]

    blast_arg_list = [(i, query_file, outfolder, 1, e_val) for i in db_list]
    pool = Pool(processes = num_proc)
    pool.map(do_parallel_blast, blast_arg_list)


def main():
    start = time.time()

    parsed_args = parser_code()

    database_folder, outfolder, filter_file, num_proc, query_file, e_val = check_options(parsed_args)

    print database_folder, outfolder, filter_file, num_proc, query_file, e_val

    parallel_blast(database_folder, outfolder, filter_file, num_proc, query_file, e_val)

    print time.time() - start

# ./blast_script.py -d ./db/ -o ./blast_result/ -q ./operon_query.fa
if __name__ == '__main__':
    main()
schaefce/gene_block_evolution
blast_script.py
Python
gpl-3.0
7010
[ "BLAST" ]
8c798e2be4d261a073f3d556784e27ef3af723b3f9fa8577c45925fa1bf918c7
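A small runnable sketch of the fan-out pattern blast_script.py relies on: one argument tuple per database file, mapped across a multiprocessing Pool so each worker drives a single-threaded blastall run. The database paths below are hypothetical placeholders, and the worker only prints the command rather than shelling out.

from multiprocessing import Pool

def run_one(arg_tuple):
    # mirrors do_parallel_blast: one worker, one database, one blastall call
    db, query, outdir, threads, e_val = arg_tuple
    out_file = "%s%s.txt" % (outdir, db.split('/')[-1].split('.')[0])
    print("would run: blastall -p blastp -a %i -i %s -d %s -e %s -o %s -m 8"
          % (threads, query, db, e_val, out_file))

if __name__ == '__main__':
    dbs = ['./db/NC_000913.ffc', './db/NC_002695.ffc']  # hypothetical databases
    args = [(db, './query.fa', './blast_result/', 1, 1e-10) for db in dbs]
    Pool(processes=2).map(run_one, args)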
import matplotlib.pyplot as plt
import os
from astropy.table import Table
import numpy as np

# set up information sources
degas = Table.read(os.path.join(os.environ['SCRIPTDIR'], 'degas_base.fits'))
stack = Table.read('/lustre/cv/users/akepley/degas/stack_test/stack_IR6p0_mom1.fits')

plotDir = os.path.join(os.environ['ANALYSISDIR'], 'plots', 'fdense_plots')
if not os.path.exists(plotDir):
    os.mkdir(plotDir)

# only look at dr1 galaxies
dr1 = degas['DR1'] == 1
ndr1 = np.sum(dr1)

# set up plot style: cycle the 6 markers and 8 colors over the dr1 galaxies
markers = ['o', 'v', '^', 's', '*', 'D']
colors = ['royalblue', 'forestgreen', 'darkorange', 'royalblue', 'crimson',
          'rebeccapurple', 'darkcyan', 'darkmagenta']

markerlist = np.tile(markers, int(np.ceil(ndr1 / len(markers))))
markerlist = markerlist[0:ndr1]

colorlist = np.tile(colors, int(np.ceil(ndr1 / len(colors))))
colorlist = colorlist[0:ndr1]

# set up plot
fig = plt.figure(figsize=(8, 6), facecolor='white', edgecolor='white')
fig.subplots_adjust(left=0.1, right=0.8, bottom=0.1, top=0.9)
ax = fig.add_subplot(1, 1, 1)

# for each dr1 galaxy, plot the dense-gas tracer ratio (HCN-to-CO) versus stellar mass surface density
for (galaxy, color, marker) in zip(degas[dr1], colorlist, markerlist):

    idx = ((stack['galaxy'] == galaxy['NAME'])
           & (stack['bin_type'] == 'stellarmass'))

    mstar = stack[idx]['bin_mean']
    lolims = stack[idx]['ratio_HCN_CO_lolim']
    fdense = stack[idx]['ratio_HCN_CO']
    fdense_err = stack[idx]['ratio_HCN_CO_err']
    fdense_err[lolims] = fdense[lolims] * 0.3

    # lower limits are excluded from this "nolim" version of the plot
    ax.errorbar(mstar[~lolims], fdense[~lolims],
                yerr=fdense_err[~lolims],
                marker=marker, linestyle='--',
                color=color,
                label=galaxy['NAME'])

ax.set_yscale('log')
ax.set_xscale('log')
ax.legend(loc='upper left', bbox_to_anchor=(1.0, 1.0))
ax.set_xlabel(r'$\Sigma_{*}$ (M$_\odot$ pc$^{-2}$)')
ax.set_ylabel(r'log$_{10}$ (HCN-to-CO)')

fig.show()
fig.savefig(os.path.join(plotDir, 'fdense_vs_mstar_combined_nolim.pdf'))
fig.savefig(os.path.join(plotDir, 'fdense_vs_mstar_combined_nolim.png'))
plt.close()
low-sky/degas
scripts/plot_fdense_vs_mstar_combined_nolim.py
Python
gpl-3.0
2115
[ "Galaxy" ]
bc80fb405a0c86f033a13aaa186a3471e8761dd2f0d26b10bc4c188614193e7c
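A self-contained sketch (synthetic numbers, not DEGAS data) of the plotting idiom in the script above: lower-limit bins are masked out of the errorbar call with ~lolims, and both axes are put on log scale.

import numpy as np
import matplotlib
matplotlib.use('Agg')  # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt

mstar = np.logspace(1, 3, 8)              # synthetic stellar mass surface densities
fdense = 0.02 * (mstar / 100.0)**0.3      # synthetic HCN-to-CO ratios
fdense_err = 0.2 * fdense
lolims = np.zeros(len(mstar), dtype=bool)
lolims[:2] = True                         # pretend the two lowest bins are lower limits

fig, ax = plt.subplots()
ax.errorbar(mstar[~lolims], fdense[~lolims], yerr=fdense_err[~lolims],
            marker='o', linestyle='--', color='royalblue')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel(r'$\Sigma_{*}$ (M$_\odot$ pc$^{-2}$)')
ax.set_ylabel('HCN-to-CO')
fig.savefig('fdense_sketch.png')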
""" >>> import phoebe Available environment variables: * PHOEBE_ENABLE_PLOTTING=TRUE/FALSE (whether to import plotting libraries with phoebe: defaults to True) * PHOEBE_ENABLE_SYMPY=TRUE/FALSE (whether to attempt to import sympy for constraint algebra: defaults to True if sympy installed, otherwise False) * PHOEBE_ENABLE_ONLINE_PASSBANDS=TRUE/FALSE (whether to query for online passbands and download on-the-fly: defaults to True) * PHOEBE_PBDIR (directory to search for passbands, in addition to phoebe.list_passband_directories()) * PHOEBE_DOWNLOAD_PASSBAND_DEFAULTS_GZIPPED=TRUE/FALSE (whether to download gzipped version of passbands by default. Defaults to False. Note that gzipped files take longer to load and will increase time for import, but take significantly less disk-space.) * PHOEBE_DOWNLOAD_PASSBAND_DEFAULTS_CONTENT (default content, comma separated for list. Defaults to 'all') * PHOEBE_UPDATE_PASSBAND_IGNORE_VERSION=TRUE/FALSE (update passbands that need new content even if the online version is newer than the installed version. Defaults to False.) * PHOEBE_ENABLE_MPI=TRUE/FALSE (whether to use internal parallelization: defaults to True if within mpirun, otherwise False, can override in python with phoebe.mpi.on() and phoebe.mpi.off()) * PHOEBE_MPI_NPROCS=INT (number of procs to spawn in mpi is enabled but not running within mpirun: defaults to 4, only applicable if not within mpirun and PHOEBE_ENABLE_MPI=TRUE or phoebe.mpi.on() called, can override in python by passing nprocs to phoebe.mpi.on() or by setting phoebe.mpi.nprocs) * PHOEBE_MULTIPROC_NPROCS=INT (number of proces to use within multiprocessing. Multiprocessing is used for solver that support it and when sampling over a distribution in run_compute if MPI is not in use. Set to 0 to disable multiprocessing and force serial. Defaults to number of CPUs available.) * PHOEBE_PBDIR (directory to search for passbands, in addition to phoebe.list_passband_directories()) * PHOEBE_DEVEL=TRUE/FALSE enable developer mode by default """ __version__ = '2.3.59' import os as _os import sys as _sys import inspect as _inspect import multiprocessing as _multiprocessing import atexit import re # People shouldn't import Phoebe from the installation directory (inspired upon # pymc warning message). if _os.getcwd().find(_os.path.abspath(_os.path.split(_os.path.split(__file__)[0])[0]))>-1: # We have a clash of package name with the standard library: we implement an # "io" module and also they do. This means that you can import Phoebe from its # main source tree; then there is no difference between io from here and io # from the standard library. Thus, if the user loads the package from here # it will never work. 
Instead of letting Python raise the io clash (which # is uniformative to the unexperienced user), we raise the importError here # with a helpful error message raise ImportError('\n\tYou cannot import Phoebe from inside its main source tree.\n') def _env_variable_string_or_list(key, default): value = _os.getenv(key, default) if "," in value: return value.split(",") else: return value def _env_variable_int(key, default): value = _os.getenv(key, default) return int(value) def _env_variable_int_or_none(key, default): value = _os.getenv(key, default) if value is None or isinstance(value, str) and value.lower()=='none': return None if isinstance(value, str): value = float(value) return int(value) def _env_variable_bool(key, default): value = _os.getenv(key, default) if isinstance(value, bool): return value elif value.upper()=='TRUE': return True else: return False # If we try to load matplotlib.pyplot on a non-X system, it will fail # unless 'Agg' is used before the import. All X-systems define the # 'DISPLAY' environment variable, and all non-X-systems do not. We do make a # distinction between windows and unix based system. Hence: if _env_variable_bool('PHOEBE_ENABLE_PLOTTING', True): try: import matplotlib except ImportError: pass # we'll catch this later in plotting and throw warnings as necessary else: if 'DISPLAY' not in _os.environ.keys() and _sys.platform not in ['win32','cygwin']: matplotlib.use('Agg') elif hasattr(_sys, 'real_prefix'): # then we're likely in a virtualenv. Our best bet is to use the 'TkAgg' # backend, but this will require python-tk to be installed on the system try: matplotlib.use('Agg') except: matplotlib.use('TkAgg') import logging _logger = logging.getLogger("PHOEBE") _logger.addHandler(logging.NullHandler()) ############################################################################### ######################### BEGIN MPI ########################## ############################################################################### # detect if we're within mpirun and if so, place all non-zero-rank # processors into a wait loop. This must happen before we start importing from # phoebe so that those can have access to the _mpi object. 
class MPI(object): def __init__(self): # this is a bit of a hack and will only work with openmpi, but environment # variables seem to be the only way to detect whether the script was run # via mpirun or not evars = _os.environ.keys() if 'OMPI_COMM_WORLD_SIZE' in evars or 'MV2_COMM_WORLD_SIZE' in evars or 'PMI_SIZE' in evars: from mpi4py import MPI as mpi4py self._within_mpirun = True self._internal_mpi = True self._comm = mpi4py.COMM_WORLD self._myrank = self.comm.Get_rank() self._nprocs = self.comm.Get_size() if self._nprocs==1: raise ImportError("need more than 1 processor to run with mpi") self._enabled = _env_variable_bool("PHOEBE_ENABLE_MPI", True) else: self._within_mpirun = False self._internal_mpi = False self._comm = None self._myrank = 0 self._nprocs = _env_variable_int("PHOEBE_MPI_NPROCS", 4) self._enabled = _env_variable_bool("PHOEBE_ENABLE_MPI", False) def __repr__(self): return "<MPI mode={} myrank={} nprocs={}>".format(self.mode, self.myrank, self.nprocs) @property def mode(self): if self.within_mpirun: if self.enabled: return "internal handling of mpi within mpirun" else: return "external handling of mpi by the user within mpirun" else: if self.enabled: return "internal handling of mpi in spawned separate threads during run_compute" else: return "serial mode" @property def enabled(self): return self._enabled def on(self, nprocs=None): if self.within_mpirun and not self.enabled: raise ValueError("cannot enable mpi after disabling within mpirun.") self._enabled = True if nprocs is not None: self.nprocs = nprocs def off(self): if self.within_mpirun and self.myrank == 0: self.comm.bcast({'worker_command': 'release'}, root=0) self._enabled = False @property def myrank(self): return self._myrank @property def nprocs(self): if not self.enabled and not self.within_mpirun: return 1 else: return self._nprocs @nprocs.setter def nprocs(self, nprocs): if self.within_mpirun: _logger.warning("ignoring setting nprocs while within mpirun, nprocs={}".format(self.nprocs)) else: self._nprocs = nprocs @property def comm(self): return self._comm @property def within_mpirun(self): return self._within_mpirun @property def detach_cmd(self): if self.within_mpirun: raise ValueError("detach not available within mpirun") # TODO: allow this as an option in the settings? python = 'python3' if self.enabled: return 'mpiexec -np %d %s {}' % (self.nprocs, python) else: return '%s {}' % python def shutdown_workers(self): if self.within_mpirun and self.myrank == 0: self.comm.bcast({'worker_command': 'shutdown'}, root=0) self._enabled = False # even though technically not true, we're now strictly serial and have no way of regaining the workers self._within_mpirun = False mpi = MPI() # NOTE: logic for worker waiting for tasks below after phoebe imports ############################################################################### ########################## END MPI ########################### ############################################################################### ############################################################################### ######################### BEGIN SETTINGS ######################## ############################################################################### class Settings(object): def __init__(self): # Check to see whether in interactive mode import __main__ # hasattr(__main__, '__file__') will be True if running a python script, but # false if in a python or ipython interpreter. 
# _sys.flags.interactive will be 1 if the -i flag is sent to python # For now we'll set interactive_constraints to True by default, requiring it to # explicitly be disabled. # See #154 (https://github.com/phoebe-project/phoebe2/issues/154) self._interactive_constraints = True # We'll set interactive system checks to be on if running within a Python # console, but False if running from within a script # See #255 (https://github.com/phoebe-project/phoebe2/issues/255) self._interactive_checks = not hasattr(__main__, '__file__') or bool(_sys.flags.interactive) self._download_passband_defaults = {'content': _env_variable_string_or_list('PHOEBE_DOWNLOAD_PASSBAND_DEFAULTS_CONTENT', 'all'), 'gzipped': _env_variable_bool('PHOEBE_DOWNLOAD_PASSBAND_DEFAULTS_GZIPPED', False)} self._update_passband_ignore_version = _env_variable_bool('PHOEBE_UPDATE_PASSBAND_IGNORE_VERSION', False) self._multiprocessing_nprocs = _env_variable_int_or_none('PHOEBE_MULTIPROC_NPROCS', None) self._progressbars = True # And we'll require explicitly setting developer mode on self._devel = _env_variable_bool('PHOEBE_DEVEL', False) def __repr__(self): return "<Settings interactive_checks={} interactive_constraints={}>".format(self.interactive_checks, self.interactive_constraints) def reset(self): self.__init__() def interactive_on(self): self.interactive_checks_on() self.interactive_constraints_on() def interactive_off(self, suppress_warning=False): self.interactive_checks_off(suppress_warning=suppress_warning) self.interactive_constraints_off(suppress_warning=suppress_warning) def interactive_checks_on(self): self._interactive_checks = True def interactive_checks_off(self, suppress_warning=False): if not suppress_warning: _logger.warning("checks will not be run until 'run_checks' or 'run_compute' is called.") self._interactive_checks = False def interactive_constraints_on(self): self._interactive_constraints = True def interactive_constraints_off(self, suppress_warning=False): if not suppress_warning: _logger.warning("constraints will not be run until 'run_delayed_constraints' or 'run_compute' is called. 
This may result in inconsistent parameters if printing values before calling either of these methods.")
        self._interactive_constraints = False

    @property
    def interactive_checks(self):
        return self._interactive_checks

    @property
    def interactive_constraints(self):
        return self._interactive_constraints

    def devel_on(self):
        self._devel = True

    def devel_off(self):
        self._devel = False

    @property
    def devel(self):
        return self._devel

    def set_download_passband_defaults(self, **kwargs):
        """
        """
        for k, v in kwargs.items():
            if k not in self._download_passband_defaults.keys():
                # NOTE: this message previously formatted two placeholders with a
                # single argument, which itself raised an IndexError
                raise KeyError("{} must be one of {}".format(k, list(self._download_passband_defaults.keys())))
            if k == 'content' and not (isinstance(v, str) or isinstance(v, list)):
                raise TypeError("content must be of type string or list")
            if k == 'gzipped' and not isinstance(v, bool):
                raise TypeError("gzipped must be of type bool")

            self._download_passband_defaults[k] = v

    def get_download_passband_defaults(self):
        return self._download_passband_defaults

    @property
    def download_passband_defaults(self):
        return self._download_passband_defaults

    @property
    def update_passband_ignore_version(self):
        return self._update_passband_ignore_version

    def update_passband_ignore_version_on(self):
        self._update_passband_ignore_version = True

    # NOTE: this was previously a second definition of
    # update_passband_ignore_version_on that silently shadowed the one above;
    # it is clearly the "off" toggle
    def update_passband_ignore_version_off(self):
        self._update_passband_ignore_version = False

    def multiprocessing_off(self):
        self._multiprocessing_nprocs = 0

    def multiprocessing_on(self):
        self._multiprocessing_nprocs = None

    def multiprocessing_set_nprocs(self, value):
        # NOTE: these errors were previously returned instead of raised
        if not isinstance(value, int):
            raise TypeError("must be integer")
        if value > _multiprocessing.cpu_count():
            raise ValueError("only {} CPUs available".format(_multiprocessing.cpu_count()))
        elif value < 0:
            raise ValueError("nprocs must be >= 0")
        self._multiprocessing_nprocs = value

    @property
    def multiprocessing_nprocs(self):
        if self._multiprocessing_nprocs is None:
            return _multiprocessing.cpu_count()
        return self._multiprocessing_nprocs

    def progressbars_on(self):
        self._progressbars = True

    def progressbars_off(self):
        self._progressbars = False

    @property
    def progressbars(self):
        return self._progressbars


conf = Settings()

###############################################################################
############################    END SETTINGS    ##############################
###############################################################################

# make packages available at top-level
from .dependencies.unitsiau2015 import u, c
from .dependencies.nparray import array, linspace, arange, logspace, geomspace, invspace
from .dependencies.distl import gaussian, gaussian_around, normal, boxcar, uniform, uniform_around, histogram_from_bins, histogram_from_data, mvgaussian, mvhistogram_from_data
from .atmospheres.passbands import install_passband, uninstall_passband, uninstall_all_passbands, download_passband, list_passband_online_history, update_passband_available, update_passband, update_all_passbands, list_all_update_passbands_available, list_online_passbands, list_installed_passbands, list_passbands, list_passband_directories, get_passband
from .parameters import hierarchy, component, compute, constraint, dataset, feature, figure, solver
from .frontend.bundle import Bundle
from .backend import backends as _backends
from .solverbackends import solverbackends as _solverbackends
from . import utils as _utils
from . import dynamics as dynamics
from . import distortions as distortions
from .
import algorithms as algorithms import libphoebe # Shortcut to building logger def logger(*args, **kwargs): """ Return a basic logger via a log file and/or terminal. Example 1: log only to the console, accepting levels "INFO" and above ```py logger = logger() ``` Example 2: log only to the console, accepting levels "DEBUG" and above ```py logger(clevel='DEBUG') ``` Example 3: log only to a file, accepting levels "DEBUG" and above ```py logger(clevel=None,filename='mylog.log') ``` Example 4: log only to a file, accepting levels "INFO" and above ```py logger(clevel=None,flevel='INFO',filename='mylog.log') ``` Example 5: log to the terminal (INFO and above) and file (DEBUG and above) ```py logger(filename='mylog.log') ``` Arguments ---------- * `clevel` (string, optional): level to be logged to the console. One of: "ERROR", "WARNING", "INFO", "DEBUG". * `flevel` (string, optional): level to be logged to the file. Must also provide `filename`. One of: "ERROR", "WARNING", "INFO", "DEBUG". * `filename` (string, optional): path to the file to log at the `flevel` level. * `style` (string, optional, default='default'): style to use for logging. One of: "default", "minimal", "grandpa". """ if mpi.within_mpirun and mpi.myrank == 0: # tell the workers to invoke the same logger mpi.comm.bcast({'worker_command': 'logger', 'args': args, 'kwargs': kwargs}, root=0) return _utils.get_basic_logger(*args, **kwargs) if mpi.within_mpirun and mpi.enabled and mpi.myrank != 0: while True: packet = mpi.comm.bcast(None, root=0) if packet.get('worker_command', False) == 'shutdown': _logger.debug("rank:{}/{} message to shutdown".format(mpi.myrank, mpi.nprocs)) exit() if packet.get('worker_command', False) == 'release': _logger.debug("rank:{}/{} message to release".format(mpi.myrank, mpi.nprocs)) break elif packet.get('worker_command', False) == 'logger': _logger.debug("rank:{}/{} message to invoke logger".format(mpi.myrank, mpi.nprocs)) logger(*packet['args'], **packet['kwargs']) elif hasattr(_backends, packet.get('backend', False)): backend = getattr(_backends, packet.pop('backend'))() backend._run_worker(packet) elif hasattr(_solverbackends, packet.get('backend', False)): backend = getattr(_solverbackends, packet.pop('backend'))() backend._run_worker(packet) else: raise ValueError("could not recognize packet: {}".format(packet)) # Shortcuts to bundle classmethods def open(*args, **kwargs): return Bundle.open(*args, **kwargs) open.__doc__ = Bundle.open.__doc__ def load(*args, **kwargs): return Bundle.open(*args, **kwargs) load.__doc__ = Bundle.open.__doc__ def from_legacy(*args, **kwargs): return Bundle.from_legacy(*args, **kwargs) from_legacy.__doc__ = Bundle.from_legacy.__doc__ def from_server(*args, **kwargs): return Bundle.from_server(*args, **kwargs) from_server.__doc__ = Bundle.from_server.__doc__ def default_star(*args, **kwargs): return Bundle.default_star(*args, **kwargs) default_star.__doc__ = Bundle.default_star.__doc__ def default_binary(*args, **kwargs): return Bundle.default_binary(*args, **kwargs) default_binary.__doc__ = Bundle.default_binary.__doc__ def default_contact_binary(*args, **kwargs): return Bundle.default_contact_binary(*args, **kwargs) default_contact_binary.__doc__ = Bundle.default_contact_binary.__doc__ def default_triple(*args, **kwargs): return Bundle.default_triple(*args, **kwargs) default_triple.__doc__ = Bundle.default_triple.__doc__ # Shortcuts to settings def reset_settings(): """ Reset all configuration settings (interactivity, etc) but NOT MPI settings. 
See also: * <phoebe.interactive_on> * <phoebe.interactive_off> * <phoebe.interactive_constraints_on> * <phoebe.interactive_constraints_off> * <phoebe.interactive_checks_on> * <phoebe.interactive_checks_off> """ conf.reset() def interactive_on(): """ Turn on both interactive constraints and interactive checks See also: * <phoebe.interactive_off> * <phoebe.interactive_constraints_on> * <phoebe.interactive_checks_on> """ conf.interactive_on() def interactive_off(): """ **USE WITH CAUTION** Turn off both interactive constraints and interactive checks See also: * <phoebe.interactive_on> * <phoebe.interactive_constraints_off> * <phoebe.interactive_checks_off> """ conf.interactive_off() def interactive_constraints_on(): """ Turn interactive constraints on. When enabled, PHOEBE will update all constraints whenever a <phoebe.parameters.Parameter> value is changed. Although this adds to the run-time, it ensures that all values are updated when accessed. By default, interactive constraints are always on unless disabled. See also: * <phoebe.interactive_constraints_off> """ conf.interactive_constraints_on() def interactive_constraints_off(): """ **USE WITH CAUTION** Turn interactive constraints off. When disabled, PHOEBE will **NOT** update constraints whenever a <phoebe.parameters.Parameter> value is changed, but will instead wait until needed (for example, by <phoebe.frontend.bundle.Bundle.run_compute>). Accessing/printing the value of a constrained Parameter, may be out-of-date when interactive constraints is off. By default, interactive constraints are always on unless disabled. To update constraints manually, you can call <phoebe.frontend.bundle.Bundle.run_delayed_constraints>. See also: * <phoebe.interactive_constraints_on> """ conf.interactive_constraints_off() def interactive_checks_on(): """ Turn interactive checks on. When enabled, PHOEBE will run system checks (<phoebe.frontend.bundle.Bundle.run_checks>) after any <phoebe.parameters.Parameter> value is changed and will log any issues to the logger as a warning. In order to see these messages, you must have a logger enabled with at least the "WARNING" level (see <phoebe.logger>). Whether interactive checks is on or off, system checks will be run when calling <phoebe.frontend.bundle.Bundle.run_compute> and will raise an error if failing. By default, interactive checks is ON if running PHOEBE in an interactive console (or Jupyter notebook), but OFF if running in a script (to save time but also save confusing logger messages). See also: * <phoebe.interactive_checks_off> """ conf.interactive_checks_on() def interactive_checks_off(): """ Turn interactive checks off. When disabled, PHOEBE will **NOT** run system checks (<phoebe.frontend.bundle.Bundle.run_checks>) after any <phoebe.parameters.Parameter> value is changed and will **NOT** log any issues to the logger as a warning. Whether interactive checks is on or off, system checks will be run when calling <phoebe.frontend.bundle.Bundle.run_compute> and will raise an error if failing. To manually run system checks at any time, you can call <phoebe.frontend.bundle.Bundle.run_checks>. By default, interactive checks is ON if running PHOEBE in an interactive console (or Jupyter notebook), but OFF if running in a script (to save time but also save confusing logger messages). 
See also:
    * <phoebe.interactive_checks_on>
    """
    conf.interactive_checks_off()

def devel_on():
    conf.devel_on()

def devel_off():
    conf.devel_off()

def set_download_passband_defaults(**kwargs):
    """
    Set default options to use for
    <phoebe.atmospheres.passbands.download_passband>.

    These can also be set at import time via the following environment variables:
    * PHOEBE_DOWNLOAD_PASSBAND_DEFAULTS_CONTENT (defaults to 'all')
    * PHOEBE_DOWNLOAD_PASSBAND_DEFAULTS_GZIPPED (defaults to FALSE)

    See also:
    * <phoebe.get_download_passband_defaults>
    * <phoebe.atmospheres.passbands.download_passband>
    * <phoebe.atmospheres.passbands.update_passband>
    * <phoebe.atmospheres.passbands.get_passband>

    Arguments
    ------------
    * `content` (string or list, optional): override the current value for
        `content` in <phoebe.get_download_passband_defaults>.
    * `gzipped` (bool, optional): override the current value for `gzipped`
        in <phoebe.get_download_passband_defaults>.
    """
    conf.set_download_passband_defaults(**kwargs)

def get_download_passband_defaults():
    """
    Access default options to use for
    <phoebe.atmospheres.passbands.download_passband>.

    See also:
    * <phoebe.set_download_passband_defaults>
    * <phoebe.atmospheres.passbands.download_passband>
    * <phoebe.atmospheres.passbands.update_passband>
    * <phoebe.atmospheres.passbands.get_passband>

    Returns
    ---------
    * dictionary of defaults, including `content` and `gzipped`.
    """
    return conf.get_download_passband_defaults()

def update_passband_ignore_version_on():
    """
    Turn on ignoring passband versions when checking for necessary updates.

    <phoebe.frontend.bundle.Bundle.run_checks_compute> checks to see if any
    additional content is required from the used passbands.  If so, these will
    be queried from the online tables if the timestamps match.  Otherwise, an
    error will be raised requiring manually calling
    <phoebe.atmospheres.passbands.update_passband>.  By enabling this, the
    version conflict will be ignored, preventing the need to manually update
    the passbands.

    This can also be set at import time via the following environment variables:
    * PHOEBE_UPDATE_PASSBAND_IGNORE_VERSION (defaults to FALSE)

    See also:
    * <phoebe.update_passband_ignore_version_off>
    """
    conf.update_passband_ignore_version_on()

def update_passband_ignore_version_off():
    """
    Turn off ignoring passband versions when checking for necessary updates.

    <phoebe.frontend.bundle.Bundle.run_checks_compute> checks to see if any
    additional content is required from the used passbands.  If so, these will
    be queried from the online tables if the timestamps match.  Otherwise, an
    error will be raised requiring manually calling
    <phoebe.atmospheres.passbands.update_passband>.

    This can also be set at import time via the following environment variables:
    * PHOEBE_UPDATE_PASSBAND_IGNORE_VERSION (defaults to FALSE)

    See also:
    * <phoebe.update_passband_ignore_version_on>
    """
    # NOTE: this previously called conf.update_passband_ignore_version_on(),
    # which re-enabled rather than disabled the setting
    conf.update_passband_ignore_version_off()

# Shortcuts to MPI options
def mpi_on(nprocs=None):
    """
    ENABLE PHOEBE to use MPI (parallelization).

    Default case:
    * If PHOEBE is run within an mpirun environment, MPI is ENABLED by default.
    * If PHOEBE is not run within an mpirun environment, MPI is DISABLED by default.

    When MPI is enabled, PHOEBE will do the following:
    * if within mpirun: uses PHOEBE's built-in per-dataset or per-time
        parallelization for <phoebe.frontend.bundle.Bundle.run_compute> and
        per-model parallelization when possible for
        <phoebe.frontend.bundle.Bundle.run_solver>.
    * if not within mpirun (ie.
in a serial python environment): will spawn a separate thread at <phoebe.frontend.bundle.Bundle.run_compute> and <phoebe.frontend.bundle.Bundle.run_solver>, using `nprocs` processors. This separate thread will be detached from the main thread if sending `detach=True` to <phoebe.frontend.bundle.Bundle.run_compute> or <phoebe.frontend.bundle.Bundle.run_solver>. See also: * <phoebe.mpi_off> Arguments ---------- * `nprocs` (int, optional): number of processors. Only applicable if **NOT** within mpirun (see above). """ mpi.on(nprocs=nprocs) def mpi_off(): """ Run PHOEBE in Serial Mode. Default case: * If PHOEBE is run within an mpirun environment, MPI is ENABLED by default. * If PHOEBE is not run within an mpirun environment, MPI is DISABLED by default. When MPI is disabled, PHOEBE will do the following: * if within mpirun: PHOEBE will run equally on all processors. The user can customize parallelization with access to `phoebe.mpi.nprocs`, `phoebe.mpi.myrank`. * if not within mpirun (ie. in a serial python environment): PHOEBE will run on a single processor in serial-mode. Compute jobs can still be detached from the main thread by sending `detach=True` to <phoebe.frontend.bundle.Bundle.run_compute> or <phoebe.frontend.bundle.Bundle.run_solver> but will still run on a single processor. See also: * <phoebe.mpi_on> """ mpi.off() def multiprocessing_on(): """ **NEW IN 2.3.26** Enable multiprocessing to use all CPUs available (this is the state by default). MPI will always take preference over multiprocessing. See <phoebe.mpi_on> and <phoebe.mpi_off>. Multiprocessing is used by <phoebe.frontend.bundle.Bundle.run_solver> (for some solvers) and <phoebe.frontend.bundle.Bundle.run_compute> when `sample_from` is used. See also: * <phoebe.multiprocessing_off> * <phoebe.multiprocessing_get_nprocs> * <phoebe.multiprocessing_set_nprocs> """ conf.multiprocessing_on() def multiprocessing_off(): """ **NEW IN 2.3.26** Disable multiprocessing and force serial mode (if MPI is also off: see <phoebe.mpi_on> and <phoebe.mpi_off>). See also: * <phoebe.multiprocessing_on> * <phoebe.multiprocessing_get_nprocs> * <phoebe.multiprocessing_set_nprocs> """ conf.multiprocessing_off() def multiprocessing_get_nprocs(): """ **NEW IN 2.3.26** Get the number of processors used within multiprocessing. MPI will always take preference over multiprocessing. See <phoebe.mpi_on> and <phoebe.mpi_off>. Multiprocessing is used by <phoebe.frontend.bundle.Bundle.run_solver> (for some solvers) and <phoebe.frontend.bundle.Bundle.run_compute> when `sample_from` is used. See also: * <phoebe.multiprocessing_on> * <phoebe.multiprocessing_off> * <phoebe.multiprocessing_set_nprocs> """ return conf.multiprocessing_nprocs def multiprocessing_set_nprocs(nprocs): """ **NEW IN 2.3.26** Set a custom number of processors to use within multiprocessing. MPI will always take preference over multiprocessing. See <phoebe.mpi_on> and <phoebe.mpi_off>. Multiprocessing is used by <phoebe.frontend.bundle.Bundle.run_solver> (for some solvers) and <phoebe.frontend.bundle.Bundle.run_compute> when `sample_from` is used. See also: * <phoebe.multiprocessing_on> * <phoebe.multiprocessing_off> * <phoebe.multiprocessing_get_nprocs> """ conf.multiprocessing_set_nprocs(nprocs) def progressbars_on(): """ Enable progressbars. Progressbars require `tqdm` to be installed (will silently ignore if not installed). See also: * <phoebe.progressbars_off> """ conf.progressbars_on() def progressbars_off(): """ Disable progressbars. 
Progressbars require `tqdm` to be installed (will silently ignore if not installed). See also: * <phoebe.progressbars_on> """ conf.progressbars_off() # let's use magic to shutdown the workers when the user-script is complete atexit.register(mpi.shutdown_workers) # edit API docs for imported functions def strip_docstring_refs(matchobj): text = matchobj.group(0) path = text[1:-1] return path def add_nparray_docstring(obj): docsprefix = """This is an included dependency from [nparray 1.2.0](https://nparray.readthedocs.io/en/1.2.0/).\n\n===============================================================\n\n""" docstring = docsprefix + "\n".join([l.lstrip() for l in obj.__doc__.split("\n")]) docstring = re.sub(r"(?P<name>\<[0-9a-zA-Z_\.]*\>)", strip_docstring_refs, docstring) obj.__doc__ = docstring add_nparray_docstring(array) add_nparray_docstring(linspace) add_nparray_docstring(arange) add_nparray_docstring(logspace) add_nparray_docstring(geomspace) add_nparray_docstring(invspace) def add_distl_docstring(obj): docsprefix = """This is an included dependency from [distl](https://distl.readthedocs.io).\n\n===============================================================\n\n""" docstring = docsprefix + "\n".join([l.lstrip() for l in obj.__doc__.split("\n")]) docstring = re.sub(r"(?P<name>\<[0-9a-zA-Z_\.]*\>)", strip_docstring_refs, docstring) obj.__doc__ = docstring add_distl_docstring(uniform) add_distl_docstring(boxcar) # add_distl_docstring(delta) add_distl_docstring(gaussian) add_distl_docstring(normal) add_distl_docstring(histogram_from_bins) add_distl_docstring(histogram_from_data) add_distl_docstring(mvgaussian) add_distl_docstring(mvhistogram_from_data) add_distl_docstring(uniform_around) add_distl_docstring(gaussian_around) # expose available "kinds" per-context def _get_phoebe_funcs(module, devel=False): ignore = ['_empty_array', 'deepcopy', 'fnmatch', 'download_passband', 'list_installed_passbands', 'list_online_passbands', 'list_passbands', 'parameter_from_json', 'parse_json', 'send_if_client', 'update_if_client', '_add_component', '_add_dataset', '_label_units_lims', '_run_compute', 'phase_mask_inds'] if not devel: ignore += ['pulsation'] ignore += ['photodynam'] mod_split = module.__name__.split('.') if mod_split[-1] in ['figure'] or (mod_split[-1] in ['solver'] and 'figure' not in mod_split): ret = [] for sub_module in _inspect.getmembers(module): if _inspect.ismodule(sub_module[1]): ret += [sub_module[0]+"."+o for o in _get_phoebe_funcs(sub_module[1], devel=devel)] return ret return [o[0] for o in _inspect.getmembers(module) if _inspect.isfunction(o[1]) and o[0] not in ignore and o[0][0] != '_'] def list_available_components(devel=False): """ List all available 'kinds' for component from <phoebe.parameters.component>. See also: * <phoebe.list_available_features> * <phoebe.list_available_datasets> * <phoebe.list_available_computes> * <phoebe.list_available_solvers> Arguments ----------- * `devel` (bool, default, optional=False): whether to include development-only kinds. See <phoebe.devel_on>. Returns --------- * (list of strings) """ return _get_phoebe_funcs(component, devel=devel) def list_available_features(devel=False): """ List all available 'kinds' for feature from <phoebe.parameters.feature>. See also: * <phoebe.list_available_components> * <phoebe.list_available_datasets> * <phoebe.list_available_computes> * <phoebe.list_available_solvers> Arguments ----------- * `devel` (bool, default, optional=False): whether to include development-only kinds. See <phoebe.devel_on>. 
Returns --------- * (list of strings) """ return _get_phoebe_funcs(feature, devel=devel) def list_available_datasets(devel=False): """ List all available 'kinds' for dataset from <phoebe.parameters.dataset>. See also: * <phoebe.list_available_components> * <phoebe.list_available_features> * <phoebe.list_available_computes> * <phoebe.list_available_solvers> Arguments ----------- * `devel` (bool, default, optional=False): whether to include development-only kinds. See <phoebe.devel_on>. Returns --------- * (list of strings) """ return _get_phoebe_funcs(dataset, devel=devel) def list_available_figures(devel=False): """ List all available 'kinds' for figure from <phoebe.parameters.figure>. See also: * <phoebe.list_available_components> * <phoebe.list_available_features> * <phoebe.list_available_computes> * <phoebe.list_available_solvers> Arguments ----------- * `devel` (bool, default, optional=False): whether to include development-only kinds. See <phoebe.devel_on>. Returns --------- * (list of strings) """ return _get_phoebe_funcs(figure, devel=devel) def list_available_computes(devel=False): """ List all available 'kinds' for compute from <phoebe.parameters.compute>. See also: * <phoebe.list_available_components> * <phoebe.list_available_features> * <phoebe.list_available_datasets> * <phoebe.list_available_solvers> Arguments ----------- * `devel` (bool, default, optional=False): whether to include development-only kinds. See <phoebe.devel_on>. Returns --------- * (list of strings) """ return _get_phoebe_funcs(compute, devel=devel) def list_available_solvers(devel=False): """ List all available 'kinds' for solver from <phoebe.parameters.solver>. See also: * <phoebe.list_available_components> * <phoebe.list_available_features> * <phoebe.list_available_datasets> * <phoebe.list_available_computes> Arguments ----------- * `devel` (bool, default, optional=False): whether to include development-only kinds. See <phoebe.devel_on>. Returns --------- * (list of strings) """ return _get_phoebe_funcs(solver, devel=devel) if _env_variable_bool('PHOEBE_ENABLE_ONLINE_PASSBANDS', 'TRUE'): for pb in list_all_update_passbands_available(): msg = 'passband "{}" has a newer version available. Run phoebe.list_passband_online_history("{}") to get a list of available changes and phoebe.update_passband("{}") or phoebe.update_all_passbands() to update.'.format(pb, pb, pb) # NOTE: we'll print since the logger hasn't been initialized yet. print('PHOEBE: {}'.format(msg)) # delete things we don't want exposed to the user at the top-level # NOTE: we need _sys for reset_settings, that's why its __sys del atexit try: del matplotlib except: pass try: del mpi4py except: pass del logging del Settings del MPI del re del strip_docstring_refs del add_nparray_docstring del add_distl_docstring
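
# A short usage sketch of the shortcuts defined above -- a minimal example,
# assuming phoebe is installed and imported from outside its source tree
# (the values shown are illustrative, not recommended defaults):
#
#     import phoebe
#     logger = phoebe.logger(clevel='WARNING')  # console logging at WARNING and above
#     phoebe.interactive_checks_off()           # defer checks until run_checks/run_compute
#     phoebe.mpi_on(nprocs=4)                   # spawn 4 processors at run_compute/run_solver
#     print(phoebe.list_available_computes())   # list the available compute backends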
phoebe-project/phoebe2
phoebe/__init__.py
Python
gpl-3.0
38,334
[ "Gaussian" ]
ccfc0b984868f34095b77f9c0995a7f81c768c4f58ee98c7e1d0ab97237c8019
'''
Just playing around with graphs and classes and stacks and queues and stuff.
Nodes in a directed graph have values and children, and possibly other
properties based on the situation.
'''

from queue import Queue  # .put() to put things in the queue and .get() to pop from the queue


class Node:
    def __init__(self, value=0, children=None, visited=False, ID=None):
        self.value = value
        # use None as the default and build a fresh list per instance; a mutable
        # default argument ([]) would be shared by every Node
        self.children = children if children is not None else []
        self.visited = visited
        self.ID = ID


# Let's manually build a graph. Create 10 nodes and keep an id for each node in this graph dictionary.
graph = {ID: Node() for ID in range(10)}

def populate_graph(graph, node_ID, value, children):
    graph[node_ID].value = value
    graph[node_ID].children = children
    graph[node_ID].ID = node_ID

populate_graph(graph, 0, 5, [graph[1], graph[9]])
populate_graph(graph, 1, 9, [graph[2], graph[4]])
populate_graph(graph, 2, 7, [graph[3]])
populate_graph(graph, 3, 2, [graph[1]])
populate_graph(graph, 4, 3, [graph[5], graph[6], graph[7]])
populate_graph(graph, 5, 7, [])
populate_graph(graph, 6, 3, [])
populate_graph(graph, 7, 5, [graph[8]])
populate_graph(graph, 8, 8, [])
populate_graph(graph, 9, 1, [graph[8]])

def bfs(graph, root):
    # Let's do a breadth first search that prints when it visits each node.
    # A Queue object is always truthy, so `while q:` would spin forever and
    # block on q.get() once the queue drained; test emptiness explicitly.
    # Marking nodes visited as they are enqueued (rather than dequeued)
    # guarantees each node is queued at most once.
    q = Queue()
    q.put(root)
    root.visited = True
    while not q.empty():
        current_node = q.get()
        print(f'We just visited node {current_node.ID}\n')
        for child in current_node.children:
            if not child.visited:
                child.visited = True
                q.put(child)

def dfs(graph, root):
    # Let's do a depth first search that prints when it visits each node.
    # Notice we don't visit a node twice, otherwise we would get caught in
    # an infinite loop if there is a cycle in the directed graph.
    # We can actually use this DFS to detect cycles. See the "course schedule"
    # problem to see how to do that.
    stack = [root]
    while stack:
        current_node = stack.pop()
        if current_node.visited:
            # the same node can be pushed twice before it is first popped
            continue
        current_node.visited = True
        print(f'We just visited node {current_node.ID}\n')
        for child in current_node.children:
            if not child.visited:
                stack.append(child)

dfs(graph, graph[0])
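
# The comments above mention that DFS can detect cycles (as in the "course
# schedule" problem). A minimal sketch of that idea, assuming the same Node
# class as above; has_cycle and its state dict are hypothetical additions,
# not part of the original script.

def has_cycle(root):
    # 0 = unvisited, 1 = on the current DFS path, 2 = fully explored
    state = {}

    def visit(node):
        state[node.ID] = 1
        for child in node.children:
            if state.get(child.ID, 0) == 1:
                return True  # back edge to a node on the current path => cycle
            if state.get(child.ID, 0) == 0 and visit(child):
                return True
        state[node.ID] = 2
        return False

    return visit(root)

# e.g. has_cycle(graph[0]) is True here, because 1 -> 2 -> 3 -> 1 forms a cycle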
chuckinator0/Projects
scripts/graphPlay.py
Python
gpl-3.0
2,124
[ "VisIt" ]
a7e22bfb8db7dfeaabf6288db476b802ce7a78ad3d9c666b5003188bfe8a3356
# - Coding UTF8 - # # Networked Decision Making # Development Sites (source code): # http://code.google.com/p/global-decision-making-system/ # http://github.com/NewGlobalStrategy/NetDecisionMaking # # Demo Sites (Google App Engine) # http://netdecisionmaking.appspot.com # http://globaldecisionmaking.appspot.com # # License Code: MIT # License Content: Creative Commons Attribution 3.0 # # Also visit: www.web2py.com # or Groups: http://groups.google.com/group/web2py # For details on the web framework used for this development # # Developed by Russ King (newglobalstrategy@gmail.com # Russ also blogs occasionally to pass the time at: # http://proudofyourplanent.blogspot.com # His general thinking on why this project is very important is available at # http://www.scribd.com/doc/98216626/New-Global-Strategy # With thanks to Guido, Massimo and many other that make this sort of thing # much easier than it used to be """ This controller handles viewing the full details on questions if allowed and also displaying the reason you are not allowed to view the question the functionality to submit a challenge is also included in this controller and that is called via ajax from the view of the question detail The three functions are: index: displays the question details notshowing: explains why the question can't be displayed - actions should always be displayed challenge: allows submission of a challenge and return of whether this is allowed via ajax For actions not generally interested in user's views but would like these to be capable of prioritisation at any stage - need to see the date and will be some options to generate emails based on actions and also to challenge resolved actions to return them to proposed A separate comments function has now been created """ from ndsfunctions import getwraptext from jointjs2py import colourcode def index(): #This will be a general view on question details and it will require the #question id as an argument Logic will be to only display the question if it #has been submitted, resolved or answered/passed by the user #This maintains the general privacy approach to questions that may be #subject to answer eventually if resolved then there will be a view option #However approach for actions is different - they can be viewed at any time #but the buttons at the bottom should be very simlar #initialize variables as not used if action viewtext = '' lstanswers = [] lstnumanswers = [] numpass = 0 #not used if not answered uqanswered = False uqurg = 5 uqimp = 5 uqans = 0 questid = 0 if len(request.args): questid = int(request.args[0]) else: redirect(URL('gdms', 'viewquest', 'notshowing/' + 'NoQuestion')) #session.questid = questid # check why doing this questrow = db(db.question.id == questid).select(db.question.id, db.question.status, db.question.qtype, db.question.questiontext, db.question.answers, db.question.urgency, db.question.importance, db.question.numanswers, db.question.level, db.question.totanswers, db.question.category, db.question.correctans, db.question.correctanstext, db.question.answerreasons, db.question.answerreason2, db.question.answerreason3, db.question.answercounts, db.question.auth_userid).first() if questrow is None: redirect(URL('gdms', 'viewquest', 'notshowing/' + 'NoQuestion')) else: quest = questrow.as_dict() if quest['qtype'] == 'quest': response.view = 'viewquest/question.html' #View question logic if auth.user is None: if quest['status'] != 'Resolved': redirect(URL('gdms', 'viewquest', 'notshowing/' + 'NotResolved/' + str(questid))) else: uq = 
db((db.userquestion.auth_userid == auth.user.id) &
                    (db.userquestion.questionid == questid)).select(
                        db.userquestion.id, db.userquestion.urgency,
                        db.userquestion.importance,
                        db.userquestion.answer).first()
            if uq is None:
                uqanswered = False
                if quest['status'] != 'Resolved' and quest['auth_userid'] != auth.user:
                    redirect(URL('gdms', 'viewquest', 'notshowing/NotAnswered/' + str(questid)))
            else:
                uqanswered = True

        # now three scenarios: either the user has answered the question, or they
        # haven't but it is resolved, or they have submitted the question and are
        # being allowed to see progress; the population of the question variables
        # to return to the view should be broadly the same in all scenarios
        # v2: if not answered we will now open a link to answer the question

        # Get question details into a list of correct size
        # possibly just append
        mastlstanswers = quest['answers']
        mastlstnumanswers = quest['answercounts']
        k = quest['numanswers']
        lstanswers = mastlstanswers[:k]
        lstnumanswers = mastlstnumanswers[1:k + 1]
        numpass = mastlstnumanswers[0]

        # in terms of the user there are basically 3 things to pick up on:
        # the user's answer, the user's rating of urgency and importance, and
        # whether the user got this right (if resolved or under challenge)
        if uqanswered:
            uqurg = uq.urgency
            uqimp = uq.importance
            uqans = uq.answer

        # Now work out what we can say about this question
        # if resolved we can say if right or wrong and allow the question to be challenged
        if quest['status'] == 'Resolved':
            # Did the user answer the question
            if uqanswered:
                if uqans == 0:
                    viewtext = 'You passed on this question but it has now been resolved'
                elif quest['correctans'] == uqans:
                    viewtext = 'Well done - you helped resolve this question'
                else:
                    # NOTE: the implicit string concatenation here was missing a
                    # space between 'resolved' and 'correct'
                    viewtext = ('Your answer to this question disagrees with the resolved '
                                'correct answer - you may want to request a challenge')
            else:
                viewtext = "You didn't get to answer this question"
        elif quest['status'] == 'Rejected':
            viewtext = "This question has been rejected"
        else:
            # if not resolved can only say in progress and how many more answers
            # are required; at present should only be here if answered, as we are
            # not showing users unresolved and unanswered questions
            viewtext = 'This question is in progress at level ' + str(quest['level'])

        # That will do for now - display of challenges and probably numanswers
        # remaining and level can be added later
    else:  # action
        # Get details of the action; urgency and importance of actions is stored in a
        # different table because they can be prioritised without answering
        response.view = 'viewquest/action.html'
        if auth.user is not None:
            uq = db((db.questurgency.auth_userid == auth.user.id) &
                    (db.questurgency.questionid == questid)).select(
                        db.questurgency.urgency, db.questurgency.importance).first()
            if uq is not None:
                uqanswered = True
                uqurg = uq.urgency
                uqimp = uq.importance

    # need to get priorquests and subsquests as lists which may be empty for each quest now
    priorquestrows = db(db.questlink.targetid == questid).select(db.questlink.sourceid)
    subsquestrows = db(db.questlink.sourceid == questid).select(db.questlink.targetid)
    priorquests = [row.sourceid for row in priorquestrows]
    subsquests = [row.targetid for row in subsquestrows]

    return dict(quest=quest, viewtext=viewtext, lstanswers=lstanswers,
                lstnumanswers=lstnumanswers, uqanswered=uqanswered,
                uqurg=uqurg, uqimp=uqimp, numpass=numpass,
                priorquests=priorquests, subsquests=subsquests)


def qmap():
    # This generates a view of a question and its parents and children; we are not now showing
    # siblings and partners - they can be shown on the main network view.
The web2py question ids need #to be passed into the objects - however these must be unique and we may want to present #the same question twice on the map so think we need to make the ids combine the role #and the id. Current roles would be as follows: # # 1 Cen- the main central question on the map # 2 Par- a parent question # 3 Chi- a child question # #thinking we will have two different shapes of rectangle for questions and actions #questions will be longer and slightly narrower actions shorter and a little wider #wrapping of text will need to reflect this for now if len(request.args): questid = int(request.args[0]) else: redirect(URL('gdms', 'viewquest', 'notshowing/' + 'NoQuestion')) questrow = db(db.question.id == questid).select(db.question.id, db.question.status, db.question.qtype, db.question.questiontext, db.question.urgency, db.question.importance, db.question.level, db.question.priority, db.question.totanswers, db.question.category, db.question.correctanstext, db.question.answercounts).first() if questrow == None: redirect(URL('gdms', 'viewquest', 'notshowing/' + 'NoQuestion')) else: quest = questrow.as_dict() #so have quest['subsquests'] and quest['priorquests'] which I think we want to make #into a list of objects and associated values qmap may as well be a dictionary I think #with name, position, colour, text and font size, no width and height for now think textwrap can provide the #text so in qmap name will be the key and then list of x,y,colour, text, font size #so in this scenario would have a main question and then prior and subs and outputs #are keys, qmap and qlink #for now lets hard code some positions in terms of lists #width=140, height=250 xpos = [330, 170, 490, 10, 650] ypos = [10, 300, 550] obj = 'Cen' + str(questrow['id']) questmap = {} qlink = {} #for testing this allowed dummy linking #priortemp = [1,3,5] #substemp = [4,10,12] if quest['qtype'] == 'action': width = 200 height = 100 wraplength = 30 else: width = 160 height = 200 wraplength = 25 keys = '[' + obj #add the prior quests - so this should become a procedure shortly #with params and I think prefix plus quest ids is best as will need #to work back to updates to ids once we use events - think we can just #make a new function here in first instance and then move in fact maybe just #iterate over the list here is fine with separate function for the text #but may then need second pass for next generation - but later #however they can be defined as query and then do 4 iterations #if siblings: #change to just be a single question parentquery = db.questlink.targetid == questid childquery = db.questlink.sourceid == questid #so this needs to b parentlinks = db(parentquery).select(db.questlink.id, db.questlink.sourceid, db.questlink.targetid) mylist = [x.sourceid for x in parentlinks] query = db.question.id.belongs(mylist) #query = db.question.id.belongs(priortemp) parentquests = db(query).select(db.question.id, db.question.questiontext, db.question.correctanstext, db.question.status, db.question.level, db.question.qtype, db.question.category, db.question.priority) for i, x in enumerate(parentquests): if x['qtype'] == 'action': width = 200 height = 100 wraplength = 30 else: width = 160 height = 200 wraplength = 25 qtext = getwraptext(x.questiontext, x.correctanstext, wraplength) rectcolour = colourcode(x.qtype, x.status, x.priority) strobj = 'Par' + str(x.id) strlink = 'Plk' + str(x.id) questmap[strobj] = [xpos[i], ypos[0], qtext, rectcolour, 12, 'b', width, height] #change to call function in the line above 
qlink[strlink] = [strobj, obj] keys += ',' keys += strobj keys += ',' keys += strlink if parentquests: ypos.pop(0) qtext = getwraptext(quest['questiontext'], quest['correctanstext'], wraplength) rectcolour = colourcode(quest['qtype'], quest['status'], quest['priority']) #add the main question questmap[obj] = [xpos[0], ypos[0], qtext, rectcolour, 12, 'tb', width, height] ypos.pop(0) childlinks = db(childquery).select(db.questlink.id, db.questlink.sourceid, db.questlink.targetid) mylist = [x.targetid for x in childlinks] query = db.question.id.belongs(mylist) #query = db.question.id.belongs(substemp) childquests = db(query).select(db.question.id, db.question.questiontext, db.question.correctanstext, db.question.status, db.question.level, db.question.qtype, db.question.category, db.question.priority) for i, x in enumerate(childquests): if x['qtype'] == 'action': width = 200 height = 160 wraplength = 30 else: width = 160 height = 200 wraplength = 25 qtext = getwraptext(x.questiontext, x.correctanstext, wraplength) rectcolour = colourcode(x.qtype, x.status, x.priority) strobj = 'Chi' + str(x.id) strlink = 'Clk' + str(x.id) questmap[strobj] = [xpos[i], ypos[0], qtext, rectcolour, 12, 't', width, height] qlink[strlink] = [obj, strobj] keys += ',' keys += strobj keys += ',' keys += strlink keys += ']' return dict(quest=quest, questmap=questmap, keys=keys, qlink=qlink) def comments(): #This will be a general view on question comments it will require the #question id as an argument Logic will be to only display the comements if it #has been resolved #This maintains the general privacy approach to questions that may be #subject to answer eventually if resolved then there will be a view option #this needs the as_dict() treatment as well but lets debug viewquest first # and then do next page = 0 if len(request.args): questid = int(request.args[0]) if len(request.args) > 1: page = request.args[1] else: redirect(URL('default', 'index')) session.questid = questid quest = db.question[questid].as_dict() if quest is None: redirect(URL('viewquest', 'notshowing/' + 'NoQuestion')) # create the form before the comments then it includes in subsequent refresh if auth.user: form = crud.create(db.questcomment) else: form = 'You must be logged in to post comments' #Now select the question comments will have an add comments form if #the user is logged in - else comment that must login to add comments #below logic needs fixed as now question is request.args[0] but can #test the rest while I figure out how to fix if len(request.args) > 1: page = int(request.args[1]) else: page = 0 items_per_page = 3 limitby = (page * items_per_page, (page + 1) * items_per_page + 1) basenum = items_per_page * page comments = db((db.questcomment.questionid == questid)).select( db.questcomment.id, db.questcomment.numreject, db.questcomment.comment, db.questcomment.commentdate, db.questcomment.status, orderby=[db.questcomment.commentdate], limitby=limitby) return dict(quest=quest, comments=comments, page=page, items_per_page=items_per_page, form=form, basenum=basenum) def useranswers(): #This displays all users answers to the question and challenges if any #for now will probably display all challenges at the bottom of the page #as assumption is there won't be too many of these # looks like this also needs as_dict treatment page = 0 if len(request.args): questid = int(request.args[0]) if len(request.args) > 1: page = request.args[1] else: redirect(URL('default', 'index')) session.questid = questid quest = db(db.question.id == 
questid).select(db.question.id, db.question.status, db.question.qtype, db.question.questiontext, db.question.answers, db.question.urgency, db.question.importance, db.question.numanswers, db.question.level, db.question.totanswers, db.question.category, db.question.correctans, db.question.correctanstext, db.question.answerreasons, db.question.answerreason2, db.question.answerreason3, db.question.answercounts)[0] quest = db.question[questid].as_dict() if quest is None: redirect(URL('viewquest', 'notshowing/' + 'NoQuestion')) #Get question details into a list of correct size # this needs to become a function - duplicated code with viewquest mastlstanswers = quest['answers'] mastlstnumanswers = quest['answercounts'] k = quest['numanswers'] lstanswers = mastlstanswers[:k] lstnumanswers = mastlstnumanswers[1:k + 1] numpass = mastlstnumanswers[0] #Now select the userquestion records in order by level if len(request.args) > 1: page = int(request.args[1]) else: page = 0 items_per_page = 21 limitby = (page * items_per_page, (page + 1) * items_per_page + 1) uqs = db(db.userquestion.questionid == questid).select(db.userquestion.id, db.userquestion.level, db.userquestion.auth_userid, db.userquestion.answer, db.userquestion.answerreason, db.userquestion.urgency, db.userquestion.importance, db.userquestion.score, orderby=[~db.userquestion.level], limitby=limitby) challs = db(db.questchallenge.questionid == questid).select( db.questchallenge.auth_userid, db.questchallenge.status, db.questchallenge.challengereason, db.questchallenge.challengedate, orderby=[~db.questchallenge.challengedate]) return dict(quest=quest, uqs=uqs, page=page, items_per_page=items_per_page, lstanswers=lstanswers, lstnumanswers=lstnumanswers, challs=challs) def notshowing(): shortreason = request.args[0] if len(request.args) > 1: questid = request.args[1] else: questid = False if shortreason == 'NotResolved': reason = "This question is not yet resolved and you haven't answered it" elif shortreason == 'NotAnswered': reason = 'You have not answered this question' elif shortreason == 'NoQuestion': reason = 'This question does not exist' else: reason = 'Not Known' return dict(reason=reason, questid=questid) def create_action(): quest = request.args[0] return dict(quest=quest) def create_message(): quest = request.args[0] return dict(quest=quest) def challenge(): #This allows users to challenge resolved questions - whether or not they have answered them - users are not #allowed to challenge questions that are not currently in a state of resolved and this should be done by the #viewquestion function rather than the challenge ie option isn't available if question isn't resolved - actions #are similar and would only be challenged once they are in a state of Agreed responsetext = 'na' chquestid = request.args[0] #reason = request.args[1] if auth.user is None: responsetext = 'You must be logged in to challenge a question' else: #find out if user has previously challenged the question - this will be a userchallenge record qcs = db((db.questchallenge.auth_userid == auth.user.id) & (db.questchallenge.questionid == chquestid)).select() qc = qcs.first() if qc is None: db.questchallenge.insert(questionid=chquestid, auth_userid=auth.user.id, challengereason=request.vars.challreason) #Now also need to add 1 to the numchallenges figure - I think this will reset when back to resolved and #It shouldn't be possible to challenge unless resolved questrows = db(db.question.id == chquestid).select() quest = questrows.first() numchallenges = 
quest.numchallenges + 1
            db(db.question.id == chquestid).update(numchallenges=numchallenges)
            if numchallenges >= 3:
                numchallenged = quest.numchallenged + 1
                newlevel = quest.level + 2
                # thinking behind this is to restore the question two levels higher,
                # which is where it would have been if the 6 people had been mixed up,
                # i.e. 3 thought it wrong and 3 agreed
                db(db.question.id == chquestid).update(status='In Progress',
                                                       level=newlevel,
                                                       numchallenges=0,
                                                       numchallenged=numchallenged)
            responsetext = 'Challenge accepted'
        else:
            responsetext = 'You have already challenged this question and only 1 challenge is allowed at present'
    return responsetext


def agree():
    # This allows users to record if they agree or disagree with resolved questions
    # - whether or not they have answered them - only resolved questions can
    # be agreed or disagreed with
    responsetext = 'na'
    chquestid = request.args[0]
    agreeval = int(request.args[1])
    if auth.user is None:
        responsetext = 'You must be logged in to record agreement or disagreement'
    else:
        questrows = db(db.question.id == chquestid).select()
        quest = questrows.first()
        numagree = quest.numagree
        numdisagree = quest.numdisagree
        # find out if user has previously agreed or disagreed with the question -
        # this will be a questagreement record
        qcs = db((db.questagreement.auth_userid == auth.user.id) &
                 (db.questagreement.questionid == chquestid)).select()
        qc = qcs.first()
        if qc is None:
            db.questagreement.insert(questionid=chquestid,
                                     auth_userid=auth.user.id,
                                     agree=agreeval)
            # Now also need to add 1 to the agreement or disagreement figure
            # It shouldn't be possible to agree or disagree unless resolved
            if agreeval == 1:
                numagree += 1
                responsetext = 'Agreement Recorded'
            else:
                numdisagree += 1
                responsetext = 'Disagreement Recorded'
        else:
            if agreeval == qc.agree:
                if agreeval == 1:
                    responsetext = 'You have already registered agreement'
                else:
                    responsetext = ('You have already registered your disagreement'
                                    ' - you may be able to challenge')
            else:
                if agreeval == 1:
                    responsetext = 'Your vote has been changed to agreement'
                    numagree += 1
                    numdisagree -= 1
                else:
                    responsetext = 'Your vote has been changed to disagreement'
                    # NOTE: both counters were previously adjusted in the wrong
                    # direction here, inflating numagree on a switch to disagreement
                    numagree -= 1
                    numdisagree += 1
                qc.update_record(agree=agreeval)
        db(db.question.id == chquestid).update(numagree=numagree,
                                               numdisagree=numdisagree)
    return responsetext


def flagcomment():
    # This allows users to record if they think a comment is inappropriate
    # if 3 separate users flag the comment then it is removed from display
    # permanently for now
    responsetext = ''
    commentid = request.args[0]
    requesttype = request.args[1]
    if auth.user is None:
        responsetext = 'You must be logged in to flag inappropriate comments'
    else:
        comment = db(db.questcomment.id == commentid).select(
            db.questcomment.id, db.questcomment.numreject,
            db.questcomment.usersreject, db.questcomment.status).first()
        if requesttype != 'admin':
            # check if user has previously flagged the comment -
            # this will be an entry in the usersreject field
            if comment.usersreject is not None and auth.user.id in comment.usersreject:
                responsetext = 'You have already flagged this comment'
            else:
                responsetext = 'Rejection recorded'
                comment.numreject += 1
                if comment.usersreject is not None:
                    comment.usersreject.append(auth.user.id)
                else:
                    comment.usersreject = [auth.user.id]
                if comment.numreject > 2:
                    comment.status = 'NOK'
                comment.update_record()
        else:
            responsetext = 'Admin hide successful'
            comment.update_record(status='NOK')
    return responsetext


def urgency():
    # This allows users to record or update their assessment of the urgency and
    # importance of an action as this helps with
prioritising the actions that #are required - next step is to attempt to get the view sorted and will #retrieve this as part of main index controller if request.vars.urgslider2 is None: urgslider = 5 else: urgslider = int(request.vars.urgslider2) if request.vars.impslider2 is None: impslider = 5 else: impslider = int(request.vars.impslider2) responsetext = 'na' chquestid = request.args[0] if auth.user == None: responsetext = 'You must be logged in to record urgency and importance' else: questrows = db(db.question.id == chquestid).select() quest = questrows.first() qurgency = quest.urgency qimportance = quest.importance #find out if user has rated the question already qcs = db((db.questurgency.auth_userid == auth.user.id) & (db.questurgency.questionid == chquestid)).select() qc = qcs.first() if qc == None: db.questurgency.insert(questionid=chquestid, auth_userid=auth.user.id, urgency=urgslider, importance=impslider) urgency = request.vars.urgslider2 responsetext = 'Your assessment has been recorded' else: qc.update_record(urgency=request.vars.urgslider2, importance=request.vars.impslider2) responsetext = 'Your assessment has been updated' if quest.totratings == 0: totratings = quest.totanswers else: totratings = quest.totratings urgent = (((quest.urgency * totratings) + urgslider) / (totratings + 1)) importance = (((quest.importance * totratings) + impslider) / (totratings + 1)) if qc is None: totratings += 1 priority = urgent * importance # perhaps a bit arbitary but will do for now db(db.question.id == chquestid).update(urgency=urgent, importance=importance, priority=priority, totratings=totratings) return responsetext
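
# The urgency() controller above folds each new rating into a running mean:
# new_mean = (old_mean * n + rating) / (n + 1), and then sets
# priority = urgency * importance.  A minimal standalone sketch of that update
# (update_rating is a hypothetical helper, not part of this controller):

def update_rating(old_mean, n_ratings, new_rating):
    """Fold one new rating into a running mean of n_ratings previous ratings."""
    return ((old_mean * n_ratings) + new_rating) / (n_ratings + 1)

# e.g. a mean urgency of 5.0 over 4 ratings plus a new rating of 9 gives
# update_rating(5.0, 4, 9) == 5.8, and priority is then urgency * importance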
NewGlobalStrategy/NetDecisionMaking
controllers/viewquest.py
Python
mit
29,082
[ "VisIt" ]
784b51c4264f5fc107135f40d8e2457d6f3783b7905ae1a728c2b1130f24f752
from __future__ import annotations

import ast

from pre_commit_hooks.debug_statement_hook import Debug
from pre_commit_hooks.debug_statement_hook import DebugStatementParser
from pre_commit_hooks.debug_statement_hook import main
from testing.util import get_resource_path


def test_no_breakpoints():
    visitor = DebugStatementParser()
    visitor.visit(ast.parse('import os\nfrom foo import bar\n'))
    assert visitor.breakpoints == []


def test_finds_debug_import_attribute_access():
    visitor = DebugStatementParser()
    visitor.visit(ast.parse('import ipdb; ipdb.set_trace()'))
    assert visitor.breakpoints == [Debug(1, 0, 'ipdb', 'imported')]


def test_finds_debug_import_from_import():
    visitor = DebugStatementParser()
    visitor.visit(ast.parse('from pudb import set_trace; set_trace()'))
    assert visitor.breakpoints == [Debug(1, 0, 'pudb', 'imported')]


def test_finds_breakpoint():
    visitor = DebugStatementParser()
    visitor.visit(ast.parse('breakpoint()'))
    assert visitor.breakpoints == [Debug(1, 0, 'breakpoint', 'called')]


def test_returns_one_for_failing_file(tmpdir):
    f_py = tmpdir.join('f.py')
    f_py.write('def f():\n import pdb; pdb.set_trace()')
    ret = main([str(f_py)])
    assert ret == 1


def test_returns_zero_for_passing_file():
    ret = main([__file__])
    assert ret == 0


def test_syntaxerror_file():
    ret = main([get_resource_path('cannot_parse_ast.notpy')])
    assert ret == 1


def test_non_utf8_file(tmpdir):
    f_py = tmpdir.join('f.py')
    f_py.write_binary('# -*- coding: cp1252 -*-\nx = "€"\n'.encode('cp1252'))
    assert main((str(f_py),)) == 0


def test_py37_breakpoint(tmpdir):
    f_py = tmpdir.join('f.py')
    f_py.write('def f():\n breakpoint()\n')
    assert main((str(f_py),)) == 1
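
# The tests above exercise DebugStatementParser.  A minimal sketch of how such
# an ast.NodeVisitor can flag debugger imports and breakpoint() calls; the
# names below (_DebugFinder, found) are illustrative, not the hook's actual
# implementation.
import ast


class _DebugFinder(ast.NodeVisitor):
    def __init__(self):
        self.found = []

    def visit_Import(self, node):
        for alias in node.names:
            if alias.name in {'pdb', 'ipdb', 'pudb'}:
                self.found.append((node.lineno, alias.name))
        self.generic_visit(node)

    def visit_Call(self, node):
        # catches the bare builtin breakpoint() added in Python 3.7
        if isinstance(node.func, ast.Name) and node.func.id == 'breakpoint':
            self.found.append((node.lineno, 'breakpoint'))
        self.generic_visit(node)


finder = _DebugFinder()
finder.visit(ast.parse('import pdb; breakpoint()'))
assert finder.found == [(1, 'pdb'), (1, 'breakpoint')]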
pre-commit/pre-commit-hooks
tests/debug_statement_hook_test.py
Python
mit
1,786
[ "VisIt" ]
2b4ed2cd8b8bb697f5ab1ee69a092e0a329e4a1d8eead3b1f126923b8598f1c8
""" Selected plots commonly used in astronomy and active learning. """ import pandas as pd import numpy as np import ephem import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.patches as mpatches import seaborn as sns from pandas import DataFrame from matplotlib.ticker import FuncFormatter from .photometry import fetch_spectrum, fetch_filter # These are the "Tableau 20" colors as RGB. tableau10 = [(214, 39, 40), (31, 119, 180), (44, 160, 44), (255, 127, 14), (148, 103, 189), (140, 86, 75), (127, 127, 127), (23, 190, 207), (188, 189, 34), (227, 119, 194)] # Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts. for i in range(len(tableau10)): r, g, b = tableau10[i] tableau10[i] = (r / 255., g / 255., b / 255.) def plot_class_distribution(target, ax=None): """ Plot the distribution of the classes. Parameters ---------- target : array The target column of the dataset. ax : Matplotlib Axes object A matplotlib Axes instance. Returns ------- ax : Matplotlib Axes object The matplotlib Axes instance where the figure is drawn. """ if not ax: ax = plt.gca() counts = DataFrame(target).apply(pd.value_counts) counts.plot(ax=ax, kind="bar", fontsize=12, legend=False) ax.set_xticklabels(labels=counts.index, rotation=0) format_thousands = lambda x, pos: format(int(x), ',') ax.get_yaxis().set_major_formatter(FuncFormatter(format_thousands)) ax.grid(False) return ax def plot_scores(scores, title, x_label, classifier_names): """ Make a barplot of the scores of some performance measure. Parameters ---------- scores : dict Where the keys are the classifier names and the values are the scores. title : str Title of the plot. x_label : str Label for the x-axis classifier_names : array List of the names of the classifiers, the order of which will be used to order the bars. """ scores = DataFrame(scores, index=[x_label]) scores = scores.reindex(columns=classifier_names) format_as_percent_plot = lambda x, pos: "{:.0f}%".format(x * 100) fig, ax = plt.subplots(figsize=(9, 5)) scores.plot(ax=ax, kind="bar", title=title, fontsize=12) ax.legend(bbox_to_anchor = (1.5, 0.6)) ax.set_xticklabels([], rotation=0) ax.get_yaxis().set_major_formatter(FuncFormatter(format_as_percent_plot)) plt.show() def plot_final_accuracy(data, labels, colors=None, sort=True, linewidth=1.5, inner='box', cut=2, ylim=None, ax=None): """ """ if not ax: ax = plt.gca() df = [np.array(h)[:,-1] for h in data] df = np.array(df).transpose() df = pd.DataFrame(df, columns=labels) sorted_labels = df.mean().order().index df = df.reindex_axis(sorted_labels, axis=1) sns.violinplot(data=df, ax=ax, inner=inner, cut=cut, palette=colors, linewidth=linewidth) format_as_percent_plot = lambda x, pos: "{:.0f}%".format(x * 100) ax.get_yaxis().set_major_formatter(FuncFormatter(format_as_percent_plot)) ax.set_ylabel('PBA') ax.set_ylim(ylim) ax.tick_params(top='off') return ax def plot_balanced_accuracy_violin(balanced_accuracy_samples, ax=None): """ Make a violin plot of the balanced posterior accuracy. Parameters ---------- balanced_accuracy_samples : dict Where the keys are the classifier names and the each value is an array of sample points from which an empirical pdf can be approxmiated. ax : Matplotlib Axes object A matplotlib Axes instance. Returns ------- ax : Matplotlib Axes object The matplotlib Axes instance where the figure is drawn. 
""" if not ax: ax = plt.gca() sns.violinplot(data=balanced_accuracy_samples, ax=ax, inner='box', cut=2) format_as_percent_plot = lambda x, pos: "{:.1f}%".format(x * 100) ax.get_yaxis().set_major_formatter(FuncFormatter(format_as_percent_plot)) return ax def plot_learning_curve(sample_sizes, learning_curves, curve_labels, xscale='log', ax=None): """ Plot the learning curve. Parameters ---------- sample_sizes : array The sample sizes in which the classifier is run. learning_curves : array List of learning_curves to be plotted curve_labels : array The labels of the learning curves. xscale : str The scale of the x-axis. Default is 'log'. ax : Matplotlib Axes object A matplotlib Axes instance. Returns ------- ax : Matplotlib Axes object The matplotlib Axes instance where the figure is drawn. """ if not ax: ax = plt.gca() for curve, label in zip(learning_curves, curve_labels): ax.plot(sample_sizes[:len(curve)], curve, label=label) format_as_percent_plot = lambda x, pos: "{:.1f}%".format(x * 100) ax.get_yaxis().set_major_formatter(FuncFormatter(format_as_percent_plot)) ax.legend(loc='lower right', frameon=True) ax.set_xlabel('Number of Training Example') ax.set_ylabel('Posterior Balanced Accuracy Rate') ax.set_xscale(xscale) ax.grid(False) return ax def plot_average_learning_curve(sample_sizes, learning_curves, curve_labels, ax=None): """ Plot the average learning curve from many trials. Parameters ---------- sample_sizes : array The sample sizes in which the classifier is run. learning_curves : array List of learning_curves to be plotted curve_labels : array The labels of the learning curves. ax : Matplotlib Axes object A matplotlib Axes instance. Returns ------- ax : Matplotlib Axes object The matplotlib Axes instance where the figure is drawn. """ mean_curves = [] for learning_curve in learning_curves: learning_curve = np.array(learning_curve) mean_curve = np.zeros(len(sample_sizes)) for i in range(len(sample_sizes)): mean_curve[i] = np.mean(learning_curve[:, i]) mean_curves.append(mean_curve) if not ax: ax = plt.gca() for mean_curve, curve_label in zip(mean_curves, curve_labels): ax.plot(sample_sizes, mean_curve, label=curve_label) ax.set_xlabel('Number of Training Examples') ax.set_ylabel('MPBA') ax.legend(loc='lower right', frameon=True) ax.grid(False) return ax def plot_hex_map(ra, dec, origin=180, title=None, projection='mollweide', gridsize=100, milky_way=True, C=None, reduce_C_function=np.mean, vmin=0, vmax=1500, mincnt=1, cmap=plt.cm.bone_r, axisbg='white', colorbar=True, labels=False, norm=None, ax=None): """ Plot the density of objects on a hex map. Parameters ---------- ra : array The array containing the ra coordinates. dec : array The array containing the dec coordinates. origin : int The ra value in the middle of the map. title : str The title of the plot. projection : str The projection mode to be used. Default is 'mollweide'. gridsize : int The number of hexagons in the *x*-direction, default is 100. The corresponding number of hexagons in the *y*-direction is chosen such that the hexagons are approximately regular. Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the *x*-direction and the *y*-direction. milky_way : boolean Whether the plane of the Milky Way is plotted. Default is True. C : array If C is specified, it specifies values at the coordinate (ra[i],dec[i]). These values are accumulated for each hexagonal bin and then reduced according to reduce_C_function, which defaults to numpy’s mean function (np.mean). 
def plot_recall_maps(coords_test, y_test, y_pred_test, class_names, output,
                     correct_boolean, vmin=0, vmax=1, mincnt=None, fig_dir='',
                     cmap=plt.cm.YlGn):
    """Plot a recall map for each class.

    Parameters
    ----------
    coords_test : array
        The ra and dec coordinates.

    y_test : array
        The column of true target values.

    y_pred_test : array
        The column of predicted values.

    class_names : array
        Names of the target classes (e.g. Galaxy, Star, Quasar).

    output : str
        The suffix on the saved figure.

    correct_boolean : array
        A boolean array indicating whether a test example was correctly
        predicted.

    vmin : scalar
        vmin is the value that sits at the bottom end of the colour bar.
        If None, the min of array C is used.

    vmax : scalar
        vmax is the value that sits at the top end of the colour bar.
        If None, the max of array C is used.

    mincnt : int
        If not None, only display cells with more than mincnt number of
        points in the cell.

    fig_dir : str
        The directory where the figures will be saved.

    cmap : Matplotlib Colormap object
        The colour scheme to be used.
    """

    # the recall in a cell is the fraction of its examples that were
    # correctly predicted
    C_func = lambda c: np.sum(c) / len(c) if len(c) > 0 else 0

    for class_name in class_names:
        in_class = y_test == class_name
        ra = coords_test[:, 0][in_class]
        dec = coords_test[:, 1][in_class]
        C = correct_boolean[in_class]
        fig = plt.figure(figsize=(10, 5))
        ax = plot_hex_map(ra, dec, C=C, reduce_C_function=C_func, vmin=vmin,
                          vmax=vmax, mincnt=mincnt, cmap=cmap)
        file_name = fig_dir + 'map_recall_' + output + '_' + class_name + '.png'
        fig.savefig(file_name, bbox_inches='tight', dpi=300)
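
# A hedged sketch of plot_recall_maps on synthetic predictions. The class
# names, coordinates and correctness flags are all made up for illustration;
# note that the function writes one PNG per class to fig_dir (here, the
# current working directory).
def _example_recall_maps():
    rng = np.random.RandomState(0)
    n = 2000
    class_names = ['Galaxy', 'Star', 'Quasar']
    coords_test = np.column_stack([rng.uniform(0, 360, n),
                                   np.degrees(np.arcsin(rng.uniform(-1, 1, n)))])
    y_test = rng.choice(class_names, size=n)
    y_pred_test = rng.choice(class_names, size=n)
    correct = y_test == y_pred_test
    plot_recall_maps(coords_test, y_test, y_pred_test, class_names,
                     'synthetic', correct, fig_dir='')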
""" C_func = lambda c: np.sum(c) / len(c) if c else 0 is_class = {} for class_name in class_names: is_class[class_name] = y_test == class_name ra = coords_test[:,0][is_class[class_name]] dec = coords_test[:,1][is_class[class_name]] C = correct_boolean[is_class[class_name]] fig = plt.figure(figsize=(10,5)) ax = plot_hex_map(ra, dec, C=C, reduce_C_function=C_func, vmin=vmin, vmax=vmax, mincnt=mincnt, cmap=cmap) file_name = fig_dir + 'map_recall_' + output + r'_' + class_name + r'.png' fig.savefig(file_name, bbox_inches='tight', dpi=300) def plot_filters_and_spectrum(filter_url, spectrum_url, filter_dir='', spectra_dir='', ax=None): """ Plot ugriz filters and spectrum in the same figure. filter_url : str The url where the ugriz filters can be obtained. spectrum_url : str The url where the spectrum data can be obtained. ax : Matplotlib Axes object A matplotlib Axes instance. Returns ------- ax : Matplotlib Axes object The matplotlib Axes instance where the figure is drawn. """ if not ax: ax = plt.gca() Xref = fetch_spectrum(spectrum_url, spectra_dir) Xref[:, 1] /= 2.1 * Xref[:, 1].max() ax.plot(Xref[:, 0], Xref[:, 1], '-k', lw=1) for f,c in zip('ugriz', 'bgrmk'): X = fetch_filter(f, filter_url, filter_dir) plt.fill(X[:, 0], X[:, 1], ec=c, fc=c, alpha=0.4) kwargs = dict(fontsize=20, ha='center', va='center', alpha=0.5) ax.text(3500, 0.02, 'u', color='b', **kwargs) ax.text(4600, 0.02, 'g', color='g', **kwargs) ax.text(6100, 0.02, 'r', color='r', **kwargs) ax.text(7500, 0.02, 'i', color='m', **kwargs) ax.text(8800, 0.02, 'z', color='k', **kwargs) ax.set_xlim(3000, 11000) #ax1a.set_title('SDSS Filters and Reference Spectrum') ax.set_xlabel('Wavelength (Å)') ax.set_ylabel('Normalised Flux / Filter Transmission') ax.tick_params(top='off', right='off') ax.grid(False) return ax def plot_scatter_with_classes(data, targets, classes, size=2, alpha=0.05, scatterpoints=500, ax=None): """ Plot a scater plot of the classes. data : array The target array. targets : array The list of class names used in the target array. ax : Matplotlib Axes object A matplotlib Axes instance. Returns ------- ax : Matplotlib Axes object The matplotlib Axes instance where the figure is drawn. """ if not ax: ax = plt.gca() class_data = {} cls_scatters = [] for i, cls in enumerate(classes): class_data[cls] = data[targets == cls] cls_scatter = ax.scatter(class_data[cls][:,0], class_data[cls][:,1], s=size, alpha=alpha, c=tableau10[i], label=cls) cls_scatters.append(cls_scatter) ax.legend(cls_scatters, classes, scatterpoints=scatterpoints, loc='upper right', frameon=True, ncol=1) ax.grid(False) return ax def reshape_grid_socres(grid_scores, row_length, col_length, transpose=False): """ Reshape the scores to be used as input for the heathap. grid_scores : array The grid scores obtain from the GridSearch insteance. row_length : int The width of the heatmap. col_length : int The height of the heatmap. transpose : boolean Whether to tranpose the heatmap (e.g. for easier viewing). Returns ------- scores : array The array of scores, shaped appropriately. """ scores = [x[1] for x in grid_scores] scores = np.array(scores).reshape(row_length, col_length) if transpose: scores = scores.transpose() return scores def plot_validation_accuracy_heatmap(scores, x_range=None, y_range=None, x_label=None, y_label=None, power10='both', vmin=None, vmax=None, ax=None): """ Plot heatmap of the validation accuracy from a grid search. Parameters ---------- scores : array List of scores that has been shaped appropriately. 
def plot_learning_curve_df(sample_sizes, learning_curves, labels, colors,
                           linestyles, ylim=None, loc='lower right', upper=None,
                           ax=None):
    """Plot a set of learning curves stored by label.

    Parameters
    ----------
    sample_sizes : array
        The sample sizes in which the classifier is run.

    learning_curves : dict or DataFrame
        A mapping from each label to its learning curve.

    labels : array
        The labels of the learning curves, in the order they will be plotted.

    colors : dict
        A mapping from each label to its line colour.

    linestyles : dict
        A mapping from each label to its line style.

    ylim : tuple or None
        The limits of the y-axis.

    loc : str
        The location of the legend.

    upper : scalar or None
        If given, a dashed horizontal reference line is drawn at this value.

    ax : Matplotlib Axes object
        A matplotlib Axes instance.

    Returns
    -------
    ax : Matplotlib Axes object
        The matplotlib Axes instance where the figure is drawn.
    """

    if not ax:
        ax = plt.gca()

    for label in labels:
        curve = learning_curves[label]
        ax.plot(sample_sizes[:len(curve)], curve, label=label,
                color=colors[label], ls=linestyles[label], linewidth=1.5)

    format_as_percent_plot = lambda x, pos: "{:.0f}%".format(x * 100)
    ax.get_yaxis().set_major_formatter(FuncFormatter(format_as_percent_plot))
    ax.legend(loc=loc, frameon=True)
    ax.set_xlabel('Number of Training Examples')
    ax.set_ylabel('MPBA')

    if upper is not None:
        ax.plot([sample_sizes[0], sample_sizes[-1]], [upper, upper],
                ls='--', color='#377eb8')

    if ylim is not None:
        ax.set_ylim(ylim)

    return ax


def plot_heuristic_selections(sample_sizes, selections, labels, colors,
                              linestyles, loc='lower right', linewidth=1.5,
                              ylim=None, ascending=False, ax=None):
    """Plot the frequency of heuristic selections made by the active bandit.

    Parameters
    ----------
    sample_sizes : array
        The training sizes at which each selection was made.

    selections : array
        An (n_trials, n_steps) array of the heuristic indices selected at
        each step.

    labels : array
        The names of the heuristics, indexed by their selection index.
    """

    cumulatives = []
    selections = np.asarray(selections)
    sample_sizes = np.asarray(sample_sizes)
    n_labels = len(labels)

    for i in range(n_labels):
        cumulative_i = selections == i
        cumulative_i = np.cumsum(cumulative_i, axis=1)
        cumulative_i = np.mean(cumulative_i, axis=0)
        cumulatives.append(cumulative_i)

    df = pd.DataFrame(cumulatives, index=labels).transpose()

    if not ax:
        ax = plt.gca()

    initial_n = sample_sizes[0] - 1
    n_selections = sample_sizes - initial_n

    ordered_labels = df.iloc[-1].sort_values(ascending=ascending).index
    for label in ordered_labels:
        curve = df[label]
        ax.plot(sample_sizes, curve / n_selections, label=label,
                color=colors[label], ls=linestyles[label], linewidth=linewidth)

    ax.set_xlabel('Training Size')
    ax.set_ylabel('Frequency of Selections')
    ax.legend(loc=loc, frameon=True)
    ax.set_ylim(ylim)

    return ax, cumulatives


def plot_sum_selections(sample_sizes, selections, labels, colors, linestyles,
                        loc='lower right', linewidth=1.5, ylim=None,
                        ascending=True, width=0.8, ax=None):
    """Plot the total number of selections each heuristic received from
    the active bandit.
    """

    cumulatives = []
    selections = np.asarray(selections)
    n_labels = len(labels)

    for i in range(n_labels):
        cumulative_i = selections == i
        cumulative_i = np.cumsum(cumulative_i, axis=1)
        cumulatives.append(cumulative_i[:, -1])

    df = pd.DataFrame(cumulatives, index=labels).transpose()
    ordered_labels = df.mean().sort_values(ascending=ascending).index
    df = df.reindex(columns=ordered_labels)

    if not ax:
        ax = plt.gca()

    sns.barplot(data=df, palette=colors, linewidth=0, ax=ax)
    ax.set_xticklabels(ordered_labels, rotation=45, rotation_mode="anchor",
                       ha="right")
    ax.set_ylabel('Number of Selections')

    return ax
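
# A small sketch of the bandit selection plots. The random selection indices
# are an assumption standing in for the recorded output of an actual bandit
# run over the three named heuristics.
def _example_heuristic_selections():
    rng = np.random.RandomState(0)
    labels = ['margin', 'entropy', 'random']
    selections = rng.randint(0, len(labels), size=(10, 50))  # 10 trials, 50 steps
    sample_sizes = np.arange(51, 101)
    colors = dict(zip(labels, tableau10))
    linestyles = {label: '-' for label in labels}
    fig, ax = plt.subplots()
    plot_heuristic_selections(sample_sizes, selections, labels, colors,
                              linestyles, ax=ax)
    return fig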
""" cumulatives = [] selections = np.asarray(selections) n_labels = len(labels) for i in range(n_labels): cumulative_i = selections == i cumulative_i = np.cumsum(cumulative_i, axis=1) cumulatives.append(cumulative_i[:, -1]) df = pd.DataFrame(cumulatives, index=labels).transpose() ordered_labels = df.mean().order(ascending=ascending).index df = df.reindex_axis(ordered_labels, axis=1) #ax = sns.barplot(data=df, palette=colors, linewidth=0) ax = sns.barplot(data=df, palette=colors, linewidth=0) #last = sample_sizes[-1] - sample_sizes[0] #last = pd.DataFrame(cumulatives, index=labels)[last] #plot(kind='bar', colormap=colors) ax.set_xticklabels(ordered_labels, rotation=45, rotation_mode="anchor", ha="right") ax.set_ylabel('Numer of Selections') return ax def plot_bandit_parameters(sample_sizes, parameters, labels, colors, linestyles, linewidth=1.5, xlabel='Training Size', ylabel='', yscale='linear', ascending=False, ax=None): """ Plot the parameters from the bandit experiment. """ n_samples = len(sample_sizes) n_labels = len(labels) param_avg = np.zeros((n_samples, n_labels)) for param in parameters: param_avg += np.vstack(param) param_avg /= len(parameters) df = pd.DataFrame(param_avg, columns=labels) ordered_labels = df.iloc[-1].order(ascending=ascending).index if not ax: ax = plt.gca() for label in ordered_labels: ax.plot(sample_sizes, df[label], label=label, color=colors[label], ls=linestyles[label], linewidth=linewidth) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_yscale(yscale) ax.legend(loc='upper right', frameon=True) ax.grid(False) return ax def plot_cumulative_rewards(sample_sizes, parameters, labels, colors, linestyles, linewidth=1.5, xlabel='Training Size', ylabel='Cumulative Reward', loc='upper left', ascending=False, ax=None): """ """ n_samples = len(sample_sizes) n_labels = len(labels) param_avg = np.zeros((n_samples, n_labels)) for param in parameters: param_avg += np.vstack(param) param_avg /= len(parameters) param_avg = np.cumsum(param_avg, axis=0) df = pd.DataFrame(param_avg, columns=labels) ordered_labels = df.iloc[-1].order(ascending=ascending).index if not ax: ax = plt.gca() for label in ordered_labels: ax.plot(sample_sizes, df[label], label=label, color=colors[label], ls=linestyles[label], linewidth=linewidth) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.legend(loc=loc, frameon=True) ax.grid(False) return ax def order_learning_curves(data, labels, ascending=False): """ """ df = [np.array(h).mean(axis=0) for h in data] df = np.array(df).transpose() df = pd.DataFrame(df, columns=labels) df = df.reindex_axis(df.iloc[-1].order(ascending=ascending).index, axis=1) return df
yen223/mclass-sky
mclearn/viz.py
Python
bsd-3-clause
23,490
[ "Galaxy" ]
8111a496a735f3699c1a3ec32b8ead6432a84bb5ff65ede4aa7a79048af6ec20