Dataset schema (per-column value ranges):

column      type            range
text        stringlengths   12 – 1.05M
repo_name   stringlengths   5 – 86
path        stringlengths   4 – 191
language    stringclasses   1 value
license     stringclasses   15 values
size        int32           12 – 1.05M
keyword     listlengths     1 – 23
text_hash   stringlengths   64 – 64
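To make the schema concrete, here is a minimal sketch of loading and inspecting a dataset with these columns using the Hugging Face datasets library; the repository path in the call is a placeholder, since this preview does not name the actual dataset.

# Minimal sketch, assuming the Hugging Face `datasets` library is installed;
# "user/python-code-keywords" is a hypothetical path, not the real repo name.
from datasets import load_dataset

ds = load_dataset("user/python-code-keywords", split="train")
row = ds[0]
print(row["repo_name"], row["path"], row["license"], row["size"])
print(row["keyword"])         # a list, 1 to 23 entries per the schema
print(len(row["text_hash"]))  # always 64 characters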
""" Support code for 0alias scripts. @since: 0.28 """ # Copyright (C) 2009, Thomas Leonard # See the README file for details, or visit http://0install.net. from zeroinstall import _, SafeException from zeroinstall import support _old_template = '''#!/bin/sh if [ "$*" = "--versions" ]; then exec 0launch -gd '%s' "$@" else exec 0launch %s '%s' "$@" fi ''' _template = '''#!/bin/sh exec 0launch %s'%s' "$@" ''' class NotAnAliasScript(SafeException): pass class ScriptInfo: """@since: 1.3""" uri = None main = None command = 'run' # For backwards compatibility def __iter__(self): return iter([self.uri, self.main]) def parse_script_header(stream): """Parse a 0alias script, if possible. This does the same as L{parse_script}, except with an existing stream. The stream position at exit is undefined. @since: 1.12""" try: stream.seek(0) template_header = _template[:_template.index("%s'")] actual_header = stream.read(len(template_header)) stream.seek(0) if template_header == actual_header: # If it's a 0alias script, it should be quite short! rest = stream.read() line = rest.split('\n')[1] else: old_template_header = \ _old_template[:_old_template.index("-gd '")] actual_header = stream.read(len(old_template_header)) if old_template_header != actual_header: return None rest = stream.read() line = rest.split('\n')[2] except UnicodeDecodeError as ex: return None info = ScriptInfo() split = line.rfind("' '") if split != -1: # We have a --main or --command info.uri = line[split + 3:].split("'")[0] start, value = line[:split].split("'", 1) option = start.split('--', 1)[1].strip() value = value.replace("'\\''", "'") if option == 'main': info.main = value elif option == 'command': info.command = value or None else: return None else: info.uri = line.split("'", 2)[1] return info def parse_script(pathname): """Extract the URI and main values from a 0alias script. @param pathname: the script to be examined @return: information about the alias script @rtype: L{ScriptInfo} @raise NotAnAliasScript: if we can't parse the script """ with open(pathname, 'rt') as stream: info = parse_script_header(stream) if info is None: raise NotAnAliasScript(_("'%s' does not look like a script created by 0alias") % pathname) return info def write_script(stream, interface_uri, main = None, command = None): """Write a shell script to stream that will launch the given program. @param stream: the stream to write to @param interface_uri: the program to launch @param main: the --main argument to pass to 0launch, if any @param command: the --command argument to pass to 0launch, if any""" assert "'" not in interface_uri assert "\\" not in interface_uri assert main is None or command is None, "Can't set --main and --command together" if main is not None: option = "--main '%s' " % main.replace("'", "'\\''") elif command is not None: option = "--command '%s' " % command.replace("'", "'\\''") else: option = "" stream.write(support.unicode(_template) % (option, interface_uri))
repo_name: timdiels/0install
path:      zeroinstall/alias.py
language:  Python
license:   lgpl-2.1
size:      3,119
keyword:   ["VisIt"]
text_hash: a97ec23db4adde1c5803f902b5bbcb78a3536b1072db4721810391225301bc7e
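The text_hash above is 64 hex characters, which matches the length of a SHA-256 digest of the text column; the preview never names the hash function, so the check below treats SHA-256 as an assumption to verify rather than a documented fact:

import hashlib

def text_hash_matches(row):
    # Assumption: text_hash is SHA-256 over the UTF-8 bytes of `text`;
    # the 64-character fixed length is consistent with that but not stated.
    digest = hashlib.sha256(row["text"].encode("utf-8")).hexdigest()
    return digest == row["text_hash"]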
#!/usr/bin/env python # -*- coding: utf-8 -*- # Author: Beining --<ACICFG> # Purpose: Yet another danmaku and video file downloader of Bilibili. # Created: 11/06/2013 # # Biligrab is licensed under MIT license (https://github.com/cnbeining/Biligrab/blob/master/LICENSE) # # Copyright (c) 2013-2015 ''' Biligrab Beining@ACICFG cnbeining[at]gmail.com http://www.cnbeining.com https://github.com/cnbeining/Biligrab MIT license ''' from ast import literal_eval import sys import os from StringIO import StringIO import gzip import urllib import urllib2 import math import json import commands import subprocess import hashlib import getopt import logging import traceback import threading import Queue from time import time from xml.dom.minidom import parseString try: from danmaku2ass2 import * except Exception: pass global vid, cid, partname, title, videourl, part_now, is_first_run, APPKEY, SECRETKEY, LOG_LEVEL, VER, LOCATION_DIR, VIDEO_FORMAT, convert_ass, is_export, IS_SLIENT, pages, IS_M3U, FFPROBE_USABLE, QUALITY, IS_FAKE_IP, FAKE_IP cookies, VIDEO_FORMAT = '', '' LOG_LEVEL, pages, FFPROBE_USABLE = 0, 0, 0 APPKEY = '6f90a59ac58a4123' SECRETKEY = 'b78be1fef78c3e7fdc7633e5fd5eee90' SECRETKEY_MINILOADER = '1c15888dc316e05a15fdd0a02ed6584f' VER = '0.98.95' FAKE_UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.52 Safari/537.36' FAKE_HEADER = { 'User-Agent': FAKE_UA, 'Cache-Control': 'no-cache', 'Pragma': 'no-cache', 'pianhao': '%7B%22qing%22%3A%22super%22%2C%22qtudou%22%3A%22real%22%2C%22qyouku%22%3A%22super%22%2C%22q56%22%3A%22super%22%2C%22qcntv%22%3A%22super%22%2C%22qletv%22%3A%22super2%22%2C%22qqiyi%22%3A%22real%22%2C%22qsohu%22%3A%22real%22%2C%22qqq%22%3A%22real%22%2C%22qhunantv%22%3A%22super%22%2C%22qku6%22%3A%22super%22%2C%22qyinyuetai%22%3A%22super%22%2C%22qtangdou%22%3A%22super%22%2C%22qxunlei%22%3A%22super%22%2C%22qsina%22%3A%22high%22%2C%22qpptv%22%3A%22super%22%2C%22qpps%22%3A%22high%22%2C%22qm1905%22%3A%22high%22%2C%22qbokecc%22%3A%22super%22%2C%22q17173%22%3A%22super%22%2C%22qcuctv%22%3A%22super%22%2C%22q163%22%3A%22super%22%2C%22q51cto%22%3A%22high%22%2C%22xia%22%3A%22auto%22%2C%22pop%22%3A%22no%22%2C%22open%22%3A%22no%22%7D'} LOCATION_DIR = os.path.dirname(os.path.realpath(__file__)) #---------------------------------------------------------------------- def list_del_repeat(list): """delete repeated items in a list, and keep the order. 
http://www.cnblogs.com/infim/archive/2011/03/10/1979615.html""" l2 = [] [l2.append(i) for i in list if not i in l2] return(l2) #---------------------------------------------------------------------- def logging_level_reader(LOG_LEVEL): """str->int Logging level.""" return { 'INFO': logging.INFO, 'DEBUG': logging.DEBUG, 'WARNING': logging.WARNING, 'FATAL': logging.FATAL }.get(LOG_LEVEL) #---------------------------------------------------------------------- def calc_sign(string): """str/any->str return MD5.""" return str(hashlib.md5(str(string).encode('utf-8')).hexdigest()) #---------------------------------------------------------------------- def read_cookie(cookiepath): """str->list Original target: set the cookie Target now: Set the global header""" global BILIGRAB_HEADER try: cookies_file = open(cookiepath, 'r') cookies = cookies_file.readlines() cookies_file.close() # print(cookies) return cookies except Exception: logging.warning('Cannot read cookie, may affect some videos...') return [''] #---------------------------------------------------------------------- def clean_name(name): """str->str delete all the dramas in the filename.""" return (str(name).strip().replace('\\',' ').replace('/', ' ').replace('&', ' ')).replace('-', ' ') #---------------------------------------------------------------------- def send_request(url, header, is_fake_ip): """str,dict,int->str Send request, and return answer.""" global IS_FAKE_IP data = '' if IS_FAKE_IP == 1: header['X-Forwarded-For'] = FAKE_IP header['Client-IP'] = FAKE_IP header['X-Real-IP'] = FAKE_IP try: #logging.debug(header) request = urllib2.Request(url, headers=header) response = urllib2.urlopen(request) data = response.read() except urllib2.HTTPError: logging.info('ERROR!') return '' if response.info().get('Content-Encoding') == 'gzip': buf = StringIO(response.read()) f = gzip.GzipFile(fileobj=buf) data = f.read() #except Exception: #raise URLOpenException('Cannot open URL! 
Raw output:\n\n{output}'.format(output = command_result[1])) #print(request.headers) logging.debug(data) return data #---------------------------------------------------------------------- def mylist_to_aid_list(mylist): """str/int->list""" data = send_request('http://www.bilibili.com/mylist/mylist-{mylist}.js'.format(mylist = mylist), FAKE_HEADER, IS_FAKE_IP) #request = urllib2.Request('http://www.bilibili.com/mylist/mylist-{mylist}.js'.format(mylist = mylist), headers = FAKE_HEADER) #response = urllib2.urlopen(request) aid_list = [] #data = response.read() for i in data.split('\n')[-3].split(','): if 'aid' in i: aid_list.append(i.split(':')[1]) return aid_list #---------------------------------------------------------------------- def find_cid_api(vid, p, cookies): """find cid and print video detail str,int?,str->str,str,str,str TODO: Use json.""" global cid, partname, title, videourl, pages cid = 0 title , partname , pages, = '', '', '' if str(p) is '0' or str(p) is '1': #str2Hash = 'appkey={APPKEY}&id={vid}&type=xml{SECRETKEY}'.format(APPKEY = APPKEY, vid = vid, SECRETKEY = SECRETKEY) #biliurl = 'https://api.bilibili.com/view?appkey={APPKEY}&id={vid}&type=xml&sign={sign}'.format(APPKEY = APPKEY, vid = vid, SECRETKEY = SECRETKEY, sign = calc_sign(str2Hash)) biliurl = 'https://api.bilibili.com/view?appkey={APPKEY}&id={vid}&type=xml'.format(APPKEY = '8e9fc618fbd41e28', vid = vid, SECRETKEY = SECRETKEY) else: #str2Hash = 'appkey={APPKEY}&id={vid}&page={p}&type=xml{SECRETKEY}'.format(APPKEY = APPKEY, vid = vid, p = p, SECRETKEY = SECRETKEY) #biliurl = 'https://api.bilibili.com/view?appkey={APPKEY}&id={vid}&page={p}&type=xml&sign={sign}'.format(APPKEY = APPKEY, vid = vid, SECRETKEY = SECRETKEY, p = p, sign = calc_sign(str2Hash)) biliurl = 'https://api.bilibili.com/view?appkey={APPKEY}&id={vid}&page={p}&type=xml'.format(APPKEY = '8e9fc618fbd41e28', vid = vid, SECRETKEY = SECRETKEY, p = p) logging.debug('BiliURL: ' + biliurl) videourl = 'http://www.bilibili.com/video/av{vid}/index_{p}.html'.format(vid = vid, p = p) logging.info('Fetching api to read video info...') data = '' try: #request = urllib2.Request(biliurl, headers=BILIGRAB_HEADER) #response = urllib2.urlopen(request) #data = response.read() data = send_request(biliurl, BILIGRAB_HEADER, IS_FAKE_IP) logging.debug('Bilibili API: ' + data) dom = parseString(data) for node in dom.getElementsByTagName('cid'): if node.parentNode.tagName == "info": cid = node.toxml()[5:-6] logging.info('cid is ' + cid) break for node in dom.getElementsByTagName('partname'): if node.parentNode.tagName == "info": partname = clean_name(str(node.toxml()[10:-11])) logging.info('partname is ' + partname)# no more /\ drama break for node in dom.getElementsByTagName('title'): if node.parentNode.tagName == "info": title = clean_name(str(node.toxml()[7:-8])).decode("utf-8") logging.info((u'Title is ' + title).encode(sys.stdout.encoding)) for node in dom.getElementsByTagName('pages'): if node.parentNode.tagName == "info": pages = clean_name(str(node.toxml()[7:-8])) logging.info('Total pages is ' + str(pages)) return [cid, partname, title, pages] except Exception: # If API failed logging.warning('Cannot connect to API server! 
\nIf you think this is wrong, please open an issue at \nhttps://github.com/cnbeining/Biligrab/issues with *ALL* the screen output, \nas well as your IP address and basic system info.\nYou can get these data via "-l".') logging.debug('API Data: ' + data) return ['', '', '', ''] #---------------------------------------------------------------------- def find_cid_flvcd(videourl): """str->None set cid.""" global vid, cid, partname, title logging.info('Fetching webpage with raw page...') #request = urllib2.Request(videourl, headers=FAKE_HEADER) data = send_request(videourl, FAKE_HEADER, IS_FAKE_IP) #request.add_header('Accept-encoding', 'gzip') #try: #response = urllib2.urlopen(request) #except urllib2.HTTPError: #logging.info('ERROR!') #return '' #if response.info().get('Content-Encoding') == 'gzip': #buf = StringIO(response.read()) #f = gzip.GzipFile(fileobj=buf) #data = f.read() data_list = data.split('\n') logging.debug(data) # Todo: read title for lines in data_list: if 'cid=' in lines: cid = lines.split('&') cid = cid[0].split('=') cid = cid[-1] logging.info('cid is ' + str(cid)) break #---------------------------------------------------------------------- def check_dependencies(download_software, concat_software, probe_software): """None->str,str,str Will give softwares for concat, download and probe. The detection of Python3 is located at the end of Main function.""" concat_software_list = ['ffmpeg', 'avconv'] download_software_list = ['aria2c', 'axel', 'wget', 'curl'] probe_software_list = ['ffprobe', 'mediainfo'] name_list = [[concat_software, concat_software_list], [download_software, download_software_list], [probe_software, probe_software_list]] for name in name_list: if name[0].strip().lower() not in name[1]: # Unsupported software # Set a Unsupported software, not blank if len(name[0].strip()) != 0: logging.warning('Requested Software not supported!\n Biligrab only support these following software(s):\n ' + str(name[1]) + '\n Trying to find available one...') for software in name[1]: output = commands.getstatusoutput(software + ' --help') if str(output[0]) != '32512': # If exist name[0] = software break if name[0] == '': logging.fatal('Cannot find software in ' + str(name[1]) + ' !') exit() return name_list[0][0], name_list[1][0], name_list[2][0] #---------------------------------------------------------------------- def download_video_link(part_number, download_software, video_link, thread_single_download): """set->str""" logging.info('Downloading #{part_number}...'.format(part_number = part_number)) if download_software == 'aria2c': cmd = 'aria2c -c -U "{FAKE_UA}" -s{thread_single_download} -x{thread_single_download} -k1M --out {part_number}.flv "{video_link}"' elif download_software == 'wget': cmd = 'wget -c -A "{FAKE_UA}" -O {part_number}.flv "{video_link}"' elif download_software == 'curl': cmd = 'curl -L -C - -A "{FAKE_UA}" -o {part_number}.flv "{video_link}"' elif download_software == 'axel': cmd = 'axel -U "{FAKE_UA}" -n {thread_single_download} -o {part_number}.flv "{video_link}"' cmd = cmd.format(part_number = part_number, video_link = video_link, thread_single_download = thread_single_download, FAKE_UA = FAKE_UA) logging.debug(cmd) return cmd #---------------------------------------------------------------------- def execute_cmd(cmd): """""" return_code = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if return_code != 0: logging.warning('ERROR') return return_code def execute_sysencode_cmd(command): """execute cmd with sysencoding""" 
os.system(command.decode("utf-8").encode(sys.stdout.encoding)) #---------------------------------------------------------------------- def concat_videos(concat_software, vid_num, filename): """str,str->None""" global VIDEO_FORMAT,title if concat_software == 'ffmpeg': f = open('ff.txt', 'w') ff = '' cwd = os.getcwd() for i in range(vid_num): ff += 'file \'{cwd}/{i}.flv\'\n'.format(cwd = cwd, i = i) # ff = ff.encode("utf8") f.write(ff) f.close() logging.debug(ff) logging.info('Concating videos...') execute_sysencode_cmd('ffmpeg -f concat -i ff.txt -c copy "' + filename + '".mp4') VIDEO_FORMAT = 'mp4' if os.path.isfile((str(i) + '.mp4').decode("utf-8")): try: # os.remove('ff.txt') print((str(i) + '.flv').decode("utf-8")) os.remove((str(i) + '.flv').decode("utf-8")) for i in range(vid_num): os.remove((str(i) + '.flv').decode("utf-8")) #execute_sysencode_cmd('rm -r ' + str(i) + '.flv') logging.info('Done, enjoy yourself!') except Exception: logging.warning('Cannot delete temporary files!') return [''] else: print('ERROR: Cannot concatenate files, trying to make flv...') execute_sysencode_cmd('ffmpeg -f concat -i ff.txt -c copy "' + filename + '".flv') VIDEO_FORMAT = 'flv' if os.path.isfile((str(i) + '.flv').decode("utf-8")): logging.warning('FLV file made. Not possible to mux to MP4, highly likely due to audio format.') #execute_sysencode_cmd('rm -r ff.txt') # os.remove('ff.txt') print(('ff.txt').decode("utf-8")) os.remove(('ff.txt').decode("utf-8")) for i in range(vid_num): #execute_sysencode_cmd('rm -r ' + str(i) + '.flv') os.remove((str(i) + '.flv').decode("utf-8")) else: logging.error('Cannot concatenate files!') elif concat_software == 'avconv': pass #---------------------------------------------------------------------- def process_m3u8(url): """str->list Only Youku.""" url_list = [] data = send_request(url, FAKE_HEADER, IS_FAKE_IP) if data == '': logging.error('Cannot download required m3u8!') return [] #request = urllib2.Request(url, headers=BILIGRAB_HEADER) #try: #response = urllib2.urlopen(request) #except Exception: #logging.error('Cannot download required m3u8!') #return [] #data = response.read() #logging.debug(data) data = data.split() if 'youku' in url: return [data[4].split('?')[0]] #---------------------------------------------------------------------- def make_m3u8(video_list): """list->str list: [(VIDEO_URL, TIME_IN_SEC), ...]""" TARGETDURATION = int(max([i[1] for i in video_list])) + 1 line = '#EXTM3U\n#EXT-X-TARGETDURATION:{TARGETDURATION}\n#EXT-X-VERSION:2\n'.format(TARGETDURATION = TARGETDURATION) for i in video_list: line += '#EXTINF:{time}\n{url}\n'.format(time = str(i[1]), url = i[0]) line += '#EXT-X-ENDLIST' logging.debug('m3u8: ' + line) return line #---------------------------------------------------------------------- def find_video_address_html5(vid, p, header): """str,str,dict->list Method #3.""" api_url = 'http://www.bilibili.com/m/html5?aid={vid}&page={p}'.format(vid = vid, p = p) data = send_request(api_url, header, IS_FAKE_IP) if data == '': logging.error('Cannot connect to HTML5 API!') return [] #request = urllib2.Request(api_url, headers=header) #url_list = [] #try: #response = urllib2.urlopen(request) #except Exception: #logging.error('Cannot connect to HTML5 API!') #return [] #data = response.read() #Fix #13 #if response.info().get('Content-Encoding') == 'gzip': #data = gzip.GzipFile(fileobj=StringIO(data), mode="r").read() #logging.debug(data) info = json.loads(data.decode('utf-8')) raw_url = info['src'] if 'error.mp4' in raw_url: logging.error('HTML5 
API returned ERROR or not available!') return [] #As in #11 if 'm3u8' in raw_url: logging.info('Found m3u8, processing...') return process_m3u8(raw_url) return [raw_url] #---------------------------------------------------------------------- def find_video_address_force_original(cid, header): """str,str->str Give the original URL, if possible. Method #2.""" # Force get oriurl #sign_this = calc_sign('appkey={APPKEY}&cid={cid}{SECRETKEY}'.format(APPKEY = APPKEY, cid = cid, SECRETKEY = SECRETKEY)) api_url = 'http://interface.bilibili.com/player?' #data = send_request(api_url + 'appkey={APPKEY}&cid={cid}&sign={sign_this}'.format(APPKEY = APPKEY, cid = cid, SECRETKEY = SECRETKEY, sign_this = sign_this), header, IS_FAKE_IP) data = send_request(api_url + 'appkey={APPKEY}&cid={cid}'.format(APPKEY = APPKEY, cid = cid, SECRETKEY = SECRETKEY), header, IS_FAKE_IP) #request = urllib2.Request(api_url + 'appkey={APPKEY}&cid={cid}&sign={sign_this}'.format(APPKEY = APPKEY, cid = cid, SECRETKEY = SECRETKEY, sign_this = sign_this), headers=header) #response = urllib2.urlopen(request) #data = response.read() #logging.debug('interface responce: ' + data) data = data.split('\n') for l in data: if 'oriurl' in l: originalurl = str(l[8:-9]) logging.info('Original URL is ' + originalurl) return originalurl logging.warning('Cannot get original URL! Chances are it does not exist.') return '' #---------------------------------------------------------------------- def find_link_flvcd(videourl): """str->list Used in method 2 and 5.""" logging.info('Finding link via Flvcd...') data = send_request('http://www.flvcd.com/parse.php?' + urllib.urlencode([('kw', videourl)]) + '&format=super', FAKE_HEADER, IS_FAKE_IP) #request = urllib2.Request('http://www.flvcd.com/parse.php?' + #urllib.urlencode([('kw', videourl)]) + '&format=super', headers=FAKE_HEADER) #request.add_header('Accept-encoding', 'gzip') #response = urllib2.urlopen(request) #data = response.read() #if response.info().get('Content-Encoding') == 'gzip': #buf = StringIO(data) #f = gzip.GzipFile(fileobj=buf) #data = f.read() data_list = data.split('\n') #logging.debug(data) for items in data_list: if 'name' in items and 'inf' in items and 'input' in items: c = items rawurlflvcd = c[59:-5] rawurlflvcd = rawurlflvcd.split('|') return rawurlflvcd #---------------------------------------------------------------------- def find_video_address_pr(cid, quality, header): """str,str->list The API provided by BilibiliPr.""" logging.info('Finding link via BilibiliPr...') api_url = 'http://pr.lolly.cc/P{quality}?cid={cid}'.format(quality = quality, cid = cid) data = send_request(api_url, header, IS_FAKE_IP) #request = urllib2.Request(api_url, headers=header) #try: #response = urllib2.urlopen(request, timeout=3) #data = response.read() #except Exception: #logging.warning('No response!') #return ['ERROR'] #logging.debug('BilibiliPr API: ' + data) if '!' in data[0:2]: logging.warning('API returned 404!') return ['ERROR'] else: rawurl = [] originalurl = '' dom = parseString(data) for node in dom.getElementsByTagName('durl'): url = node.getElementsByTagName('url')[0] rawurl.append(url.childNodes[0].data) return rawurl #---------------------------------------------------------------------- def find_video_address_normal_api(cid, header, method, convert_m3u = False): """str,str,str->list Change in 0.98: Return the file list directly. 
Method: 0: Original API 1: CDN API 2: Original URL API - Divided in another function 3: Mobile API - Divided in another function 4: Flvcd - Divided in another function 5: BilibiliPr [(VIDEO_URL, TIME_IN_SEC), ...] """ if method == '1': api_url = 'http://interface.bilibili.com/v_cdn_play?' else: #Method 0 or other api_url = 'http://interface.bilibili.com/playurl?' if QUALITY == -1: sign_this = calc_sign('cid={cid}&from=miniplay&player=1{SECRETKEY_MINILOADER}'.format(APPKEY = APPKEY, cid = cid, SECRETKEY_MINILOADER = SECRETKEY_MINILOADER)) interface_url = api_url + 'cid={cid}&from=miniplay&player=1&sign={sign_this}'.format(cid = cid, sign_this = sign_this) #interface_url = api_url + 'appkey={APPKEY}&cid={cid}'.format(APPKEY = APPKEY, cid = cid, SECRETKEY = SECRETKEY) else: sign_this = calc_sign('cid={cid}&from=miniplay&player=1&quality={QUALITY}{SECRETKEY_MINILOADER}'.format(APPKEY = APPKEY, cid = cid, SECRETKEY_MINILOADER = SECRETKEY_MINILOADER, QUALITY = QUALITY)) interface_url = api_url + 'cid={cid}&from=miniplay&player=1&quality={QUALITY}&sign={sign_this}'.format(cid = cid, sign_this = sign_this, QUALITY = QUALITY) logging.info(interface_url) data = send_request(interface_url, header, IS_FAKE_IP) #request = urllib2.Request(interface_url, headers=header) #logging.debug('Interface: ' + interface_url) #response = urllib2.urlopen(request) #data = response.read() #logging.debug('interface API: ' + data) for l in data.split('\n'): # In case shit happens if 'error.mp4' in l or 'copyright.mp4' in l: logging.warning('API header may be blocked!') return ['API_BLOCKED'] rawurl = [] originalurl = '' dom = parseString(data) if convert_m3u: for node in dom.getElementsByTagName('durl'): length = node.getElementsByTagName('length')[0] url = node.getElementsByTagName('url')[0] rawurl.append((url.childNodes[0].data, int(int(length.childNodes[0].data) / 1000) + 1)) else: for node in dom.getElementsByTagName('durl'): url = node.getElementsByTagName('url')[0] rawurl.append(url.childNodes[0].data) return rawurl #---------------------------------------------------------------------- def find_link_you_get(videourl): """str->list Extract urls with you-get.""" command_result = commands.getstatusoutput('you-get -u {videourl}'.format(videourl = videourl)) logging.debug(command_result) if command_result[0] != 0: raise YougetURLException('You-get failed somehow! Raw output:\n\n{output}'.format(output = command_result[1])) else: url_list = command_result[1].split('\n') for k, v in enumerate(url_list): if v.startswith('http'): url_list = url_list[k:] break #url_list = literal_eval(url_list_str) logging.debug('URL_LIST:{url_list}'.format(url_list = url_list)) return list(url_list) #---------------------------------------------------------------------- def get_video(oversea, convert_m3u = False): """str->list A full parser for getting video. 
convert_m3u: [(URL, time_in_sec)] else: [url,url]""" rawurl = [] if oversea == '2': raw_link = find_video_address_force_original(cid, BILIGRAB_HEADER) rawurl = find_link_flvcd(raw_link) elif oversea == '3': rawurl = find_video_address_html5(vid, p, BILIGRAB_HEADER) if rawurl == []: #As in #11 rawurl = find_video_address_html5(vid, p, FAKE_HEADER) elif oversea == '4': rawurl = find_link_flvcd(videourl) elif oversea == '5': rawurl = find_video_address_pr(cid, 1080, BILIGRAB_HEADER) if '404' in rawurl[0]: logging.info('Using lower quality...') rawurl = find_video_address_pr(cid, 720, BILIGRAB_HEADER) if '404' in rawurl[0]: logging.error('Failed!') rawurl = [] else: pass elif 'ERROR' in rawurl[0]: logging.info('Wait a little bit...') time.sleep(5) rawurl = find_video_address_pr(cid, 1080, BILIGRAB_HEADER) elif oversea == '6': raw_link = find_video_address_force_original(cid, BILIGRAB_HEADER) rawurl = find_link_you_get(raw_link) else: rawurl = find_video_address_normal_api(cid, BILIGRAB_HEADER, oversea, convert_m3u) if 'API_BLOCKED' in rawurl[0]: logging.warning('API header may be blocked! Using fake one instead...') rawurl = find_video_address_normal_api(cid, FAKE_HEADER, oversea, convert_m3u) return rawurl #---------------------------------------------------------------------- def get_resolution(filename, probe_software): """str,str->list""" resolution = [] filename = filename + '.' + VIDEO_FORMAT try: if probe_software == 'mediainfo': resolution = get_resolution_mediainfo(filename) if probe_software == 'ffprobe': resolution = get_resolution_ffprobe(filename) logging.debug('Software: {probe_software}, resolution {resolution}'.format(probe_software = probe_software, resolution = resolution)) return resolution except Exception: # magic number return[1280, 720] #---------------------------------------------------------------------- def get_resolution_mediainfo(filename): """str->list [640,360] path to dimention""" resolution = str(os.popen('mediainfo \'--Inform=Video;%Width%x%Height%\' "' +filename +'"').read()).strip().split('x') return [int(resolution[0]), int(resolution[1])] #---------------------------------------------------------------------- def get_resolution_ffprobe(filename): '''str->list [640,360]''' width = '' height = '' cmnd = ['ffprobe', '-show_format', '-show_streams', '-pretty', '-loglevel', 'quiet', filename] p = subprocess.Popen(cmnd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # print filename out, err = p.communicate() if err: print err return None try: for line in out.split(): if 'width=' in line: width = line.split('=')[1] if 'height=' in line: height = line.split('=')[1] except Exception: return None # return width + 'x' + height return [int(width), int(height)] #---------------------------------------------------------------------- def get_url_size(url): """str->int Get remote URL size by reading Content-Length. 
In bytes.""" site = urllib.urlopen(url) meta = site.info() return int(meta.getheaders("Content-Length")[0]) #---------------------------------------------------------------------- def getvideosize(url, verbose=False): try: if url.startswith('http:') or url.startswith('https:'): ffprobe_command = ['ffprobe', '-icy', '0', '-loglevel', 'repeat+warning' if verbose else 'repeat+error', '-print_format', 'json', '-select_streams', 'v', '-show_format', '-show_streams', '-timeout', '60000000', '-user-agent', BILIGRAB_UA, url] else: ffprobe_command = ['ffprobe', '-loglevel', 'repeat+warning' if verbose else 'repeat+error', '-print_format', 'json', '-select_streams', 'v', '-show_streams', url] logcommand(ffprobe_command) ffprobe_process = subprocess.Popen(ffprobe_command, stdout=subprocess.PIPE) try: ffprobe_output = json.loads(ffprobe_process.communicate()[0].decode('utf-8', 'replace')) except KeyboardInterrupt: logging.warning('Cancelling getting video size, press Ctrl-C again to terminate.') ffprobe_process.terminate() return 0, 0 width, height, widthxheight, duration, total_bitrate = 0, 0, 0, 0, 0 try: if dict.get(ffprobe_output, 'format')['duration'] > duration: duration = dict.get(ffprobe_output, 'format')['duration'] except Exception: pass for stream in dict.get(ffprobe_output, 'streams', []): try: if duration == 0 and (dict.get(stream, 'duration') > duration): duration = dict.get(stream, 'duration') if dict.get(stream, 'width')*dict.get(stream, 'height') > widthxheight: width, height = dict.get(stream, 'width'), dict.get(stream, 'height') if dict.get(stream, 'bit_rate') > total_bitrate: total_bitrate += int(dict.get(stream, 'bit_rate')) except Exception: pass if duration == 0: duration = int(get_url_size(url) * 8 / total_bitrate) return [[int(width), int(height)], int(float(duration))+1] except Exception as e: logorraise(e) return [[0, 0], 0] #---------------------------------------------------------------------- def convert_ass_py3(filename, probe_software, resolution = [0, 0]): """str,str->None With danmaku2ass, branch master. https://github.com/m13253/danmaku2ass/ Author: @m13253 GPLv3 A simple way to do that. resolution_str:1920x1080""" xml_name = os.path.abspath(filename + '.xml') ass_name = filename + '.ass' logging.info('Converting danmaku to ASS file with danmaku2ass(main)...') logging.info('Resolution is %dx%d' % (resolution[0], resolution[1])) if resolution == [0, 0]: logging.info('Trying to get resolution...') resolution = get_resolution(filename, probe_software) logging.info('Resolution is %dx%d' % (resolution[0], resolution[1])) if execute_sysencode_cmd('python3 %s/danmaku2ass3.py -o %s -s %dx%d -fs %d -a 0.8 -dm 8 %s' % (LOCATION_DIR, ass_name, resolution[0], resolution[1], int(math.ceil(resolution[1] / 21.6)), xml_name)) == 0: logging.info('The ASS file should be ready!') else: logging.error('''Danmaku2ASS failed. Head to https://github.com/m13253/danmaku2ass/issues to complain about this.''') #---------------------------------------------------------------------- def convert_ass_py2(filename, probe_software, resolution = [0, 0]): """str,str->None With danmaku2ass, branch py2. 
https://github.com/m13253/danmaku2ass/tree/py2 Author: @m13253 GPLv3""" logging.info('Converting danmaku to ASS file with danmaku2ass(py2)...') xml_name = filename + '.xml' if resolution == [0, 0]: logging.info('Trying to get resolution...') resolution = get_resolution(filename, probe_software) logging.info('Resolution is {width}x{height}'.format(width = resolution[0], height = resolution[1])) #convert_ass(xml_name, filename + '.ass', resolution) try: Danmaku2ASS(xml_name, filename + '.ass', resolution[0], resolution[1], font_size = int(math.ceil(resolution[1] / 21.6)), text_opacity=0.8, duration_marquee=8.0) logging.info('INFO: The ASS file should be ready!') except Exception as e: logging.error('''Danmaku2ASS failed: %s Head to https://github.com/m13253/danmaku2ass/issues to complain about this.'''% e) logging.debug(traceback.print_exc()) pass #Or it may stop leaving lots of lines unprocessed #---------------------------------------------------------------------- def download_danmaku(cid, filename): """str,str,int->None Download XML file, and convert to ASS(if required) Used to be in main(), but replaced due to the merge of -m (BiligrabLite). If danmaku only, will see whether need to export ASS.""" logging.info('Fetching XML...') execute_sysencode_cmd('curl -o "{filename}.xml" --compressed http://comment.bilibili.com/{cid}.xml'.format(filename = filename, cid = cid)) #execute_sysencode_cmd('gzip -d '+cid+'.xml.gz') logging.info('The XML file, {filename}.xml should be ready...enjoy!'.format(filename = filename.decode("utf-8").encode(sys.stdout.encoding))) #---------------------------------------------------------------------- def logcommand(command_line): logging.debug('Executing: '+' '.join('\''+i+'\'' if ' ' in i or '&' in i or '"' in i else i for i in command_line)) #---------------------------------------------------------------------- def logorraise(message, debug=False): if debug: raise message else: logging.error(str(message)) ######################################################################## class DanmakuOnlyException(Exception): '''Deal with DanmakuOnly to stop the main() function.''' #---------------------------------------------------------------------- def __init__(self, value): self.value = value #---------------------------------------------------------------------- def __str__(self): return repr(self.value) ######################################################################## class Danmaku2Ass2Exception(Exception): '''Deal with Danmaku2ASS2 to stop the main() function.''' #---------------------------------------------------------------------- def __init__(self, value): self.value = value #---------------------------------------------------------------------- def __str__(self): return repr(self.value) ######################################################################## class NoCidException(Exception): '''Deal with no cid to stop the main() function.''' #---------------------------------------------------------------------- def __init__(self, value): self.value = value #---------------------------------------------------------------------- def __str__(self): return repr(self.value) ######################################################################## class NoVideoURLException(Exception): '''Deal with no video URL to stop the main() function.''' #---------------------------------------------------------------------- def __init__(self, value): self.value = value #---------------------------------------------------------------------- def __str__(self): return 
repr(self.value) ######################################################################## class ExportM3UException(Exception): '''Deal with export to m3u to stop the main() function.''' #---------------------------------------------------------------------- def __init__(self, value): self.value = value #---------------------------------------------------------------------- def __str__(self): return repr(self.value) ######################################################################## class YougetURLException(Exception): '''you-get cannot get URL somehow''' #---------------------------------------------------------------------- def __init__(self, value): self.value = value #---------------------------------------------------------------------- def __str__(self): return repr(self.value) ######################################################################## class URLOpenException(Exception): '''cannot get URL somehow''' #---------------------------------------------------------------------- def __init__(self, value): self.value = value #---------------------------------------------------------------------- def __str__(self): return repr(self.value) ######################################################################## class DownloadVideo(threading.Thread): """Threaded Download Video""" #---------------------------------------------------------------------- def __init__(self, queue): threading.Thread.__init__(self) self.queue = queue #---------------------------------------------------------------------- def run(self): while True: #grabs start time from queue down_set = self.queue.get() #return_value = download_video(down_set) cmd = download_video_link(*down_set) return_value = execute_cmd(cmd) self.queue.task_done() #---------------------------------------------------------------------- def main_threading(download_thread = 3, video_list = [], thread_single_download = 16): """""" command_pool = [(video_list.index(url_this), download_software, url_this, thread_single_download) for url_this in video_list] #spawn a pool of threads, and pass them queue instance for i in range(int(download_thread)): t = DownloadVideo(queue) t.setDaemon(True) t.start() #populate queue with data for command_single in command_pool: queue.put(command_single) #wait on the queue until everything has been processed queue.join() #---------------------------------------------------------------------- def main(vid, p, oversea, cookies, download_software, concat_software, is_export, probe_software, danmaku_only, time_fetch=5, download_thread= 16, thread_single_download= 16): global cid, partname, title, videourl, is_first_run videourl = 'http://www.bilibili.com/video/av{vid}/index_{p}.html'.format(vid = vid, p = p) # Check both software logging.debug(concat_software + ', ' + download_software) # Start to find cid, api cid, partname, title, pages = find_cid_api(vid, p, cookies) #if cid is 0: #logging.warning('Cannot find cid, trying to do it brutely...') #find_cid_flvcd(videourl) if cid is 0: if IS_SLIENT == 0: logging.warning('Strange, still cannot find cid... ') is_black3 = str(raw_input('Type y for trying the unpredictable way, or input the cid by yourself; Press ENTER to quit.')) else: is_black3 = 'y' if 'y' in str(is_black3): vid = str(int(vid) - 1) p = 1 find_cid_api(int(vid) - 1, p) cid = cid + 1 elif str(is_black3) is '': raise NoCidException('FATAL: Cannot get cid anyway!') else: cid = str(is_black3) # start to make folders... 
if title is not '': folder = title else: folder = cid if len(partname) is not 0: filename = partname elif title is not '': filename = title else: filename = cid #In case cannot find which s which filename = str(p) + ' - ' + filename # In case make too much folders folder_to_make = os.getcwd() + '/' + folder if is_first_run == 0: if not os.path.exists(folder_to_make): os.makedirs(folder_to_make) is_first_run = 1 os.chdir(folder_to_make) # Download Danmaku download_danmaku(cid, filename) if is_export >= 1 and IS_M3U != 1 and danmaku_only == 1: rawurl = get_video(oversea, convert_m3u = True) check_dependencies_remote_resolution('ffprobe') resolution = getvideosize(rawurl[0])[0] convert_ass(filename, probe_software, resolution = resolution) if IS_M3U == 1: rawurl = [] #M3U export, then stop if oversea in {'0', '1'}: rawurl = get_video(oversea, convert_m3u = True) else: duration_list = [] rawurl = get_video(oversea, convert_m3u = False) for url in rawurl: duration_list.append(getvideosize(url)[1]) rawurl = map(lambda x,y: (x, y), rawurl, duration_list) #print(rawurl) resolution = getvideosize(rawurl[0][0])[0] m3u_file = make_m3u8(rawurl) f = open(filename + '.m3u', 'w') cwd = os.getcwd() m3u_file = m3u_file.encode("utf8") f.write(m3u_file) f.close() convert_ass(filename, probe_software, resolution = resolution) logging.debug(m3u_file) raise ExportM3UException('INFO: Export to M3U') if danmaku_only == 1: raise DanmakuOnlyException('INFO: Danmaku only') # Find video location logging.info('Finding video location...') # try api # flvcd url_flag = 1 rawurl = [] logging.info('Trying to get download URL...') rawurl = get_video(oversea, convert_m3u = False) if len(rawurl) == 0 and oversea != '4': # hope this never happen logging.warning('API failed, using falloff plan...') rawurl = find_link_flvcd(videourl) vid_num = len(rawurl) if IS_SLIENT == 0 and vid_num == 0: logging.warning('Cannot get download URL!') rawurl = list(str(raw_input('If you know the url, please enter it now: URL1|URL2...'))).split('|') vid_num = len(rawurl) if vid_num is 0: # shit really hit the fan raise NoVIdeoURLException('FATAL: Cannot get video URL anyway!') logging.info('{vid_num} videos in part {part_now} to download, fetch yourself a cup of coffee...'.format(vid_num = vid_num, part_now = part_now)) #Multi thread if len(rawurl) == 1: cmd = download_video_link(0,download_software,rawurl[0], thread_single_download) execute_sysencode_cmd(cmd) else: global queue queue = Queue.Queue() main_threading(download_thread, rawurl, thread_single_download) queue.join() concat_videos(concat_software, vid_num, filename) if is_export >= 1: try: convert_ass(filename, probe_software) except Exception: logging.warning('Problem with ASS conversion!') pass logging.info('Part Done!') #---------------------------------------------------------------------- def get_full_p(p_raw): """str->list""" p_list = [] p_raw = p_raw.split(',') for item in p_raw: if '~' in item: # print(item) lower = 0 higher = 0 item = item.split('~') part_now = '0' try: lower = int(item[0]) except Exception: logging.warning('Cannot read lower!') try: higher = int(item[1]) except Exception: logging.warning('Cannot read higher!') if lower == 0 or higher == 0: if lower == 0 and higher != 0: lower = higher elif lower != 0 and higher == 0: higher = lower else: logging.warning('Cannot find any higher or lower, ignoring...') # break mid = 0 if higher < lower: mid = higher higher = lower lower = mid p_list.append(lower) while lower < higher: lower = lower + 1 p_list.append(lower) # break 
else: try: p_list.append(int(item)) except Exception: logging.warning('Cannot read "{item}", abandon it.'.format(item = item)) # break p_list = list_del_repeat(p_list) return p_list #---------------------------------------------------------------------- def check_dependencies_remote_resolution(software): """""" if 'ffprobe' in software: output = commands.getstatusoutput('ffprobe --help') if str(output[0]) == '32512': FFPROBE_USABLE = 0 else: FFPROBE_USABLE = 1 #---------------------------------------------------------------------- def check_dependencies_exportm3u(IS_M3U): """int,str->int,str""" if IS_M3U == 1: output = commands.getstatusoutput('ffprobe --help') if str(output[0]) == '32512': logging.error('ffprobe DNE, python3 does not exist or not callable!') err_input = str(raw_input('Do you want to exit, ignore or stop the conversion?(e/i/s)')) if err_input == 'e': exit() elif err_input == '2': FFPROBE_USABLE = 0 elif err_input == 's': IS_M3U = 0 else: logging.warning('Cannot read input, stop the conversion!') IS_M3U = 0 else: FFPROBE_USABLE = 1 return IS_M3U #---------------------------------------------------------------------- def check_dependencies_danmaku2ass(is_export): """int,str->int,str""" if is_export == 3: convert_ass = convert_ass_py3 output = commands.getstatusoutput('python3 --help') if str(output[0]) == '32512' or not os.path.exists(os.path.join(LOCATION_DIR, 'danmaku2ass3.py')): logging.warning('danmaku2ass3.py DNE, python3 does not exist or not callable!') err_input = str(raw_input('Do you want to exit, use Python 2.x or stop the conversion?(e/2/s)')) if err_input == 'e': exit() elif err_input == '2': convert_ass = convert_ass_py2 is_export = 2 elif err_input == 's': is_export = 0 else: logging.warning('Cannot read input, stop the conversion!') is_export = 0 elif is_export == 2 or is_export == 1: convert_ass = convert_ass_py2 if not os.path.exists(os.path.join(LOCATION_DIR, 'danmaku2ass2.py')): logging.warning('danmaku2ass2.py DNE!') err_input = str(raw_input('Do you want to exit, use Python 3.x or stop the conversion?(e/3/s)')) if err_input == 'e': exit() elif err_input == '3': convert_ass = convert_ass_py3 is_export = 3 elif err_input == 's': is_export = 0 else: logging.warning('Cannot read input, stop the conversion!') is_export = 0 else: convert_ass = convert_ass_py2 return is_export, convert_ass #---------------------------------------------------------------------- def usage(): """""" print(''' Biligrab https://github.com/cnbeining/Biligrab http://www.cnbeining.com/ Beining@ACICFG Usage: python biligrab.py (-h) (-a) (-p) (-s) (-c) (-d) (-v) (-l) (-e) (-b) (-m) (-n) (-u) (-t) (-q) (-r) (-g) -h: Default: None Print this usage file. -a: Default: None The av number. If not set, Biligrab will use the fallback interactive mode. Support "~", "," and mix use. Examples: Input Output 1 [1] 1,2 [1, 2] 1~3 [1, 2, 3] 1,2~3 [1, 2, 3] -p: Default: 0 The part number. Able to use the same syntax as "-a". If set to 0, Biligrab will download all the available parts in the video. -s: Default: 0 Source to download. 0: The original API source, can be Letv backup, and can fail if the original video is not available(e.g., deleted) 1: The CDN API source, "oversea accelerate". Can be MINICDN backup in Mainland China or oversea. Good to bypass some bangumi's restrictions. 2: Force to use the original source. Use Flvcd to parse the video, but would fail if 1) The original source DNE, e.g., some old videos 2) The original source is Letvcloud itself. 
3) Other unknown reason(s) that stops Flvcd from parsing the video. For any video that failed to parse, Biligrab will try to use Flvcd. (Mainly for oversea users regarding to copyright-restricted bangumies.) If the API is blocked, Biligrab would fake the UA. 3: (Not stable) Use the HTML5 API. This works for downloading some cached Letvcloud videos, but is slow, and would fail for no reason sometimes. Will retry if unavailable. 4: Use Flvcd. Good to fight with oversea and copyright restriction, but not working with iQiyi. May retrieve better quality video, especially for Youku. 5: Use BilibiliPr. Good to fight with some copyright restriction that BilibiliPr can fix. Not always working though. 6: Use You-get (https://github.com/soimort/you-get). You need a you-get callable directly like "you-get -u blahblah". -c: Default: ./bilicookies The path of cookies. Use cookies to visit member-only videos. -d: Default: None Set the desired download software. Biligrab supports aria2c(16 threads), axel(20 threads), wget and curl by far. If not set, Biligrab will detect an available one; If none of those is available, Biligrab will quit. For more software support, please open an issue at https://github.com/cnbeining/Biligrab/issues/ -v: Default:None Set the desired concatenate software. Biligrab supports ffmpeg by far. If not set, Biligrab will detect an available one; If none of those is available, Biligrab will quit. For more software support, please open an issue at https://github.com/cnbeining/Biligrab/issues/ Make sure you include a *working* command line example of this software! -l: Default: INFO Dump the log of the output for better debugging. Can be set to debug. -e: Default: 1 Export Danmaku to ASS file. Fulfilled with danmaku2ass(https://github.com/m13253/danmaku2ass/tree/py2), Author: @m13253, GPLv3 License. *For issue with this function, if you think the problem lies on the danmaku2ass side, please open the issue at both projects.* If set to 1 or 2, Biligrab will use Danmaku2ass's py2 branch. If set to 3, Biligrab will use Danmaku2ass's master branch, which would require a python3 callable via 'python3'. If python3 not callable or danmaku2ass2/3 DNE, Biligrab will ask for action. -b: Default: None Set the probe software. Biligrab supports Mediainfo and FFprobe. If not set, Biligrab will detect an available one; If none of those is available, Biligrab will quit. For more software support, please open an issue at https://github.com/cnbeining/Biligrab/issues/ Make sure you include a *working* command line example of this software! -m: Default: 0 Only download the danmaku. -n: Default: 0 Silent Mode. Biligrab will not ask any question. -u: Default: 0 Export video link to .m3u file, which can be used with MPlayer, mpc, VLC, etc. Biligrab will export a m3u8 instead of downloading any video(s). Can be broken with sources other than 0 or 1. -t: Default: None The number of Mylist. Biligrab will process all the videos in this list. -q: Default: 3 The thread number for downloading. Good to fix overhead problem. -r: Default: -1 Select video quality. Only works with Source 0 or 1. Range: 0~4, higher for better quality. -g: Default: 6 Threads for downloading every part. Works with aria2 and axel. -i: Default: None Fake IP address. 
''') #---------------------------------------------------------------------- if __name__ == '__main__': is_first_run, is_export, danmaku_only, IS_SLIENT, IS_M3U, mylist, time_fetch, download_thread, QUALITY, thread_single_download = 0, 1, 0, 0, 0, 0, 5, 16, -1, 16 argv_list,av_list = [], [] argv_list = sys.argv[1:] p_raw, vid, oversea, cookiepath, download_software, concat_software, probe_software, vid_raw, LOG_LEVEL, FAKE_IP, IS_FAKE_IP = '', '', '', '', '', '', '', '', 'INFO', '', 0 convert_ass = convert_ass_py2 try: opts, args = getopt.getopt(argv_list, "ha:p:s:c:d:v:l:e:b:m:n:u:t:q:r:g:i:", ['help', "av=", 'part=', 'source=', 'cookie=', 'download=', 'concat=', 'log=', 'export=', 'probe=', 'danmaku=', 'slient=', 'm3u=', 'mylist=', 'thread=', 'quality=', 'thread_single=', 'fake-ip=']) except getopt.GetoptError: usage() exit() for o, a in opts: if o in ('-h', '--help'): usage() exit() if o in ('-a', '--av'): vid_raw = a if o in ('-p', '--part'): p_raw = a if o in ('-s', '--source'): oversea = a if o in ('-c', '--cookie'): cookiepath = a if cookiepath == '': logging.warning('No cookie path set, use default: ./bilicookies') cookiepath = './bilicookies' if o in ('-d', '--download'): download_software = a if o in ('-v', '--concat'): concat_software = a if o in ('-l', '--log'): try: LOG_LEVEL = str(a) except Exception: LOG_LEVEL = 'INFO' if o in ('-e', '--export'): is_export = int(a) if o in ('-b', '--probe'): probe_software = a if o in ('-m', '--danmaku'): danmaku_only = int(a) if o in ('-n', '--slient'): IS_SLIENT = int(a) if o in ('-u', '--m3u'): IS_M3U = int(a) if o in ('-t', '--mylist'): mylist = a if o in ('-q', '--thread'): download_thread = int(a) if o in ('-r', '--quality'): QUALITY = int(a) if o in ('-g', '--thread_single'): thread_single_download = int(a) if o in ('-i', '--fake-ip'): FAKE_IP = a IS_FAKE_IP = 1 if len(vid_raw) == 0: vid_raw = str(raw_input('av')) p_raw = str(raw_input('P')) oversea = str(raw_input('Source?')) cookiepath = './bilicookies' logging.basicConfig(level = logging_level_reader(LOG_LEVEL)) logging.debug('FAKE IP: ' + str(IS_FAKE_IP) + ' ' + FAKE_IP) av_list = get_full_p(vid_raw) if mylist != 0: av_list += mylist_to_aid_list(mylist) logging.debug('av_list') if len(cookiepath) == 0: cookiepath = './bilicookies' if len(p_raw) == 0: logging.info('No part number set, download all the parts.') p_raw = '0' if len(oversea) == 0: oversea = '0' logging.info('Oversea not set, use original API(methon 0).') IS_M3U = check_dependencies_exportm3u(IS_M3U) if IS_M3U == 1 and oversea not in {'0', '1'}: # See issue #8 logging.info('M3U exporting with source other than 0 or 1 can be broken, and lead to wrong duration!') if IS_SLIENT == 0: input_raw = str(raw_input('Enter "q" to quit, or enter the source you want.')) if input_raw == 'q': exit() else: oversea = input_raw concat_software, download_software, probe_software = check_dependencies(download_software, concat_software, probe_software) p_list = get_full_p(p_raw) if len(av_list) > 1 and len(p_list) > 1: logging.warning('You are downloading multi parts from multiple videos! 
This may result in unpredictable outputs!') if IS_SLIENT == 0: input_raw = str(raw_input('Enter "y" to continue, "n" to only download the first part, "q" to quit, or enter the part number you want.')) if input_raw == 'y': pass elif input_raw == 'n': p_list = ['1'] elif input_raw == 'q': exit() else: p_list = get_full_p(input_raw) cookies = read_cookie(cookiepath) global BILIGRAB_HEADER, BILIGRAB_UA # deal with danmaku2ass's drama / Twice in case someone failed to check dependencies is_export, convert_ass = check_dependencies_danmaku2ass(is_export) is_export, convert_ass = check_dependencies_danmaku2ass(is_export) python_ver_str = '.'.join([str(i) for i in sys.version_info[:2]]) BILIGRAB_UA = 'Biligrab/{VER} (cnbeining@gmail.com) (Python-urllib/{python_ver_str}, like libcurl/1.0 NSS-Mozilla/2.0)'.format(VER = VER, python_ver_str = python_ver_str) #BILIGRAB_UA = 'Biligrab / ' + str(VER) + ' (cnbeining@gmail.com) (like )' BILIGRAB_HEADER = {'User-Agent': BILIGRAB_UA, 'Cache-Control': 'no-cache', 'Pragma': 'no-cache', 'Cookie': cookies[0]} if LOG_LEVEL == 'DEBUG': logging.debug('!!!!!!!!!!!!!!!!!!!!!!!\nWARNING: This log contains some sensitive data. You may want to delete some part of the data before you post it publicly!\n!!!!!!!!!!!!!!!!!!!!!!!') logging.debug('BILIGRAB_HEADER') try: request = urllib2.Request('http://ipinfo.io/json', headers=FAKE_HEADER) response = urllib2.urlopen(request) data = response.read() print('!!!!!!!!!!!!!!!!!!!!!!!\nWARNING: This log contains some sensitive data. You may want to delete some part of the data before you post it publicly!\n!!!!!!!!!!!!!!!!!!!!!!!') print('=======================DUMP DATA==================') print(data) print('========================DATA END==================') print('DEBUG: ' + str(av_list)) except Exception: print('WARNING: Cannot connect to IP-geo database server!') pass for av in av_list: vid = str(av) if str(p_raw) == '0': logging.info('You are downloading all the parts in this video...') try: p_raw = str('1~' + find_cid_api(vid, p_raw, cookies)[3]) p_list = get_full_p(p_raw) except Exception: logging.info('Error when reading all the parts!') if IS_SLIENT == 0: input_raw = str(raw_input('Enter the part number you want, or "q" to quit.')) if input_raw == '0': print('ERROR: Cannot use all the parts!') exit() elif input_raw == 'q': exit() else: p_list = get_full_p(input_raw) else: logging.info('Download the first part of the video...') p_raw = '1' p_list = [1] logging.info('Your target download is av{vid}, part {p_raw}, from source {oversea}'.format(vid = vid, p_raw = p_raw, oversea = oversea)) for p in p_list: reload(sys) sys.setdefaultencoding('utf-8') part_now = str(p) try: logging.info('Downloading part {p} ...'.format(p = p)) main(vid, p, oversea, cookies, download_software, concat_software, is_export, probe_software, danmaku_only, time_fetch, download_thread, thread_single_download) except DanmakuOnlyException: pass except ExportM3UException: pass except Exception as e: print('ERROR: Biligrab failed: %s' % e) print(' If you think this should not happen, please dump your log using "-l", and open a issue at https://github.com/cnbeining/Biligrab/issues .') print(' Make sure you delete all the sensitive data before you post it publicly.') traceback.print_exc() exit()
repo_name: cnbeining/Biligrab
path:      biligrab.py
language:  Python
license:   mit
size:      60,115
keyword:   ["VisIt"]
text_hash: 71d9a61a3dcf3c03f5d48f92e2c0c9b522a9666e7974b843e94ca7b692f380bf
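The Biligrab sample above signs its API queries in calc_sign and find_video_address_normal_api by appending a secret key to the query string and taking an MD5 hex digest. Here is a Python 3 sketch of that mechanism using the constants from the sample; the endpoint and keys date from 2015, so this illustrates the scheme rather than a working call:

import hashlib

SECRETKEY_MINILOADER = '1c15888dc316e05a15fdd0a02ed6584f'  # from the sample above

def calc_sign(string):
    # Same as the sample's calc_sign: MD5 hex digest of the full string,
    # which already has the secret key appended by the caller.
    return hashlib.md5(str(string).encode('utf-8')).hexdigest()

def playurl(cid):
    # Mirrors find_video_address_normal_api for the default-quality case.
    query = 'cid={cid}&from=miniplay&player=1'.format(cid=cid)
    sign = calc_sign(query + SECRETKEY_MINILOADER)
    return 'http://interface.bilibili.com/playurl?' + query + '&sign=' + sign

print(playurl('12345'))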
import logging import os import re import copy import time from argparse import ArgumentParser, RawTextHelpFormatter, SUPPRESS from datetime import datetime import os from Top import Top log = logging.getLogger(__name__) # noinspection PyShadowingNames class Settings(Top): """ Parses settings in the following order: 1) Built-in settings 2) Settings from self.configfile 3) If command line option --config=configfile is present, read settings from configfile 4) Settings from the command line options Settings from the previous steps can be overwritten. """ def __init__(self, from_config_file=True): # self.OutputFolder = os.environ['HOME']+'/Sites/' super().__init__() self.settings = None # settings is the only class that does not need settings attribute ### Default settings ### self.OutputFolder = os.environ['HOME'] + '/public_html/' # self.BackupFolder = os.environ['HOME']+'/Sites/terse-backup/' # sub-folder name for storing xyz and png files for HTML page self.tersepic = 'terse-pics/' # self.configFile = os.path.join(os.path.dirname(__file__), "data/terse.rc") self.configFile = os.path.join(os.environ['HOME'], "terse.rc") #self.configFile = '/Users/talipovm/Dropbox/development/terse/terse.rc' self.tersehtml = 'terse.html' # Relative location of Jmol folder with respect to self.tersehtml # Should be within the web server folders self.JmolFolder = './Jmol/' # Debug run self.debug = False # Prints out additional information about CPU usage for each calculation self.usage = False # Current folder from which terse.py is invoked self.selfPath = '' # Options from the command line interface self.optsCLI = {} # Number of available CPUs # Not in use currently but might be used to parallelize e.g. cubegen self.nproc = 0 # File extension <-> Parsing interface matching self.exts = {} # Time stamp -- used to produce unique xyz/png file names and avoid problems # with undesired use of cached old files. # Now, it is probably not needed since HTML page itself prevents caching self.suffix = time.strftime('%y-%m-%d--%H-%M-%S') # A timestamp to be shown on the web page self.timestamp = datetime.now().strftime("%Y-%m-%d %I:%M %p") self.EnergyUnits = 'kJ/mol' self.EnergyFactor = 2625.5 # Threshold for strong/weak inter-orbital interactions self.inbo_threshold = 10.0 # Show the set of internal coordinates (not implemented yet) self.connectivityMode = 0 # Request to show all geometries from geometry optimization # even when geometry optimization was successful self.FullGeomInfo = False # Was used to print out IRC energies # self.textirc = False # Show IRC gradients? self.ircgrad = 0 # Request to print detailed information (e.g. for str(object) self.detailed_print = False # Counters to keep track of the log files and jobs within them self.counter = 1 self.subcounter = 0 # Number of points if a cube file is requested (e.g. 
for orbital or ESP) self.npoints_cube = '0' # Store surface plots as isosurfaces in JVXL format rather than volumetric cube files # Cube files are huge and very slow to show on a web page self.useJVXL = True # Remove cube files after .jvxl files were produced self.save_cube = False # Default orbital colors self.color_mo_plus = 'red' self.color_mo_minus = 'blue' # Default path to gnuplot for making png; [TODO] to be replaced by matplotlib self.preferred_plotting_method = 'gnuplot' self.gnuplot = 'gnuplot' # if gnuplot will try to find it in PATH # Path to Gaussian utilities for making orbital/ESP plots self.cubegen = 'cubegen' self.formchk = 'formchk' self.color = {'err':'red', 'imag':'blue', 'lot':'green'} from JSMol import JSMol self.Engine3D = JSMol # self.JSMolLocation = "http://comp.chem.mu.edu" self.JSMolLocation = '' # Applet window dimensions self.JmolWinX, self.JmolWinY = 800, 600 # Molecule representation in Jmol self.JavaOptions = """vector ON; vector SCALE 3;\\ set animationFPS 10;\\ set measurementUnits pm;\\ set measureAllModels ON;\\ wireframe 20; spacefill 40;\\ """ # Overwrite the built-in parameters by those from the provided config file if from_config_file: self.read_config() def set_arg_val(self, line): """ Imports attr=value to settings. It will be accessible as settings.attr """ s = re.search(r'\s*(\S+)\s*=\s*(.*)', line) if s: attr = s.group(1) value = s.group(2) setattr(self, attr, value) def read_config(self, configfile=''): """ Read configuration file """ if configfile: fname = configfile else: fname = self.configFile try: f = open(fname, 'r') for s in f: if '#' in s: s = s.split('#')[0] # Strip comments self.set_arg_val(s) f.close() log.debug('Configuration file %s was successfully loaded' % fname) except IOError: log.warning("Cannot open config file " + fname) def float(self): # TODO convert the input data into proper type on the fly for a, v in self.__dict__.items(): try: fv = float(v) setattr(self, a, fv) except TypeError: pass @staticmethod def expand_range(s): """ Converts an input string that looks like '3,15-17,20' into an expanded list [3,15,16,17,20] :param s: :return: list """ if not s: return [] s_out = [] for item in s.split(','): if '-' in item: range_start, range_end = item.split('-') for i_mo in range(int(range_start), int(range_end) + 1): s_out.append(i_mo) else: s_out.append(int(item)) return s_out def prepare_filenames(self): if not ('filenames' in self.optsCLI): log.error('No files provided') return [] fnames = self.optsCLI['filenames'] if not isinstance(fnames, list): fnames = [fnames, ] return fnames def parse_shortcuts(self): """ Recognizes simplified input and applies to all files. At this moment, the files are assumed to be checkpoint files :param optsCLI: :return: list of tasks """ rec = {} # Make an +/-0.001 au isovalue spin density plot for all files in the command line rec['spin'] = {'type': 'spin', 'iv': 0.001} # Make an +0.001 au (i.e. 
only positive) isovalue spin density plot for all tasks in the command line rec['spin2'] = {'type': 'spin2', 'iv': 0.001} # Make a +/-0.03 au isovalue HOMO plot for all tasks in the command line rec['homo'] = {'type': 'mo=homo', 'iv': 0.03} # Make a +/-0.03 au isovalue LUMO plot for all tasks in the command line rec['lumo'] = {'type': 'mo=lumo', 'iv': 0.03} tasks = [] fnames = self.prepare_filenames() if not fnames: return [] for key, item in rec.items(): if key in self.optsCLI: for fn in fnames: item = copy.deepcopy(item) item['c'] = fn tasks.append(('iso', item)) return tasks def parse_by_orb_range(self): """ Recognizes simplified input of plotting a series of molecular orbitals :param optsCLI: :return: """ rec = {} # Make a +/-0.03 au isovalue plots for MOs selected by indices (starting from 1) rec['mos'] = {'type': 'mo=%i', 'iv': 0.03} # Make a +/-0.03 au isovalue plots for _alpha_ MOs selected by indices (starting from 1) rec['amos'] = {'type': 'amo=%i', 'iv': 0.03} # Make a +/-0.03 au isovalue plots for _beta_ MOs selected by indices (starting from 1) rec['bmos'] = {'type': 'bmo=%i', 'iv': 0.03} # for all files in the command line tasks = [] fnames = self.prepare_filenames() if not fnames: return [] for key, item in rec.items(): if key in self.optsCLI: mo_range = self.expand_range(self.optsCLI[key]) for fname in fnames: item['c'] = fname for mo in mo_range: item2 = copy.deepcopy(item) item2['type'] %= mo tasks.append(('iso', item2)) return tasks def parse_extensions(self): # Register extensions for attr, value in self: if '_extension' in attr: progname = attr.strip().split('_extension')[0] values = value.replace(' ', '').split(',') for v in values: if v in self.exts: log.warning('Several parsers for %s requested; will use the last_value request, %s' %(v, progname)) else: self.exts[v] = progname log.debug('Registered extensions: %s' % self.exts) return self.exts @staticmethod def parse_CLI_list(CLI_list, dict_name, leading_symbol): """ Combines a dictionary-type command line argument as dictionary: Recognized input: 0. Single record, simple syntax: --inbo a.chk (i.e., CLI_list=['a.chk']; leading_symbol='c') 1. Single record, key/value pair(s): --inbo c=a.chk --inbo l=a.log; (i.e., CLI_list=['c=a.chk', 'l=a.log']; leading_symbol will be parsed from that) 2. Multiple records, e.g. --inbo c1=a.chk --inbo l1=a.log --inbo c2=b.chk --inbo l2=b.log (i.e., CLI_list=['c1=a.chk', 'l1=a.log', 'c2=b.chk', 'l2=b.log']; leading_symbol will be parsed from that) """ implicit_counter = 0 tasks = [] jobs = {} for arg in CLI_list: # Separate keys from values ssplit = arg.split('=', 1) if len(ssplit) == 1: # Deal with simplified input 1. 
key, value = leading_symbol, ssplit[0] else: key, value = ssplit if re.search('^' + leading_symbol, key): implicit_counter += 1 # If job index is not provided, use implicit counter if not re.search(r'\d+', key): key += str(implicit_counter) # Split keys by job numbers k, i = re.search(r'^(\D+)(\d+)$', key).groups() i = int(i) if not (i in jobs): jobs[i] = {} jobs[i][k] = value for job in sorted(jobs): tasks.append((dict_name, jobs[job])) return tasks def parse_full_syntax_lists(self): """ Converts dictionary-type command-line arguments into tasks :param optsCLI: :return: """ rec = [] # full isosurface syntax for a checkpoint file rec.append({'type': 'isosurface', 'name': 'iso', 'ls': 'c'}) # full isosurface syntax for a formatted checkpoint file TODO figure out why it was commented out # rec.append({'type':'isosurface', 'name':'iso', 'ls': 'f'}) # inbo TODO: recollect how to use them rec.append({'type': 'inbo', 'name': 'inbo', 'ls': 'l'}) # full isosurface syntax for a JVXL rec.append({'type': 'jvxl', 'name': 'jvxl', 'ls': 'j'}) tasks = [] for v in rec: if not v['type'] in self.optsCLI: continue task = self.parse_CLI_list(CLI_list=self.optsCLI[v['type']], dict_name=v['name'], leading_symbol=v['ls']) tasks.extend(task) return tasks def parse_command_line(self, args): """ Parse command line parameters """ descr = """ terse.py - Helps to perform express visual analysis of output files of electronic structure calculations (mostly, Gaussian). The main idea of this script is to collect results of multiple calculations in one HTML file. For each calculation, short text description of each step will be given, and most important geometries will be shown in 3D mode using Jmol inside the web page. Features: Visualization of multiple files on the same web page Extensive support of Gaussian package Basic support of US-Gamess, Firefly packages Gzipped files supported Requirements: Python (also need argparse 'pip install argparse') OpenBabel (must install with python bindings and png) Gnuplot (optional, used for convergence/Scan/IRC plot generation) Jmol (to show molecules in 3D mode on the web page) JSMol (to show molecules in 3D mode on the web page using javascript) Author: Marat Talipov, talipovm@nmsu.edu """ sp = args[0] self.selfPath = sp[:sp.rfind('/') + 1] ar = args[1:] parser = ArgumentParser(descr, formatter_class=RawTextHelpFormatter, argument_default=SUPPRESS) parser.add_argument('--connectivityMode', action='store_true', help='Show set of internal coordinates') parser.add_argument('--debug', action='store_true', help='Debug mode') parser.add_argument('--config', action='store', help='Path to config file') parser.add_argument('--usage', action='store_true', help='Shows small statistics on CPU usage') parser.add_argument('--detailed_print', action='store_true', help='Show more detailed information') parser.add_argument('--inbo', help='Show interacting NBO orbitals; specially arranged Gaussian calculation needed,\nneeds two keys, "l" and "c"(optional key "t").\nUsage: terse.py --inbo l=LOGFILE --inbo c=CHECKPOINTFILE\nOptional usage: --inbo t=NBOENERGYTHRESHHOLD(default is 10 kcal/mol)', action='append', default=[]) parser.add_argument('--jvxl', help='Show JVXL isosurface, needs two keys, "j" and "x"', action='append', default=[]) parser.add_argument('--isosurface', help='Show isosurface from Gaussian .chk file', action='append', default=[]) parser.add_argument('--settings', help='Redefine settings', action='append', default=[]) parser.add_argument('--OutputFolder', help='Write terse pages to 
local folder in Sites. Useful for saving\nterse pages to some local archive.\nUsage: terse.py --OutputFolder=$HOME/Sites/PATH_TO_ARCHIVE_DIR', action='store')
        parser.add_argument('--spin', help='Show spin density', action='store_true')
        parser.add_argument('--spin2', help='Show spin density without negative polarization (red) lobes', action='store_true')
        parser.add_argument('--homo', help='Show HOMO', action='store_true')
        parser.add_argument('--lumo', help='Show LUMO', action='store_true')
        parser.add_argument('--mos', help='Show selected MOs', action='store')
        parser.add_argument('--amos', help='Show selected alpha-MOs', action='store')
        parser.add_argument('--bmos', help='Show selected beta-MOs', action='store')
        parser.add_argument('filenames', nargs='*', help='Files to parse')
        # parser.add_argument('--rmcube', help='Remove cube files from DIR after JVXL created', action='store_true')
        # parser.add_argument('--backup', help='Backup Mode: Create web page in backup directory for later use', action='store')
        # parser.add_argument('irc', help='Force terse.pl to consider two files as the part of the same IRC calculation')
        # parser.add_argument('uv', help='Show interacting NBO orbitals')

        self.optsCLI = vars(parser.parse_args(ar))

        # Update settings from a user-provided config file
        if 'config' in self.optsCLI:
            self.read_config(self.optsCLI['config'])

        # Additional settings can be mixed in via the --settings attr=value mechanism
        if 'settings' in self.optsCLI:
            for opt in self.optsCLI['settings']:
                self.set_arg_val(opt)

        # Update the settings directly requested from the CLI
        for attr in ('detailed_print', 'debug', 'usage', 'OutputFolder'):
            if attr in self.optsCLI:
                setattr(self, attr, self.optsCLI[attr])

        # Incorporate the syntax shortcut options above into the list of tasks to be processed
        tasks = self.parse_shortcuts()
        tasks_defined = bool(tasks)
        if not tasks_defined:
            tasks = self.parse_by_orb_range()
            tasks_defined = bool(tasks)
        if not tasks_defined:
            # Regular tasks
            fnames = self.prepare_filenames()
            for f in fnames:
                tasks.append(('file', f))
            tasks.extend(self.parse_full_syntax_lists())
        return tasks

    def real_path(self, fname):
        s = '%s/%s/%s--%i-%i%s' % (self.OutputFolder, self.tersepic, self.suffix, self.counter, self.subcounter, fname)
        return s

    def web_path(self, fname):
        s = './%s/%s--%i-%i%s' % (self.tersepic, self.suffix, self.counter, self.subcounter, fname)
        return s


if __name__ == "__main__":
    print("Test output")

    print("\n===Hard-coded settings:===")
    s = Settings(from_config_file=False)
    print(s)

    print("\n===Settings updated from config file (%s)===" % s.configFile)
    s.read_config()
    print(s)

    print("\n===Settings updated from the command line===")
    import sys
    files = s.parse_command_line(sys.argv)
    print(s)
    print(files)
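A quick check of Settings.expand_range above, reusing the example from its own docstring (expected outputs shown as comments, traced by hand rather than from a live run):

print(Settings.expand_range('3,15-17,20'))  # [3, 15, 16, 17, 20]
print(Settings.expand_range(''))            # []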
talipovm/terse
terse/Settings.py
Python
mit
18,566
[ "Firefly", "GAMESS", "Gaussian", "Jmol" ]
ccdb7543da05d748c29d4a4b79c9adc52a1f144136de217b6e1263011ab29366
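The parse_CLI_list docstring above names three accepted syntaxes; a short demonstration of the grouping it describes, with expected results traced from the code rather than verified against a live run:

Settings.parse_CLI_list(['a.chk'], 'iso', 'c')
# -> [('iso', {'c': 'a.chk'})]
Settings.parse_CLI_list(['c1=a.chk', 'l1=a.log', 'c2=b.chk'], 'iso', 'c')
# -> [('iso', {'c': 'a.chk', 'l': 'a.log'}), ('iso', {'c': 'b.chk'})]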
# $Id: __init__.py 5574 2008-06-23 09:16:39Z grubert $ # Author: Engelbert Gruber <grubert@users.sourceforge.net> # Copyright: This module has been placed in the public domain. """ LaTeX2e document tree Writer. """ __docformat__ = 'reStructuredText' # code contributions from several people included, thanks to all. # some named: David Abrahams, Julien Letessier, Lele Gaifax, and others. # # convention deactivate code by two # e.g. ##. import sys import time import re import string from types import ListType from docutils import frontend, nodes, languages, writers, utils from docutils.writers.newlatex2e import unicode_map from docutils.transforms.references import DanglingReferencesVisitor class Writer(writers.Writer): supported = ('latex','latex2e') """Formats this writer supports.""" settings_spec = ( 'LaTeX-Specific Options', 'The LaTeX "--output-encoding" default is "latin-1:strict".', (('Specify documentclass. Default is "article".', ['--documentclass'], {'default': 'article', }), ('Specify document options. Multiple options can be given, ' 'separated by commas. Default is "10pt,a4paper".', ['--documentoptions'], {'default': '10pt,a4paper', }), ('Use LaTeX footnotes. LaTeX supports only numbered footnotes (does it?). ' 'Default: no, uses figures.', ['--use-latex-footnotes'], {'default': 0, 'action': 'store_true', 'validator': frontend.validate_boolean}), ('Format for footnote references: one of "superscript" or ' '"brackets". Default is "superscript".', ['--footnote-references'], {'choices': ['superscript', 'brackets'], 'default': 'superscript', 'metavar': '<format>', 'overrides': 'trim_footnote_reference_space'}), ('Use LaTeX citations. ' 'Default: no, uses figures which might get mixed with images.', ['--use-latex-citations'], {'default': 0, 'action': 'store_true', 'validator': frontend.validate_boolean}), ('Format for block quote attributions: one of "dash" (em-dash ' 'prefix), "parentheses"/"parens", or "none". Default is "dash".', ['--attribution'], {'choices': ['dash', 'parentheses', 'parens', 'none'], 'default': 'dash', 'metavar': '<format>'}), ('Specify a stylesheet file. The file will be "input" by latex in ' 'the document header. Default is no stylesheet (""). ' 'Overrides --stylesheet-path.', ['--stylesheet'], {'default': '', 'metavar': '<file>', 'overrides': 'stylesheet_path'}), ('Specify a stylesheet file, relative to the current working ' 'directory. Overrides --stylesheet.', ['--stylesheet-path'], {'metavar': '<file>', 'overrides': 'stylesheet'}), ('Table of contents by docutils (default) or LaTeX. LaTeX (writer) ' 'supports only one ToC per document, but docutils does not know of ' 'pagenumbers. LaTeX table of contents also means LaTeX generates ' 'sectionnumbers.', ['--use-latex-toc'], {'default': 0, 'action': 'store_true', 'validator': frontend.validate_boolean}), ('Add parts on top of the section hierarchy.', ['--use-part-section'], {'default': 0, 'action': 'store_true', 'validator': frontend.validate_boolean}), ('Let LaTeX print author and date, do not show it in docutils ' 'document info.', ['--use-latex-docinfo'], {'default': 0, 'action': 'store_true', 'validator': frontend.validate_boolean}), ('Use LaTeX abstract environment for the documents abstract.' 
'Per default the abstract is an unnumbered section.', ['--use-latex-abstract'], {'default': 0, 'action': 'store_true', 'validator': frontend.validate_boolean}), ('Color of any hyperlinks embedded in text ' '(default: "blue", "0" to disable).', ['--hyperlink-color'], {'default': 'blue'}), ('Enable compound enumerators for nested enumerated lists ' '(e.g. "1.2.a.ii"). Default: disabled.', ['--compound-enumerators'], {'default': None, 'action': 'store_true', 'validator': frontend.validate_boolean}), ('Disable compound enumerators for nested enumerated lists. This is ' 'the default.', ['--no-compound-enumerators'], {'action': 'store_false', 'dest': 'compound_enumerators'}), ('Enable section ("." subsection ...) prefixes for compound ' 'enumerators. This has no effect without --compound-enumerators. ' 'Default: disabled.', ['--section-prefix-for-enumerators'], {'default': None, 'action': 'store_true', 'validator': frontend.validate_boolean}), ('Disable section prefixes for compound enumerators. ' 'This is the default.', ['--no-section-prefix-for-enumerators'], {'action': 'store_false', 'dest': 'section_prefix_for_enumerators'}), ('Set the separator between section number and enumerator ' 'for compound enumerated lists. Default is "-".', ['--section-enumerator-separator'], {'default': '-', 'metavar': '<char>'}), ('When possibile, use the specified environment for literal-blocks. ' 'Default is quoting of whitespace and special chars.', ['--literal-block-env'], {'default': '', }), ('When possibile, use verbatim for literal-blocks. ' 'Compatibility alias for "--literal-block-env=verbatim".', ['--use-verbatim-when-possible'], {'default': 0, 'action': 'store_true', 'validator': frontend.validate_boolean}), ('Table style. "standard" with horizontal and vertical lines, ' '"booktabs" (LaTeX booktabs style) only horizontal lines ' 'above and below the table and below the header or "nolines". ' 'Default: "standard"', ['--table-style'], {'choices': ['standard', 'booktabs','nolines'], 'default': 'standard', 'metavar': '<format>'}), ('LaTeX graphicx package option. ' 'Possible values are "dvips", "pdftex". "auto" includes LaTeX code ' 'to use "pdftex" if processing with pdf(la)tex and dvips otherwise. ' 'Default is no option.', ['--graphicx-option'], {'default': ''}), ('LaTeX font encoding. ' 'Possible values are "T1", "OT1", "" or some other fontenc option. ' 'The font encoding influences available symbols, e.g. "<<" as one ' 'character. Default is "" which leads to package "ae" (a T1 ' 'emulation using CM fonts).', ['--font-encoding'], {'default': ''}), ('Per default the latex-writer puts the reference title into ' 'hyperreferences. 
Specify "ref*" or "pageref*" to get the section ' 'number or the page number.', ['--reference-label'], {'default': None, }), ('Specify style and database for bibtex, for example ' '"--use-bibtex=mystyle,mydb1,mydb2".', ['--use-bibtex'], {'default': None, }), ),) settings_defaults = {'output_encoding': 'latin-1'} relative_path_settings = ('stylesheet_path',) config_section = 'latex2e writer' config_section_dependencies = ('writers',) visitor_attributes = ("head_prefix", "head", "body_prefix", "body", "body_suffix") output = None """Final translated form of `document`.""" def __init__(self): writers.Writer.__init__(self) self.translator_class = LaTeXTranslator def translate(self): visitor = self.translator_class(self.document) self.document.walkabout(visitor) self.output = visitor.astext() # copy parts for attr in self.visitor_attributes: setattr(self, attr, getattr(visitor, attr)) def assemble_parts(self): writers.Writer.assemble_parts(self) for part in self.visitor_attributes: self.parts[part] = ''.join(getattr(self, part)) """ Notes on LaTeX -------------- * LaTeX does not support multiple tocs in one document. (might be no limitation except for docutils documentation) The "minitoc" latex package can produce per-chapter tocs in book and report document classes. * width * linewidth - width of a line in the local environment * textwidth - the width of text on the page Maybe always use linewidth ? *Bug* inside a minipage a (e.g. Sidebar) the linewidth is not changed, needs fix in docutils so that tables are not too wide. So we add locallinewidth set it initially and on entering sidebar and reset on exit. """ class Babel: """Language specifics for LaTeX.""" # country code by a.schlock. # partly manually converted from iso and babel stuff, dialects and some _ISO639_TO_BABEL = { 'no': 'norsk', #XXX added by hand ( forget about nynorsk?) 'gd': 'scottish', #XXX added by hand 'hu': 'magyar', #XXX added by hand 'pt': 'portuguese',#XXX added by hand 'sl': 'slovenian', 'af': 'afrikaans', 'bg': 'bulgarian', 'br': 'breton', 'ca': 'catalan', 'cs': 'czech', 'cy': 'welsh', 'da': 'danish', 'fr': 'french', # french, francais, canadien, acadian 'de': 'ngerman', #XXX rather than german # ngerman, naustrian, german, germanb, austrian 'el': 'greek', 'en': 'english', # english, USenglish, american, UKenglish, british, canadian 'eo': 'esperanto', 'es': 'spanish', 'et': 'estonian', 'eu': 'basque', 'fi': 'finnish', 'ga': 'irish', 'gl': 'galician', 'he': 'hebrew', 'hr': 'croatian', 'hu': 'hungarian', 'is': 'icelandic', 'it': 'italian', 'la': 'latin', 'nl': 'dutch', 'pl': 'polish', 'pt': 'portuguese', 'ro': 'romanian', 'ru': 'russian', 'sk': 'slovak', 'sr': 'serbian', 'sv': 'swedish', 'tr': 'turkish', 'uk': 'ukrainian' } def __init__(self,lang): self.language = lang # pdflatex does not produce double quotes for ngerman in tt. 
self.double_quote_replacment = None if re.search('^de',self.language): #self.quotes = ("\"`", "\"'") self.quotes = ('{\\glqq}', '{\\grqq}') self.double_quote_replacment = "{\\dq}" elif re.search('^it',self.language): self.quotes = ("``", "''") self.double_quote_replacment = r'{\char`\"}' else: self.quotes = ("``", "''") self.quote_index = 0 def next_quote(self): q = self.quotes[self.quote_index] self.quote_index = (self.quote_index+1)%2 return q def quote_quotes(self,text): t = None for part in text.split('"'): if t == None: t = part else: t += self.next_quote() + part return t def double_quotes_in_tt (self,text): if not self.double_quote_replacment: return text return text.replace('"', self.double_quote_replacment) def get_language(self): if self._ISO639_TO_BABEL.has_key(self.language): return self._ISO639_TO_BABEL[self.language] else: # support dialects. l = self.language.split("_")[0] if self._ISO639_TO_BABEL.has_key(l): return self._ISO639_TO_BABEL[l] return None latex_headings = { 'optionlist_environment' : [ '\\newcommand{\\optionlistlabel}[1]{\\bf #1 \\hfill}\n' '\\newenvironment{optionlist}[1]\n' '{\\begin{list}{}\n' ' {\\setlength{\\labelwidth}{#1}\n' ' \\setlength{\\rightmargin}{1cm}\n' ' \\setlength{\\leftmargin}{\\rightmargin}\n' ' \\addtolength{\\leftmargin}{\\labelwidth}\n' ' \\addtolength{\\leftmargin}{\\labelsep}\n' ' \\renewcommand{\\makelabel}{\\optionlistlabel}}\n' '}{\\end{list}}\n', ], 'lineblock_environment' : [ '\\newlength{\\lineblockindentation}\n' '\\setlength{\\lineblockindentation}{2.5em}\n' '\\newenvironment{lineblock}[1]\n' '{\\begin{list}{}\n' ' {\\setlength{\\partopsep}{\\parskip}\n' ' \\addtolength{\\partopsep}{\\baselineskip}\n' ' \\topsep0pt\\itemsep0.15\\baselineskip\\parsep0pt\n' ' \\leftmargin#1}\n' ' \\raggedright}\n' '{\\end{list}}\n' ], 'footnote_floats' : [ '% begin: floats for footnotes tweaking.\n', '\\setlength{\\floatsep}{0.5em}\n', '\\setlength{\\textfloatsep}{\\fill}\n', '\\addtolength{\\textfloatsep}{3em}\n', '\\renewcommand{\\textfraction}{0.5}\n', '\\renewcommand{\\topfraction}{0.5}\n', '\\renewcommand{\\bottomfraction}{0.5}\n', '\\setcounter{totalnumber}{50}\n', '\\setcounter{topnumber}{50}\n', '\\setcounter{bottomnumber}{50}\n', '% end floats for footnotes\n', ], 'some_commands' : [ '% some commands, that could be overwritten in the style file.\n' '\\newcommand{\\rubric}[1]' '{\\subsection*{~\\hfill {\\it #1} \\hfill ~}}\n' '\\newcommand{\\titlereference}[1]{\\textsl{#1}}\n' '% end of "some commands"\n', ] } class DocumentClass: """Details of a LaTeX document class.""" def __init__(self, document_class, with_part=False): self.document_class = document_class self._with_part = with_part def section(self, level): """ Return the section name at the given level for the specific document class. Level is 1,2,3..., as level 0 is the title.""" sections = [ 'section', 'subsection', 'subsubsection', 'paragraph', 'subparagraph' ] if self.document_class in ('book', 'report', 'scrreprt', 'scrbook'): sections.insert(0, 'chapter') if self._with_part: sections.insert(0, 'part') if level <= len(sections): return sections[level-1] else: return sections[-1] class Table: """ Manage a table while traversing. Maybe change to a mixin defining the visit/departs, but then class Table internal variables are in the Translator. 
Table style might be * standard: horizontal and vertical lines * booktabs (requires booktabs latex package): only horizontal lines * nolines, borderless : no lines """ def __init__(self,latex_type,table_style): self._latex_type = latex_type self._table_style = table_style self._open = 0 # miscellaneous attributes self._attrs = {} self._col_width = [] self._rowspan = [] self.stubs = [] def open(self): self._open = 1 self._col_specs = [] self.caption = None self._attrs = {} self._in_head = 0 # maybe context with search def close(self): self._open = 0 self._col_specs = None self.caption = None self._attrs = {} self.stubs = [] def is_open(self): return self._open def set_table_style(self, table_style): if not table_style in ('standard','booktabs','borderless','nolines'): return self._table_style = table_style def used_packages(self): if self._table_style == 'booktabs': return '\\usepackage{booktabs}\n' return '' def get_latex_type(self): return self._latex_type def set(self,attr,value): self._attrs[attr] = value def get(self,attr): if self._attrs.has_key(attr): return self._attrs[attr] return None def get_vertical_bar(self): if self._table_style == 'standard': return '|' return '' # horizontal lines are drawn below a row, because we. def get_opening(self): if self._latex_type == 'longtable': # otherwise longtable might move before paragraph and subparagraph prefix = '\\leavevmode\n' else: prefix = '' return '%s\\begin{%s}[c]' % (prefix, self._latex_type) def get_closing(self): line = "" if self._table_style == 'booktabs': line = '\\bottomrule\n' elif self._table_style == 'standard': lines = '\\hline\n' return '%s\\end{%s}' % (line,self._latex_type) def visit_colspec(self, node): self._col_specs.append(node) # "stubs" list is an attribute of the tgroup element: self.stubs.append(node.attributes.get('stub')) def get_colspecs(self): """ Return column specification for longtable. Assumes reST line length being 80 characters. Table width is hairy. === === ABC DEF === === usually gets to narrow, therefore we add 1 (fiddlefactor). """ width = 80 total_width = 0.0 # first see if we get too wide. 
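        # Added illustration (numbers are hypothetical, not from the source):
        # two reST columns of colwidth 20 and 60 give
        # (20+1)/80 + (60+1)/80 = 1.025 > 1.0,
        # so factor becomes 0.93/1.025 and both columns are scaled down to fit.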
for node in self._col_specs: colwidth = float(node['colwidth']+1) / width total_width += colwidth self._col_width = [] self._rowspan = [] # donot make it full linewidth factor = 0.93 if total_width > 1.0: factor /= total_width bar = self.get_vertical_bar() latex_table_spec = "" for node in self._col_specs: colwidth = factor * float(node['colwidth']+1) / width self._col_width.append(colwidth+0.005) self._rowspan.append(0) latex_table_spec += "%sp{%.3f\\locallinewidth}" % (bar,colwidth+0.005) return latex_table_spec+bar def get_column_width(self): """ return columnwidth for current cell (not multicell) """ return "%.2f\\locallinewidth" % self._col_width[self._cell_in_row-1] def visit_thead(self): self._in_thead = 1 if self._table_style == 'standard': return ['\\hline\n'] elif self._table_style == 'booktabs': return ['\\toprule\n'] return [] def depart_thead(self): a = [] #if self._table_style == 'standard': # a.append('\\hline\n') if self._table_style == 'booktabs': a.append('\\midrule\n') if self._latex_type == 'longtable': a.append('\\endhead\n') # for longtable one could add firsthead, foot and lastfoot self._in_thead = 0 return a def visit_row(self): self._cell_in_row = 0 def depart_row(self): res = [' \\\\\n'] self._cell_in_row = None # remove cell counter for i in range(len(self._rowspan)): if (self._rowspan[i]>0): self._rowspan[i] -= 1 if self._table_style == 'standard': rowspans = [] for i in range(len(self._rowspan)): if (self._rowspan[i]<=0): rowspans.append(i+1) if len(rowspans)==len(self._rowspan): res.append('\\hline\n') else: cline = '' rowspans.reverse() # TODO merge clines while 1: try: c_start = rowspans.pop() except: break cline += '\\cline{%d-%d}\n' % (c_start,c_start) res.append(cline) return res def set_rowspan(self,cell,value): try: self._rowspan[cell] = value except: pass def get_rowspan(self,cell): try: return self._rowspan[cell] except: return 0 def get_entry_number(self): return self._cell_in_row def visit_entry(self): self._cell_in_row += 1 def is_stub_column(self): if len(self.stubs) >= self._cell_in_row: return self.stubs[self._cell_in_row-1] return False class LaTeXTranslator(nodes.NodeVisitor): # When options are given to the documentclass, latex will pass them # to other packages, as done with babel. # Dummy settings might be taken from document settings # Templates # --------- latex_head = '\\documentclass[%s]{%s}\n' linking = "\\ifthenelse{\\isundefined{\\hypersetup}}{\n" \ +"\\usepackage[colorlinks=%s,linkcolor=%s,urlcolor=%s]{hyperref}\n" \ +"}{}\n" stylesheet = '\\input{%s}\n' # add a generated on day , machine by user using docutils version. generator = '% generated by Docutils <http://docutils.sourceforge.net/>\n' # Config setting defaults # ----------------------- # use latex tableofcontents or let docutils do it. use_latex_toc = 0 # TODO: use mixins for different implementations. # list environment for docinfo. else tabularx use_optionlist_for_docinfo = 0 # NOT YET IN USE # Use compound enumerations (1.A.1.) compound_enumerators = 0 # If using compound enumerations, include section information. section_prefix_for_enumerators = 0 # This is the character that separates the section ("." subsection ...) # prefix from the regular list enumerator. 
section_enumerator_separator = '-' # default link color hyperlink_color = "blue" def __init__(self, document): nodes.NodeVisitor.__init__(self, document) self.settings = settings = document.settings self.latex_encoding = self.to_latex_encoding(settings.output_encoding) self.use_latex_toc = settings.use_latex_toc self.use_latex_docinfo = settings.use_latex_docinfo self.use_latex_footnotes = settings.use_latex_footnotes self._use_latex_citations = settings.use_latex_citations self._reference_label = settings.reference_label self.hyperlink_color = settings.hyperlink_color self.compound_enumerators = settings.compound_enumerators self.font_encoding = settings.font_encoding self.section_prefix_for_enumerators = ( settings.section_prefix_for_enumerators) self.section_enumerator_separator = ( settings.section_enumerator_separator.replace('_', '\\_')) if self.hyperlink_color == '0': self.hyperlink_color = 'black' self.colorlinks = 'false' else: self.colorlinks = 'true' if self.settings.use_bibtex: self.bibtex = self.settings.use_bibtex.split(",",1) # TODO avoid errors on not declared citations. else: self.bibtex = None # language: labels, bibliographic_fields, and author_separators. # to allow writing labes for specific languages. self.language = languages.get_language(settings.language_code) self.babel = Babel(settings.language_code) self.author_separator = self.language.author_separators[0] self.d_options = self.settings.documentoptions if self.babel.get_language(): self.d_options += ',%s' % self.babel.get_language() self.d_class = DocumentClass(settings.documentclass, settings.use_part_section) # object for a table while proccessing. self.table_stack = [] self.active_table = Table('longtable',settings.table_style) # HACK. Should have more sophisticated typearea handling. if settings.documentclass.find('scr') == -1: self.typearea = '\\usepackage[DIV12]{typearea}\n' else: if self.d_options.find('DIV') == -1 and self.d_options.find('BCOR') == -1: self.typearea = '\\typearea{12}\n' else: self.typearea = '' if self.font_encoding == 'OT1': fontenc_header = '' elif self.font_encoding == '': fontenc_header = '\\usepackage{ae}\n\\usepackage{aeguill}\n' else: fontenc_header = '\\usepackage[%s]{fontenc}\n' % (self.font_encoding,) if self.latex_encoding.startswith('utf8'): input_encoding = '\\usepackage{ucs}\n\\usepackage[utf8x]{inputenc}\n' else: input_encoding = '\\usepackage[%s]{inputenc}\n' % self.latex_encoding if self.settings.graphicx_option == '': self.graphicx_package = '\\usepackage{graphicx}\n' elif self.settings.graphicx_option.lower() == 'auto': self.graphicx_package = '\n'.join( ('%Check if we are compiling under latex or pdflatex', '\\ifx\\pdftexversion\\undefined', ' \\usepackage{graphicx}', '\\else', ' \\usepackage[pdftex]{graphicx}', '\\fi\n')) else: self.graphicx_package = ( '\\usepackage[%s]{graphicx}\n' % self.settings.graphicx_option) self.head_prefix = [ self.latex_head % (self.d_options,self.settings.documentclass), '\\usepackage{babel}\n', # language is in documents settings. fontenc_header, '\\usepackage{shortvrb}\n', # allows verb in footnotes. input_encoding, # * tabularx: for docinfo, automatic width of columns, always on one page. '\\usepackage{tabularx}\n', '\\usepackage{longtable}\n', self.active_table.used_packages(), # possible other packages. # * fancyhdr # * ltxtable is a combination of tabularx and longtable (pagebreaks). # but ?? 
# # extra space between text in tables and the line above them '\\setlength{\\extrarowheight}{2pt}\n', '\\usepackage{amsmath}\n', # what fore amsmath. self.graphicx_package, '\\usepackage{color}\n', '\\usepackage{multirow}\n', '\\usepackage{ifthen}\n', # before hyperref! self.typearea, self.generator, # latex lengths '\\newlength{\\admonitionwidth}\n', '\\setlength{\\admonitionwidth}{0.9\\textwidth}\n' # width for docinfo tablewidth '\\newlength{\\docinfowidth}\n', '\\setlength{\\docinfowidth}{0.9\\textwidth}\n' # linewidth of current environment, so tables are not wider # than the sidebar: using locallinewidth seems to defer evaluation # of linewidth, this is fixing it. '\\newlength{\\locallinewidth}\n', # will be set later. ] self.head_prefix.extend( latex_headings['optionlist_environment'] ) self.head_prefix.extend( latex_headings['lineblock_environment'] ) self.head_prefix.extend( latex_headings['footnote_floats'] ) self.head_prefix.extend( latex_headings['some_commands'] ) ## stylesheet is last: so it might be possible to overwrite defaults. stylesheet = utils.get_stylesheet_reference(settings) if stylesheet: settings.record_dependencies.add(stylesheet) self.head_prefix.append(self.stylesheet % (stylesheet)) # hyperref after stylesheet # TODO conditionally if no hyperref is used dont include self.head_prefix.append( self.linking % ( self.colorlinks, self.hyperlink_color, self.hyperlink_color)) # if self.settings.literal_block_env != '': self.settings.use_verbatim_when_possible = True if self.linking: # and maybe check for pdf self.pdfinfo = [ ] self.pdfauthor = None # pdftitle, pdfsubject, pdfauthor, pdfkeywords, # pdfcreator, pdfproducer else: self.pdfinfo = None # NOTE: Latex wants a date and an author, rst puts this into # docinfo, so normally we do not want latex author/date handling. # latex article has its own handling of date and author, deactivate. # self.astext() adds \title{...} \author{...} \date{...}, even if the # "..." are empty strings. self.head = [ ] # separate title, so we can appen subtitle. self.title = '' # if use_latex_docinfo: collects lists of author/organization/contact/address lines self.author_stack = [] self.date = '' self.body_prefix = ['\\raggedbottom\n'] self.body = [] self.body_suffix = ['\n'] self.section_level = 0 self.context = [] self.topic_classes = [] # column specification for tables self.table_caption = None # Flags to encode # --------------- # verbatim: to tell encode not to encode. self.verbatim = 0 # insert_newline: to tell encode to replace blanks by "~". self.insert_none_breaking_blanks = 0 # insert_newline: to tell encode to add latex newline. self.insert_newline = 0 # mbox_newline: to tell encode to add mbox and newline. self.mbox_newline = 0 # inside citation reference labels underscores dont need to be escaped. self.inside_citation_reference_label = 0 # Stack of section counters so that we don't have to use_latex_toc. # This will grow and shrink as processing occurs. # Initialized for potential first-level sections. self._section_number = [0] # The current stack of enumerations so that we can expand # them into a compound enumeration. self._enumeration_counters = [] # The maximum number of enumeration counters we've used. # If we go beyond this number, we need to create a new # counter; otherwise, just reuse an old one. self._max_enumeration_counters = 0 self._bibitems = [] # docinfo. self.docinfo = None # inside literal block: no quote mangling. 
self.literal_block = 0 self.literal_block_stack = [] self.literal = 0 # true when encoding in math mode self.mathmode = 0 def to_latex_encoding(self,docutils_encoding): """ Translate docutils encoding name into latex's. Default fallback method is remove "-" and "_" chars from docutils_encoding. """ tr = { "iso-8859-1": "latin1", # west european "iso-8859-2": "latin2", # east european "iso-8859-3": "latin3", # esperanto, maltese "iso-8859-4": "latin4", # north european,scandinavian, baltic "iso-8859-5": "iso88595", # cyrillic (ISO) "iso-8859-9": "latin5", # turkish "iso-8859-15": "latin9", # latin9, update to latin1. "mac_cyrillic": "maccyr", # cyrillic (on Mac) "windows-1251": "cp1251", # cyrillic (on Windows) "koi8-r": "koi8-r", # cyrillic (Russian) "koi8-u": "koi8-u", # cyrillic (Ukrainian) "windows-1250": "cp1250", # "windows-1252": "cp1252", # "us-ascii": "ascii", # ASCII (US) # unmatched encodings #"": "applemac", #"": "ansinew", # windows 3.1 ansi #"": "ascii", # ASCII encoding for the range 32--127. #"": "cp437", # dos latine us #"": "cp850", # dos latin 1 #"": "cp852", # dos latin 2 #"": "decmulti", #"": "latin10", #"iso-8859-6": "" # arabic #"iso-8859-7": "" # greek #"iso-8859-8": "" # hebrew #"iso-8859-10": "" # latin6, more complete iso-8859-4 } if tr.has_key(docutils_encoding.lower()): return tr[docutils_encoding.lower()] # convert: latin-1 and utf-8 and similar things return docutils_encoding.replace("_", "").replace("-", "").lower() def language_label(self, docutil_label): return self.language.labels[docutil_label] latex_equivalents = { u'\u00A0' : '~', u'\u2013' : '{--}', u'\u2014' : '{---}', u'\u2018' : '`', u'\u2019' : '\'', u'\u201A' : ',', u'\u201C' : '``', u'\u201D' : '\'\'', u'\u201E' : ',,', u'\u2020' : '{\\dag}', u'\u2021' : '{\\ddag}', u'\u2026' : '{\\dots}', u'\u2122' : '{\\texttrademark}', u'\u21d4' : '{$\\Leftrightarrow$}', # greek alphabet ? } def unicode_to_latex(self,text): # see LaTeX codec # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252124 # Only some special chracters are translated, for documents with many # utf-8 chars one should use the LaTeX unicode package. for uchar in self.latex_equivalents.keys(): text = text.replace(uchar,self.latex_equivalents[uchar]) return text def ensure_math(self, text): if not self.__dict__.has_key('ensure_math_re'): chars = { # lnot,pm,twosuperior,threesuperior,mu,onesuperior,times,div 'latin1' : '\xac\xb1\xb2\xb3\xb5\xb9\xd7\xf7' , # also latin5 and latin9 } self.ensure_math_re = re.compile('([%s])' % chars['latin1']) text = self.ensure_math_re.sub(r'\\ensuremath{\1}', text) return text def encode(self, text): """ Encode special characters (``# $ % & ~ _ ^ \ { }``) in `text` & return """ # Escaping with a backslash does not help with backslashes, ~ and ^. # < > are only available in math-mode or tt font. (really ?) # $ starts math- mode. # AND quotes if self.verbatim: return text # compile the regexps once. do it here so one can see them. # # first the braces. if not self.__dict__.has_key('encode_re_braces'): self.encode_re_braces = re.compile(r'([{}])') text = self.encode_re_braces.sub(r'{\\\1}',text) if not self.__dict__.has_key('encode_re_bslash'): # find backslash: except in the form '{\{}' or '{\}}'. self.encode_re_bslash = re.compile(r'(?<!{)(\\)(?![{}]})') # then the backslash: except in the form from line above: # either '{\{}' or '{\}}'. 
text = self.encode_re_bslash.sub(r'{\\textbackslash}', text) # then dollar text = text.replace("$", '{\\$}') if not ( self.literal_block or self.literal or self.mathmode ): # the vertical bar: in mathmode |,\vert or \mid # in textmode \textbar text = text.replace("|", '{\\textbar}') text = text.replace("<", '{\\textless}') text = text.replace(">", '{\\textgreater}') # then text = text.replace("&", '{\\&}') # the ^: # * verb|^| does not work in mbox. # * mathmode has wedge. hat{~} would also work. # text = text.replace("^", '{\\ensuremath{^\\wedge}}') text = text.replace("^", '{\\textasciicircum}') text = text.replace("%", '{\\%}') text = text.replace("#", '{\\#}') text = text.replace("~", '{\\textasciitilde}') # Separate compound characters, e.g. "--" to "-{}-". (The # actual separation is done later; see below.) separate_chars = '-' if self.literal_block or self.literal: # In monospace-font, we also separate ",,", "``" and "''" # and some other characters which can't occur in # non-literal text. separate_chars += ',`\'"<>' # pdflatex does not produce doublequotes for ngerman. text = self.babel.double_quotes_in_tt(text) if self.font_encoding == 'OT1': # We're using OT1 font-encoding and have to replace # underscore by underlined blank, because this has # correct width. text = text.replace('_', '{\\underline{ }}') # And the tt-backslash doesn't work in OT1, so we use # a mirrored slash. text = text.replace('\\textbackslash', '\\reflectbox{/}') else: text = text.replace('_', '{\\_}') else: text = self.babel.quote_quotes(text) if not self.inside_citation_reference_label: text = text.replace("_", '{\\_}') for char in separate_chars * 2: # Do it twice ("* 2") becaues otherwise we would replace # "---" by "-{}--". text = text.replace(char + char, char + '{}' + char) if self.insert_newline or self.literal_block: # Insert a blank before the newline, to avoid # ! LaTeX Error: There's no line here to end. 
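            # Added illustration: with this replacement, 'a<newline>b' is
            # emitted as 'a~\\' followed by a source newline and 'b', i.e. a
            # non-breaking blank plus a LaTeX line break.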
text = text.replace("\n", '~\\\\\n') elif self.mbox_newline: # TODO dead code: remove after 0.5 release if self.literal_block: closings = "}" * len(self.literal_block_stack) openings = "".join(self.literal_block_stack) else: closings = "" openings = "" text = text.replace("\n", "%s}\\\\\n\\mbox{%s" % (closings,openings)) text = text.replace('[', '{[}').replace(']', '{]}') if self.insert_none_breaking_blanks: text = text.replace(' ', '~') if self.latex_encoding != 'utf8': text = self.unicode_to_latex(text) text = self.ensure_math(text) return text def literal_block_env(self, begin_or_end): env = 'verbatim' opt = '' if self.settings.literal_block_env != '': (none, env, opt, none) = re.split("(\w+)(.*)", self.settings.literal_block_env) if begin_or_end == 'begin': return '\\begin{%s}%s\n' % (env, opt) return '\n\\end{%s}\n' % (env, ) def attval(self, text, whitespace=re.compile('[\n\r\t\v\f]')): """Cleanse, encode, and return attribute value text.""" return self.encode(whitespace.sub(' ', text)) def astext(self): if self.pdfinfo is not None and self.pdfauthor: self.pdfinfo.append('pdfauthor={%s}' % self.pdfauthor) if self.pdfinfo: pdfinfo = '\\hypersetup{\n' + ',\n'.join(self.pdfinfo) + '\n}\n' else: pdfinfo = '' head = '\\title{%s}\n\\author{%s}\n\\date{%s}\n' % \ (self.title, ' \\and\n'.join(['~\\\\\n'.join(author_lines) for author_lines in self.author_stack]), self.date) return ''.join(self.head_prefix + [head] + self.head + [pdfinfo] + self.body_prefix + self.body + self.body_suffix) def visit_Text(self, node): self.body.append(self.encode(node.astext())) def depart_Text(self, node): pass def visit_address(self, node): self.visit_docinfo_item(node, 'address') def depart_address(self, node): self.depart_docinfo_item(node) def visit_admonition(self, node, name=''): self.body.append('\\begin{center}\\begin{sffamily}\n') self.body.append('\\fbox{\\parbox{\\admonitionwidth}{\n') if name: self.body.append('\\textbf{\\large '+ self.language.labels[name] + '}\n'); self.body.append('\\vspace{2mm}\n') def depart_admonition(self, node=None): self.body.append('}}\n') # end parbox fbox self.body.append('\\end{sffamily}\n\\end{center}\n'); def visit_attention(self, node): self.visit_admonition(node, 'attention') def depart_attention(self, node): self.depart_admonition() def visit_author(self, node): self.visit_docinfo_item(node, 'author') def depart_author(self, node): self.depart_docinfo_item(node) def visit_authors(self, node): # not used: visit_author is called anyway for each author. pass def depart_authors(self, node): pass def visit_block_quote(self, node): self.body.append( '\\begin{quote}\n') def depart_block_quote(self, node): self.body.append( '\\end{quote}\n') def visit_bullet_list(self, node): if 'contents' in self.topic_classes: if self.use_latex_toc: raise nodes.SkipNode self.body.append( '\\begin{list}{}{}\n' ) else: self.body.append( '\\begin{itemize}\n' ) def depart_bullet_list(self, node): if 'contents' in self.topic_classes: self.body.append( '\\end{list}\n' ) else: self.body.append( '\\end{itemize}\n' ) # Imperfect superscript/subscript handling: mathmode italicizes # all letters by default. 
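    # Added note: e.g. :sub:`max` comes out as $_{max}$ below, so alphabetic
    # subscripts are typeset in math italics rather than upright text.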
def visit_superscript(self, node): self.body.append('$^{') self.mathmode = 1 def depart_superscript(self, node): self.body.append('}$') self.mathmode = 0 def visit_subscript(self, node): self.body.append('$_{') self.mathmode = 1 def depart_subscript(self, node): self.body.append('}$') self.mathmode = 0 def visit_caption(self, node): self.body.append( '\\caption{' ) def depart_caption(self, node): self.body.append('}') def visit_caution(self, node): self.visit_admonition(node, 'caution') def depart_caution(self, node): self.depart_admonition() def visit_title_reference(self, node): self.body.append( '\\titlereference{' ) def depart_title_reference(self, node): self.body.append( '}' ) def visit_citation(self, node): # TODO maybe use cite bibitems if self._use_latex_citations: self.context.append(len(self.body)) else: self.body.append('\\begin{figure}[b]') for id in node['ids']: self.body.append('\\hypertarget{%s}' % id) def depart_citation(self, node): if self._use_latex_citations: size = self.context.pop() label = self.body[size] text = ''.join(self.body[size+1:]) del self.body[size:] self._bibitems.append([label, text]) else: self.body.append('\\end{figure}\n') def visit_citation_reference(self, node): if self._use_latex_citations: self.body.append('\\cite{') self.inside_citation_reference_label = 1 else: href = '' if node.has_key('refid'): href = node['refid'] elif node.has_key('refname'): href = self.document.nameids[node['refname']] self.body.append('[\\hyperlink{%s}{' % href) def depart_citation_reference(self, node): if self._use_latex_citations: self.body.append('}') self.inside_citation_reference_label = 0 else: self.body.append('}]') def visit_classifier(self, node): self.body.append( '(\\textbf{' ) def depart_classifier(self, node): self.body.append( '})\n' ) def visit_colspec(self, node): self.active_table.visit_colspec(node) def depart_colspec(self, node): pass def visit_comment(self, node): # Escape end of line by a new comment start in comment text. 
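        # Added illustration: a comment node 'foo<newline>bar' is emitted as
        # '%% foo' with '% bar' on the following source line, so every line
        # stays commented out in the generated .tex.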
self.body.append('%% %s \n' % node.astext().replace('\n', '\n% ')) raise nodes.SkipNode def visit_compound(self, node): pass def depart_compound(self, node): pass def visit_contact(self, node): self.visit_docinfo_item(node, 'contact') def depart_contact(self, node): self.depart_docinfo_item(node) def visit_container(self, node): pass def depart_container(self, node): pass def visit_copyright(self, node): self.visit_docinfo_item(node, 'copyright') def depart_copyright(self, node): self.depart_docinfo_item(node) def visit_danger(self, node): self.visit_admonition(node, 'danger') def depart_danger(self, node): self.depart_admonition() def visit_date(self, node): self.visit_docinfo_item(node, 'date') def depart_date(self, node): self.depart_docinfo_item(node) def visit_decoration(self, node): pass def depart_decoration(self, node): pass def visit_definition(self, node): pass def depart_definition(self, node): self.body.append('\n') def visit_definition_list(self, node): self.body.append( '\\begin{description}\n' ) def depart_definition_list(self, node): self.body.append( '\\end{description}\n' ) def visit_definition_list_item(self, node): pass def depart_definition_list_item(self, node): pass def visit_description(self, node): self.body.append( ' ' ) def depart_description(self, node): pass def visit_docinfo(self, node): self.docinfo = [] self.docinfo.append('%' + '_'*75 + '\n') self.docinfo.append('\\begin{center}\n') self.docinfo.append('\\begin{tabularx}{\\docinfowidth}{lX}\n') def depart_docinfo(self, node): self.docinfo.append('\\end{tabularx}\n') self.docinfo.append('\\end{center}\n') self.body = self.docinfo + self.body # clear docinfo, so field names are no longer appended. self.docinfo = None def visit_docinfo_item(self, node, name): if name == 'author': if not self.pdfinfo == None: if not self.pdfauthor: self.pdfauthor = self.attval(node.astext()) else: self.pdfauthor += self.author_separator + self.attval(node.astext()) if self.use_latex_docinfo: if name in ('author', 'organization', 'contact', 'address'): # We attach these to the last author. If any of them precedes # the first author, put them in a separate "author" group (for # no better semantics). if name == 'author' or not self.author_stack: self.author_stack.append([]) if name == 'address': # newlines are meaningful self.insert_newline = 1 text = self.encode(node.astext()) self.insert_newline = 0 else: text = self.attval(node.astext()) self.author_stack[-1].append(text) raise nodes.SkipNode elif name == 'date': self.date = self.attval(node.astext()) raise nodes.SkipNode self.docinfo.append('\\textbf{%s}: &\n\t' % self.language_label(name)) if name == 'address': self.insert_newline = 1 self.docinfo.append('{\\raggedright\n') self.context.append(' } \\\\\n') else: self.context.append(' \\\\\n') self.context.append(self.docinfo) self.context.append(len(self.body)) def depart_docinfo_item(self, node): size = self.context.pop() dest = self.context.pop() tail = self.context.pop() tail = self.body[size:] + [tail] del self.body[size:] dest.extend(tail) # for address we did set insert_newline self.insert_newline = 0 def visit_doctest_block(self, node): self.body.append( '\\begin{verbatim}' ) self.verbatim = 1 def depart_doctest_block(self, node): self.body.append( '\\end{verbatim}\n' ) self.verbatim = 0 def visit_document(self, node): self.body_prefix.append('\\begin{document}\n') # titled document? 
if self.use_latex_docinfo or len(node) and isinstance(node[0], nodes.title): self.body_prefix.append('\\maketitle\n') # alternative use titlepage environment. # \begin{titlepage} # ... self.body.append('\n\\setlength{\\locallinewidth}{\\linewidth}\n') def depart_document(self, node): # TODO insertion point of bibliography should none automatic. if self._use_latex_citations and len(self._bibitems)>0: if not self.bibtex: widest_label = "" for bi in self._bibitems: if len(widest_label)<len(bi[0]): widest_label = bi[0] self.body.append('\n\\begin{thebibliography}{%s}\n'%widest_label) for bi in self._bibitems: # cite_key: underscores must not be escaped cite_key = bi[0].replace(r"{\_}","_") self.body.append('\\bibitem[%s]{%s}{%s}\n' % (bi[0], cite_key, bi[1])) self.body.append('\\end{thebibliography}\n') else: self.body.append('\n\\bibliographystyle{%s}\n' % self.bibtex[0]) self.body.append('\\bibliography{%s}\n' % self.bibtex[1]) self.body_suffix.append('\\end{document}\n') def visit_emphasis(self, node): self.body.append('\\emph{') self.literal_block_stack.append('\\emph{') def depart_emphasis(self, node): self.body.append('}') self.literal_block_stack.pop() def visit_entry(self, node): self.active_table.visit_entry() # cell separation if self.active_table.get_entry_number() == 1: # if the firstrow is a multirow, this actually is the second row. # this gets hairy if rowspans follow each other. if self.active_table.get_rowspan(0): count = 0 while self.active_table.get_rowspan(count): count += 1 self.body.append(' & ') self.active_table.visit_entry() # increment cell count else: self.body.append(' & ') # multi{row,column} # IN WORK BUG TODO HACK continues here # multirow in LaTeX simply will enlarge the cell over several rows # (the following n if n is positive, the former if negative). if node.has_key('morerows') and node.has_key('morecols'): raise NotImplementedError('Cells that ' 'span multiple rows *and* columns are not supported, sorry.') if node.has_key('morerows'): count = node['morerows'] + 1 self.active_table.set_rowspan(self.active_table.get_entry_number()-1,count) self.body.append('\\multirow{%d}{%s}{' % \ (count,self.active_table.get_column_width())) self.context.append('}') # BUG following rows must have empty cells. elif node.has_key('morecols'): # the vertical bar before column is missing if it is the first column. # the one after always. if self.active_table.get_entry_number() == 1: bar1 = self.active_table.get_vertical_bar() else: bar1 = '' count = node['morecols'] + 1 self.body.append('\\multicolumn{%d}{%sl%s}{' % \ (count, bar1, self.active_table.get_vertical_bar())) self.context.append('}') else: self.context.append('') # header / not header if isinstance(node.parent.parent, nodes.thead): self.body.append('\\textbf{') self.context.append('}') elif self.active_table.is_stub_column(): self.body.append('\\textbf{') self.context.append('}') else: self.context.append('') def depart_entry(self, node): self.body.append(self.context.pop()) # header / not header self.body.append(self.context.pop()) # multirow/column # if following row is spanned from above. if self.active_table.get_rowspan(self.active_table.get_entry_number()): self.body.append(' & ') self.active_table.visit_entry() # increment cell count def visit_row(self, node): self.active_table.visit_row() def depart_row(self, node): self.body.extend(self.active_table.depart_row()) def visit_enumerated_list(self, node): # We create our own enumeration list environment. 
# This allows to set the style and starting value # and unlimited nesting. enum_style = {'arabic':'arabic', 'loweralpha':'alph', 'upperalpha':'Alph', 'lowerroman':'roman', 'upperroman':'Roman' } enum_suffix = "" if node.has_key('suffix'): enum_suffix = node['suffix'] enum_prefix = "" if node.has_key('prefix'): enum_prefix = node['prefix'] if self.compound_enumerators: pref = "" if self.section_prefix_for_enumerators and self.section_level: for i in range(self.section_level): pref += '%d.' % self._section_number[i] pref = pref[:-1] + self.section_enumerator_separator enum_prefix += pref for ctype, cname in self._enumeration_counters: enum_prefix += '\\%s{%s}.' % (ctype, cname) enum_type = "arabic" if node.has_key('enumtype'): enum_type = node['enumtype'] if enum_style.has_key(enum_type): enum_type = enum_style[enum_type] counter_name = "listcnt%d" % len(self._enumeration_counters) self._enumeration_counters.append((enum_type, counter_name)) # If we haven't used this counter name before, then create a # new counter; otherwise, reset & reuse the old counter. if len(self._enumeration_counters) > self._max_enumeration_counters: self._max_enumeration_counters = len(self._enumeration_counters) self.body.append('\\newcounter{%s}\n' % counter_name) else: self.body.append('\\setcounter{%s}{0}\n' % counter_name) self.body.append('\\begin{list}{%s\\%s{%s}%s}\n' % \ (enum_prefix,enum_type,counter_name,enum_suffix)) self.body.append('{\n') self.body.append('\\usecounter{%s}\n' % counter_name) # set start after usecounter, because it initializes to zero. if node.has_key('start'): self.body.append('\\addtocounter{%s}{%d}\n' \ % (counter_name,node['start']-1)) ## set rightmargin equal to leftmargin self.body.append('\\setlength{\\rightmargin}{\\leftmargin}\n') self.body.append('}\n') def depart_enumerated_list(self, node): self.body.append('\\end{list}\n') self._enumeration_counters.pop() def visit_error(self, node): self.visit_admonition(node, 'error') def depart_error(self, node): self.depart_admonition() def visit_field(self, node): # real output is done in siblings: _argument, _body, _name pass def depart_field(self, node): self.body.append('\n') ##self.body.append('%[depart_field]\n') def visit_field_argument(self, node): self.body.append('%[visit_field_argument]\n') def depart_field_argument(self, node): self.body.append('%[depart_field_argument]\n') def visit_field_body(self, node): # BUG by attach as text we loose references. if self.docinfo: self.docinfo.append('%s \\\\\n' % self.encode(node.astext())) raise nodes.SkipNode # BUG: what happens if not docinfo def depart_field_body(self, node): self.body.append( '\n' ) def visit_field_list(self, node): if not self.docinfo: self.body.append('\\begin{quote}\n') self.body.append('\\begin{description}\n') def depart_field_list(self, node): if not self.docinfo: self.body.append('\\end{description}\n') self.body.append('\\end{quote}\n') def visit_field_name(self, node): # BUG this duplicates docinfo_item if self.docinfo: self.docinfo.append('\\textbf{%s}: &\n\t' % self.encode(node.astext())) raise nodes.SkipNode else: self.body.append('\\item [') def depart_field_name(self, node): if not self.docinfo: self.body.append(':]') def visit_figure(self, node): if (not node.attributes.has_key('align') or node.attributes['align'] == 'center'): # centering does not add vertical space like center. align = '\n\\centering' align_end = '' else: # TODO non vertical space for other alignments. 
align = '\\begin{flush%s}' % node.attributes['align'] align_end = '\\end{flush%s}' % node.attributes['align'] self.body.append( '\\begin{figure}[htbp]%s\n' % align ) self.context.append( '%s\\end{figure}\n' % align_end ) def depart_figure(self, node): self.body.append( self.context.pop() ) def visit_footer(self, node): self.context.append(len(self.body)) def depart_footer(self, node): start = self.context.pop() footer = (['\n\\begin{center}\small\n'] + self.body[start:] + ['\n\\end{center}\n']) self.body_suffix[:0] = footer del self.body[start:] def visit_footnote(self, node): if self.use_latex_footnotes: num,text = node.astext().split(None,1) num = self.encode(num.strip()) self.body.append('\\footnotetext['+num+']') self.body.append('{') else: self.body.append('\\begin{figure}[b]') for id in node['ids']: self.body.append('\\hypertarget{%s}' % id) def depart_footnote(self, node): if self.use_latex_footnotes: self.body.append('}\n') else: self.body.append('\\end{figure}\n') def visit_footnote_reference(self, node): if self.use_latex_footnotes: self.body.append("\\footnotemark["+self.encode(node.astext())+"]") raise nodes.SkipNode href = '' if node.has_key('refid'): href = node['refid'] elif node.has_key('refname'): href = self.document.nameids[node['refname']] format = self.settings.footnote_references if format == 'brackets': suffix = '[' self.context.append(']') elif format == 'superscript': suffix = '\\raisebox{.5em}[0em]{\\scriptsize' self.context.append('}') else: # shouldn't happen raise AssertionError('Illegal footnote reference format.') self.body.append('%s\\hyperlink{%s}{' % (suffix,href)) def depart_footnote_reference(self, node): if self.use_latex_footnotes: return self.body.append('}%s' % self.context.pop()) # footnote/citation label def label_delim(self, node, bracket, superscript): if isinstance(node.parent, nodes.footnote): if self.use_latex_footnotes: raise nodes.SkipNode if self.settings.footnote_references == 'brackets': self.body.append(bracket) else: self.body.append(superscript) else: assert isinstance(node.parent, nodes.citation) if not self._use_latex_citations: self.body.append(bracket) def visit_label(self, node): self.label_delim(node, '[', '$^{') def depart_label(self, node): self.label_delim(node, ']', '}$') # elements generated by the framework e.g. section numbers. def visit_generated(self, node): pass def depart_generated(self, node): pass def visit_header(self, node): self.context.append(len(self.body)) def depart_header(self, node): start = self.context.pop() self.body_prefix.append('\n\\verb|begin_header|\n') self.body_prefix.extend(self.body[start:]) self.body_prefix.append('\n\\verb|end_header|\n') del self.body[start:] def visit_hint(self, node): self.visit_admonition(node, 'hint') def depart_hint(self, node): self.depart_admonition() def latex_image_length(self, width_str): match = re.match('(\d*\.?\d*)\s*(\S*)', width_str) if not match: # fallback return width_str res = width_str amount, unit = match.groups()[:2] if unit == "px": # LaTeX does not know pixels but points res = "%spt" % amount elif unit == "%": res = "%.3f\\linewidth" % (float(amount)/100.0) return res def visit_image(self, node): attrs = node.attributes # Add image URI to dependency list, assuming that it's # referring to a local file. 
self.settings.record_dependencies.add(attrs['uri']) pre = [] # in reverse order post = [] include_graphics_options = [] inline = isinstance(node.parent, nodes.TextElement) if attrs.has_key('scale'): # Could also be done with ``scale`` option to # ``\includegraphics``; doing it this way for consistency. pre.append('\\scalebox{%f}{' % (attrs['scale'] / 100.0,)) post.append('}') if attrs.has_key('width'): include_graphics_options.append('width=%s' % ( self.latex_image_length(attrs['width']), )) if attrs.has_key('height'): include_graphics_options.append('height=%s' % ( self.latex_image_length(attrs['height']), )) if attrs.has_key('align'): align_prepost = { # By default latex aligns the top of an image. (1, 'top'): ('', ''), (1, 'middle'): ('\\raisebox{-0.5\\height}{', '}'), (1, 'bottom'): ('\\raisebox{-\\height}{', '}'), (0, 'center'): ('{\\hfill', '\\hfill}'), # These 2 don't exactly do the right thing. The image should # be floated alongside the paragraph. See # http://www.w3.org/TR/html4/struct/objects.html#adef-align-IMG (0, 'left'): ('{', '\\hfill}'), (0, 'right'): ('{\\hfill', '}'),} try: pre.append(align_prepost[inline, attrs['align']][0]) post.append(align_prepost[inline, attrs['align']][1]) except KeyError: pass # XXX complain here? if not inline: pre.append('\n') post.append('\n') pre.reverse() self.body.extend( pre ) options = '' if len(include_graphics_options)>0: options = '[%s]' % (','.join(include_graphics_options)) self.body.append( '\\includegraphics%s{%s}' % ( options, attrs['uri'] ) ) self.body.extend( post ) def depart_image(self, node): pass def visit_important(self, node): self.visit_admonition(node, 'important') def depart_important(self, node): self.depart_admonition() def visit_interpreted(self, node): # @@@ Incomplete, pending a proper implementation on the # Parser/Reader end. self.visit_literal(node) def depart_interpreted(self, node): self.depart_literal(node) def visit_legend(self, node): self.body.append('{\\small ') def depart_legend(self, node): self.body.append('}') def visit_line(self, node): self.body.append('\item[] ') def depart_line(self, node): self.body.append('\n') def visit_line_block(self, node): if isinstance(node.parent, nodes.line_block): self.body.append('\\item[] \n' '\\begin{lineblock}{\\lineblockindentation}\n') else: self.body.append('\n\\begin{lineblock}{0em}\n') def depart_line_block(self, node): self.body.append('\\end{lineblock}\n') def visit_list_item(self, node): # Append "{}" in case the next character is "[", which would break # LaTeX's list environment (no numbering and the "[" is not printed). self.body.append('\\item {} ') def depart_list_item(self, node): self.body.append('\n') def visit_literal(self, node): self.literal = 1 self.body.append('\\texttt{') def depart_literal(self, node): self.body.append('}') self.literal = 0 def visit_literal_block(self, node): """ Render a literal-block. Literal blocks are used for "::"-prefixed literal-indented blocks of text, where the inline markup is not recognized, but are also the product of the parsed-literal directive, where the markup is respected. """ # In both cases, we want to use a typewriter/monospaced typeface. # For "real" literal-blocks, we can use \verbatim, while for all # the others we must use \mbox. # # We can distinguish between the two kinds by the number of # siblings that compose this node: if it is composed by a # single element, it's surely either a real one or a # parsed-literal that does not contain any markup. 
# if not self.active_table.is_open(): # no quote inside tables, to avoid vertical space between # table border and literal block. # BUG: fails if normal text preceeds the literal block. self.body.append('\\begin{quote}') self.context.append('\\end{quote}\n') else: self.body.append('\n') self.context.append('\n') if (self.settings.use_verbatim_when_possible and (len(node) == 1) # in case of a parsed-literal containing just a "**bold**" word: and isinstance(node[0], nodes.Text)): self.verbatim = 1 self.body.append(self.literal_block_env('begin')) else: self.literal_block = 1 self.insert_none_breaking_blanks = 1 self.body.append('{\\ttfamily \\raggedright \\noindent\n') # * obey..: is from julien and never worked for me (grubert). # self.body.append('{\\obeylines\\obeyspaces\\ttfamily\n') def depart_literal_block(self, node): if self.verbatim: self.body.append(self.literal_block_env('end')) self.verbatim = 0 else: self.body.append('\n}') self.insert_none_breaking_blanks = 0 self.literal_block = 0 # obey end: self.body.append('}\n') self.body.append(self.context.pop()) def visit_meta(self, node): self.body.append('[visit_meta]\n') # BUG maybe set keywords for pdf ##self.head.append(self.starttag(node, 'meta', **node.attributes)) def depart_meta(self, node): self.body.append('[depart_meta]\n') def visit_note(self, node): self.visit_admonition(node, 'note') def depart_note(self, node): self.depart_admonition() def visit_option(self, node): if self.context[-1]: # this is not the first option self.body.append(', ') def depart_option(self, node): # flag tha the first option is done. self.context[-1] += 1 def visit_option_argument(self, node): """The delimiter betweeen an option and its argument.""" self.body.append(node.get('delimiter', ' ')) def depart_option_argument(self, node): pass def visit_option_group(self, node): self.body.append('\\item [') # flag for first option self.context.append(0) def depart_option_group(self, node): self.context.pop() # the flag self.body.append('] ') def visit_option_list(self, node): self.body.append('\\begin{optionlist}{3cm}\n') def depart_option_list(self, node): self.body.append('\\end{optionlist}\n') def visit_option_list_item(self, node): pass def depart_option_list_item(self, node): pass def visit_option_string(self, node): ##self.body.append(self.starttag(node, 'span', '', CLASS='option')) pass def depart_option_string(self, node): ##self.body.append('</span>') pass def visit_organization(self, node): self.visit_docinfo_item(node, 'organization') def depart_organization(self, node): self.depart_docinfo_item(node) def visit_paragraph(self, node): index = node.parent.index(node) if not ('contents' in self.topic_classes or (isinstance(node.parent, nodes.compound) and index > 0 and not isinstance(node.parent[index - 1], nodes.paragraph) and not isinstance(node.parent[index - 1], nodes.compound))): self.body.append('\n') def depart_paragraph(self, node): self.body.append('\n') def visit_problematic(self, node): self.body.append('{\\color{red}\\bfseries{}') def depart_problematic(self, node): self.body.append('}') def visit_raw(self, node): if 'latex' in node.get('format', '').split(): self.body.append(node.astext()) raise nodes.SkipNode def visit_reference(self, node): # BUG: hash_char "#" is trouble some in LaTeX. # mbox and other environment do not like the '#'. 
hash_char = '\\#' if node.has_key('refuri'): href = node['refuri'].replace('#',hash_char) elif node.has_key('refid'): href = hash_char + node['refid'] elif node.has_key('refname'): href = hash_char + self.document.nameids[node['refname']] else: raise AssertionError('Unknown reference.') self.body.append('\\href{%s}{' % href.replace("%", "\\%")) if self._reference_label and not node.has_key('refuri'): self.body.append('\\%s{%s}}' % (self._reference_label, href.replace(hash_char, ''))) raise nodes.SkipNode def depart_reference(self, node): self.body.append('}') def visit_revision(self, node): self.visit_docinfo_item(node, 'revision') def depart_revision(self, node): self.depart_docinfo_item(node) def visit_section(self, node): self.section_level += 1 # Initialize counter for potential subsections: self._section_number.append(0) # Counter for this section's level (initialized by parent section): self._section_number[self.section_level - 1] += 1 def depart_section(self, node): # Remove counter for potential subsections: self._section_number.pop() self.section_level -= 1 def visit_sidebar(self, node): # BUG: this is just a hack to make sidebars render something self.body.append('\n\\setlength{\\locallinewidth}{0.9\\admonitionwidth}\n') self.body.append('\\begin{center}\\begin{sffamily}\n') self.body.append('\\fbox{\\colorbox[gray]{0.80}{\\parbox{\\admonitionwidth}{\n') def depart_sidebar(self, node): self.body.append('}}}\n') # end parbox colorbox fbox self.body.append('\\end{sffamily}\n\\end{center}\n'); self.body.append('\n\\setlength{\\locallinewidth}{\\linewidth}\n') attribution_formats = {'dash': ('---', ''), 'parentheses': ('(', ')'), 'parens': ('(', ')'), 'none': ('', '')} def visit_attribution(self, node): prefix, suffix = self.attribution_formats[self.settings.attribution] self.body.append('\n\\begin{flushright}\n') self.body.append(prefix) self.context.append(suffix) def depart_attribution(self, node): self.body.append(self.context.pop() + '\n') self.body.append('\\end{flushright}\n') def visit_status(self, node): self.visit_docinfo_item(node, 'status') def depart_status(self, node): self.depart_docinfo_item(node) def visit_strong(self, node): self.body.append('\\textbf{') self.literal_block_stack.append('\\textbf{') def depart_strong(self, node): self.body.append('}') self.literal_block_stack.pop() def visit_substitution_definition(self, node): raise nodes.SkipNode def visit_substitution_reference(self, node): self.unimplemented_visit(node) def visit_subtitle(self, node): if isinstance(node.parent, nodes.sidebar): self.body.append('~\\\\\n\\textbf{') self.context.append('}\n\\smallskip\n') elif isinstance(node.parent, nodes.document): self.title = self.title + \ '\\\\\n\\large{%s}\n' % self.encode(node.astext()) raise nodes.SkipNode elif isinstance(node.parent, nodes.section): self.body.append('\\textbf{') self.context.append('}\\vspace{0.2cm}\n\n\\noindent ') def depart_subtitle(self, node): self.body.append(self.context.pop()) def visit_system_message(self, node): pass def depart_system_message(self, node): self.body.append('\n') def visit_table(self, node): if self.active_table.is_open(): self.table_stack.append(self.active_table) # nesting longtable does not work (e.g. 
2007-04-18) self.active_table = Table('tabular',self.settings.table_style) self.active_table.open() for cl in node['classes']: self.active_table.set_table_style(cl) self.body.append('\n' + self.active_table.get_opening()) def depart_table(self, node): self.body.append(self.active_table.get_closing() + '\n') self.active_table.close() if len(self.table_stack)>0: self.active_table = self.table_stack.pop() else: self.active_table.set_table_style(self.settings.table_style) def visit_target(self, node): # BUG: why not (refuri or refid or refname) means not footnote ? if not (node.has_key('refuri') or node.has_key('refid') or node.has_key('refname')): for id in node['ids']: self.body.append('\\hypertarget{%s}{' % id) self.context.append('}' * len(node['ids'])) elif node.get("refid"): self.body.append('\\hypertarget{%s}{' % node.get("refid")) self.context.append('}') else: self.context.append('') def depart_target(self, node): self.body.append(self.context.pop()) def visit_tbody(self, node): # BUG write preamble if not yet done (colspecs not []) # for tables without heads. if not self.active_table.get('preamble written'): self.visit_thead(None) # self.depart_thead(None) def depart_tbody(self, node): pass def visit_term(self, node): self.body.append('\\item[{') def depart_term(self, node): # definition list term. # \leavevmode results in a line break if the term is followed by a item list. self.body.append('}] \leavevmode ') def visit_tgroup(self, node): #self.body.append(self.starttag(node, 'colgroup')) #self.context.append('</colgroup>\n') pass def depart_tgroup(self, node): pass def visit_thead(self, node): self.body.append('{%s}\n' % self.active_table.get_colspecs()) if self.active_table.caption: self.body.append('\\caption{%s}\\\\\n' % self.active_table.caption) self.active_table.set('preamble written',1) # TODO longtable supports firsthead and lastfoot too. self.body.extend(self.active_table.visit_thead()) def depart_thead(self, node): # the table header written should be on every page # => \endhead self.body.extend(self.active_table.depart_thead()) # and the firsthead => \endfirsthead # BUG i want a "continued from previous page" on every not # firsthead, but then we need the header twice. # # there is a \endfoot and \endlastfoot too. # but we need the number of columns to # self.body.append('\\multicolumn{%d}{c}{"..."}\n' % number_of_columns) # self.body.append('\\hline\n\\endfoot\n') # self.body.append('\\hline\n') # self.body.append('\\endlastfoot\n') def visit_tip(self, node): self.visit_admonition(node, 'tip') def depart_tip(self, node): self.depart_admonition() def bookmark(self, node): """Append latex href and pdfbookmarks for titles. """ if node.parent['ids']: for id in node.parent['ids']: self.body.append('\\hypertarget{%s}{}\n' % id) if not self.use_latex_toc: # BUG level depends on style. pdflatex allows level 0 to 3 # ToC would be the only on level 0 so i choose to decrement the rest. # "Table of contents" bookmark to see the ToC. To avoid this # we set all zeroes to one. l = self.section_level if l>0: l = l-1 # pdftex does not like "_" subscripts in titles text = self.encode(node.astext()) for id in node.parent['ids']: self.body.append('\\pdfbookmark[%d]{%s}{%s}\n' % \ (l, text, id)) def visit_title(self, node): """Section and other titles.""" if isinstance(node.parent, nodes.topic): # the table of contents. 
self.bookmark(node) if ('contents' in self.topic_classes and self.use_latex_toc): self.body.append('\\renewcommand{\\contentsname}{') self.context.append('}\n\\tableofcontents\n\n\\bigskip\n') elif ('abstract' in self.topic_classes and self.settings.use_latex_abstract): raise nodes.SkipNode else: # or section titles before the table of contents. # BUG: latex chokes on center environment with # "perhaps a missing item", therefore we use hfill. self.body.append('\\subsubsection*{~\\hfill ') # the closing brace for subsection. self.context.append('\\hfill ~}\n') # TODO: for admonition titles before the first section # either specify every possible node or ... ? elif isinstance(node.parent, nodes.sidebar) \ or isinstance(node.parent, nodes.admonition): self.body.append('\\textbf{\\large ') self.context.append('}\n\\smallskip\n') elif isinstance(node.parent, nodes.table): # caption must be written after column spec self.active_table.caption = self.encode(node.astext()) raise nodes.SkipNode elif self.section_level == 0: # document title self.title = self.encode(node.astext()) if not self.pdfinfo == None: self.pdfinfo.append( 'pdftitle={%s}' % self.encode(node.astext()) ) raise nodes.SkipNode else: self.body.append('\n\n') self.body.append('%' + '_' * 75) self.body.append('\n\n') self.bookmark(node) if self.use_latex_toc: section_star = "" else: section_star = "*" section_name = self.d_class.section(self.section_level) self.body.append('\\%s%s{' % (section_name, section_star)) # MAYBE postfix paragraph and subparagraph with \leavemode to # ensure floatables stay in the section and text starts on a new line. self.context.append('}\n') def depart_title(self, node): self.body.append(self.context.pop()) for id in node.parent['ids']: self.body.append('\\label{%s}\n' % id) def visit_topic(self, node): self.topic_classes = node['classes'] if ('abstract' in self.topic_classes and self.settings.use_latex_abstract): self.body.append('\\begin{abstract}\n') def depart_topic(self, node): if ('abstract' in self.topic_classes and self.settings.use_latex_abstract): self.body.append('\\end{abstract}\n') self.topic_classes = [] if 'contents' in node['classes'] and self.use_latex_toc: pass else: self.body.append('\n') def visit_inline(self, node): # titlereference classes = node.get('classes', ['Unknown', ]) for cls in classes: self.body.append( '\\docutilsrole%s{' % cls) self.context.append('}'*len(classes)) def depart_inline(self, node): self.body.append(self.context.pop()) def visit_rubric(self, node): self.body.append('\\rubric{') self.context.append('}\n') def depart_rubric(self, node): self.body.append(self.context.pop()) def visit_transition(self, node): self.body.append('\n\n') self.body.append('%' + '_' * 75) self.body.append('\n\\hspace*{\\fill}\\hrulefill\\hspace*{\\fill}') self.body.append('\n\n') def depart_transition(self, node): pass def visit_version(self, node): self.visit_docinfo_item(node, 'version') def depart_version(self, node): self.depart_docinfo_item(node) def visit_warning(self, node): self.visit_admonition(node, 'warning') def depart_warning(self, node): self.depart_admonition() def unimplemented_visit(self, node): raise NotImplementedError('visiting unimplemented node type: %s' % node.__class__.__name__) # def unknown_visit(self, node): # def default_visit(self, node): # vim: set ts=4 et ai :
creasyw/IMTAphy
documentation/toolchain/docutils-0.5-py2.5.egg/docutils/writers/latex2e/__init__.py
Python
gpl-2.0
83,484
[ "VisIt" ]
1802a44a579f209fcea8af72d88c994c64fd3c689a9bce42c9d0998ef8d58fa5
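One readily isolated piece of the LaTeX writer above is latex_image_length, which maps HTML-style image sizes onto units LaTeX understands: pixels become points, and percentages become fractions of \linewidth. Below is a condensed, standalone sketch of the same conversion — the regex and the scaling are taken from the method itself, the assertion inputs are made up for illustration:

import re

def latex_image_length(width_str):
    # "123px" -> "123pt", "50%" -> "0.500\linewidth", anything else passes through
    match = re.match(r'(\d*\.?\d*)\s*(\S*)', width_str)
    if not match:
        return width_str  # fallback
    amount, unit = match.groups()[:2]
    if unit == "px":
        return "%spt" % amount  # LaTeX knows points, not pixels
    if unit == "%":
        return "%.3f\\linewidth" % (float(amount) / 100.0)
    return width_str

assert latex_image_length("120px") == "120pt"
assert latex_image_length("50%") == "0.500\\linewidth"
assert latex_image_length("3cm") == "3cm"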
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils


def pyunit_make_glm_model():
    # TODO: PUBDEV-1717
    pros = h2o.import_file(pyunit_utils.locate("smalldata/prostate/prostate.csv"))
    model = h2o.glm(x=pros[["AGE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON"]], y=pros["CAPSULE"],
                    family="gaussian", alpha=[0])

    new_betas = {"AGE": 0.5, "DPROS": 0.5, "DCAPS": 0.5, "PSA": 0.5, "VOL": 0.5, "GLEASON": 0.5}

    # Serialize the coefficient names and values as the JSON-style array
    # strings expected by the MakeGLMModel endpoint.
    names = '['
    for n in new_betas.keys():
        names += "\"" + n + "\","
    names = names[0:len(names) - 1] + "]"

    betas = '['
    for b in new_betas.values():
        betas += str(b) + ","
    betas = betas[0:len(betas) - 1] + "]"

    res = h2o.H2OConnection.post_json("MakeGLMModel", model=model._id, names=names, beta=betas)


if __name__ == "__main__":
    pyunit_utils.standalone_test(pyunit_make_glm_model)
else:
    pyunit_make_glm_model()
pchmieli/h2o-3
h2o-py/tests/testdir_algos/glm/pyunit_NOFEATURE_make_glm_model.py
Python
apache-2.0
869
[ "Gaussian" ]
3c854020a4f14cfab16cb1026861f8ae70adba7d1195ea45865cbca501452442
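The test above assembles the names and betas payloads by hand-concatenating bracketed strings and trimming the trailing comma. A sketch of an equivalent, less error-prone construction using the standard library — the field names come from the test itself; that the MakeGLMModel endpoint accepts exactly this encoding is an assumption:

import json

new_betas = {"AGE": 0.5, "DPROS": 0.5, "DCAPS": 0.5,
             "PSA": 0.5, "VOL": 0.5, "GLEASON": 0.5}

# json.dumps produces the same '["AGE","DPROS",...]' / '[0.5,0.5,...]'
# shape as the manual loops, minus the trailing-comma bookkeeping.
names = json.dumps(list(new_betas.keys()), separators=(",", ":"))
betas = json.dumps(list(new_betas.values()), separators=(",", ":"))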
""" stats.py This file contains functions for reporting results from EM and assembly steps and raw numbers for visualization with another tool. Sam Vohr (svohr@soe.ucsc.edu) Fri May 20 14:04:02 PDT 2016 """ import sys import collections import numpy from mixemt import observe from mixemt import phylotree def report_top_props(haplogroups, props, top_n=10): """ Prints to stderr the names and fractions of the n haplogroups with the highest estimated proportions. """ order = numpy.argsort(props)[::-1] sys.stderr.write('\nTop %d haplogroups by proportion...\n' % (top_n)) for i in range(top_n): sys.stderr.write("%d\t%0.6f\t%s\n" % (i + 1, props[order[i]], haplogroups[order[i]])) sys.stderr.write('\n') return def report_read_votes(haplogroups, read_hap_mat, top_n=10): """ Each read "votes" for a haplogroup; the haplogroup with the highest probability. Report the vote counts for the top N. """ votes = numpy.argmax(read_hap_mat, 1) vote_count = collections.Counter(votes) sys.stderr.write("\nTop 10 haplogroups by read probabilities...\n") for hap_i, count in vote_count.most_common(top_n): sys.stderr.write("%s\t%d\n" % (haplogroups[hap_i], count)) sys.stderr.write('\n') return def report_contributors(out, contribs, contrib_reads): """ Prints a table that summarizes the contributors, their proportions and number of reads assigned to each. Formats the output nicely if out is a TTY, otherwise prints a tab-delimited table. """ if out.isatty(): out.write("hap# Haplogroup Contribution Reads\n") out.write("-------------------------------------------\n") for hap_id, haplogroup, prop in contribs: total_reads = len(contrib_reads[hap_id]) if out.isatty(): prop_str = '%.4f' % (prop) read_str = '%d' % (total_reads) out.write('%s %s %s %s\n' % (hap_id.ljust(6), haplogroup.ljust(15), prop_str.rjust(12), read_str.rjust(7))) else: out.write('%s\t%s\t%.4f\t%d\n' % (hap_id, haplogroup, prop, total_reads)) return def write_base_obs(out, obs_tab, ref, prefix=''): """ Write the counts of observed bases for each position to the file handle. Args: out: File handle to write output to. obs_tab: ObservedBases object of observations per reference position. ref: The reference sequence. Used to finding the number of positions we must write. prefix: string to write before each entry (i.e. an ID followed by a tab character) Returns: nothing """ if prefix: prefix += '\t' for ref_pos in range(len(ref)): out.write( "%s%d\t%s\t%d\n" % (prefix, ref_pos, '\t'.join([str(obs_tab.obs_at(ref_pos, base)) for base in 'ACGT']), sum(obs_tab.obs_tab[ref_pos].values()))) return def write_variants(out, phylo, contribs, obs_tab, args): """ Write a table of the variants used in this analysis and note whether the position is expected to be polymorphic in the sample given the set of identified contributors. Args: out: File handle to write output to. 
phylo: The Phylotree object used in EM analysis contribs: Table of identified contributors with fields hap#, haplogroup, fraction args: The argparse namespace Returns: nothing """ haplogroups = [con[1] for con in contribs] variants = collections.defaultdict(list) for hap in haplogroups: for var in phylo.hap_var[hap]: pos = phylotree.pos_from_var(var) variants[pos].append("%s:%s" % (hap, var)) polymorphic = set(phylo.polymorphic_sites(haplogroups)) for ref_pos in range(len(phylo.refseq)): obs = obs_tab.obs_at(ref_pos) samp_status = "sample_fixed" threshold = max(args.min_var_reads, obs_tab.total_obs(pos) * args.frac_var_reads) if sum(obs[base] >= threshold for base in 'ACGT') > 1: samp_status = "variant" phy_status = "fixed" if ref_pos in polymorphic: phy_status = "polymorphic" out.write("%d\t%s\t%s\t%s\t%s\n" % (ref_pos + 1, '\t'.join([str(obs[base]) for base in 'ACGT']), phy_status, samp_status, ','.join(variants[ref_pos]))) return def write_statistics(phylo, all_obs, contribs, contrib_reads, args): """ Write a bunch of files to use for plotting the results of our EM and assembly steps. These will include 1) base observations for each contributor and 2) sites from phylotree that were used to estimate mixture contributions and whether or not we think these should be polymorphic or not. Args: phylo: The phylotree object these assignments are based on. ref: The reference sequence. all_obs: ObservedBases object of observations per reference position. contribs: The contributor table returned by assembly.get_contributors, a list of (hap#, haplogroup, proportion) tuples. contrib_reads: a dictionary mapping hap#s to list of pysam AlignedSegments args: The argparse namespace, used for the stats_prefix filename prefix Returns: nothing """ haplogroups = {con[0]:con[1] for con in contribs} with open("%s.pos.tab" % (args.stats_prefix), 'w') as var_out: write_variants(var_out, phylo, contribs, all_obs, args) with open("%s.obs.tab" % (args.stats_prefix), 'w') as obs_out: for con in sorted(contrib_reads): obs_tab = observe.ObservedBases(contrib_reads[con], args.min_mq, args.min_bq) haplogroup = "unassigned" if con in haplogroups: haplogroup = haplogroups[con] write_base_obs(obs_out, obs_tab, phylo.refseq, "%s\t%s" % (con, haplogroup)) if len(contrib_reads) > 1: write_base_obs(obs_out, all_obs, phylo.refseq, "all\tmix") return
svohr/mixemt
mixemt/stats.py
Python
mit
6,570
[ "pysam" ]
7571257aae70b50b3050cfcc9e40e44e83daebef6b0c8136b0336889b64d67fd
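The site-status call in write_variants above reduces to one rule: a position is reported as a variant when at least two bases each reach max(min_var_reads, frac_var_reads * coverage) observations. A minimal sketch of that rule in isolation — the parameter names mirror the argparse options used above, the counts are made up:

def site_status(base_counts, min_var_reads=10, frac_var_reads=0.01):
    # base_counts: dict like {'A': 480, 'C': 15, 'G': 0, 'T': 5}
    coverage = sum(base_counts.values())
    threshold = max(min_var_reads, coverage * frac_var_reads)
    n_alleles = sum(base_counts[base] >= threshold for base in 'ACGT')
    return "variant" if n_alleles > 1 else "sample_fixed"

print(site_status({'A': 480, 'C': 15, 'G': 0, 'T': 5}))  # variant
print(site_status({'A': 495, 'C': 3, 'G': 1, 'T': 1}))   # sample_fixed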
"""testGame_modular_animation.py; -Warning, code probably isn't structured very well, needs cleaned up. -Still lots of hardcoded numbers/file paths, no clue if this will work on any system that's not GNU/Linux. -If you want to run it, please remember to mess with the filepaths around line 95, I should change them to constants at the top of the file but either way I don't know how to write an error catching piece of code to find the correct animation files on someone else's system. -Sprite was made with 8 images, 2 frames for each direction. -More disclaimers: I don't really know the difference between blitting, updating, and display flipping, so for all I know this could be taking up way more resources than it needs to. -On animations: all of the ints that deal with speed, animation, framerates, etc. are "trial and errored." Soon I'll figure out the right way to calculate how many FPS you need when taking size of file, number of frames per animation, size of tiles in a tiled map, and such into consideration. -Alot of this is messy as hell. I really don't like how I'm making maps. Anytime you want to change the width/height of the map, you're going to have to go into the list and manually edit it. I couldn't find any decent tutorials on using Tiled maps in pygame, so oh well. Another future to-do. -Watch out changing the screen variable for the display. I was trying to make this a little more modular by putting the map function into a different file, and for some reason I now have to define screen in both the main game file and the maps.py module, due to the fact that my Player class doesn't know what to do when I tell him to update because the display_mymaps function hasn't been called yet, therefore screen hasn't been defined yet; introduce previously mentioned dirty workaround, because if I call the maps function anywhere else it displays ontop of my sprite, which isn't too exciting. -I'm very new to python, pygame, and OO concepts. So the OO code below is probably some twisted halfbreed but it really helped with animating the player so it's there. Code could probably be much more robust to expansion and change, but I'm still very much at a beginner level so this game is very much at a beginner level. At least my sprite moves around. COPYRIGHT: RYAN CAMARATTA, 2014 CONTACT: RYANCNAP@GMAIL.COM DEPENDS ON: PYTHON 2.X; PYGAME(V.?); """ # Imports import pygame import sys, os, glob from pygame import * # Crashes if I remove import all; I heard this was bad practice so I'll have to # figure out what pygame module deals with event/keypress handling as # that seems to be the problem. # Need tiledtmxloader. import tiledtmxloader # parse the map. 
(it is done here to initialize the # window the same size as the map if it is small enough) path_to_map = os.path.join(os.pardir, "../../python-dev/game/tile-images/tmx-tilemaps/hopefully.tmx") print("usage: python %s your_map.tmx\n\nUsing default map '%s'\n" % \ (os.path.basename(__file__), path_to_map)) world_map = tiledtmxloader.tmxreader.TileMapParser().parse_decode(path_to_map) # load the images using pygame resources = tiledtmxloader.helperspygame.ResourceLoaderPygame() resources.load(world_map) # prepare map rendering assert world_map.orientation == "orthogonal" # renderer renderer = tiledtmxloader.helperspygame.RendererPygame() # init pygame and set up a screen pygame.init() screen_width = min(1024, world_map.pixel_width) screen_height = min(768, world_map.pixel_height) screen = pygame.display.set_mode((screen_width, screen_height)) BLACK = ((255, 255, 255)) pygame.display.set_caption("What a neat game") font = pygame.font.SysFont('Calibri', 25, True, False) frames_per_sec = 45.0 clock = pygame.time.Clock() # cam_offset is for scrolling cam_world_pos_x = 0 cam_world_pos_y = 0 # set initial cam position and size renderer.set_camera_position_and_size(cam_world_pos_x, cam_world_pos_y, \ screen_width, screen_height, "topleft") # retrieve the layers sprite_layers = tiledtmxloader.helperspygame.get_layers_from_map(resources) # Define base player class; more to be added later. class Player: def __init__(self): self.x = 200 self.y = 300 self.ani_speed_init = 8 self.ani_speed = self.ani_speed_init self.ani = glob.glob("bmage/bmage_right*.png") self.ani.sort() # make sure frames are in numbered order self.ani_pos = 0 # this will be the first frame self.ani_max = len(self.ani) - 1 # ^will find max frames of self_ani array, minus one # due to counting from 0 self.img = pygame.image.load(self.ani[0]) self.update_x(0) self.update_y(0) # Let's try to make an inventory. self.inventory = {} # test collision with edges of map. self.blocked = False def update_x(self, x_pos): if x_pos != 0: # ie, if we're not standing still self.ani_speed -= 2 self.x += x_pos # Increments player's x-pos # TODO: this seems really hacky... # making a class method rely on a global variable sounds like it's bad # but should the Player class need to know anything about the camera? global cam_world_pos_x cam_world_pos_x = cam_world_pos_x + 1 if self.ani_speed == 0: self.img = pygame.image.load(self.ani[self.ani_pos]) self.ani_speed = self.ani_speed_init # If reach last frame, reset to first frame if self.ani_pos == self.ani_max: self.ani_pos = 0 else: self.ani_pos += 1 screen.blit(self.img, (self.x, self.y)) # Had to make a separate update function for y-movement, # couldn't wrap my mind around making one update method that would work for # both. Needless to say, this section of code also makes me sad. def update_y(self, y_pos): if y_pos != 0: self.ani_speed -= 2 self.y -= y_pos # Decrements player's current y-pos if self.ani_speed == 0: self.img = pygame.image.load(self.ani[self.ani_pos]) self.ani_speed = self.ani_speed_init # If reach last frame, reset to first frame if self.ani_pos == self.ani_max: self.ani_pos = 0 else: self.ani_pos += 1 screen.blit(self.img, (self.x, self.y)) # let's try to do something with inventory! 
# This turned out a lot uglier than expected, must be doing something wrong def populate_inv(self, item, description): self.inventory[item] = description # TODO: remove the magic numbers below when I have proper screen size # TODO: Make it blit under the previous entry so things aren't # getting\drawn on top of each other. for e in self.inventory: inv_text = font.render(item + ": " + description, True, BLACK) #screen.blit(inv_text, [MAP_WIDTH * TILE_SIZE - 400, # MAP_HEIGHT * TILE_SIZE + 20]) # end player class player1 = Player() x_pos = 0 y_pos = 0 # Enter main game loop. while True: # test collision with edges of screen #if player1.x_pos == world_map.pixel_width: # player1.blocked = True #else: # player1.blocked = False # Make sure the screen is cleared before/after every loop. screen.fill((0, 0, 0)) clock.tick(frames_per_sec) # render the map for sprite_layer in sprite_layers: if sprite_layer.is_object_group: # we dont draw the object group layers continue else: renderer.render_layer(screen, sprite_layer) #pygame.display.flip() for event in pygame.event.get(): if event.type == pygame.QUIT: sys.exit() # Handling cardinal direction movement with arrow keys # If the right arrowkey is pressed, player1's animation changes to # the two-frame animation of him walking right. elif event.type == KEYDOWN and event.key == K_RIGHT: player1.ani = glob.glob("bmage/bmage_right*.png") # As long as key is held down, update pos(position) by two. # This sets pos to 2, the actual updating of frames and coordinates # is handled by the player classes update methods. # test collision #if player1.blocked == False: x_pos = 2 #else: # player1.x_pos = 0 elif event.type == KEYUP and event.key == K_RIGHT: x_pos = 0 # Once the pressed key is released, set pos to 0. The way the # update method handles pos means that the whole method will # never be run, ie. we won't animate or move because now # no keys are being pressed. elif event.type == KEYDOWN and event.key == K_LEFT: player1.ani = glob.glob("bmage/bmage_left*.png") x_pos = -2 elif event.type == KEYUP and event.key == K_LEFT: x_pos = 0 elif event.type == KEYDOWN and event.key == K_UP: player1.ani = glob.glob("bmage/bmage_up*.png") y_pos = 2 elif event.type == KEYUP and event.key == K_UP: y_pos = 0 elif event.type == KEYDOWN and event.key == K_DOWN: player1.ani = glob.glob("bmage/bmage_down*.png") y_pos = -2 elif event.type == KEYUP and event.key == K_DOWN: y_pos = 0 # Call mymap, update player's position, and draw everything to screen. #display_mymap() player1.update_x(x_pos) player1.update_y(y_pos) player1.populate_inv('Sword', 'A rusty thing') pygame.display.update() # Line 227-ish, only updating cam_world_pos_x for every right button press, # just wanted to see if it worked. renderer.set_camera_position_and_size(cam_world_pos_x, cam_world_pos_y, \ screen_width, screen_height, "topleft")
ryancnap/sadsack-rpg
my-game.py
Python
gpl-2.0
10,192
[ "exciting" ]
086f6b084878e6b253defce228a928fe98f411229d7c751b765904ed2dc989f7
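The Player.update_x/update_y pair in the game above share one idea: count an ani_speed timer down while the player is moving, and advance (and wrap) the frame index when the timer hits zero. A direction-agnostic sketch of just that frame cycling, as a hypothetical minimal class with no pygame blitting:

class FrameCycler(object):
    def __init__(self, n_frames, speed=8):
        self.n_frames = n_frames   # frames in the walk animation
        self.speed_init = speed    # ticks between frame changes
        self.speed = speed
        self.pos = 0               # current frame index

    def tick(self, moving):
        """Advance the timer; return the frame index to draw."""
        if moving:
            self.speed -= 2
            if self.speed <= 0:
                self.speed = self.speed_init
                self.pos = (self.pos + 1) % self.n_frames  # wrap to frame 0
        return self.pos

Using <= 0 instead of the original's == 0 check avoids the timer stepping past zero when the decrement and the initial value get out of sync.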
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Patient.serial' db.add_column(u'clinics_patient', 'serial', self.gf('django.db.models.fields.PositiveIntegerField')(default=0), keep_default=False) # Adding field 'Clinic.code' db.add_column(u'clinics_clinic', 'code', self.gf('django.db.models.fields.PositiveIntegerField')(default=0), keep_default=False) def backwards(self, orm): # Deleting field 'Patient.serial' db.delete_column(u'clinics_patient', 'serial') # Deleting field 'Clinic.code' db.delete_column(u'clinics_clinic', 'code') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'clinics.clinic': { 'Meta': {'object_name': 'Clinic'}, 'category': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'code': ('django.db.models.fields.PositiveIntegerField', [], {}), 'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 
'last_renovated': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}), 'lga': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'lga_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'pbf_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'town': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'ward': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'year_opened': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}) }, u'clinics.clinicstaff': { 'Meta': {'object_name': 'ClinicStaff'}, 'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}), 'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'staff_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'year_started': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}) }, u'clinics.clinicstatistic': { 'Meta': {'unique_together': "[('clinic', 'statistic', 'month')]", 'object_name': 'ClinicStatistic'}, 'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'float_value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'int_value': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'month': ('django.db.models.fields.DateField', [], {}), 'rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'statistic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['statistics.Statistic']"}), 'text_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'clinics.patient': { 'Meta': {'object_name': 'Patient'}, 'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}), 'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'serial': 
('django.db.models.fields.PositiveIntegerField', [], {}) }, u'clinics.region': { 'Meta': {'unique_together': "(('external_id', 'type'),)", 'object_name': 'Region'}, 'alternate_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}), 'external_id': ('django.db.models.fields.IntegerField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'lga'", 'max_length': '16'}) }, u'clinics.service': { 'Meta': {'object_name': 'Service'}, 'code': ('django.db.models.fields.PositiveIntegerField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}) }, u'clinics.visit': { 'Meta': {'object_name': 'Visit'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Patient']"}), 'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']"}), 'staff': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.ClinicStaff']", 'null': 'True', 'blank': 'True'}), 'visit_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'rapidsms.contact': { 'Meta': {'object_name': 'Contact'}, 'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}), 'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}) }, u'statistics.statistic': { 'Meta': {'object_name': 'Statistic'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['statistics.StatisticGroup']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'statistic_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}) }, u'statistics.statisticgroup': { 'Meta': {'object_name': 'StatisticGroup'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}) } } complete_apps = ['clinics']
myvoice-nigeria/myvoice
myvoice/clinics/migrations/0009_auto__add_field_patient_serial__add_field_clinic_code.py
Python
bsd-2-clause
12,250
[ "VisIt" ]
e303ed89700776efdcd5032d2bf9866ccb4ec02b1216d3ef1581fa39682fd33f
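The migration above follows the standard South pattern: forwards applies a schema change and backwards undoes exactly the same change. A stripped-down sketch of that shape for a single hypothetical column (the model and field names are illustrative, not from the project):

from south.db import db
from south.v2 import SchemaMigration


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'Thing.counter' with a default so existing rows validate
        db.add_column(u'app_thing', 'counter',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)

    def backwards(self, orm):
        # Mirror image of forwards: drop the same column
        db.delete_column(u'app_thing', 'counter')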
""" Palette is a tool to generate colors for various Graphs plots and legends The DIRAC Graphs package is derived from the GraphTool plotting package of the CMS/Phedex Project by ... <to be added> """ from __future__ import absolute_import from __future__ import division from __future__ import print_function __RCSID__ = "$Id$" import hashlib from DIRAC.WorkloadManagementSystem.Client import JobStatus from DIRAC.WorkloadManagementSystem.Client import JobMinorStatus job_status_palette = { JobStatus.RECEIVED: "#D9E7F8", JobStatus.CHECKING: "#FAFAFA", JobStatus.STAGING: "#6190CD", JobStatus.WAITING: "#004EFF", JobStatus.MATCHED: "#FEF7AA", JobStatus.RUNNING: "#FDEE65", JobStatus.COMPLETING: "#FFAF55", JobStatus.STALLED: "#BC5757", JobStatus.COMPLETED: "#00FF21", JobStatus.DONE: "#238802", JobStatus.FAILED: "#FF0000", JobStatus.KILLED: "#111111", } job_minor_status_palette = { JobMinorStatus.APP_ERRORS: "#BC2133", JobMinorStatus.EXCEPTION_DURING_EXEC: "#AA240C", JobMinorStatus.EXEC_COMPLETE: "#338B39", JobMinorStatus.ILLEGAL_JOB_JDL: "#D96C00", JobMinorStatus.INPUT_NOT_AVAILABLE: "#2822A6", JobMinorStatus.INPUT_DATA_RESOLUTION: "#FFBE94", JobMinorStatus.DOWNLOADING_INPUT_SANDBOX: "#586CFF", JobMinorStatus.INPUT_CONTAINS_SLASHES: "#AB7800", JobMinorStatus.INPUT_INCORRECT: "#6812D6", JobMinorStatus.JOB_WRAPPER_INITIALIZATION: "#FFFFCC", JobMinorStatus.JOB_EXCEEDED_WALL_CLOCK: "#FF33CC", JobMinorStatus.JOB_INSUFFICIENT_DISK: "#33FFCC", JobMinorStatus.JOB_EXCEEDED_CPU: "#AABBCC", "No Ancestors Found For Input Data": "#BDA544", JobMinorStatus.NO_CANDIDATE_SITE_FOUND: "#E2FFBC", JobMinorStatus.PENDING_REQUESTS: "#52FF4F", JobMinorStatus.RECEIVED_KILL_SIGNAL: "#FF312F", JobMinorStatus.WATCHDOG_STALLED: "#FFCC99", } miscelaneous_pallette = {"Others": "#666666", "NoLabels": "#0025AD", "Total": "#00FFDC", "Default": "#FDEE65"} country_palette = { "France": "#73C6BC", "UK": "#DCAF8A", "Spain": "#C2B0E1", "Netherlands": "#A9BF8E", "Germany": "#800000", "Russia": "#00514A", "Italy": "#004F00", "Switzerland": "#433B00", "Poland": "#528220", "Hungary": "#825CE2", "Portugal": "#009182", "Turkey": "#B85D00", } class Palette(object): def __init__(self, palette={}, colors=[]): self.palette = country_palette self.palette.update(job_status_palette) self.palette.update(miscelaneous_pallette) self.palette.update(job_minor_status_palette) def setPalette(self, palette): self.palette = palette def setColor(self, label, color): self.palette[label] = color def addPalette(self, palette): self.palette.update(palette) def getColor(self, label): if label in self.palette: return self.palette[label] else: return self.generateColor(label) def generateColor(self, label): myMD5 = hashlib.md5() myMD5.update(label.encode()) hexstring = myMD5.hexdigest() color = "#" + hexstring[:6] return color
ic-hep/DIRAC
src/DIRAC/Core/Utilities/Graphs/Palette.py
Python
gpl-3.0
3,155
[ "DIRAC" ]
34a78bf425b26416952edeb0e0d093ad0d65f308b97ebab264bd82970d2e58c3
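Palette.generateColor above gives any unknown label a stable color by hashing it: the first six hex digits of the label's MD5 digest become an RGB triplet, so the same label always plots in the same color across runs without any stored state. The trick works standalone (a sketch; the example label is made up):

import hashlib

def label_color(label):
    # First 3 bytes of the MD5 digest -> '#rrggbb'
    return "#" + hashlib.md5(label.encode()).hexdigest()[:6]

print(label_color("LCG.CERN.ch"))                                 # same output every run
print(label_color("LCG.CERN.ch") == label_color("LCG.CERN.ch"))   # True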
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Localization Compiler
#
import os, sys, codecs, shutil
from xml.dom.minidom import parse

template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
sys.path.append(os.path.join(template_dir, '../'))
from tiapp import *

ignoreFiles = ['.gitignore', '.cvsignore']
ignoreDirs = ['.git', '.svn', 'CVS']

class LocaleCompiler(object):

    def __init__(self, name, dir, platform, mode='simulator', outdir=None):
        self.dir = os.path.join(dir, 'i18n')
        self.platform = platform
        self.name = name
        self.mode = mode
        self.outdir = outdir
        self.iphone_dir = os.path.join(dir, 'build', 'iphone', 'build')
        self.android_dir = os.path.join(dir, 'build', 'android', 'res')
        if self.outdir != None:
            self.android_dir = self.outdir

    def get_locale(self, file):
        return os.path.basename(os.path.dirname(file))

    def get_ios_dir(self):
        if self.outdir != None:
            return self.outdir
        if self.mode == 'development':
            # simulator
            return os.path.join(self.iphone_dir, 'Debug-iphonesimulator', '%s.app' % self.name)
        elif self.mode == 'test':
            # adhoc install
            return os.path.join(self.iphone_dir, 'Debug-iphoneos', '%s.app' % self.name)
        else:
            # distribution
            return os.path.join(self.iphone_dir, 'Release-iphoneos', '%s.app' % self.name)

    def getText(self, nodelist):
        rc = u""
        for node in nodelist:
            if node.nodeType == node.TEXT_NODE:
                rc = rc + node.data
        return rc

    def isApp(self, file):
        return (os.path.basename(file) == "app.xml")

    def localization_file_name_ios(self, file):
        if self.isApp(file):
            return "InfoPlist.strings"
        return "Localizable.strings"

    def compile_for_ios(self, file):
        locale = self.get_locale(file)
        build_dir = self.get_ios_dir()
        lproj_dir = os.path.join(build_dir, '%s.lproj' % locale)
        if not os.path.exists(lproj_dir):
            os.makedirs(lproj_dir)
        locale_file = os.path.join(lproj_dir, self.localization_file_name_ios(file))
        f = codecs.open(locale_file, 'w', 'utf-16')
        f.write(u'/**\n * Appcelerator Titanium\n * this is a generated file - DO NOT EDIT\n */\n\n')
        dom = parse(file)
        appkeys = {'appname': 'CFBundleDisplayName'}
        for node in dom.documentElement.childNodes:
            if node.nodeType != 1:
                continue
            name = node.attributes['name'].nodeValue
            if self.isApp(file):
                name = appkeys[name]
                if name is None:
                    pass
            value = self.getText(node.childNodes)
            # TODO: translate any more symbols?
            value = value.replace("%s", '%@')
            f.write(u'"%s" = "%s";\n' % (name, value))
        f.close()
        if self.mode != 'development':
            # only compile if not simulator
            os.system("/usr/bin/plutil -convert binary1 \"%s\"" % locale_file)
        print "[DEBUG] compiled ios file: %s" % locale_file

    def compile_for_android(self, file):
        # TODO: Add android support for app.xml
        if self.isApp(file):
            return
        locale = self.get_locale(file)
        # for android, we can simply copy into the right directory
        if locale == 'en' or locale.lower() == 'en-us':
            dir = os.path.join(self.android_dir, 'values')
        else:
            if len(locale) == 5 and locale[2] == '-':
                # Android en-US -> en-rUS (need the r)
                locale = locale[0:3] + 'r' + locale[-2:]
            dir = os.path.join(self.android_dir, 'values-%s' % locale)
        if not os.path.exists(dir):
            os.makedirs(dir)
        shutil.copy(file, os.path.join(dir, 'strings.xml'))
        print "[DEBUG] compiled android file: %s" % file

    def compile(self):
        if not os.path.exists(self.dir):
            return
        print "[INFO] Compiling localization files"
        sys.stdout.flush()
        for dirname, dirs, files in os.walk(self.dir):
            for name in ignoreDirs:
                if name in dirs:
                    dirs.remove(name)  # don't visit ignored directories
            for f in files:
                if f in ignoreFiles:
                    continue
                if not f.endswith('.xml'):
                    continue
                file = os.path.join(dirname, f)
                if self.platform == 'ios' or self.platform == 'iphone' or self.platform == 'ipad' or self.platform == 'universal':
                    self.compile_for_ios(file)
                elif self.platform == 'android':
                    self.compile_for_android(file)
                elif self.platform == 'blackberry':
                    # TODO
                    pass

if __name__ == "__main__":
    if len(sys.argv) == 1 or len(sys.argv) < 3:
        print "Appcelerator Locale Compiler"
        print "Usage: %s <project_dir> <platform> [mode] [outdir]" % os.path.basename(sys.argv[0])
        sys.exit(1)
    path = os.path.expanduser(sys.argv[1])
    if not os.path.exists(path):
        print "Project directory not found: %s" % path
        sys.exit(1)
    tiapp_xml_path = os.path.join(path, 'tiapp.xml')
    if not os.path.exists(tiapp_xml_path):
        print "Project directory doesn't look like a valid Titanium project: %s" % path
        sys.exit(1)
    resources_dir = os.path.join(path, 'Resources')
    if not os.path.exists(resources_dir):
        print "Project directory doesn't look like a valid Titanium project: %s" % path
        sys.exit(1)
    platform = sys.argv[2]
    tiapp = TiAppXML(tiapp_xml_path)
    app_name = tiapp.properties['name']
    mode = 'simulator'
    outdir = None
    if len(sys.argv) > 3:
        mode = sys.argv[3]
    if len(sys.argv) > 4:
        outdir = os.path.expanduser(sys.argv[4])
    c = LocaleCompiler(app_name, path, platform, mode, outdir)
    c.compile()
arnaudsj/titanium_mobile
support/common/localecompiler.py
Python
apache-2.0
5,110
[ "VisIt" ]
319298d11383871204e2f0932230ad319f137e5bc5a14a9e745807c8a533b61a
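The only non-obvious step in compile_for_android above is the resource-directory naming: Android wants region qualifiers written with an 'r' (values-en-rUS), while the default locale just lands in values/. That mapping in isolation, as a sketch of the same string surgery:

def android_values_dir(locale):
    if locale == 'en' or locale.lower() == 'en-us':
        return 'values'                              # default resources
    if len(locale) == 5 and locale[2] == '-':
        locale = locale[0:3] + 'r' + locale[-2:]     # en-US -> en-rUS
    return 'values-%s' % locale

assert android_values_dir('en-US') == 'values'
assert android_values_dir('fr') == 'values-fr'
assert android_values_dir('pt-BR') == 'values-pt-rBR'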
import os import pylab import numpy as np from math import sqrt from LinearAlgebra import solve_linear_equations as solve from gpaw.testing.atomization_data import atomization_vasp from gpaw.utilities import fix, fix2 from moleculetest import dd from gpaw.testing.data import Ea12 def main(molecules, moleculedata, results, Ea): for formula, molecule in molecules.items(): reference = moleculedata[formula] result = results[formula] if result['Em0'] is None: continue E0 = 0.0 ok = True for atom in molecule: symbol = atom.GetChemicalSymbol() if Ea[symbol] is None: ok = False break E0 += Ea[symbol] if ok: result['Ea'] = E0 - result['Em0'] if len(molecule) == 2: d = result['d0'] + dd M = np.zeros((4, 5)) for n in range(4): M[n] = d**-n a = solve(np.innerproduct(M, M), np.dot(M, result['Em'] - E0)) dmin = 1 / ((-2 * a[2] + sqrt(4 * a[2]**2 - 12 * a[1] * a[3])) / (6 * a[3])) #B = xmin**2 / 9 / vmin * (2 * a[2] + 6 * a[3] * xmin) dfit = np.arange(d[0] * 0.95, d[4] * 1.05, d[2] * 0.005) emin = a[0] efit = a[0] for n in range(1, 4): efit += a[n] * dfit**-n emin += a[n] * dmin**-n result['d'] = dmin result['Eamin'] = -emin pylab.plot(dfit, efit, '-', color='0.7') if ok: pylab.plot(d, result['Em'] - E0, 'g.') else: pylab.plot(d, result['Em'] - E0, 'ro') pylab.text(dfit[0], efit[0], fix(formula)) pylab.xlabel(u'Bond length [Å]') pylab.ylabel('Energy [eV]') pylab.savefig('molecules.png') o = open('molecules.txt', 'w') print >> o, """\ .. contents:: ============== Molecule tests ============== Atomization energies (*E*\ `a`:sub:) and bond lengths (*d*) for 20 small molecules calculated with the PBE functional. All calculations are done in a box of size 12.6 x 12.0 x 11.4 Å with a grid spacing of *h*\ =0.16 Å and zero-boundary conditions. Compensation charges are expanded with correct multipole moments up to *l*\ `max`:sub:\ =2. Open-shell atoms are treated as non-spherical with integer occupation numbers, and zero-point energy is not included in the atomization energies. The numbers are compared to very accurate, state-of-the-art, PBE calculations (*ref* subscripts). .. figure:: molecules.png Bond lengths and atomization energies at relaxed geometries =========================================================== (*rlx* subscript) .. list-table:: :widths: 2 3 8 5 6 8 * - - *d* [Å] - *d*-*d*\ `ref`:sub: [Å] - *E*\ `a,rlx`:sub: [eV] - *E*\ `a,rlx`:sub:-*E*\ `a`:sub: [eV] - *E*\ `a,rlx`:sub:-*E*\ `a,rlx,ref`:sub: [eV] [1]_""" for formula, Ea1, Ea2 in Ea12: reference = moleculedata[formula] result = results[formula] if 'Eamin' in result: print >> o, ' * -', fix2(formula) print >> o, ' - %5.3f' % result['d'] if 'dref' in reference: print >> o, (' - ' + ', '.join(['%+5.3f [%d]_' % (result['d'] - dref, ref) for dref, ref in reference['dref']])) else: print >> o, ' -' print >> o, ' - %6.3f' % result['Eamin'] if result.get('Ea') is not None: print >> o, ' - %6.3f' % (result['Eamin'] - result['Ea']) else: print >> o, ' - Unknown' if formula in atomization_vasp: print >> o, ' - %6.3f' % (result['Eamin'] - atomization_vasp[formula][1] / 23.0605) else: print >> o, ' -' print >> o, """\ Atomization energies at experimental geometries =============================================== .. 
list-table:: :widths: 6 6 12 * - - *E*\ `a`:sub: [eV] - *E*\ `a`:sub:-*E*\ `a,ref`:sub: [eV]""" for formula, Ea1, Ea2 in Ea12: reference = moleculedata[formula] result = results[formula] print >> o, ' * -', fix2(formula) if 'Ea' in result: print >> o, ' - %6.3f' % result['Ea'] if 'Earef' in reference: print >> o, (' - ' + ', '.join(['%+5.3f [%d]_' % (result['Ea'] - Ecref, ref) for Ecref, ref in reference['Earef']])) else: print >> o, ' -' else: print >> o, ' -' print >> o, ' -' print >> o, """ References ========== .. [1] "The Perdew-Burke-Ernzerhof exchange-correlation functional applied to the G2-1 test set using a plane-wave basis set", J. Paier, R. Hirschl, M. Marsman and G. Kresse, J. Chem. Phys. 122, 234102 (2005) .. [2] "Molecular and Solid State Tests of Density Functional Approximations: LSD, GGAs, and Meta-GGAs", S. Kurth, J. P. Perdew and P. Blaha, Int. J. Quant. Chem. 75, 889-909 (1999) .. [3] "Comment on 'Generalized Gradient Approximation Made Simple'", Y. Zhang and W. Yang, Phys. Rev. Lett. .. [4] Reply to [3]_, J. P. Perdew, K. Burke and M. Ernzerhof """ o.close() os.system('rst2html.py ' + '--no-footnote-backlinks ' + '--trim-footnote-reference-space ' + '--footnote-references=superscript molecules.txt molecules.html')
qsnake/gpaw
scripts/data2restructured.py
Python
gpl-3.0
5,990
[ "GPAW" ]
2bb4652bad1bd4b1cec97c41792069f0ca426c038788a4a484acb4b06d791757
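The script above finds the equilibrium bond length by fitting E(d) = a0 + a1/d + a2/d**2 + a3/d**3 through the normal equations and solving dE/dx = a1 + 2*a2*x + 3*a3*x**2 = 0 for x = 1/d. A minimal standalone sketch of that fit with synthetic coefficients (the numbers below are invented for illustration, not GPAW results; np.inner stands in for the legacy np.innerproduct):

import numpy as np
from math import sqrt

d = np.array([1.0, 1.05, 1.1, 1.15, 1.2])         # sample bond lengths
E = 1.0 - 4.0 / d + 2.2 / d**2 + 0.1 / d**3       # synthetic energies

M = np.array([d**-n for n in range(4)])           # rows are powers of 1/d
a = np.linalg.solve(np.inner(M, M), np.dot(M, E)) # normal-equation fit
assert abs(a[1] - (-4.0)) < 1e-6                  # exact data is recovered

# dE/dx = a1 + 2*a2*x + 3*a3*x**2 = 0 with x = 1/d, keeping the root that
# is a minimum; this is the same closed form used in the script above.
xmin = (-2 * a[2] + sqrt(4 * a[2]**2 - 12 * a[1] * a[3])) / (6 * a[3])
dmin = 1 / xmin                                   # ~1.164 for these numbers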
#!/usr/bin/env python

from PIL import Image
import numpy as np
from scipy.ndimage.interpolation import rotate
from scipy.ndimage import gaussian_filter
import argparse
import os

from TiffWriter import write_tiff


def crop_center(img, new_shape):
    """ Crop an image equally on each side to create the new_shape
    Args:
        img (numpy array): 2D array to crop
        new_shape: desired shape of the return
    Returns:
        numpy array: array cropped according to shape
    """
    ul = ((img.shape[0]-new_shape[0])/2, (img.shape[1]-new_shape[1])/2)
    br = (ul[0]+new_shape[0], ul[1]+new_shape[1])
    return img[ul[0]:br[0], ul[1]:br[1]]


def gauss_kernel(size, sigma=None, size_y=None, sigma_y=None):
    """ Generates a 2D Gaussian kernel as a numpy array
    Args:
        size (int): 1/2 the width of the kernel; total width := 2*size+1
        sigma (float): spread of the gaussian in the width direction
        size_y (int): 1/2 the height of the kernel; defaults to size
        sigma_y (float): spread of the gaussian in the height direction; defaults to sigma
    Returns:
        numpy array: normalized 2D gaussian array
    """
    size = int(size)
    if not size_y:
        size_y = size
    else:
        size_y = int(size_y)
    if not sigma:
        sigma = 0.5 * size + .1
    if not sigma_y:
        sigma_y = sigma
    x, y = np.mgrid[-size:size+1, -size_y:size_y+1]
    g = np.exp(-0.5 * (x ** 2 / sigma ** 2 + y ** 2 / sigma_y ** 2))
    return g / g.sum()


def resize(a, shape):
    """ if array a is larger than shape, crop a; if a is smaller than shape, pad a with zeros
    Args:
        a (numpy array): 2D array to resize
        shape: desired shape of the return
    Returns:
        numpy array: array a resized according to shape
    """
    if a.shape[0] < shape[0]:
        a = np.pad(a, ((0, shape[0]-a.shape[0]), (0, 0)), mode="constant")
    if a.shape[1] < shape[1]:
        a = np.pad(a, ((0, 0), (0, shape[1]-a.shape[1])), mode="constant")
    if a.shape[0] > shape[0]:
        a = a[0:shape[0], :]
    if a.shape[1] > shape[1]:
        a = a[:, 0:shape[1]]
    return a


def halftone(cmyk, size, angles, fill, sharpness):
    """ Generates a halftone image from a cmyk image
    Args:
        cmyk (numpy array): 0.0-1.0 r x c x 4 image
        size (int): half size of the averaging kernel in pixels
        angles (list of float): 4 angles for the relative rotation of each channel
        fill (float): dot fill (relative dot size); scales the sigma of the dot kernel
        sharpness (float): sharpness multiplier applied to the dot kernel
    Returns:
        numpy array: 0.0-1.0 r x c x 4 halftoned image
    """
    halftone_image = np.zeros(cmyk.shape)
    for i, (channel, angle) in enumerate(zip(np.rollaxis(cmyk, 2), angles)):
        # total width of the kernel
        s = 2 * size + 1
        # rotate the image to eliminate overlap between the channels
        rotated = rotate(channel, angle, reshape=True, prefilter=False, order=1)
        # apply a gaussian filter to average over the region of the kernel
        averaged = gaussian_filter(rotated, size)
        # find the central value of the filtered image; this is the average intensity in the region
        halftone_weights = averaged[size::s, size::s]
        # tile the weight image with the average intensity value
        halftone_weights = np.repeat(np.repeat(halftone_weights, s, 0), s, 1)
        halftone_weights = resize(halftone_weights, rotated.shape)

        # TODO: consider using sigma to scale with magnitude
        # create a 2D gaussian kernel that will be the "dot"; normalize it to be 1.0 in the center
        kernel = gauss_kernel(size, sigma=fill*size)
        # Apply the sharpness multiplier and clip the kernel to 1.0
        kernel *= sharpness / np.max(kernel)
        kernel = np.clip(kernel, 0.0, 1.0)
        # tile the kernel across the image
        num_kernels = np.array(rotated.shape) / s + 1
        tiled_kernel = np.tile(kernel, num_kernels)
        tiled_kernel = resize(tiled_kernel, rotated.shape)

        # multiply the kernel image with the weights to generate the halftone image
        halftone = tiled_kernel * halftone_weights
        # rotate the image back to zero
        halftone = rotate(halftone, -angle, prefilter=False, order=1)
        # crop the image to the original size
        halftone = crop_center(halftone, channel.shape)

        # add this channel to the full cmyk image
        halftone_image[:,:,i] = halftone

        # Image.fromarray(halftone*255).show()

    # Image.fromarray(cmyk_to_rgb(halftone_image)).show()
    return halftone_image


def cmyk_to_rgb(cmyk):
    """ Converts a cmyk image to an rgb representation
    Args:
        cmyk (numpy array): 0.0-1.0 r x c x 4 image
    Returns:
        numpy array: 0-255 r x c x 3 image
    """
    rgb = 255 * (1.0 - cmyk[:,:,0:3]) * (1 - np.stack([cmyk[:,:,3],cmyk[:,:,3],cmyk[:,:,3]], axis=2))
    return np.round(rgb).astype(np.uint8)


def rgb_to_cmyk(rgb, percent_gray=100):
    """ Converts an rgb image to a cmyk representation
    Args:
        rgb (numpy array): 0-255 r x c x 3 image
        percent_gray (int): 0-100 percent of K channel to replace in CMY
    Returns:
        numpy array: 0.0-1.0 r x c x 4 image
    """
    cmy = 1 - rgb / 255.0
    k = np.min(cmy, axis=2) * (percent_gray / 100.0)
    k[np.where(np.sum(rgb,axis=2)==0)] = 1.0  # anywhere there is no color, set the k channel to max
    k_mat = np.stack([k,k,k], axis=2)
    with np.errstate(divide='ignore', invalid='ignore'):
        cmy = (cmy - k_mat) / (1.0 - k_mat)
        cmy[~np.isfinite(cmy)] = 0.0
    return np.dstack((cmy, k))


def test():
    # test rgb_to_cmyk
    assert np.allclose(rgb_to_cmyk(np.array([[[255, 255, 255]]], dtype=np.uint8), 100), [[[0, 0, 0, 0]]])
    assert np.allclose(rgb_to_cmyk(np.array([[[0, 0, 0]]], dtype=np.uint8), 100), [[[0, 0, 0, 1]]])
    assert np.allclose(rgb_to_cmyk(np.array([[[0, 0, 0]]], dtype=np.uint8), 0), [[[0, 0, 0, 1]]])
    assert np.allclose(rgb_to_cmyk(np.array([[[10, 20, 30]]], dtype=np.uint8), 100), [[[0.66666667, 0.33333333, 0.0, 0.88235294]]])

    # test cmyk_to_rgb
    assert np.allclose(cmyk_to_rgb(np.array([[[0, 0, 0, 1]]])), [[[0, 0, 0]]])
    assert np.allclose(cmyk_to_rgb(np.array([[[1, 1, 1, 0]]])), [[[0, 0, 0]]])
    assert np.allclose(cmyk_to_rgb(np.array([[[0, 0, 0, 0]]])), [[[255, 255, 255]]])
    assert np.allclose(cmyk_to_rgb(np.array([[[0.66666667, 0.33333333, 0.0, 0.88235294]]])), [[[10, 20, 30]]])

    # test inverse relationship between rgb_to_cmyk and cmyk_to_rgb
    for i in range(1000):
        rgb = np.array([[np.random.randint(0, 255, 3)]])
        gray = (np.random.rand(1)*100)[0]
        assert(np.allclose(cmyk_to_rgb(rgb_to_cmyk(rgb, gray)) - rgb, 0.0))


if __name__ == '__main__':
    test()

    # parse command line arguments
    parser = argparse.ArgumentParser(description='Generates CMYK halftone images from a color image.')
    parser.add_argument("file", type=str, help="input file name")
    parser.add_argument("-a", "--angles", type=int, nargs="+", default = [15, 75, 0, 45], help="four angles for rotation of each channel")
    parser.add_argument("-b", "--bits", type=int, choices=[1, 2, 4, 8], default=8, help="bits of color info per channel")
    parser.add_argument("-c", "--colorize_CMYK", default=False, action="store_true", help="save CMYK files as RGB color images")
    parser.add_argument("-e", "--extra_file_name", type=str, default="_Clr", help="final name addition for each channel")
    parser.add_argument("-f", "--fill", type=float, default=0.5, help="dot fill (size) value")
    parser.add_argument("-g", "--gray", type=int, default=100, help="percent of grey component replacement (K level)")
    parser.add_argument("-l", "--halftone", default=False, action="store_true", help="halftone the image")
    parser.add_argument("-p", "--sharpness", type=float, default=1.0, help="level of sharpness of the dots")
    parser.add_argument("-s", "--size", type=int, default=3, help="half size of averaging region (pixels)")
    args = parser.parse_args()

    # open file
    try:
        im = Image.open(args.file)
    except IOError:
        print "Cannot open ", args.file
        exit(1)

    # convert to numpy array
    img = np.array(im)[:,:,0:3]

    # separate into CMYK channels; would be better to use pyCMS and an ICC color profile
    CMYK = rgb_to_cmyk(img, args.gray)

    # halftone cmyk images
    if args.halftone:
        CMYK = halftone(CMYK, args.size, args.angles, args.fill, args.sharpness)

    # save files
    f, e = os.path.splitext(args.file)
    for i in range(4):
        # save the RGB color version of the individual CMYK images
        if args.colorize_CMYK:
            filename = f + ['_C', '_M', '_Y', '_K'][i] + ".TIF"
            channel = np.zeros(CMYK.shape)
            channel[:,:,i] = CMYK[:,:,i]
            out = cmyk_to_rgb(channel)
            write_tiff(filename, out)
        # save the individual CMYK files
        else:
            filename = f + args.extra_file_name + str(i + 1) + ".TIF"
            img = (255 * CMYK[:,:,i]).astype(np.uint8)
            write_tiff(filename, img, bit_depth=args.bits, photometric=0)

    # save CMYK full image
    Image.fromarray(cmyk_to_rgb(CMYK)).save(f + ".BMP")
ClayFlannigan/halftone
halftone.py
Python
apache-2.0
9,337
[ "Gaussian" ]
4dc4b9ee9dd095b7a809ce9b21b9ae5b1898f55552a00c4752345a70ce5ada70
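The rgb_to_cmyk function above performs grey component replacement: K takes the common grey part of CMY, and the remaining CMY is rescaled into the residual range. A worked single-pixel sketch at 100% GCR, reproducing the (10, 20, 30) case from test() above:

r, g, b = 10, 20, 30
c, m, y = 1 - r / 255.0, 1 - g / 255.0, 1 - b / 255.0  # (0.9608, 0.9216, 0.8824)
k = min(c, m, y)                                       # K takes the common grey part
c, m, y = [(v - k) / (1.0 - k) for v in (c, m, y)]     # rescale CMY into what remains
assert abs(c - 2.0 / 3.0) < 1e-9 and abs(m - 1.0 / 3.0) < 1e-9 and y == 0.0
assert abs(k - 0.88235294) < 1e-7                      # matches the test() expectation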
# Merge active and previous version's generated next major version candidate
# shadow. This involves simultaneously traversing both FileDescriptorProtos and:
# 1. Recovering hidden_envoy_deprecated_* fields and enum values in active proto.
# 2. Recovering deprecated (sub)message types.
# 3. Misc. fixups for oneof metadata and reserved ranges/names.

import copy
import pathlib
import sys

from google.protobuf import descriptor_pb2
from google.protobuf import text_format

# Note: we have to include these proto definitions for text_format sanity.
from google.api import annotations_pb2 as _
from validate import validate_pb2 as _
from envoy.annotations import deprecation_pb2 as _
from envoy.annotations import resource_pb2 as _
from udpa.annotations import migrate_pb2 as _
from udpa.annotations import sensitive_pb2 as _
from udpa.annotations import status_pb2 as _
from udpa.annotations import versioning_pb2 as _


# Set reserved_range in target_proto to reflect previous_reserved_range skipping
# skip_reserved_numbers.
def AdjustReservedRange(target_proto, previous_reserved_range, skip_reserved_numbers):
  del target_proto.reserved_range[:]
  for rr in previous_reserved_range:
    # We can only handle singleton ranges today.
    assert ((rr.start == rr.end) or (rr.end == rr.start + 1))
    if rr.start not in skip_reserved_numbers:
      target_proto.reserved_range.add().MergeFrom(rr)


# Merge active/shadow EnumDescriptorProtos to a fresh target EnumDescriptorProto.
def MergeActiveShadowEnum(active_proto, shadow_proto, target_proto):
  target_proto.MergeFrom(active_proto)
  shadow_values = {v.name: v for v in shadow_proto.value}
  skip_reserved_numbers = []
  # For every reserved name, check to see if it's in the shadow, and if so,
  # reintroduce in target_proto.
  del target_proto.reserved_name[:]
  for n in active_proto.reserved_name:
    hidden_n = 'hidden_envoy_deprecated_' + n
    if hidden_n in shadow_values:
      v = shadow_values[hidden_n]
      skip_reserved_numbers.append(v.number)
      target_proto.value.add().MergeFrom(v)
    else:
      target_proto.reserved_name.append(n)
  AdjustReservedRange(target_proto, active_proto.reserved_range, skip_reserved_numbers)
  # Special fixup for deprecation of default enum values.
  for tv in target_proto.value:
    if tv.name == 'DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE':
      for sv in shadow_proto.value:
        if sv.number == tv.number:
          assert (sv.number == 0)
          tv.CopyFrom(sv)


# Merge active/shadow DescriptorProtos to a fresh target DescriptorProto.
def MergeActiveShadowMessage(active_proto, shadow_proto, target_proto):
  target_proto.MergeFrom(active_proto)
  shadow_fields = {f.name: f for f in shadow_proto.field}
  skip_reserved_numbers = []
  # For every reserved name, check to see if it's in the shadow, and if so,
  # reintroduce in target_proto.
  del target_proto.reserved_name[:]
  for n in active_proto.reserved_name:
    hidden_n = 'hidden_envoy_deprecated_' + n
    if hidden_n in shadow_fields:
      f = shadow_fields[hidden_n]
      skip_reserved_numbers.append(f.number)
      missing_field = target_proto.field.add()
      missing_field.MergeFrom(f)
      # oneof fields from the shadow need to have their index set to the
      # corresponding index in active/target_proto.
      if missing_field.HasField('oneof_index'):
        oneof_name = shadow_proto.oneof_decl[missing_field.oneof_index].name
        missing_oneof_index = None
        for oneof_index, oneof_decl in enumerate(active_proto.oneof_decl):
          if oneof_decl.name == oneof_name:
            missing_oneof_index = oneof_index
        assert (missing_oneof_index is not None)
        missing_field.oneof_index = missing_oneof_index
    else:
      target_proto.reserved_name.append(n)
  # protoprint.py expects that oneof fields are consecutive, so need to sort for
  # this.
  if len(active_proto.oneof_decl) > 0:
    fields = copy.deepcopy(target_proto.field)
    fields.sort(key=lambda f: f.oneof_index if f.HasField('oneof_index') else -1)
    del target_proto.field[:]
    for f in fields:
      target_proto.field.append(f)
  AdjustReservedRange(target_proto, active_proto.reserved_range, skip_reserved_numbers)
  # Visit nested message types
  del target_proto.nested_type[:]
  shadow_msgs = {msg.name: msg for msg in shadow_proto.nested_type}
  for msg in active_proto.nested_type:
    MergeActiveShadowMessage(msg, shadow_msgs[msg.name], target_proto.nested_type.add())
  # Visit nested enum types
  del target_proto.enum_type[:]
  shadow_enums = {msg.name: msg for msg in shadow_proto.enum_type}
  for enum in active_proto.enum_type:
    MergeActiveShadowEnum(enum, shadow_enums[enum.name], target_proto.enum_type.add())
  # Ensure target has any deprecated sub-message types in case they are needed.
  active_msg_names = set([msg.name for msg in active_proto.nested_type])
  for msg in shadow_proto.nested_type:
    if msg.name not in active_msg_names:
      target_proto.nested_type.add().MergeFrom(msg)


# Merge active/shadow FileDescriptorProtos, returning the resulting FileDescriptorProto.
def MergeActiveShadowFile(active_file_proto, shadow_file_proto):
  target_file_proto = copy.deepcopy(active_file_proto)
  # Visit message types
  del target_file_proto.message_type[:]
  shadow_msgs = {msg.name: msg for msg in shadow_file_proto.message_type}
  for msg in active_file_proto.message_type:
    MergeActiveShadowMessage(msg, shadow_msgs[msg.name], target_file_proto.message_type.add())
  # Visit enum types
  del target_file_proto.enum_type[:]
  shadow_enums = {msg.name: msg for msg in shadow_file_proto.enum_type}
  for enum in active_file_proto.enum_type:
    MergeActiveShadowEnum(enum, shadow_enums[enum.name], target_file_proto.enum_type.add())
  # Ensure target has any deprecated message types in case they are needed.
  active_msg_names = set([msg.name for msg in active_file_proto.message_type])
  for msg in shadow_file_proto.message_type:
    if msg.name not in active_msg_names:
      target_file_proto.message_type.add().MergeFrom(msg)
  return target_file_proto


if __name__ == '__main__':
  active_src, shadow_src, dst = sys.argv[1:]
  active_proto = descriptor_pb2.FileDescriptorProto()
  text_format.Merge(pathlib.Path(active_src).read_text(), active_proto)
  shadow_proto = descriptor_pb2.FileDescriptorProto()
  text_format.Merge(pathlib.Path(shadow_src).read_text(), shadow_proto)
  pathlib.Path(dst).write_text(str(MergeActiveShadowFile(active_proto, shadow_proto)))
istio/envoy
tools/protoxform/merge_active_shadow.py
Python
apache-2.0
6,516
[ "VisIt" ]
dc43ee9873c70cb9b661c4b127401f7042d458f11d13e58e1eef8c025cf57aab
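AdjustReservedRange above copies singleton reserved ranges into the target unless the reserved number was resurrected from the shadow. A small standalone sketch with a toy DescriptorProto (same google.protobuf API as the file above; the message and numbers are invented for illustration):

from google.protobuf import descriptor_pb2

msg = descriptor_pb2.DescriptorProto(name='Toy')
for number in (2, 5):
    rr = msg.reserved_range.add()
    rr.start, rr.end = number, number + 1   # singleton range [n, n+1)

target = descriptor_pb2.DescriptorProto(name='Toy')
skip = [5]                                  # field 5 was recovered from the shadow
for rr in msg.reserved_range:
    if rr.start not in skip:
        target.reserved_range.add().MergeFrom(rr)
# target now reserves only number 2; number 5 is free for the restored field.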
# -*- coding: utf-8 -*- { '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" je opcioni izraz kao sto je "polje1=\'nova vrijednost\'". Ne mozete azurirati ili izbristi rezultati JOIN-a', '# of Houses Damaged': 'Broj oštećenih kuća', '# of Houses Destroyed': 'Broj uništenih kuća', '# of International Staff': 'Broj međunarodnog osoblja', '# of National Staff': 'Broj nacionalnog osoblja', '# of People Deceased': 'Broj prenminulih ljudi', '# of People Injured': 'Broj povrijeđenih osoba', '# of Vehicles': 'Broj vozila', '# Results per query': 'Broj rezultata po upitu', '# selected': '# odabrano', '%(app)s not installed. Ask the Server Administrator to install on Server.': '%(app)s nije instaliran. Pitajte administratora servera da vam to instalira na serveru.', '%(count)s Recipients': '%(count)s primalaca', '%(count)s Roles of the user removed': '%(count)s uloga korisnika obrisano', '%(count)s Users removed from Role': '%(count)s korisnika izbačeno iz uloge', '%(count_of)d translations have been imported to the %(language)s language file': '%(count_of)d prijevoda je uvezeno u %(language)s jezičku datoteku', '%(GRN)s Number': '%(GRN)s broj', '%(GRN)s Status': '%(GRN)s Status', '%(item)s requested from %(site)s': '%(item)s zahtijevano sa %(site)s', '%(label)s contains %(values)s': '%(label)s sadrži %(values)s', '%(label)s contains any of %(values)s': '%(label)s sadrži jedno od %(values)s', '%(label)s does not contain %(values)s': '%(label)s ne sadrži %(values)s', '%(label)s is %(values)s': '%(label)s je %(values)s', '%(label)s like %(values)s': '%(label)s želi %(values)s', '%(label)s not like %(values)s': '%(label)s ne želiu %(values)s', '%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\nAko je tip zahtjeva "%(type)s", molim unesite %(type)s na slijedećem ekranu.', '%(msg)s\r\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\r\nAko je tip zahtjeva "%(type)s", molim unesite %(type)s na slijedećem ekranu.', '%(pe)s in %(location)s': '%(pe)s u %(location)s', '%(PO)s Number': '%(PO)s broj', '%(proj4js)s definition': '%(proj4js)s definicija', '%(quantity)s in stock': '%(quantity)s na zalihi', '%(REQ)s Number': '%(REQ)s broj', '%(resource)s Filter': '%(resource)s Filter', '%(site)s (Recipient)': '%(site)s (primaoc)', '%(site)s has no items exactly matching this request. There may still be other items in stock which can fulfill this request!': '%(site)s nema stavki koje odgovaraju ovom zahtjevu. Možda ima drugih stavki koje mogu ispuniti ovaj zahtjev!', '%(site_label)s Status': '%(site_label)s Status', '%(site_label)s Status added': '%(site_label)s Status dodan', '%(site_label)s Status deleted': '%(site_label)s Status obrisan', '%(site_label)s Status updated': '%(site_label)s Status ažuriran', '%(system_name)s - New User Registered': '%(system_name)s - Novi korisnik registrovan', '%(system_name)s - New User Registration Approval Pending': '%(system_name)s - Zahtjev za registracijom novog korisnika', '%(system_name)s - Verify Email': '%(system_name) - Potvrdite vaš Email', '%(system_name)s has sent an email to %(email)s to verify your email address.nPlease check your email to verify this address. If you do not receive this email please check you junk email or spam filters.': '%(system_name)s je poslao poštu za %(email)s da provjeru vašu adresu elektronske pošte.Molim provjerite vašu elektronsku poštu da ovjerite ovu adresu. 
Ako ne primite poruku elektronske pošte, provjerite vaše spam filtere ili poruke u smeću.', '%m-%d-%Y': '%d-%m-%Y', '%m-%d-%Y %H:%M:%S': '%m-%d-%Y %H:%M:%S', '%s linked to %s': '%s vezan za %s', '%s or %s': '%s ili %s', '%s rows deleted': '%s redova uklonjeno', '%s rows updated': '%s redova ažurirano', '%s selected': '%s izabrano', '%Y-%m-%d': '%d-%m-%Y', '%Y-%m-%d %H:%M': '%d-%m-%Y %H:%M', '%Y-%m-%d %H:%M:%S': '%d-%m-%Y %H:%M:%S', '%Y-%m-%d %H:%M:00': '%d.%m.%Y. %H:%M:00', '& then click on the map below to adjust the Lat/Lon fields': 'i pritisnete na mapu ispod za podešavanje geografske Dužine/Širine', "'%s %%{row} deleted',nrows": "'%s %%{row} obrisan',nrows", "'%s %%{row} updated',nrows": "'%s %%{row} ažurirano',nrows", "'Cancel' will indicate an asset log entry did not occur": "'Otkaži' će indicirati na to se unos sredstva u zapisnik nije desio.", '(filtered from _MAX_ total entries)': '(filtrirano iz _MAX_ elemenata)', '* Required Fields': '* Obavezna polja', '...or add a new bin': '...ili dodaj novu korpu', '0-15 minutes': '0-15 minuta', '1 Assessment': '1 Procjena', '1 location, shorter time, can contain multiple Tasks': '1 lokacija, kraće vrijeme, može sadržavati više Zadataka', '1-3 days': '1-3 dana', '1. Fill the necessary fields in BLOCK CAPITAL letters.': '1. Popunite potrebna polja VELIKIM SLOVIMA.', '15-30 minutes': '15-30 minuta', '2 different options are provided here currently:': '2 različite opcije su pružene trenutno:', '2. Always use one box per letter and leave one box space to separate words.': '2. Uvijek koristite jednu kućicu po slovu i koristite praznu kućicu da odvajate riječi', '2x4 Car': '2x4 auto', '3. Fill in the circles completely.': '3. Kružiće potpuno popunite', '30-60 minutes': '30 do 60 minuta', '3W Report': '3W izvještaj', '4-7 days': '4-7 dana', '4x4 Car': '4x4 auto', '8-14 days': '8-14 dana', '_NUM_ duplicates found': '_NUM_ duplikata nađeno', 'A block of rich text which could be embedded into a page, viewed as a complete page or viewed as a list of news items.': 'Blok bogatog teksta koji se može ugraditi u stranicu, vidljiv kao potpuna strana ili vidljiv kao lista novih stavki.', 'A brief description of the group (optional)': 'Kratki opis grupe (proizvoljno)', 'A catalog of different Assessment Templates including summary information': 'Katalog raznih predložaka procjena uključujući sumarne informacije', 'A collection of Feature Classes which can be displayed together on a map or exported together.': 'Skup klasa karakteristika koje se mogu prikazati na mapi ili izvesti zajedno', 'A file downloaded from a GPS containing a series of geographic points in XML format.': 'Dokument skinut iz GPS-a koji sadrži geografske lokacije u XML formatu', 'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Datoteka u GPX formatu uzeta iz GPS-a čije vremenske oznake mogu biti povezane sa vremenskim oznakama na slikama da bi ih locirali na mapi', 'A file in GPX format taken from a GPS.': 'Datoteka u GPX formatu uzeta s GPS.', 'A library of digital resources, such as photos, documents and reports': 'Biblioteka digitalnih izvora, kao što su fotografije, dokumenti i izvješća', 'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': 'Grupa lokacija se može iskoristiti da se definiše obim pogođene oblasti, ako ne spada unutar jedne administrativne regije.', 'A location group is a set of locations (often, a set of 
administrative regions representing a combined area).': 'Lokacijska grupa je skup lokacija (često set administrativnih regija koje predstavljaju kombinovano područje).', 'A location group is a set of locations (often, a set of administrative regions representing a combined area). Member locations are added to a location group here. Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group. A location group can be used to define the extent of an affected area, if it does not fall within one administrative region. Location groups can be used in the Regions menu.': 'Grupa lokacija je skup lokacija (često, skup administrativnih regija koji predstavlja kombinovano područje). Članske lokacije se dodaje grupi lokacija ovdje. Grupe lokacija se mogu koristiti za filtriranje onoga što je prikazano na karti i na rezultate pretraživanja samo po mjestima unutar grupe lokacija. Grupe lokacija se mogu koristiti za definiranje područja na ugroženom području, ako one ne spadaju u jedan administrativni region. Grupe lokacija mogu se koristiti u meniju regiona.', 'A location group must have at least one member.': 'Grupa lokacije mora imati bar jednog člana', "A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'Lokacija koja određuje geografsko područje ove regije. Ovo može biti mjesto iz lokacijske hijerarhije , ili grupna lokacija , ili lokacija na granici područja', 'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'Oznaka je dodijeljena pojedinačnoj lokaciji u slučaju da postoji potreba za zamjenu oznake dodijeljene Klasi karakteristika.', 'A place within a Site like a Shelf, room, bin number etc.': 'Tačka na mjestu, poput police, sobe, broja korpe itd.', 'A project milestone marks a significant date in the calendar which shows that progress towards the overall objective is being made.': 'Projektni miljokaz predstavlja značajan datum u kalendaru koji pokazuje da je napredak prema glavnom cilju postignut.', 'A Reference Document such as a file, URL or contact person to verify this data.': 'Prateći dokument u vidu datoteke, URL-a ili kontakt osobe za potvrdu ovih podataka.', 'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'Referentni dokument, poput datoteke, URL-a ili kontakt osobe da se verificiraju ovi podaci. 
Možete ukucati prvih nekoliko karaktera naziva dokumenta da bi se povezalo s postojećim dokumentom.', 'A strict location hierarchy cannot have gaps.': 'Stroga hijerarhija lokacija ne može imati rupa', 'A task is a piece of work that an individual or team can do in 1-2 days': 'Zadatak je dio posla koji se samostalno ili u timu može završiti za 1-2 dana.', 'A task is a piece of work that an individual or team can do in 1-2 days.': 'Zadatak je dio posla koji se samostalno ili u timu može završiti za 1-2 dana.', 'A Warehouse is a physical place to store items.': 'Skladište je fizičko mjesto za smještanje predmeta.', 'Abbreviation': 'Skraćenica', 'Ability to customize the list of details tracked at a Shelter': 'Sposobnost da se prilagodi lista detalja praćenih u skloništu', 'Ability to customize the list of human resource tracked at a Shelter': 'Sposobnost prilagođavanja liste ljudskih resursa praćene u skloništu', 'Ability to customize the list of important facilities needed at a Shelter': 'Sposobnost da se prilagodi lista važnih objekata potrebnih u skloništu', 'Ability to Fill Out Surveys': 'Mogućnost ispunjavanja ankete', 'Ability to view Results of Completed and/or partially filled out Surveys': 'Mogućnost pregleda rezultata završenih i/ili djelimično popunjenih anketa', 'Abkhazia': 'Abhazija', 'Able to Respond?': 'U mogućnosti odgovoriti?', 'About': 'O programu', 'ABOUT': 'O', 'About Sahana': 'O Sahana', 'About Sahana Eden': 'O Sahana Eden', 'ABOUT THIS MODULE': 'O OVOM MODULU', 'About this module': 'O ovom modulu', 'Above %s': 'Iznad %s', 'Academic': 'Akademska', 'Accept Push': 'Prihvati guranje', 'Accept unsolicited data transmissions from the repository.': 'Prihvati neplanirane prenose podataka iz repozitorija.', 'ACCESS DATA': 'PRISTUPNI PODACI', 'Access denied': 'Zabranjen pristup', 'Access to education services': 'Pristup obrazovnim uslugama', 'Access to Shelter': 'Pristup skloništu', 'Accessibility of Affected Location': 'Pristupnost pogođenih lokacija', 'Accompanying Relative': 'Član rodbine koji je pratnja', 'Account added': 'Nalog dodana', 'Account Registered - Please Check Your Email': 'Korisnički račun registrovan - molimo provjerite svoj Email', 'Account registered, however registration is still pending approval - please wait until confirmation received.': 'Korisnički nalog registrovan, ali prijava još čeka odobrenje od ovlaštene osobe - molimo pričekajte dok se prijava ne odobri.', 'Accuracy': 'Preciznost', 'Acronym': 'Akronim', "Acronym of the organization's name, eg. 
IFRC.": 'Akronim od naziva organizacije, npr IFRC.', 'act': 'akt', 'Action': 'Akcija', 'ACTION REQUIRED': 'AKCIJA POTREBNA', 'Actionable': 'Djelatno', 'Actionable by all targeted recipients': 'Ima razloga da se djeluje prema svim ciljanim primateljima', 'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Moguće pokrenuti samo od strane imenovanih učesnika vježbe; identifikator vježbe treba da se pojavi u polju <note>', 'Actioned?': 'Riješeno?', 'Actioning officer': 'Zaduženi službenik', 'Actions': 'Akcije', 'Actions taken as a result of this request.': 'Akcije preduzete kao rezultat ovog zahtjeva.', 'Activate': 'Aktiviraj', 'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': 'Aktiviraj događaje iz šablona scenarija za alokaciju odgovarajućih resursa (ljudi, sredstva i objekti).', 'activate to sort column ascending': 'aktivno za sortiranje kolone u rastućem redoslijedu', 'activate to sort column descending': 'aktivno za sortiranje kolone u opadajućem redoslijedu', 'active': 'aktivno', 'Active': 'Aktivno', 'Active Problems': 'Aktivni problemi', 'Active?': 'Aktivan?', 'Activities': 'Aktivnosti', 'Activities matching Assessments': 'Aktivnosti koje odgovaraju procjenama', 'Activities matching Assessments:': 'Aktivnosti koje odgovaraju procjenama:', 'Activities of boys 13-17yrs before disaster': 'Aktivnosti dječaka dobi 13-17 godina prije katastrofe', 'Activities of boys 13-17yrs now': 'Aktivnosti dječaka između 13 i 17 godina', 'Activities of boys <12yrs before disaster': 'Aktivnosti dječaka mlađih od 12 godina prije nepogode', 'Activities of boys <12yrs now': 'Trenutne aktivnosti dječaka <12 godina', 'Activities of children': 'Aktivnosti djece', 'Activities of girls 13-17yrs before disaster': 'Aktivnosti djevojaka 13-17 godina prije katastrofe', 'Activities of girls 13-17yrs now': 'Trenutne aktivnosti djevojčica između 13 i 17 godina', 'Activities of girls <12yrs before disaster': 'Aktivnosti djevojčica mlađih od 12 godina prije katastrofe', 'Activities of girls <12yrs now': 'Aktivnosti djevojčica <12god sada', 'Activities:': 'Aktivnosti:', 'Activity': 'Aktivnost', 'Activity Added': 'Dodana aktivnost', 'Activity added': 'Dodana aktivnost', 'Activity Deleted': 'Obrisana aktivnost', 'Activity Details': 'Detalji aktivnosti', 'Activity Organization': 'Organizacija aktivnosti', 'Activity Organization Added': 'Dodana organizacija aktivnosti', 'Activity Organization Deleted': 'Organizacija aktivnosti obrisana', 'Activity Organization Updated': 'Organizacija aktivnosti ažurirana', 'Activity Organizations': 'Organizacije aktivnosti', 'Activity removed': 'Aktivnost uklonjena', 'Activity Report': 'Izvještaj o aktivnosti', 'Activity Reports': 'Izvještaji aktivnosti', 'Activity Type': 'Tip aktivnosti', 'Activity Type Added': 'Dodan tip aktivnosti', 'Activity Type added to Activity': 'Tip aktivnosti dodan u aktivnost', 'Activity Type added to Project Location': 'Vrsta aktivnosti dodana na lokaciju projekta', 'Activity Type Deleted': 'Izbrisan tip aktivnosti', 'Activity Type removed from Activity': 'Tip aktivnosti uklonjen iz aktivnosti', 'Activity Type removed from Project Location': 'Vrsta aktivnosti uklonjena iz lokacije projekta', 'Activity Type Updated': 'Ažuriran tip aktivnosti', 'Activity Types': 'Tipovi aktivnosti', 'Activity Updated': 'Djelatnost ažurirana', 'Activity updated': 'Aktivnost ažurirana', 'Add': 'Dodati', 'Add %(site_label)s Status': 'Dodaj %(site_label)s status', 'Add a new 
certificate to the catalog.': 'Dodaj novi certifikat u katalog', 'Add a new competency rating to the catalog.': 'Dodaj novu ocjenu sposobnosti u katalog.', 'Add a new course to the catalog.': 'Dodaj novi kurs u katalog', 'Add a New Inventory Location': 'Dodaj novu lokaciju skladišta', 'Add a new job role to the catalog.': 'Dodaj novu poziciju u katalog', 'Add a new program to the catalog.': 'Dodaj novi program u katalog', 'Add a New Relief Item': 'Dodaj novu stavku pomoći', 'Add a new Site from where the Item is being sent.': 'Navedite mjesto gdje se šalje ova stavka.', 'Add a new skill provision to the catalog.': 'Dodaj novu zalihu vještina u katalog.', 'Add a new skill to the catalog.': 'Dodaj novu vještinu u katalog', 'Add a new skill type to the catalog.': 'Dodaj novi tip vještine u katalog', 'Add a new vehicle category': 'Dodaj novu kategoriju vozila', 'Add a new vehicle type': 'Dodaj novi tip vozila.', 'Add a Person': 'Dodaj osobu', 'Add a Reference Document such as a file, URL or contact person to verify this data.': 'Dodaj prateći dokument u vidu datoteke, URL-a ili kontakt osobe za potvrdu ovih podataka.', 'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Dodajte referencu, kao što je dokument, URL ili kontakt osobu da potvrdi ove podatke. Ako ne stavite referencu, prikazat će se vaš mail.', 'Add a Volunteer': 'Dodaj volontera', 'Add Activity': 'Dodaj aktivnost', 'Add Activity Report': 'Dodaj izvještaj o radu', 'Add Activity Type': 'Dodaj tip aktivnosti', 'Add Address': 'Dodaj Adresu', 'Add Affiliation': 'Dodaj namještenje', 'Add Aid Request': 'Dodaj zahtjev za pomoć', 'Add all organizations which are involved in different roles in this project': 'Dodaj sve organizacije koje su uključene u različite uloge u ovom projektu', 'Add Alternative Item': 'Dodaj alternativnu stavku', 'Add an image, such as a Photo.': 'Dodaj sliku,kao sto je fotografija', 'Add an Photo.': 'Dodaj fotografiju.', 'Add Annual Budget': 'Dodaj godišnji budžet', 'Add Appraisal': 'Dodaj ispunjenje', 'Add Assessment': 'Dodaj procjenu', 'Add Assessment Summary': 'Dodaj sažetak o procjeni', 'Add Asset': 'Dodaj sredstvo', 'Add Asset Log Entry - Change Label': 'Dodaj stavku zapisnika sredstava - promijeni oznaku', 'Add Availability': 'Dodaj dostupnost', 'Add Baseline': 'Dodaj referentnu tačku', 'Add Baseline Type': 'Dodaj tip referentne tačke', 'Add Bed Type': 'Dodaj vrstu ležaja', 'Add Beneficiaries': 'Dodaj korisnike', 'Add Bin Type': 'Dodaj tip korpe', 'Add Bins': 'Dodaj korpe', 'Add Bookmark': 'Dodaj zabilješku', 'Add Branch Organization': 'Dodaj pripadnu organizaciju', 'Add Brand': 'Dodaj marku proizvoda', 'Add Budget': 'Dodaj budžet', 'Add Bundle': 'Dodaj paket', 'Add Camp': 'Dodaj kamp', 'Add Camp Service': 'Dodaj uslugu kampa', 'Add Camp Status': 'Dodaj status kampa', 'Add Camp Type': 'Dodaj tip kampa', 'Add Campaign Message': 'Dodaj poruku kampanje', 'Add Catalog': 'Dodaj Katalog', 'Add Catalog Item': 'Dodaj katalog stavku', 'Add Catalog.': 'Dodaj katalog.', 'Add Category': 'Dodaj kategoriju', 'Add Category<>Sub-Category<>Catalog Relation': 'Dodaj Kategorija<>Podkategorija<>kataloški odnos', 'Add Certificate': 'Dodaj certifikat', 'Add Certificate for Course': 'Dodaj certifikat za kurs', 'Add Certification': 'Dodja certifikat', 'Add Cholera Treatment Capability Information': 'Dodajte informacije o sposobnosti liječenja kolere', 'Add Cluster': 'Dodaj skup', 'Add Cluster Subsector': 'Dodaj 
podsektor skupa', 'Add Competency': 'Dodaj stručnost', 'Add Competency Rating': 'Dodaj ocjenu kompetentnosti', 'Add Config': 'Dodaj konfiguraciju.', 'Add Contact': 'Dodaj kontakt', 'Add Contact Information': 'Dodajte kontakt informacije', 'Add Course': 'Dodaj kurs', 'Add Course Certicate': 'Dodaj certifikat kursa', 'Add Credential': 'Dodaj akreditiv', 'Add Credentials': 'Dodaj akreditive', 'Add Data to Theme Layer': 'Dodaj podatke tematskom sloju', 'Add Dead Body Report': 'Dodaj izvještaj o preminulim osobama', 'Add Disaster Victims': 'Dodaj žrtve nepogode', 'Add Distribution': 'Dodaj distribuciju', 'Add Distribution Item': 'Dodaj stavku raspodjele', 'Add Distribution.': 'Dodaj distribuciju', 'Add Document': 'Dodaj dokument', 'Add Donation': 'Dodaj donaciju', 'Add Donor': 'Dodaj donatora', 'Add Education Detail': 'Dodaj detalje o obrazovanju', 'Add Education Level': 'Dodaj nivo obrazovanja', 'Add Email Account': 'Dodaj e-mail nalog', 'Add Facility': 'Dodaj objekat', 'Add Feature Class': 'Dodaj klasu karakteristika', 'Add Feature Group': 'Dodaj grupu karakteristika', 'Add Feature Layer': 'Dodaj sloj karakteristika', 'Add Find Report': 'Dodaj novi izvještaj o traženju', 'Add Flood Report': 'Dodaj izvještaj o poplavi', 'Add GIS Feature': 'Dodaj GIS karakteristiku', 'Add GPS data': 'Dodaj GPS podatke', 'Add Group': 'Dodaj grupu', 'Add Group Member': 'Dodaj člana grupe', 'Add Group Membership': 'Dodaj članstvo grupe', 'Add Hospital': 'Dodaj Bolnicu', 'Add Hours': 'Dodaj sate', 'Add Human Resource': 'Dodaj ljudski resurs', 'Add Identification Report': 'Dodaj izvještaj o identifikaciji', 'Add Identity': 'Dodaj identitet', 'Add Image': 'Dodaj sliku', 'Add Impact': 'Dodaj utjecaj', 'Add Impact Type': 'Dodaj tip utjecaja', 'Add Incident': 'Dodaj incident', 'Add Incident Report': 'Dodaj izvještaj o incidentu', 'Add Incoming Email': 'Dodaj dolaznu e-pošte', 'Add Incoming SMS': 'Dodaj dolazni SMS', 'Add Inventory Item': 'Dodaj stavkuinventara', 'Add Inventory Store': 'Dodaj novi smještaj inventara', 'Add Item': 'Dodaj stavku', 'Add Item (s)': 'Dodaj stavku', 'Add Item Catalog': 'Dodaj katalog stavki', 'Add Item Catalog Category': 'Dodaj kategoriju stavke kataloga', 'Add Item Category': 'Dodaj kategoriju stavke', 'Add Item Pack': 'Dodaj paket stavki', 'Add Item Packet': 'Dodaj paket stavki', 'Add Item Sub-Category': 'Dodaj podkategoriju stavke', 'Add Item to Catalog': 'Dodaj stavku u katalog', 'Add Item to Commitment': 'Dodaj stavku zaduženja', 'Add Item to Inventory': 'Dodaj stavku u inventar', 'Add Item to Request': 'Dodati stavku na zahtjev', 'Add Item to Shipment': 'Dodaj Stavku u Pošiljku', 'Add Item to Stock': 'Dodaj stavku u zalihu', 'Add Job Role': 'Dodajte poziciju za posao', 'Add Key': 'Dodaj ključ', 'Add Keyword': 'Dodaj ključnu riječ', 'Add Kit': 'Dodaj komplet', 'Add Layer': 'Dodaj sloj', 'Add Layer to this Profile': 'Dodaj sloj u profil', 'Add Level 1 Assessment': 'Dodaj procjenu nivoa 1', 'Add Level 2 Assessment': 'Dodaj 2. 
level procjene', 'Add Line': 'Dodaj liniju', 'Add Location': 'Dodaj Lokaciju', 'Add Locations': 'Dodaj lokacije', 'Add Log Entry': 'Dodaj stavku zapisnika', 'Add main Item Category.': 'Dodaj glavnu kategoriju stavke', 'Add main Item Sub-Category.': 'Dodaj glavnu podkategoriju stavke', 'Add Map Configuration': 'Dodaj podešenje mape', 'Add Marker': 'Dodaj oznaku', 'Add Member': 'Dodaj novog člana', 'Add Membership': 'Dodaj članstvo', 'Add Message': 'Dodaj poruku', 'Add Metadata': 'Dodaj metapodatke', 'Add Mission': 'Dodaj misiju', 'Add Mobile Commons Settings': 'Dodaj mobilne postavke', 'Add Need': 'Dodaj potrebu', 'Add Need Type': 'Dodaj tip potrebe', 'Add New': 'Dodaj nov', 'Add New Activity': 'Dodaj novu aktivnost', 'Add New Address': 'Dodaj novu adresu', 'Add New Aid Request': 'Dodaj novi zahtjev za pomoć', 'Add New Alternative Item': 'Dodaj novi alternativni artikl', 'Add New Assessment': 'Dodaj novu procjenu', 'Add New Assessment Summary': 'Dodaj novi rezime procjene', 'Add New Asset': 'Dodaj novo sredstvo', 'Add New Baseline': 'Dodaj novu referentnu tačku', 'Add New Baseline Type': 'Dodaj novi tip referentne tačke', 'Add New Bin': 'Dodaj novu korpu', 'Add New Bin Type': 'Dodaj novi tip korpe', 'Add New Brand': 'Dodaj novu marku', 'Add New Budget': 'Dodaj novi budžet', 'Add New Bundle': 'Dodaj novi paket', 'Add New Camp': 'Dodaj Novi Kamp', 'Add New Camp Service': 'Dodaj novu uslugu Kampa', 'Add New Camp Type': 'Dodaj novi tip kampa', 'Add New Catalog': 'dodaj novi katalog', 'Add New Catalog Item': 'Dodaj novu stavku kataloga', 'Add New Cluster': 'Dodaj novi skup', 'Add New Cluster Subsector': 'Dodajte novi podsektor skupa', 'Add New Commitment Item': 'Dodaj novo zaduženje', 'Add New Config': 'Dodaj novu konfiguraciju.', 'Add New Contact': 'Dodaj Novi Kontakt', 'Add New Credential': 'Dodaj novi akreditiv', 'Add New Distribution': 'Dodaj novu raspodjelu', 'Add New Document': 'Dodaj novi dokument', 'Add New Donor': 'Dodaj novog donatora', 'Add New Entry': 'Dodaj novi element', 'Add New Event': 'Dodaj novi događaj', 'Add New Facility': 'Dodaj novi objekt', 'Add New Feature Class': 'Dodaj novu klasu karakteristika', 'Add New Feature Group': 'Dodaj novu grupu karakteristika', 'Add New Feature Layer': 'Dodaj novi sloj karakteristika', 'Add New Find Report': 'Dodaj novi izvještaj o traženju', 'Add New Flood Report': 'Dodaj novi izvještaj o poplavi', 'Add New Group': 'Dodaj novu grupu', 'Add new Group': 'Dodaj novu grupu', 'Add New Group Membership': 'Dodaj novo članstvo grupe', 'Add New Home': 'Dodaj novi dom', 'Add New Hospital': 'Dodaj novu bolnicu', 'Add New Human Resource': 'Dodaj novi ljudski resurs', 'Add New Identity': 'Dodaj novi identitet', 'Add New Image': 'Dodaj novu sliku', 'Add New Impact': 'Dodaj novi utjecaj', 'Add New Impact Type': 'Dodaj novi tip utjecaja', 'Add New Incident': 'Dodaj novi incident', 'Add New Incident Report': 'Dodaj novi izvještaj o incidentu', 'Add new Individual': 'Dodaj novu osobu', 'Add New Information': 'Dodaj nove informacija', 'Add New Inventory Item': 'Dodaj novi artikl u inventar', 'Add New Inventory Store': 'Dodaj novi smještaj inventara', 'Add New Item': 'Dodajte novu stavku', 'Add New Item Catalog': 'Kreiraj novu stavku kataloga', 'Add New Item Catalog Category': 'Dodaj novu kategoriju stavke kataloga', 'Add New Item Category': 'Dodaj novu kategoriju', 'Add New Item Pack': 'Dodaj novi paket stavki', 'Add New Item Packet': 'Dodaj novi paket staviki', 'Add New Item Sub-Category': 'Dodaj novu podkategoriju stavke', 'Add New Item to Kit': 'Dodaj novu 
stavku u komplet', 'Add New Key': 'Dodaj novi ključ', 'Add New Kit': 'Dodaj novi komplet', 'Add New Layer': 'Dodaj novi sloj', 'Add New Level 1 Assessment': 'Dodaj Procjenu za Novi Nivo 1', 'Add New Level 2 Assessment': 'Dodaj novu procjenu nivoa 2', 'Add New Location': 'Dodaj novu lokaciju', 'Add New Log Entry': 'Dodaj novi unos zapisnika', 'Add New Map Configuration': 'Dodajte novu konfiguraciju plana', 'Add New Marker': 'Dodaj novi marker', 'Add New Member': 'Dodajte Novog Člana', 'Add New Membership': 'Dodaj novo članstvo', 'Add New Metadata': 'Dodaj novi metapodatak ', 'Add New Need': 'Dodajte novu potrebu', 'Add New Need Type': 'Dodaj Novi Tip Potrebe', 'Add New Note': 'Kreiraj novu bilješku', 'Add New Office': 'Dodaj novi ured', 'Add New Organization': 'Dodaj novu organizaciju', 'Add New Partner': 'Dodaj novog partnera', 'Add new Patient': 'Dodaj novog pacijenta', 'Add New Patient': 'Dodaj novog pacijenta', 'Add New Peer': 'Dodaj novog suradnika', 'Add New Person': 'Dodaj novu osobu.', 'Add New Person to Commitment': 'Dodaj novu osobu u zaduženje', 'Add new person.': 'Dodaj novu osobu.', 'Add New Photo': 'Dodaj novu fotografiju', 'Add New Population Statistic': 'Dodati novu statistiku populacije', 'Add new position.': 'Dodaj novu poziciju.', 'Add New Problem': 'Dodaj novi problem', 'Add New Project': 'Dodaj novi projekt', 'Add new project.': 'Dodaj novi projekat', 'Add New Projection': 'Dodaj novu projekciju', 'Add New Rapid Assessment': 'dodaj novu brzu procjenu', 'Add New Received Item': 'Dodaj novi primljeni predmet', 'Add New Record': 'Dodaj novi zapis', 'Add New Relative': 'Dodaj novog srodnika', 'Add New Relief Item': 'Dodaj novu stavku pomoći', 'Add New Report': 'Dodaj novi izvještaj', 'Add New Request': 'Dodaj novi zahtjev', 'Add New Request Item': 'Dodaj novu stavku zahtjeva', 'Add New Resource': 'Dodaj novi resurs', 'Add New Resource Type': 'Dodaj novi tip resursa', 'Add New Response': 'Dodaj novi odgovor', 'Add New River': 'dodaj novu rijeku', 'Add New Role': 'Dodaj novu ulogu', 'Add New Role to User': 'Dodajte novu ulogu korisniku', 'Add New Room': 'Dodaj novu prostoriju', 'Add New Scenario': 'Dodaj novi scenarij', 'Add New School District': 'Dodaj novi školski rejon', 'Add New School Report': 'Dodaj novi izvještaj o školama', 'Add New Sent Item': 'Dodaj novu poslanu stavku', 'Add New Setting': 'Dodaj novu postavku', 'Add New Shelter': 'Dodaj novo sklonište', 'Add New Shelter Service': 'Dodavanje nove usluge skloništa', 'Add New Shelter Type': 'Dodaj novi tip skloništa', 'Add New Shipment to Send': 'Dodaj novu pošiljku u slanje', 'Add New Site': 'Dodaj novo mjesto', 'Add New Skill': 'Dodaj novu vještinu', 'Add New Skill Type': 'Dodaj novi tip vještine', 'Add New Solution': 'Dodaj novo rješenje', 'Add New Source': 'Dodaj novi izvor', 'Add New Staff': 'Dodaj novo osoblje', 'Add New Staff Member': 'Dodaj novog člana osoblja', 'Add new staff role.': 'Dodati u ulogu osoblja', 'Add New Staff Type': 'Dodaj novi tip osoblja', 'Add new staff.': 'Dodaj novo osoblje.', 'Add New Storage Location': 'Dodaj novu lokaciju o smještaju', 'Add New Subsector': 'Dodaj novi podsektor', 'Add New Survey Answer': 'Dodaj novi odgovor za anketu', 'Add New Survey Question': 'Dodaj novo anketno pitanje', 'Add New Survey Section': 'Dodaj novo pitanje za anketu', 'Add New Survey Series': 'Dodaj novi niz anketa', 'Add New Survey Template': 'Dodajte novi ankentni šablon', 'Add New Task': 'Dodaj novi zadatak', 'Add New Team': 'Dodaj novi tim', 'Add New Theme': 'Dodaj novu temu', 'Add New Ticket': 'Dodaj 
novu karticu', 'Add New Track': 'Dodaj novo praćenje', 'Add New Unit': 'Dodaj novu jedinicu', 'Add New User': 'Dodaj novog korisnika', 'Add New User to Group': 'Doda novog korisnika u grupu', 'Add New User to Role': 'Dodaj novog korisnika ulozi', 'Add New Vehicle': 'Dodaj novo vozilo', 'Add New Vehicle Type': 'Dodaj novi tip VOZILA', 'Add New Volunteer': 'Dodaj novog volontera', 'Add New Warehouse': 'Dodajte novo skladište', 'Add New Warehouse Item': 'Dodaj novu stavku skladišta', 'Add Note': 'Dodaj bilješku', 'Add Office': 'Dodaj ured', 'Add or Update': 'Dodavanje ili ažuriranje', 'Add Order': 'Dodaj narudžbu', 'Add Organization': 'Dodaj organizaciju', 'Add Organization Domain': 'Dodaj domenu organizaciju', 'Add Organization Needs': 'Dodaj potrebe organizacije', 'Add Organization to Activity': 'Dodaj organizaciju u aktivnost', 'Add Organization to Project': 'Dodaj organizaciju projektu', 'Add Participant': 'Dodaj učesnika', 'Add Partner': 'Dodaj partnera', 'Add Peer': 'Dodaj saradnika', 'Add People to Commitment': 'Dodaj ljude u zaduženje', 'Add Person': 'Dodaj osobu', 'Add Person to Commitment': 'Dodaj osobu u zaduženje', "Add Person's Details": 'Dodaj detalje o osobi', 'Add Personal Effects': 'Dodaj vlastite efekte', 'Add Photo': 'Dodajte sliku', 'Add Point': 'Dodaj tačku', 'Add Polygon': 'Dodaj mnogougao', 'Add Population Statistic': 'Dodaj statistiku o stanovništvu', 'Add Position': 'Dodaj poziciju', 'Add Problem': 'Dodaj problem', 'Add Professional Experience': 'Dodaj profesionalno iskustvo', 'Add Profile Configuration for this Layer': 'Dodaj konfiguraciju profila za ovaj sloj', 'Add Project': 'Dodaj projekat', 'Add Projection': 'Dodajte projekciju', 'Add Projections': 'Dodaj projekciju', 'Add Question': 'Dodaj pitanje', 'Add Rapid Assessment': 'Dodaj Brzu Procjenu', 'Add Recipient': 'Dodaj primaoca', 'Add Recipient Site': 'Dodaj mjesto primaoca', 'Add Recipient Site.': 'Dodaj lokaciju primaoca', 'Add Record': 'Dodaj zapis', 'Add Recovery Report': 'Dodaj izvještaj o pronalaženju', 'Add Reference Document': 'Dodaj prateći/referentni dokument', 'Add Region': 'Dodaj područje', 'Add Relief Item': 'Dodaj stavku pomoći', 'Add Report': 'Dodaj izvještaj', 'Add Request': 'Dodaj zahtjev', 'Add Request Detail': 'Dodaj detalje o zahtjevu', 'Add Request Template': 'Dodaj predložak zahtjeva', 'Add Resource': 'Dodaj Resurs', 'Add Resource Type': 'Dodaj tip resursa', 'Add Response': 'Dodaj odgovor', 'Add Response Summary': 'Dodaj sumarni odgovor', 'Add River': 'Dodaj rijeku', 'Add Role': 'Dodaj ulogu', 'Add Room': 'Dodajte prostoriju', 'Add RSS Settings': 'Dodaj RSS Postavke', 'Add School District': 'Dodaj školski rejon', 'Add School Report': 'Dodaj školski izvještaj', 'Add Section': 'Dodaj sekciju', 'Add Sector': 'Dodaj sektor', 'Add Sender Site.': 'Dodaj lokaciju pošiljaoca.', 'Add Service': 'Dodaj usluga', 'Add Service Profile': 'Dodaj profil usluga', 'Add Setting': 'Dodaj Postavke', 'Add Shelter': 'Dodaj sklonište', 'Add Shelter Service': 'Dodaj uslugu skloništa', 'Add Shelter Type': 'Dodaj tip skloništa', 'Add Site Needs': 'Dodaj potrebe mjesta', 'Add Skill': 'Dodaj vještinu', 'Add Skill Equivalence': 'Dodaj ekvivalenciju vještine', 'Add Skill Provision': 'Dodaj pružanje vještina', 'Add Skill to Request': 'Dodati vještinu u zahtjev', 'Add Skill Type': 'Dodaj tip vještine', 'Add Skill Types': 'Dodaj tip vještine', 'Add Solution': 'Dodaj rješenje', 'Add Source': 'Dodaj izvor', 'Add Staff': 'Dodaj članove osoblja', 'Add Staff Member': 'Dodaj člana osoblja', 'Add staff members': 'Dodaj članove 
osoblja', 'Add Staff Type': 'Dodaj tip osoblja', 'Add Status': 'Dodaj status', 'Add Stock to Warehouse': 'Dodaj zalihu u skladište', 'Add Storage Bin Type': 'Dodaj tip korpe za smještaj', 'Add Storage Location': 'Dodaj lokaciju skladištenja', 'Add strings manually': 'Dodaj stringove ručno', 'Add strings manually through a text file': 'Dodaj nizove znakova ručno kroz tekstualnu datoteku', 'Add Sub-Category': 'Traži potkategoriju', 'Add Subscription': 'Dodaj pretplatu', 'Add Subsector': 'Dodaj podsektor', 'Add Survey Answer': 'Dodaj odgovor na istraživanje', 'Add Survey Question': 'Dodajte anketno pitanje', 'Add Survey Section': 'Dodajte anketnu sekciju', 'Add Survey Series': 'Dodajte niz anketa', 'Add Survey Template': 'Dodaj predložak anketi', 'Add Symbology to Layer': 'Dodaj značenje simbola na sloj', 'Add Task': 'Dodaj zadatak', 'Add Team': 'Dodaj tim', 'Add Team Member': 'Dodaj člana tima', 'Add the Storage Bin Type.': 'Dodaj tip korpe za smještaj', 'Add the Storage Location where this bin is located.': 'Dodaj lokaciju gdje je ova korpa.', 'Add the Storage Location where this this Bin belongs to.': 'Dodaj lokaciju gdje ova korpa pripada.', 'Add Theme': 'Dodaj temu', 'Add this entry': 'Dodaj ovaj unos', 'Add Ticket': 'Dodaj karticu', 'Add to a Team': 'Dodaj u Tim', 'Add to Bin': 'Dodaj u korpu', 'Add to budget': 'Dodati budžetu', 'Add to Bundle': 'Dodaj u paket', 'Add to Catalog': 'Dodaj u katalog', 'Add to Feature Group': 'Dodaj u grupu karakteristika', 'Add Training': 'Dodaj trening', 'Add Translation Language': 'Dodaj jezik za prevođenje', 'Add Twilio Settings': 'Dodaj Twilio postavke', 'Add Twitter Search Query': 'Dodaj Twitter upit za pretragu', 'Add Unit': 'Dodaj Jedinicu', 'Add User': 'Dodaj korisnika', 'Add Vehicle': 'Dodaj vozilo', 'Add Vehicle Category': 'Dodaj kategoriju vozila', 'Add Vehicle Detail': 'Dodaj detalje o vozilu', 'Add Vehicle Details': 'Dodaj detalje o vozilu', 'Add Vehicle Type': 'Dodaj tip vozila', 'Add Volunteer': 'Dodajte volontera', 'Add Volunteer Availability': 'Dodaj dostupnost volontera', 'Add Volunteer Registration': 'Dodaj registraciju volontera', 'Add Volunteer Role': 'Dodaj ulogu volontera', 'Add volunteers': 'Dodaj volontere', 'Add Warehouse': 'Dodaj skladište', 'Add Warehouse Item': 'Dodaj stavku skladišta', 'Add...': 'Dodaj...', 'Add/Edit/Remove Layers': 'Dodaj/Uredi/Obriši slojeve', 'added': 'dodano', 'Added to Group': 'Dodano u grupu', 'Added to Team': 'Dodano u tim', 'Additional Beds / 24hrs': 'Dodatni kreveti / 24 sata', 'Additional Comments': 'Dodatni komentari', 'Additional quantity quantifier – i.e. “4x5”.': 'Dodatni kvantifikator količine, tj. “4x5”.', 'Address': 'Adresa', 'Address added': 'Dodana adresa', 'Address deleted': 'Obrisana adresa', 'Address Details': 'Detalji adrese', 'Address Found': 'Pronađena adresa', 'Address Mapped': 'Adresa mapirana', 'Address NOT Found': 'Nije pronađena adresa', 'Address NOT Mapped': 'Adresa NIJE mapirana', "Address of an image to use for this Layer in the Legend. This allows use of a controlled static image rather than querying the server automatically for what it provides (which won't work through GeoWebCache anyway).": 'Adresa slike koja će se koristiti za ovaj sloj u legendi. 
Ovo će omogućiti upotrebu kontrolisane statičke slike umjesto automatskog upita servera za ono što on pruža (što neće raditi s GeoWebCache )', 'Address Type': 'Tip adrese', 'Address updated': 'Ažurirana adresa', 'Addresses': 'Adrese', 'Adequate': 'Odgovarajuće', 'Adequate food and water available': 'Dostupna adekvatna hrana i voda', 'Adjust Item Quantity': 'Prilagodi količinu stavke', 'Adjust Items due to Theft/Loss': 'Prilagodi stavke zbog krađe/gubitka', 'Adjust Stock': 'Prilagodi zalihu', 'Adjust Stock Item': 'Prilagodi stavku zalihe', 'Adjust Stock Levels': 'Prilagodi nivo zalihe', 'Adjustment created': 'Prilagođenje kreirano', 'Adjustment deleted': 'Prilagođenje obrisano', 'Adjustment modified': 'Prilagođenje izmijenjeno', 'Admin Email': 'Email administratora', 'Admin Name': 'Ime administratora', 'Admin Tel': 'Telefon administratora', 'Administration': 'Administracija', 'Admissions/24hrs': 'Ulazi/24 sata', 'Adolescent (12-20)': 'Adolescent (12-20)', 'Adolescent participating in coping activities': 'Učestvovanje adolescenata u aktivnostima prilagođavanja', 'Adult (21-50)': 'Odrasli (21-50)', 'Adult female': 'Odrasla ženska osoba', 'Adult ICU': 'Intenzivna njega za odrasle', 'Adult male': 'Odrasli muškarac', 'Adult Psychiatric': 'Psihijatar za odrasle', 'Adults in prisons': 'Odrasli u zatvoru', 'advanced': 'napredno', 'Advanced Bin Search': 'Napredna pretraga korpi', 'Advanced Catalog Search': 'Napredna pretraga kataloga', 'Advanced Category Search': 'Napredna pretraga kategorija', 'Advanced Item Search': 'Napredna pretragastavki', 'Advanced Sub-Category Search': 'Napredna pretraga podkategorije', 'Advanced Unit Search': 'Napredna pretragajedinica', 'Advanced:': 'Napredno:', 'Advisory': 'Savjeti', 'Advocacy': 'Advokatura', 'Affected Persons': 'Osobe na koje je bio utjecaj', 'Affiliation added': 'Preduzeće dodano', 'Affiliation deleted': 'Preduzeće obrisano', 'Affiliation Details': 'Detalji preduzeća', 'Affiliation updated': 'Preduzeće ažurirano', 'Affiliations': 'Preduzeća', 'Afghanistan': 'Afganistan', 'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'Nakon što kliknete na dugme, pojavit će se niz stavki u paru jedan za drugim. Molimo da odaberete jedno rješenje iz svakog para koje preferirate.', 'After clicking on the Vote button ... (#TODO [String]) Please select the one item from each pair that you prefer over the other.': 'Nakon što kliknete na dugme glasaj, (#TODO [String]). 
Molimo da odaberete jednu stavku iz svakog para koje preferirate.', 'Age': 'Starost', 'Age group': 'Starosna grupa', 'Age Group': 'Starosna grupa', 'Age group does not match actual age.': 'Starosna grupa ne odgovara stvarnim godinama.', 'Aggravating factors': 'Otežavajući faktori', 'Agriculture': 'Poljoprivreda', 'Aid Management': 'Upravljanje pomoći', 'Aid Request': 'Dodaj zahtjev', 'Aid Request added': 'Zahtjev za pomoć dodan', 'Aid Request Details': 'Detalji o zahtjevu za pomoć', 'Aid Request updated': 'Zahtijev za pomoć je ažuriran', 'Aid Requests': 'Dodaj zahtjeve', 'Air Transport Service': 'Usluga zračnog prijevoznog sredstva', 'Aircraft Crash': 'Pad aviona', 'Aircraft Hijacking': 'Avionska otmica', 'Aircraft Maximum Size': 'Maksimalna veličina aviona', 'Airport': 'Aerodrom', 'Airport added': 'Aerodrom dodan', 'Airport Closure': 'Zatvaranje aerodroma', 'Airport deleted': 'Aerodrom obrisan', 'Airport Details': 'Detalji aerodroma', 'Airport updated': 'Aerodrom ažuriran', 'Airports': 'Aerodromi', 'Airspace Closure': 'Zatvaranje zračnog prostora', 'Albania': 'Albanija', 'Alcohol': 'Alkohol', 'Alcoholics': 'Alkoholičari', 'Alert': 'Uzbuna', 'Alimentary Support Vehicle': 'Vozila za hitnu podršku', 'All': 'Sve', 'ALL': 'Sve', 'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'Svi podaci obezbjeđeni od strane Sahana Software fondacije sa ove stranice su licencirani pod Creative Commons Attribution licencom. Međutim, svi podaci ne potiču odavde. Molimo, pregledajte polje izvora svakog pristupa.', 'All data provided by the Sahana Software Foundation from this site is licensed under a Creative Commons Attribution license. However, not all data originates here. Please consult the source field of each entry.': 'Svi podaci obezbjeđeni od strane Sahana Software fondacije sa ove stranice su licencirani pod Creative Commons Attribution licencom. Međutim, svi podaci ne potiču odavde. 
Molimo, pregledajte polje izvora svakog pristupa.', 'All Entities': 'Sve jedinke', 'All Inbound & Outbound Messages are stored here': 'Sve ulazne i izlazne poruke su smještene ovdje', 'All Open Tasks': 'Svi otvoreni zadaci', 'All Pledges': 'Svi zahtjevi', 'All Records': 'Svi zapisi', 'all records': 'svi zapisi', 'All Requested Items': 'Sve zahtijevani stavke', 'All Resources': 'Svi Resursi', 'All selected': 'Sve izabrano', 'All Tasks': 'Svi zadaci', 'Allowed to push': 'Dozvoljeno gurnuti', 'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'dozvoljava da se budžet uspostavi na osnovu troškova osoblja i opreme, uključujući bilo koje režijske troškove administratora.', 'Allows a Budget to be drawn up': 'Dozvoljava izradu budžeta', 'Allows authorized users to control which layers are available to the situation map.': 'Omogućava ovlaštenim korisnicima da kontrolišu koji slojevi su dostupni na karti situacije.', 'Allows authorized users to upload multiple features into the situation map.': 'Omogućava ovlaštenim korisnicima da pošalju više karakteristima na kartu situacije.', 'allows for creation and management of assessments.': 'dozvoljava kreiranje i upravljanje procjenama.', 'allows for creation and management of surveys to assess the damage following a natural disaster.': 'dozvoljava kreiranje i upravljanje istraživanjima za procjenu nesreće uzrokovane prirodnom katastrofom', 'Already in this Feature Group!': 'Već je u ovoj grupi karakteristika', 'Alternative infant nutrition in use': 'Alternativa u prehrani djece', 'Alternative Item': 'Alternativna stavka', 'Alternative Item added': 'Alternativna stavka dodana', 'Alternative Item deleted': 'Alternativna stavka obrisana', 'Alternative Item Details': 'Detalji alternativne stavke', 'Alternative Item updated': 'Alternativna stavka ažurirana', 'Alternative Items': 'Alternativne stavke', 'Alternative places for studying': 'Alternativna mjesta za učenje', 'Alternative places for studying available': 'Dostupna alternativna mjesta za studiranje', 'always update': 'uvijek ažuriraj', 'Ambulance Service': 'Usluge u ambulanti', 'Amount': 'Iznos', 'Amount of the Project Budget spent at this location': 'Iznos budžeta projekta potrošen na ovoj lokaciji', 'An Assessment Template can be selected to create a Disaster Assessment. Within a Disaster Assessment, responses can be collected and results can analyzed as tables, charts and maps': 'Predložak procjene se može izabrati za kreiranje procjene katastrofe. 
Unutar procjene katastrofe, odgovori se mogu sakupiti a rezultati analizirani kao tabele, dijagrami i mape.', 'An error occured, please %(reload)s the page.': 'Desila se greška, molim %(reload)s stranicu.', 'An ESRI Shapefile (zipped)': 'ESRI indeks datoteka s likovima (kompresovana zip)', 'an individual/team to do in 1-2 days': 'pojedinac-tim da to uradi u 1 do 2 dana', 'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'Sistem prijema, sistem upravljanja skladištem, praćenje robe, upravljanje lancem nabavke, nabavka i druge mogućnosti upravljanja sredstvima i resursima.', 'An interactive map the situation.': 'Interaktivna mapa situacije.', 'An Item Category must have a Code OR a Name.': 'Kategorija sredstva mora imati šifru ili ime.', 'An item which can be used in place of another item': 'Stavka koja se može koristiti umjesto druge stavke', 'Analysis of assessments': 'Analiza procjena', 'Analysis of Completed Surveys': 'Analiza kompletiranih anketa', 'Analyze with KeyGraph': 'Analiza pomoću KeyGraph', 'Anamnesis': 'Anamneza', 'and': 'i', 'Andorra': 'Andora', 'angular': 'uglasto', 'Animal Die Off': 'Izumiranje životinja', 'Animal Feed': 'Hrana za životinje', 'Animals': 'Životinje', 'Annual Budget': 'Godišnji budžet', 'Annual Budget deleted': 'Obrisan godišnji budžet', 'Annual Budget updated': 'Ažuriran godišnji budžet', 'Annual Budgets': 'Godišnji budžeti', 'Anonymous': 'Anoniman', 'anonymous user': 'anonimni korisnik', 'Answer Choices (One Per Line)': 'Izbor odgovora (Jedan po liniji)', 'Anthropology': 'Antropologija', 'Antibiotics available': 'Dostupni antibiotici', 'Antibiotics needed per 24h': 'Antibiotici potrebni u 24 sata', 'Antigua and Barbuda': 'Antigua i Barbuda', 'Any': 'Bilo koji', 'ANY': 'BILO KOJE', 'Any comments about this sync partner.': 'Neki komentari o sinhronizacijskom partneru.', 'API is documented here': 'Aplikacijski programerski interfejs (API) je ovdje dokumentiran', 'API Key': 'API ključ', 'Apparent Age': 'Prividne godine', 'Apparent Gender': 'Vidljiv spol', 'Appearance': 'Izgled', 'Applicable to projects in Pacific countries only': 'Primjenjivo samo na projekte u pacifičkim zemljama', 'Application': 'Aplikacija', 'Application Deadline': 'Zadnji rok za prijavu', 'Application Permissions': 'Aplikacijske dozvole', 'Apply': 'Primijeni', 'Appraisal added': 'Ispunjenje dodano', 'Appraisal deleted': 'Ispunjenje obrisano', 'Appraisal Details': 'Detalji ispunjenja', 'Appraisal updated': 'Ispunjenje ažurirano', 'Appraisals': 'Ispunjenja', 'Appropriate clothing available': 'Odgovarajuća odjeća dostupna', 'Appropriate cooking equipment/materials in HH': 'Odgovarajuća oprema za kuhanje/materijali u domaćinstvu', 'Approve': 'Odobri', 'approved': 'odobreno', 'Approved': 'Odobreno', 'Approved By': 'Potvrdio', 'Approver': 'Onaj koji odobrava', 'Approx. 
number of cases/48h': 'Približan broj slučajeva u 48 h', 'Approximately how many children under 5 with diarrhea in the past 48 hours?': 'Približno koliko djece s dijarejom ispod 5 godina u zadnjih 48 sati?', 'Arabic': 'Arapski', 'ArcGIS REST Layer': 'ArcGIS REST sloj', 'Archive not Delete': 'Arhiva ne briši', 'Arctic Outflow': 'Arktički odljev', 'Are breast milk substitutes being used here since the disaster?': 'Da li se koriste zamjene za majčino mlijeko nakon katastrofe?', 'are mandatory and must be filled': 'su obavezna polja i moraju biti popunjena', 'Are there adults living in prisons in this area?': 'Ima li odraslih u zatvoru u ovom području?', 'Are there alternative places for studying?': 'Postoje li alternativna mjesta za studiranje?', 'Are there cases of diarrhea among children under the age of 5?': 'Ima li slučajeva dijareje među djecom ispod 5 godina?', 'Are there children living in adult prisons in this area?': 'Ima li djece u zatvoru za odrasle u ovom području?', 'Are there children living in boarding schools in this area?': 'Ima li djece u internatima u ovom području?', 'Are there children living in homes for disabled children in this area?': 'Ima li djece u kućama za djecu s invaliditetom u ovom području?', 'Are there children living in juvenile detention in this area?': 'Ima li djece u pritvoru za maloljetnike?', 'Are there children living in orphanages in this area?': 'Ima li djece u domovima za napuštenu djecu u ovom području?', 'Are there older people living in care homes in this area?': 'Ima li starijih ljudi u domovima u ovom području?', 'Are there separate latrines for women and men available?': 'Da li su dostupni odvojeni zahodi za žene i muškarce?', 'Are you sure you want to commit to this request and send a shipment?': 'Da li ste sigurni da želite potvrditi ovaj zahtjev i poslati pošiljku?', 'Are you sure you want to delete this record?': 'Jeste li sigurni da želite obrisati ovaj zapis?', 'Are you sure you want to send this shipment?': 'Jeste li sigurni da želite poslati ovu pošiljku?', 'Are you susbscribed?': 'Jeste li pretplaćeni?', 'Area': 'Površina', 'Areas inspected': 'Istražena područja', 'Arguments': 'Argumenti', 'Armenia': 'Armenija', 'Arrived': 'Pristiglo', 'artificial': 'vještački', 'Artificial eye left': 'Vještačko lijevo oko', 'As of yet, no sections have been added to this template.': 'Do sada nijedna sekcija nije dodana u ovaj šablon.', 'Assessment': 'Procjena', 'Assessment added': 'Procjena dodana', 'Assessment admin level': 'Nivo administratora procjene', 'Assessment Answer added': 'Dodan odgovor ocjene', 'Assessment Answer deleted': 'Obrisan odgovor ocjene', 'Assessment Answer Details': 'Detalji odgovora ocjene', 'Assessment Answer updated': 'Ažuriran odgovor ocjene', 'Assessment Answers': 'Odgovori ocjene', 'Assessment deleted': 'Procjena je izbrisana', 'Assessment Details': 'Detalji procjene', 'Assessment Question added': 'Pitanje ocjene dodano', 'Assessment Question deleted': 'Pitanje ocjene obrisano', 'Assessment Question Details': 'Detalji pitanja ocjene', 'Assessment Question updated': 'Pitanje ocjene ažurirano', 'Assessment Questions': 'Pitanja ocjene', 'Assessment Reported': 'Procjena prijavljena', 'Assessment Summaries': 'Sažeci procjena', 'Assessment Summary added': 'Dodat rezime procjene', 'Assessment Summary deleted': 'Izbrisan je rezime procjene', 'Assessment Summary Details': 'Detalji sažetka procjene', 'Assessment Summary updated': 'Sažetak procjene ažuriran', 'Assessment Template added': 'Dodan predložak ocjene', 'Assessment Template deleted': 
'Obrisan predložak ocjene', 'Assessment Template Details': 'Detalji predloška ocjene', 'Assessment Template updated': 'Ažuriran predložak ocjene', 'Assessment Templates': 'Predlošci ocjene', 'Assessment timeline': 'Procjena vremenskog roka', 'Assessment Type:': 'Vrsta procjene:', 'Assessment updated': 'Ažurirana procjena', 'Assessments': 'Procjene', 'Assessments and Activities': 'Procjene i aktivnosti', 'Assessments are structured reports done by Professional Organizations': 'Procjene su strukturirani izvještaji koje obavljaju profesionalne organizacije', 'Assessments Needs vs. Activities': 'Procjena potreba u usporedbi s aktivnostima', 'Assessments:': 'Procjene:', 'Assessor': 'Procjenitelj', 'Asset': 'Sredstvo', 'Asset added': 'Dodano sredstvo', 'Asset Assignments': 'Dodjela sredstava', 'Asset Assignments deleted': 'Dodjela sredstava je izbrisana', 'Asset deleted': 'Obrisano sredstvo', 'Asset Details': 'Detalji sredstva', 'Asset Item': 'Stavka sredstava', 'Asset Log': 'Zapisnik sredstava', 'Asset Log Details': 'Detalji zapisnika imovine i sredstava', 'Asset Log Empty': 'Zapisnik sredstava je prazan', 'Asset Log Entry Added - Change Label': 'Stavka zapisnika o sredstvu dodana - Promijenite naziv', 'Asset Log Entry deleted': 'Unos sredstva u zapisnik je obrisan', 'Asset Log Entry updated': 'Unos je ažuriran', 'Asset Management': 'Upravljanje sredstvima', 'Asset Number': 'Broj sredstva', 'Asset removed': 'Sredstvo uklonjeno', 'Asset updated': 'Ažurirano sredstvo', 'Assets': 'Materijalno-tehnička sredstva', 'Assets are resources which are not consumable but are expected back, so they need tracking.': 'Materijalno-tehnička sredstva su resursi koji nisu potrošna roba i očekuje se njihov povrat, stoga je neophodan nadzor.', 'Assign': 'Dodijeli', 'Assign %(staff)s': 'Dodijeli %(staff)s', 'Assign another Role': 'Dodijeli drugu ulogu', 'Assign Asset': 'Dodijeli sredstvo', 'Assign Facility': 'Dodijeli objekat', 'Assign Group': 'Dodijeli grupu', 'Assign Human Resource': 'Dodijeli ljudske resurse', 'Assign Role to a User': 'Dodijeli ulogu korisniku', 'Assign Roles': 'Dodijeli uloge', 'Assign Staff': 'Dodijeli osoblje', 'Assign Storage Location': 'Dodijeli lokaciju skladišta', 'Assign to Facility/Site': 'Dodijeli objektu/mjestu', 'Assign to Org.': 'Dodijeliti organizaciji', 'Assign to Organisation': 'Dodijeli organizaciji', 'Assign to Organization': 'Dodijeli organizaciji', 'Assign to Person': 'Dodijeli osobi', 'Assign to Site': 'Dodijeli mjestu', 'Assign Vehicle': 'Dodijeli vozilo', 'assigned': 'dodijeljen', 'Assigned': 'Dodijeljeno', 'Assigned By': 'Dodijeljen od strane', 'Assigned Human Resources': 'Dodijeljeni ljudski resursi', 'Assigned Roles': 'Dodijeljene uloge', 'Assigned To': 'Dodijeljen', 'Assigned to': 'Dodijeljen', 'Assigned to Facility/Site': 'Dodijeljen objektu/mjestu', 'Assigned to Organisation': 'Dodijeljen organizaciji', 'Assigned to Organization': 'Dodijeljeno organizaciji', 'Assigned to Person': 'Dodijeljeno osobi', 'Assigned to Site': 'Pridružen mjestu', 'Assignments': 'Zadaci', 'Assistant': 'Asistent', 'Assisted Family Care': 'Pomoć u porodičnoj njezi', 'Assisted Self-care': 'Pomoć u samonjezi', 'Association': 'Savez', 'At or below %s': 'Na ili ispod %s', 'At/Visited Location (not virtual)': 'Na/posjećena lokacija (ne virtuelno)', 'ATC-20 Rapid Evaluation modified for New Zealand': 'ATC-20 brza procjena modifikovana za New Zealand', 'Attachments': 'Dodaci', 'Attend to information sources as described in <instruction>': 'Pobrinuti se za izvore informacija kao što je opisano u polju 
<instruction>', 'Attributes': 'Atributi', 'Attribution': 'Pripisivanje', 'Audit Read': 'Prati čitanje', 'Australia': 'Australija', 'Austria': 'Austrija', 'AUTH TOKEN': 'AUTH TOKEN', "Authenticate system's Twitter account": 'Potvrdite Twitter račun sistema', 'Authentication Required': 'Potrebna provjera autentičnosti', 'Author': 'Autor', 'Auto start': 'Samopokretanje', 'Automatic Database Synchronization History': 'Historijat automatske sinhronizacije sa bazom podataka', 'Automotive': 'Samohodni', 'Availability': 'Dostupnost', 'Available': 'Raspoloživo', 'Available Alternative Inventories': 'Dostupne alternativne zalihe', 'Available Beds': 'Dostupni kreveti', 'Available databases and tables': 'Dostupne baze podataka i tabele', 'Available Databases and Tables': 'Dostupne baze podataka i tabele', 'Available for Location': 'Dostupno za lokaciju', 'Available Forms': 'Dostupne forme', 'Available from': 'Na raspolaganju od', 'Available in Viewer?': 'Je li dostupno u pregledniku?', 'Available Inventories': 'Dostupne zalihe', 'Available Messages': 'Dostupne poruke', 'Available Records': 'Dostupni zapisi', 'Available Recovery Reports': 'Dostupni izvještaji o nađenim tijelima', 'Available until': 'Dostupno do', 'Avalanche': 'Lavina', 'average': 'prosjek', 'Average': 'Prosjek', 'Avoid the subject event as per the <instruction>': 'Izbjegavajte predmetni događaj prema <instruction>', 'Award': 'Nagrada', 'Award added': 'Nagrada dodana', 'Award deleted': 'Nagrada obrisana', 'Award updated': 'Nagrada ažurirana', 'Awards': 'Nagrade', 'Awareness raising': 'Podizanje obaviještenosti', 'Azerbaijan': 'Azerbejdžan', 'Babies who are not being breastfed, what are they being fed on?': 'Bebe koje nisu dojene, na koji su način hranjene?', 'Baby And Child Care': 'Bebe i briga za djecu', 'Back to Roles List': 'Nazad na listu uloga', 'Back to Top': 'Nazad na vrh', 'Back to Users List': 'Nazad na listu korisnika', 'Background Color': 'Pozadinska boja', 'Background Colour': 'Boja pozadine', 'Background Colour for Text blocks': 'Boja pozadine za tekstualne blokove', 'Bahai': 'Bahai', 'Bahamas': 'Bahami', 'Bahrain': 'Bahrein', 'Baldness': 'Ćelavost', 'Banana': 'Banana', 'Bangladesh': 'Bangladeš', 'Bank/micro finance': 'Banka/mikrokreditna organizacija', 'Barge Capacity': 'Kapacitet skele', 'Barricades are needed': 'Potrebne su barikade', 'Base %(facility)s Set': 'Baza %(facility)s postavljena', 'Base Facility/Site Set': 'Postavljeno mjesto/objekt baze', 'Base Layer?': 'Osnovni sloj?', 'Base Layers': 'Osnovni slojevi', 'Base Location': 'Osnovna lokacija', 'Base Location Updated!': 'Lokacija baze ažurirana!', 'Base Site Set': 'Postavljeno mjesto baze', 'Base Station added': 'Dodana bazna stanica', 'Base Station deleted': 'Obrisana bazna stanica', 'Base Station Details': 'Detalji bazne stanice', 'Base Station updated': 'Ažurirana bazna stanica', 'Base Stations': 'Bazne stanice', 'Base Unit': 'Bazna jedinica', 'Base URL of the remote Sahana Eden instance including application path, e.g. http://www.example.org/eden': 'Bazni URL udaljene Sahana Eden instance uključujući stazu aplikacije, npr. 
http://www.example.org/eden', 'Baseline added': 'Referentna tačka dodana', 'Baseline Data': 'Referentni podaci', 'Baseline deleted': 'Referentna tačka je izbrisana', 'Baseline Number of Beds': 'Bazni broj kreveta', 'Baseline number of beds of that type in this unit.': 'Referentni broj kreveta tog tipa u ovoj jedinici.', 'Baseline Type': 'Vrsta referentne tačke', 'Baseline Type added': 'Tip referentne tačke dodan', 'Baseline Type deleted': 'Izbrisan tip referentne tačke', 'Baseline Type Details': 'Detalji tipa referentne tačke', 'Baseline Type updated': 'Tip referentne tačke je ažuriran', 'Baseline Types': 'Tipovi referentnih tačaka', 'Baseline updated': 'Izmijenjena referentna tačka', 'Baselines': 'Referentne tačke', 'Baselines Details': 'Detalji referentne tačke', 'Basic Assessment': 'Osnovna procjena', 'Basic Assessment Reported': 'Osnovna procjena prijavljena', 'Basic Details': 'Osnovni detalji', 'Basic information on the requests and donations, such as category, the units, contact details and the status.': 'Osnovne informacije o zahtjevima i donacijama, kao što su kategorija, jedinice, kontaktni detalji i status.', 'Basic medical supplies available prior to disaster': 'Osnovna medicinska podrška dostupna prije katastrofe', 'Basic medical supplies available since disaster': 'Osnovna medicinska podrška dostupna nakon katastrofe', 'Basic reports on the Shelter and drill-down by region': 'Osnovni izvještaji o skloništu i dublja analiza po regijama', 'Baud': 'Baud', 'Baud rate to use for your modem - The default is safe for most cases': 'Baud rate korišten za Vaš modem - Zadano je sigurno za većinu slučajeva', 'BDRT (Branch disaster response teams)': 'BDRT (Odgovorni timovi ogranka u slučaju katastrofe)', 'Beam': 'Zraka', 'Bed Capacity': 'Krevetni kapaciteti', 'Bed Capacity per Unit': 'Kapacitet kreveta po jedinici', 'Bed Type': 'Tip ležaja', 'Bed type already registered': 'Tip kreveta već registriran', 'Bedding materials available': 'Dostupni materijali posteljine', 'beginning': 'početak', 'Belarus': 'Bjelorusija', 'Belgium': 'Belgija', 'belongs to': 'pripada u', 'Below ground level': 'Ispod nivoa tla', 'Beneficiaries': 'Korisnici', 'Beneficiaries Added': 'Korisnici dodani', 'Beneficiaries Deleted': 'Korisnici izbrisani', 'Beneficiaries Details': 'Detalji korisnika', 'Beneficiaries Updated': 'Korisnici ažurirani', 'Beneficiary': 'Korisnik', 'Beneficiary Report': 'Izvještaj o korisnicima', 'Beneficiary Type': 'Tip korisnika', 'Beneficiary Type Added': 'Dodan tip korisnika', 'Beneficiary Type Deleted': 'Izbrisan tip korisnika', 'Beneficiary Type Updated': 'Ažuriran tip korisnika', 'Beneficiary Types': 'Tipovi korisnika', 'Bhuddist': 'Bhudist', 'Bhutan': 'Butan', 'Big Capacity Tank Vehicle': 'Vozilo rezervoar velikog kapaciteta', 'Bilateral': 'Dvostrana', 'Bin': 'Korpa', 'Bing Layer': 'Bing sloj', "Bing Layers cannot be displayed if there isn't a valid API Key": 'Bing slojevi ne mogu biti prikazani ako nije ispravan API ključ', 'Biological Hazard': 'Biološke opasnosti', 'Biscuits': 'Keks', 'black': 'crna', 'Blizzard': 'Mećava', 'Blocked': 'Blokirano', 'blond': 'plavokosa', 'Blood Type (AB0)': 'Krvna grupa (AB0)', 'Blowing Snow': 'Mećava', 'blue': 'plavo', 'Boat': 'Čamac', 'Bodies': 'Tijela', 'Bodies found': 'Pronađena tijela', 'Bodies recovered': 'Pronađena tijela', 'Bodily Constitution': 'Tjelesna konstitucija', 'Body': 'Tijelo', 'Body Finds': 'Nađena tijela', 'Body Hair': 'Dlake po tijelu', 'Body hair, Colour': 'Dlake po tijelu, boja', 'Body hair, Extent': 'Dlake po tijelu, dužina', 'Body 
Recovery': 'Izvlačenje tijela', 'Body Recovery Request': 'Zahtjev za izvlačenje tijela', 'Body Recovery Requests': 'Zahtjevi za izvlačenje tijela', 'Bolivia': 'Bolivija', 'Bomb': 'Bomba', 'Bomb Explosion': 'Eksplozija bombe', 'Bomb Threat': 'Prijetnja bombom', 'Border Colour for Text blocks': 'Boja rubova tekstualnih polja', 'Bosnia and Herzegovina': 'Bosna i Hercegovina', 'Both': 'Oboje', 'Botswana': 'Bocvana', 'Bounding Box Insets': 'Umetci graničnog okvira', 'Bounding Box Size': 'Veličina graničnog okvira', 'box': 'kutija', 'Boys 13-18 yrs in affected area': 'Dječaci 13-18 god u pogođenom području', 'Boys 13-18 yrs not attending school': 'Dječaci 13-18 godina koji ne pohađaju školu', 'Boys 6-12 yrs in affected area': 'Dječaci 6-12 godina u zahvaćenim područjima', 'Boys 6-12 yrs not attending school': 'Dječaci 6-12 godina koji ne pohađaju školu', 'Branch': 'Ogranak', 'Branch Coordinator': 'Koordinator ogranka', 'Branch Organization added': 'Dodan ogranak organizacije', 'Branch Organization deleted': 'Obrisan ogranak organizacije', 'Branch Organization Details': 'Detalji ogranka organizacije', 'Branch Organization updated': 'Ažuriran ogranak organizacije', 'Branch Organizations': 'Ogranci organizacije', 'Branches': 'Ogranci', 'Brand': 'Marka', 'Brand added': 'Marka dodana', 'Brand deleted': 'Marka obrisana', 'Brand Details': 'Detalji marke', 'Brand updated': 'Marka ažurirana', 'Brands': 'Marke', 'Breakdown': 'Kvar', 'Breast milk substitutes in use since disaster': 'Zamjene za majčino mlijeko korištene nakon katastrofe', 'Breast milk substitutes used prior to disaster': 'Korištene zamjene za majčino mlijeko prije katastrofe', 'Bricks': 'Cigle', 'Bridge Closed': 'Most zatvoren', 'broad': 'široko', 'brown': 'smeđa', 'Brunei': 'Brunej', 'Bucket': 'Kanta', 'Buddhist': 'Budist', 'Budget': 'Budžet', 'Budget added': 'Dodat budžet', 'Budget deleted': 'Budžet obrisan', 'Budget Details': 'Detalji budžeta', 'Budget Updated': 'Budžet ažuriran', 'Budget updated': 'Budžet ažuriran', 'Budgeting Module': 'Modul za budžetiranje', 'Budgets': 'Budžeti', 'Buffer': 'Spremnik', 'Bug': 'Buba', 'Building Assessments': 'Procjene građevina', 'Building Collapsed': 'Zgrada srušena', 'Building Name': 'Ime zgrade', 'Building or storey leaning': 'Zgrada ili sprat su nagnuti', 'Building Safety Assessments': 'Procjena sigurnosti objekta', 'Building Short Name/Business Name': 'Ime zgrade/biznisa', 'Built using the Template agreed by a group of NGOs working together as the': 'Izgrađeno koristeći šablon kreiran od strane grupe NVO radeći zajedno kao', 'Bulgaria': 'Bugarska', 'Bulk Uploader': 'Masovni prenos', 'Bundle': 'Paket', 'Bundle added': 'Paket dodan', 'Bundle Contents': 'Sadržaji paketa', 'Bundle deleted': 'Paket obrisan', 'Bundle Details': 'Detalji paketa', 'Bundle Updated': 'Paket ažuriran', 'Bundle updated': 'Paket je ažuriran', 'Bundles': 'Svežnji', 'Bunion': 'Kriv nožni palac', 'Burn': 'Spaljeno', 'Burn ICU': 'Intenzivna njega za opekotine', 'Burned/charred': 'Spaljeno/ugljenisano', 'Business damaged': 'Oštećenje industrije', 'Button name': 'Ime dugmeta', 'by': 'od strane', 'by %(person)s': 'od %(person)s', 'By %(site)s': 'Po %(site)s', 'By Facility': 'Po objektu', 'By Inventory': 'Po skladištu', 'By selecting this you agree that we may contact you.': 'Izborom ovoga slažete se da vas možemo kontaktirati.', 'By Site': 'Po mjestu', 'By Warehouse': 'Po skladištu', 'c/o Name': 'c/o Ime', 'Cache': 'Keš', 'Cache Keys': 'Ključevi keša', 'Calculate': 'Izračunaj', 'Calendar': 'Kalendar', 'Cambodia': 'Kambodža', 'Cameroon': 
'Kamerun', 'Camp': 'Kamp', 'Camp added': 'Dodan kamp', 'Camp Coordination/Management': 'Koordinacija kampa/Menadžment', 'Camp deleted': 'Obrisan kamp', 'Camp Details': 'Detalji o kampu', 'Camp Service': 'Usluga kampa', 'Camp Service added': 'Dodana je usluga kampa', 'Camp Service deleted': 'Obrisana je usluga kampa', 'Camp Service Details': 'Detalji o uslugama kampa', 'Camp Service updated': 'Ažurirana je usluga kampa', 'Camp Services': 'Usluge kampa', 'Camp Status': 'Status kampa', 'Camp Status added': 'Dodan je status kampa', 'Camp Status deleted': 'Obrisan je status kampa', 'Camp Status Details': 'Detalji statusa kampa', 'Camp Status updated': 'Ažuriran je status kampa', 'Camp Statuses': 'Statusi kampa', 'Camp Type': 'Tip kampa', 'Camp Type added': 'Tip kampa dodan', 'Camp Type deleted': 'Tip kampa obrisan', 'Camp Type Details': 'Detalji tipa kampa', 'Camp Type updated': 'Tip kampa ažuriran', 'Camp Types': 'Vrste kampa', 'Camp Types and Services': 'Tipovi i usluge kampova', 'Camp updated': 'Ažuriran kamp', 'Campaign': 'Kampanja', 'Campaign Added': 'Kampanja dodana', 'Campaign Deleted': 'Kampanja izbrisana', 'Campaign ID': 'ID kampanje', 'Campaign Message': 'Poruka kampanje', 'Campaign Message Added': 'Dodana poruka kampanje', 'Campaign Message Deleted': 'Obrisana poruka kampanje', 'Campaign Message Updated': 'Ažurirana poruka kampanje', 'Campaign Messages': 'Poruke kampanje', 'Campaign Updated': 'Kampanja ažurirana', 'Campaigns': 'Kampanje', 'Camps': 'Kampovi', 'Can be grouped together into Feature Groups': 'Mogu se grupisati u grupe karakteristika', 'can be used to extract data from spreadsheets and put them into database tables.': 'može se koristiti za izvlačenje podataka iz tabelarnog prikaza i stavljanje istih u tabele baza podataka.', 'Can only approve 1 record at a time!': 'Moguće je potvrditi samo jedan zapis istovremeno!', 'Can only disable 1 record at a time!': 'Moguće je onemogućiti samo jedan zapis istovremeno!', 'Can only enable 1 record at a time!': 'Moguće je omogućiti samo jedan zapis istovremeno!', 'Can only update 1 record at a time!': 'Moguće je ažurirati samo jedan zapis istovremeno!', 'Can read PoIs either from an OpenStreetMap file (.osm) or mirror.': 'Može čitati tačke interesa iz OpenStreetMap datoteke (.osm) ili rezervnog servera.', "Can't import tweepy": 'Nemoguće učitati tweepy', 'Canada': 'Kanada', 'Cancel': 'Otkaži', 'Cancel Crop': 'Otkaži rezanje', 'Cancel editing': 'Otkaži uređivanje', 'Cancel Log Entry': 'Otkaži stavku zapisnika', 'Cancel Shipment': 'Otkazati pošiljku', 'Canceled': 'Otkazano', 'Cancelled': 'Otkazano', 'cancelled': 'otkazano', 'Candidate Matches for Body %(label)s': 'Kandidat odgovara tijelu %(label)s', 'Candidate Matches for Body %s': 'Kandidat odgovara tijelu %s', 'Canned Fish': 'Konzervirana riba', 'cannot be deleted.': 'ne može se obrisati.', 'Cannot be empty': 'Ne može biti prazno', 'Cannot disable your own account!': 'Ne možete onesposobiti svoj račun!', 'Cannot make an Organization a branch of itself!': 'Ne može se napraviti organizacija koja je vlastiti ogranak!', 'Cannot open created OSM file!': 'Ne mogu otvoriti kreiranu OSM datoteku!', 'Cannot read from file: %(filename)s': 'Ne mogu pročitati iz datoteke: %(filename)s', 'Cannot send messages if Messaging module disabled': 'Ne mogu se slati poruke ako je modul za poruke isključen', 'Capacity (Day / Evacuation)': 'Kapacitet (Dan / Evakuacija)', 'Capacity (Day and Night)': 'Kapacitet (dan i noć)', 'Capacity (Day)': 'Kapacitet (dan)', 'Capacity (Max Persons)': 'Kapacitet (maksimalan broj 
osoba)', 'Capacity (Night / Post-Impact)': 'Kapacitet (Noć / nakon utjecaja)', 'Capacity (Night only)': 'Kapacitet (samo noć)', 'Capacity (Night)': 'Kapacitet (noć)', 'Capacity (W x D X H)': 'Kapacitet (Š x D x V)', 'Capacity Building': 'Izgradnja kapaciteta', 'Cape Verde': 'Zelenortska Ostrva', 'Capture Contact Information': 'Dohvati informacije o kontaktu', 'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Snimi informacije o grupama žrtava nesreće (turisti, putnici, porodice, itd.)', 'Capture Information on each disaster victim': 'Unesi informacije o svakoj žrtvi katastrofe', 'Capturing the projects each organization is providing and where': 'Bilježenje projekata koje svaka organizacija omogućava i gdje', 'Card holder': 'Vlasnik kartice', 'Cardiology': 'Kardiologija', 'Cargo Pier Depth': 'Dubina mola za teret', 'Case added': 'Dodan slučaj', 'Case deleted': 'Obrisan slučaj', 'Case Details': 'Detalji slučaja', 'Case Number': 'Broj slučaja', 'Case updated': 'Ažuriran slučaj', 'Cases': 'Slučajevi', 'Cash available to restart business': 'Gotovina dostupna za ponovni početak posla', 'Cassava': 'Tropska biljka manioka', 'Casual Labor': 'Povremeni rad', 'Casualties': 'Gubici', 'Catalog': 'Katalog', 'Catalog added': 'Katalog dodan', 'Catalog deleted': 'Katalog obrisan', 'Catalog Details': 'Detalji o katalogu', 'Catalog Item': 'Stavka kataloga', 'Catalog Item added': 'Dodata stavka u katalog', 'Catalog Item deleted': 'Obrisana stavka iz kataloga', 'Catalog Item updated': 'Ažurirana stavka u katalogu', 'Catalog Items': 'Stavke kataloga', 'Catalog Name': 'Ime kataloga', 'Catalog updated': 'Katalog ažuriran', 'Catalogs': 'Katalozi', 'Categories': 'Kategorije', 'Category': 'Kategorija', 'Category:': 'Kategorija:', 'Category<>Sub-Category<>Catalog Relation added': 'Kategorija<>Podkategorija<>kataloški odnos dodan', 'Category<>Sub-Category<>Catalog Relation updated': 'Kategorija<>Podkategorija<>kataloški odnos ažuriran', 'caucasoid': 'bjelačka', "Caution: doesn't respect the framework rules!": 'Upozorenje: nepoštivanje okvirnih pravila!', 'CBA Women': 'CBA žena', 'CDRT (Community disaster response teams)': 'CDRT (Timovi zajednice za odgovore u slučaju katastrofe)', 'Ceilings, light fixtures': 'Stropovi, rasvjetna tijela', 'Cell Phone': 'Mobilni telefon', 'Cell Tower': 'Ćelijski toranj', 'Central African Republic': 'Centralnoafrička Republika', 'Central point to record details on People': 'Centralna lokacija za bilježenje detalja o ljudima', 'Certificate': 'Certifikat', 'Certificate added': 'Dodat certifikat', 'Certificate Catalog': 'Katalog certifikata', 'Certificate deleted': 'Obrisan certifikat', 'Certificate Details': 'Detalji o certifikatu', 'Certificate Status': 'Status certifikata', 'Certificate updated': 'Ažuriran certifikat', 'Certificates': 'Certifikati', 'Certification': 'Certificiranje', 'Certification added': 'Dodan certifikat', 'Certification deleted': 'Obrisana certifikacija', 'Certification Details': 'Detalji certifikacije', 'Certification updated': 'Ažurirana certifikacija', 'Certifications': 'Certifikati', 'Certifying Organization': 'Organizacija koja daje certifikat', 'Chad': 'Čad', 'Change Password': 'Promijeni lozinku', 'Channel': 'Kanal', 'Chart': 'Grafikon', 'Chat on IRC': 'Ćaskanje na IRC', 'Check': 'Provjera', 'check all': 'označi sve', 'Check all': 'Provjeri sve', 'Check for errors in the URL, maybe the address was mistyped.': 'Provjerite ima li grešaka u URL-u, možda je adresa pogrešno ukucana.', 'Check if the URL is pointing to a directory instead of 
a webpage.': 'Provjeri da li URL pokazuje na direktorij umjesto na stranicu.', 'Check outbox for the message status': 'Provjerite izlaznu poštu za status poruke', 'Check Request': 'Provjerite zahtjev', 'Check this to make your search viewable by others.': 'Označite ovo da vaša pretraga bude vidljiva ostalim.', 'Check to delete': 'Označi za brisanje', 'Check to delete:': 'Označi za brisanje:', 'Check-In': 'Ubaci', 'Check-in at Facility': 'Označi na objektu', 'Check-Out': 'Izdvoji', 'Checked': 'Provjereno', 'checked': 'provjereno', 'Checked-In successfully!': 'Uspješno ubačeno!', 'Checked-Out successfully!': 'Uspješno izdvojeno!', 'Checklist': 'Spisak', 'Checklist created': 'Kontrolni spisak kreiran', 'Checklist deleted': 'Lista zadataka obrisana', 'Checklist Item': 'Zadatak u listi', 'Checklist of Operations': 'Lista operacija', 'Checklist updated': 'Ažurirana lista zadataka', 'Checklists': 'Liste zadataka', 'Chemical Hazard': 'Hemijska opasnost', 'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Hemijske, biološke, radiološke, nuklearne ili visoko-prinosne eksplozivne prijetnje ili napadi', 'Chewing tobacco': 'Duhan za žvakanje', 'Chicken': 'Pilići', 'Child': 'Dijete', 'Child (2-11)': 'Dijete (2-11)', 'Child (< 18 yrs)': 'Dijete (<18 godina)', 'Child Abduction Emergency': 'Hitan slučaj otmice djeteta', 'Child headed households (<18 yrs)': 'Dijete na čelu domaćinstva (<18 god)', 'Children (2-5 years)': 'Djeca (2-5 godina)', 'Children (5-15 years)': 'Djeca (5-15 godina)', 'Children (< 2 years)': 'Djeca (mlađa od 2 godine)', 'Children in adult prisons': 'Djeca u zatvorima za odrasle', 'Children in boarding schools': 'Djeca u internatima', 'Children in homes for disabled children': 'Djeca u kućama za djecu s invaliditetom', 'Children in juvenile detention': 'Djeca u pritvoru za maloljetnike', 'Children in orphanages': 'Djeca u sirotištu', 'Children living on their own (without adults)': 'Djeca koja žive sama (bez staratelja)', 'Children not enrolled in new school': 'Djeca koja nisu upisana u novu školu', 'Children orphaned by the disaster': 'Djeca koja su siročad zbog katastrofe', 'Children separated from their parents/caregivers': 'Djeca odvojena od svojih roditelja/staratelja', 'Children that have been sent to safe places': 'Djeca koja su poslana na sigurna mjesta', 'Children who have disappeared since the disaster': 'Djeca nestala nakon katastrofe', 'Children with chronical illnesses': 'Djeca s hroničnim bolestima', "Children's Education": 'Obrazovanje djece', 'Chile': 'Čile', 'Chin, Inclination': 'Brada, nagib', 'Chin, Shape': 'Brada, oblik', 'Chin, Size': 'Brada, veličina', 'China': 'Kina', 'Chinese': 'Kineski', 'Chinese (Simplified)': 'Kineski (pojednostavljen)', 'Chinese (Taiwan)': 'Kineski (Tajvan)', 'Cholera Treatment': 'Tretman kolere', 'Cholera Treatment Capability': 'Sposobnost tretmana kolere', 'Cholera Treatment Center': 'Centar za tretman kolere', 'Cholera-Treatment-Center': 'Centar za liječenje kolere', 'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': 'Izaberi novo slanje bazirano na novim vrednovanjima i procjeni tima. Teški uslovi koji utiču na cijelu zgradu su temeljni za NESIGURNO postavljanje. 
Lokalizirano teški i pretežno umjereni uslovi mogu zahtijevati OGRANIČENO korištenje. Stavite UOČLJIV plakat na glavni ulaz. Postavite sve ostale plakate na sve značajnije ulaze.', 'Choose Country': 'Izaberite državu', 'Choose Manually': 'Izaberi ručno', 'Choosing Skill and Resources of Volunteers': 'Izbor vještina i resursa volontera', 'Christian': 'Kršćanin', 'Church': 'Crkva', 'Cigarettes': 'Cigarete', 'Cigars': 'Cigare', 'circular': 'kružno', 'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': 'Okolnosti nestanka, druge žrtve/svjedoci koji su posljednji vidjeli nestalu osobu živu.', 'City': 'Grad', 'City / Town / Village': 'Općina/Mjesto', 'Civil Emergency': 'Civilno izvanredno stanje', 'Cladding, glazing': 'Oblaganje, ostakljenje', 'Clean Instance': 'Čista kopija', 'clear': 'čisto', 'Clear all': 'Obriši sve', 'Clear all Layers': 'Očisti sve slojeve', 'Clear CACHE?': 'Obrisati predmemoriju?', 'Clear DISK': 'Obriši na disku', 'Clear filter': 'Očisti filter', 'Clear RAM': 'Obriši u memoriji', 'Clear Selection': 'Obriši izbor', 'Cleft chin': 'Rupica na bradi', "Click 'Start' to synchronize with this repository now:": "Kliknite 'Start' za sinhronizaciju s ovim repozitorijem sada:", 'click for more details': 'pritisni za više detalja', 'click here': 'kliknite ovdje', 'Click on a marker to see the Completed Assessment Form': 'Kliknite na marker da vidite formular za završenu ocjenu', "Click on questions below to select them, then click 'Display Selected Questions' button to view the selected questions for all Completed Assessment Forms": "Kliknite na pitanja ispod da ih odaberete, zatim kliknite na 'Prikaži izabrana pitanja' dugme da vidite izabrana pitanja za sve završene formulare procjene.", 'Click on the chart to show/hide the form.': 'Kliknite na dijagram za prikaz/sakrivanje formulara.', 'Click on the link': 'Kliknite na link', 'Click on the slider to choose a value': 'Kliknite na klizač za izbor vrijednosti', 'Click to edit': 'Kliknite da uredite', 'Click where you want to open Streetview': 'Kliknite gdje želite otvoriti Streetview', 'Client ID': 'Identifikacija korisnika', 'Client IP': 'IP klijenta', 'Client Secret': 'Tajni ključ korisnika', 'Climate': 'Klima', 'Climate change mitigation': 'Ublažavanje klimatskih promjena', 'Climate change preparednes': 'Pripremljenost na klimatske promjene', 'Clinical Laboratory': 'Klinički laboratorij', 'Clinical Operations': 'Kliničke operacije', 'Clinical Status': 'Klinički status', 'Close': 'Zatvori', 'Close map': 'Zatvori mapu', 'Closed': 'Zatvoreno', 'CLOSED': 'ZATVORENO', 'Closed?': 'Zatvoreno?', 'Closure': 'Zatvaranje', 'Clothing': 'Odjeća', 'Cluster': 'Skup', 'Cluster added': 'Skup dodan', 'Cluster Attribute': 'Atribut skupa', 'Cluster deleted': 'Skup obrisan', 'Cluster Details': 'Detalji skupa', 'Cluster Distance': 'Udaljenost skupova', 'Cluster Subsector': 'Podsektor skupa', 'Cluster Subsector added': 'Podsektor skupa dodan', 'Cluster Subsector deleted': 'Podsektor skupa obrisan', 'Cluster Subsector Details': 'Detalji podsektora skupa', 'Cluster Subsector updated': 'Podsektor skupa ažuriran', 'Cluster Subsectors': 'Podsektori skupa', 'Cluster Threshold': 'Prag skupa', 'Cluster updated': 'Skup ažuriran', 'Cluster(s)': 'Skup(ovi)', 'Clusters': 'Skupovi', 'CN': 'CN', 'Coalition added': 'Koalicija dodana', 'Coalition Details': 'Detalji koalicije', 'Coalition removed': 'Uklonjena koalicija', 'Coalition updated': 'Koalicija ažurirana', 'Coalitions': 'Koalicije', 'Code': 'Šifra', 'Code Share': 'Dijeljenje koda', 'Code:': 'Kôd:', 
'Cold Wave': 'Hladni talas', 'Collapse, partial collapse, off foundation': 'Kolaps, djelimični kolaps, pomjereni temelji', 'collateral event': 'kolateralni događaj', 'Collective center': 'Kolektivni centar', 'Colombia': 'Kolumbija', 'Colour for Underline of Subheadings': 'Boja za podvlačenje podnaslova', 'Colour of bottom of Buttons when not pressed': 'Boja dna dugmadi kada nisu pritisnuta', 'Colour of bottom of Buttons when pressed': 'Boja dna dugmadi kada su pritisnuta', 'Colour of Buttons when hovering': 'Boja dugmadi kada se prelijeću', 'Colour of dropdown menus': 'Boja padajućih menija', 'Colour of selected Input fields': 'Boja selektovanih polja za unos', 'Colour of selected menu items': 'Boja označenih stavki meni-a', 'Column Choices (One Per Line': 'Izbor kolona (Jedan po liniji)', 'Columns, pilasters, corbels': 'Stubovi, pilastri, konzole', 'Combined Method': 'Kombinovana metoda', 'Come back later.': 'Vratite se poslije.', 'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Pokušajte kasnije. Svi koji posjećuju ovaj sajt vjerojatno imaju isti problem kao i vi.', 'Command Tactical Operational Vehicle': 'Komandno taktičko radno vozilo', 'Comment': 'Komentar', 'Comments': 'Komentari', 'Comments permitted?': 'Komentarisanje dozvoljeno?', 'Commercial/Offices': 'Poslovni/Uredi', 'Commit': 'Izvrši', 'Commit All': 'Potvrdi sve', 'Commit Date': 'Datum izvršenja', 'Commit from %s': 'Izvrši od %s', 'Commit Status': 'Status potvrđivanja', 'Commit. Status': 'Status zaduženja', 'Commiting a changed spreadsheet to the database': 'Predavanje izmijenjenog tabelarnog prikaza bazi podataka', 'Commitment': 'Zaduženje', 'Commitment Added': 'Zaduženje dodano', 'Commitment Canceled': 'Zaduženje otkazano', 'Commitment Details': 'Detalji o zaduženjima', 'Commitment Item': 'Stavka angažovanja', 'Commitment Item added': 'Stavka zaduženja dodana', 'Commitment Item deleted': 'Stavka zaduženja obrisana', 'Commitment Item Details': 'Detalji o zaduženju', 'Commitment Item updated': 'Stavka zaduženja ažurirana', 'Commitment Items': 'Stavke zaduženja', 'Commitment Status': 'Status zaduženja', 'Commitment Updated': 'Zaduženje ažurirano', 'Commitments': 'Zaduženja', 'Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'Zaduženja mogu biti napravljena prema ovim zahtjevima, ali ona ostaju otvorena dok zahtjevalac ne potvrdi da je zahtjev kompletan.', 'Committed': 'Zaduženo', 'Committed By': 'Zaduženo od strane', 'Committed Items': 'Zadužene stavke', 'Committed People': 'Zaduženo osoblje', 'Committed People Details': 'Detalji o zaduženoj osobi', 'Committed People updated': 'Zadužene osobe ažurirane', 'Committed Person Details': 'Detalji o zaduženoj osobi', 'Committed Person updated': 'Zadužena osoba ažurirana', 'Committing Inventory': 'Predavanje inventara', 'Committing Organization': 'Izvršna organizacija', 'Committing Person': 'Izvršna osoba', 'Committing Warehouse': 'Zadužena skladišta', 'Commodities Loaded': 'Roba natovarena', 'Communication problems': 'Komunikacijski problemi', 'Communities': 'Zajednice', 'Community': 'Zajednica', 'Community Added': 'Zajednica dodana', 'Community Based Health and First Aid (CBHFA)': 'Zdravstvo i prva pomoć koju organizuje društvena zajednica (CBHFA)', 'Community Centre': 'Mjesna zajednica', 'Community Contacts': 'Kontakt podaci zajednice', 'Community Deleted': 'Zajednica obrisana', 'Community Details': 'Detalji zajednice', 'Community Health Center': 
'Dom zdravlja', 'Community Member': 'Član zajednice', 'Community organisation': 'Organizacija zajednice', 'Community Updated': 'Zajednica ažurirana', 'Comoros': 'Komori', 'Company': 'Preduzeće', 'Competencies': 'Sposobnosti', 'Competency': 'Sposobnost', 'Competency added': 'Stručnost dodana', 'Competency deleted': 'Stručnost obrisana', 'Competency Details': 'Detalji o sposobnostima', 'Competency Rating': 'Nivo spremnosti', 'Competency Rating added': 'Ocjena stručnosti dodana', 'Competency Rating Catalog': 'Katalog ocjena stručnosti', 'Competency Rating deleted': 'Ocjena stručnosti obrisana', 'Competency Rating Details': 'Detalji statusa spremnosti', 'Competency Rating updated': 'Ocjena stručnosti ažurirana', 'Competency Ratings': 'Ocjene sposobnosti', 'Competency updated': 'Stručnost je ažurirana', 'Complete': 'Završeno', 'Complete Adjustment': 'Završi podešavanje', 'Complete Database Synchronized': 'Kompletna baza podataka sinhronizovana', 'Complete Returns': 'Završena vraćanja', 'Complete Unit Label for e.g. meter for m.': 'Puno ime jedinice, npr. metar za m.', 'Complete? Please call': 'Završeno? Molim pozovite', 'Completed': 'Završeno', 'completed': 'završeno', 'Completed Assessment Form deleted': 'Formular završene procjene obrisan', 'Completed Assessment Form Details': 'Detalji završenog formulara ocjene', 'Completed Assessment Form entered': 'Unesen završen formular ocjene', 'Completed Assessment Form updated': 'Završen formular ocjene ažuriran', 'Completed Assessment Forms': 'Završeni formulari ocjene', 'Completed Assessments': 'Završene ocjene', 'Completed tour?': 'Završena tura?', 'Completion Question': 'Pitanje završavanja', 'Complexion': 'Ten', 'Compose': 'Sastavi', 'Compromised': 'Kompromitirano', 'concave': 'konkavno', 'Concrete frame': 'Betonski okvir', 'Concrete shear wall': 'Betonski smičući zid', 'Condition': 'Stanje', 'Conduct a Disaster Assessment': 'Obavi procjenu katastrofe', 'Config': 'Konfiguracija', 'Config added': 'Konfiguracija dodana', 'Config deleted': 'Konfiguracija izbrisana', 'Config Details': 'Detalji konfiguracije', 'Config not found!': 'Konfiguracija nije nađena!', 'Config updated': 'Konfiguracija ažurirana', 'Configs': 'Konfiguracije', 'Configuration': 'Konfiguracija', 'Configurations': 'Konfiguracije', 'Configure connection details and authentication': 'Konfigurišite detalje o povezivanju i autentifikaciju', 'Configure Layer for this Symbology': 'Konfigurišite sloj za ovo značenje simbola', 'Configure resources to synchronize, update methods and policies': 'Konfigurišite resurse za sinhronizaciju, metode ažuriranja i politike', 'Configure Run-time Settings': 'Konfiguriši izvršne postavke', 'Configure the default proxy server to connect to remote repositories': 'Konfigurišite podrazumijevani proxy server za vezu s udaljenim repozitorijima', 'Configure/Monitor Synchronization': 'Konfiguriši/prati sinhronizaciju', 'Confirm Shipment Received': 'Potvrdite primljenu pošiljku', 'Confirm that some items were returned from a delivery to beneficiaries and they will be accepted back into stock.': 'Potvrdite da su neki artikli vraćeni od korisnika i da će biti prihvaćeni nazad u skladište.', 'Confirm that the shipment has been received by a destination which will not record the shipment directly into the system and confirmed as received.': 'Potvrdite da je dostava stigla na odredište koje neće bilježiti dostavu direktno u sistem i da je potvrđeno kao primljeno.', 'Confirmed': 'Potvrđeno', 'confirmed': 'potvrđeno', 'Confirmed Incidents': 'Potvrđeni incidenti', 'Confirming 
Organization': 'Organizacija koja potvrđuje', 'Conflict Details': 'Detalji sukoba', 'Conflict Policy': 'Politika konflikta', 'Conflict Resolution': 'Razrješenje konflikta', 'Congo, Democratic Republic of the (Congo-Kinshasa)': 'Kongo, Demokratska Republika (Zair)', 'Congo, Republic of the (Congo-Brazzaville)': 'Kongo, Republika (Brazzaville)', 'Connect Parser': 'Parser konekcija', 'consider': 'razmotri', 'Consignment Note': 'Sprovodni list', 'Consignment Number, Tracking Number, etc': 'Konsignacijski broj, broj za praćenje itd.', 'constraint_id': 'ogranicenje_id', 'Constraints Only': 'Samo ograničenja', 'Consumable': 'Potrošni', 'Contact': 'Kontakt osoba', 'Contact Added': 'Informacije o kontaktu su dodane', 'Contact added': 'Informacije o kontaktu su dodane', 'Contact Data': 'Kontakt podaci', 'Contact deleted': 'Kontakt obrisan', 'Contact Deleted': 'Izbrisan kontakt', 'Contact details': 'Detalji o kontaktu', 'Contact Details': 'Detalji o kontaktu', 'Contact Details updated': 'Informacije o kontaktu su ažurirane', 'Contact Info': 'Kontakt podaci', 'Contact Information': 'Kontakt informacije', 'Contact Information Added': 'Informacije o kontaktu su unesene', 'Contact information added': 'Dodata kontakt informacija', 'Contact Information Deleted': 'Izbrisane informacije o kontaktu', 'Contact information deleted': 'Obrisana kontakt informacija', 'Contact Information Updated': 'Informacije o kontaktu ažurirane', 'Contact information updated': 'Ažurirana kontakt informacija', 'Contact Method': 'Način kontakta', 'Contact Name': 'Ime kontakt osobe', 'Contact People': 'Kontakt osobe', 'Contact Person': 'Kontakt osoba', 'Contact Phone': 'Kontakt telefon', 'Contact Updated': 'Ažurirane kontakt informacije', 'Contact Us': 'Kontaktirajte nas', 'Contact us': 'Kontaktirajte nas', 'Contacts': 'Kontakti', 'Contacts:': 'Kontakti:', 'Content': 'Sadržaj', 'Content Management': 'Upravljanje sadržajem', 'Content Management System': 'Sistem za upravljanje sadržajem', 'Contents': 'Sadržaj', 'Context': 'Kontekst', 'Contingency planning': 'Planiranje za vanredne slučajeve', 'Contract End Date': 'Krajnji datum ugovora', 'Contradictory values!': 'Protivriječne vrijednosti!', 'Contributor': 'Saradnik', 'Controller': 'Kontroler', 'Controller name': 'Ime kontrolera', 'Controller tour is activated': 'Tura kontrolera je aktivirana', 'Conversion Tool': 'Sredstvo konverzije', 'convex': 'konveksni', 'Cook Islands': 'Kukova Ostrva', 'Cooking NFIs': 'Neprehrambeni artikli za kuhanje', 'Cooking Oil': 'Jestivo ulje', 'Coordinate Conversion': 'Pretvaranje koordinata', 'Coordinate Layer': 'Sloj koordinata', 'Coping Activities': 'Aktivnosti suočavanja', 'Copy': 'Kopiraj', 'Corn': 'Kukuruz', 'Corporate Entity': 'Poslovna jedinica', 'Cost per Megabyte': 'Cijena po megabajtu', 'Cost per Minute': 'Trošak po minutu', 'Cost Type': 'Vrsta troška', 'Costa Rica': 'Kostarika', 'Could not add person record': 'Ne mogu dodati zapis o osobi', 'Could not auto-register at the repository, please register manually.': 'Ne mogu se automatski registrovati na repozitoriju, molimo registrujte se ručno.', 'Could not create record.': 'Ne može se kreirati zapis.', 'Could not initiate manual synchronization.': 'Ne mogu pokrenuti ručnu sinhronizaciju.', 'Could not merge records. (Internal Error: %s)': 'Ne mogu spojiti slogove. 
(Interna greška: %s)', "couldn't be parsed so NetworkLinks not followed.": 'nije mogao biti analiziran pa se mrežni linkovi ne prate.', "Couldn't open %s!": 'Ne mogu otvoriti %s!', 'Counselling': 'Savjetovanje', 'Count': 'Broj', 'Count of Question': 'Broj pitanja', 'Country': 'Država', 'Country Code': 'Kôd države', 'Country is required!': 'Država je obavezna!', 'Country of Residence': 'Država prebivališta', 'County': 'Pokrajina', 'County / District': 'Kanton / Regija', 'Course': 'Kurs', 'Course added': 'Dodan kurs', 'Course Catalog': 'Katalog kurseva', 'Course Certicate added': 'Dodat certifikat kursa', 'Course Certicate deleted': 'Certifikat kursa izbrisan', 'Course Certicate Details': 'Detalji certifikata kursa', 'Course Certicate updated': 'Ažuriran certifikat kursa', 'Course Certicates': 'Certifikati kurseva', 'Course Certificate added': 'Dodat certifikat kursa', 'Course Certificate deleted': 'Obrisan certifikat kursa', 'Course Certificate Details': 'Detalji certifikata kursa', 'Course Certificate updated': 'Ažuriran certifikat kursa', 'Course Certificates': 'Certifikati kursa', 'Course deleted': 'Obrisan kurs', 'Course Details': 'Detalji kursa', 'Course updated': 'Ažuriran kurs', 'Courses': 'Kursevi', 'covered': 'prekriveno', 'Create': 'Kreiraj', 'Create & manage Distribution groups to receive Alerts': 'Kreiraj & upravljaj grupama distribucije za primanje znakova za uzbunu', "Create 'More Info'": 'Kreiraj dodatne podatke', 'Create a group entry in the registry.': 'Kreiraj unos grupe u registar.', 'Create a new facility or ensure that you have permissions for an existing facility.': 'Kreirajte novi objekat ili osigurajte da imate potrebna prava nad postojećim objektom.', 'Create a new Group.': 'Kreiraj novu grupu.', 'Create a new organization or ensure that you have permissions for an existing organization.': 'Kreirajte novu organizaciju ili osigurajte da imate potrebna prava nad postojećom organizacijom.', 'Create a new Team.': 'Kreiraj novi tim.', 'Create a Person': 'Kreiraj osobu', 'Create Activity': 'Kreiraj aktivnost', 'Create Activity Report': 'Kreiraj izvještaj o aktivnostima', 'Create Activity Type': 'Kreiraj tip aktivnosti', 'Create Airport': 'Kreiraj aerodrom', 'Create Alternative Item': 'Kreiraj alternativnu stavku', 'Create an Assessment Question': 'Kreiraj pitanje ocjene', 'Create Assessment Answer': 'Kreiraj odgovor ocjene', 'Create Assessment Template': 'Kreiraj predložak ocjene', 'Create Assessment': 'Kreiraj ocjenu', 'Create Asset': 'Kreiraj sredstvo', 'Create Award': 'Kreiraj nagradu', 'Create Base Station': 'Kreiraj baznu stanicu', 'Create Bed Type': 'Kreiraj vrstu ležaja', 'Create Beneficiary Type': 'Kreiraj tip korisnika', 'Create Brand': 'Kreiraj proizvođačku marku', 'Create Campaign': 'Kreiraj kampanju', 'Create Case': 'Kreiraj slučaj', 'Create Catalog': 'Kreiraj katalog', 'Create Catalog Item': 'Kreiraj stavku kataloga', 'Create Certificate': 'Kreiraj certifikat', 'Create Checklist': 'Kreiraj listu zadataka', 'Create Cholera Treatment Capability Information': 'Kreiraj informacije o sposobnosti liječenja kolere', 'Create Cluster': 'Kreiraj skup', 'Create Coalition': 'Kreiraj koaliciju', 'Create Community': 'Kreiraj zajednicu', 'Create Competency Rating': 'Kreiraj status spremnosti', 'Create Contact': 'Kreiraj kontakt', 'Create Course': 'Kreiraj kurs', 'Create Dead Body Report': 'Kreiraj izvještaj o mrtvim tijelima', 'Create Department': 'Kreiraj odjeljenje', 'Create Details': 'Kreiraj detalje', 'Create Event': 'Kreiraj događaj', 'Create Event Type': 'Kreiraj tip 
događaja', 'Create Facility': 'Kreiraj objekat', 'Create Facility Type': 'Kreiraj vrstu objekta', 'Create Feature Layer': 'Kreiraj sloj karakteristika', 'Create GPS data': 'Kreiraj GPS podatke', 'Create Group': 'Kreiraj grupu', 'Create Group Entry': 'Kreiraj element grupe', 'Create Hazard': 'Kreiraj rizik', 'Create Heliport': 'Kreiraj heliodrom', 'Create Hospital': 'Kreiraj bolnicu', 'Create Identification Report': 'Kreiraj izvještaj o identifikacijama', 'Create Impact Assessment': 'Kreiraj procjenu utjecaja', 'Create Incident': 'Kreiraj incident', 'Create Incident Report': 'Kreiraj izvještaj o incidentu', 'Create Incident Type': 'Kreiraj tip incidenta', 'Create Item': 'Kreiraj stavku', 'Create Item Category': 'Kreiraj kategoriju stavke', 'Create Item Pack': 'Kreiraj paket stavki', 'Create Job': 'Kreiraj posao', 'Create Job Title': 'Kreiraj radno mjesto', 'Create Kit': 'Kreiraj komplet', 'Create Layer': 'Kreiraj sloj', 'Create Location': 'Kreiraj lokaciju', 'Create Location Hierarchy': 'Kreiraj hijerarhiju lokacija', 'Create Mailing List': 'Kreiraj listu za slanje poruka', 'Create Map Configuration': 'Kreiraj konfiguraciju mape', 'Create Marker': 'Kreiraj marker', 'Create Member': 'Kreiraj člana', 'Create Milestone': 'Kreiraj prekretnicu', 'Create Mobile Impact Assessment': 'Kreiraj mobilnu procjenu utjecaja', 'Create Morgue': 'Kreiraj mrtvačnicu', 'Create Network': 'Kreiraj mrežu', 'Create New Asset': 'Kreiraj novo sredstvo', 'Create New Catalog Item': 'Kreiraj novu stavku kataloga', 'Create New Event': 'Kreiraj novi događaj', 'Create New Item Category': 'Kreiraj novu kategoriju stavke', 'Create new Office': 'Kreiraj novi ured', 'Create new Organization': 'Kreiraj novu organizaciju', 'Create New Request': 'Kreiraj novi zahtjev', 'Create New Scenario': 'Kreiraj novi scenarij', 'Create New Vehicle': 'Kreiraj novo vozilo', 'Create Office': 'Kreiraj kancelariju', 'Create Office Type': 'Kreiraj tip kancelarije', 'Create Organization': 'Kreiraj organizaciju', 'Create Organization Type': 'Kreiraj tip organizacije', 'Create Partner Organization': 'Kreiraj partnersku organizaciju', 'Create Personal Effects': 'Kreiraj lične stvari', 'Create PoI Type': 'Kreiraj tip tačke interesa', 'Create Point of Interest': 'Kreiraj tačku interesa', 'Create Policy or Strategy': 'Kreiraj politiku ili strategiju', 'Create Post': 'Kreiraj blok ugradivog teksta', 'Create Program': 'Kreiraj program', 'Create Project': 'Kreiraj projekat', 'Create Projection': 'Kreiraj projekciju', 'Create Question Meta-Data': 'Kreiraj metapodatke pitanja', 'Create Rapid Assessment': 'Kreiraj brzu procjenu', 'Create Report': 'Kreiraj izvještaj', 'Create Repository': 'Kreiraj repozitorij', 'Create Request': 'Kreiraj zahtjev', 'Create Request Template': 'Kreiraj predložak zahtjeva', 'Create Resource': 'Kreiraj resurs', 'Create Resource Type': 'Kreiraj tip resursa', 'Create River': 'Kreiraj rijeku', 'Create Role': 'Kreiraj ulogu', 'Create Room': 'Kreiraj sobu', 'Create Seaport': 'Kreiraj luku', 'Create search': 'Kreiraj pretragu', 'Create Sector': 'Kreiraj sektor', 'Create Series': 'Kreiraj seriju', 'Create Service': 'Kreiraj uslugu', 'Create Service Profile': 'Kreiraj profil usluge', 'Create Shelter': 'Kreiraj sklonište', 'Create Shelter Service': 'Kreiraj uslugu skloništa', 'Create Shelter Status': 'Kreiraj status skloništa', 'Create Shelter Type': 'Kreiraj tip skloništa', 'Create Skill': 'Kreiraj vještinu', 'Create Skill Type': 'Kreiraj tip vještine', 'Create Staff Member': 'Kreiraj člana osoblja', 'Create Status': 'Kreiraj status', 
'Create Status Report': 'Kreiraj statusni izvještaj', 'Create Supplier': 'Kreiraj dobavljača', 'Create Symbology': 'Kreiraj značenje simbola', 'Create Tag': 'Kreiraj oznaku', 'Create Task': 'Kreiraj zadatak', 'Create Team': 'Kreiraj tim', 'Create Template Section': 'Kreiraj odjeljak predloška', 'Create Theme': 'Kreiraj temu', 'Create Tour': 'Kreiraj turu', 'Create Training Event': 'Kreiraj događaj obuke', 'Create User': 'Kreiraj korisnika', 'Create Vehicle': 'Kreiraj vozilo', 'Create Vehicle Detail': 'Kreiraj detalje o vozilu', 'Create Volunteer': 'Kreiraj volontera', 'Create Volunteer Cluster': 'Kreiraj skup volontera', 'Create Volunteer Cluster Position': 'Kreiraj poziciju skupa volontera', 'Create Volunteer Cluster Type': 'Kreiraj tip skupa volontera', 'Create Warehouse': 'Kreiraj skladište', 'Create, enter, and manage surveys.': 'Kreiraj, pristupi i upravljaj anketama.', 'created': 'kreirano', 'Created By': 'Kreirao', 'Created on %s': 'Kreirano %s', 'Created on %s by %s': 'Kreirano dana %s od strane %s', 'Creation of assessments': 'Kreiranje procjena', 'Creation of Surveys': 'Kreiranje anketa', 'Credential': 'Akreditiv', 'Credential added': 'Akreditiv dodan', 'Credential deleted': 'Akreditiv obrisan', 'Credential Details': 'Detalji o akreditivima', 'Credential updated': 'Akreditiv ažuriran', 'Credentialling Organization': 'Akreditirajuća organizacija', 'Credentials': 'Akreditivi', 'Credit Card': 'Kreditna kartica', 'Crime': 'Zločin', 'criminal intent': 'namjera zločina', 'Criteria': 'Kriteriji', 'critical': 'kritično', 'Croatia': 'Hrvatska', 'Crop Image': 'Izreži sliku', 'cross-eyed': 'razrok', 'CSS file %s not writable - unable to apply theme!': 'U CSS datoteku %s nije moguće pisati - nije moguće primijeniti temu!', 'CSV file required': 'CSV datoteka je potrebna', 'Cuba': 'Kuba', 'curly': 'kovrčavo', 'Currency': 'Valuta', 'current': 'tekuće', 'Current': 'Tekući', 'Current community priorities': 'Trenutni prioriteti zajednice', 'Current Entries': 'Trenutni elementi', 'Current general needs': 'Trenutne generalne potrebe', 'Current greatest needs of vulnerable groups': 'Trenutno najveće potrebe pogođenih grupa', 'Current Group Members': 'Trenutni članovi grupe', 'Current Group Memberships': 'Trenutni članovi grupe', 'Current health problems': 'Trenutni zdravstveni problemi', 'Current Home Address': 'Trenutna kućna adresa', 'Current Identities': 'Trenutni identiteti', 'Current Location': 'Trenutna lokacija', 'Current Location Country': 'Zemlja trenutne lokacije', 'Current Location Phone Number': 'Broj telefona na trenutnoj lokaciji', 'Current Location Treating Hospital': 'Bolnica za tretman na trenutnoj lokaciji', 'Current Log Entries': 'Trenutne stavke zapisnika', 'Current main income sources': 'Trenutni glavni izvori prihoda', 'Current major expenses': 'Trenutni veći troškovi', 'Current Memberships': 'Trenutno članstvo', 'Current Mileage': 'Trenutna kilometraža', 'Current Notes': 'Trenutne napomene', 'Current number of patients': 'Trenutni broj pacijenata', 'Current Owned By (Organization/Branch)': 'Trenutni vlasnik (organizacija/grana)', 'Current problems, categories': 'Trenutni problemi, kategorije', 'Current problems, details': 'Tekući problemi, pojedinosti', 'Current Records': 'Trenutni zapisi', 'Current Registrations': 'Trenutne registracije', 'Current request': 'Trenutni zahtjev', 'Current response': 'Trenutni odziv', 'Current session': 'Trenutna sesija', 'Current staffing level at the facility.': 'Trenutni nivo osoblja na objektu.', 'Current Status': 'Trenutni status', 'Current 
Team Members': 'Trenutni članovi tima', 'Current Twitter account': 'Trenutni Twitter račun', 'Current type of health problems, adults': 'Trenutna vrsta zdravstvenih problema odraslih', 'Current type of health problems, children': 'Trenutni tip zdravstvenih problema, djeca', 'Current type of source for drinking water': 'Trenutni tip izvora pitke vode', 'Current type of source for sanitary water': 'Trenutni tip izvora sanitarne vode', 'Currently no Appraisals entered': 'Trenutno nema unesenih ispunjenja', 'Currently no Certifications registered': 'Nema trenutno registrovanih potvrda', 'Currently no Competencies registered': 'Trenutno nema registrovanih kompetencija', 'Currently no Course Certicates registered': 'Trenutno nisu registrovani certifikati kurseva', 'Currently no Course Certificates registered': 'Trenutno nisu registrovani certifikati kursa', 'Currently no Credentials registered': 'Trenutno nema registriranih akreditiva', 'Currently no entries in the catalog': 'Trenutno nema unosa u katalog', 'Currently no hours recorded for this volunteer': 'Trenutno nema zabilježenih sati za ovog volontera', 'Currently no Missions registered': 'Trenutno nema registrovanih misija', 'Currently no Participants registered': 'Trenutno nema registrovanih učesnika', 'Currently no Professional Experience entered': 'Trenutno nije uneseno profesionalno iskustvo', 'Currently no programs registered': 'Trenutno nema registrovanih programa', 'Currently no Skill Equivalences registered': 'Trenutno nije zabilježena ekvivalencija vještina', 'Currently no Skills registered': 'Trenutno nema registriranih vještina', 'Currently no staff assigned': 'Trenutno nema dodijeljenog osoblja', 'Currently no training events registered': 'Trenutno nema registrovanih događaja obuke', 'Currently no Trainings registered': 'Trenutno nema registrovanih treninga', 'currently registered': 'trenutno registrirani', 'Currently your system has default username and password. Username and Password are required by foriegn machines to sync data with your computer. You may set a username and password so that only those machines can fetch and submit data to your machines which your grant access by sharing your password.': 'Trenutno vaš sistem ima podrazumijevano korisničko ime i lozinku. Korisničko ime i lozinka su potrebni stranim mašinama za sinhronizaciju podataka s vašim računarom. 
Možete postaviti korisničko ime i lozinku, tako da podatke sa vaših mašina mogu dohvatati i slati samo one mašine kojima ste dijeljenjem lozinke dodijelili pristup.', 'Customs Capacity': 'Kapacitet carine', 'Customs Warehousing Storage Capacity': 'Kapacitet carinskog skladišta', 'Cyprus': 'Kipar', 'Czech Republic': 'Češka Republika', "Côte d'Ivoire": 'Obala Slonovače', 'Daily': 'Dnevno', 'daily': 'dnevno', 'Daily Work': 'Dnevni rad', 'Dam Overflow': 'Preliv brane', 'Damage': 'Šteta', 'Damage Assessment': 'Procjena štete', 'Damage sustained': 'Pretrpljena šteta', 'Damaged': 'Oštećeno', 'Dangerous Person': 'Opasna osoba', 'dark': 'tamno', 'Dashboard': 'Kontrolna ploča', 'Data': 'Podatak', 'Data added to Theme Layer': 'Podaci dodani na tematski sloj', 'Data import policy': 'Politika uvoza podataka', 'Data not available': 'Podaci nisu dostupni', 'Data Type': 'Tip podataka', 'data uploaded': 'podaci poslani', 'Data uploaded': 'Podaci preneseni', 'database': 'baza podataka', 'Database': 'Baza podataka', 'database %s select': 'baza podataka %s selektovana', 'Database %s select': 'Baza podataka %s selektovana', 'DataTable ID': 'ID tabele podataka', 'DataTable row': 'Red tabele podataka', 'Date': 'Datum', 'Date & Time': 'Datum i vrijeme', 'Date and Time': 'Datum i vrijeme', 'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': 'Datum i vrijeme prijema robe. Normalno je ovdje prikazano trenutno vrijeme, ali se može izmijeniti u padajućoj listi.', 'Date and time this report relates to.': 'Datum i vrijeme na koje se odnosi ovaj izvještaj.', 'Date Avaialble': 'Datum dostupan', 'Date Available': 'Datum dostupnosti', 'Date Created': 'Datum kreiranja', 'Date Due': 'Krajnji rok', 'Date Expected': 'Očekivani datum', 'Date Modified': 'Datum izmjene', 'Date must be %(max)s or earlier!': 'Datum mora biti %(max)s ili raniji!', 'Date must be %(min)s or later!': 'Datum mora biti %(min)s ili kasniji!', 'Date must be between %(min)s and %(max)s!': 'Datum mora biti između %(min)s i %(max)s!', 'Date Needed By': 'Datum kada je potrebno', 'Date of Birth': 'Datum rođenja', 'Date of Latest Information on Beneficiaries Reached': 'Datum najnovijih informacija o dostignutim korisnicima', 'Date of Recovery': 'Datum pronalaska', 'Date of Report': 'Datum podnošenja izvještaja', 'Date of Treatment': 'Datum tretmana', 'Date Printed': 'Datum štampe', 'Date Published': 'Datum objavljivanja', 'Date Question': 'Datum pitanja', 'Date Received': 'Datum prijema', 'Date Released': 'Datum izlaza', 'Date Repacked': 'Datum ponovnog pakovanja', 'Date Requested': 'Traženi datum', 'Date Required': 'Neophodan datum', 'Date Required Until': 'Datum potreban do', 'Date Sent': 'Datum slanja', 'Date Taken': 'Datum preuzimanja', 'Date Until': 'Datum do', 'Date/Time': 'Datum/Vrijeme', 'Date/Time of Alert': 'Vrijeme i datum uzbune', 'Date/Time of Dispatch': 'Vrijeme i datum otpreme', 'Date/Time of Find': 'Datum/Vrijeme pretrage', 'Date/Time when found': 'Datum/Vrijeme kada je pronađeno', 'Date/Time when last seen': 'Datum/Vrijeme posljednjeg viđenja', 'Day': 'Dan', 'db': 'baza podataka', 'DC': 'DC', 'De-duplicate': 'Ukloni duplikat', 'De-duplicate Records': 'Ukloni duple slogove', 'De-duplicator': 'Uklanjanje duplikata', 'Dead Bodies': 'Mrtva tijela', 'Dead Body': 'Leš', 'Dead Body Details': 'Detalji o mrtvim tijelima', 'Dead body report added': 'Dodat izvještaj o mrtvom tijelu', 'Dead body report deleted': 'Obrisan izvještaj o mrtvom tijelu', 'Dead body report 
updated': 'Ažuriran izvještaj o mrtvom tijelu', 'Dead Body Reports': 'Izvještaji o mrtvim tijelima', 'Deaths in the past 24h': 'Broj smrtnih slučajeva u protekla 24 sata', 'Deaths/24hrs': 'Smrtnost/24h', 'Debug': 'Praćenje grešaka', 'deceased': 'preminuo', 'Deceased': 'Preminuo', 'Decimal Degrees': 'Decimalni stepeni', 'DECISION': 'ODLUKA', 'Decision': 'Odluka', 'Decomposed': 'Raspadnuto', 'deep': 'duboko', 'Default': 'Zadano', 'Default Base layer?': 'Podrazumijevani bazni sloj?', 'Default Height of the map window.': 'Početna visina prozora mape.', 'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Podrazumijevana visina prozora mape. U rasporedu prozora karta se maksimizira da popuni prozor, nema potrebe da se ovdje postavlja velika vrijednost.', 'Default Location': 'Podrazumijevana lokacija', 'Default Map': 'Osnovna karta', 'Default map question': 'Podrazumijevano pitanje mape', 'Default Marker': 'Zadani marker', 'Default Realm': 'Zadano carstvo', 'Default Realm = All Entities the User is a Staff Member of': 'Podrazumijevano carstvo = Sve jedinke čiji je korisnik uposlenik', 'Default synchronization policy': 'Uobičajena politika sinhronizacije', 'Default Width of the map window.': 'Početna vrijednost širine prozora mape.', 'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Podrazumijevana širina prozora mape. U rasporedu prozora karta se maksimizira da popuni prozor, nema potrebe da se ovdje postavlja velika vrijednost.', 'Default?': 'Podrazumijevano?', 'Defaults updated': 'Podrazumijevane vrijednosti ažurirane', 'Defecation area for animals': 'Područje za vršenje nužde za životinje', 'deferred': 'odgođen', 'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': 'Definirajte scenarije za raspodjelu prikladnih resursa (ljudi, sredstva i objekti).', 'Defines the icon used for display of features on handheld GPS.': 'Definiše ikonu korištenu za prikaz karakteristika na ručnom GPS uređaju.', 'Defines the icon used for display of features on interactive map & KML exports.': 'Definira ikonu korištenu za prikaz karakteristika na interaktivnoj mapi i KML izvozima.', 'Defines the marker used for display & the attributes visible in the popup.': 'Definira marker korišten za prikaz i atribute vidljive u prozoru.', 'Degrees in a latitude must be between -90 to 90.': 'Stepeni u geografskoj širini moraju biti između -90 i 90.', 'Degrees in a longitude must be between -180 to 180.': 'Stepeni u geografskoj dužini moraju biti između -180 i 180.', 'Degrees must be a number between -180 and 180': 'Stepeni moraju biti broj između -180 i 180', 'Degrees must be a number.': 'Stepeni moraju biti broj.', 'Dehydration': 'Dehidracija', 'Delete': 'Brisanje', 'delete': 'brisanje', 'Delete Affiliation': 'Obriši namještenje', 'Delete Aid Request': 'Obriši zahtjev za pomoć', 'Delete Airport': 'Obriši aerodrom', 'delete all checked': 'izbriši sve označene', 'Delete all data of this type which the user has permission to before upload. This is designed for workflows where the data is maintained in an offline spreadsheet and uploaded just for Reads.': 'Obriši sve podatke ovog tipa za koje korisnik ima odobrenje prije postavljanja na server. 
Ovo je dizajnirano za radne tokove gdje se podaci ažuriraju na vanmrežnim tablicama i šalju samo za čitanje', 'Delete Alternative Item': 'Obriši alternativnu stavku', 'Delete Appraisal': 'Obriši ispunjenje', 'Delete Assessment': 'Obriši procjenu', 'Delete Assessment Summary': 'Obriši sažetak procjene', 'Delete Asset': 'Obriši sredstvo', 'Delete Asset Log Entry': 'Obriši unos u zapisniku sredstava', 'Delete Award': 'Obriši nagradu', 'Delete Base Station': 'Obriši baznu stanicu', 'Delete Baseline': 'Izbriši referentnu tačku', 'Delete Baseline Type': 'Obriši tip referentne tačke', 'Delete Branch': 'Obriši ogranak', 'Delete Brand': 'Obriši proizvođačku marku', 'Delete Budget': 'Obriši budžet', 'Delete Bundle': 'Izbriši paket', 'Delete Case': 'Obriši slučaj', 'Delete Catalog': 'Obriši katalog', 'Delete Catalog Item': 'Obriši stavku kataloga', 'Delete Certificate': 'Obriši certifikat', 'Delete Certification': 'Obriši certifikat', 'Delete Cluster': 'Obriši grupisanje', 'Delete Cluster Subsector': 'Obriši podsektor skupa', 'Delete Commitment': 'Obriši obavezu', 'Delete Commitment Item': 'Obriši stavku obaveze', 'Delete Competency': 'Izbriši stručnost', 'Delete Competency Rating': 'Obriši status spremnosti', 'Delete Contact': 'Obriši kontakt', 'Delete Contact Information': 'Obriši informacije o kontaktu', 'Delete Course': 'Obriši kurs', 'Delete Course Certicate': 'Obriši certifikat kursa', 'Delete Course Certificate': 'Obriši certifikat kursa', 'Delete Credential': 'Obriši akreditiv', 'Delete Data from Theme layer': 'Obriši podatke iz tematskog sloja', 'Delete Department': 'Obriši odjeljenje', 'Delete Detail': 'Obriši detalj', 'Delete Distribution': 'Izbriši raspodjelu', 'Delete Distribution Item': 'Obriši distribucijsku stavku', 'Delete Document': 'Obriši dokument', 'Delete Donation': 'Obriši donaciju', 'Delete Donor': 'Obriši donatora', 'Delete Email': 'Obriši e-poštu', 'Delete Entry': 'Obriši unos', 'Delete Event': 'Obriši događaj', 'Delete Event Type': 'Obriši tip događaja', 'Delete Facility': 'Obriši objekat', 'Delete Facility Type': 'Obriši vrstu objekta', 'Delete Feature Class': 'Obriši klasu karakteristika', 'Delete Feature Layer': 'Obriši sloj karakteristika', 'Delete Find Report': 'Obriši traženi izvještaj', 'Delete from Server?': 'Izbrisati sa servera?', 'Delete GPS data': 'Obriši GPS podatke', 'Delete Group': 'Obriši grupu', 'Delete Hazard': 'Obriši rizik', 'Delete Heliport': 'Obriši heliodrom', 'Delete Home': 'Izbriši dom', 'Delete Hospital': 'Obriši bolnicu', 'Delete Hours': 'Obriši sate', 'Delete Image': 'Obriši sliku', 'Delete Impact': 'Obriši utjecaj', 'Delete Impact Type': 'Izbriši tip utjecaja', 'Delete Incident Report': 'Obriši izvještaj o incidentu', 'Delete Inventory Item': 'Obriši artikal u skladištu', 'Delete Item': 'Obriši stavku', 'Delete Item Category': 'Obriši kategoriju stavke', 'Delete Item from Request': 'Obriši stavku iz zahtjeva', 'Delete Item Pack': 'Obriši paket stavki', 'Delete Item Packet': 'Obriši paket stavki', 'Delete Job Role': 'Izbriši ulogu posla', 'Delete Job Title': 'Obriši radno mjesto', 'Delete Key': 'Obriši ključ', 'Delete Kit': 'Obriši komplet', 'Delete Layer': 'Obriši sloj', 'Delete Level 1 Assessment': 'Izbriši procjenu nivoa 1', 'Delete Level 2 Assessment': 'Obriši procjenu nivoa 2', 'Delete Location': 'Obriši lokaciju', 'Delete Location Hierarchy': 'Obriši hijerarhiju lokacija', 'Delete Mailing List': 'Obriši listu za slanje poruka', 'Delete Map Configuration': 'Obriši konfiguraciju mape', 'Delete Marker': 'Obriši marker', 'Delete 
Membership': 'Obriši članstvo', 'Delete Message': 'Obriši poruku', 'Delete Mission': 'Obriši misiju', 'Delete Morgue': 'Obriši mrtvačnicu', 'Delete Need': 'Obriši potrebu', 'Delete Need Type': 'Obriši tip potrebe', 'Delete Office': 'Obriši kancelariju', 'Delete Office Type': 'Obriši tip kancelarije', 'Delete Order': 'Obriši narudžbu', 'Delete Organization': 'Obriši organizaciju', 'Delete Organization Domain': 'Obriši domenu organizacije', 'Delete Organization Needs': 'Obriši potrebe organizacije', 'Delete Organization Type': 'Obriši tip organizacije', 'Delete Participant': 'Obriši učesnika', 'Delete Partner Organization': 'Obriši partnersku organizaciju', 'Delete Patient': 'Obriši pacijenta', 'Delete Peer': 'Obriši saradnika', 'Delete Person': 'Obriši osobu', 'Delete Photo': 'Obriši fotografiju', 'Delete PoI Type': 'Obriši tip tačke interesa', 'Delete Point of Interest': 'Obriši tačku interesa', 'Delete Population Statistic': 'Obriši statistiku o populaciji', 'Delete Position': 'Obriši poziciju', 'Delete Post': 'Obriši blok ugradivog teksta', 'Delete Professional Experience': 'Obriši profesionalno iskustvo', 'Delete Program': 'Obriši program', 'Delete Project': 'Obriši projekat', 'Delete Projection': 'Obriši projekciju', 'Delete Rapid Assessment': 'Izbriši brzu procjenu', 'Delete Received Item': 'Izbriši primljenu stavku', 'Delete Received Shipment': 'Obriši primljenu pošiljku', 'Delete Record': 'Obriši zapis', 'Delete Recovery Report': 'Obriši izvještaj o pronalaženju', 'Delete Region': 'Obriši područje', 'Delete Relative': 'Obriši srodnika', 'Delete Report': 'Obriši izvještaj', 'Delete Request': 'Obriši zahtjev', 'Delete Request Item': 'Izbriši stavku zahtjeva', 'Delete Request Template': 'Obriši predložak zahtjeva', 'Delete Resource': 'Obriši resurs', 'Delete Resource Type': 'Obriši tip resursa', 'Delete Role': 'Obriši ulogu', 'Delete Room': 'Obriši sobu', 'Delete saved search': 'Obriši snimljenu pretragu', 'Delete Scenario': 'Obriši scenarij', 'Delete Seaport': 'Obriši luku', 'Delete Section': 'Obriši sekciju', 'Delete Sector': 'Obriši sektor', 'Delete Sent Item': 'Izbriši poslani predmet', 'Delete Sent Shipment': 'Obriši poslanu pošiljku', 'Delete Service': 'Obriši uslugu', 'Delete Service Profile': 'Obriši profil usluge', 'Delete Setting': 'Ukloni postavku', 'Delete Shipment Item': 'Obriši predmet pošiljke', 'Delete Site Needs': 'Obriši potrebe mjesta', 'Delete Skill': 'Obriši vještinu', 'Delete Skill Equivalence': 'Obriši ekvivalenciju vještine', 'Delete Skill Provision': 'Obriši pružanje vještina', 'Delete Skill Type': 'Obriši tip vještine', 'Delete SMS': 'Obriši SMS', 'Delete Staff Assignment': 'Obriši dodjelu osoblja', 'Delete Staff Member': 'Obriši člana osoblja', 'Delete Staff Type': 'Izbriši tip osoblja', 'Delete Status': 'Obriši status', 'Delete Stock Adjustment': 'Obriši prilagođenje zalihe', 'Delete Stock Count': 'Obriši broj zaliha', 'Delete Subscription': 'Izbriši pretplatu', 'Delete Subsector': 'Izbriši podsektor', 'Delete Supplier': 'Obriši dobavljača', 'Delete Survey Answer': 'Izbriši anketni odgovor', 'Delete Survey Question': 'Izbriši anketno pitanje', 'Delete Survey Section': 'Obriši anketnu sekciju', 'Delete Survey Series': 'Izbriši niz anketa', 'Delete Survey Template': 'Obriši šablon ankete', 'Delete Symbology': 'Obriši značenje simbola', 'Delete Theme': 'Obriši temu', 'Delete this Assessment Answer': 'Obriši ovaj odgovor ocjene', 'Delete this Assessment Question': 'Obriši ovo pitanje ocjene', 'Delete this Assessment Template': 'Obriši ovaj predložak ocjene', 
'Delete this Completed Assessment Form': 'Obriši ovaj formular za završenu procjenu', 'Delete this Disaster Assessment': 'Obriši ovu procjenu katastrofe', 'Delete this Filter': 'Obriši filter', 'Delete this Question Meta-Data': 'Obriši ove metapodatke pitanja', 'Delete this Template Section': 'Obriši ovaj odjeljak predloška', 'Delete Tour': 'Obriši turu', 'Delete Training': 'Obriši obuku', 'Delete Training Event': 'Obriši događaj obuke', 'Delete Tweet': 'Obriši tweet', 'Delete Unit': 'Obriši jedinicu', 'Delete User': 'Obriši korisnika', 'Delete Vehicle': 'Obriši vozilo', 'Delete Vehicle Details': 'Obriši detalje o vozilu', 'Delete Vehicle Type': 'Obriši vrstu vozila', 'Delete Volunteer': 'Obriši volontera', 'Delete Volunteer Cluster': 'Obriši skup volontera', 'Delete Volunteer Cluster Position': 'Obriši poziciju skupa volontera', 'Delete Volunteer Cluster Type': 'Obriši tip skupa volontera', 'Delete Volunteer Role': 'Obriši ulogu volontera', 'Delete Warehouse': 'Obriši skladište', 'Delete Warehouse Item': 'Obriši stavku skladišta', 'Delete:': 'Obriši:', 'deleted': 'obrisano', 'Deliver To': 'Isporuka za', 'Delivered By': 'Isporučio', 'Delivered To': 'Isporučeno na', 'Delphi Decision Maker': 'Delphi stvaralac odluka', 'Delphi toma de decisiones': 'Delphi stvaralac odluka', 'Demographic': 'Demografski', 'Demographics': 'Demografija', 'Demonstrations': 'Demonstracije', 'denied': 'odbijeno', 'Dental Examination': 'Pregled zuba', 'Dental Profile': 'Zubni profil', 'Department / Unit': 'Odjel/Jedinica', 'Department added': 'Odjel dodan', 'Department Catalog': 'Katalog odjeljenja', 'Department deleted': 'Odjel obrisan', 'Department Details': 'Detalji odjeljenja', 'Department updated': 'Odjel ažuriran', 'Deployed': 'Raspoređeno', 'Deployment': 'Raspoređivanje', 'Deployment Alert': 'Upozorenje o raspoređivanju', 'Deployment Location': 'Lokacija raspoređivanja', 'Deployment Request': 'Zahtjev za raspoređivanje', 'Describe the condition of the roads from/to the facility.': 'Opišite stanje puteva od/do ovog objekta.', 'Describe the condition of the roads to your hospital.': 'Opišite stanje ceste prema Vašoj bolnici.', "Describe the procedure which this record relates to (e.g. 'medical examination')": "Opisati proceduru na koju se odnosi ovaj zapis (npr. 
'medicinsko ispitivanje')", 'Description': 'Opis', 'description': 'opis', 'Description of Bin Type': 'Opis korpe za smještaj', 'Description of Contacts': 'Opis kontakta', 'Description of defecation area': 'Opis područja za vršenje nužde', 'Description of drinking water source': 'Opis izvora pitke vode', 'Description of perimeter fencing, security guards, security lighting.': 'Opis ograde, stražara, sigurnosnih svjetala.', 'Description of sanitary water source': 'Opis sanitarnih izvora vode', 'Description of water source before the disaster': 'Opis vodenih izvora prije katastrofe', 'Description:': 'Opis:', 'Descriptive Text (e.g., Prose, etc)': 'Opisni tekst (npr. proza, itd.)', 'design': 'dizajn', 'Designated for': 'Namijenjeno za', 'Desire to remain with family': 'Želja da se ostane sa porodicom', 'Destination': 'Odredište', 'Destroyed': 'Uništen', 'Detail': 'Detalj', 'Detail added': 'Detalj dodan', 'Detail deleted': 'Detalj obrisan', 'Detail updated': 'Detalj ažuriran', 'Detailed Description/URL': 'Detaljan opis/URL', 'Details': 'Detalji', 'Details field is required!': 'Polje detalji je obavezno!', 'Details of Disaster Assessment': 'Detalji procjene katastrofe', 'Details of each question in the Template': 'Detalji svakog pitanja u predlošku', 'Dialysis': 'Dijaliza', 'Diaphragms, horizontal bracing': 'Dijafragme, horizontalno učvršćenje', 'Diarrhea': 'Proljev', 'Diarrhea among children under 5': 'Dijareja među djecom mlađom od 5 godina', 'Dignitary Visit': 'Posjeta funkcionera', 'Direction': 'Smjer', 'Disabilities': 'Invaliditeti', 'Disable': 'Onemogući', 'Disabled': 'Onemogućeno', 'Disabled participating in coping activities': 'Učestvovanje osoba sa posebnim potrebama u aktivnostima za suočavanje sa stresom', 'Disabled?': 'Osoba sa invaliditetom?', 'Disaster': 'Katastrofa', 'Disaster Assessment added': 'Procjena katastrofe dodana', 'Disaster Assessment Chart': 'Dijagram procjene katastrofe', 'Disaster Assessment deleted': 'Procjena katastrofe obrisana', 'Disaster Assessment Map': 'Mapa procjene katastrofe', 'Disaster Assessment Summary': 'Rezime procjene katastrofe', 'Disaster Assessment updated': 'Procjena katastrofe ažurirana', 'Disaster Assessments': 'Procjene katastrofe', 'Disaster clean-up/repairs': 'Čišćenje/opravka od nepogode', 'Disaster Victim Identification': 'Identifikacija žrtava nesreće', 'Disaster Victim Registry': 'Registar žrtava katastrofe', 'Discharge (cusecs)': 'Protok (kubne stope u sekundi)', 'Discharges/24hrs': 'Otpusti/24 sata', 'Discussion Forum': 'Forum za diskusiju', 'Discussion Forum on item': 'Forum za rasprave na određenu temu', 'Disease vectors': 'Vektori bolesti', 'diseased': 'bolesni', 'Disk Cache Keys': 'Disk cache ključevi', 'Disk Cleared': 'Disk očišćen', 'Dispatch': 'Isporuči', 'Dispatch Time': 'Vrijeme isporuke', 'Dispensary': 'Dispanzer', 'displaced': 'raseljeni', 'Displaced': 'Raseljen', 'Displaced Populations': 'Raseljeno stanovništvo', 'Display Chart': 'Prikaži dijagram', 'Display name': 'Ime za prikaz', 'Display Polygons?': 'Prikaži poligone?', 'Display Question on Map': 'Prikaži pitanje na karti', 'Display Routes?': 'Prikazati rute?', 'Display Selected Questions': 'Prikaži izabrana pitanja', 'Display Tracks?': 'Prikaži tragove?', 'Display Waypoints?': 'Prikazati putne tačke?', 'Dispose': 'Rashoduj', 'Distance between defecation area and water source': 'Udaljenost između površina gdje se obavlja nužda i izvora vode', 'Distance between latrines and temporary shelter in meters': 'Udaljenost između zahoda i privremenog 
skloništa u metrima', 'Distance between shelter and latrines': 'Udaljenost između skloništa i zahoda', 'Distance from %s:': 'Razdaljina od %s:', 'Distance(Kms)': 'Udaljenost (km)', 'Distributed without Record': 'Raspodijeljeno bez zapisa', 'Distribution': 'Raspodjela', 'Distribution Added': 'Dodata raspodjela', 'Distribution Deleted': 'Raspodjela izbrisana', 'Distribution Details': 'Detalji raspodjele', 'Distribution Groups': 'Distribucijske grupe', 'Distribution groups': 'Distribucijske grupe', 'Distribution Item': 'Distribucijska stavka', 'Distribution Item Added': 'Dodata distribucijska stavka', 'Distribution Item Deleted': 'Stavka raspodjele je obrisana', 'Distribution Item Details': 'Detalji stavke distribucije', 'Distribution Item Updated': 'Stavka raspodjele je ažurirana', 'Distribution Items': 'Raspodjela stavki', 'Distribution Report': 'Izvještaj raspodjele', 'Distribution Updated': 'Raspodjela ažurirana', 'Distributions': 'Raspodjele', 'District': 'Distrikt', 'divorced': 'razveden', 'Djibouti': 'Džibuti', 'DM Planning': 'DM planiranje', 'DNA Profile': 'DNA profil', 'DNA Profiling': 'Prikaz profila preko DNA', 'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?': 'Da li domaćinstva imaju odgovarajuću opremu i materijale da kuhaju svoju hranu (štednjak, lonci, tanjiri, šolje/posude za piće, itd)?', 'Do households have bedding materials available (tarps, plastic mats, blankets)?': 'Da li domaćinstva imaju odgovarajući materijal za ležaje (cerade, plastične prostirke, prekrivače)?', 'Do households have household water storage containers?': 'Da li domaćinstva imaju spremnike za vodu?', 'Do women and girls have easy access to sanitary materials?': 'Da li žene i djevojke imaju lak pristup sanitarnim materijalima?', 'Do you have access to cash to restart your business?': 'Imate li pristup novcu da ponovo započnete poslovne aktivnosti?', 'Do you know of any incidents of violence?': 'Znate li slučajeve nasilja?', 'Do you know of children living on their own (without adults)?': 'Poznajete li djecu koja žive sama (bez staratelja)?', 'Do you know of children separated from their parents or caregivers?': 'Da li poznajete djecu odvojenu od svojih roditelja/staratelja?', 'Do you know of children that have been sent to safe places?': 'Poznajete li djecu koja su poslana na sigurna mjesta?', 'Do you know of children that have disappeared without explanation in the period since the disaster?': 'Poznajete li djecu koja su nestala bez objašnjenja u periodu nakon katastrofe?', 'Do you know of parents/caregivers missing children?': 'Poznajete li roditelje/staratelje djece koja su nestala?', 'Do you prefer': 'Da li više volite', 'Do you really want to approve this record?': 'Želite li zaista potvrditi ovaj zapis?', 'Do you really want to delete these records?': 'Da li zaista želite obrisati ove zapise?', 'Do you really want to delete this record? (This action can not be reversed)': 'Želite li zaista obrisati ovaj zapis? (akcija se ne može vratiti)', 'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'Želite li otkazati ovu primljenu pošiljku? Predmeti će biti uklonjeni iz inventara. Ova akcija NE MOŽE biti poništena!', 'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': 'Da li želite otkazati ovu pošiljku? Artikal će biti vraćen u inventar. 
Ovo se NE MOŽE poništiti!', 'Do you want to cancel this sent shipment? The items will be returned to the Warehouse. This action CANNOT be undone!': 'Da li želite otkazati ovu pošiljku? Artikli će biti vraćeni u skladište. Ovo se NE MOŽE poništiti!', 'Do you want to commit to this request?': 'Želite li zaista potvrditi ovaj zahtjev?', 'Do you want to complete & close this adjustment?': 'Želite li završiti i zatvoriti ovo prilagođenje?', 'Do you want to complete the return process?': 'Želite li završiti proces vraćanja?', 'Do you want to over-write the file metadata with new default values?': 'Želite li prebrisati metapodatke datoteke s novim podrazumijevanim vrijednostima?', 'Do you want to receive this shipment?': 'Da li želite primiti ovu pošiljku?', 'Do you want to send these Committed items?': 'Da li želite poslati ove izvršene stavke?', 'Do you want to send this shipment?': 'Da li želite poslati ovu pošiljku?', 'Document': 'Dokument', 'Document added': 'Dokument dodan', 'Document deleted': 'Dokument obrisan', 'Document Details': 'Detalji dokumenta', 'Document removed': 'Dokument uklonjen', 'Document Scan': 'Skeniranje dokumenta', 'Document updated': 'Dokument ažuriran', 'Document:': 'Dokument:', 'Documents': 'Dokumenti', 'Documents and Images': 'Dokumenti i slike', 'Documents and Photos': 'Dokumenti i fotografije', 'Does this facility provide a cholera treatment center?': 'Da li ovaj objekat ima centar za liječenje kolere?', 'Doing nothing (no structured activity)': 'Ne radeći ništa (nema strukturirane aktivnosti)', 'Dollars': 'Dolari', 'Domain': 'Domena', 'Domestic chores': 'Domaći poslovi', 'Dominica': 'Dominika', 'Dominican Republic': 'Dominikanska Republika', "Don't Know": 'Ne znam', 'DONATE': 'DONIRAJ', 'Donate to this Request': 'Doniraj za ovaj zahtjev', 'Donated': 'Donirano', 'Donating Organization': 'Organizacija donatora', 'Donation': 'Donacija', 'Donation Added': 'Donacija dodana', 'Donation Canceled': 'Donacija otkazana', 'Donation Certificate': 'Certifikat o donaciji', 'Donation Details': 'Detalji donacije', 'Donation Phone #': 'Telefon za donacije #', 'Donation Updated': 'Donacija ažurirana', 'Donations': 'Donacije', 'done!': 'učinjeno!', 'Donor': 'Donator', 'Donor added': 'Donator dodan', 'Donor deleted': 'Donator obrisan', 'Donor Details': 'Detalji donatora', 'Donor updated': 'Donator ažuriran', 'Donors': 'Donatori', 'Donors Report': 'Izvještaj o donatorima', 'Doolie Transportation Ambulance': 'Doolie prevozna kola hitne pomoći', 'Door frame': 'Okvir vrata', 'Download': 'Preuzmi', 'Download Assessment Form Document': 'Preuzmi formular procjene kao dokument', 'Download Assessment Form Spreadsheet': 'Preuzmi formular procjene kao tablicu', 'Download last build': 'Preuzmi posljednje kompajliranje', 'Download OCR-able PDF Form': 'Preuzmi OCR čitljiv PDF formular', 'Download PDF': 'Preuzmite PDF', 'Download Template': 'Preuzimanje šablona', 'Draft': 'Nacrt', 'Draft Features': 'Nacrt objekata', 'Drag an image below to crop and scale it before uploading it:': 'Povucite sliku ispod da je izrežete i promijenite joj veličinu prije postavljanja:', 'Drainage': 'Drenaža', 'Draw on Map': 'Crtaj na karti', 'Drawing up a Budget for Staff & Equipment across various Locations.': 'Izrada nacrta budžeta za osoblje i opremu na različitim lokacijama.', 'Drill Down by Group': 'Dublja analiza po grupi', 'Drill Down by Incident': 'Dublja analiza po incidentu', 'Drill Down by Shelter': 'Dublja analiza po skloništu', 'Driver Phone Number': 'Telefonski broj vozača', 'Drivers': 'Vozači', 'Driving 
License': 'Vozačka dozvola', 'Drop-off Location for Goods?': 'Lokacija za ostavljanje robe?', 'Drought': 'Suša', 'DRRPP Extensions': 'DRRPP proširenja', 'Drugs': 'Lijekovi', 'Dry Dock': 'Suhi dok', 'Due %(date)s': 'Rok %(date)s', 'Dug Well': 'Iskopani bunar', 'Dump': 'Izdvajanje', 'Duplicate': 'Dupliciraj', 'duplicate': 'duplikat', 'Duplicate Locations': 'Duplicirane lokacije', 'Duplicate?': 'Napraviti kopiju?', 'Duration': 'Trajanje', 'Duration (months)': 'Trajanje (mjeseci)', 'Dust Storm': 'Prašnjava oluja', 'DVI Navigator': 'DVI Navigator', 'Dwelling': 'Stambena jedinica', 'Dwellings': 'Stambene jedinice', 'dyed': 'umrli', 'E-mail': 'E-pošta', 'Early Recovery': 'Rani oporavak', 'Early warning': 'Rano upozorenje', 'Ears, angle': 'Uši, ugao', 'Ears, size': 'Uši, veličina', 'Earth Enabled?': 'Zemlja uključena?', 'Earthquake': 'Zemljotres', 'East Timor': 'Istočni Timor', 'Easy access to sanitation items for women/girls': 'Lak pristup sanitarnim predmetima za žene/djevojke', 'Ecuador': 'Ekvador', 'Edit': 'Izmijeni', 'edit': 'uredi', 'Edit %(site_label)s Status': 'Uredi %(site_label)s status', 'Edit %(type)s': 'Uredi %(type)s', "Edit 'More Info'": 'Uredi dodatne podatke', 'Edit a Missing Person': 'Uredi nestalu osobu', 'Edit Activity': 'Uredi aktivnost', 'Edit Activity Organization': 'Uredi organizaciju aktivnosti', 'Edit Activity Type': 'Uredi tip aktivnosti', 'Edit Address': 'Uredi adresu', 'Edit Adjustment': 'Uredi prilagođenje', 'Edit Affiliation': 'Uredi namještenje', 'Edit Airport': 'Uredi aerodrom', 'Edit Alternative Item': 'Uredi alternativnu stavku', 'Edit Annual Budget': 'Uredi godišnji budžet', 'Edit Application': 'Uredi aplikaciju', 'Edit Appraisal': 'Uredi ispunjenje', 'Edit Assessment': 'Uredi procjenu', 'Edit Assessment Answer': 'Uredi odgovor ocjene', 'Edit Assessment Question': 'Uredi pitanje ocjene', 'Edit Assessment Summary': 'Izmjena sažetka procjene', 'Edit Assessment Template': 'Uredi predložak ocjene', 'Edit Asset': 'Uredi sredstvo', 'Edit Asset Log Entry': 'Uredi stavku zapisnika o sredstvima', 'Edit Award': 'Uredi nagradu', 'Edit Base Station': 'Uredi baznu stanicu', 'Edit Baseline': 'Uredi referentnu tačku', 'Edit Baseline Type': 'Uredi tip referentne tačke', 'Edit Beneficiaries': 'Uredi korisnike', 'Edit Beneficiary Type': 'Uredi tip korisnika', 'Edit Branch Organization': 'Uredi ogranak organizacije', 'Edit Brand': 'Uredi proizvođačku marku', 'Edit Budget': 'Promijeni budžet', 'Edit Bundle': 'Promijeni paket', 'Edit Camp': 'Uredi kamp', 'Edit Camp Service': 'Uredi uslugu kampa', 'Edit Camp Status': 'Uredi status kampa', 'Edit Camp Type': 'Uredi tip kampa', 'Edit Campaign': 'Uredi kampanju', 'Edit Campaign Message': 'Uredi poruku kampanje', 'Edit Case': 'Uredi slučaj', 'Edit Catalog': 'Uredi katalog', 'Edit Catalog Item': 'Uredi stavku kataloga', 'Edit Certificate': 'Uredi certifikat', 'Edit Certification': 'Uredi certifikaciju', 'Edit Cluster': 'Uredi grupisanje', 'Edit Cluster Subsector': 'Uredi podsektor skupa', 'Edit Commitment': 'Uredi obavezu', 'Edit Commitment Item': 'Uredi stavku obaveze', 'Edit Committed People': 'Uredi zadužene ljude', 'Edit Committed Person': 'Uredi zaduženu osobu', 'Edit Community Details': 'Uredi podatke zajednice', 'Edit Competency': 'Uredi kompetentnost', 'Edit Competency Rating': 'Uredi status spremnosti', 'Edit Completed Assessment Form': 'Uredi završen formular ocjene', 'Edit Config': 'Izmijeni konfiguraciju', 'Edit Contact': 'Uredi kontakt', 'Edit Contact Details': 'Uredi detalje kontakta', 'Edit Contact Information': 'Uredi informacije o 
kontaktu', 'Edit Contents': 'Uredi sadržaj', 'Edit Course': 'Uredi kurs', 'Edit Course Certicate': 'Uredi certifikat kursa', 'Edit Course Certificate': 'Uredi certifikat kursa', 'Edit Credential': 'Uredi akreditiv', 'Edit current record': 'Uredi trenutni zapis', 'Edit Dead Body Details': 'Uredi detalje izvještaja o mrtvim tijelima', 'Edit Department': 'Uredi odjeljenje', 'Edit Description': 'Uredi opis', 'Edit Details': 'Uredi detalje', 'Edit Disaster Victims': 'Uredi žrtve katastrofe', 'Edit Distribution': 'Uredi raspodjelu', 'Edit Distribution Item': 'Uredi stavku raspodjele', 'Edit Document': 'Uredi dokument', 'Edit Donation': 'Uredi donaciju', 'Edit Donor': 'Uredi donatora', 'Edit DRRPP Extensions': 'Uredi DRRPP proširenja', 'Edit Education Details': 'Uredi podatke o obrazovanju', 'Edit Education Level': 'Uredi nivo obrazovanja', 'Edit Email': 'Izmijeni e-poštu', 'Edit Email Settings': 'Uredi postavke e-pošte', 'Edit Entry': 'Uredi unos', 'Edit Event': 'Uredi događaj', 'Edit Event Type': 'Uredi tip događaja', 'Edit Experience': 'Uredi iskustvo', 'Edit Facility': 'Uredi objekat', 'Edit Facility Type': 'Uredi vrstu objekta', 'Edit Feature Class': 'Uredi klasu karakteristika', 'Edit Feature Layer': 'Uredi sloj karakteristika', 'Edit Flood Report': 'Uredi izvještaj o poplavi', 'Edit Gateway Settings': 'Uredi postavke mrežnog izlaza', 'Edit GPS data': 'Uredi GPS podatke', 'Edit Group': 'Uredi grupu', 'Edit Hazard': 'Uredi rizik', 'Edit Heliport': 'Uredi heliodrom', 'Edit Home': 'Uredi kuću', 'Edit Hospital': 'Uredi bolnicu', 'Edit Hours': 'Uredi sate', 'Edit Human Resource': 'Uredi ljudske resurse', 'Edit Identification Report': 'Uredi izvještaj o identifikacijama', 'Edit Identity': 'Uredi identitet', 'Edit Image Details': 'Uredi detalje slike', 'Edit Impact': 'Uredi utjecaj', 'Edit Impact Type': 'Uredi tip utjecaja', 'Edit Import File': 'Uredi uvezeni fajl', 'Edit Incident': 'Uredi incident', 'Edit Incident Report': 'Uredi izvještaj o incidentu', 'Edit Incident Type': 'Uredi tip incidenta', 'Edit Inventory Item': 'Uredi stavku zalihe', 'Edit Item': 'Uredi stavku', 'Edit Item Catalog': 'Izmijeni stavku u katalogu', 'Edit Item Catalog Categories': 'Uredi kategorije stavki kataloga', 'Edit Item Category': 'Uredi kategoriju stavke', 'Edit Item in Request': 'Uredi stavku u zahtjevu', 'Edit Item Pack': 'Uredi paket stavki', 'Edit Item Packet': 'Uredi paket stavki', 'Edit Item Sub-Categories': 'Uredi potkategorije stavki', 'Edit Job': 'Uredi posao', 'Edit Job Role': 'Uredi opis posla', 'Edit Job Title': 'Uredi radno mjesto', 'Edit Key': 'Uredi ključ', 'Edit Keyword': 'Uredi ključnu riječ', 'Edit Kit': 'Uredi komplet', 'Edit L4': 'Da li urediti lokacije nivoa 4?', 'Edit L5': 'Da li urediti lokacije nivoa 5?', 'Edit Layer': 'Uredi sloj', 'Edit Level %d Locations?': 'Da li urediti lokacije nivoa %d?', 'Edit Level 1 Assessment': 'Uredi procjenu nivoa 1', 'Edit Level 2 Assessment': 'Izmijeni procjenu 2. 
nivoa', 'Edit Location': 'Uredi lokaciju', 'Edit Location Details': 'Uredi detalje lokacije', 'Edit Location Hierarchy': 'Uredi hijerarhiju lokacija', 'Edit Log Entry': 'Uredi unos zapisnika', 'Edit Logged Time': 'Uredi stavku zapisnika', 'Edit Mailing List': 'Uredi listu za slanje poruka', 'Edit Map Configuration': 'Uredi konfiguraciju mape', 'Edit Map Services': 'Uredi usluge mape', 'Edit Marker': 'Uredi marker', 'Edit Membership': 'Uredi članstvo', 'Edit Message': 'Uredi poruku', 'Edit message': 'Uredi poruku', 'Edit Messaging Settings': 'Uredite postavke poruka', 'Edit Metadata': 'Uredi metapodatke', 'Edit Milestone': 'Uredi prekretnicu', 'Edit Mission': 'Izmijeni misiju', 'Edit Mobile Commons Settings': 'Uredi Mobile Commons postavke', 'Edit Modem Settings': 'Uredi postavke modema', 'Edit Need': 'Uredi potrebu', 'Edit Need Type': 'Uredi tip potrebe', 'Edit Network': 'Uredi mrežu', 'Edit Note': 'Uredi napomenu', 'Edit Office': 'Uredi kancelariju', 'Edit Office Type': 'Uredi tip kancelarije', 'Edit Options': 'Izmijeni opcije', 'Edit Order': 'Uredi narudžbu', 'Edit Organization': 'Uredi organizaciju', 'Edit Organization Domain': 'Uredi domen organizacije', 'Edit Organization Needs': 'Uredi potrebe organizacije', 'Edit Organization Type': 'Uredi tip organizacije', 'Edit Output': 'Uredi izlaz', 'Edit Page': 'Uredi stranicu', 'Edit Parameters': 'Uredi parametre', 'Edit Parser Connection': 'Uredi parsersku konekciju', 'Edit Participant': 'Uredi učesnika', 'Edit Partner': 'Uredi partnera', 'Edit Partner Organization': 'Uredi partnersku organizaciju', 'Edit Patient': 'Uredi pacijenta', 'Edit Peer': 'Uredi saradnika', 'Edit Peer Details': 'Uredi detalje saradnika', 'Edit Permissions for %(role)s': 'Uredi dopuštenja %(role)s', 'Edit Person': 'Uredi osobu', 'Edit Person Details': 'Uredi detalje osobe', "Edit Person's Details": 'Uredi detalje o osobi', 'Edit Personal Effects Details': 'Uredi detalje ličnih stvari', 'Edit Photo': 'Uredi fotografiju', 'Edit Pledge': 'Uredi podršku', 'Edit PoI Type': 'Uredi tip tačke interesa', 'Edit Point of Interest': 'Uredi tačku interesa', 'Edit Policy or Strategy': 'Uredi politiku ili strategiju', 'Edit Population Statistic': 'Izmijeni statistiku stanovništva', 'Edit Position': 'Uredi poziciju', 'Edit Post': 'Uredi blok ugradivog teksta', 'Edit Problem': 'Uredi problem', 'Edit Professional Experience': 'Uredi profesionalno iskustvo', 'Edit Profile': 'Izmijeni profil', 'Edit Profile Configuration': 'Uredi konfiguraciju profila', 'Edit Program': 'Uredi program', 'Edit Project': 'Uredi projekat', 'Edit Project Organization': 'Uredi organizaciju projekta', 'Edit Projection': 'Uredi projekciju', 'Edit Question Meta-Data': 'Uredi metapodatke pitanja', 'Edit Rapid Assessment': 'Uredi brzu procjenu', 'Edit Received Item': 'Uredi primljeni predmet', 'Edit Received Shipment': 'Uredite primljenu pošiljku', 'Edit Record': 'Uredi zapis', 'Edit Recovery Details': 'Izmijeni detalje pronalaženja', 'Edit Region': 'Uredi područje', 'Edit Registration': 'Uredi registraciju', 'Edit Registration Details': 'Uredi detalje registracije', 'Edit Relative': 'Uredi srodnike', 'Edit Relief Item': 'Uredi stavku pomoći', 'Edit Repository Configuration': 'Uredi konfiguraciju repozitorija', 'Edit Request': 'Uredi zahtjev', 'Edit Request Details': 'Uredi detalje zahtjeva', 'Edit Request Item': 'Izmijeni stavku zahtjeva', 'Edit Request Template': 'Uredi predložak zahtjeva', 'Edit Requested Skill': 'Uredi tražene vještine', 'Edit Resource': 'Uredi resurs', 'Edit Resource Configuration': 'Uredi 
konfiguraciju resursa', 'Edit Resource Type': 'Uredi tip resursa', 'Edit Response Summary': 'Uredi sažetak odgovora', 'Edit River': 'Izmijeni rijeku', 'Edit Role': 'Uredi ulogu', 'Edit roles for': 'Uredi uloge za', 'Edit Room': 'Uredi sobu', 'Edit RSS Settings': 'Uredi RSS postavke', 'Edit saved search': 'Uredi sačuvanu pretragu', 'Edit Scenario': 'Izmijeni scenarij', 'Edit School District': 'Uredi školski rejon', 'Edit School Report': 'Izmijeni školski izvještaj', 'Edit Seaport': 'Uredi luku', 'Edit Sector': 'Uredi sektor', 'Edit Sender Priority': 'Uredi prioritet pošiljaoca', 'Edit Sent Item': 'Uredi poslani predmet', 'Edit Series': 'Uredi seriju', 'Edit Service': 'Uredi uslugu', 'Edit Setting': 'Uredi postavke', 'Edit Settings': 'Izmijeni postavke', 'Edit Shelter': 'Uredi sklonište', 'Edit Shelter Service': 'Uredi uslugu skloništa', 'Edit Shelter Status': 'Uredi status skloništa', 'Edit Shelter Type': 'Uredi tip skloništa', 'Edit Shipment Item': 'Uredi predmet pošiljke', 'Edit Shipment to Send': 'Uredi pošiljku za slanje', 'Edit Site Needs': 'Uredi potrebe mjesta', 'Edit Skill': 'Uredi vještinu', 'Edit Skill Equivalence': 'Uredi ekvivalenciju vještina', 'Edit Skill Provision': 'Uredi pružanje vještine', 'Edit Skill Type': 'Uredi tip vještine', 'Edit SMS': 'Uredi SMS', 'Edit SMS Outbound Gateway': 'Uredi SMS izlaz', 'Edit SMS Settings': 'Uredi SMS postavke', 'Edit SMTP to SMS Settings': 'Uredi SMTP-SMS postavke', 'Edit Solution': 'Uredi rješenja', 'Edit Source': 'Uredi izvor', 'Edit Staff': 'Izmijeni osoblje', 'Edit Staff Assignment': 'Uredi dodjelu osoblja', 'Edit Staff Member Details': 'Uredi detalje člana osoblja', 'Edit Staff Type': 'Izmijeni tip osoblja', 'Edit Status': 'Uredi status', 'Edit Status Report': 'Uredi statusni izvještaj', 'Edit Stock Count': 'Uredi zalihu skladišta', 'Edit Storage Bins': 'Uredi korpe za smještaj', 'Edit Storage Location': 'Uredi lokacije skladišta', 'Edit Subscription': 'Uredi pretplatu', 'Edit Subsector': 'Uredi podsektor', 'Edit Supplier': 'Uredi dobavljača', 'Edit Survey Answer': 'Uredi odgovor ankete', 'Edit Survey Question': 'Uredi pitanja upitnika', 'Edit Survey Series': 'Uredi niz anketa', 'Edit Survey Template': 'Uredi šablon za anketu', 'Edit Symbology': 'Uredi značenje simbola', 'Edit Sync Settings': 'Izmijeni postavke sinhronizacije', 'Edit Synchronization Settings': 'Uredi postavke sinhronizacije', 'Edit Tag': 'Uredi oznaku', 'Edit Task': 'Uredi zadatak', 'Edit Team': 'Uredi tim', 'Edit Template Section': 'Uredi odjeljak predloška', 'Edit the OpenStreetMap data for this area': 'Uredi OpenStreetMap podatke za ovo područje', 'Edit Theme': 'Uredi temu', 'Edit Theme Data': 'Uredi podatke teme', 'Edit Themes': 'Uredi teme', 'Edit this Disaster Assessment': 'Uredi ovu procjenu katastrofe', 'Edit this entry': 'Uredi ovaj unos', 'Edit Ticket': 'Uredi karticu', 'Edit Tour': 'Uredi turu', 'Edit Track': 'Uredi praćenje', 'Edit Training': 'Uredi obuku', 'Edit Training Event': 'Uredi događaj obuke', 'Edit Tropo Settings': 'Uredi Tropo postavke', 'Edit Twilio Settings': 'Uredi Twilio postavke', 'Edit Twitter account': 'Uredi Twitter nalog', 'Edit Twitter Search Query': 'Uredi Twitter upit za pretragu', 'Edit Unit': 'Uredi jedinicu', 'Edit User': 'Uredi korisnika', 'Edit Vehicle': 'Uredi vozilo', 'Edit Vehicle Assignment': 'Uredi dodjelu vozila', 'Edit Vehicle Details': 'Uredi detalje o vozilu', 'Edit Vehicle Type': 'Uredi tip vozila', 'Edit Volunteer Availability': 'Uredi dostupnost volontera', 'Edit Volunteer Cluster': 'Uredi skup volontera', 'Edit 
Volunteer Cluster Position': 'Uredi poziciju skupa volontera', 'Edit Volunteer Cluster Type': 'Uredi tip skupa volontera', 'Edit Volunteer Details': 'Uredi detalje volontera', 'Edit Volunteer Role': 'Uredi ulogu volontera', 'Edit Warehouse': 'Uredi skladište', 'Edit Warehouse Item': 'Uredi stavku skladišta', 'Edit Warehouse Stock': 'Uredi zalihu skladišta', 'Edit Web API Settings': 'Uredi Web API postavke', 'Editable?': 'Izmjenjivo?', 'editor': 'uređivač', 'Education': 'Obrazovanje', 'Education Details': 'Detalji o obrazovanju', 'Education details added': 'Detalji o obrazovanju dodani', 'Education details deleted': 'Detalji o obrazovanju obrisani', 'Education details updated': 'Detalji o obrazovanju ažurirani', 'Education Level': 'Nivo obrazovanja', 'Education Level added': 'Nivo obrazovanja dodan', 'Education Level deleted': 'Nivo obrazovanja obrisan', 'Education Level updated': 'Nivo obrazovanja ažuriran', 'Education Levels': 'Nivoi obrazovanja', 'Education materials received': 'Primljeni obrazovni materijali', 'Education materials, source': 'Obrazovni materijali, izvor', 'Effects Inventory': 'Popis ličnih stvari', 'Effort Report': 'Izvještaj o uloženom radu', 'eg. gas, electricity, water': 'npr. gas, struja, voda', 'Eggs': 'Jaja', 'Egypt': 'Egipat', 'Either a shelter or a location must be specified': 'Sklonište ili lokacija moraju biti specificirani', 'Either file upload or document URL required.': 'Potrebno je postaviti datoteku ili navesti URL dokumenta.', 'Either file upload or image URL required.': 'Potrebno je postaviti datoteku ili navesti URL slike.', 'Elderly person headed households (>60 yrs)': 'Domaćinstva vođena od strane starijih osoba (>60 god.)', 'Electrical': 'Električno', 'Electrical, gas, sewerage, water, hazmats': 'Struja, plin, kanalizacija, voda, opasne materije', 'Elevated': 'Uzdignuto', 'Elevators': 'Liftovi', 'Email': 'Elektronska pošta', 'Email (Inbound)': 'Elektronska pošta (dolazna)', 'Email Account deleted': 'Nalog elektronske pošte obrisan', 'Email Accounts': 'Nalozi e-pošte', 'Email Address': 'Email adresa', 'Email Address to which to send SMS messages. Assumes sending to phonenumber@address': 'Email adresa na koju treba poslati SMS poruke. 
Pretpostavlja se slanje na brojtelefona@adresa', 'Email created': 'Email kreiran', 'Email deleted': 'Email obrisan', 'Email Details': 'Detalji elektronske pošte', 'Email InBox': 'Ulaz e-pošte', 'Email Settings': 'Postavke e-pošte', 'Email Settings updated': 'Postavke e-pošte ažurirane', 'Email settings updated': 'Postavke e-pošte ažurirane', 'Embalming': 'Balzamovanje', 'Embassy': 'Ambasada', 'embedded': 'ugrađeno', 'Emergency Capacity Building project': 'Projekat hitne izgradnje kapaciteta', 'Emergency Contacts': 'Hitni kontakti', 'Emergency Department': 'Odjel za hitne slučajeve', 'Emergency Medical Services': 'Hitne medicinske službe', 'Emergency Shelter': 'Hitno sklonište', 'Emergency Support Facility': 'Objekat za podršku u hitnim slučajevima', 'Emergency Support Service': 'Služba za hitnu podršku', 'Emergency Telecommunications': 'Telekomunikacije u hitnim slučajevima', 'EMS Reason': 'Razlog za slanje hitne pomoći', 'EMS Status': 'Status hitne medicinske službe', 'EMS Status Reasons': 'Razlozi EMS statusa', 'EMS Traffic Status': 'Status EMS saobraćaja', 'Enable': 'Omogući', 'Enable in Default Config?': 'Uključiti u podrazumijevanoj konfiguraciji?', 'Enable/Disable Layers': 'Omogućite/Onemogućite slojeve', 'Enabled': 'Omogućen', 'Enabled?': 'Omogućeno?', 'Enabling MapMaker layers disables the StreetView functionality': 'Omogućavanje slojeva za izrađivanje karata onemogućuje funkcionalnosti StreetView-a', 'enclosed area': 'ograđeni prostor', 'End date': 'Krajnji datum', 'End Date': 'Završni datum', 'End date should be after start date': 'Krajnji datum mora biti nakon početnog', 'End of Period': 'Kraj perioda', 'English': 'engleski', 'Enter a date before': 'Unesi datum prije', 'Enter a GPS Coord': 'Unesi GPS koordinate', 'Enter a location': 'Unesi lokaciju', 'Enter a name for the spreadsheet you are uploading (mandatory).': 'Unesite ime za proračunsku tablicu (spreadsheet) koju postavljate (obavezno).', 'Enter a name for the spreadsheet you are uploading.': 'Unesite ime za tabelarni prikaz koji učitavate.', 'Enter a new support request.': 'Unesi novi zahtjev za podršku.', 'Enter a number between %(min)g and %(max)g': 'Unesite broj između %(min)g i %(max)g', 'enter a number between %(min)g and %(max)g': 'unesite broj između %(min)g i %(max)g', 'Enter a summary of the request here.': 'Unesi rezime zahtjeva ovdje.', 'Enter a unique label!': 'Unesite jedinstvenu oznaku!', 'Enter a valid date before': 'Unesi validan datum prije', 'Enter a valid email': 'Unesite validan email', 'Enter a valid future date': 'Unesite validan datum u budućnosti', 'Enter a valid past date': 'Unesite važeći datum u prošlosti', 'Enter a valid phone number': 'Unesite važeći broj telefona', 'enter a value': 'unesite vrijednost', 'Enter a value carefully without spelling mistakes, this field needs to match existing data.': 'Unesite vrijednost pažljivo bez grešaka u kucanju, jer se ovo polje mora usaglasiti s postojećim podacima.', 'Enter an integer between %(min)g and %(max)g': 'Unesite cijeli broj između %(min)g i %(max)g', 'enter an integer between %(min)g and %(max)g': 'unesite cijeli broj između %(min)g i %(max)g', 'Enter an integer greater than or equal to %(min)g': 'Unesite cijeli broj veći ili jednak %(min)g', 'Enter Completed Assessment': 'Unesi završenu ocjenu', 'Enter Completed Assessment Form': 'Unesi završen formular ocjene', 'Enter Coordinates in Deg Min Sec': 'Unesi koordinate u stepenima, minutama i sekundama', 'Enter Coordinates:': 'Unesi koordinate:', 'enter date and time': 'unesite datum i 
vrijeme', 'enter date and time in range %(min)s %(max)s': 'unesite datum i vrijeme u opsegu %(min)s %(max)s', 'enter date and time on or after %(min)s': 'unesite datum i vrijeme na dan %(min)s ili kasnije', 'enter date and time on or before %(max)s': 'unesite datum i vrijeme na dan %(max)s ili ranije', 'Enter phone number in international format like +46783754957': 'Unesite telefonski broj u internacionalnom formatu poput +46783754957', 'Enter some characters to bring up a list of possible matches': 'Unesite neke znakove kako biste pozvali listu mogućih poklapanja', 'Enter some characters to bring up a list of possible matches.': 'Upišite nekoliko početnih karaktera da biste vidjeli listu mogućih podudarnosti.', 'Enter tags separated by commas.': 'Unesite oznake odvojene zarezima.', 'Enter the data for an assessment': 'Unijeti podatke za procjenu', 'Enter the same password as above': 'Unesi istu lozinku kao iznad', 'Enter your first name': 'Unesite vaše ime', 'Enter your firstname': 'Unesite svoje ime', 'Enter your organisation': 'Unesi svoju organizaciju', 'Enter your organization': 'Unesite vašu organizaciju', 'Entered': 'Uneseno', 'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Unos telefonskog broja je proizvoljan, ali ukoliko se odlučite da unesete broj, možete se registrovati da primate SMS poruke.', 'Entity': 'Jedinka', 'Entity Information': 'Informacije o jedinki', 'Entity Type': 'Tip jedinke', 'Entry added to Asset Log': 'Stavka dodana u zapisnik sredstava', 'Entry deleted': 'Unos izbrisan', 'Environment': 'Okruženje', 'Equatorial Guinea': 'Ekvatorijalna Gvineja', 'Equipment': 'Oprema', 'ER Status': 'Status hitne pomoći', 'ER Status Reason': 'Razlog ER statusa', 'Eritrea': 'Eritreja', 'Error encountered while applying the theme.': 'Desila se greška pri primjenjivanju teme.', 'Error in message': 'Greška u poruci', "Error logs for '%(app)s'": 'Zapisnici grešaka za "%(app)s"', 'Error reading file (invalid format?): %(msg)s': 'Greška čitanja datoteke (pogrešan format?): %(msg)s', 'Error sending message': 'Greška pri slanju poruke', 'Error sending message!': 'Greška pri slanju poruke!', 'Error Tickets': 'Kartice grešaka', 'Errors': 'Greške', 'ESRI Shape File': 'ESRI Shape datoteka', 'Essential Staff?': 'Suštinski bitno osoblje?', 'Est. Delivery Date': 'Procijenjeni datum isporuke', 'Estimated # of households who are affected by the emergency': 'Procijenjen broj domaćinstava koja su pogođena nesrećom', 'Estimated # of people who are affected by the emergency': 'Procijenjen broj ljudi koji su pogođeni krizom', 'Estimated Delivery Date': 'Procijenjeni datum isporuke', 'Estimated Overall Building Damage': 'Ukupna procijenjena šteta na zgradi', 'Estimated Reopening Date': 'Procijenjeni datum ponovnog otvaranja', 'Estimated total number of people in institutions': 'Procijenjen ukupan broj ljudi u institucijama', 'Estimated Value': 'Procijenjena vrijednost', 'Estimated Value per Pack': 'Procijenjena vrijednost po paketu', 'Estonia': 'Estonija', 'Ethiopia': 'Etiopija', 'Ethnicity': 'Etnička pripadnost', 'Euros': 'Eura', 'Evacuating': 'Evakuacija', 'Evacuation drills': 'Vježbe evakuacije', 'Evacuation is short-term whilst storm passing e.g. 12 hours, hence people need less space.': 'Evakuacija je kratkoročna do prolaska oluje, npr. 12 sati, stoga ljudima treba manje prostora.', 'Evacuation Route': 'Put evakuacije', 'Evaluate the information in this message. 
(This value SHOULD NOT be used in public warning applications.)': 'Procijeni informaciju u ovoj poruci. (Ova vrijednost NE BI TREBALA biti korištena u javnim aplikacijama za upozorenje.)', 'Event': 'Događaj', 'Event added': 'Događaj dodan', 'Event deleted': 'Događaj obrisan', 'Event Details': 'Detalji događaja', 'Event Resource': 'Resurs događaja', 'Event Time': 'Vrijeme događaja', 'Event Type': 'Tip događaja', 'Event type': 'Tip događaja', 'Event Type added': 'Tip događaja dodan', 'Event Type Details': 'Detalji o vrsti događaja', 'Event Type removed': 'Tip događaja obrisan', 'Event Type updated': 'Tip događaja ažuriran', 'Event Types': 'Tipovi događaja', 'Event updated': 'Događaj ažuriran', 'Events': 'Događaji', 'Example': 'Primjer', 'Exceeded': 'Prekoračeno', 'Excellent': 'Odlično', 'Exclude contents': 'Isključi sadržaj', 'Excreta disposal': 'Odlaganje fekalija', 'Execute a pre-planned activity identified in <instruction>': 'Izvrši unaprijed planiranu aktivnost identificiranu u <instrukciji>', 'Exercise': 'Vježba', 'EXERCISE': 'VJEŽBA', 'Exercise?': 'Vježba?', 'Exercises mean all screens have a watermark & all notifications have a prefix.': 'Vježbe znače da svi ekrani imaju vodeni žig i sve obavijesti imaju isti prefiks.', 'Existing food stocks': 'Postojeće zalihe hrane', 'Existing food stocks, main dishes': 'Postojeće zalihe hrane, glavna jela', 'Existing food stocks, side dishes': 'Postojeće zalihe hrane, prilozi', 'Existing location cannot be converted into a group.': 'Postojeća lokacija ne može biti pretvorena u grupu.', 'Existing Placard Type': 'Postojeći tip plakata', 'Existing Sections': 'Postojeća odjeljenja', 'Exits': 'Izlazi', 'Expected In': 'Očekivani ulaz', 'Expected Out': 'Očekivani izlaz', 'Expected Return Home': 'Očekivani povratak u dom', 'Experience': 'Iskustvo', 'Expiration Date': 'Datum isteka', 'Expiration Details': 'Detalji isteka', 'Expiration Report': 'Izvještaj o isteku', 'Expired': 'Istekao', 'Expired?': 'Istekao?', 'Expiring Staff Contracts Report': 'Izvještaj o osoblju kome ističe ugovor', 'Expiry (months)': 'Ističe (mjeseci)', 'Expiry date': 'Datum isteka', 'Expiry Date': 'Rok valjanosti', 'Expiry Date/Time': 'Vrijeme i datum isteka', 'Expiry Time': 'Vrijeme isteka', 'Explosive Hazard': 'Opasnost od eksplozije', 'Export': 'Izvezi', 'Export all Completed Assessment Data': 'Izvezi sve podatke o završenoj procjeni', 'Export as': 'Izvezi kao', 'export as csv file': 'izvezi kao CSV dokument', 'Export Data': 'Izvezi podatke', 'Export Database as CSV': 'Izvezi bazu podataka kao CSV', 'Export in %(format)s format': 'Izvoz u %(format)s formatu', 'Export in GPX format': 'Izvoz u GPX formatu', 'Export in KML format': 'Izvoz u KML formatu', 'Export in OSM format': 'Izvoz u OSM formatu', 'Export in PDF format': 'Izvoz u PDF formatu', 'Export in RSS format': 'Izvoz u RSS formatu', 'Export in XLS format': 'Izvoz u XLS formatu', 'Exterior and Interior': 'Vanjski i unutrašnji', 'Exterior Only': 'Samo vanjski dio', 'External Features': 'Vanjske karakteristike', 'Eye Color': 'Boja očiju', 'Eyebrows, Peculiarities': 'Obrve, specifičnosti', 'Eyebrows, Shape': 'Obrve, oblik', 'Eyebrows, Thickness': 'Obrve, debljina', 'Eyes, Colour': 'Oči, boja', 'Eyes, Distance between Eyes': 'Oči, razmak između očiju', 'Eyes, Peculiarities': 'Oči, specifičnosti', 'Eyes, Shade': 'Oči, nijansa', 'Face': 'Lice', 'Facebook': 'Facebook', 'Facial hair, color': 'Dlake po licu, boja', 'Facial hair, Colour': 'Dlake po licu, boja', 'Facial hair, comment': 'Dlake po licu, komentar', 'Facial hair, length': 
'Dlake po licu, dužina', 'Facial hair, type': 'Dlake na licu, tip', 'Facial hair, Type': 'Dlake na licu, tip', 'Facial hear, length': 'Dlake po licu, dužina', "Facilitate uploading of missing person's photograph": 'Omogućite slanje fotografije nestale osobe', 'Facilities': 'Objekti', 'Facility': 'Objekat', 'Facility added': 'Dodat objekat', 'Facility Contact': 'Kontakt vezan za objekat', 'Facility deleted': 'Obrisan objekat', 'Facility Details': 'Detalji objekta', 'Facility Operations': 'Aktivnosti objekta', 'Facility or Location': 'Objekat / Lokacija', 'Facility removed': 'Objekat uklonjen', 'Facility Status': 'Stanje objekta', 'Facility Type': 'Vrsta objekta', 'Facility Type added': 'Vrsta objekta dodana', 'Facility Type deleted': 'Vrsta objekta obrisana', 'Facility Type Details': 'Detalji o vrsti objekta', 'Facility Type updated': 'Vrsta objekta ažurirana', 'Facility Types': 'Vrste objekata', 'Facility updated': 'Ažuriran objekat', 'Factors affecting school attendance': 'Faktori koji utiču na pohađanje škole', 'Fail': 'Neuspjeh', 'Failed': 'Nije uspjelo', 'Failed!': 'Nije uspjelo!', 'Fair': 'Zadovoljavajuće', 'Falling Object Hazard': 'Opasnost od padajućih objekata', 'Families/HH': 'Porodice/Domaćinstva', 'Family': 'Porodica', 'Family Care': 'Porodična briga', 'Family tarpaulins received': 'Cerade za porodicu primljene', 'Family tarpaulins, source': 'Porodične cerade, izvor', 'Family/friends': 'Porodica/prijatelji', 'Farmland/fishing material assistance, Rank': 'Materijalna pomoć za obradu zemlje/ribolov, rang', 'fat': 'debeo', 'Fatalities': 'Ljudske žrtve', 'FAX': 'FAKS', 'Fax': 'Faks', 'Feature Class': 'Klasa karakteristika', 'Feature Class added': 'Klasa karakteristika dodana', 'Feature Class deleted': 'Obrisana klasa karakteristika', 'Feature Class Details': 'Detalji klase karakteristika', 'Feature Class updated': 'Klasa karakteristika ažurirana', 'Feature Classes': 'Klase karakteristika', 'Feature Classes are collections of Locations (Features) of the same type': 'Klase karakteristika su kolekcije lokacija (karakteristika) istog tipa', 'Feature Group': 'Grupa karakteristika', 'Feature Group added': 'Dodana grupa karakteristika', 'Feature Group deleted': 'Grupa karakteristika izbrisana', 'Feature Group Details': 'Detalji grupe karakteristika', 'Feature Group Updated': 'Grupa karakteristika ažurirana', 'Feature Group updated': 'Grupa karakteristika ažurirana', 'Feature Groups': 'Grupe karakteristika', 'Feature Info': 'Informacije o karakteristici', 'Feature Layer': 'Sloj karakteristika', 'Feature Layer added': 'Dodat sloj karakteristika', 'Feature Layer deleted': 'Obrisan sloj karakteristika', 'Feature Layer Details': 'Detalji sloja karakteristika', 'Feature Layer updated': 'Ažuriran sloj karakteristika', 'Feature Layers': 'Slojevi karakteristika', 'Feature Namespace': 'Imenski prostor karakteristika', 'Feature Request': 'Zahtjev za karakteristikama', 'Feature Type': 'Tip karakteristike', 'Features Include': 'Karakteristike uključuju', 'feedback': 'povratna informacija', 'Feedback': 'Povratna informacija', 'Feet, Condition': 'Stopala, stanje', 'Feet, Nails': 'Stopala, nokti', 'Feet, Shape': 'Stopala, oblik', 'Female': 'Žensko', 'female': 'žensko', 'Female headed households': 'Domaćinstva u kojima je žena glava porodice', 'Few': 'Mali broj', 'Field': 'Terenski', 'Field Hospital': 'Poljska bolnica', 'Fields tagged with a star': 'Polja označena zvjezdicom', 'Fiji': 'Fidži', 'File': 'Datoteka', 'File Imported': 'Datoteka je unesena', 'File Importer': 'Uvoz datoteka', 'File name': 'Ime datoteke', 'File not found': 
'Datoteka nije pronađena', 'File uploaded': 'Datoteka poslana', 'Files': 'Datoteke', 'Fill in Latitude': 'Upišite geografsku širinu', 'Fill in Longitude': 'Upišite geografsku dužinu', 'fill in order: day(2) month(2) year(4)': 'popuni redoslijedom: dan(2) mjesec(2) godina(4)', 'fill in order: hour(2) min(2) day(2) month(2) year(4)': 'popuni redoslijedom: sat(2) min(2) dan(2) mjesec(2) godina(4)', 'fill in order: hour(2) min(2) month(2) day(2) year(4)': 'popuni redoslijedom: sat(2) min(2) mjesec(2) dan(2) godina(4)', 'fill in order: month(2) day(2) year(4)': 'popuni redoslijedom: mjesec(2) dan(2) godina(4)', 'Filter': 'Filtriraj', 'Filter by %(type)s': 'Filtriraj po %(type)s', 'Filter by Bookmark': 'Filtriraj po zabilješci', 'Filter by Category': 'Filtriraj po kategoriji', 'Filter by Country': 'Filtriraj po državi', 'Filter by Date': 'Filtriraj po datumu', 'Filter by Disaster': 'Filtriraj po katastrofi', 'Filter by Location': 'Filtriraj po lokaciji', 'Filter by Organization': 'Filtriraj po organizaciji', 'Filter by Status': 'Filtriraj po statusu', 'Filter by Tag': 'Filtriraj po oznaci', 'Filter by Type': 'Filtriraj po tipu', 'Filter Field': 'Polje filtera', 'Filter Options': 'Opcije filtera', 'Filter Tweets by the date they were tweeted on': 'Filtriraj tweetove po datumu objavljivanja', 'Filter Tweets by who tweeted them': 'Filtriraj tweetove po autoru', 'Filter type': 'Tip filtera', 'Filter Value': 'Vrijednost filtera', 'Filtered search of aid pledges and requests': 'Filtrirana pretraga ponude i potražnje pomoći', 'Filters': 'Filteri', 'final report': 'završni izvještaj', 'Find': 'Pronađi', 'Find a Person Record': 'Nađite zapis o osobi', 'Find by Name': 'Nađi po imenu', 'Find Dead Body Report': 'Pronađi izvještaj o mrtvim tijelima', 'Find Details': 'Nađi detalje', 'Find Hospital': 'Pronađi bolnicu', 'Find more': 'Nađi više', 'Find on Map': 'Nađi na karti', 'Find Person Record': 'Pronađi zapis osobe', 'Find Recovery Report': 'Nađi izvještaj o pronalaženju', 'Find Report added': 'Dodat izvještaj o traženju', 'Find Report deleted': 'Izvještaj o traženju izbrisan', 'Find Report updated': 'Traženi izvještaj ažuriran', 'Find Volunteers': 'Pronađi volontere', 'Finder': 'Pronalazač', 'Fingerprint': 'Otisak prsta', 'Fingerprinting': 'Uzimanje otiska prsta', 'Fingerprints': 'Otisci', 'Finish': 'Završetak', 'Finished Jobs': 'Gotovi zadaci', 'Finland': 'Finska', 'Fire': 'Vatra', 'Fire Fighter Forest Vehicle': 'Vatrogasno šumsko vozilo', 'Fire Fighter Light Vehicle': 'Vatrogasno lako vozilo', 'Fire Fighter Rural Vehicle': 'Vatrogasno seosko vozilo', 'Fire Fighter Special Vehicle': 'Vatrogasno specijalno vozilo', 'Fire Fighter Urban Vehicle': 'Vatrogasno gradsko vozilo', 'Fire Station': 'Vatrogasna stanica', 'Fire suppression and rescue': 'Suzbijanje vatre i spašavanje', 'First': 'Prvi', 'First name': 'Ime', 'First Name': 'Ime', 'Fishing': 'Ribolov', 'Flash Flood': 'Nagla poplava', 'Flash Freeze': 'Brzo zamrzavanje', 'flatfooted': 'dustabanlija', 'Flexible Impact Assessments': 'Fleksibilna procjena uticaja', 'Flood': 'Poplava', 'Flood Alerts': 'Uzbune od poplava', 'Flood Alerts show water levels in various parts of the country': 'Alarmi poplava pokazuju vodostaje u različitim dijelovima države', 'Flood Depth': 'Dubina poplave', 'Flood Report': 'Izvještaj o poplavi', 'Flood Report added': 'Izvještaj o poplavi dodan', 'Flood Report deleted': 'Izvještaj o poplavi izbrisan', 'Flood Report Details': 'Detalji izvještaja o poplavi', 'Flood Report updated': 'Izvještaj o poplavi ažuriran', 'Flood 
Reports': 'Izvještaji o poplavama', 'Flooding': 'Poplava', 'Flow Status': 'Status toka', 'flush latrine with septic tank': 'zahod na ispiranje sa septičkom jamom', 'Focal Person': 'Kontakt osoba', 'Focal Point': 'Tačka fokusa', 'Fog': 'Magla', 'Folder': 'Mapa', 'Food': 'Hrana', 'Food assistance': 'Pomoć u hrani', 'Food assistance available/expected': 'Pomoć u hrani primljena/očekivana', 'Food security ': 'Sigurnost hrane', 'Food Supply': 'Zalihe hrane', 'food_sources': 'izvori hrane', 'Footer': 'Podnožje stranice', 'Footer file %s missing!': 'Nedostaje datoteka podnožja %s!', 'For': 'Za', 'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'Za zemlju to će biti ISO2 kod, za grad, to bi bio Locode aerodroma', 'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'Za svakog sinhronizovanog partnera, postoji zadani sinhronizovani posao koji se pokreće nakon određenog vremenskog intervala. Takođe možete postaviti više sinhronizovanih poslova koji mogu biti prilagođeni prema vašim potrebama. Kliknite link desno da počnete.', 'For Eden instances enter the application base URL, e.g. http://sync.sahanfoundation.org/eden, for other peers the URL of the synchronization interface.': 'Za instalacije Eden platforme unesite URL bazne organizacije, npr. http://sync.sahanfoundation.org/eden, za druge učesnike URL sinhronizacijskog interfejsa.', 'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'Za povećanu sigurnost, preporučljivo je upisati korisničko ime i šifru, te obavijestiti administratora ostalih mašina u Vašoj organizaciji da doda to korisničko ime i šifru preko Vašeg UUID u Sinhronizacija -> Sinhronizacijski partneri', 'For Entity': 'Za jedinku', 'For live help from the Sahana community on using this application, go to': 'Ako trebate pomoć pri korištenju ove aplikacije od strane Sahana zajednice, idite na', 'For messages that support alert network internal functions': 'Za poruke koje podržavaju interne funkcije mreža za uzbunjivanje', 'For more details on the Sahana Eden system, see the': 'Za više detalja o Sahana Eden sistemu, pogledati', 'For more details on the Sahana system, see the': 'Za više detalja o Sahana sistemu, pogledati', 'For more information, see': 'Za više informacija, pogledaj', 'For more information, see ': 'Za više informacija, pogledajte ', 'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'Za POP-3 ovo je obično 110 (995 za SSL), za IMAP ovo je obično 143 (993 za IMAP).', 'For:': 'Za:', 'forehead': 'čelo', 'Forehead, Height': 'Čelo, visina', 'Forehead, Inclination': 'Čelo, nagib', 'Forehead, Width': 'Čelo, širina', 'Forest Fire': 'Šumski požar', 'Forest Tank Tactical Vehicle': 'Šumska taktička pokretna cisterna', 'form data': 'podaci formulara', 'Form Settings': 'Postavke obrasca', 'Formal camp': 'Formalni kamp', "Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "Oblikujte popis atributa i RGB vrijednosti da bi se koristile kao JSON objekt, npr.: {Crvena: '#FF0000', Zelena: '#00FF00', Žuta: '#FFFF00'}", 'Forms': 'Formulari', 'Found': 'Pronađeno', 'found': 'nađeno', 
'Foundations': 'Osnove', 'Freezing Drizzle': 'Ledeno rominjanje', 'Freezing Rain': 'Ledena kiša', 'Freezing Spray': 'Ledeno prskanje', 'Freight company or organisation providing transport': 'Transportno preduzeće ili organizacija koja pruža transport', 'French': 'Francuski', 'Frequency': 'Učestanost', 'Friday': 'Petak', 'From': 'Od', 'From %(site)s': 'Sa %(site)s', 'From Facility': 'Iz objekta', 'From Inventory': 'Iz inventara', 'From Location': 'Sa lokacije', 'From Organization': 'Od organizacije', 'From Person': 'Od osobe', 'from Twitter': 'sa Twittera', 'Frost': 'Mraz', 'Fuel': 'Gorivo', 'Fulfil. Status': 'Status ispunjenja', 'Fulfill Status': 'Status ispunjenja', 'Fulfillment Status': 'Status realizacije', 'full': 'puno', 'Full': 'Potpun', 'Full beard': 'Puna brada', 'Fullscreen Map': 'Mapa punog ekrana', 'Function': 'Funkcija', 'Function name': 'Ime funkcije', 'Function Permissions': 'Funkcijske dozvole', 'Function tour is activated': 'Tura funkcije je aktivirana', 'Functions available': 'Dostupne funkcije', 'Funding': 'Finansiranje', 'Funding Organisation': 'Organizacija koja finansira', 'Funding Organization': 'Organizacija koja finansira', 'Funding Report': 'Izvještaj o finansiranju', 'Funds Contributed': 'Doprinos fondovima', 'Funeral': 'Sahrana', 'Further Action Recommended': 'Preporučljive su daljnje akcije', 'Gale Wind': 'Jak vjetar', 'Gap Analysis': 'Analiza propusta', 'Gap Analysis Map': 'Karta analize propusta', 'Gap Analysis Report': 'Izvještaj o analizi propusta', 'Gap Map': 'Karta propusta', 'Gap Report': 'Izvještaj propusta', 'Gas Supply Left (in hours)': 'Preostale zalihe plina (u satima)', 'Gas Supply Type': 'Vrsta zaliha plina', 'Gateway': 'Mrežni izlaz', 'Gateway Settings': 'Postavke mrežnog izlaza', 'Gateway settings updated': 'Postavke mrežnog izlaza ažurirane', 'Gender': 'Spol', 'General': 'Općenito', 'General Comment': 'Generalni komentar', 'General emergency and public safety': 'Opće opasnosti i javna sigurnost', 'General information on demographics': 'Opšte demografske informacije', 'General Medical/Surgical': 'Opće zdravstveno / hirurško', 'General Person Transportation Vehicle': 'Transportno vozilo opšte namjene', 'General Skills': 'Opšte vještine', 'Generate portable application': 'Generiši prenosivu aplikaciju', 'Geocode': 'Geokod', 'Geocoder Selection': 'Izbor geokodera', 'GeoJSON Layer': 'GeoJSON sloj', 'Geometry Name': 'Geometrijski naziv', 'Geonames.org search requires Internet connectivity!': 'Geonames.org pretraga zahtijeva Internet vezu!', 'Geophysical (inc. landslide)': 'Geofizički (ink. 
klizište)', 'Georgia': 'Gruzija', 'GeoRSS Layer': 'GeoRSS sloj', 'Geotechnical': 'Geotehnički', 'Geotechnical Hazards': 'Geotehničke opasnosti', 'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Modul Geraldo nije dostupan unutar pokrenutog Python-a, ovo zahtijeva instalaciju PDF izlaza!', 'Geraldo module not available within the running Python - this needs installing to do PDF Reporting!': 'Modul Geraldo nije dostupan unutar pokrenutog Python-a, ovo zahtijeva instalaciju za PDF izvještavanje!', 'German': 'Njemački', 'Germany': 'Njemačka', 'Get Feature Info': 'Dobavi informacije o karakteristici', 'Get incoming recovery requests as RSS feed': 'Dobijte dolazeće zahtjeve za oporavak kao RSS feed', 'getting': 'uzimajući', 'Ghana': 'Gana', 'Girls 13-18 yrs in affected area': 'Djevojčice 13-18 god u pogođenom području', 'Girls 13-18 yrs not attending school': 'Djevojčice 13-18 godina koje ne pohađaju školu', 'Girls 6-12 yrs in affected area': 'Djevojčice 6-12 godina u zahvaćenim područjima', 'Girls 6-12 yrs not attending school': 'Djevojčice 6-12 godina koje ne pohađaju školu', 'GIS integration to view location details of the Shelter': 'GIS integracija za pregled detalja lokacije skloništa', 'GIS Reports of Shelter': 'GIS izvještaji skloništa', 'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Dajte kratak opis fotografije, npr. šta se gdje može vidjeti na slici (nije obavezno).', 'Give information about where and when you have seen the person': 'Dajte informaciju o tome gdje i kada ste vidjeli ovu osobu', 'Give information about where and when you have seen them': 'Dajte informaciju o tome gdje i kada ste ih vidjeli', 'Global Messaging Settings': 'Globalna podešavanja poruka', 'Go': 'Idi', "Go to %(url)s, sign up & then register your application. You can put any URL in & you only need to select the 'modify the map' permission.": "Idite na %(url)s, prijavite se i registrujte vašu aplikaciju. Možete unijeti bilo koji URL i samo trebate odabrati dozvolu za 'mijenjanje mape'.", 'Go to Request': 'Idi na zahtjev', 'Goatee': 'Kozja bradica', 'Good': 'Dobro', 'Good Condition': 'Dobro stanje', 'Goods Received Note': 'Napomena o prijemu robe', 'Google Layer': 'Google sloj', "Google Layers cannot be displayed if there isn't a valid API Key": 'Google Layers ne mogu biti prikazani ukoliko ne postoji validan API ključ', 'Government': 'Vlada', 'Government building': 'Zgrada Vlade', 'Government UID': 'JMB', 'Government UUID': 'JMB', 'GPS Data': 'GPS podaci', 'GPS data': 'GPS podaci', 'GPS data added': 'GPS podaci dodani', 'GPS data deleted': 'GPS podaci obrisani', 'GPS data updated': 'GPS podaci ažurirani', 'GPS Track': 'GPS praćenje', 'GPS Track File': 'Datoteka GPS praćenja', 'GPX Layer': 'GPX sloj', 'GPX Track': 'GPX staza', 'Grade': 'Ocjena', 'Graph': 'Grafikon', 'Graph Model': 'Model grafa', 'Great British Pounds': 'Britanske funte', 'Greater than 10 matches. Please refine search further': 'Više od 10 poklapanja. 
Molim napravite precizniju pretragu', 'Greece': 'Grčka', 'Greek': 'Grčki', 'green': 'zelena', 'Green': 'Zeleno', 'grey': 'siva', 'Grid': 'Mreža', 'Ground movement, fissures': 'Kretanje tla, pukotine', 'Ground movement, settlement, slips': 'Pokreti tla, slijeganje, klizišta', 'Group': 'Grupa', 'Group added': 'Grupa dodana', 'Group deleted': 'Grupa obrisana', 'Group description': 'Opis grupe', 'Group Description': 'Opis grupe', 'Group Details': 'Detalji o grupi', 'Group Head': 'Glavna osoba grupe', 'Group ID': 'IB grupe', 'Group Leader': 'Vođa grupe', 'Group Member added': 'Dodan član grupe', 'Group Members': 'Članovi grupe', 'Group Membership added': 'Dodano članstvo grupe', 'Group Membership deleted': 'Grupno članstvo izbrisano', 'Group Membership Details': 'Detalji grupnog članstva', 'Group Membership updated': 'Grupno članstvo ažurirano', 'Group Memberships': 'Grupna članstva', 'Group name': 'Ime grupe', 'Group Name': 'Ime grupe', 'Group Title': 'Naslov grupe', 'Group Type': 'Vrsta grupe', 'Group updated': 'Grupa ažurirana', 'Group Updated': 'Ažurirana grupa', 'Grouped by': 'Grupisano po', "Grouping by 'Family Unit' or other group category": 'Grupisanje po porodičnoj jedinici ili drugoj kategoriji grupe', 'Groups': 'Grupe', 'Groups removed': 'Grupa odstranjena', 'Guatemala': 'Gvatemala', 'Guest': 'Gost', 'Guided Tour Functionality': 'Funkcionalnost vođene ture', 'Guided Tours': 'Vođene ture', 'Guinea': 'Gvineja', 'Guinea-Bissau': 'Gvineja-Bisau', 'Guyana': 'Gvajana', 'Hail': 'Grad', 'Hair Color': 'Boja kose', 'Hair Comments': 'Komentari na kosu', 'Hair Length': 'Dužina kose', 'Hair of the head, Baldness (extent)': 'Kosa na glavi, ćelavost (veličina)', 'Hair of the head, Baldness (location)': 'Kosa na glavi, ćelavost (mjesto)', 'Hair of the head, Colour': 'Kosa, boja', 'Hair of the head, Length': 'Kosa, dužina', 'Hair of the head, Parting': 'Kosa, razdjeljak', 'Hair of the head, Shade of colour': 'Kosa na glavi, nijansa boje', 'Hair of the head, Style': 'Kosa, stil', 'Hair of the head, Thickness': 'Kosa, debljina', 'Hair of the head, Type': 'Kosa, tip', 'Hair Style': 'Frizura', 'Hair-piece': 'Umetak za kosu', 'Hands, Nail length': 'Ruke, dužina noktiju', 'Hands, Nail peculiarities': 'Ruke, specifičnosti noktiju', 'Hands, Nicotine': 'Ruke, nikotin', 'Hands, Shape': 'Ruke, oblik', 'Hands, Size': 'Ruke, veličina', 'Has data from this Reference Document been entered into Sahana?': 'Da li su podaci iz ovog referentnog dokumenta uneseni u Sahanu?', 'Has only read-only access to records relating to this Organization or Site.': 'Ima pristup samo za čitanje zapisima vezanim za ovu organizaciju ili mjesto.', 'Has the %(GRN)s (%(GRN_name)s) form been completed?': 'Da li je formular %(GRN)s (%(GRN_name)s) ispunjen?', 'Has the Certificate for receipt of the shipment been given to the sender?': 'Da li je pošiljalac primio certifikat o prijemu isporuke?', 'Has the GRN (Goods Received Note) been completed?': 'Da li su BPR (Bilješke o Primljenoj Robi) popunjene?', 'Has your business been damaged in the course of the disaster?': 'Da li je vaš posao oštećen usljed katastrofe?', 'Have normal food sources been disrupted?': 'Da li su normalni izvori hrane oštećeni?', 'Hazard': 'Rizik', 'Hazard added': 'Rizik dodan', 'Hazard added to Project': 'Rizik dodan u projekat', 'Hazard deleted': 'Rizik obrisan', 'Hazard Details': 'Detalji rizika', 'Hazard Pay': 'Naknada za rizik', 'Hazard removed from Project': 'Rizik uklonjen sa projekta', 'Hazard updated': 'Rizik ažuriran', 'Hazardous Material': 'Opasan materijal', 'Hazardous 
Road Conditions': 'Opasni uslovi na putu', 'Hazards': 'Rizici', 'Head': 'Glava', 'Head form, front': 'Oblik glave, prednji', 'Head form, profile': 'Oblik glave, profil', 'Header Background': 'Pozadina zaglavlja', 'Header background file %s missing!': 'Pozadinska datoteka zaglavlja %s nedostaje!', 'Headquarters': 'Glavno sjedište', 'Health': 'Zdravlje', 'Health care assistance, Rank': 'Pomoć zdravstvene zaštite, stepen', 'Health center': 'Zdravstveni centar', 'Health center with beds': 'Zdravstveni centar sa krevetima', 'Health center without beds': 'Zdravstveni centar bez kreveta', 'Health Org UUID': 'Identifikacijski broj zdravstvene organizacije', 'Health services functioning prior to disaster': 'Zdravstvene usluge koje su funkcionisale prije katastrofe', 'Health services functioning since disaster': 'Zdravstvene usluge koje djeluju nakon katastrofe', 'Health services status': 'Status zdravstvenih usluga', 'Healthcare Worker': 'Zdravstveni radnik', 'Heat and Humidity': 'Toplota i vlažnost', 'Heat Wave': 'Toplotni talas', 'heavy': 'težak', 'Height': 'Visina', 'Height (cm)': 'Visina (cm)', 'Height (m)': 'Visina (m)', 'Helipad Information': 'Informacije o helikopterskom sletištu', 'Heliport': 'Heliodrom', 'Heliport added': 'Heliodrom dodan', 'Heliport deleted': 'Heliodrom obrisan', 'Heliport Details': 'Detalji heliodroma', 'Heliport updated': 'Heliodrom ažuriran', 'Heliports': 'Heliodromi', 'Help': 'Pomoć', 'Helps to monitor status of hospitals': 'Pomaže pri praćenju statusa bolnica', 'Helps to report and search for Missing Persons': 'Pomaže pri izvještavanju i traženju nestalih osoba', 'Helps to report and search for missing persons': 'Pomaže pri prijavljivanju i traženju nestalih osoba', 'here': 'ovdje', 'Here are the solution items related to the problem.': 'Ovdje su predmeti rješenja povezani sa problemom.', 'Heritage Listed': 'Izlistano nasljeđe', 'HFA Priorities': 'HFA Prioriteti', 'HFA1: Ensure that disaster risk reduction is a national and a local priority with a strong institutional basis for implementation.': 'HFA1: Osigurajte da je smanjenje rizika od katastrofe državni i lokalni prioritet i jaka institucionalna baza za implementaciju.', 'HFA2: Identify, assess and monitor disaster risks and enhance early warning.': 'HFA2: Prepoznati, procijeniti i pratiti rizike od katastrofe i pojačati rano upozoravanje.', 'HFA3: Use knowledge, innovation and education to build a culture of safety and resilience at all levels.': 'HFA3: Koristiti znanje, inovacije i obrazovanje da se izgradi kultura sigurnosti i otpornosti na svim nivoima.', 'HFA4: Reduce the underlying risk factors.': 'HFA4: Smanjiti podložne faktore rizika.', 'HFA5: Strengthen disaster preparedness for effective response at all levels.': 'HFA5: Pojačati spremnost za katastrofe za efikasan odgovor na svim nivoima.', 'Hide': 'Sakrij', 'Hide Table': 'Sakrij tabelu', 'Hierarchy': 'Hijerarhija', 'Hierarchy Level 0 Name (i.e. Country)': 'Ime nultog hijerarhijskog nivoa (države)', 'Hierarchy Level 1 Name (e.g. State or Province)': 'Ime prvog nivoa hijerarhije (npr. savezna država/republika/pokrajina)', 'Hierarchy Level 2 Name (e.g. District or County)': 'Ime drugog nivoa hijerarhije (npr. kanton/regija)', 'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Ime trećeg nivoa hijerarhije (npr. grad/opština/selo)', 'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'Ime četvrtog nivoa hijerarhije (npr. 
susjedstvo/zaseok)', 'Hierarchy Level 5 Name': 'Ime petog nivoa hijerarhije', 'high': 'visoko', 'High': 'Visok', 'High Tide Depth': 'Dubina visoke plime', 'High Water': 'Najveći vodostaj', 'Highest Priority Open Requests': 'Najviši prioritet za otvorene zahtjeve', 'highly critical': 'vrlo kritično', 'History': 'Istorija', 'Hit the back button on your browser to try again.': "Stisnite 'Nazad' na vašem pretraživaču da pokušate ponovo.", 'Holiday Address': 'Adresa za vrijeme odmora', 'Home': 'Početak', 'Home added': 'Kuća dodana', 'Home Address': 'Kućna adresa', 'Home City': 'Grad stanovanja', 'Home Country': 'Matična država', 'Home Crime': 'Kućni kriminal', 'Home deleted': 'Kuća izbrisana', 'Home Details': 'Kućni detalji', 'Home Phone': 'Kućni telefon', 'Home phone': 'Kućni telefon', 'Home Phone Number': 'Kućni telefon', 'Home Relative': 'Kućni srodnik', 'Home updated': 'Ažuriran dom', 'Homes': 'Kuće', 'horizontal': 'horizontalno', 'Hospital': 'Bolnica', 'Hospital Details': 'Pojedinosti bolnice', 'Hospital information added': 'Dodana informacija o bolnici', 'Hospital information deleted': 'Obrisana informacija o bolnici', 'Hospital information updated': 'Ažurirana informacija o bolnici', 'Hospital Management': 'Upravljanje bolnicom', 'Hospital status assessment.': 'Procjena stanja bolnice.', 'Hospital Status Report': 'Izvještaj o statusu bolnice', 'Hospitals': 'Bolnice', 'Host': 'Domaćin', 'Hot Spot': 'Kritična tačka', 'Hour': 'Sat', 'Hourly': 'Svaki sat', 'hourly': 'svaki sat', 'hours': 'sati', 'Hours': 'Sati', 'Hours added': 'Sati dodani', 'Hours by Program Report': 'Sati po programskom izvještaju', 'Hours by Role Report': 'Sati po izvještaju o ulogama', 'Hours deleted': 'Sati obrisani', 'Hours Details': 'Detalji sati', 'Hours updated': 'Sati ažurirani', 'Household kits received': 'Kompleti za domaćinstva primljeni', 'Household kits, source': 'Kućanski kompleti, izvor', 'households': 'domaćinstva', 'How data shall be transferred': 'Kako podaci trebaju biti preneseni', 'How did boys 13-17yrs spend most of their time prior to the disaster?': 'Kako su dječaci 13-17 god. provodili većinu vremena prije katastrofe?', 'How did boys <12yrs spend most of their time prior to the disaster?': 'Kako su dječaci <12 god. provodili većinu vremena prije katastrofe?', 'How did boys girls 13-17yrs spend most of their time prior to the disaster?': 'Kako su mladići i djevojke 13-17 godina provodili većinu vremena prije katastrofe?', 'How did girls <12yrs spend most of their time prior to the disaster?': 'Kako su djevojčice <12 god. provodile većinu vremena prije katastrofe?', 'How do boys 13-17yrs spend most of their time now?': 'Kako mladići 13-17 godina sada provode većinu vremena?', 'How do boys <12yrs spend most of their time now?': 'Kako dječaci <12 god. sada provode većinu vremena?', 'How do girls 13-17yrs spend most of their time now?': 'Kako djevojke 13-17 godina sada provode većinu vremena?', 'How do girls <12yrs spend most of their time now?': 'Kako djevojčice <12 god. sada provode većinu vremena?', 'How does it work?': 'Kako ovo radi?', 'How is this person affected by the disaster? (Select all that apply)': 'Kako je osoba pogođena katastrofom? 
(Odaberite sve što se može primijeniti)', 'How local records shall be updated': 'Kako se lokalni zapisi trebaju ažurirati', 'How long will the food last?': 'Koliko dugo će hrana trajati?', 'How long will this water resource last?': 'Koliko dugo će ovaj resurs vode trajati?', 'How many Boys (0-17 yrs) are Dead due to the crisis': 'Koliko dječaka (starosti od 0 do 17 god) je mrtvo usljed trenutne krize', 'How many Boys (0-17 yrs) are Injured due to the crisis': 'Koliko dječaka (0-17 godina) je povrijeđeno zbog krize', 'How many Boys (0-17 yrs) are Missing due to the crisis': 'Koliko dječaka (0 - 17 godina) je nestalo tokom krize', 'How many days will the supplies last?': 'Koliko dana će trajati zalihe?', 'How many doctors in the health centers are still actively working?': 'Koliko ljekara u zdravstvenim centrima još aktivno radi?', 'How many Girls (0-17 yrs) are Dead due to the crisis': 'Koliko djevojčica (0-17 godina) je umrlo tokom ove krize', 'How many Girls (0-17 yrs) are Injured due to the crisis': 'Koliko djevojki (0-17 god.) je ozlijeđeno zbog nepogode', 'How many Girls (0-17 yrs) are Missing due to the crisis': 'Koliko djevojčica (0-17 god) je nestalo uslijed krize', 'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?': 'Koliko je kuća oštećeno ali još upotrebljivo (upotrebljivo = razbijeni prozori, pukotine u zidu, krov malo oštećen)?', 'How many latrines are available in the village/IDP centre/Camp?': 'Koliko zahoda je dostupno u selu/centru/kampu?', 'How many Men (18 yrs+) are Dead due to the crisis': 'Koliko muškaraca (18+ godina) je umrlo zbog krize', 'How many Men (18 yrs+) are Injured due to the crisis': 'Koliko muškaraca (preko 18 god.) je povrijeđeno usljed krize', 'How many Men (18 yrs+) are Missing due to the crisis': 'Koliko muškaraca (18 god+) je nestalo uslijed krize', 'How many midwives in the health centers are still actively working?': 'Koliko babica u zdravstvenim centrima još aktivno radi?', 'How many new cases have been admitted to this facility in the past 24h?': 'Koliko novih slučajeva je primljeno u ovaj objekat u posljednjih 24h?', 'How many nurses in the health centers are still actively working?': 'Koliko medicinskih sestara u zdravstvenim centrima još aktivno radi?', 'How many of the patients with the disease died in the past 24h at this facility?': 'Koliko pacijenata sa ovom bolesti je umrlo u posljednjih 24h u ovom objektu?', 'How many of the primary school age boys (6-12) in the area are not attending school?': 'Koliko dječaka osnovaca (6-12) u ovom području ne pohađa školu?', 'How many of the primary school age girls (6-12) in the area are not attending school?': 'Koliko djevojčica osnovaca (6-12) u ovom području ne pohađa školu?', 'How many of the secondary school age boys (13-18) in the area are not attending school?': 'Koliko srednjoškolskih mladića (13-18) u ovom području ne pohađa školu?', 'How many of the secondary school age girls (13-18) in the area are not attending school?': 'Koliko srednjoškolskih djevojaka (13-18) u ovom području ne pohađa školu?', 'How many patients with the disease are currently hospitalized at this facility?': 'Koliko pacijenata sa tom bolesti je trenutno hospitalizovano u ovom objektu?', 'How many primary school age boys (6-12) are in the affected area?': 'Koliko dječaka osnovaca (6-12) je u pogođenom području?', 'How many primary school age girls (6-12) are in the affected area?': 'Koliko djevojčica osnovaca (6-12) je u pogođenom području?', 'How 
many secondary school age girls (13-18) are in the affected area?': 'Koliko srednjoškolskih djevojaka (13-18) je u pogođenom području?', 'How many teachers have been affected by the disaster (affected = unable to work)?': 'Koliko je nastavnika pogođeno katastrofom? (pogođeno = ne može raditi)', 'How many teachers worked in the schools prior to the disaster?': 'Koliko je nastavnika radilo u školi prije katastrofe?', 'How many Women (18 yrs+) are Dead due to the crisis': 'Koliko žena (od 18 godina i više) je poginulo tokom krize', 'How many Women (18 yrs+) are Injured due to the crisis': 'Koliko žena (18+ godina) je povrijeđeno uslijed krize', 'How many Women (18 yrs+) are Missing due to the crisis': 'Koliko žena (18 godina+) je nestalo uslijed krize', 'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'Koliko detalja je vidljivo. Velik nivo zumiranja znači mnogo detalja, ali ne i široku oblast prikaza. Nizak nivo zumiranja znači prikaz široke oblasti, ali ne i visok nivo detalja.', 'How often you want to be notified. If there are no changes, no notification will be sent.': 'Koliko često želite biti obavještavani. Ako nema izmjena, napomene se neće slati.', 'How you want to be notified.': 'Kako želite biti obavještavani.', 'HR Manager': 'Menadžer ljudskih resursa', 'HTML class': 'HTML klasa', 'Human Resource': 'Ljudski resurs', 'Human Resource added': 'Ljudski resurs dodan', 'Human Resource assigned': 'Dodijeljeni ljudski resursi', 'Human Resource Assignment updated': 'Dodjela ljudskih resursa ažurirana', 'Human Resource Assignments': 'Dodjeljivanje ljudskih resursa', 'Human Resource Details': 'Detalji ljudskih resursa', 'Human Resource Management': 'Rukovodstvo ljudskim resursima', 'Human Resource removed': 'Ljudski resurs uklonjen', 'Human Resource unassigned': 'Nedodijeljeni ljudski resursi', 'Human Resource updated': 'Ažuriran ljudski resurs', 'Human Resources': 'Ljudski resursi', 'Human Resources Management': 'Upravljanje ljudskim resursima', 'Humanitarian NGO': 'Humanitarna NVO', 'Hungary': 'Mađarska', 'Hurricane': 'Uragan', 'Hurricane Force Wind': 'Vjetar snage uragana', 'Hybrid Layer': 'Hibridni sloj', 'Hygiene': 'Higijena', 'Hygiene kits received': 'Primljeni higijenski kompleti', 'Hygiene kits, source': 'Higijenski kompleti, izvor', 'Hygiene NFIs': 'Higijenski neprehrambeni artikli', 'Hygiene practice': 'Higijenska praksa', 'Hygiene problems': 'Higijenski problemi', 'Hygiene promotion': 'Unapređenje higijene', 'I accept. Create my account.': 'Prihvatam. 
Kreiraj moj račun.', 'I agree to the %(terms_of_service)s': 'Slažem se sa %(terms_of_service)s', 'I am available in the following area(s)': 'Dostupan sam u sljedećim područjima', 'Ice Pressure': 'Pritisak leda', 'Iceberg': 'Santa leda', 'Iceland': 'Island', 'ICT': 'IKT', 'ID': 'IB', 'ID Label': 'ID oznaka', 'ID Label:': 'ID oznaka:', 'ID Tag': 'ID oznaka', 'ID Tag Number': 'Broj identifikacijske kartice', 'ID type': 'ID tip', 'Identificación de Víctimas de Desastres': 'Identifikacija žrtava katastrofe', 'Identification': 'Identifikacija', 'Identification label of the Storage bin.': 'Identifikacija korpe za smještaj', 'Identification Report': 'Izvještaj o identifikaciji', 'Identification Reports': 'Izvještaji identifikacija', 'Identification Status': 'Status identifikacije', 'identified': 'identificiran', 'Identified as': 'Identifikovano kao', 'Identified by': 'Identifikovan od strane', 'Identifier Name for your Twilio Account.': 'Ime identifikatora za vaš Twilio nalog.', 'Identifier which the remote site uses to authenticate at this site when sending synchronization requests.': 'Identifikator koji će udaljeni sajt koristiti za provjeru prijave na ovaj sajt kada šalje zahtjeve za sinhronizacijom.', 'Identities': 'Identiteti', 'Identity': 'Identitet', 'Identity added': 'Identitet dodan', 'Identity deleted': 'Identitet obrisan', 'Identity Details': 'Detalji o identitetu', 'Identity updated': 'Identitet ažuriran', 'IEC Materials': 'IEC materijali', 'If a ticket was issued then please provide the Ticket ID.': 'Ako je kartica izdata molimo vas da obezbijedite ID kartice', 'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'Ako korisnik potvrdi da posjeduje e-mail adresu ove domene, polje odobravatelja će se koristiti da definira da li i od strane koga se traže daljnje potvrde.', 'If checked, the notification will contain all modified records. If not checked, a notification will be send for each modified record.': 'Ako je označeno, napomena će sadržati sve izmijenjene zapise. 
Ako nije označeno, napomena će biti poslana za svaki izmijenjeni zapis.', 'If it is a URL leading to HTML, then this will downloaded.': 'Ako URL vodi ka HTML-u, ovo će biti preuzeto.', 'If neither are defined, then the Default Marker is used.': 'Ako nijedan nije definisan, onda je korišten Podrazumjevani Znak', 'If no marker defined then the system default marker is used': 'Ako nema definisanog markera onda se koristi standardni marker sistema', 'If no, specify why': 'Ako ne, navedite zašto', 'If none are selected, then all are searched.': 'Ako nijedan nije označen, svi će biti pretraženi.', 'If not found, you can have a new location created.': 'Ako nije nađeno, možete kreirati novu lokaciju.', "If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": 'Ako je odabrano, lokacija ovog sredstva će biti ažurirana kada se kod ažurira lokacija osobe', 'If the location is a geographic area, then state at what level here.': 'Ako je lokacija geografsko područje, navedite na kojem je nivou.', 'If the person counts as essential staff when evacuating all non-essential staff.': 'Ako se osoba računa kao neohodno osoblje pri evakuaciji svog osoblja koje nije neophodno.', 'If the request is for %s, please enter the details on the next screen.': 'Ako je zahtjev za %s, unesite detalje na sljedećem ekranu.', 'If the request type is "Other", please enter request details here.': 'Ako je tip zahtjev "Drugi", unesite detalje zahtjeva ovdje.', 'If the service requries HTTP BASIC Auth (e.g. Mobile Commons)': 'Ako usluga zahtijeva HTTP BASIC Autorizaciju (npr. Mobile Commons)', 'If there are multiple configs for a person, which should be their default?': 'Ako ima više konfiguracija za jednu osobu, koja treba biti podrazumijevana?', "If this configuration is displayed on the GIS config menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'Ako je ova konfiguracija prikazana na GIS konfiguracijskom meniju, dajte ime da se koristi u meniju. Ime za ličnu konfiguraciju mape će se koristiti za korisničko ime.', "If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'Ako ova konfiguracija predstavlja lokalitet za Izbornik Lokaliteta, postavite naziv da biste je koristili u izborniku. 
Kao naziv za ličnu konfiguraciju mape će biti postavljeno ime korisnika.', "If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'Ako je ovo polje popunjeno onda će korisniku koji specificira ovu organizaciju pri upisu biti osoblje organizacije osim ako se njegovo područje ne podudara sa područjem polja.', 'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'Ako je ovo polje popunjeno tada korisnik sa navedenom domenom će biti automatski dodijeljen kao osoblje ove organizacije', 'If this is a request template to be added repeatedly then the schedule can be set on the next page.': 'Ako je ovo predložak za zahtjev koji će se dodati iznova, onda se raspored može postaviti na sljedećoj strani', 'If this is set to True then mails will be deleted from the server after downloading.': 'Ako je ovo uključeno, tada će poruke elektronske pošte biti obrisane sa servera nakon preuzimanja', "If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'Ako je ovo označeno, onda će ovo postati korisnikova osnovna lokacija i samim time lokacija na kojoj će korisnik biti prikazan na mapi.', 'If this record should be restricted then select which role is required to access the record here.': 'Ako bi ovaj zapis trebao biti ograničen, ovdje odaberite kojoj ulozi je dozvoljen pristup zapisu.', 'If this record should be restricted then select which role(s) are permitted to access the record here.': 'Ako je ovaj zapis ograničen, označiti kojim ulogama je dozvoljen pristupovom zapisu', 'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.': 'Ako je Unit = m, Base Unit = Km, tada je multiplikator is 0.0001 jer je 1m = 0.001 km.', 'If yes, specify what and by whom': 'Ako da, navedite šta i od strane koga', 'If yes, which and how': 'Ako jeste, koji i kako', 'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'Ako ne unesete odgovarajući dokument, vaš e-mail će biti prikazan kako mogli potvrditi ove podatke.', "If you don't see the activity in the list, you can add a new one by clicking link 'Create Activity'.": "Ukoliko aktivnost ne vidite u popisu, možete dodati novu klikom na link 'Kreiraj aktivnost'.", "If you don't see the asset in the list, you can add a new one by clicking link 'Create Asset'.": "Ukoliko sredstvo ne vidite u popisu, možete dodati novo klikom na link 'Kreiraj sredstvo'.", "If you don't see the beneficiary in the list, you can add a new one by clicking link 'Add Beneficiary'.": 'Ukoliko ne vidite korisnika u popisu, možete dodati novu tako što ćete kliknuti na link "Dodaj korisnika"', "If you don't see the campaign in the list, you can add a new one by clicking link 'Add Campaign'.": 'Ukoliko ne vidite kampanju u popisu, možete dodati novu tako što ćete kliknuti na link "Dodaj kampanju"', "If you don't see the Cluster in the list, you can add a new one by clicking link 'Add New Cluster'.": 'Ukoliko ne vidite skup u popisu, možete dodati novu tako što ćete kliknuti na link "Dodaj novi skup"', "If you don't see the community in the list, you can add a new one by clicking link 'Create Community'.": "Ukoliko zajednicu ne vidite u popisu, možete dodati novu klikom na link 'Kreiraj zajednicu'.", "If you don't see the Hospital in the list, you can add a new one 
by clicking link 'Add Hospital'.": "Ukoliko bolnicu ne vidite u popisu, možete dodati novu klikom na link 'Dodaj bolnicu'.", "If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": "Ukoliko bolnicu ne vidite u popisu, možete dodati novu klikom na link 'Kreiraj bolnicu'.", "If you don't see the location in the list, you can add a new one by clicking link 'Create Location'.": 'Ukoliko ne vidite lokaciju u popisu, možete dodati novu tako što ćete kliknuti na link "Kreiraj lokaciju"', "If you don't see the Office in the list, you can add a new one by clicking link 'Add Office'.": "Ukoliko ne vidite ured na listi, možete dodati novi klikom na link 'Dodaj ured'", "If you don't see the Organization in the list, you can add a new one by clicking link 'Add Organization'.": 'Ukoliko ne vidite organizaciju u popisu, možete dodati novu tako što ćete kliknuti na link "Dodaj Organizaciju"', "If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": 'Ukoliko ne vidite organizaciju u popisu, možete dodati novu tako što ćete kliknuti na link "Kreiraj Organizaciju"', "If you don't see the project in the list, you can add a new one by clicking link 'Create Project'.": "Ukoliko projekt ne vidite u popisu, možete dodati novi klikom na link 'Kreiraj projekt'.", "If you don't see the Sector in the list, you can add a new one by clicking link 'Create Sector'.": "Ukoliko ne vidite sektor na listi, možete dodati novi klikom na link 'Kreiraj sektor'", "If you don't see the Type in the list, you can add a new one by clicking link 'Add Region'.": "Ukoliko ne vidite tip na listi, možete dodati novi klikom na link 'Dodaj region'", "If you don't see the type in the list, you can add a new one by clicking link 'Create Activity Type'.": "Ukoliko ne vidite tip na listi, možete dodati novi klikom na link 'Kreiraj tip aktivnosti'", "If you don't see the Type in the list, you can add a new one by clicking link 'Create Facility Type'.": "Ukoliko ne vidite tip na listi, možete dodati novi klikom na link 'Kreiraj tip objekta'", "If you don't see the Type in the list, you can add a new one by clicking link 'Create Office Type'.": "Ukoliko ne vidite tip na listi, možete dodati novi klikom na link 'Kreiraj tip kancelarije'", "If you don't see the Type in the list, you can add a new one by clicking link 'Create Organization Type'.": 'Ukoliko ne vidite tip u popisu, možete dodati novi tako što ćete kliknuti na link "Kreiraj tip organizacije"', "If you don't see the vehicle in the list, you can add a new one by clicking link 'Add Vehicle'.": 'Ukoliko ne vidite vozilo u popisu, možete dodati novo tako što ćete kliknuti na link "Dodaj vozilo"', "If you enter a foldername then the layer will appear in this folder in the Map's layer switcher. A sub-folder can be created by separating names with a '/'": "Ako unesete ime direktorija, onda će se sloj pojaviti u tom direktoriju u prebacivaču sloja mape. 
Poddirektorij se može kreirati razdvajanjem imena s '/'", 'If you know what the Geonames ID of this location is then you can enter it here.': 'Ako znate koje je Geonames ime područja (ID) ove lokacije unesite ga ovdje', 'If you know what the OSM ID of this location is then you can enter it here.': 'Ako znate OSM ID ove lokacije, možete ga unijeti ovdje.', 'If you need to add a new document then you can click here to attach one.': 'Ako vam je potrebno da dodate novi dokument onda kliknite ovdje kako biste dodali jedan.', "If you specify a module then this will be used as the text in that module's index page": 'Ako navedete modul, ovo će se koristiti kao tekst u indeksnoj stranici modula', "If you specify a resource then this will be used as the text in that resource's summary page": 'Ako navedete resurs, ovo će se koristiti kao tekst u sumarnoj stranici tog resursa', 'If you want several values, then separate with': 'Ukoliko želite više vrijednosti, onda razdvojite sa', 'If you would like to help, then please': 'Ako želite pomoći, samo izvolite', 'If you would like to help, then please %(sign_up_now)s': 'Ako želite pomoći, onda %(sign_up_now)s', 'ignore': 'zanemari', 'Ignore Errors?': 'Ignoriši greške?', 'Illegal Immigrant': 'Ilegalni doseljenik', 'Image': 'Slika', 'Image added': 'Slika dodana', 'Image deleted': 'Slika obrisana', 'Image Details': 'Detalji slike', 'Image File(s), one image per page': 'Datoteka (datoteke) slika, prikaz jedne slike po stranici', 'Image Tags': 'Oznake na slikama', 'Image Type': 'Vrsta slike', 'Image updated': 'Slika ažurirana', 'Image Upload': 'Postavi sliku', 'Image/Other Attachment': 'Slika/Drugi dodaci', 'Imagery': 'Snimci', 'Images': 'Slike', 'Immediate reconstruction assistance, Rank': 'Pomoć u hitnoj rekonstrukciji, stepen', 'Immediately': 'Odmah', 'Immigration and Customs Capabilities': 'Carinske i imigracione mogućnosti', 'Impact added': 'Utjecaj dodat', 'Impact Assessments': 'Procjene utjecaja', 'Impact deleted': 'Utjecaj obrisan', 'Impact Details': 'Detalji utjecaja', 'Impact Type': 'Tip utjecaja', 'Impact Type added': 'Dodan tip utjecaja', 'Impact Type deleted': 'Tip utjecaja obrisan', 'Impact Type Details': 'Detalji o tipu utjecaja', 'Impact Type updated': 'Ažurirana vrsta utjecaja', 'Impact Types': 'Tipovi utjecaja', 'Impact updated': 'Utjecaj ažuriran', 'Impacts': 'Utjecaji', 'implanted': 'implantat', 'import': 'uvoz', 'Import': 'Uvoz', 'Import & Export Data': 'Uvoz i izvoz podataka', 'Import Activity Data': 'Uvezi podatke aktivnosti', 'Import Activity Type data': 'Uvezi podatke tipa aktivnosti', 'Import Airports': 'Uvezi aerodrome', 'Import and Export': 'Uvoz i izvoz', 'Import Annual Budget data': 'Uvezi podatke godišnjeg budžeta', 'Import Assets': 'Uvezi sredstva', 'Import Awards': 'Uvezi nagrade', 'Import Base Stations': 'Uvezi bazne stanice', 'Import Catalog Items': 'Uvezi stavke kataloga', 'Import Certificates': 'Uvezi certifikate', 'Import Community Data': 'Uvezi podatke zajednice', 'Import Completed Assessment Forms': 'Uvezi završene formulare procjene', 'Import Contacts': 'Uvezi kontakte', 'Import Courses': 'Uvezi kurseve', 'Import Data': 'Uvezi podatke', 'Import Data for Theme Layer': 'Uvezi podatke za tematski sloj', 'Import Departments': 'Uvezi odjeljenja', 'Import Event Types': 'Uvezi tipove događaja', 'Import Facilities': 'Uvezi objekte', 'Import Facility Types': 'Uvezi vrste objekata', 'Import File': 'Uvezi datoteku', 'Import File deleted': 'Unosna datoteka izbrisana', 'Import File Details': 'Uvezi detalje datoteke', 'Import Files': 'Uvezi 
datoteke', 'Import from CSV': 'Uvezi iz CSV', 'Import from OpenStreetMap': 'Uvezi iz OpenStreetMap', 'Import from Ushahidi Instance': 'Importuj iz Ushahidi instance', 'Import Hazard data': 'Uvezi podatke o riziku', 'Import Hazards': 'Uvezi rizike', 'Import Heliports': 'Uvezi heliodrome', 'Import Hours': 'Uvezi sate', 'Import if Master': 'Uvezi ako je Master', 'Import Incident Reports': 'Uvezi izvještaje o incidentu', 'Import Incident Reports from Ushahidi': 'Uvezi izvještaj o incidentu iz Ushahidi', 'Import Incident Types': 'Uvezi tipove incidenta', 'Import Job': 'Uvezi posao', 'Import Job Count': 'Broj poslova uvoza', 'Import job created': 'Posao za uvoz kreiran', 'Import Jobs': 'Uvezi poslove', 'Import Layers': 'Uvezi slojeve', 'Import Location data': 'Uvezi podatke lokacije', 'Import Location Data': 'Uvezi podatke lokacije', 'Import Locations': 'Uvezi lokacije', 'Import Logged Time data': 'Uvezi zabilježene vremenske podatke', 'Import multiple tables as CSV': 'Uvoz više tabela kao CSV', 'Import New File': 'Uvezi novu datoteku', 'Import Offices': 'Uvezi kancelarije', 'Import Organizations': 'Uvezi organizacije', 'Import Participant List': 'Uvezi listu učesnika', 'Import Participants': 'Uvezi učesnike', 'Import Partner Organizations': 'Uvezi partnerske organizacije', 'Import PoI Types': 'Uvezi tipove tačaka interesa', 'Import Points of Interest': 'Uvezi tačke interesa', 'Import Policies & Strategies': 'Uvezi politiku ili strategiju', 'Import Posts': 'Uvezi objave', 'Import Project Organizations': 'Uvezi organizacije projekta', 'Import Projects': 'Uvezi projekte', 'Import Resource Types': 'Uvezi tipove resursa', 'Import Resources': 'Uvezi resurse', 'Import Seaports': 'Uvezi luke', 'Import Sector data': 'Uvezi podatke o sektoru', 'Import Series': 'Uvezi serije', 'Import Service data': 'Uvezi podatke usluge', 'Import Services': 'Uvezi usluge', 'Import Staff': 'Uvezi osoblje', 'Import Suppliers': 'Uvezi dobavljače', 'Import Tags': 'Uvezi oznake', 'Import Tasks': 'Uvezi zadatke', 'Import Template Layout': 'Uvezi raspored predložaka', 'Import Templates': 'Uvezi predloške', 'Import Theme data': 'Uvezi podatke teme', 'Import Training Events': 'Uvezi događaje obuke', 'Import Training Participants': 'Uvezi učesnike obuke', 'Import Users': 'Uvezi korisnike', 'Import Volunteer Cluster Positions': 'Uvezi pozicije skupa volontera', 'Import Volunteer Cluster Types': 'Uvezi tipove skupa volontera', 'Import Volunteer Clusters': 'Uvezi skupove volontera', 'Import Volunteers': 'Uvezi volontere', 'Import Warehouse Stock': 'Uvezi zalihu skladišta', 'Import Warehouses': 'Uvezi skladišta', 'Import/Export': 'Uvoz/Izvoz', 'Import/Master': 'Uvezi/Master', 'Important': 'Važno', 'Importantly where there are no aid services being provided': 'Važnije gdje nije pružena pomoć', 'Imported': 'Uvezeno', 'Importing data from spreadsheets': 'Unošenje podataka iz tabela', 'Improper decontamination': 'Nepravilna dekontaminacija', 'Improper handling of dead bodies': 'Nepravilno postupanje sa mrtvim tijelima', 'improvement': 'poboljšanje', 'In': 'U', 'In Catalogs': 'U katalozima', 'in Deg Min Sec format': 'u Stepeni Minute Sekunde formatu', 'In error': 'Greška', 'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'U GeoServer, ovo je ime sloja. Unutar WFS getCapabilities, ovo je dio sa FeatureType imenom nakon dvotačke(:).', 'In GeoServer, this is the Workspace Name. 
Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'U GeoServer, ovo je ime radnog prostora. Unutar WFS getCapabilities, ovo je dio sa FeatureType imenom prije dvotačke(:).', 'in GPS format': 'U GPS formatu', 'in Inv.': 'u Inv.', 'In Inventories': 'U zalihama', 'In order to be able to edit OpenStreetMap data from within %(name_short)s, you need to register for an account on the OpenStreetMap server.': 'Da možete mijenjati OpenStreetMap podatke iz %(name_short)s, trebate registrovati nalog na OpenStreetMap serveru.', 'In Process': 'U procesu', 'In Progress': 'U toku', 'In Stock': 'Na zalihi', 'in Stock': 'na zalihi', 'In transit': 'U prijelazu', 'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'U rasporedu prozora karta se maksimizira da popuni prozor, nema potrebe da se ovdje postavlja velika vrijednost.', 'inactive': 'neaktivno', 'Inbound Mail Settings': 'Podešavanja ulaznih mail-ova', 'InBox': 'Dolazna pošta', 'Incident': 'Incident', 'Incident added': 'Dodat incident', 'Incident Categories': 'Kategorije incidenata', 'Incident Commander': 'Komandir incidenata', 'Incident Details': 'Detalji o incidentu', 'Incident removed': 'Incident uklonjen', 'Incident Report': 'Izvještaj o incidentu', 'Incident Report added': 'Dodat izvještaj o incidentu', 'Incident Report deleted': 'Obrisan izvještaj o incidentu', 'Incident Report Details': 'Detalji o izvještaju incidenta', 'Incident Report removed': 'Uklonjen izvještaj o incidentu', 'Incident Report updated': 'Ažuriran izvještaj o incidentu', 'Incident Reporting': 'Izvještavanje o incidentu', 'Incident Reporting System': 'Sistem za izvještavanje o incidentima', 'Incident Reports': 'Izvještaji o incidentu', 'Incident Timeline': 'Vremenski tok incidenta', 'Incident Type': 'Tip incidenta', 'Incident Type added': 'Vrsta incidenta dodana', 'Incident Type Details': 'Detalji o vrsti incidenta', 'Incident Type removed': 'Tip incidenta obrisan', 'Incident Type updated': 'Vrsta incidenta ažurirana', 'Incident Types': 'Tipovi incidenta', 'Incident updated': 'Ažuriran incident', 'Incidents': 'Incidenti', 'Include any special requirements such as equipment which they need to bring.': 'Uključite bilo koje posebne zahtjeve kao npr. 
opremu koju trebaju donijeti.', 'Include core files': 'Uključi osnovne datoteke', 'Include Entity Information?': 'Uključi informaciju o jedinki?', 'Include only items purchased within the specified dates.': 'Uključi samo stavke kupljene unutar navedenih datuma.', 'Include only items that expire within the specified dates.': 'Uključi samo stavke koje ističu unutar navedenih datuma.', 'Include only items where quantity is in this range.': 'Uključi samo stavke čija je količina unutar navedenog opsega.', "includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'Uključuje GroundOverlay ili ScreenOverlay koji još nisu podržani u OpenLayers, tako da možda neće raditi ispravno.', 'Incoming': 'Dolazni', 'Incoming Shipment canceled': 'Dolazna pošiljka otkazana', 'Incoming Shipment updated': 'Dolazna pošiljka je ažurirana', 'Incoming Shipments': 'Dolazne pošiljke', 'Incomplete': 'Nepotpuno', 'Incorrect parameters': 'Nevažeći parametri', 'India': 'Indija', 'Individuals': 'Pojedinci', 'Indonesia': 'Indonezija', 'Industrial': 'Industrijska', 'Industrial Crime': 'Industrijski kriminal', 'Industry close to village/camp': 'Industrija u blizini sela/kampa', 'Industry Fire': 'Industrijska vatra', 'Infant (0-1)': 'Dojenče (0-1)', 'Infectious Disease': 'Zarazna bolest', 'Infectious Disease (Hazardous Material)': 'Zarazna bolest (Opasan materijal)', 'Infectious Diseases': 'Zarazne bolesti', 'Infestation': 'Najezda', 'Informal camp': 'Neformalni kamp', 'Informal Leader': 'Neformalni vođa', 'Information gaps': 'Praznine u informacijama', 'Information Source': 'Izvor informacije', 'Infusion catheters available': 'Sonde za infuziju dostupne', 'Infusion catheters need per 24h': 'Infuzioni kateteri potrebni u 24h', 'Infusion catheters needed per 24h': 'Infuzijski kateteri potrebni po 24h', 'Infusions available': 'Dostupne infuzije', 'Infusions needed per 24h': 'Infuzije potrebne u 24h', 'Inherited?': 'Naslijeđeni?', 'initial assessment': 'početna procjena', 'Initials': 'Inicijali', 'injured': 'povrijeđeni', 'Injuries': 'Povrede', 'input': 'ulaz', 'Input Job': 'Ulazni posao', 'insert new': 'Ubaci novi', 'insert new %s': 'dodaj novi %s', 'Inspected': 'Pregledano', 'Inspection Date': 'Datum inspekcije', 'Inspection date and time': 'Datum i vrijeme inspekcije', 'Inspection time': 'Vrijeme inspekcije ili pregleda', 'Inspector ID': 'ID inspektora', 'Instance Type': 'Tip instance', 'Instance URL': 'URL instance', 'Instant Porridge': 'Instant kaša', "Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'Umjesto automatske sinhronizacije sa ostalih tačaka mreže, možete izvršiti sinhronizaciju preko datoteka, što je neophodno na mjestima gdje nema mreže. Možete koristiti ovu stranicu da uvezete sinhronizacijske podatke iz datoteka, kao i da izvezete podatke u sinhronizacijske datoteke. 
Kliknite na link desno da biste otišli na ovu stranicu.', 'Institution': 'Institucija', 'Instructor': 'Instruktor', 'Instrument Landing System': 'Instrumentalni sistem za slijetanje', 'Insufficient': 'Nedovoljno', 'insufficient number of pages provided': 'naveden nedovoljan broj strana', 'Insufficient Privileges': 'Nedovoljno ovlasti', 'Insufficient privileges': 'Nedovoljno ovlasti', 'Insufficient vars: Need module, resource, jresource, instance': 'Nedovoljan broj promjenjivih: potrebni su modul, resurs, jresurs, instanca', 'Insurance': 'Osiguranje', 'Insurance Renewal Due': 'Rok za obnovu osiguranja', 'Intake Items': 'Ulazne stavke', 'Intergovernmental': 'Međuvladina', 'Intergovernmental Organisation': 'Međuvladina organizacija', 'Intergovernmental Organization': 'Međuvladina organizacija', 'Interior walls, partitions': 'Unutarnji zidovi, pregrade', 'Internal Features': 'Interne karakteristike', 'Internal Shipment': 'Interne pošiljke', 'Internal State': 'Unutrašnje stanje', 'International NGO': 'Međunarodna NVO', 'International Organization': 'Međunarodna organizacija', 'International Staff': 'Međunarodno osoblje', 'Intervention': 'Intervencija', 'Interview taking place at': 'Intervju se održava u', 'invalid': 'neispravno', 'Invalid': 'Nevažeće', 'Invalid data: record %(id)s not accessible in table %(table)s': 'Pogrešni podaci: slog %(id)s nije dostupan u tabeli %(table)s', 'Invalid email': 'Neispravan email', 'Invalid form (re-opened in another window?)': 'Pogrešan formular (ponovo otvoren u drugom prozoru?)', 'Invalid Location!': 'Pogrešna lokacija!', 'Invalid Organisation ID!': 'Neispravan ID organizacije!', 'Invalid Organization ID!': 'Neispravan organizacijski ID!', 'Invalid phone number': 'Netačan broj telefona', 'Invalid phone number!': 'Pogrešan broj telefona!', 'Invalid Query': 'Pogrešan upit', 'invalid request': 'nevažeći zahtjev', 'Invalid request!': 'Nevažeći zahtjev!', 'Invalid Site!': 'Pogrešno mjesto!', 'Invalid ticket': 'Nevažeća kartica', 'invalid ticket': 'nevažeća kartica', 'Invalid UUID!': 'Nevažeći UUID!', 'Inventories': 'Zalihe', 'Inventories with Item': 'Skladišta sa stavkom', 'Inventories with Items': 'Skladišta sa stavkama', 'Inventory': 'Skladište', 'Inventory Adjustment': 'Prilagođenje skladišta', 'Inventory Adjustment Item': 'Prilagođenje artikala u skladištu', 'Inventory functionality is available for:': 'Funkcionalnost skladišta je dostupna za:', 'Inventory Item': 'Stavka - Inventar (popis)', 'Inventory Item added': 'Dodana stavka inventara', 'Inventory Item deleted': 'Stavka skladišta obrisana', 'Inventory Item Details': 'Popis detalja artikala u skladištu', 'Inventory Item updated': 'Ažurirana stavka inventara', 'Inventory Items': 'Stavke skladišta', 'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Stavke skladišta uključuju potrošne zalihe kao i one koje će biti pretvorene u sredstva na njihovim odredištima.', 'Inventory Location': 'Lokacija skladišta', 'Inventory Management': 'Upravljanje zalihama', 'Inventory of Effects': 'Inventar efekata', 'Inventory Stock Position': 'Pozicija zaliha inventara', 'Inventory Store added': 'Dodana stavka inventara', 'Inventory Store Details': 'Popis detalja artikala u skladištu', 'Inventory/Ledger': 'Skladište/knjigovodstvo', 'Iraq': 'Irak', 'Ireland': 'Irska', 'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. 
Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'je centralno online skladište gdje se mogu čuvati informacije o svim žrtvama nesreće i porodicama, posebno identificiranim gubicima, evakuisanim i raseljenim osobama. Informacije poput imena, godina, kontakt telefona, broja lične karte, trenutnog mjesta boravka i drugih detalja su pohranjene. Slike i otisci prstiju ljudi se mogu učitati u sistem. Ljudi se mogu rasvrstavati po grupama zbog efikasnosti i pogodnosti.', 'Is adequate food and water available for these institutions?': 'Da li je dostupna adekvatna hrana i voda za ove institucije?', 'Is editing level L%d locations allowed?': 'Da li je uređivanje nivoa L%d lokacija dopušteno?', 'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'Predviđa se da se sastoji od nekoliko pod-modula koji rade zajedno kako bi osigurali složenu funkcionalnost uz pomoć kojih će organizacije lakše upravljati olakšanjima i projektnim predmetima. To uključuje sistem za unos, sistem za upravljanje skladištima, praćenje proizvoda, upravljanje lancem opskrbe, upravljanje voznim parkom, nabavka, financijsko praćenje i druge sposobnosti za upravljanje sredstvima i resursima.', 'Is it safe to collect water?': 'Da li je bezbjedno sakupljanje vode?', 'Is this a strict hierarchy?': 'Da li je ovo stroga hijerarhija?', 'Israel': 'Izrael', 'Issued without Record': 'Izdato bez zapisa', 'Issuing Authority': 'Autoritet (odgovorno lice) za dodjeljivanje resursa', 'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Snima ne samo mjesta gdje su aktivni, već također snima podatke u dometu projekata koji pružaju u svakom području.', 'It gives four options: No Sync, Newer Timestamp, Keep All, Replace All': 'Pruža četiri mogućnosti: Bez sinhronizacije, Novija vremenska oznaka, Zadrži sve, Zamijeni sve', 'It is built using the Template agreed by a group of NGOs working together as the': 'Izgrađeno koristeći šablon usaglašen od strane grupe NVO radeći zajedno kao', 'Italian': 'Talijanski', 'Italy': 'Italija', 'Item': 'Stavka', 'Item added': 'Stavka dodana', 'Item added to Inventory': 'Stavka je dodana u inventar', 'Item Added to Shipment': 'Stavka dodana u pošiljku', 'Item added to shipment': 'Predmet dodan u pošiljku', 'Item added to stock': 'Stavka dodana u zalihu', 'Item already in budget!': 'Stavka je već u budžetu!', 'Item already in Bundle!': 'Stavka već u paketu!', 'Item already in Kit!': 'Stavka već u kompletu!', 'Item Catalog added': 'Dodata stavka u katalog', 'Item Catalog Categories': 'Kategorije kataloga stavki', 'Item Catalog Category': 'Kategorija kataloga stavki', 'Item Catalog Category added': 'Dodana kategorija kataloga stavki', 'Item Catalog Category deleted': 'Obrisana kategorija kataloga stavki', 'Item Catalog Category Details': 'Detalji kategorije kataloga stavke', 'Item Catalog Category updated': 'Kategorija kataloga stavke ažurirana', 'Item Catalog deleted': 'Katalog stavki 
izbrisan', 'Item Catalog Details': 'Pojedinosti o katalogu stavki', 'Item Catalog updated': 'Katalog stavki je ažuriran', 'Item Catalogs': 'Katalozi stavki', 'Item Categories': 'Kategorije stavki', 'Item Category': 'Kategorija stavki', 'Item Category added': 'Dodata kategorija stavki', 'Item Category deleted': 'Obrisana kategorija stavki', 'Item Category Details': 'Detalji o kategorijama stavki', 'Item Category updated': 'Ažurirana kategorija stavki', 'Item Code': 'Šifra stavke', 'Item deleted': 'Stavka obrisana', 'Item Details': 'Detalji o predmetu', 'Item name': 'Ime stavke', 'Item Pack added': 'Paket stavki je dodan', 'Item Pack deleted': 'Paket stavki je obrisan', 'Item Pack Details': 'Sadržaj paketa', 'Item Pack updated': 'Paket stavki je ažuriran', 'Item Packet added': 'Dodat paket stavki', 'Item Packet deleted': 'Paket sa stavkama obrisan', 'Item Packet Details': 'Detalji sadržaja paketa', 'Item Packet updated': 'Ažuriran paket stavki', 'Item Packets': 'Paketi stavki', 'Item Packs': 'Paketi sa stavkama', 'Item quantity adjusted': 'Prilagođena količina stavke', 'Item removed from Inventory': 'Stavka uklonjena iz inventara', 'Item Status': 'Status stavke', 'Item Sub-Categories': 'Pod-kategorije stavke', 'Item Sub-Category': 'Podkategorija stavke', 'Item Sub-Category deleted': 'Obrisana podkategorija stavki', 'Item Sub-Category Details': 'Detalji podkategorije stavke', 'Item Sub-Category updated': 'Podkategorija stavke ažurirana', 'Item Tracking Status': 'Status praćenja stavke', 'Item updated': 'Stavka ažurirana', 'Item(s) added to Request': 'Stavke dodane u zahtjev', 'Item(s) deleted from Request': 'Stavke obrisane iz zahtjeva', 'Item(s) updated on Request': 'Stavke ažurirane u zahtjevu', 'Item/Description': 'Stavka/opis', 'Items': 'Stavke', 'Items in Category are Vehicles': 'Stavke u kategoriji su vozila', 'Items in Category can be Assets': 'Stavke u kategoriji mogu biti sredstva', 'Items in Request': 'Stavke u zahtjevu', 'Items in Stock': 'Stavke u zalihi', 'Items/Description': 'Stavke/opis', 'Jamaica': 'Jamajka', 'Japanese': 'Japanski', 'Jerry can': 'Kanister', 'Jew': 'Jevrej', 'Jewish': 'Jevrejski', 'JNAP Priorities': 'JNAP Prioriteti', 'JNAP-1: Strategic Area 1: Governance': 'JNAP-1: Strateško područje 1: Vlada', 'JNAP-2: Strategic Area 2: Monitoring': 'JNAP-2: Strateško područje 2: Praćenje', 'JNAP-3: Strategic Area 3: Disaster Management': 'JNAP-3: Strateško područje 3: Upravljanje u katastrofama', 'JNAP-4: Strategic Area 4: Risk Reduction and Climate Change Adaptation': 'JNAP-4: Strateško područje 4: Smanjenje rizika i prilagođenje na promjene klime', 'Job added': 'Posao dodan', 'Job deleted': 'Posao obrisan', 'Job reactivated': 'Posao ponovo aktiviran', 'Job Role': 'Radno mjesto', 'Job Role added': 'Dodana uloga posla', 'Job Role Catalog': 'Katalog radnih mjesta', 'Job Role deleted': 'Pozicija obrisana', 'Job Role Details': 'Opis uloge posla', 'Job Role updated': 'Ažurirana uloga posla', 'Job Roles': 'Radna mjesta', 'Job Schedule': 'Raspored poslova', 'Job Title': 'Radno mjesto', 'Job Title added': 'Radno mjesto dodano', 'Job Title Catalog': 'Katalog radnih mjesta', 'Job Title deleted': 'Radno mjesto obrisano', 'Job Title Details': 'Detalji radnog mjesta', 'Job Title updated': 'Radno mjesto ažurirano', 'Job updated': 'Posao ažuriran', 'Jobs': 'Poslovi', 'joining': 'spajanje', 'Joint National Action Plan for Disaster Risk Management and Climate Change Adaptation. 
Applicable to Cook Islands only': 'Udruženi državni akcioni plan za upravljanje rizicima u slučaju katastrofe i prilagođenju na klimatske promjene. Primjenjivo samo na Kukova ostrva', 'Journal': 'Dnevnik', 'Journal entry added': 'Unos u dnevnik dodan', 'Journal entry deleted': 'Unos u dnevnik obrisan', 'Journal Entry Details': 'Detalji stavke žurnala', 'Journal entry updated': 'Unos u dnevnik ažuriran', 'JS Layer': 'JS sloj', 'Just Once': 'Samo jednom', 'Kazakhstan': 'Kazahstan', 'Keep All': 'Zadrži sve', 'Keep Duplicate': 'Sačuvaj duplikat', 'Keep Local': 'Zadrži lokalne', 'Keep Original': 'Sačuvaj original', 'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'prati sve dolazne kartice dopuštajući im da se kategoriziraju i preusmjere na odgovarajuće mjesto za dalju akciju.', 'keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'prati sve humanitarne organizacije koje djeluju u regionu katastrofe. Snima ne samo mjesta gdje su aktivne, već također snima informacije o opsegu projekata koje one provode u svakom području.', 'Kenya': 'Kenija', 'Key': 'Ključ', 'Key added': 'Ključ dodan', 'Key deleted': 'Obrisan ključ', 'Key Details': 'Detalji ključa', 'Key updated': 'Ključ ažuriran', 'Key Value pairs': 'Parovi ključ-vrijednost', 'Keys': 'Ključevi', 'Keyword': 'Ključna riječ', 'Keyword Added': 'Ključna riječ dodana', 'Keyword Deleted': 'Ključna riječ obrisana', 'Keyword Updated': 'Ključna riječ ažurirana', 'Keywords': 'Ključne riječi', 'kit': 'komplet', 'Kit': 'Komplet', 'Kit added': 'Komplet je dodan', 'Kit canceled': 'Komplet otkazan', 'Kit Contents': 'Sadržaj kompleta', 'Kit Created': 'Komplet kreiran', 'Kit deleted': 'Komplet obrisan', 'Kit Details': 'Detalji kompleta', 'Kit Item': 'Stavka kompleta', 'Kit Items': 'Stavke kompleta', 'Kit Updated': 'Komplet je ažuriran', 'Kit updated': 'Komplet je ažuriran', 'Kit?': 'Komplet?', 'Kits': 'Kompleti', 'Kitting': 'Pakovanje', 'KML Layer': 'KML sloj', 'Known Identities': 'Poznati identiteti', 'Known incidents of violence against women/girls': 'Poznati incidenti nasilja nad ženama/djevojkama', 'Known incidents of violence since disaster': 'Poznati slučajevi nasilja od katastrofe', 'Known Locations': 'Poznate lokacije', 'Korea, North': 'Sjeverna Koreja', 'Korea, South': 'Južna Koreja', 'Korean': 'Korejski', 'KPIs': 'KPI', 'Kuwait': 'Kuvajt', 'Kyrgyzstan': 'Kirgistan', 'Label': 'Oznaka', 'Lack of material': 'Nedostatak materijala', 'Lack of school uniform': 'Nedostatak školske uniforme', 'Lack of supplies at school': 'Nedostatak zaliha u školi', 'Lack of transport to school': 'Nedostatak prevoza ka školi', 'Lactating women': 'Dojilje', 'Ladder Vehicle 30': 'Platformsko vozilo 30', 'Landslide': 'Klizište', 'Language': 'Jezik', 'Language Code': 'Šifra jezika', 'large': 'širok', 'Last': 'Zadnje', 'Last Checked': 'Zadnja provjera', 'Last Contacted': 'Zadnji kontakt', 'Last Downloaded': 'Zadnje preuzimanje', 'Last known location': 'Posljednja poznata lokacija', "Last Month's Work": 'Rad u zadnjem mjesecu', 'Last Name': 'Prezime', 'Last name': 'Prezime', 'Last Polled': 'Zadnje pregledanje', 'Last Pull': 'Zadnje povlačenje', 'Last pull on': 'Povučeno zadnji put', 'Last Push': 'Zadnje guranje', 'Last push on': 'Gurnuto zadnji put', 'Last run': 'Posljednje pokretanje', 'Last status': 'Zadnji status', 'Last synchronization on': 
'Sinhronizovano zadnji put', 'Last synchronization time': 'Vrijeme posljednje sinhronizacije', 'Last updated': 'Zadnji put ažurirano', 'Last updated by': 'Zadnji put ažurirao', 'Last updated on': 'Zadnji put ažurirano', "Last Week's Work": 'Zadnja radna sedmica', 'Latest Information': 'Posljednja informacija', 'Latitude': 'Geografska širina', 'Latitude & Longitude': '(geografska) Širina i Dužina', 'Latitude and Longitude are required': 'Potrebne su geografska širina i dužina', 'Latitude is Invalid!': 'Geografska širina nije ispravna!', 'Latitude is North - South (Up-Down).': 'Geografska širina je sjever-jug (gore-dolje)', 'Latitude is North-South (Up-Down).': 'Geografska širina je sjever-jug (gore-dolje)', 'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Geografska širina se mjeri od sjevera ka jugu (gore-dolje). Geografska širina je nula na ekvatoru, pozitivna na sjevernoj hemisferi i negativna na južnoj.', 'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Geografska širina je nula na ekvatoru, pozitivna na sjevernoj hemisferi i negativna na južnoj.', 'Latitude must be between -90 and 90.': 'Geografska širina mora biti između -90 i 90.', 'Latitude of far northern end of the region of interest.': 'Geografska širina krajnjeg sjevernog kraja regije', 'Latitude of far southern end of the region of interest.': 'Geografska širina krajnjeg južnog kraja interesnog područja', 'Latitude of Map Center': 'Geografska širina centra karte', 'Latitude should be between': 'Geografska širina treba da bude između', 'latrines': 'zahodi', 'Latrines': 'Zahodi', 'Latvia': 'Latvija', 'Law enforcement, military, homeland and local/private security': 'Organi provođenja zakona, vojska, državna i lokalna/privatna sigurnost', 'Layer': 'Sloj', 'Layer added': 'Dodan sloj', 'Layer deleted': 'Obrisan sloj', 'Layer Details': 'Detalji sloja', 'Layer has been Disabled': 'Sloj je onemogućen', 'Layer has been Enabled': 'Sloj je omogućen', 'Layer ID': 'ID Nivoa', 'Layer Name': 'Ime sloja', 'Layer Properties': 'Svojstva sloja', 'Layer removed from Symbology': 'Sloj uklonjen iz značenja simbola', 'Layer Type': 'Tip sloja', 'Layer updated': 'Ažuriran sloj', 'Layers': 'Slojevi', 'Layers updated': 'Slojevi ažurirani', 'Layout': 'Raspored', 'Lead Implementer': 'Vodeći realizator', 'Lead Implementer for this project is already set, please choose another role.': 'Glavni implementator za ovaj projekt je već postavljen, molim izaberite drugu ulogu.', 'Lead Organization': 'Vodeća organizacija', 'Leader': 'Vođa', 'Leave blank to request an unskilled person': 'Ostavi prazno za zahtjev za nekvalificiranom osobom', 'leave empty to detach account': 'ostavite prazno da odvojite račun', 'Lebanon': 'Liban', 'left': 'lijevo', 'Left-side is fully transparent (0), right-side is opaque (1.0).': 'Lijeva strana je potpuno prozirna (0), desna strana je neprovidna (1.0).', 'Left-to-Right': 'Sa lijeva na desno', 'Legend': 'Legenda', 'Legend Format': 'Format legende', 'legend URL': 'opis URL', 'Legend URL': 'URL legende', 'Length': 'Dužina', 'Length (m)': 'Dužina (m)', 'Lesotho': 'Lesoto', 'less': 'manje', 'Less Options': 'Manje opcija', 'Level': 'Nivo', 'Level 1': 'Nivo 1', 'Level 1 Assessment added': 'Procjena razine 1 dodana', 'Level 1 Assessment deleted': 'Izbrisana procjena nivoa 1', 'Level 1 Assessment Details': 'Detalji nivoa 1 procjene', 'Level 1 Assessment updated': 'Procjena 
prvog stepena ažurirana', 'Level 1 Assessments': 'Procjene nivoa 1', 'Level 2': 'Nivo 2', 'Level 2 Assessment added': 'Dodata procjena nivoa 2', 'Level 2 Assessment deleted': 'Procjena Nivoa 2 obrisana', 'Level 2 Assessment Details': 'Detalji procjene nivoa 2', 'Level 2 Assessment updated': 'Procjena drugog nivoa ažurirana', 'Level 2 Assessments': 'Procjene nivoa 2', 'Level 2 or detailed engineering evaluation recommended': 'Preporučuje se nivo 2 ili procjena izvedbenog projekta', 'Level 3': 'Nivo 3', "Level is higher than parent's": 'Nivo je veći nego kod roditelja', 'Level of Award': 'Nivo nagrade', 'Level of competency this person has with this skill.': 'Nivo sposobnosti koju ova osoba ima s tom vještinom.', 'Liberia': 'Liberija', 'Library support not available for OpenID': 'Podrška za biblioteku nije dostupna za OpenID', 'Libya': 'Libija', 'LICENCE': 'DOZVOLA', 'LICENSE': 'DOZVOLA', 'License Number': 'Broj dozvole', 'License Plate': 'Registarske tablice', 'Liechtenstein': 'Lihtenštajn', 'light': 'lagane', 'Lighting': 'Osvjetljenje', 'Line': 'Linija', 'LineString': 'Žica', 'Link': 'Veza', 'Link (or refresh link) between User, Person & HR Record': 'Veza (ili osvježena veza) između korisnika, osobe i zapisa o ljudskim resursima', 'Link an Item & Shipment': 'Poveži predmet i pošiljku', 'Link for the RSS Feed.': 'Veza na RSS dovod', 'Link Item & Shipment': 'Poveži predmet i pošiljku', 'Link to this result': 'Veza na ovaj rezultat', 'Links': 'Veze', 'Lips, Shape': 'Usne, oblik', 'List': 'Spisak', 'List %(site_label)s Status': 'Prikaži %(site_label)s status', 'List / Add Baseline Types': 'Popis / Dodaj vrste referentnih vrijednosti', 'List / Add Impact Types': 'Izlistaj/Dodaj tipove utjecaja', 'List / Add Services': 'Izlistaj / Dodaj usluge', 'List / Add Types': 'Izlistaj / Dodaj tipove', 'List Activities': 'Prikaži aktivnosti', 'List Activity Organizations': 'Prikaži organizacije aktivnosti', 'List Activity Reports': 'Prikaži izvještaje o aktivnostima', 'List Activity Types': 'Prikaži tipove aktivnosti', 'List Addresses': 'Prikaži adrese', 'List Affiliations': 'Prikaži namještenja', 'List Airports': 'Prikaži aerodrome', 'List all': 'Prikaži sve', 'List All': 'Prikaži sve', 'List All Assets': 'Navedite sva sredstva', 'List All Catalog Items': 'Lista svih stavki kataloga', 'List All Catalogs & Add Items to Catalogs': 'Prikaži sve kataloge i dodaj stavke u kataloge', 'List All Commitments': 'Prikaži sva zaduženja', 'List all Entries': 'Prikaži sve unose', 'List All Entries': 'Prikaži sve unose', 'List All Group Memberships': 'Prikaži sva članstva grupa', 'List All Item Categories': 'Prikaži kategorije stavki', 'List All Memberships': 'Izlistaj sva članstva', 'List All Organization Approvers & Whitelists': 'Prikaži sve potvrđivače u organizaciji i bijele liste', 'List All Received Shipments': 'Izlistaj sve primljene pošiljke', 'List All Records': 'Izlistaj sve zapise', 'List All Reports': 'Prikaži sve izvještaje', 'List All Requested Items': 'Prikaži sve zahtijevane stavke', 'List All Requested Skills': 'Prikaži sve tražene vještine', 'List All Requests': 'Prikaži sve zahtjeve', 'List All Roles': 'Prikaži sve uloge', 'List All Sent Shipments': 'Izlistaj sve poslane pošiljke', 'List All Users': 'Prikaži sve korisnike', 'List All Vehicles': 'Prikaži sva vozila', 'List Alternative Items': 'Prikaži alternativne stavke', 'List Annual Budgets': 'Prikaži godišnje budžete', 'List Assessment Answers': 'Prikaži odgovore ocjene', 'List Assessment Questions': 'Prikaži pitanja ocjene', 'List Assessment 
Summaries': 'Izlistaj sažetke procjena', 'List Assessment Templates': 'Prikaži predloške ocjene', 'List Assessments': 'Popis procjena', 'List Assets': 'Prikaži sredstva', 'List Assigned Human Resources': 'Prikaži dodijeljene ljudske resurse', 'List Availability': 'Pregled dostupnih', 'List available Scenarios': 'Izlistaj dostupne scenarije', 'List Awards': 'Prikaži nagrade', 'List Base Stations': 'Prikaži bazne stanice', 'List Baseline Types': 'Lista tipova referentnih tačaka', 'List Baselines': 'Prikaži referentne tačke', 'List Beneficiaries': 'Prikaži korisnike', 'List Beneficiary Types': 'Prikaži tipove korisnika', 'List Body Finds': 'Prikaži nađena tijela', 'List Branch Organizations': 'Prikaži ogranke organizacije', 'List Brands': 'Prikaži proizvođačke marke', 'List Budgets': 'Izlistaj budžete', 'List Bundles': 'Ispiši pakete', 'List Camp Services': 'Prikaži usluge kampa', 'List Camp Statuses': 'Prikaži statuse kampa', 'List Camp Types': 'Prikaži tipove kampova', 'List Campaign Messages': 'Prikaži poruke kampanje', 'List Campaigns': 'Prikaži kampanje', 'List Camps': 'Prikaži kampove', 'List Cases': 'Prikaži slučajeve', 'List Catalog Items': 'Prikaži stavke kataloga', 'List Catalogs': 'Prikaži kataloge', 'List Category<>Sub-Category<>Catalog Relation': 'Prikaz Kategorija<>Podkategorija<>Kataloški odnos', 'List Certificates': 'Prikaži certifikate', 'List Certifications': 'Prikaži certifikacije', 'List Checklists': 'Prikaži liste zadataka', 'List Cluster Subsectors': 'Izlistaj podsektore skupa', 'List Clusters': 'Prikaži grupisanja', 'List Coalitions': 'Prikaži koalicije', 'List Commitment Items': 'Prikaži stavke zaduženja', 'List Commitments': 'Prikaži zaduženja', 'List Committed People': 'Prikaži zadužene ljude', 'List Communities': 'Prikaži zajednice', 'List Community Contacts': 'Prikaži kontakt podatke zajednice', 'List Competencies': 'Popis kompetencija', 'List Competency Ratings': 'Prikaži ocjene sposobnosti', 'List Completed Assessment Forms': 'Prikaži završene formulare ocjene', 'List Configs': 'Prikaži konfiguracije', 'List Conflicts': 'Lista sukoba', 'List Contact Information': 'Prikaži kontaktne informacije', 'List Contacts': 'Prikaži kontakte', 'List Course Certicates': 'Ispiši certifikate kursa', 'List Course Certificates': 'Prikaži certifikate kursa', 'List Courses': 'Prikaži kurseve', 'List Credentials': 'Prikaži akreditive', 'List Current': 'Prikaži trenutne', 'List Data in Theme Layer': 'Prikaži podatke iz tematskog sloja', 'List Departments': 'Prikaži odjeljenja', 'List Details': 'Prikaži detalje', 'List Disaster Assessments': 'Prikaži procjene katastrofe', 'List Distribution Items': 'Prikaži stavke raspodjele', 'List Distributions': 'Prikaži raspodjele', 'List Documents': 'Prikaži dokumente', 'List Donations': 'Prikaži donacije', 'List Donors': 'Prikaži donatore', 'List Education Details': 'Prikaži podatke o obrazovanju', 'List Education Levels': 'Prikaži nivoe obrazovanja', 'List Event Types': 'Prikaži tipove događaja', 'List Events': 'Prikaži događaje', 'List Facilities': 'Prikaži objekte', 'List Facility Types': 'Prikaži vrste objekata', 'List Feature Classes': 'Izlistaj klase karakteristika', 'List Feature Groups': 'Prikaži grupe karakteristika', 'List Feature Layers': 'Prikaži slojeve karakteristika', 'List Finds': 'Lista pronalaženja', 'List Flood Reports': 'Izlistaj izvještaje o poplavama', 'List Found People': 'Lista nađenih ljudi', 'List GPS data': 'Prikaži GPS podatke', 'List Groups': 'Prikaži grupe', 'List Groups/View Members': 'Izlistaj Grupe/Pogledaj 
Članove', 'List Hazards': 'Prikaži rizike', 'List Heliports': 'Prikaži heliodrome', 'List Homes': 'Izlistaj domove', 'List Hospitals': 'Prikaži bolnice', 'List Hours': 'Prikaži sate', 'List Human Resources': 'Prikaži ljudske resurse', 'List Identities': 'Prikaži identitete', 'List Images': 'Prikaži slike', 'List Impact Assessments': 'Izlistaj procjene utjecaja', 'List Impact Types': 'Popis vrsta utjecaja', 'List Impacts': 'Nabroji utjecaje', 'List Import Files': 'Ispiši uvezene datoteke', 'List Incident Reports': 'Prikaži izvještaje o incidentu', 'List Incident Types': 'Prikaži tipove incidenta', 'List Incidents': 'Prikaži incidente', 'List Item Catalog Categories': 'Prikaži kategorije stavki kataloga', 'List Item Catalogs': 'Prikaži kataloge stavki', 'List Item Categories': 'Prikaži kategorije stavki', 'List Item Packets': 'Prikaz paketa stavki', 'List Item Packs': 'Prikaži pakete stavki', 'List Item Sub-Categories': 'Prikaži podkategorije stavki', 'List Items': 'Prikaži stavke', 'List Items in Inventory': 'Ispiši stavke u inventaru', 'List Items in Request': 'Prikaži stavke u zahtjevu', 'List Items in Stock': 'Prikaži stavku u zalihi', 'List Job Roles': 'Izlistaj poslovne uloge', 'List Job Titles': 'Prikaži radna mjesta', 'List Jobs': 'Prikaži poslove', 'List Keys': 'Lista ključeva', 'List Keywords': 'Prikaži ključne riječi', 'List Kits': 'Prikaži komplete', 'List Layers': 'Prikaži slojeve', 'List Layers in Profile': 'Prikaži slojeve u profilu', 'List Layers in Symbology': 'Prikaži značenja simbola', 'List Level 1 assessments': 'Izlistaj procjene 1. nivoa', 'List Level 1 Assessments': 'Prikaži procjene nivoa 1', 'List Level 2 Assessments': 'Ispiši procjene drugog nivoa', 'List Level 2 assessments': 'Ispiši procjene nivoa 2', 'List Location Hierarchies': 'Prikaži hijerarhije lokacija', 'List Locations': 'Prikaži lokacije', 'List Log Entries': 'Prikaži unose zapisnika', 'List Logged Time': 'Prikaži bilježena vremena', 'List Mailing Lists': 'Prikaži liste za slanje poruka', 'List Map Configurations': 'Prikaži konfiguracije mape', 'List Markers': 'Prikaži markere', 'List Members': 'Prikaži članove', 'List Memberships': 'Prikaži članstva', 'List Messages': 'Prikaži poruke', 'List Milestones': 'Prikaži prekretnice', 'List Missing People': 'Lista nestalih ljudi', 'List Missing Persons': 'Prikaži nestale osobe', 'List Missions': 'Prikaži misije', 'List Morgues': 'Prikaži mrtvačnice', 'List Need Types': 'Prikaži vrste potreba', 'List Needs': 'Lista potreba', 'List Networks': 'Prikaži mreže', 'List of addresses': 'Lista adresa', 'List of Appraisals': 'Prikaži ispunjenja', 'List of CSV files': 'Lista CSV datoteka', 'List of CSV files uploaded': 'Spisak učitanih CSV (comma separated value) datoteka', 'List of Facilities': 'Prikaži objekte', 'List of Items': 'Popis predmeta', 'List of Missing Persons': 'Lista osoba koje su nestale', 'List of Peers': 'Lista saradnika', 'List of Professional Experience': 'Prikaži profesionalna iskustva', 'List of Reports': 'Lista izvještaja', 'List of Requests': 'Lista zahtjeva', 'List of Roles': 'Prikaži uloge', 'List of Spreadsheets': 'Lista proračunskih tablica', 'List of Spreadsheets uploaded': 'Lista poslanih tablica', 'List of Volunteers': 'Lista volontera', 'List of Volunteers for this skill set': 'Lista volontera za ovu skupinu vještina', 'List of Volunteers for this skills set': 'Lista volontera za ovu skupinu vještina', 'List Office Types': 'Prikaži tipove kancelarija', 'List Offices': 'Prikaži kancelarije', 'List Orders': 'Prikaži narudžbe', 'List 
Organisations': 'Prikaži organizacije', 'List Organization Domains': 'Prikaži domene organizacije', 'List Organization Types': 'Prikaži tipove organizacije', 'List Organizations': 'Prikaži organizacije', 'List Outputs': 'Prikaži izlaze', 'List Participants': 'Prikaži učesnike', 'List Partner Organizations': 'Prikaži partnerske organizacije', 'List Partners': 'Lista partnera', 'List Patients': 'Lista pacijenata', 'List Peers': 'Popis saradnika', 'List Personal Effects': 'Prikaži lične stvari', 'List Persons': 'Prikaži osobe', "List Persons' Details": 'Prikaži detalje o osobama', 'List Photos': 'Prikaži fotografije', 'List PoI Types': 'Prikaži tipove tačaka interesa', 'List Points of Interest': 'Prikaži tačke interesa', 'List Policies & Strategies': 'Prikaži politike i strategije', 'List Population Statistics': 'Izlistaj demografsku statistiku', 'List Positions': 'Navedi pozicije', 'List Posts': 'Prikaži blokove ugradivog teksta', 'List Problems': 'Lista problema', 'List Profiles configured for this Layer': 'Prikaži profile konfigurisane za ovaj sloj', 'List Programs': 'Prikaži programe', 'List Project Organizations': 'Prikaži organizacije projekta', 'List Projections': 'Prikaži projekcije', 'List Projects': 'Prikaži projekte', 'List Question Meta-Data': 'Prikaži metapodatke pitanja', 'List Rapid Assessments': 'Izlistaj brze procjene', 'List Received Items': 'Izlistaj primljene predmete', 'List Received Shipments': 'Prikaži primljene isporuke', 'List Received/Incoming Shipments': 'Prikaži primljene/dolazne pošiljke', 'List Records': 'Prikaži zapise', 'List Recurring Requests': 'Prikaži ponavljajuće zahtjeve', 'List Regions': 'Prikaži područja', 'List Registrations': 'Prikaži registracije', 'List Relatives': 'Izlistaj rodbinu', 'List Reports': 'Prikaži izvještaje', 'List Repositories': 'Prikaži repozitorije', 'List Request Items': 'Lista traženih predmeta', 'List Request Templates': 'Prikaži predloške zahtjeva', 'List Requested Skills': 'Prikaži tražene vještine', 'List Requests': 'Prikaži zahtjeve', 'List Resources': 'Prikaži resurse', 'List Response Summaries': 'Prikaži sumarne odgovore', 'List Responses': 'Prikaz odgovora', 'List Rivers': 'Lista rijeka', 'List Roles': 'Prikaži uloge', 'List Rooms': 'Prikaži sobe', 'List saved searches': 'Prikaži sačuvane pretrage', 'List Scenarios': 'Lista scenarija', 'List School Districts': 'Prikaz rejona škole', 'List School Reports': 'Prikaz izvještaja o školama', 'List Seaports': 'Prikaži luke', 'List Sections': 'Lista sekcija', 'List Sector': 'Prikaz sektora', 'List Sectors': 'Prikaži sektore', 'List Sent Items': 'Prikaži poslane stavke', 'List Sent Shipments': 'Prikaži poslane pošiljke', 'List Series': 'Prikaži serije', 'List Service Profiles': 'Prikaži profile usluga', 'List Services': 'Prikaži usluge', 'List Settings': 'Izlistaj postavke', 'List Shelter Services': 'Prikaži usluge skloništa', 'List Shelter Statuses': 'Prikaži statuse skloništa', 'List Shelter Types': 'Prikaži tipove skloništa', 'List Shelters': 'Prikaži skloništa', 'List Shipment Items': 'Prikaži predmete pošiljke', 'List Shipment/Way Bills': 'Lista poslanih pošiljki', 'List Shipment<>Item Relation': 'Prikaži Pošiljka<>Odnos predmeta', 'List Shipments': 'Lista poslanih pošiljki', 'List Skill Equivalences': 'Prikaži ekvivalencije vještina', 'List Skill Provisions': 'Izlistaj pružanja vještina', 'List Skill Types': 'Prikaži tipove vještina', 'List Skills': 'Prikaži vještine', 'List Solutions': 'Izlistaj rješenja', 'List Sources': 'Prikaži izvore', 'List Staff': 'Izlistaj osoblje', 
'List Staff & Volunteers': 'Prikaži osoblje i volontere', 'List Staff Assignments': 'Prikaži dodjele osoblja', 'List Staff Members': 'Prikaži članove osoblja', 'List Staff Types': 'Izlistaj tipove osoblja', 'List Status': 'Ispiši status', 'List Status Reports': 'Prikaži statusne izvještaje', 'List Statuses': 'Prikaži statuse', 'List Stock Adjustments': 'Prikaži prilagođenja zaliha', 'List Stock Counts': 'Prikaži količine zaliha', 'List Stock in Warehouse': 'Prikaži zalihe u skladištima', 'List Storage Bins': 'Prikaz korpi za smještaj', 'List Storage Location': 'Navedi lokacije skladišta', 'List Subscriptions': 'Ispiši pretplate', 'List Subsectors': 'Prikaži podsektore', 'List Suppliers': 'Prikaži dobavljače', 'List Support Requests': 'Izlistaj zahtjeve za podršku', 'List Survey Answers': 'Navedi odgovore anketa', 'List Survey Questions': 'Prikaz anketnih pitanja', 'List Survey Sections': 'Izlistaj sekcije ankete', 'List Survey Series': 'Prikaži istraživačke nizove', 'List Survey Templates': 'Izlistaj šablone za ankete', 'List Symbologies': 'Prikaži značenje simbola', 'List Symbologies for Layer': 'Prikaži značenje simbola za sloj', 'List Tagged Posts': 'Prikaži označene dijelove teksta', 'List Tags': 'Prikaži oznake', 'List Tasks': 'Prikaži zadatke', 'List Teams': 'Prikaži timove', 'List Template Sections': 'Prikaži odjeljke predložaka', 'List Themes': 'Prikaži teme', 'List Tickets': 'Prikaži kartice', 'List Tours': 'Prikaži ture', 'List Tracks': 'Izlistaj praćenja', 'List Training Events': 'Prikaži događaje obuke', 'List Trainings': 'Prikaži obuke', 'List unidentified': 'Prikaži neidentifikovano', 'List Units': 'Prikaži jedinice', 'List Users': 'Prikaži korisnike', 'List Vehicle Assignments': 'Prikaži dodjele vozila', 'List Vehicle Details': 'Prikaži detalje o vozilu', 'List Vehicle Types': 'Prikaži tipove vozila', 'List Vehicles': 'Prikaži vozila', 'List Volunteer Cluster Positions': 'Prikaži pozicije skupa volontera', 'List Volunteer Cluster Types': 'Prikaži tipove skupa volontera', 'List Volunteer Clusters': 'Prikaži skupove volontera', 'List Volunteer Roles': 'Prikaži uloge volontera', 'List Volunteers': 'Prikaži volontere', 'List Warehouse Items': 'Prikaži stavke skladišta', 'List Warehouses': 'Prikaži skladišta', 'List/Add': 'Izlistaj/Dodaj', 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Popis "ko radi šta i gdje". 
Omogućava agencijama za pomoć da koordinišu svoje aktivnosti.', 'liter': 'litar', 'Lithuania': 'Litvanija', 'Live Help': 'Pomoć uživo', 'Livelihood': 'Izdržavanje', 'Livelihoods': 'Izdržavanja', 'LMS Administration': 'LMS administracija', 'Load': 'Učitaj', 'Load Cleaned Data into Database': 'Unesi očišćene podatke u bazu podataka', 'Load Raw File into Grid': 'Učitaj neobrađenu datoteku u mrežu', 'Loaded By': 'Učitao', 'Loading': 'Učitavam', 'Loading Equipment': 'Učitavanje opreme', 'Loading Locations...': 'Učitavam lokacije...', 'Local Currency': 'Lokalna valuta', 'Local Name': 'Lokalni naziv', 'Local Names': 'Lokalna imena', 'Location': 'Lokacija', 'Location (Site)': 'Lokacija (mjesto)', 'Location 1': 'Lokacija 1', 'Location 2': 'Lokacija 2', 'Location Added': 'Lokacija dodana', 'Location added': 'Lokacija dodana', 'Location added to Organization': 'Lokacija dodana organizaciji', 'Location cannot be converted into a group.': 'Lokacija ne može biti pretvorena u grupu.', 'Location deleted': 'Lokacija obrisana', 'Location Deleted': 'Izbrisana lokacija', 'Location Detail': 'Detalji lokacije', 'Location Details': 'Detalji lokacije', 'Location Group': 'Grupa lokacija', 'Location group cannot be a parent.': 'Lokacijska grupa ne može biti roditelj.', 'Location group cannot have a parent.': 'Grupa lokacija ne može imati roditelja.', 'Location groups can be used in the Regions menu.': 'Grupe lokacija se mogu koristiti u meniju regija.', 'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': 'Lokacije grupa mogu biti korištene za filtriranje prikaza na mapi i u pretrazi rezultata samo na entitetima pokrivenim lokacijama grupe.', 'Location Hierarchies': 'Hijerarhije lokacija', 'Location Hierarchy': 'Hijerarhija lokacija', 'Location Hierarchy added': 'Hijerarhija lokacija dodana', 'Location Hierarchy deleted': 'Hijerarhija lokacija obrisana', 'Location Hierarchy Level 0 Name': 'Ime hijerarhijske lokacije nultog nivoa', 'Location Hierarchy Level 1 Name': 'Naziv nivoa 1 u hijerarhiji lokacija', 'Location Hierarchy Level 2 Name': 'Ime hijerarhijske lokacije drugog stepena', 'Location Hierarchy Level 3 Name': 'Ime lokacije 3. 
hijerarhijskog nivoa', 'Location Hierarchy Level 4 Name': 'Ime nivoa 4 u hijerahiji položaja', 'Location Hierarchy Level 5 Name': 'Naziv lokacije hijerarhijskog nivoa 5', 'Location Hierarchy updated': 'Hijerarhija lokacija ažurirana', 'Location is of incorrect level!': 'Lokacija je na neispravnom nivou!', 'Location is Required!': 'Zahtijeva se lokacija!', 'Location needs to have WKT!': 'Lokacija treba imati WKT!', 'Location removed from Organization': 'Lokacija uklonjena iz organizacije', 'Location Required!': 'Zahtijeva se lokacija!', 'Location updated': 'Lokacija ažurirana', 'Location:': 'Lokacija:', 'Location: ': 'Lokacija: ', 'Locations': 'Lokacije', 'Locations De-duplicator': 'Deduplikator lokacija', 'Locations of this level need to have a parent of level': 'Lokacije ovog nivoa moraju imati roditelja nivoa', 'Locations should be different!': 'Lokacije trebaju biti različite!', 'Lockdown': 'Zaključavanje', 'Loctaion of tip': 'Lokacija savjeta', 'Log': 'Zapisnik', 'Log Entry': 'Element zapisnika', 'Log entry added': 'Unos je dodan', 'Log entry deleted': 'Unos u zapisnik izbrisan', 'Log Entry Deleted': 'Stavka zapisnika izbrisana', 'Log Entry Details': 'Detalji stavki zapisnika', 'Log entry updated': 'Unos je ažuriran', 'Log Time Spent': 'Provedeno vrijeme prijave', 'Logged By': 'Evidentirao', 'Logged Time': 'Vrijeme prijave', 'Logged Time Details': 'Detalji vremena prijave', 'Login': 'Prijava', 'login': 'prijava', 'Login using Facebook account': 'Prijava koristeći Facebook nalog', 'Login using Google account': 'Prijava koristeći Google nalog', 'Login with Facebook': 'Prijava preko Facebooka', 'Login with Google': 'Prijava preko Google', 'Logistics': 'Logistika', 'Logistics Management': 'Upravljanje logistikom', 'Logistics Management System': 'Sistem logističke uprave', 'Logo file %s missing!': 'Nedostaje %s logo datoteka!', 'Logo of the organization. This should be a png or jpeg file and it should be no larger than 400x400': 'Logotip organizacije. To treba biti png ili jpeg datoteka i ne treba biti veći od 400x400', 'Logout': 'Odjavi se', 'long': 'dugi', 'Long Name': 'Dugo ime', 'Long Text': 'Dug Tekst', 'Long-term care': 'Dugoročna briga', 'long>12cm': 'dugo>12cm', 'Longitude': 'Geografska dužina', 'Longitude is Invalid!': 'Geografska dužina je neispravna!', 'Longitude is West - East (sideways).': 'Geografska dužina je Zapad - Istok (horizontalno)', 'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.': 'Geografska dužina je zapad - istok (postrance). Geografska širina je sjever-jug (gore-dolje). Geografska širina je nula na ekvatoru, pozitivna je na sjevernoj hemisferi i negativna na južnoj. Geografska dužina je nula na glavnom meridijanu (Vrijeme po Griniču) i pozitivna je prema istoku, preko Evrope i Azije. Geografska dužina je negativna na zapadu, preko Atlantika i Amerika. Ove vrijednosti treba unijeti u decimalnim stepenima.', 'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Geografska dužina je zapad - istok. Geografska dužina je nula na glavnom meridijanu (Vrijeme po Griniču) i pozitivna je prema istoku, preko Evrope i Azije. 
Geografska dužina je negativna na zapadu, preko Atlantika i Amerika.', 'Longitude is West-East (sideways).': 'Geografska dužina: Zapad-istok (horizontalno)', 'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Geografska dužina je nula na glavnom meridijanu (Vrijeme po Griniču) i pozitivna je prema istoku, preko Evrope i Azije. Geografska dužina je negativna na zapadu, preko Atlantika i Amerika.', 'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Geografska dužina je jednaka nuli na prvom meridijanu (kroz Grinvič, Velika Britanija) i pozitivna je prema istoku, preko Evrope i Azije. Geografska dužina je negativna prema zapadu, preko Atlantika i Amerike.', 'Longitude must be between -180 and 180.': 'Geografska dužina mora biti između -180 i 180.', 'Longitude of far eastern end of the region of interest.': 'Geografska dužina istočnog dijela posmatrane regije', 'Longitude of far western end of the region of interest.': 'Geografska dužina krajnje zapadne tačke regiona o kom je riječ', 'Longitude of Map Center': 'Geografska dužina centra mape', 'Longitude should be between': 'Geografska dužina treba biti između', 'Looting': 'Pljačkanje', 'Lost': 'Izgubljeno', 'Lost Password': 'Izgubljena lozinka', 'low': 'nisko', 'Low': 'Nisko', 'Low Tide Depth': 'Dubina niske plime', 'Luxembourg': 'Luksemburg', 'Macedonia': 'Makedonija', 'Machine with which data was exchanged.': 'Mašina s kojom su podaci razmijenjeni.', 'Madagascar': 'Madagaskar', 'Magnetic Storm': 'Magnetna oluja', 'Mailing list': 'Dopisna lista', 'Mailing list added': 'Lista elektronske pošte dodana', 'Mailing list deleted': 'Lista elektronske pošte obrisana', 'Mailing List Details': 'Detalji liste za slanje poruka', 'Mailing List Name': 'Ime liste za slanje poruka', 'Mailing list updated': 'Lista elektronske pošte ažurirana', 'Mailing Lists': 'Dopisne liste', 'Main cash source': 'Glavni izvor gotovine', 'Main income sources before disaster': 'Glavni izvori prihoda prije nepogode', 'Main?': 'Glavni?', 'Mainstreaming DRR': 'Opšte prihvatanje smanjenja rizika katastrofe', 'Major': 'Bitan', 'Major Damage': 'Značajna šteta', 'Major expenses': 'Glavni troškovi', 'Major outward damage': 'Velika vanjska šteta', 'Make a request': 'Kreiraj zahtjev', 'Make a Request for Aid': 'Kreiraj zahtjev za pomoć', 'Make Commitment': 'Kreiraj zaduženje', 'Make New Commitment': 'Kreiraj novo zaduženje', 'Make People Request': 'Napravi zahtjev za ljudima', 'Make Pledge': 'Obećati podršku', 'Make preparations per the <instruction>': 'Kreirajte pripreme prema <instruction>', 'Make Request': 'Pošalji zahtjev', 'Make Supplies Request': 'Napravi zahtjev za zalihama', 'Malawi': 'Malavi', 'Malaysia': 'Malezija', 'Maldives': 'Maldivi', 'male': 'muško', 'Male': 'Muškarac', 'Malnutrition present prior to disaster': 'Neuhranjenost prisutna prije katastrofe', 'Manage': 'Upravljajte', 'Manage Cache': 'Upravljanje kešom', 'Manage Events': 'Upravljaj događajima', 'Manage Images': 'Upravljaj slikama', 'Manage Incidents': 'Upravljanje incidentima', 'Manage Item catalog': 'Upravljaj katalogom stavki', 'Manage Kits': 'Upravljanje kompletima', 'Manage Layers in Catalog': 'Upravljanje slojevima u katalogu', 'Manage Relief Item Catalogue': 'Upravljanje katalogom humanitarne robe', 
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': 'Upravljajte zahtjevima za zalihe, sredstva, osoblje ili druge resurse. Uparuje sa inventarima u kojima se traže zalihe.', 'Manage requests of hospitals for assistance.': 'Upravljanje zahtjevima bolnica za pomoć.', 'Manage Returns': 'Upravljanje povratima', 'Manage Sub-Category': 'Upravljaj potkategorijama', 'Manage Users & Roles': 'Upravljanje korisnicima i ulogama', 'Manage Vehicles': 'Upravljaj vozilima', 'Manage volunteers by capturing their skills, availability and allocation': 'Upravljaj volonterima vodeći računa o njihovim vještinama, dostupnosti i raspodjeli', 'Manage Warehouses/Sites': 'Upravljanje skladištima/položajima', 'Manage Your Facilities': 'Upravljanje vašim objektima', 'Manager': 'Menadžer', 'Managing Office': 'Ured upravljanja', 'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Obavezno. U GeoServer, ovo je ime sloja. Unutar WFS getCapabilities, ovo je dio sa FeatureType imenom nakon dvotačke(:).', 'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wfs?': 'Obavezno. Bazni URL za pristup servisu, npr. http://host.domain/geoserver/wfs?', 'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wms?': 'Obavezno. Bazni URL za pristup servisu, npr. http://host.domain/geoserver/wms?', 'Mandatory. The URL to access the service.': 'Obavezno. URL za pristup usluzi.', 'manicured': 'manikiran', 'manual': 'ručno', 'Manual': 'Priručnik', 'Manual Synchronization': 'Ručna sinhronizacija', 'Manual synchronization completed.': 'Ručna sinhronizacija završena.', 'Manual synchronization scheduled - refresh page to update status.': 'Raspoređena ručna sinhronizacija - osvježite stranicu da ažurirate status.', 'Manual synchronization started in the background.': 'Ručna sinhronizacija započeta u pozadini.', 'Many': 'Mnogo', 'Map': 'Karta', 'Map cannot display without prepop data!': 'Mapa se ne može prikazati bez pripremljenih podataka!', 'Map Center Latitude': 'Geografska širina središta mape', 'Map Center Longitude': 'Geografska dužina centra mape', 'Map Configuration': 'Konfiguracija karte', 'Map Configuration added': 'Dodana konfiguracija mape', 'Map Configuration deleted': 'Obrisana konfiguracija mape', 'Map Configuration Details': 'Detalji o konfiguraciji mape', 'Map Configuration removed': 'Konfiguracija karte izbrisana', 'Map Configuration updated': 'Ažurirana konfiguracija mape', 'Map Configurations': 'Konfiguracije karte', 'Map has been copied and set as Default': 'Mapa je kopirana i postavljena kao podrazumijevana', 'Map has been set as Default': 'Mapa je postavljena kao podrazumijevana', 'Map Height': 'Visina karte', 'Map is already your Default': 'Mapa je već podrazumijevana', 'Map not available: Cannot write projection file - %s': 'Mapa nije dostupna: Ne mogu pisati datoteku projekcije - %s', 'Map not available: No Projection configured': 'Mapa nije dostupna: nema konfigurisane projekcije', 'Map not available: Projection %(projection)s not supported - please add definition to %(path)s': 'Mapa nije dostupna: Projekcija %(projection)s nije podržana - molim dodajte definiciju u %(path)s', 'Map of Base Stations': 'Mapa baznih stanica', 'Map of Communities': 'Mapa zajednica', 'Map of Facilities': 'Mapa objekata', 'Map of Hospitals': 'Karta bolnica', 'Map of Incident Reports': 'Mapa izvještaja o incidentu', 
'Map of Offices': 'Mapa kancelarija', 'Map of Projects': 'Mapa projekata', 'Map of Requests': 'Mapa zahtjeva', 'Map of Resources': 'Mapa resursa', 'Map of Vehicles': 'Mapa vozila', 'Map of Warehouses': 'Mapa skladišta', 'Map Service Catalogue': 'Katalog usluga mape', 'Map Settings': 'Postavke karte', 'Map Viewing Client': 'Klijent za pregled mapa', 'Map Width': 'Širina mape', 'Map Zoom': 'Uvećanje mape', 'Mapa': 'Mapa', 'MapMaker Hybrid Layer': 'MapMaker hibridni sloj', 'MapMaker Layer': 'Sloj MapMaker', 'Mapping': 'Mapiranje', 'Maps': 'Mape', 'Marine Security': 'Pomorska sigurnost', 'Marital Status': 'Bračno stanje', 'Mark as duplicate': 'Označite kao duplo', 'Mark Sender': 'Označi pošiljaoca', 'Marker added': 'Marker dodan', 'Marker deleted': 'Marker obrisan', 'Marker Details': 'Detalji markera', 'Marker Levels': 'Nivoi markera', 'Marker updated': 'Marker ažuriran', 'Markers': 'Markeri', 'married': 'vjenčan', 'Marshall Islands': 'Maršal ostrva', 'Master': 'Glavni', 'Master Message Log': 'Master zapisnik poruka', 'Master Message Log to process incoming reports & requests': 'Glavni zapisnik poruka za obradu ulaznih izvještaja i zahtjeva', 'Match Percentage': 'Postotak poklapanja', 'Match percentage indicates the % match between these two records': 'Odgovarajući postotak ukazuje na % podudaranja između ova dva zapisa', 'Match Requests': 'Uskladi zahtjeve', 'Match?': 'Slaganje?', 'Matching Catalog Items': 'Odgovarajuće stavke kataloga', 'Matching Items': 'Uparene stavke', 'Matching Records': 'Odgovarajući zapisi', 'Matching Vehicle Types': 'Usklađeni tipovi vozila', 'Matrix of Choices (Multiple Answers)': 'Matrica izbora (više odgovora)', 'Matrix of Choices (Only one answer)': 'Matrica izbora (samo jedan odgovor)', 'Matrix of Text Fields': 'Matrica tekstualnih polja', 'Mauritania': 'Mauritanija', 'Mauritius': 'Mauricijus', 'Max Height': 'Maksimalna visina', 'Max Persons per Dwelling': 'Maksimalni broj osoba po jedinici smještaja', 'maxExtent': 'maksimalni obim', 'Maximum': 'Maksimum', 'Maximum Extent': 'Maksimalni obim', 'Maximum Location Latitude': 'Maksimalna geografska širina lokacije', 'Maximum Location Longitude': 'Maksimalna geografska dužina lokacije', 'maxResolution': 'maksimalnaRezolucija', 'Measure Area: Click the points around the polygon & end with a double-click': 'Područje mjerenja: Kliknite na tačke oko poligona i završite s dvostrukim klikom', 'Measure Length: Click the points along the path & end with a double-click': 'Dužina mjerenja: Kliknite na tačke oko staze i završite s dvostrukim klikom', 'Measures': 'Mjere', 'Media Manager': 'Menadžer medija', 'Medical and public health': 'Medicina i javno zdravstvo', 'Medical Conditions': 'Zdravstvena stanja', 'Medicine': 'Medicina', 'Medium': 'Srednje', 'medium': 'srednji', 'medium<12cm': 'srednje<12cm', 'Megabytes per Month': 'Megabajta po mjesecu', 'Member Organizations': 'Organizacije članice', 'Members': 'Članovi', 'Membership': 'Članstvo', 'Membership added': 'Dodano članstvo', 'Membership deleted': 'Članstvo izbrisano', 'Membership Details': 'Detalji o članstvu', 'Membership updated': 'Ažurirano članstvo', 'Memberships': 'Članstva', 'Mensajería': 'Slanje poruka', 'Mental': 'Mentalno', 'Menu': 'Meni', 'menu item': 'stavka menija', 'Merge': 'Spoji', 'Merge records': 'Spoji zapise', 'Message': 'Poruka', 'message': 'poruka', 'Message added': 'Dodana poruka', 'Message deleted': 'Poruka obrisana', 'Message Details': 'Detalji poruke', 'Message Log': 'Zapisnik poruka', 'Message Source': 'Izvor poruke', 'Message updated': 'Poruka 
ažurirana', 'Message variable': 'Varijabla poruke', 'Message Variable': 'Promjenjiva poruke', 'Messages': 'Poruke', 'Messaging': 'Slanje poruka', 'Messaging Module': 'Modul poruka', 'Messaging settings updated': 'Ažurirana podešenja razmjene poruka', 'Metadata': 'Meta podaci', 'Meteorite': 'Meteorit', 'Meteorological (inc. flood)': 'Meteorološki (uklj. poplave)', 'meter': 'metar', 'meter cubed': 'kubni metar', 'meters': 'metara', 'Method used': 'Metode korištene', 'Mexico': 'Meksiko', 'MGRS Layer': 'MGRS sloj', 'Micronutrient malnutrition prior to disaster': 'Neuhranjenost mikroelementima prisutna prije katastrofe', 'middle': 'sredina', 'Middle Name': 'Srednje ime', 'Migrants or ethnic minorities': 'Imigranti ili etničke manjine', 'Mileage': 'Kilometraža', 'Milestone': 'Prekretnica', 'Milestone Added': 'Prekretnica dodana', 'Milestone Deleted': 'Prekretnica izbrisana', 'Milestone Details': 'Detalji prekretnice', 'Milestone Updated': 'Prekretnica ažurirana', 'Milestones': 'Prekretnice', 'Military': 'Vojni', 'Minimum': 'Minimum', 'Minimum Bounding Box': 'Minimalna uokviravajuća kutija', 'Minimum Location Latitude': 'Minimalna geografska širina lokacije', 'Minimum Location Longitude': 'Minimalna geografska dužina lokacije', 'Minimum shift time is 6 hours': 'Minimalno trajanje smjene je 6 sati', 'Minor Damage': 'Manja šteta', 'Minor/None': 'Minorno/ništa', 'Minorities participating in coping activities': 'Manjine koje učestvuju u akcijama suočavanja', 'Minute': 'minuta', 'Minutes must be a number between 0 and 60': 'Broj minuta mora biti između 0 i 60', 'Minutes must be a number.': 'Minute moraju biti broj.', 'Minutes must be less than 60.': 'Minute moraju biti manje od 60.', 'Minutes per Month': 'Minute po mjesecu', 'Minutes should be a number greater than 0 and less than 60': 'Minute bi trebale biti broj veći od nula i manji od 60', 'Minutes should be greater than 0 and less than 60': 'Minute bi trebale biti broj veći od nula i manji od 60', 'Miscellaneous': 'Razno', 'misshapen': 'deformisano', 'missing': 'nedostaje', 'Missing': 'Nestalo', 'Missing Person': 'Nestala osoba', 'Missing Person Details': 'Detalji o nestaloj osobi', 'Missing Person Registry': 'Registar nestalih osoba', 'Missing Person Reports': 'Izvještaji o nestalim osobama', 'Missing Persons': 'Nestale osobe', 'Missing Persons Registry': 'Registar nestalih osoba', 'Missing Persons Report': 'Izvještaj o nestalim osobama', 'Missing Report': 'Nedostajući izvještaj', 'Missing Senior Citizen': 'Izgubljen stariji građanin', 'Missing Vulnerable Person': 'Nestala ranjiva osoba', 'Mission': 'Misija', 'Mission added': 'Dodana misija', 'Mission deleted': 'Misija izbrisana', 'Mission Details': 'Detalji zadatka', 'Mission Record': 'Zapis misije', 'Mission updated': 'Misija ažurirana', 'Missions': 'Misije', 'mixed': 'izmiješano', 'Mobile': 'Pokretno', 'Mobile Assess.': 'Mobilna procjena.', 'Mobile Basic Assessment': 'Mobilna osnovna procjena', 'Mobile Commons (Inbound)': 'Mobile Commons (Ulazna)', 'Mobile Commons Setting added': 'Mobile Commons postavka dodana', 'Mobile Commons Setting deleted': 'Mobile Commons postavka obrisana', 'Mobile Commons Setting Details': 'Detalji za postavke Mobile Commons', 'Mobile Commons Settings': 'Postavke za Mobile Commons', 'Mobile Commons settings updated': 'Mobile Commons postavke ažurirane', 'Mobile Commons SMS Settings': 'Mobile Commons SMS postavke', 'Mobile Phone': 'Mobilni telefon', 'Mobile Phone #': 'Broj mobitela', 'Mobile Phone Number': 'Broj mobilnog telefona', 'Mode': 'Način rada', 
'Model/Type': 'Model/Tip', 'Modem Settings': 'Postavke modema', 'Modem settings updated': 'Postavke modema ažurirane', 'Moderate': 'Umjereno', 'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': 'Izmjena značajke: Odaberite karakteristiku koju želite deformisati i prevucite tačke da deformišete karakteristiku na izabran način', 'Modify Information on groups and individuals': 'Modifikuj informacije o grupama i pojedincima', 'Modifying data in spreadsheet before importing it to the database': 'Modificiranje podataka u tabeli prije njihovog importovanja u bazu.', 'Module': 'Modul', 'Module Administration': 'Administracija modula', 'module allows the site administrator to configure various options.': 'modul omogućava administratoru stranice da prilagodi razne opcije.', 'Module disabled!': 'Modul je isključen!', 'module helps monitoring the status of hospitals.': 'Modul pomaže nadgledanju statusa bolnica.', 'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': "modul pruža mehanizam da se zajednički omogući pregled katastrofe u toku, koristeći 'online' mapiranje (Geografski Informacijski Sistem (GIS) )", 'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS). You can add markers and pictures to pin point incidents on the map.': "modul pruža mehanizam da se zajednički omogući pregled katastrofe u toku, koristeći 'online' mapiranje (Geografski Informacijski Sistem (GIS) ). Možete dodati markere i slike da pokažete incidente na mapi.", 'Module provides access to information on current Flood Levels.': 'Modul omogućava pristup informacijama o trenutnim nivoima poplave.', 'Module-wise Percentage of Translated Strings': 'Procenat prevedenosti stringova po modulu', 'Moldova': 'Moldavija', 'Monaco': 'Monako', 'Monday': 'Ponedjeljak', 'Monetization': 'Novčana vrijednost', 'Monetization Details': 'Detalji novčane vrijednosti', 'Monetization Report': 'Izvještaj vrijednosti', 'Mongolia': 'Mongolija', 'mongoloid': 'mongoloid', 'Montenegro': 'Crna Gora', 'Month': 'Mjesec', 'Monthly': 'Mjesečno', 'Monthly Cost': 'Mjesečni troškovi', 'Monthly Salary': 'Mjesečna primanja', 'Months': 'Mjeseci', 'more': 'više', 'More Info': 'Više informacija', 'More Options': 'Više opcija', 'more...': 'više...', 'Morgue': 'Mrtvačnica', 'Morgue added': 'Mrtvačnica dodana', 'Morgue deleted': 'Mrtvačnica obrisana', 'Morgue Details': 'Detalji o mrtvačnici', 'Morgue Status': 'Status mrtvačnice', 'Morgue Units Available': 'Mrtvačnice na raspolaganju', 'Morgue updated': 'Mrtvačnica ažurirana', 'Morgues': 'Mrtvačnice', 'Morocco': 'Maroko', 'Mosque': 'Džamija', 'Motorcycle': 'Motocikl', 'Moustache': 'Brkovi', 'Mouth, Size': 'Usta, veličina', 'Move Feature: Drag feature to desired location': 'Premještanje karakteristike: Prevucite karakteristiku na željenu lokaciju.', 'Movements (Filter In/Out/Lost)': 'Kretanja (Filter U/Van/Izgubljeno)', 'Mozambique': 'Mozambik', 'Multi-Option': 'Više opcija', 'Multiple': 'Višestruko', 'Multiple Choice (Multiple Answers)': 'Višestruki izbor (više odgovora)', 'Multiple Choice (Only One Answer)': 'Višestruki izbor (samo jedan odgovor)', 'Multiple Matches': 'Višestruko poklapanje', 'Multiple Text Fields': 'Višestruka tekstualna polja', 'Multiplicator': 'Multiplikator', 'MultiPolygon': 'VišePoligonski', 'Muslim': 'Musliman', 'Must a location have a parent location?': 'Mora li lokacija 
imati roditeljsku lokaciju?', 'My Bookmarks': 'Moje zabilješke', 'My Current function': 'Moja Trenutna funkcija', 'My Details': 'Moji detalji', 'My Logged Hours': 'Moji evidentirani sati', 'My Maps': 'Moje mape', 'My Open Tasks': 'Moji otvoreni zadaci', 'My Profile': 'Moj profil', 'My Tasks': 'Moji zadaci', 'My Volunteering': 'Moje volontiranje', 'Myanmar': 'Mjanmar', 'Módulo de Tickets': 'Modul s karticama', 'n/a': 'nije dostupno', 'N/A': 'N/D', 'Nagorno-Karabakh': 'Nagorno-Karabah', 'Name': 'Naziv', 'Name and/or ID': 'Ime i/ili broj LK', 'Name and/or ID Label': 'Ime i/ili ID oznaka', 'Name field is required!': 'Polje s imenom je obavezno!', 'Name for your Twilio Account.': 'Ime za vaš Twilio nalog.', 'Name of a programme or another project which this project is implemented as part of': 'Ime programa ili drugog projekta čiji je ovaj projekt dio', 'Name of Award': 'Ime nagrade', 'Name of Driver': 'Ime vozača', 'Name of Father': 'Ime oca', 'Name of Institute': 'Ime institucije', 'Name of Map': 'Ime mape', 'Name of Mother': 'Ime majke', 'Name of Storage Bin Type.': 'Ime tipa korpe za smještaj.', 'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'Naziv datoteke (i opcionalno putanja) koja će biti korištena kao pozadina zaglavlja.', 'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Ime datoteke (i opcionalna podstaza) locirane u statičnom mjestu, koja bi trebala biti korištena za gornju lijevu sliku.', 'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Ime datoteke (i eventualna podstaza) smještena u pogledima koja se može koristiti za zaglavlje na dnu stranice', 'Name of the person in local language and script (optional).': 'Ime osobe na lokalnom jeziku i pismu (opcionalno).', 'Name of the repository (for you own reference)': 'Ime repozitorija (za vašu vlastitu referencu)', 'Name, Org and/or ID': 'Ime, Org i/ili ID', 'Name/Model/Type': 'Ime/Model/Tip', 'Names can be added in multiple languages': 'Imena mogu biti dodana na više jezika', 'Namibia': 'Namibija', 'narrow': 'usko', 'National': 'Nacionalno', 'National ID Card': 'Lična karta', 'National NGO': 'Nacionalna NVO', 'National Staff': 'Nacionalno osoblje', 'Nationality': 'Nacionalnost', 'Nationality of the person.': 'Nacionalnost ili državljanstvo osobe.', 'natural hazard': 'prirodni rizik', 'Nautical Accident': 'Pomorska nesreća', 'Nautical Hijacking': 'Nautička otmica', 'NDRT (National disaster response teams)': 'NDRT (Nacionalni tim za odgovor u slučaju katastrofa)', 'Neck, Length': 'Vrat, dužina', 'Neck, Peculiarities': 'Vrat, specifičnosti', 'Neck, Shape': 'Vrat, oblik', "Need a 'url' argument!": "Potreban 'url' argument!", 'Need added': 'Potreba dodana', 'Need deleted': 'Potreba obrisana', 'Need to be logged-in to be able to submit assessments': 'Potrebno je da budete prijavljeni da biste podnijeli procjenu', 'Need to configure Twitter Authentication': 'Potrebno je izvršiti konfiguraciju Twitter autentičnosti', 'Need to specify a budget!': 'Potrebno je navesti budžet!', 'Need to specify a Budget!': 'Potrebno je navesti budžet!', 'Need to specify a bundle!': 'Neophodno je naznačiti paket!', 'Need to specify a feature group!': 'Morate navesti grupu karakteristika!', 'Need to specify a group!': 'Morate specifikovati grupu!', 'Need to specify a kit!': 'Potrebno je odrediti komplet!', 'Need to specify a Kit!': 'Potrebno je odrediti komplet!', 'Need to specify a location to search for.': 
'Potrebno specificirati traženu lokaciju.', 'Need to specify a Resource!': 'Trebate navesti resurs!', 'Need to specify a role!': 'Mora se specificirati uloga!', 'Need to specify a table!': 'Potrebno je navesti tabelu!', 'Need to specify a user!': 'Potrebno je odrediti korisnika!', 'Need Type': 'Vrsta potreba', 'Need Type added': 'Dodan tip potrebe', 'Need Type deleted': 'Tip potreba izbrisan', 'Need Type Details': 'Detalji tipa potrebe', 'Need Type updated': 'Vrsta potrebe ažurirana', 'Need Types': 'Tipovi potreba', 'Need updated': 'Potreba ažurirana', 'Needs': 'Potrebe', 'Needs Details': 'Detalji potreba', 'Needs elaboration!!!': 'Treba elaborirati!!!', 'Needs Maintenance': 'Potrebno održavanje', 'Needs to reduce vulnerability to violence': 'Potrebno je smanjiti ranjivost prema nasilju', 'Negative Flow Isolation': 'Negativna izolacija protoka', 'negroid': 'negroid', 'Neighborhood': 'Komšiluk', 'Neighbourhood': 'Susjedstvo', 'Neighbouring building hazard': 'Opasnosti od susjednih zgrada', 'Neonatal ICU': 'Intenzivna njega za novorođenčad', 'Neonatology': 'Neonatologija', 'Netherlands': 'Nizozemska', 'Network': 'Mreža', 'Network added': 'Mreža dodana', 'Network Details': 'Detalji mreže', 'Network removed': 'Mreža uklonjena', 'Network updated': 'Mreža ažurirana', 'Networks': 'Mreže', 'Neurology': 'Neurologija', 'Never': 'Nikada', 'never': 'nikad', 'never update': 'nikad ažurirati', 'new': 'Novo', 'New': 'Novi', 'new ACL': 'novi ACL', 'New Activity Type': 'Novi tip aktivnosti', 'New Annual Budget created': 'Kreiran novi godišnji budžet', 'New Assessment reported from': 'Izvještaj o novoj procjeni iz', 'New Body Find': 'Novo nađeno tijelo', 'New cases in the past 24h': 'Novi slučajevi u posljednja 24 sata', 'New Certificate': 'Novi certifikat', 'New Checklist': 'Novi spisak', 'New Entry': 'Novi unos', 'New Entry in Asset Log': 'Nova stavka u zapisniku sredstava', 'New Event': 'Novi događaj', 'New Hazard': 'Novi rizik', 'New Home': 'Novi dom', 'New Item Category': 'Nova kategorija predmeta', 'New Job Role': 'Novo radno mjesto', 'New Location': 'Nova lokacija', 'New Location Group': 'Nova grupa lokacija', 'New Organization': 'Nova organizacija', 'New Output': 'Novi izlaz', 'New Page': 'Nova strana', 'New Patient': 'Novi pacijent', 'New Peer': 'Novi saradnik', 'New Post': 'Novi ubacivi tekst', 'New Problem': 'Novi problem', 'New Record': 'Novi zapis', 'new record inserted': 'novi zapis unesen', 'New Records': 'Novi zapisi', 'New Relative': 'Novi srodnik', 'New Report': 'Novi izvještaj', 'New Request': 'Novi zahtjev', 'New Role': 'Nova uloga', 'New Scenario': 'Novi scenario', 'New Sector': 'Novi sektor', 'New Service': 'Nova usluga', 'New Skill': 'Nova vještina', 'New Solution Choice': 'Izbor novog rješenja', 'New Staff Member': 'Novi član osoblja', 'New Stock Adjustment': 'Novo prilagođenje zalihe', 'New Stock Count': 'Nova količina zaliha', 'New Support Request': 'Novi zahtjev za podršku', 'New Synchronization Peer': 'Novi sinhronizacijski saradnik', 'New Team': 'Novi tim', 'New Theme': 'Nova tema', 'New Ticket': 'Nova kartica', 'New Training Course': 'Novi kurs obučavanja', 'New updates are available.': 'Nove dostupne nadogradnje.', 'New Volunteer': 'Novi volonter', 'New Zealand': 'Novi Zeland', 'Newer Timestamp': 'Novija vremenska oznaka', 'News': 'Novosti', 'Next': 'Sljedeće', 'next 100 rows': 'Narednih 100 redova', 'Next run': 'Sljedeće pokretanje', 'Next View': 'Sljedeći prikaz', 'NGO': 'NVO', 'Nicaragua': 'Nikaragva', 'Nigeria': 'Nigerija', 'No': 'Ne', 'NO': 'NE', 'no': 'nema', 'No 
access at all': 'Nema nikakvog pristupa', 'No access to this record!': 'Nema pristupa ovom zapisu!', 'No Accounts currently defined': 'Trenutno nema definisanih računa', 'No action recommended': 'Nema preporučene akcije', 'No Activities currently registered in this event': 'Trenutno nema registrovanih aktivnosti u ovom događaju', 'No Activities Found': 'Nema pronađenih aktivnosti', 'No Activity Organizations Found': 'Nema nađenih organizacija aktivnosti', 'No Activity Types Found': 'Nema nađenih tipova aktivnosti', 'No Activity Types found for this Activity': 'Nema nađenih tipova aktivnosti za ovu aktivnost', 'No Activity Types found for this Project Location': 'Nema nađenih tipova aktivnosti za ovu lokaciju projekta', 'No Addresses currently registered': 'Trenutno nema registrovanih adresa', 'No Affiliations defined': 'Nema definisanih namještenja', 'No Aid Requests currently registered': 'Trenutno nema registrovanih zahtjeva za pomoć', 'No Airports currently registered': 'Trenutno nema registrovanih aerodroma', 'No Alternative Items currently registered': 'Nema alternativnih artikala registrovanih', 'No annual budgets found': 'Godišnji budžeti nisu nađeni', 'No Appraisals found': 'Nema nađenih poređenja opcija', 'No Assessment Answers': 'Nema odgovora ocjene', 'No Assessment Questions': 'Nema pitanja ocjene', 'No Assessment Summaries currently registered': 'Nema trenutno registrovanih procjena pregleda', 'No Assessment Templates': 'Nema predložaka ocjene', 'No Assessments currently registered': 'Trenutno nema registrovanih procjena', 'No Asset Assignments currently registered': 'Trenutno nema registrovanih dodjela sredstava', 'No Assets currently registered': 'Nema trenutno registrovanih sredstava', 'No Assets currently registered in this event': 'Trenutno nema registrovanih sredstava na ovom događaju', 'No Assets currently registered in this incident': 'Trenutno nema sredstava registrovanih u ovom incidentu', 'No Assets currently registered in this scenario': 'Trenutno nema sredstava registrovanih u ovom scenariju', 'No Awards found': 'Nema nađenih nagrada', 'No Base Layer': 'Nema baznog sloja', 'No Base Stations currently registered': 'Nema trenutno registrovanih baznih stanica', 'No Baseline Types currently registered': 'Trenutno nije registriran nijedan tip referentne tačke', 'No Baselines currently registered': 'Nijedna referentna tačka nije trenutno registrovana', 'No Beneficiaries Found': 'Nema nađenih korisnika', 'No Beneficiary Types Found': 'Nema nađenih tipova korisnika', 'No Branch Organizations currently registered': 'Nema trenutno registrovanih ogranaka organizacija', 'No Brands currently registered': 'Nema trenutno registrovanih marki', 'No Budgets currently registered': 'Nema prijavljenih budžeta trenutno', 'No Bundles currently registered': 'Nema registrovanih paketa', 'No Camp Services currently registered': 'Trenutno nema registrovanih usluga u kampu', 'No Camp Statuses currently registered': 'Trenutno nema registrovanih statusa kampa', 'No Camp Types currently registered': 'Nije registrovan nikakav tip kampa', 'No Campaign Messages Found': 'Nema nađenih poruka kampanje', 'No Campaigns Found': 'Nema nađenih kampanja', 'No Camps currently registered': 'Nijedan kamp nije trenutno registrovan', 'No Cases found': 'Nema nađenih slučajeva', 'No Catalog Items currently registered': 'Trenutno nema registrovanih stavki kataloga', 'No Catalogs currently registered': 'Nema trenutno registrovanih kataloga', 'No Checklist available': 'Nijedna kontrolna lista nije dostupna', 'No Cluster 
Subsectors currently registered': 'Nijedan podsektor skupa nije trenutačno registrovan', 'No Clusters currently registered': 'Trenutno nema registrovanih skupova', 'No Coalitions currently recorded': 'Nema trenutno zabilježenih koalicija', 'No Commitment Items currently registered': 'Trenutno nema registriranih stavki zaduženja', 'No Commitments': 'Nema zaduženja', 'No Communities Found': 'Nema nađenih zajednica', 'No Completed Assessment Forms': 'Nema završenih formulara ocjene', 'No Configs currently defined': 'Trenutno nema definisanih konfiguracija', 'No conflicts logged': 'Nisu zabilježeni konflikti', 'No contact information available': 'Nisu dostupne informacije o kontaktu', 'No contact method found': 'Nije pronađena metoda kontakta', 'No Contacts currently registered': 'Nema registriranih kontakata', 'No contacts currently registered': 'Nema registriranih kontakata', 'No Contacts Found': 'Nema nađenih kontakata', 'No contacts yet defined for this site': 'Kontakti još nisu definisani za ovo mjesto', 'No Credentials currently set': 'Nisu postavljeni nijedni akreditivi', 'No data available': 'Nema dostupnih podataka', 'No Data currently defined for this Theme Layer': 'Nema definisanih podataka za ovaj tematski sloj', 'No data in this table - cannot create PDF!': 'Nema podataka u ovoj tabeli - ne može se kreirati PDF!', 'No databases in this application': 'Nema baza podataka u ovoj aplikaciji', 'No dead body reports available': 'Nijedan izvještaj o mrtvim tijelima nije dostupan', 'No Details currently registered': 'Trenutno nema registrovanih detalja', 'No Disaster Assessments': 'Nema procjena katastrofe', 'No Distribution Items Found': 'Nisu pronađene stavke raspodjele', 'No Distributions currently registered': 'Trenutno nema registrovanih raspodjela', 'No Distributions Found': 'Nema nađenih raspodjela', 'No Documents currently attached to this request': 'Trenutno nema dokumenata koji su priloženi uz ovaj zahtjev', 'No Documents found': 'Nijedan dokument nije pronađen', 'No Donations': 'Nema donacija', 'No Donors currently registered': 'Trenutno nema registrovanih donatora', 'No education details currently registered': 'Nema trenutno registrovanih detalja o obrazovanju', 'No Education Levels currently registered': 'Nema trenutno registrovanih nivoa obrazovanja', 'No Emails currently in InBox': 'Trenutno nema elektronske pošte u ulaznom sandučetu', 'No Emails currently in Outbox': 'Trenutno nema e-mail poruka u izlaznom sandučetu', 'No Emails currently in Sent': 'Nema trenutno elektronske pošte u poslanom sandučetu', 'No entries currently available': 'Nema trenutno dostupnih unosa', 'No entries found': 'Ništa nije uneseno', 'No entries matching the query': 'Nema stavki vezanih za upit', 'No entry available': 'Nema dostupnog unosa', 'No Event Types currently registered': 'Nijedan tip događaja nije trenutno registrovan', 'No Events currently registered': 'Nema registriranih događaja', 'No Facilities currently registered': 'Trenutno nema registrovanih objekata', 'No Facilities currently registered in this event': 'Nema trenutno registriranih objekata za ovaj događaj', 'No Facilities currently registered in this incident': 'U ovom incidentu trenutno nema registrovanih objekata', 'No Facilities currently registered in this scenario': 'U ovom scenariju trenutno nema registrovanih postrojenja', 'No Facility Types currently registered': 'Nijedan tip objekta nije trenutno registrovan', 'No Feature Classes currently defined': 'Nijedna klasa karakteristika nije trenutno definisana.', 'No Feature Groups currently 
defined': 'Nijedna grupa karakteristika nije trenutno definisana.', 'No Feature Layers currently defined': 'Nema trenutno definisanih slojeva karakteristika', 'No file uploaded.': 'Nema poslane datoteke.', 'No Flood Reports currently registered': 'Trenutno nema registriranih izvještaja o poplavi', 'No forms to the corresponding resource have been downloaded yet.': 'Još uvijek nisu preuzeti obrasci za odgovarajuće resurse.', 'No further users can be assigned.': 'Daljnji korisnici se ne mogu dodijeliti.', 'No GPS data currently registered': 'Nema GPS podataka trenutno', 'No Group Memberships currently registered': 'Trenutno nema registrovanih članstava u grupi', 'No Groups currently defined': 'Trenutno nema definisanih grupa', 'No Groups currently registered': 'Trenutno nema registrovanih grupa', 'No Hazards currently registered': 'Trenutno nema registrovanih rizika', 'No Hazards found for this Project': 'Nema nađenih rizika za ovaj projekt', 'No Heliports currently registered': 'Trenutno nema registrovanih helikopterskih sletišta', 'No Homes currently registered': 'Nijedan dom nije trenutno registrovan', 'No Hospitals currently registered': 'Nema registriranih bolnica', 'No Human Resources currently assigned to this incident': 'Ljudski resursi nisu dodijeljeni ovom incidentu', 'No Human Resources currently registered in this event': 'Nema ljudskih resursa trenutno registriranih za ovaj događaj', 'No Human Resources currently registered in this scenario': 'Nema ljudskih resursa trenutno registrovanih u ovom scenariju', 'No Identification Report Available': 'Nema dostupnog izvještaja o identifikaciji', 'No Identities currently registered': 'Nema trenutno registriranih identiteta', 'No Image': 'Nema fotografije', 'No Images currently registered': 'Nema trenutno registrovanih slika', 'No Impact Types currently registered': 'Nijedan tip utjecaja nije trenutačno registrovan.', 'No Impacts currently registered': 'Nema trenutno zabilježenih utjecaja', 'No Import Files currently uploaded': 'Trenutno nema dodanih datoteka za uvoz', 'No import jobs': 'Nema poslova uvoza', 'No Incident Reports currently registered': 'Nema trenutno registrovanih izvještaja o incidentima', 'No Incident Reports currently registered for this event': 'Nema trenutno registrovanih izvještaja o incidentu za ovaj događaj', 'No Incident Reports currently registered in this incident': 'Trenutno nema izvještaja o incidentu registrovanih u ovom incidentu', 'No Incident Types currently registered': 'Trenutno nema registrovanih tipova incidenta', 'No Incidents currently registered in this event': 'Trenutno nema registrovanih incidenata na ovom događaju', 'No Incoming Shipments': 'Nema dolazećih pošiljki', 'No Inventories currently have suitable alternative items in stock': 'Nijedan inventar trenutno nema odgovarajuću zamjensku stavku u zalihama', 'No Inventories currently have this item in stock': 'Nijedan inventar trenutno nema ovu stavku u zalihama', 'No Inventory Stores currently registered': 'Trenutno nema registrovanih skladišta inventara', 'No Item Catalog Category currently registered': 'Trenutno nema registrovanih predmetnih kategorija', 'No Item Catalog currently registered': 'Trenutno nema kataloga predmeta', 'No Item Categories currently registered': 'Trenutno nema registrovanih predmetnih kategorija', 'No Item currently registered': 'Nema trenutno registrovanih stavki', 'No Item Packets currently registered': 'Nema trenutno registrovanih paketa stavki', 'No Item Packs currently registered': 'Nema trenutno registrovanih paketa', 'No Item 
Sub-Category currently registered': 'Trenutno nema registrovanih predmetnih podkategorija', 'No items currently in stock': 'Nema stavki u zalihama', 'No Items currently registered': 'Nema trenutno registrovanih stavki', 'No Items currently registered in this Inventory': 'Trenutno nema registrovanih stavki u inventaru', 'No Items currently requested': 'Nema trenutno traženih stavki', 'No items have been selected for shipping.': 'Nema stavki izabranih za isporuku.', 'No jobs configured': 'Nema podešenih poslova', 'No jobs configured yet': 'Još uvijek nema podešenih poslova', 'No Keys currently defined': 'Nema trenutno definisanih ključeva', 'No Keywords Found': 'Nema nađenih ključnih riječi', 'No Kits': 'Nema kompleta', 'No Kits currently registered': 'Trenutno nema registrovanih kompleta', 'No Layers currently configured in this Profile': 'Nema slojeva konfigurisanih u ovom profilu', 'No Layers currently defined': 'Nema trenutno definisanih slojeva', 'No Layers currently defined in this Symbology': 'Nema definisanih slojeva za ovo značenje simbola', 'No Level 1 Assessments currently registered': 'Nema procjene prvog nivoa koja je trenutno registrovana', 'No Level 2 Assessments currently registered': 'Nema procjene drugog nivoa koja je trenutno registrovana', 'No Location Hierarchies currently defined': 'Trenutno nije definisana hijerarhija lokacija', 'No location information defined!': 'Nema definisanih informacija o lokaciji!', 'No location known for this person': 'Ne postoji poznata lokacija za ovu osobu', 'No Locations currently available': 'Nema trenutno dostupnih lokacija', 'No Locations currently registered': 'Nijedna lokacija trenutno nije registrovana', 'No Locations Found': 'Nema nađenih lokacija', 'No locations found for members of this team': 'Nisu pronađene lokacije za članove ovog tima', 'No Locations found for this Organization': 'Nisu pronađene lokacije za ovu organizaciju', 'No locations registered at this level': 'Nema registrovanih lokacija na ovom nivou', 'No log entries matching the query': 'Nema podudaranja u zapisniku za upit', 'No Mailing List currently established': 'Trenutno nema uspostavljene mailing liste', 'No Map Configurations currently defined': 'Nema trenutno definisane konfiguracije mape', 'No Map Configurations currently registered in this event': 'Trenutno nije registrovana konfiguracija karte u ovom događaju', 'No Map Configurations currently registered in this incident': 'Trenutno nema konfiguracija mapa registrovanih u ovom incidentu', 'No Map Configurations currently registered in this scenario': 'Nijedna konfiguracija karte nije trenutno registrovana u ovom scenariju.', 'No Markers currently available': 'Nema trenutno dostupnih markera', 'No match': 'Nema poklapanja', 'No Match': 'Nema podudaranja', 'No Matching Catalog Items': 'Nema odgovarajućih kataloških stavki', 'No Matching Items': 'Nema odgovarajućih stavki', 'No Matching Records': 'Nema niti jedan zapis', 'No matching records found': 'Nisu pronađeni odgovarajući zapisi', 'No matching records found.': 'Nisu pronađeni odgovarajući zapisi.', 'No Matching Vehicle Types': 'Nema odgovarajućih tipova vozila', 'No Members currently registered': 'Trenutno nema registrovanih članova', 'No Memberships currently defined': 'Trenutno nema definisanih članstava', 'No Memberships currently registered': 'Nema trenutno prijavljenog članstva', 'No Messages currently in InBox': 'Trenutno nema poruka u ulaznom sandučetu', 'No Messages currently in Outbox': 'Trenutno nema poruka u izlaznom sandučetu', 'No Messages currently in the Message Log': 'Nema poruka u 
dnevniku poruka', 'No messages in the system': 'Nema poruka u sistemu', 'No Milestones Found': 'Nema nađenih prekretnica', 'No Mobile Commons Settings currently defined': 'Mobile Commons postavke trenutno nisu definisane', 'No more items may be added to this request': 'Ne može se više stavki dodati na ovaj zahtjev', 'No morgues found': 'Nema nađenih mrtvačnica', 'No Need Types currently registered': 'Trenutno nema registriranih tipova potrebe', 'No Needs currently registered': 'Nema trenutno registrovanih potreba', 'No Networks currently recorded': 'Nema trenutno zabilježenih mreža', 'No of Families Settled in the Schools': 'Broj porodica smještenih u školama', 'No of Families to whom Food Items are Available': 'Broj porodica za koje su dostupni prehrambeni artikli', 'No of Families to whom Hygiene is Available': 'Broj porodica kojima je higijena dostupna', 'No of Families to whom Non-Food Items are Available': 'Broj porodica za koje su dostupni neprehrambeni artikli', 'No of Female Students (Primary To Higher Secondary) in the Total Affectees': 'Broj ženskih učenika (osnovne i srednje škole) od ukupno pogođenih', 'No of Female Teachers & Other Govt Servants in the Total Affectees': 'Broj žena u nastavi i drugim vladinim uslugama u ukupnom broju pogođenih', 'No of Male Students (Primary To Higher Secondary) in the Total Affectees': 'Broj muških učenika (osnovne i srednje škole) od ukupno pogođenih', 'No of Male Teachers & Other Govt Servants in the Total Affectees': 'Broj muškaraca u nastavi i drugim vladinim uslugama u ukupnom broju pogođenih', 'No of Rooms Occupied By Flood Affectees': 'Broj soba koje su zauzele osobe pogođene poplavom', 'No Office Types currently registered': 'Trenutno nema registrovanih tipova kancelarija', 'No Offices currently registered': 'Nema trenutno registrovanih kancelarija', 'No Offices found!': 'Uredi nisu pronađeni!', 'No Open Tasks for %(project)s': 'Nema otvorenih zadataka za %(project)s', 'No options available': 'Nema dostupnih opcija', 'no options available': 'nema dostupnih opcija', 'No options currently available': 'Nema trenutno dostupnih opcija', 'No Orders registered': 'Nema registrovanih narudžbi', 'No Organization Domains currently registered': 'Trenutno nema registrovanih domena organizacija', 'No Organization Types currently registered': 'Trenutno nema registrovanih tipova organizacija', 'No Organizations currently registered': 'Nema trenutno registrovanih organizacija', 'No Organizations for Project(s)': 'Nema organizacija za projekt(e)', 'No Organizations found for this Policy/Strategy': 'Nije nađena organizacija za ovu politiku/strategiju', 'No outputs found': 'Nema nađenih izlaza', 'No Packets for Item': 'Nema paketa za artikle', 'No Packs for Item': 'Nema paketa za artikle', 'No Parsers currently connected': 'Nijedan parser nije trenutno povezan', 'No Partner Organizations currently registered': 'Nema trenutno registrovanih partnerskih organizacija', 'No Patients currently registered': 'Trenutno nema registrovanih pacijenata', 'No peers currently registered': 'Nema trenutno registrovanih saradnika', 'No Peers currently registered': 'Nema trenutno registrovanih suradnika', 'No pending registrations found': 'Na čekanju nema zahtjeva za registraciju', 'No pending registrations matching the query': 'Nema registracije na čekanju za vaš upit', 'No People currently committed': 'Trenutno nema zaduženih ljudi', 'No People currently registered': 'Trenutno nema registrovanih ljudi', 'No People currently registered in this camp': 'Trenutno nema prijavljenih 
ljudi u ovom kampu', 'No People currently registered in this shelter': 'Za sad nema registrovanih u skloništu', 'No person record found for current user.': 'Nisu pronađeni lični podaci za trenutnog korisnika.', 'No Persons currently registered': 'Trenutno nema registrovanih osoba', 'No Persons currently reported missing': 'Trenutno nema registrovanih nestalih osoba', 'No Persons found': 'Osoba nije pronađena', 'No Photos found': 'Slike nisu nađene', 'No Picture': 'Nema slike', 'No PoI Types currently available': 'Nema trenutno dostupnih tipova tačaka interesa', 'No Points of Interest currently available': 'Nema trenutno dostupnih tačaka interesa', 'No PoIs available.': 'Nema dostupnih tačaka interesa.', 'No Policies or Strategies found': 'Nema nađenih politika ili strategija', 'No Population Statistics currently registered': 'Ne postoji registrovana statistika stanovništva', 'No Posts available': 'Nema dostupnih ubacivih tekstova', 'No posts currently available': 'Nema trenutno dostupnih ubacivih tekstova', 'No posts currently set as module/resource homepages': 'Nema ubacivih tekstova postavljenih kao početne stranice za modul/resurs', 'No posts currently tagged': 'Nema trenutno označenih ubacivih tekstova', 'No Posts currently tagged to this event': 'Ubacivi tekstovi nisu označeni za ovaj događaj', 'No Presence Log Entries currently registered': 'Trenutno nema registrovanih stavki zapisnika prisustva', 'No problem group defined yet': 'Nema još definisane grupe problema', 'No Problems currently defined': 'Nijedan problem trenutno nije definisan', 'No Professional Experience found': 'Nije nađeno profesionalno iskustvo', 'No Profiles currently have Configurations for this Layer': 'Nema profila konfigurisanih za ovaj sloj.', 'No Projections currently defined': 'Trenutno nema definisanih projekcija', 'No Projects currently registered': 'Trenutno nema registrovanih projekata', 'No projects currently registered': 'Trenutno nema registrovanih projekata', 'No Query currently defined': 'Trenutno nema definisanih upita', 'No Question Meta-Data': 'Nema metapodataka pitanja', 'No Rapid Assessments currently registered': 'Nema brzih procjena trenutno registrovanih', 'No Ratings for Skill Type': 'Nema ocjena za tip vještine', 'No Received Items currently registered': 'Nema registrovanih primljenih stavki', 'No Received Shipments': 'Nema primljenih pošiljki', 'No Records currently available': 'Trenutno nema nikakvih podataka', 'No records found': 'Nisu pronađeni zapisi', 'No records in this resource. Add one more records manually and then retry.': 'Nema zapisa u ovom resursu. 
Dodajte ručno jedan ili više zapisa i probajte ponovo.', 'No Records matching the query': 'Nema zapisa koji odgovaraju upitu', 'No records matching the query': 'Nema zapisa koji odgovaraju upitu', 'No records to review': 'Nema zapisa za pregled', 'No recovery reports available': 'Trenutno nema dostupnih izvještaja o pronalasku', 'No Regions currently registered': 'Trenutno nema registrovanih područja', 'No Relatives currently registered': 'Nijedan srodnik nije trenutno prijavljen', 'No report available.': 'Nema dostupnog izvještaja.', 'No report specified.': 'Nema navedenog izvještaja.', 'No reports available.': 'Niti jedan izvještaj nije dostupan.', 'No reports currently available': 'Trenutno nema dostupnih izvještaja', 'No repositories configured': 'Nema podešenih repozitorija', 'No Request Items currently registered': 'Nema trenutno registrovanih stavki koje se zahtijevaju', 'No Request Shipments': 'Nema zahtijevanih pošiljki', 'No Request Templates': 'Nema predložaka zahtjeva', 'No Requests': 'Nema zahtjeva', 'No requests currently registered': 'Trenutno nema registrovanih zahtjeva', 'No requests found': 'Zahtjevi nisu pronađeni', 'No Resource Types defined': 'Nema definisanih tipova resursa', 'No Resources assigned to Incident': 'Resursi nisu dodijeljeni ovom incidentu', 'No resources configured yet': 'Još uvijek nema podešenih resursa', 'No resources currently registered': 'Trenutno nema registrovanih resursa', 'No resources currently reported': 'Trenutno nema prijavljenih sredstava', 'No Resources in Inventory': 'Nema resursa u zalihama', 'No Response': 'Nema odgovora', 'No Response Summaries Found': 'Nema nađenih sažetaka odgovora', 'No Responses currently registered': 'Trenutno nema registrovanih odgovora', 'No Restrictions': 'Bez ograničenja', 'No Rivers currently registered': 'Nema trenutno registrovanih rijeka', 'No role to delete': 'Nema uloge za brisanje', 'No roles currently assigned to this user.': 'Nema uloga dodijeljenih ovom korisniku.', 'No Roles currently defined': 'Nijedna uloga nije trenutno definisana', 'No Roles defined': 'Nijedna uloga nije definirana', 'No Rooms currently registered': 'Nema trenutno registriranih soba', 'No Scenarios currently registered': 'Nema trenutno prijavljenih scenarija', 'No School Districts currently registered': 'Trenutno nema registrovanih školskih rejona', 'No School Reports currently registered': 'Trenutno nema registriranih izvještaja o školama', 'No Seaports currently registered': 'Trenutno nema registrovanih luka', 'No Search saved': 'Nema snimljene pretrage', 'No Sections currently registered': 'Nema trenutno registrovanih odjela', 'No Sectors currently registered': 'Sektori trenutno nisu registrovani', 'No Sectors found for this Organization': 'Nisu pronađeni sektori za ovu organizaciju', 'No Sectors found for this Project': 'Nema nađenih sektora za ovaj projekt', 'No Sectors found for this Theme': 'Nema nađenih sektora za ovu temu', 'No Senders Whitelisted': 'Nema pošiljaoca na bijeloj listi', 'No Sent Items currently registered': 'Nema trenutno registrovanih poslanih stavki', 'No Sent Shipments': 'Nema poslanih pošiljki', 'No series currently defined': 'Trenutno nema definisanih serija', 'No service profile available': 'Nema dostupnog profila usluge', 'No Services currently registered': 'Trenutno nema registrovanih usluga', 'No Services found for this Organization': 'Nisu pronađene usluge za ovu organizaciju', 'No Settings currently defined': 'Nema trenutno definisanih postavki', 'No Shelter Services currently registered': 'Trenutno nema 
registriranih usluga skloništa', 'No Shelter Statuses currently registered': 'Trenutno nema registrovanih statusa skloništa', 'No Shelter Types currently registered': 'Trenutno nema registrovanih tipova skloništa', 'No Shelters currently registered': 'Trenutno nema registrovanih skloništa', 'No Shipment Items': 'Nema stavki pošiljke', 'No Shipment Transit Logs currently registered': 'Trenutno nema registrovanih tranzitnih zapisa', 'No Shipment/Way Bills currently registered': 'Trenutno nema registrovanih dostava/putnih naloga', 'No Skill Types currently set': 'Nijedna vrsta vještina nije trenutno podešena', 'No Skills currently requested': 'Nema trenutno traženih vještina', 'No skills currently set': 'Nijedna vještina nije trenutno podešena', 'No Skills Required': 'Nema potrebnih vještina', 'No SMS currently in InBox': 'Trenutno nema SMS u ulaznom sandučetu', 'No SMS currently in Outbox': 'Trenutno nema SMS u izlaznom sandučetu', "No SMS's currently in Sent": 'Trenutno nema SMS-a u poslanom sandučetu', 'No Solutions currently defined': 'Nema definisanih rješenja', 'No Staff currently registered': 'Nema osoblja trenutno registrovanog', 'No staff or volunteers currently registered': 'Trenutno nema registrovanih članova osoblja ili volontera', 'No Staff Types currently registered': 'Nijedan tip osoblja nije trenutno registrovan', 'No status information available': 'Informacije o statusu nisu dostupne', 'No status information currently available': 'Nema trenutno dostupnih statusnih informacija', 'No Statuses currently registered': 'Trenutno nema registrovanih statusa', 'No stock adjustments have been done': 'Prilagođenja zaliha nisu obavljena', 'No stock counts have been done': 'Prebrojavanja zaliha nisu obavljena', 'No Stock currently registered': 'Trenutno nema registrovanih zaliha', 'No Stock currently registered in this Warehouse': 'Nema zaliha registrovanih za ovo skladište', 'No Storage Bin Type currently registered': 'Nijedan tip korpe za smještaj nije trenutno registrovan', 'No Storage Bins currently registered': 'Trenutno nema registrovanih korpi za smještaj', 'No Storage Locations currently registered': 'Trenutno nema registrovanih lokacija za smještaj', 'No Subscription available': 'Nema dostupne pretplate', 'No Subsectors currently registered': 'Trenutno nema registrovanih podsektora', 'No Suppliers currently registered': 'Trenutno nema registrovanih dobavljača', 'No Support Requests currently registered': 'Trenutno nema registrovanih Zahtjeva za podršku', 'No Survey Answers currently entered.': 'Nema trenutno unesenih odgovora na ankete.', 'No Survey Answers currently registered': 'Trenutno nema registrovanih anketnih odgovora', 'No Survey Questions currently registered': 'Trenutno nema registrovanih anketnih pitanja', 'No Survey Sections currently registered': 'Trenutno nema registrovanih anketnih odjela', 'No Survey Series currently registered': 'Nema registrovanih serija anketa', 'No Survey Template currently registered': 'Trenutno nema registrovanog šablona za anketu', 'No Symbologies currently defined': 'Trenutno nema definisanih značenja simbola', 'No Symbologies currently defined for this Layer': 'Nema trenutno definisanih značenja simbola za ovaj sloj', 'No Sync': 'Bez sinhronizacije', 'No sync permitted!': 'Sinhronizacija nije dozvoljena!', 'No synchronization': 'Bez sinhronizacije', 'No tags currently defined': 'Trenutno nema definisanih oznaka', 'No Tasks Assigned': 'Nema dodijeljenih zadataka', 'No tasks currently assigned': 'Trenutno nema dodijeljenih zadataka', 'No tasks currently registered': 
'Nema trenutno registriranih zadataka', 'No Tasks currently registered in this event': 'Nijedan zadatak trenutno nije registrovan u ovaj događaj', 'No Tasks currently registered in this incident': 'Trenutno nema zadataka registrovanih u ovom incidentu', 'No Tasks currently registered in this scenario': 'Trenutno nema registrovanih zadataka u ovom scenariju', 'No Tasks with Location Data': 'Nema zadataka sa podacima o lokaciji', 'No Teams currently registered': 'Trenutno nema registrovanih timova', 'No template found!': 'Šablon nije pronađen!', 'No Template Sections': 'Nema odjeljaka predložaka', 'No Themes currently defined': 'Nijedna tema nije trenutno definisana', 'No Themes currently registered': 'Trenutno nema registrovanih tema', 'No Themes found for this Activity': 'Nema nađenih tema za ovu aktivnost', 'No Themes found for this Project': 'Nema nađenih tema za ovaj projekat', 'No Themes found for this Project Location': 'Nema nađenih tema za ovu lokaciju projekta', 'No Tickets currently registered': 'Trenutno nema registrovanih kartica', 'No Time Logged': 'Nema zabilježenog vremena', 'No Tours currently registered': 'Trenutno nema registrovanih tura', 'No Tracks currently available': 'Trenutno nema dostupnih zapisa', 'No translations exist in spreadsheet': 'Ne postoje prijevodi u ovoj tablici', 'No Tweets Available.': 'Nema dostupnih Tweet-ova.', 'No Tweets currently in InBox': 'Trenutno nema Tweet u ulaznom sandučetu', 'No Tweets currently in Outbox': 'Trenutno nema Tweet u izlaznom sandučetu', 'No Twilio Settings currently defined': 'Nema trenutno definisanih Twilio postavki', 'No Units currently registered': 'Trenutno nema registrovanih jedinica', 'No units currently registered': 'Trenutno nema registrovanih jedinica', 'No Users currently registered': 'Nema trenutno registrovanih korisnika', 'No users have taken a tour': 'Nijedan korisnik nije uzeo turu', 'No users with this role at the moment.': 'Nema korisnika s ovom ulogom u datom trenutku.', "No UTC offset found. Please set UTC offset in your 'User Profile' details. Example: UTC+0530": 'Nema pomaka od griničkog vremena. Molim navedite UTC pomak u korisničkim profilima. 
Primjer: UTC+0530', 'No Vehicle Details currently defined': 'Trenutno nisu definisani detalji vozila', 'No Vehicle Types currently registered': 'Trenutno nema registrovanih tipova vozila', 'No Vehicles currently assigned to this incident': 'Vozila nisu dodijeljena ovom incidentu', 'No Vehicles currently registered': 'Nema registrovanih vozila', 'No volunteer availability registered': 'Nema registrovane dostupnosti volontera', 'No Volunteer Cluster Positions': 'Nema pozicija skupa volontera', 'No Volunteer Cluster Types': 'Nema tipova skupa volontera', 'No Volunteer Clusters': 'Nema skupova volontera', 'No volunteer information registered': 'Nema registrovanih informacija o volonterima', 'No Volunteers currently registered': 'Trenutno nema registrovanih volontera', 'No Warehouse Items currently registered': 'Nema registrovanih stavki skladišta', 'No Warehouses currently registered': 'Trenutno nema registrovanih skladišta', 'No Warehouses match this criteria': 'Nema skladišta koja odgovaraju ovom kriteriju', 'non-critical': 'ne-kritično', 'Non-medical Staff': 'Nemedicinsko osoblje', 'Non-structural Hazards': 'Nestrukturne opasnosti', 'None': 'Nema', 'none': 'nijedno', 'None (no such record)': 'Nijedan (ne postoji takav zapis)', 'None of the above': 'Ništa od navedenog', 'Noodles': 'Tjestenine', 'Normal': 'Normalan', 'normal': 'normalno', 'Normal food sources disrupted': 'Uobičajeni izvori hrane poremećeni', 'Normal Job': 'Normalni posao', 'Northern Cyprus': 'Sjeverni Kipar', 'Norway': 'Norveška', 'Nose, Angle': 'Nos, ugao', 'Nose, Curve': 'Nos, krivulja', 'Nose, shape': 'Nos, oblik', 'Nose, size': 'Nos, veličina', 'not accessible - no cached version available!': 'Nije dostupno - nema dostupne cache verzije!', 'not accessible - using cached version from': 'nije dostupno - koristi se cache verzija od', 'Not allowed to Donate without matching to a Request!': 'Nije dopušteno donirati bez usaglašavanja sa zahtjevom!', 'Not Applicable': 'Nije primjenjivo', 'not applicable': 'nije primjenjivo', 'Not authorised!': 'Nije odobreno!', 'Not Authorised!': 'Nije dopušteno!', 'Not installed or incorrectly configured.': 'Nije instalirano ili nije pravilno konfigurirano.', 'not needed': 'nije potrebno', 'Not Parsed': 'Nije raščlanjeno', 'Not Possible': 'Nije moguće', 'Not Set': 'Nije postavljeno', 'not specified': 'nije navedeno', 'Not Started': 'Nije započelo', 'not writable - unable to cache GeoRSS layers!': 'nemoguće pisati - nije moguće čuvati GeoRSS slojeve!', 'not writable - unable to cache KML layers!': 'nemoguće pisati - nije moguće čuvati KML slojeve!', 'Not yet a Member of any Group': 'Još nije član ni jedne grupe', 'Not yet a Member of any Team': 'Još nije član ni jednog tima', 'Note': 'Bilješka', 'Note added': 'Bilješka dodana', 'Note Details': 'Detalji bilješke', 'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead.': 'Primijetite da ova lista prikazuje samo aktivne volontere. Da biste vidjeli sve ljude registrirane u sistemu, pretražite sa početnog ekrana.', 'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'Primijetite da ova lista prikazuje samo aktivne volontere. 
Da biste vidjeli sve ljude registrirane u sistemu, pretražite sa ovog ekrana', 'Note that when using geowebcache, this can be set in the GWC config.': 'Primijetite da kada se koristi geowebcache, ovo se može postaviti u GWC konfiguraciji.', 'Note Type': 'Vrsta bilješke', 'Note updated': 'Bilješka ažurirana', 'Note: Make sure that all the text cells are quoted in the csv file before uploading': 'Napomena: Obezbijedite da su sve ćelije teksta pod navodnicima u CSV datoteci prije postavljanja', 'Notes': 'Bilješke', 'Notice to Airmen': 'Bilješka za avijatičare', 'Notification frequency': 'Učestanost informisanja', 'Notification method': 'Metod informisanja', 'Notify': 'Informiši', 'num Zoom Levels': 'broj nivoa uvećanja', 'Number': 'Broj', 'Number of Activities': 'Broj aktivnosti', 'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Broj dodatnih ležaja tog tipa će, prema očekivanjima, biti dostupan u ovoj jedinici tokom sljedeća 24 sata.', 'Number of alternative places for studying': 'Broj alternativnih mjesta za učenje', 'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Broj dostupnih kreveta tog tipa u toj jedinici za vrijeme podnošenja izvještaja.', 'Number of Barges': 'Broj šlepova', 'Number of Beneficiaries': 'Broj korisnika', 'Number of bodies found': 'Broj pronađenih tijela', 'Number of Columns': 'Broj kolona', 'Number of Completed Assessment Forms': 'Broj završenih formulara ocjene', 'Number of deaths during the past 24 hours.': 'Broj smrtnih slučajeva u posljednja 24 sata.', 'Number of Disasters': 'Broj katastrofa', 'Number of discharged patients during the past 24 hours.': 'Broj otpuštenih pacijenata iz bolnice u posljednjih 24h.', 'Number of doctors': 'Broj doktora', 'Number of doctors actively working': 'Broj ljekara aktivno zaposlenih', 'Number of Facilities': 'Broj objekata', 'Number of houses damaged, but usable': 'Broj kuća oštećenih ali upotrebljivih', 'Number of houses destroyed/uninhabitable': 'Broj kuća uništenih/neuseljivih', 'Number of in-patients at the time of reporting.': 'Broj pacijenata u bolnici u vrijeme izvještavanja.', 'Number of Incidents': 'Broj incidenata', 'Number of Items': 'Broj stavki', 'Number of items': 'Broj stavki', 'Number of midwives actively working': 'Broj medicinskih babica aktivno zaposlenih', 'Number of newly admitted patients during the past 24 hours.': 'Broj novih primljenih pacijenata u posljednja 24 sata.', 'Number of non-medical staff': 'Broj nemedicinskog osoblja', 'Number of nurses': 'Broj medicinskih sestara', 'Number of nurses actively working': 'Broj medicinskih sestara aktivno zaposlenih', 'Number of Patients': 'Broj pacijenata', 'Number of People Affected': 'Broj ljudi na koje je događaj uticao', 'Number of People Dead': 'Broj umrlih ljudi', 'Number of People Injured': 'Broj povrijeđenih ljudi', 'Number of People Required': 'Broj potrebnih ljudi', 'number of planes': 'broj aviona', 'Number of private schools': 'Broj privatnih škola', 'Number of public schools': 'Broj javnih školskih ustanova', 'Number of religious schools': 'Broj religijskih škola', 'Number of residential units': 'Broj stambenih jedinica', 'Number of residential units not habitable': 'Broj neuseljivih prebivališnih jedinica', 'Number of Resources': 'Broj resursa', 'Number of Responses': 'Broj odgovora', 'Number of Rows': 'Broj redova', 'Number of schools damaged but usable': 'Broj škola oštećenih ali upotrebljivih', 'Number of schools 
destroyed/uninhabitable': 'Broj škola uništenih/neuseljivih', 'Number of schools open before disaster': 'Broj škola otvorenih prije katastrofe', 'Number of schools open now': 'Broj škola trenutno otvorenih', 'Number of teachers affected by disaster': 'Broj nastavnika ugroženih zbog katastrofe', 'Number of teachers before disaster': 'Broj nastavnika prije katastrofe', 'Number of Tugboats': 'Broj remorkera', 'Number of vacant/available beds in this facility. Automatically updated from daily reports.': 'Broj slobodnih/dostupnih kreveta na ovoj lokaciji. Automatski ažurirano iz dnevnih izvještaja.', 'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Broj praznih/slobodnih kreveta u ovoj bolnici. Automatski ažurirano iz dnevnih izvještaja.', 'Number of vacant/available units to which victims can be transported immediately.': 'Broj praznih/dostupnih jedinica u koje žrtve mogu biti odmah transportovane.', 'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'Broj ili kod koji se koristi da bi se označilo mjesto nalazišta, npr. oznaka zastave, koordinate, referentni broj položaja ili slično (ako je dostupno)', 'Number or Label on the identification tag this person is wearing (if any).': 'Broj ili obilježje na identifikacijskoj oznaci koju osoba nosi (ukoliko postoji).', 'Number/Percentage of affected population that is Female & Aged 0-5': 'Broj/Procent pogođene populacije koju čine žene od 0-5 godina', 'Number/Percentage of affected population that is Female & Aged 13-17': 'Broj/procenat pogođene ženske populacije starosti od 13 do 17 godina', 'Number/Percentage of affected population that is Female & Aged 18-25': 'Broj/Procenat pogođene populacije koja je ženskog spola i starosti 18-25', 'Number/Percentage of affected population that is Female & Aged 26-60': 'Broj/procenat pogođene ženske populacije od 26-60 godina', 'Number/Percentage of affected population that is Female & Aged 6-12': 'Broj/postotak pogođene ženske populacije starosne dobi 6-12 godina', 'Number/Percentage of affected population that is Female & Aged 61+': 'Broj/Procenat pogođene populacije koja je ženskog pola i starosti 61+ godina', 'Number/Percentage of affected population that is Male & Aged 0-5': 'Broj/postotak zahvaćenog stanovništva koji su muškarci starosti 0-5 godina', 'Number/Percentage of affected population that is Male & Aged 13-17': 'Broj/postotak pogođene muške populacije starosne dobi od 13-17 godina', 'Number/Percentage of affected population that is Male & Aged 18-25': 'Broj/Procent zahvaćene populacije muškog spola u dobi od 18-25', 'Number/Percentage of affected population that is Male & Aged 26-60': 'Broj/Procent pogođenog stanovništva kojeg čine muškarci od 26-60 godina starosti', 'Number/Percentage of affected population that is Male & Aged 6-12': 'Broj/Postotak zahvaćenih muških osoba između 6 i 12 godina', 'Number/Percentage of affected population that is Male & Aged 61+': 'Broj/procenat muške populacije koja je povrijeđena i imaju 61 i više godina', 'Numbers Only': 'Samo brojevi', 'Numeric': 'Brojčano', 'Nurse': 'Medicinska sestra', 'Nursery Beds': 'Kreveti u jaslicama', 'Nursing Information Manager': 'Upravljanje informacijama o medicinskoj njezi', 'Nutrition': 'Prehrana', 'Nutrition problems': 'Problemi u ishrani', 'NZSEE Level 1': 'NZSEE Nivo 1', 'NZSEE Level 2': 'NZSEE Nivo 2', 'Object': 'Objekat', 'Objectives': 'Ciljevi', 'Observer': 'Posmatrač', 'Obsolete': 'Zastarjelo', 'obsolete': 
'zastario', 'Obstetrics/Gynecology': 'Porodilište/Ginekologija', 'OCR Form Review': 'Pregled OCR forme', 'OCR module is disabled. Ask the Server Administrator to enable it.': 'OCR modul je isključen. Pitajte serverskog administratora da ga omogući.', 'OCR review data has been stored into the database successfully.': 'Podaci za OCR pregled su uspješno stavljeni u bazu podataka.', 'Office': 'Kancelarija', 'Office added': 'Kancelarija dodana', 'Office Address': 'Adresa kancelarije', 'Office deleted': 'Kancelarija obrisana', 'Office Details': 'Detalji o kancelariji', 'Office Phone': 'Službeni telefon', 'Office Type': 'Tip kancelarije', 'Office Type added': 'Dodan tip kancelarije', 'Office Type deleted': 'Obrisan tip kancelarije', 'Office Type Details': 'Detalji tipa kancelarije', 'Office Type updated': 'Ažuriran tip kancelarije', 'Office Types': 'Tipovi kancelarija', 'Office updated': 'Kancelarija ažurirana', 'Offices': 'Kancelarije', 'Offices & Warehouses': 'Uredi i skladišta', 'Offline Sync': 'Vanmrežna sinhronizacija', 'Offline Sync (from USB/File Backup)': 'Vanmrežna sinhronizacija (sa USB-a/rezervne kopije datoteka)', 'Oil Terminal Depth': 'Dubina naftnog terminala', 'Old': 'Star', 'Older people as primary caregivers of children': 'Stariji ljudi, kao primarni staratelji djece', 'Older people in care homes': 'Stariji ljudi u domovima', 'Older people participating in coping activities': 'Stariji ljudi koji učestvuju u aktivnostima prilagođavanja', 'Older people with chronical illnesses': 'Stariji ljudi s hroničnim bolestima', 'Older person (>60 yrs)': 'Starije osobe (preko 60 god.)', 'on': 'uključeno', 'on %(date)s': 'na %(date)s', 'On by default?': 'Uključeno prema podrazumijevanoj vrijednosti?', 'On by default? (only applicable to Overlays)': 'Automatski postavljeno na uključeno? (Jedino se može primijeniti na preklope)', 'On Hold': 'Na čekanju', 'On Order': 'U narudžbi', 'On Scene': 'Na sceni', 'On-site Hospitalization': 'Hospitalizacija na licu mjesta', 'once': 'jednom', 'One time cost': 'Jednokratna cijena', 'One Time Cost': 'Jednokratni trošak', 'One-time': 'Jedanput', 'One-time costs': 'Jednokratni troškovi', "Only Categories of type 'Asset' will be seen in the dropdown.": "Samo kategorije tipa 'Sredstvo' će se vidjeti u padajućoj listi.", "Only Categories of type 'Vehicle' will be seen in the dropdown.": "Samo kategorije tipa 'Vozilo' će se vidjeti u padajućoj listi.", "Only Items whose Category are of type 'Vehicle' will be seen in the dropdown.": "Samo stavke čija je kategorija tipa 'Vozilo' će se vidjeti u padajućoj listi.", 'Only showing accessible records!': 'Prikazujem samo pristupačne zapise!', 'Only use this button to accept back into stock some items that were returned from a delivery to beneficiaries who do not record the shipment details directly into the system': 'Koristite ovo dugme za prihvatanje nazad u zalihu stavki koje su vraćene iz isporuke korisnjicima koji nisu zabilježili detalje o isporuci direktno u sistem', 'Only use this button to confirm that the shipment has been received by a destination which will not record the shipment directly into the system': 'Koristite ovo dugme da potvrdite da je pošiljka stigla na odredište bez bilježenja pošiljke direktno u sistem', 'Oops! something went wrong on our side.': 'Ups! Nešto je krenulo po zlu.', 'Oops! Something went wrong...': 'Ups! 
Nešto nije u redu...', 'Opacity': 'Neprozirnost', 'Opacity (1 for opaque, 0 for fully-transparent)': 'Neprozirnost (1 za neprozirno, 0 potpuno providno)', 'Open': 'Otvori', 'Open area': 'Otvoreno područje', 'Open Chart': 'Otvori dijagram', 'open defecation': 'otvorena defekacija', 'Open in New Tab': 'Otvori u novoj kartici', 'Open Incidents': 'Otvoreni incidenti', 'Open Map': 'Otvori mapu', 'Open recent': 'Otvori skorašnje', 'Open Report': 'Otvori izvještaj', 'Open Table': 'Otvori tabelu', 'Open Tasks for %(project)s': 'Otvoreni zadaci za %(project)s', 'Open Tasks for Project': 'Otvoreni zadaci za projekt', 'Opening Times': 'Radna vremena', 'OpenStreetMap Layer': 'OpenStreetMap sloj', 'OpenStreetMap OAuth Consumer Key': 'OpenStreetMap OAuth korisnički ključ', 'OpenStreetMap OAuth Consumer Secret': 'OpenStreetMap OAuth korisnička tajna lozinka', 'OpenWeatherMap Layer': 'OpenWeatherMap sloj', 'Operating Rooms': 'Operacione sale', 'Operational': 'Operativno', 'Opportunities to Volunteer On-Site?': 'Prilike za volontera na lokaciji?', 'Opportunities to Volunteer Remotely?': 'Prilike za volontera za udaljeni rad?', 'Option': 'Opcija', 'Option Other': 'Opcija druga', 'Optional': 'Neobavezno', 'optional': 'opcionalno', 'Optional link to an Incident which this Assessment was triggered by.': 'Izborni link na incident koji je potaknuo ovu procjenu.', 'Optional password for HTTP Basic Authentication.': 'Opcionalna lozinka za HTTP osnovnu autentifikaciju.', 'Optional selection of a background color.': 'Opcionalan izbor boje pozadine.', 'Optional selection of a MapServer map.': 'Opcionalan izbor MapServer karte.', 'Optional selection of an alternate style.': 'Neobavezna selekcija alternativnog stila.', 'Optional Subject to put into Email - can be used as a Security Password by the service provider': 'Neobavezni predmet za staviti u Email - može biti korišten kao sigurnosna šifra od strane pružatelja usluga.', 'Optional username for HTTP Basic Authentication.': 'Opcionalno ime korisnika za HTTP osnovnu autentifikaciju.', 'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': 'Opcionalno. Ako želite stilizirati karakteristike na osnovu vrijednosti atributa, ovdje izaberite atribute koje ćete koristiti.', 'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, the workspace is the FeatureType Name part before the colon(:).': 'Opcionalno. U GeoServer-u, ovo je imenski prostor (ne ime!) radnog prostora URI. U sklopu WFS getCapabilities, radni prostor je FeatureType ime prije dvotačke (:).', 'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Opcionalno. U GeoServer-u, ovo je URI imenskog prostora (ne ime!) . U sklopu WFS getCapabilities, ovo je dio tipa osobine prije dvotačke (:).', 'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Opcionalno. U GeoServer-u, ovo je imenski prostor radnog prostora URI. U sklopu WFS getCapabilities, ovo je dio tipa osobine prije dvotačke (:).', 'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'Izborno. Naziv elementa čiji sadržaji trebaju biti URL slikovne datoteke stavljene u Popup-e.', 'Optional. The name of an element whose contents should be put into Popups.': 'Opcionalno. 
Ime elementa čiji sadržaj bi trebao biti unutar iskočnih prozora.', "Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "Opcionalno. Ime geometrijske kolone. U postGIS ovo je automatski 'the_geom'.", 'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'Opcionalno. Ime šeme. Na Geoserveru ovo ima formu http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.', 'Options': 'Opcije', 'or': 'ili', 'Or add a new language code': 'Ili dodaj novu šifru jezika', 'or import from csv file': 'ili uvoz iz CSV dokumenta', 'OR Reason': 'OR razlog', 'OR Status Reason': 'OR Status razlog', 'Order': 'Narudžba', 'Order canceled': 'Narudžba otkazana', 'Order Created': 'Narudžba kreirana', 'Order Details': 'Detalji narudžbe', 'Order Due %(date)s': 'Rok narudžbe %(date)s', 'Order Item': 'Stavka narudžbe', 'Order updated': 'Narudžba ažurirana', 'Ordered list ... (#TODO [String])': 'Uređena lista ... (#TODO [String])', 'Orders': 'Narudžbe', 'Organization': 'Organizacija', 'Organization added': 'Dodana organizacija', 'Organization added to Policy/Strategy': 'Organizacija dodana u politiku/strategiju', 'Organization added to Project': 'Organizacija dodana u projekt', 'Organization deleted': 'Obrisana organizacija', 'Organization Details': 'Detalji organizacije', 'Organization Domain added': 'Domena organizacije dodana', 'Organization Domain deleted': 'Domena organizacije obrisana', 'Organization Domain Details': 'Detalji domene organizacije', 'Organization Domain updated': 'Domena organizacije ažurirana', 'Organization Domains': 'Domene organizacije', 'Organization Group': 'Grupa organizacija', 'Organization group': 'Grupa organizacija', 'Organization Needs': 'Potrebe organizacije', 'Organization Needs added': 'Potrebe organizacije dodane', 'Organization Needs deleted': 'Potrebe organizacije obrisane', 'Organization Needs updated': 'Potrebe organizacije ažurirane', 'Organization Registry': 'Registar Organizacija', 'Organization removed from Policy/Strategy': 'Organizacija uklonjena iz politike/strategije', 'Organization removed from Project': 'Organizacija uklonjena sa projekta', 'Organization Type': 'Tip organizacije', 'Organization Type added': 'Vrsta organizacije dodana', 'Organization Type deleted': 'Vrsta organizacije obrisana', 'Organization Type Details': 'Detalji tipa organizacije', 'Organization Type updated': 'Vrsta organizacije ažurirana', 'Organization Types': 'Tipovi organizacije', 'Organization Units': 'Organizacione jedinice', 'Organization updated': 'Ažurirana organizacija', 'Organization(s)': 'Organizacije', 'Organization/Branch': 'Organizacija/Ogranak', 'Organization/Supplier': 'Organizacija/dobavljač', 'Organization:': 'Organizacija:', 'Organizations': 'Organizacije', 'Organizations / Teams / Facilities': 'Organizacije / Timovi / Objekti', 'Organized By': 'Organizovao', 'Origin': 'Porijeklo', 'Origin of the separated children': 'Porijeklo odvojene djece', 'Original': 'Izvorno', 'Original Quantity': 'Izvorna količina', 'Original Value per Pack': 'Izvorna vrijednost po paketu', 'OSM file generation failed!': 'Generisanje OSM datoteke neuspjelo!', 'OSM file generation failed: %s': 'Generisanje OSM datoteke neuspjelo: %s', 'Other': 'Ostalo', 'other': 'drugo', 'Other (describe)': 'Ostalo (opis)', 'Other (specify)': 'Ostalo (navedi)', 'Other activities of boys 13-17yrs': 'Ostale aktivnosti dječaka od 13-17 
godina', 'Other activities of boys 13-17yrs before disaster': 'Druge aktivnosti dječaka uzrasta od 13 do 17 godina, prije nesreće', 'Other activities of boys <12yrs': 'Druge aktivnosti dječaka mlađih od 12 godina', 'Other activities of boys <12yrs before disaster': 'Druge aktivnosti dječaka <12 godina prije nesreće', 'Other activities of girls 13-17yrs': 'Druge aktivnosti djevojčica 13-17 godina', 'Other activities of girls 13-17yrs before disaster': 'Ostale aktivnosti djevojčica 13-17 godina prije katastrofe', 'Other activities of girls<12yrs': 'Druge aktivnosti djevojčica <12 godina', 'Other activities of girls<12yrs before disaster': 'Ostale aktivnosti djevojčica <12 godina prije katastrofe', 'Other Address': 'Druga adresa', 'Other alternative infant nutrition in use': 'Druga alternativa u prehrani dojenčadi u upotrebi', 'Other alternative places for study': 'Alternativna mjesta za učenje', 'Other assistance needed': 'Potrebna druga pomoć', 'Other assistance, Rank': 'Ostale vrste pomoći, Poredak', 'Other current health problems, adults': 'Ostali aktuelni zdravstveni problemi odraslih', 'Other current health problems, children': 'Drugi trenutni zdravstveni problemi, djeca', 'Other Details': 'Drugi detalji', 'Other events': 'Drugi događaji', 'Other Evidence': 'Ostali dokazi', 'Other factors affecting school attendance': 'Drugi faktori koji utiču na pohađanje škole', 'Other Faucet/Piped Water': 'Druga voda iz slavine/vodovoda', 'Other Inventories': 'Druge zalihe', 'Other Isolation': 'Druge izolacije', 'Other major expenses': 'Drugi veći troškovi', 'Other Name': 'Drugo ime', 'Other non-food items': 'Ostali neprehrambeni artikli', 'Other recommendations': 'Ostale preporuke', 'Other residential': 'Ostala prebivališta', 'Other school assistance received': 'Druga nastavna pomoć primljena', 'Other school assistance, details': 'Druga školska pomoć, detalji', 'Other school assistance, source': 'Ostale pomoći u školi, izvor', 'Other settings can only be set by editing a file on the server': 'Ostale postavke mogu biti postavljene jedino uređivanjem datoteke na serveru', 'Other side dishes in stock': 'Ostali prilozi u zalihama', 'Other types of water storage containers': 'Drugi tipovi spremnika za vodu', 'Other Users': 'Drugi korisnici', 'Other ways to obtain food': 'Drugi načini za dobavu hrane', 'Others': 'Ostali', 'Out': 'Van', 'Outbound Mail settings are configured in models/000_config.py.': 'Postavke odlazne pošte su konfigurisane u models/000_config.py.', 'Outbox': 'Za slanje', 'Outcomes, Impact, Challenges': 'Izlazi, utjecaj, izazovi', 'Outgoing SMS handler': 'Rukovodilac odlaznih SMS poruka', 'Outgoing SMS Handler': 'Upravljač izlaznog SMS', 'Output': 'Izlaz', 'Output added': 'Izlaz dodan', 'Output removed': 'Izlaz uklonjen', 'Output updated': 'Izlaz ažuriran', 'Outputs': 'Izlazi', 'oval': 'ovalno', 'over one hour': 'preko jednog sata', 'Overall Hazards': 'Ukupni rizici', 'Overall status of the clinical operations.': 'Ukupni status kliničkih operacija.', 'Overall status of the facility operations.': 'Ukupni status operacija objekta.', 'Overhead falling hazard': 'Opasnost od predmeta koji padaju s visine', 'Overland Flow Flood': 'Poplava kopnenog toka', 'Overlays': 'Preklopi', 'Overview': 'Pregled', 'Owned By (Organization/Branch)': 'Vlasnik (organizacija/grana)', 'Owned Records': 'Zapisi u vlasništvu', 'Owned Resources': 'Posjedovani resursi', 'Ownership': 'Vlasništvo', 'Owning organization': 'Vlasnička organizacija', 'Pacific Islands Framework for Action on Climate Change. 
Applicable to projects in Pacific countries only': 'Okvir pacifičkih ostrva za akcije o klimatskim promjenama. Primjenjivo samo na projekte u pacifičkim zemljama', 'Pack': 'Paket', 'pack of 10': 'paket od 10', 'Packet': 'Paket', 'Packs': 'Paketi', 'Page': 'Stranica', 'painted': 'ofarbano', 'Pan Map: keep the left mouse button pressed and drag the map': 'Prevlačenje mape: držite lijevo dugme miša pritisnuto i vucite mapu', 'Papua New Guinea': 'Papua Nova Gvineja', 'Paraguay': 'Paragvaj', 'Parameters': 'Parametri', 'Parapets, ornamentation': 'Parapeti, ukrašavanja', 'Parent': 'Roditelj', 'Parent Item': 'Nadređena stavka', "Parent level should be higher than this record's level. Parent level is": 'Roditeljski nivo bi trebao biti viši od nivoa ovog zapisa. Roditeljski nivo je', 'Parent needs to be of the correct level': 'Roditelj treba da bude na odgovarajućoj razini', 'Parent needs to be set': 'Roditelj treba biti postavljen', 'Parent needs to be set for locations of level': 'Roditelj treba biti postavljen na lokacijama nivoa', 'Parent Office': 'Matični ured', 'Parents/Caregivers missing children': 'Roditelji/Staratelji djece koja su nestala', 'Parking Area': 'Parking područje', 'Parking/Tarmac Space Capacity': 'Kapacitet parking prostora', 'Parking/Tarmac Space Units': 'Jedinice parking prostora', 'Parse': 'Raščlani', 'Parsed': 'Raščlanjeno', 'Parser connected': 'Povezano kroz parser', 'Parser Connection Details': 'Detalji parserske konekcije', 'Parser connection removed': 'Uklonjena parserska konekcija', 'Parser connection updated': 'Parserska konekcija je ažurirana', 'Parser Connections': 'Parserske konekcije', 'Parsing Settings': 'Postavke parsera', 'Parsing Status': 'Status parsera', 'part': 'dio', 'Part of the URL to call to access the Features': 'Dio URL koji se poziva za pristup objektima', 'Partial': 'Djelimično', 'Partial Database Synchronization': 'Djelomična sinhronizacija sa bazom podataka', 'Participant': 'Učesnik', 'Participant added': 'Učesnik dodan', 'Participant deleted': 'Učesnik obrisan', 'Participant Details': 'Detalji učesnika', 'Participant updated': 'Učesnik ažuriran', 'Participants': 'Učesnici', 'Partner added': 'Partner dodan', 'Partner deleted': 'Partner izbrisan', 'Partner Details': 'Detalji partnera', 'Partner Organization added': 'Organizacija partnera dodana', 'Partner Organization deleted': 'Organizacija partnera obrisana', 'Partner Organization Details': 'Detalji partnerske organizacije', 'Partner Organization updated': 'Organizacija partnera ažurirana', 'Partner Organizations': 'Partnerske organizacije', 'Partner updated': 'Partner ažuriran', 'Partners': 'Partneri', 'Pashto': 'Pašto', 'Pass': 'Prolaz', 'Passport': 'Pasoš', 'Password': 'Lozinka', "Password fields don't match": 'Polja za lozinku se ne podudaraju', 'Password to use for authentication at the remote site.': 'Lozinka za prijavu na udaljeni sajt.', 'Path': 'Putanja', 'Pathology': 'Patologija', 'Patient': 'Pacijent', 'Patient added': 'Pacijent dodan', 'Patient deleted': 'Pacijent obrisan', 'Patient Details': 'Detalji o pacijentu', 'Patient Tracking': 'Praćenje pacijenta', 'Patient Transportation Ambulance': 'Vozilo hitne pomoći za transport pacijenata', 'Patient updated': 'Pacijent ažuriran', 'Patients': 'Pacijenti', 'PDF File': 'PDF datoteka', 'Pediatric ICU': 'Pedijatrijska intenzivna njega', 'Pediatric Psychiatric': 'Pedijatrijsko psihijatrijsko', 'Pediatrics': 'Pedijatrija', 'Peer': 'Saradnik', 'Peer added': 'Saradnik dodan', 'Peer deleted': 'Saradnik obrisan', 'Peer 
Details': 'Detalji o saradniku', 'Peer not allowed to push': 'Saradniku nije dozvoljen push', 'Peer Registration': 'Registracija saradnika', 'Peer Registration Details': 'Detalji registracije saradnika', 'Peer Registration Request': 'Zahtjev za registraciju saradnika', 'Peer registration request added': 'Dodat zahtjev za registraciju saradnika', 'Peer registration request deleted': 'Zahtjev za registracijom saradnika je obrisan', 'Peer registration request updated': 'Zahtjev za registraciju saradnika ažuriran', 'Peer Type': 'Tip saradnika', 'Peer UID': 'UID saradnika', 'Peer updated': 'Ažuriran saradnik', 'Peers': 'Saradnici', 'pending': 'čeka', 'Pending': 'Na čekanju', 'Pending Requests': 'Zahtjevi na čekanju', 'people': 'ljudi', 'People': 'Ljudi', 'People added to Commitment': 'Ljudi dodani u zaduženje', 'People Needing Food': 'Ljudi koji trebaju hranu', 'People Needing Shelter': 'Ljudi kojima je potrebno sklonište', 'People Needing Water': 'Ljudi koji trebaju vodu', 'People removed from Commitment': 'Osobe odstranjene iz zaduženja', 'People Trapped': 'Zarobljeni ljudi', 'People with chronical illnesses': 'Ljudi s hroničnim bolestima', 'per': 'po', 'Percentage': 'Procenat', 'Performance Rating': 'Ocjena izvedbe', 'Permanent Home Address': 'Stalna kućna adresa', 'Person': 'Osoba', 'Person 1': 'Osoba 1', 'Person 1, Person 2 are the potentially duplicate records': 'Osoba 1, Osoba 2 su mogući dupli zapisi', 'Person 2': 'Osoba 2', 'Person added': 'Osoba dodana', 'Person added to Commitment': 'Osoba dodana zaduženju', 'Person added to Group': 'Osoba dodana u grupu', 'Person added to Team': 'Osoba dodana u tim', 'Person Data': 'Lični podaci', 'Person De-duplicator': 'Deduplikator osoba', 'Person deleted': 'Osoba obrisana', 'Person Details': 'Detalji osobe', 'Person details updated': 'Detalji o osobi ažurirani', 'Person Entity': 'Jedinka osobe', 'Person Finder': 'Nalazač osoba', 'Person found': 'Osoba pronađena', 'Person interviewed': 'Osoba ispitana', 'Person Management': 'Upravljanje osobama', 'Person missing': 'Nedostaje osoba', 'Person must be specified!': 'Osoba se mora navesti!', 'Person or OU': 'Osoba ili OJ', 'Person Registry': 'Registar osoba', 'Person removed from Commitment': 'Osoba odstranjena iz zaduženja', 'Person removed from Group': 'Osoba odstranjena iz grupe', 'Person removed from Team': 'Osoba odstranjena iz tima', 'Person reporting': 'Osoba koja je prijavila', 'Person Transportation Tactical Vehicle': 'Taktičko vozilo za transport osoba', 'Person updated': 'Osoba ažurirana', 'Person who has actually seen the person/group.': 'Osoba koja je zapravo vidjela osobu/grupu.', "Person's Details": 'Detalji osobe', "Person's Details added": 'Detalji o osobi dodani', "Person's Details deleted": 'Detalji o osobi obrisani', "Person's Details updated": 'Detalji o osobi ažurirani', 'Person.': 'Osoba.', 'Person/Group': 'Osoba/Grupa', 'Personal': 'Lično', 'Personal Data': 'Lični podaci', 'Personal Effects': 'Lične stvari', 'Personal Effects Details': 'Detalji ličnih stvari', 'Personal impact of disaster': 'Lični utjecaj katastrofe', 'Personal Map': 'Lična mapa', 'Personal Profile': 'Lični profil', 'Persons': 'Osobe', 'Persons in institutions': 'Osobe u institucijama', 'Persons per Dwelling': 'Osoba po stambenoj jedinici', 'Persons with disability (mental)': 'Osobe sa (mentalnim) invaliditetom', 'Persons with disability (physical)': 'Osobe s invaliditetom (tjelesnim)', "Persons' Details": 'Detalji o osobama', 'Philippines': 'Filipini', 'Phone': 'Telefon', 'Phone #': 'Telefon #', 'Phone 1': 
'Telefon 1', 'Phone 2': 'Telefon 2', 'Phone number is required': 'Potreban je telefonski broj', "Phone number to donate to this organization's relief efforts.": 'Telefonski broj za doniranje naporima pomoći ove organizacije.', 'Phone/Business': 'Telefon/Posao', 'Phone/Emergency': 'Telefon/Hitni', 'Phone/Exchange': 'Telefon/centrala', 'Phone/Exchange (Switchboard)': 'Telefonska centrala', 'Photo': 'Fotografija', 'Photo added': 'Fotografija dodana', 'Photo deleted': 'Fotografija obrisana', 'Photo Details': 'Detalji o fotografiji', 'Photo Taken?': 'Fotografija napravljena?', 'Photo updated': 'Fotografija ažurirana', 'Photograph': 'Fotografija', 'Photos': 'Fotografije', 'Physical': 'Fizički', 'Physical Description': 'Fizički opis', 'Physical Safety': 'Fizička sigurnost', 'Picture': 'Slika', 'Picture upload and finger print upload facility': 'Mogućnost uploada slike i otiska prsta', 'piece': 'dio', 'PIFACC Priorities': 'PIFACC Prioriteti', 'PIFACC-1: Implementing Tangible, On-Ground Adaptation Measures': 'PIFACC-1: Implementacija realnih mjerenja s adaptacijom na zemljištu', 'PIFACC-2: Governance and Decision Making': 'PIFACC-2: Vlada i donošenje odluka', 'PIFACC-3: Improving our understanding of climate change': 'PIFACC-3: Poboljšanje razumijevanja klimatskih promjena', 'PIFACC-4: Education, Training and Awareness': 'PIFACC-4: Obrazovanje, obuka i informisanost', 'PIFACC-5: Mitigation of Global Greenhouse Gas Emissions': 'PIFACC-5: Smanjenje globalnog ispuštanja gasova koji izazivaju efekat staklene bašte', 'PIFACC-6: Partnerships and Cooperation': 'PIFACC-6: Partnerstvo i saradnja', 'PIL (Python Image Library) not installed': 'PIL (Python Image Library) nije instalirana', 'PIN': 'PIN', 'PIN number': 'PIN broj', 'PIN number ': 'PIN broj - osobni identifikacijski broj ', 'Pipe': 'Cijev', 'pit': 'jama', 'pit latrine': 'septička jama poljskog zahoda', 'PL Women': 'PL žene', 'Place': 'Mjesto', 'Place for solid waste disposal': 'Mjesto za ostavljanje čvrstog smeća', 'Place of Birth': 'Mjesto rođenja', 'Place of find': 'Mjesto pronalaska', 'Place of Recovery': 'Mjesto oporavka', 'Place on Map': 'Mjesto na karti', 'Places for defecation': 'Mjesta za vršenje nužde', 'Places the children have been sent to': 'Mjesta gdje su djeca poslana', 'Planned': 'Planirano', 'Planned %(date)s': 'Planirano %(date)s', 'Planned Procurement': 'Planirana nabava', 'Planned Procurement Item': 'Planirana stavka nabavke', 'Planned Procurements': 'Planirane nabave', 'Playing': 'Izvršava', 'Please choose a type': 'Odaberite tip', "Please come back after sometime if that doesn't help.": 'Molim vratite se nakon nekog vremena, ako to ne pomogne.', 'Please correct all errors.': 'Molim da ispravite sve greške.', 'Please do not remove this sheet': 'Molim da ne brišete ovaj list', 'Please enter a %(site)s': 'Molimo unesite %(site)s', 'Please enter a %(site)s OR an Organization': 'Molimo unesite %(site)s ILI organizaciju', 'Please enter a first name': 'Molimo unesite ime', 'Please enter a last name': 'Molim, unesite prezime', 'Please enter a number only': 'Molim unesite samo broj', 'Please enter a site OR a location': 'Molimo unesite mjesto ILI lokaciju', 'Please enter a valid email address': 'Unesite važeću adresu elektronske pošte', 'Please enter an Organization/Supplier': 'Molimo unesite organizaciju/dobavljača', 'Please enter details of the Request': 'Molim unesite detalje zahtjeva', 'Please enter request details here.': 'Molim unesite detalje zahtjeva ovdje.', 'Please enter the details on the next screen.': 'Molim unesite 
detalje na sljedećem ekranu.', 'Please enter the first few letters of the Person/Group for the autocomplete.': 'Molimo unesite prvih nekoliko slova Osobe/Grupe za automatsko popunjavanje.', 'Please enter the recipient': 'Molimo dodajte primatelja', 'Please enter the recipient(s)': 'Molimo unesite primatelja', 'Please fill this!': 'Molim Vas popunite ovo!', 'Please give an estimated figure about how many bodies have been found.': 'Molimo dajte okvirnu procjenu koliko je tijela pronađeno.', "Please provide as much detail as you can, including the URL(s) where the bug occurs or you'd like the new feature to go.": 'Molim navedite što više detalja, uključujući URL gdje se greška dešava ili želite nove mogućnosti.', 'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': 'Molimo unesite URL stranice na koju mislite, opis onoga što očekujete da će se desiti i onoga što se ustvari dogodilo', 'Please record Beneficiary according to the reporting needs of your project': 'Molim zapišite korisnika prema potrebama izvještavanja vašeg projekta', 'Please report here where you are:': 'Molim Vas da ovdje prijavite gdje se nalazite:', 'Please select': 'Označite, molim', 'Please Select a Facility': 'Molim odaberite objekat', 'Please select a valid image!': 'Molim izaberite ispravnu sliku!', 'Please select another level': 'Molimo odaberite drugi nivo', 'Please select exactly two records': 'Molim odaberite tačno dva zapisa', 'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': 'Molimo da se prijavite koristeći svoj broj mobilnog telefona, jer nam to omogućava da vam šaljemo SMS poruke. Molimo da napišete kompletan pozivni broj.', 'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Molimo detaljno specificirajte probleme i prepreke sa pravilnim pristupanjem bolesti (u brojevima, gdje je prikladno). Možete također dodati prijedloge gdje bi situacija mogla biti poboljšana.', 'Please use this field to record any additional information, including a history of the record if it is updated.': 'Molimo koristite ovo polje da popunite dodatne informacije, uključujući istorijat zapisa ako je ažuriran.', 'Please use this field to record any additional information, including any Special Needs.': 'Molimo Vas iskoristite ovo polje da snimite dodatne informacije, uključujući bilo kakve specijalne potrebe.', 'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Molimo koristite ovo polje da zabilježite dodatne informacije, poput Ushahidi ID instance. 
Uključite istorijat zapisa ako je ažuriran.', 'Pledge': 'Obećati podršku', 'Pledge Aid': 'Ponudi pomoć', 'Pledge Aid to match these Requests': 'Ponudi pomoć koja je usaglašena s ovim zahtjevima', 'Pledge Status': 'Status ponude pomoći', 'Pledge Support': 'Obećati podršku', 'Pledges': 'Ponude za pomoć', 'PoI': 'Tačka interesa', 'PoI Type added': 'Tip tačke interesa dodan', 'PoI Type deleted': 'Tip tačke interesa obrisan', 'PoI Type Details': 'Detalji o vrsti tačke interesa', 'PoI Type updated': 'Tip tačke interesa ažuriran', 'PoI Types': 'Tipovi tačaka interesa', 'Point': 'Tačka', 'Point of Interest added': 'Tačka interesa dodana', 'Point of Interest deleted': 'Tačka interesa obrisana', 'Point of Interest Details': 'Detalji tačaka interesa', 'Point of Interest updated': 'Tačka interesa ažurirana', 'pointed': 'šiljasto', 'Points of Interest': 'Tačke interesa', 'PoIs': 'Tačke interesa', 'PoIs successfully imported.': 'Tačke interesa uspješno uvezene.', 'Poisoning': 'Trovanje', 'Poisonous Gas': 'Otrovni gas', 'Poland': 'Poljska', 'Police': 'Policija', 'Policies & Strategies': 'Politike i strategije', 'Policy': 'Pravila', 'Policy or Strategy': 'Politika ili strategija', 'Policy or Strategy added': 'Politika ili strategija dodana', "Policy or Strategy added, awaiting administrator's approval": 'Dodana politika ili strategija, čeka se na potvrdu administratora.', 'Policy or Strategy deleted': 'Politika ili strategija obrisana', 'Policy or Strategy updated': 'Politika ili strategija ažurirana', 'Poll': 'Anketa', 'Pollution and other environmental': 'Zagađenja i druge okolišne', 'Polygon': 'Poligon', 'Polygon reference of the rating unit': 'Poligon referenca jedinice za procjenu', 'Poor': 'Slabo', 'Population': 'Populacija', 'Population and number of households': 'Stanovništvo i broj domaćinstava', 'Population Statistic added': 'Statistika o populaciji dodana', 'Population Statistic deleted': 'Statistika stanovništva izbrisana', 'Population Statistic Details': 'Statističke pojedinosti populacije', 'Population Statistic updated': 'Statistika stanovništva osvježena', 'Population Statistics': 'Statistika o populaciji', 'Popup Fields': 'Popup stavke', 'Popup Label': 'Iskočna oznaka', 'Porridge': 'Kaša', 'Port Closure': 'Zatvaranje luke', 'Portable App': 'Prenosiva aplikacija', 'Portuguese': 'Portugalski', 'Portuguese (Brazil)': 'Portugalski (Brazil)', 'Position': 'Pozicija', 'Position added': 'Pozicija dodana', 'Position Catalog': 'Katalog Položaja', 'Position deleted': 'Pozicija izbrisana', 'Position Details': 'Detalji pozicije', 'Position in tour': 'Položaj na turi', 'Position updated': 'Pozicija ažurirana', 'Positions': 'Položaji', 'Post': 'Ubacivi tekst', 'Post added': 'Ubacivi tekst dodan', 'Post deleted': 'Ubacivi tekst obrisan', 'Post Details': 'Detalji ubacivog teksta', 'Post removed': 'Ubacivi tekst uklonjen', 'Post set as Module/Resource homepage': 'Ubacivi tekst postavljen kao početna stranica modula/resursa', 'Post Tagged': 'Ubacivi tekst označen', 'Post updated': 'Ubacivi tekst ažuriran', 'Post-impact shelterees are there for a longer time, so need more space to Sleep.': 'Skloništa poslije nesrećnog utjecaja su ovdje za duže vrijeme, pa je potrebno više prostora za spavanje.', 'Postcode': 'Poštanski broj', 'Posted on': 'Postavljeno', 'postponed': 'odgođeno', 'Posts': 'Ubacivi tekstovi', 'Poultry': 'Perad', 'Poultry restocking, Rank': 'Obnova zaliha peradi, Rang', 'Pounds': 'Funte', 'Power': 'Napajanje', 'Power Failure': 'Nestanak struje', 'Power Outage': 'Nestanak napajanja', 'Power 
Supply Type': 'Vrsta napajanja', 'Powered by Sahana Eden': 'Omogućeno od strane Sahana Eden', 'Pre-cast connections': 'Montažne veze', 'Preferred Name': 'Preferirano ime', 'Pregnant women': 'Trudnice', 'Preliminary': 'Preliminarno', 'preliminary template or draft, not actionable in its current form': 'preliminarni šablon ili nacrt, nije primjenjiv u trenutnom obliku', 'Prepare Shipment': 'Pripremi pošiljku', 'Presence': 'Prisustvo', 'Presence Condition': 'Stanje prisutnosti', 'Presence Log': 'Zapisnik prisustva', 'Previous': 'Prethodni', 'previous 100 rows': 'prethodnih 100 redova', 'Previous View': 'Prethodni prikaz', 'primary incident': 'primarni incident', 'Primary Name': 'Primarni naziv', 'Primary Occupancy': 'Primarno zanimanje', 'Principal': 'Najvažnije', 'Print': 'Štampa', 'Priority': 'Prioritet', 'Priority from 1 to 9. 1 is most preferred.': 'Prioritet od 1 do 9. 1 je najviše željen.', 'Priority Level': 'Nivo prioriteta', 'Privacy': 'Privatnost', 'Private': 'Privatno', 'Problem added': 'Problem dodan', 'Problem Administration': 'Upravljanje problemima', 'Problem connecting to twitter.com - please refresh': 'Problem konektovanja na twitter.com - molimo osvježite stranicu', 'problem connecting to twitter.com - please refresh': 'Problem konektovanja na twitter.com - molimo osvježite stranicu', 'Problem deleted': 'Izbrisan problem', 'Problem Details': 'Detalji problema', 'Problem Group': 'Grupa problema', 'Problem Title': 'Naslov problema', 'Problem updated': 'Problem ažuriran', 'Problems': 'Problemi', 'Problems? Please call': 'Problemi? Molim pozovite', 'Procedure': 'Procedura', 'Process Received Shipment': 'Obradi primljenu isporuku', 'Process Shipment to Send': 'Procesiraj pošiljku za slanje', 'Processed with KeyGraph?': 'Obrađeno koristeći KeyGraph?', 'Processing': 'Obrada', 'Procured': 'Nabavljeno', 'Product Description': 'Opis proizvoda', 'Profession': 'Profesija', 'Professional Experience': 'Profesionalno iskustvo', 'Professional Experience added': 'Profesionalno iskustvo dodano', 'Professional Experience deleted': 'Profesionalno iskustvo obrisano', 'Professional Experience Details': 'Detalji profesionalnog iskustva', 'Professional Experience updated': 'Profesionalno iskustvo ažurirano', 'Profile': 'Profil', 'Profile Configuration': 'Konfiguracija Profila', 'Profile Configuration removed': 'Konfiguracija profila izbrisana', 'Profile Configuration updated': 'Ažurirana konfiguracija profila', 'Profile Configurations': 'Konfiguracije profila', 'Profile Configured': 'Profil konfigurisan', 'Profile Details': 'Detalji profila', 'Profile Page': 'Stranica profila', 'Profile Picture': 'Slika profila', 'Profile Picture?': 'Slika profila?', 'Profiles': 'Profili', 'Program added': 'Program dodan', 'Program deleted': 'Program obrisan', 'Program Details': 'Detalji programa', 'Program Hours (Month)': 'Programski sati (mjeseci)', 'Program Hours (Year)': 'Programski sati (godina)', 'Program updated': 'Program ažuriran', 'Programs': 'Programi', 'Project': 'Projekt', 'Project Activity': 'Aktivnosti projekta', 'Project added': 'Projekat je dodan', 'Project Calendar': 'Kalendar projekta', 'Project deleted': 'Projekat je obrisan', 'Project Details': 'Detalji Projekta', 'Project Framework': 'Radni okvir projekta', 'Project has no Lat/Lon': 'Projekat nema Lat/Lon koordinate', 'Project Management': 'Upravljanje Projektom', 'Project Name': 'Ime projekta', 'Project not Found': 'Projekt nije nađen', 'Project Organization Details': 'Detalji organizacije projekta', 'Project Organization updated': 
'Organizacija projekta ažurirana', 'Project Organizations': 'Organizacije projekta', 'Project Report': 'Izvještaj projekta', 'Project Status': 'Status projekta', 'Project Time Report': 'Izvještaj o projektnom vremenu', 'Project Tracking': 'Praćenje projekata', 'Project updated': 'Projekat je ažuriran', 'Projection': 'Projekcija', 'Projection added': 'Projekcija je dodana', 'Projection deleted': 'Projekcija je obrisana', 'Projection Details': 'Detalji projekcije', 'Projection Type': 'Tip projekcije', 'Projection updated': 'Projekcija je ažurirana', 'Projections': 'Projekcije', 'Projects': 'Projekti', 'Projects Map': 'Mapa projekata', 'Prominent Adams apple': 'Uočljiva Adamova jabučica', 'pronounced': 'izgovoreno', 'Property reference in the council system': 'Preporuka vlasništva u sistemu vijeća', 'Proposed': 'Predloženo', 'Protected resource': 'Zaštićeni resurs', 'Protection': 'Zaštita', 'Protocol': 'Protokol', 'Provide a password': 'Obezbijedi lozinku', 'Provide an optional sketch of the entire building or damage points. Indicate damage points.': 'Osiguravanje dodatnih skica cijele zgrade ili oštećenih tačaka. Navesti oštećene tačke.', 'Provide Metadata for your media files': 'Obezbijedi metapodatke za vaše medijske datoteke', 'Province': 'Područje', 'Proxy Server URL': 'URL za posrednički (proxy) server', 'Proxy-server': 'Proxy server', 'Psychiatrics/Adult': 'Psihijatrija/odrasli', 'Psychiatrics/Pediatric': 'Psihijatrija/Pedijatrija', 'Pubic hair, Colour': 'Dlake na polnim organima, boja', 'Pubic hair, Extent': 'Dlake na polnim organima, dužina', 'Public': 'Javno', 'Public and private transportation': 'Javni i privatni transport', 'Public assembly': 'Javni skup', 'Public Event': 'Javni događaj', 'Published on': 'Objavljeno', 'pull': 'povuci', 'pull and push': 'povuci i gurni', 'Pull tickets from external feed': 'Povucite kartice iz vanjskog izvora', 'Punjabi': 'Pandžabi', 'Purchase': 'Kupovina', 'Purchase date': 'Datum kupovine', 'Purchase Date': 'Datum nabave', 'Purchase Price': 'Nabavna cijena', 'Purpose': 'Namjena', 'push': 'gurni', 'Push tickets to external system': 'Guranje kartica u vanjski sistem', 'Put a choice in the box': 'Označite izbor', 'pygraphviz library not found': 'pygraphviz biblioteka nije nađena', 'pyramidal': 'piramidalno', 'Pyroclastic Flow': 'Piroklastični tok', 'Pyroclastic Surge': 'Piroklastični udar', 'pyserial module not available within the running Python - this needs installing for SMS!': 'pyserial modul nije dostupan unutar tekućeg Pythona-potrebna je instalacija za SMS!', 'Python GDAL required for Shapefile support!': 'Python GDAL potreban za podršku datotekama s likovima!', 'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Python Serial modul nije dostupan kada je Python pokrenut - ovo zahtijeva instalaciju da bi se aktivirao modem', 'Qatar': 'Katar', 'quadrangular': 'četverougaoni', 'Quantity': 'Količina', 'Quantity Committed': 'Količina zadužena', 'Quantity Fulfilled': 'Količina ispunjena', "Quantity in %s's Inventory": "Količina u %s inventaru", "Quantity in %s's Warehouse": 'Količina u %s skladištu', 'Quantity in Transit': 'Količina u prelazu', 'Quantity Needed': 'Potrebna količina', 'Quantity range': 'Opseg količine', 'Quantity Received': 'Primljena količina', 'Quantity Returned': 'Vraćena količina', 'Quantity Sent': 'Poslana količina', 'Quarantine': 'Karantena', 'Queries': 'Upiti', 'Query': 'Upit', 'Query added': 'Upit dodan', 'Query deleted': 'Upit obrisan', 'Query Feature': 'Upit 
karakteristika', 'Query updated': 'Upit ažuriran', 'Query:': 'Upit:', 'Queryable?': 'Moguće postaviti u upit?', 'Question': 'Pitanje', 'Question Details': 'Detalji pitanja', 'Question Meta-Data': 'Metapodaci pitanja', 'Question Meta-Data added': 'Metapodaci pitanja dodani', 'Question Meta-Data deleted': 'Metapodaci pitanja obrisani', 'Question Meta-Data Details': 'Detalji metapodataka pitanja', 'Question Meta-Data updated': 'Metapodaci pitanja ažurirani', 'Question Summary': 'Rezime pitanja', 'Race': 'Rasa', 'Race group': 'Rasna grupa', 'Race, complexion': 'Rasa, miješana', 'Radio Callsign': 'Pozivni znak za radio', 'Radio Details': 'Radio detalji', 'Radiological Hazard': 'Radiološka opasnost', 'Radiology': 'Radiologija', 'Railway Accident': 'Željeznička nesreća', 'Railway Hijacking': 'Razbojništvo na željeznici', 'Rain Fall': 'Padanje kiše', 'RAM Cache Keys': 'RAM cache ključevi', 'Ram Cleared': 'RAM obrisan', 'Rapid Assessment': 'Brza procjena', 'Rapid Assessment added': 'Brza procjena dodana', 'Rapid Assessment deleted': 'Izbrisana brza procjena', 'Rapid Assessment Details': 'Detalji brze procjene', 'Rapid Assessment updated': 'Brza procjena ažurirana', 'Rapid Assessments': 'Brze Procjene', 'Rapid Assessments & Flexible Impact Assessments': 'Brze procjene i fleksibilne procjene utjecaja', 'Rapid Close Lead': 'Jaki i grupni grad', 'Rapid Data Entry': 'Brzi unos podataka', 'Rating': 'Rejting', 'Raw Database access': 'Direktni pristup bazi podataka', 'RC frame with masonry infill': 'RC okvir sa zidanim ispunjenjem', 'Read-Only': 'Samo za čitanje', 'Ready': 'Spreman', 'Real World Arbitrary Units': 'Proizvoljne jedinice iz realnog svijeta', 'Reason': 'Razlog', 'Receive': 'Preuzimanje', 'Receive %(opt_in)s updates:': 'Primite %(opt_in)s nadogradnje:', 'Receive New Shipment': 'Primi novu pošiljku', 'Receive Shipment': 'Prijem pošiljke', 'Receive this shipment?': 'Primiti ovu pošiljku?', 'Receive updates': 'Primi nadogradnje', 'Receive/Incoming': 'Prijem/dolaz', 'Received': 'Primljeno', 'Received By': 'Primljeno od strane', 'Received By Person': 'Osoba primila', 'Received date': 'Datum prijema', 'Received Item added': 'Dodata primljena stavka', 'Received Item deleted': 'Primljeni predmet obrisan', 'Received Item Details': 'Detalji primljene stavke', 'Received Item updated': 'Primljena stavka je ažurirana', 'Received Shipment canceled': 'Primljena pošiljka otkazana', 'Received Shipment canceled and items removed from Inventory': 'Primljena isporuka otkazana i stavke uklonjene iz skladišta', 'Received Shipment Details': 'Detalji primljene pošiljke', 'Received Shipment updated': 'Ažurirana primljena pošiljka', 'Received Shipments': 'Primljene pošiljke', 'Received/Incoming Shipments': 'Primljene/dolazne pošiljke', 'Receiving and Sending Items': 'Primanje i slanje stavki', 'Receiving Inventory': 'Prijem u skladište', 'Reception': 'Prijem', 'Recipient': 'Primalac', 'Recipient(s)': 'Primaoc(i)', 'Recipients': 'Primaoci', 'Recommendations for Repair and Reconstruction or Demolition': 'Prijedlozi za opravak i rekonstrukciju ili rušenje', 'Record': 'Zapis', 'Record %(id)s created': 'Zapis %(id)s kreiran', 'Record %(id)s updated': 'Zapis %(id)s ažuriran', 'RECORD A': 'ZAPIS A', 'Record added': 'Zapis dodan', 'Record already exists': 'Zapis već postoji', 'Record any restriction on use or entry': 'Zabilježi bilo kakva ograničenja prilikom korištenja ili pristupa', 'Record approved': 'Zapis odobren', 'RECORD B': 'ZAPIS B', 'Record could not be approved.': 'Zapis ne može biti potvrđen.', 'Record could not be 
deleted.': 'Zapis ne može biti obrisan.', 'Record deleted': 'Zapis obrisan', 'Record Details': 'Detalji zapisa', 'record does not exist': 'zapis ne postoji', 'Record ID': 'Id zapisa', 'record id': 'Id zapisa', 'Record id': 'Id zapisa', 'Record last updated': 'Zapis je posljednji put izmijenjen', 'Record not found': 'Zapis nije nađen', 'Record not found!': 'Zapis nije pronađen!', 'Record Saved': 'Zapis spašen', 'Record updated': 'Zapis ažuriran', 'Record Updates': 'Ažuriranja zapisa', 'Recording and Assigning Assets': 'Snimanje i dodjela sredstava', 'Records': 'Zapisi', 'records deleted': 'zapisi obrisani', 'Records merged successfully.': 'Slogovi uspješno spojeni.', 'Recovery': 'Oporavak', 'Recovery report added': 'Dodat izvještaj o pronalaženju', 'Recovery report deleted': 'Izvještaj o pronalaženju izbrisan', 'Recovery report updated': 'Izvještaj o pronalaženju ažuriran', 'Recovery Request': 'Zahtjev za povrat', 'Recovery Request added': 'Zahtjev za povrat dodan', 'Recovery Request deleted': 'Zahtjev za povrat obrisan', 'Recovery Request updated': 'Zahtjev za povrat ažuriran', 'Recovery Requests': 'Zahtjevi za povrat', 'rectangular': 'pravougaona', 'Recurring': 'Ponavljajući', 'Recurring Cost': 'Ponavljajući troškovi', 'Recurring cost': 'Ponavljajući troškovi', 'Recurring costs': 'Povratni troškovi', 'Recurring Request?': 'Ponavljajući zahtjev?', 'Red': 'Crveno', 'red': 'crvena', 'Red Cross / Red Crescent': 'Crveni križ/Crveni polumjesec', 'Redirect URL': 'Preusmjeri URL', 'Reference Document': 'Referentni dokument', 'refresh': 'osvježi', 'Refresh Rate (seconds)': 'Brzina osvježavanja (sekunde)', 'Region': 'Oblast', 'Region added': 'Područje dodano', 'Region deleted': 'Područje obrisano', 'Region Details': 'Detalji oblasti', 'Region Location': 'Lokacija regiona', 'Region updated': 'Područje ažurirano', 'Regional': 'Regionalan', 'Regions': 'Oblasti', 'Register': 'Registruj', 'Register As': 'Registruj kao', 'Register for Account': 'Registruj se za korisnički nalog', 'Register Person': 'Registriraj osobu', 'Register Person into this Camp': 'Registruj Osobu u ovaj Kamp', 'Register Person into this Shelter': 'Registruj osobu u sklonište', 'Register them as a volunteer': 'Registruj ih kao volontere', 'Registered People': 'Registrirani ljudi', 'Registered users can': 'Registrovani korisnici mogu', 'Registered users can %(login)s to access the system': 'Potrebna je %(login)s da registrovani korisnici mogu da pristupe sistemu', 'Registration': 'Registracija', 'Registration added': 'Registracija dodana', 'Registration Details': 'Detalji registracije', 'Registration entry deleted': 'Unos registracije obrisan', 'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': 'Prijava još čeka odobrenje od ovlaštene osobe (%s) - molimo pričekajte dok se prijava ne odobri.', 'Registration key': 'Ključ za registraciju', 'Registration not permitted': 'Registracija nije dozvoljena', 'Registration successful': 'Registracija uspješna', 'Registration updated': 'Registracija ažurirana', 'Registro de Organización': 'Registar organizacije', 'Registro de Refugios': 'Registar izbjeglica', 'Rehabilitation/Long Term Care': 'Rehabilitacija/ Briga u dužem trajanju', 'Reinforced masonry': 'Ojačano zidanje', 'Reject': 'Odbaci', 'Rejected': 'Odbijeno', 'Relationship': 'Odnos', 'Relative added': 'Dodat srodnik', 'Relative deleted': 'Obrisan srodnik', 'Relative Details': 'Povezani detalji', 'Relative updated': 'Srodnik ažuriran', 'Relatives': 'Srodnici', 'Reliable access to 
sanitation/hygiene items': 'Pouzdan pristup sanitarnim/higijenskim predmetima', 'Relief': 'Pomoć', 'Relief Item': 'Stavka pomoći', 'Relief Item updated': 'Stavka pomoći je ažurirana', 'Relief Items': 'Predmeti pomoći', 'Relief Team': 'Tim za pomoć', 'Religion': 'Religija', 'Religious': 'Vjerski', 'Religious Leader': 'Vjerski Vođa', 'Reload': 'Ponovo učitaj', 'reload': 'ponovo učitaj', 'Relocate as instructed in the <instruction>': 'Premjesti kako je navedeno u <instruction>', 'Remarks': 'Napomene', 'Remember Me': 'Zapamti me', 'Remote Error': 'Greška udaljenog servisa', 'Remove': 'Ukloni', 'Remove Activity from this event': 'Ukloni Aktivnost iz ovog događaja', 'Remove all log entries': 'Ukloni sve stavke zapisnika', 'Remove Asset from this event': 'Sklonite sredstvo sa ovog događaja', 'Remove Asset from this incident': 'Odstrani sredstvo iz ovog incidenta', 'Remove Asset from this scenario': 'Odstrani sredstvo iz ovog scenarija', 'Remove Bookmark': 'Ukloni zabilješku', 'Remove Coalition': 'Ukloni koaliciju', 'Remove Document from this request': 'Ukloni dokument iz ovog zahtjeva', 'Remove existing data before import': 'Obriši postojeće podatke prije uvoza', 'Remove Facility from this event': 'Ukloni objekat iz ovog događaja', 'Remove Facility from this incident': 'Ukloni ovaj objekt iz ovog incidenta', 'Remove Facility from this scenario': 'Ukloni objekat iz ovog scenarija', 'Remove Feature: Select the feature you wish to remove & press the delete key': 'Ukloni karakteristiku: Izaberite karakteristiku koju želite ukloniti i pritisnite dugme za brisanje', 'Remove Human Resource from this event': 'Skloniti ljudske resurse sa ovog događaja', 'Remove Human Resource from this incident': 'Uklonite ovaj ljudski resurs sa ovog incidenta', 'Remove Human Resource from this scenario': 'Uklonite ljudske resurse iz ovog scenarija', 'Remove Incident from this event': 'Ukloni ovaj incident iz datog događaja', 'Remove Incident Report from this event': 'Ukloni izvještaj o incidentu za ovaj događaj', 'Remove Incident Report from this incident': 'Odstrani izvještaj o incidentu iz ovog incidenta', 'Remove Incident Type from this event': 'Ukloni tip incidenta za ovaj događaj', 'Remove Item from Inventory': 'Ukloni stavku iz inventara', 'Remove Layer from Profile': 'Ukloni sloj s profila', 'Remove Layer from Symbology': 'Ukloni sloj s značenja simbola', 'Remove Map Configuration from this event': 'Ukloni podešavanje mape iz ovog događaja', 'Remove Map Configuration from this incident': 'Ukloni konfiguraciju mape s ovog incidenta', 'Remove Map Configuration from this scenario': 'Ukloni podešavanje mape iz ovog scenarija', 'Remove Network': 'Ukloni mrežu', 'Remove Organization from Project': 'Ukloni organizaciju iz projekta', 'Remove People from Commitment': 'Ukloni osobe iz zaduženja', 'Remove Person from Commitment': 'Ukloni osobu iz zaduženja', 'Remove Person from Group': 'Ukloni osobu iz grupe', 'Remove Person from Team': 'Odstrani osobu iz tima', 'Remove Profile Configuration for Layer': 'Ukloni konfiguraciju profila za ovaj sloj', 'Remove selection': 'Uklonite trenutni odabir', 'Remove Skill': 'Obriši vještinu', 'Remove Skill from Request': 'Ukloni vještinu iz zahtjeva', 'Remove Stock from Warehouse': 'Ukloni zalihu iz skladišta', 'Remove Symbology from Layer': 'Ukloni značenja simbola s sloja', 'Remove Tag for this Event from this Post': 'Ukloni oznaku ovog događaja za ovaj ubacivi tekst', 'Remove Task from this event': 'Izbrišite zadatak sa ovog događaja', 'Remove Task from this incident': 'Odstrani zadatak iz ovog 
incidenta', 'Remove Task from this scenario': 'Ukloni zadatak iz ovog scenarija', 'Remove this asset from this event': 'Ukloni ovo sredstvo iz datog događaja', 'Remove this asset from this scenario': 'Ukloni sredstvo iz ovog scenarija', 'Remove this entry': 'Ukloni ovaj unos', 'Remove this facility from this event': 'Uklonite ovaj objekat iz ovog događaja', 'Remove this facility from this scenario': 'Ukloni ovaj objekt iz ovog scenarija', 'Remove this human resource from this event': 'Uklonite ovaj ljudski resurs sa ovog dešavanja', 'Remove this human resource from this scenario': 'Izbriši ovaj ljudski resurs sa ovog scenarija', 'Remove this task from this event': 'Ukloni ovaj zadatak sa ovog događaja', 'Remove this task from this scenario': 'Ukloni ovaj zadatak sa scenarija', 'Remove Vehicle from this incident': 'Odstrani vozilo iz ovog incidenta', 'Removed from Group': 'Odstranjen iz grupe', 'Removed from Team': 'Odstranjen iz tima', 'Reopened': 'Ponovo otvoren', 'Repacked By': 'Prepakovao', 'Repair': 'Popravi', 'Repaired': 'Popravljeno', 'Repairs': 'Popravke', 'Repeat': 'Ponovi', 'Repeat your password': 'Ponovite vašu lozinku', 'replace': 'zamijeni', 'Replace': 'Zamijeni', 'Replace All': 'Zamijeni sve', 'Replace if Master': 'Zamjeni ukoliko je Master', 'Replace if Newer': 'Zamijeni ako je novije', 'Replace with Remote': 'Zamijeni s udaljenim', 'Replace/Master': 'Zamjeni/Master', 'Replace/Newer': 'Zamijeni/novije', 'Replies': 'Odgovori', 'Reply': 'Odgovor', 'Reply Message': 'Poruka za automatski odgovor', 'Report': 'Izvještaj', 'Report a Found Person': 'Prijavi pronalazak osobe', 'Report a Missing Person': 'Prijavite nestanak osobe', 'Report a Problem with the Software': 'Prijavi problem sa softverom', 'Report added': 'Dodan izvještaj', 'Report Another Assessment...': 'Prijavite još jednu procjenu...', 'Report by Age/Gender': 'Izvještaj po starosti/spolu', 'Report deleted': 'Obrisan izvještaj', 'Report Details': 'Detalji izvještaja', 'Report my location': 'Prijavi moju lokaciju', 'Report of': 'Izvještaj za', 'Report on Annual Budgets': 'Izvještaj o godišnjem budžetu', 'Report Options': 'Opcije izvještaja', 'Report Resource': 'Prijavi resurs', 'Report that person missing': 'Prijavite nestanak osobe', 'Report the contributing factors for the current EMS status.': 'Izvijestiti o faktorima koji utiču na trenutni status hitne medicinske pomoći', 'Report the contributing factors for the current OR status.': 'Prijavi faktore koji doprinose trenutnom OR statusu.', 'Report them as found': 'Prijavi ih kao pronađene', 'Report them missing': 'Prijavite njihov nestanak', 'Report To': 'Prijavi na', 'Report Types Include': 'Tipovi izvještaja sadrže', 'Report updated': 'Ažuriran izvještaj', 'Reported By': 'Prijavio', 'Reported By (Not Staff)': 'Izvijestio (Nije osoblje)', 'Reported By (Staff)': 'Prijavio (osoblje)', 'Reported To': 'Prijavljeno', 'Reporter': 'Izvjestilac', 'Reporter Name': 'Ime izvjestioca', 'Reporter:': 'Izvjestilac:', 'Reporting on the projects in the region': 'Izvještavanje o projektima u regionu', 'ReportLab module not available within the running Python - this needs installing for PDF output!': 'ReportLab modul nije dostupan unutar pokrenutog Pythona-potrebna je instalacija PDF izlaza!', 'ReportLab module not available within the running Python - this needs installing to do PDF Reporting!': 'Modul ReportLab nije dostupan unutar pokrenutog Python-a, ovo zahtijeva instalaciju PDF izlaza!', 'ReportLab not installed': 'ReportLab nije instaliran', 'Reports': 'Izvještaji', 'reports 
successfully imported.': 'Izvještaji uspješno uvezeni.', 'Repositories': 'Repozitoriji', 'Repository': 'Repozitorijum', 'Repository Base URL': 'Osnovni repozitorijski URL...', 'Repository Configuration': 'Konfiguracija repozitorija', 'Repository configuration deleted': 'Obrisana konfiguracija repozitorija', 'Repository configuration updated': 'Ažurirana konfiguracija repozitorija', 'Repository configured': 'Repozitorij konfigurisan', 'Repository Name': 'Ime repozitorija', 'Repository Type': 'Tip repozitorija', 'Repository UUID': 'UUID repozitorija', 'representation of the Polygon/Line.': 'predstavljanje Poligona/Linije', 'Request': 'Zahtjev', 'Request Added': 'Dodan Zahtjev', 'Request added': 'Zahtjev dodan', 'Request Aid': 'Zahtijevaj pomoć', 'Request Canceled': 'Zahtjev otkazan', 'Request deleted': 'Zahtjev obrisan', 'Request Detail': 'Detalj zahtjeva', 'Request Details': 'Detalji zahtjeva', 'Request for Role Upgrade': 'Zahtjev za nadogradnju uloge', 'Request From': 'Zahtjev od', 'Request from Facility': 'Zahtjev s objekta', 'Request Item': 'Zahtjev za predmetom', 'Request Item added': 'Stavka zahtjeva dodana', 'Request Item deleted': 'Traženi artikal obrisan', 'Request Item Details': 'Zatraži detalje o predmetu', 'Request Item from Available Inventory': 'Zahtjevaj stavku iz dostupnog inventara', 'Request Item updated': 'Zahtijevana stavka je ažurirana', 'Request Items': 'Zatraži stavke', 'Request Job': 'Zahtijevaj posao', 'Request Management': 'Upravljanje zahtjevima', 'Request New People': 'Zatraži nove ljude', 'Request Schedule': 'Raspored zahtjeva', 'Request Status': 'Status zahtjeva', 'Request Status updated': 'Status zahtjeva ažuriran', 'Request Stock from Available Warehouse': 'Zahtijevaj zalihu iz dostupnih skladišta', 'Request Template Added': 'Predložak zahtjeva dodan', 'Request Template Deleted': 'Predložak zahtjeva obrisan', 'Request Template Details': 'Detalji predloška zahtjeva', 'Request Template Updated': 'Predložak zahtjeva ažuriran', 'Request Templates': 'Predlošci zahtjeva', 'Request Type': 'Tip zahtjeva', 'Request Updated': 'Zahtjev ažuriran', 'Request updated': 'Zahtjev ažuriran', 'Request, Response & Session': 'Zahtjev, odgovor i sesija', 'Requested': 'Zahtijevano', 'Requested By': 'Zatraženo od strane', 'Requested by': 'Zatraženo od strane', 'Requested By Facility': 'Zahtijevano po objektima', 'Requested By Location': 'Zahtjevano po lokacijama', 'Requested By Warehouse': 'Zahtjevano po skladištu', 'Requested For': 'Zahtijevano za', 'Requested For Facility': 'Zahtjevano po objektima', 'Requested for Site': 'Zahtjevano po mjestu', 'Requested For Site': 'Zahtjevano po mjestu', 'Requested From': 'Traženo Od', 'Requested From Warehouse': 'Zahtjevano iz skladišta', 'Requested Items': 'Zatraženi predmeti', 'Requested on': 'Zahtijevano na', 'Requested Skill': 'Zahtijevana vještina', 'Requested Skill Details': 'Detalji o traženoj vještini', 'Requested Skill updated': 'Zahtijevana vještina ažurirana', 'Requested Skills': 'Tražene vještine', 'Requester': 'Zahtjevalac', 'Requests': 'Zahtjevi', 'Requests Management': 'Upravljanje zahtjevima', 'Requests Report': 'Izvještaj o zahtjevima', 'Required by other servers.': 'Zahtijevano od strane drugih servera.', 'Required Skill': 'Potrebna vještina', 'Required Skills': 'Potrebne vještine', 'Required Skills (optional)': 'Potrebne vještine (opciono)', 'Requires login': 'Potrebna prijava', 'Requires Login': 'Potrebna prijava', 'Requires Login!': 'Potrebna prijava!', 'Rescue Ambulance': 'Spasilačka kola hitne pomoći', 'Rescue and 
recovery': 'Spašavanje i oporavak', 'Rescue Vehicle Tactical Assistance': 'Taktička pomoć vozila za spašavanje', 'Reset': 'Ponovno postavljanje', 'Reset form': 'Vrati formular na početak', 'Reset Password': 'Promijeni lozinku', 'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': 'Promjena veličine karakteristike: Izaberite karakteristiku kojoj želite promijeniti veličinu i prevucite pridruženu tačku do željene veličine', 'Resolve': 'Razriješi', 'Resolve Conflict': 'Razriješi konflikt', 'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Link za Riješi prikazuje novi ekran koji pomaže da se riješi problem sa duplim zapisima i ažurira baza podataka', 'Resolved': 'Riješeno', 'Resource': 'Resurs', 'Resource added': 'Resurs je dodan.', 'Resource Configuration': 'Podešavanje resursa', 'Resource configuration deleted': 'Obrisana konfiguracija resursa', 'Resource configuration updated': 'Ažurirana konfiguracija resursa', 'Resource configured': 'Resurs konfigurisan', 'Resource deleted': 'Resurs je obrisan.', 'Resource Details': 'Detalji o resursima', 'Resource Inventory': 'Zalihe resursa', 'Resource Name': 'Ime resursa', 'Resource Type': 'Tip resursa', 'Resource Type added': 'Vrsta resursa dodana', 'Resource Type deleted': 'Vrsta resursa obrisana', 'Resource Type Details': 'Detalji tipa resursa', 'Resource Type updated': 'Vrsta resursa ažurirana', 'Resource Types': 'Tipovi resursa', 'Resource updated': 'Resurs je ažuriran.', 'Resources': 'Resursi', 'Respiratory Infections': 'Infekcije respiratornih puteva', 'Responded': 'Odgovoreno', 'Responder(s)': 'Odgovorili', 'Responding': 'Odgovara', 'Response': 'Odgovor', 'RESPONSE': 'ODGOVOR', 'Response deleted': 'Odgovor izbrisan', 'Response Details': 'Detalji o odgovoru', 'Response Summaries': 'Sumarni odgovori', 'Response Summary Added': 'Sumarni odgovor dodan', 'Response Summary Deleted': 'Sumarni odgovor obrisan', 'Response Summary Details': 'Detalji sumarnog odgovora', 'Response Summary Report': 'Izvještaj sumarnog odgovora', 'Response Summary Updated': 'Sumarni odgovor ažuriran', 'Responses': 'Odgovori', 'Restricted Access': 'Ograničen pristup', 'Restricted Use': 'Ograničena upotreba', 'Restrictions': 'Ograničenja', 'Results': 'Rezultati', 'Retail Crime': 'Kriminal u maloprodaji', 'retired': 'penzionisan', 'Retrieve Password': 'Preuzeti Lozinku', 'retry': 'pokušaj ponovo', 'Return': 'Vraćanje', 'Return to Request': 'Povratak na zahtjev', 'Returned': 'Vraćeno', 'Returned From': 'Vraćeno sa', 'Returning': 'Vraćanje', 'Revert Entry': 'Vrati unos', 'Review': 'Pregled', 'Review Incoming Shipment to Receive': 'Pregled nadolazeće pošiljke za prijem', 'Review the situation on maps.': 'Pogledaj situaciju na mapi.', 'Revised Quantity': 'Količina revidirana', 'Revised Status': 'Revidirani status', 'Revised Value per Pack': 'Revidirana vrijednost po paketu', 'RFA Priorities': 'RFA Prioriteti', 'RFA1: Governance-Organisational, Institutional, Policy and Decision Making Framework': 'RFA1: Radni okvir za vladu i organizacije, institucije, politiku i donošenje odluka', 'RFA2: Knowledge, Information, Public Awareness and Education': 'RFA2: Znanje, informacije, javno obavještavanje i obrazovanje', 'RFA3: Analysis and Evaluation of Hazards, Vulnerabilities and Elements at Risk': 'RFA3: Analiza i procjena rizika, ranjivosti i elemenata rizika', 'RFA4: Planning for Effective Preparedness, Response and Recovery': 'RFA4: Planiranje za efektivnu pripremljenost, odgovor i 
obnovu', 'RFA5: Effective, Integrated and People-Focused Early Warning Systems': 'RFA5: Efektivni, integrisani i fokusirani na ljudstvo sistemi za rano upozoravanje', 'RFA6: Reduction of Underlying Risk Factors': 'RFA6: Smanjenje fundamentalnih faktora rizika', 'Rice': 'Riža', 'Rich Text?': 'Bogat tekst?', 'right': 'desno', 'Right-to-Left': 'Sa desna na lijevo', 'Riot': 'Pobuna', 'Risk': 'Rizik', 'Risk transfer': 'Prijenos rizika', 'river': 'rijeka', 'River': 'Rijeka', 'River added': 'Dodana rijeka', 'River deleted': 'Rijeka izbrisana', 'River Details': 'Detalji o rijeci', 'River updated': 'Rijeka ažurirana', 'Rivers': 'Rijeke', 'Road Accident': 'Saobraćajna nesreća', 'Road Closed': 'Zatvorena cesta', 'Road Conditions': 'Stanje putnih pravaca', 'Road Delay': 'Odgađanje puta', 'Road Hijacking': 'Razbojništvo na putu', 'Road Usage Condition': 'Stanje na cesti', 'Roads Layer': 'Sloj puteva', 'Role': 'Uloga', 'Role added': 'Uloga dodana', 'Role assigned to User': 'Dodijeljena uloga korisniku', 'Role deleted': 'Uloga obrisana', 'Role Details': 'Detalji uloga', 'Role Name': 'Ime uloge', 'Role Required': 'Potrebna uloga', 'Role updated': 'Uloga ažurirana', 'Role Updated': 'Uloga izmijenjena', 'Role-based': 'baziran na ulozi', 'Roles': 'Uloge', 'Roles currently assigned': 'Trenutno dodijeljene uloge', 'Roles of User': 'Uloge korisnika', 'Roles Permitted': 'Dopuštene uloge', 'Roles updated': 'Uloge ažurirane', 'Roll On Roll Off Berth': 'Ro-ro vez', 'Roman': 'rimski', 'Romania': 'Rumunija', 'Roof tile': 'Crijep', 'Roofs, floors (vertical load)': 'Krovovi, podovi (vertikalno opterećenje)', 'Room': 'Soba', 'Room added': 'Dodana soba', 'Room deleted': 'Obrisana soba', 'Room Details': 'Detalji sobe', 'Room updated': 'Ažurirana soba', 'Rooms': 'Sobe', 'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': 'Rotiranje karakteristike: Izaberite karakteristiku koju želite rotirati i prevucite pridruženu tačku da rotirate na željenoj lokaciji', 'round': 'okruglo', 'Row Choices (One Per Line)': 'Red odgovora (Jedan po liniji)', 'Rows in table': 'Redova u tabeli', 'Rows in Table': 'Redova u tabeli', 'Rows selected': 'Izabrani redovi', 'RPC Service URL': 'URL RPC usluge', 'RSS Feed': 'RSS dovod', 'RSS Post deleted': 'RSS Ubacivi tekst obrisan', 'RSS Post Details': 'Detalji RSS ubacivog teksta', 'RSS Posts': 'RSS Ubacivi tekst', 'RSS Setting deleted': 'RSS podešavanje obrisano', 'RSS Setting Details': 'Detalji RSS postavki', 'RSS Settings': 'RSS Postavke', 'RSS settings updated': 'RSS podešavanje ažurirano', 'Run every': 'Pokreni svakih', 'Run Functional Tests': 'Pokreni funkcionalne testove', 'Run Interval': 'Interval izvršavanja', 'Run Now': 'Pokreni sada', 'Running Cost': 'Trenutni troškovi', 'Rural Tank Tactical Vehicle': 'Seoska taktička pokretna cisterna', 'Russia': 'Rusija', 'Russian': 'ruski', 'Rwanda': 'Ruanda', 'Rápido Evaluaciones': 'Brze procjene', 'sack 20kg': 'vreća 20kg', 'sack 50kg': 'vreća 50kg', 'Safe environment for vulnerable groups': 'Sigurno okruženje za ugrožene grupe', 'Safety Assessment Form': 'Obrazac procjene sigurnosti', 'Safety of children and women affected by disaster': 'Sigurnost djece i žena ugroženih zbog katastrofe', 'Safety of children and women affected by disaster?': 'Sigurnost djece i žena ugroženih zbog prirodne nepogode?', 'Sahana access granted': 'Sahana pristup odobren', 'Sahana Community Chat': 'Chat Sahana udruženja', 'Sahana Eden <= Other sync (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <= ostalo 
sinhronizacija (Sahana Agasti, Ushahidi, itd.)', 'Sahana Eden <=> Other': 'Sahana Eden <=> Drugi', 'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> Ostalo (Sahana Agasti, Ushahidi, itd.)', 'Sahana Eden <=> Other sync (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> ostalo sinhronizacija (Sahana Agasti, Ushahidi, itd.)', 'Sahana Eden <=> Sahana Eden': 'Sahana Eden <=> Sahana Eden', 'Sahana Eden <=> Sahana Eden sync': 'Sahana Eden <=> Sahana Eden sinhronizacija', 'Sahana Eden Disaster Management Platform': 'Sahana Eden Platforma za vođenje aktivnosti u slučaju katastrofa', 'Sahana Eden Humanitarian Management Platform': 'Sahana Eden Platforma za vođenje humanitarnih aktivnosti', 'Sahana Eden portable application generator': 'Generator Sahana Eden prenosive aplikacije', 'Sahana Eden Website': 'Sahana Eden Web stranica', 'Sahana FOSS Disaster Management System': 'Sahana FOSS, sistem za upravljanje u slučaju katastrofa', 'Sahana Green': 'Sahana Green', 'Sahana is a collection of web based disaster management applications that provides solutions to large-scale humanitarian coordination and collaboration in disaster situation and its aftermath. Sahana consists of several modules for following functionalities': 'Sahana je kolekcija web baziranih aplikacija za upravljanje u slučaju katastrofe koja pruža rješenja za humanitarnu koordinaciju i saradnju većeg obima u situacijama katastrofe i nakon njih. Sahana se sastoji od nekoliko modula za sljedeće funkcionalnosti', 'Sahana Steel': 'Sahana čelik', 'Sahana Website': 'Sahana Web stranica', 'Saint Lucia': 'Sveta Lucija', 'Saint Vincent and the Grenadines': 'Sveti Vincent i Grenadini', 'Sale': 'Prodaja', 'Salted Fish': 'Zasoljena riba', 'Salvage material usable from destroyed houses': 'Preostali materijal upotrebljiv iz uništenih kuća', 'Salvage material usable from destroyed schools': 'Preostali materijal upotrebljiv iz uništenih škola', 'Sanitation': 'Sanitacija', 'Sanitation problems': 'Sanitarni problemi', 'Satellite': 'Satelit', 'Satellite Layer': 'Satelitski sloj', 'Satellite Office': 'Pridruženi ured', 'Saturday': 'Subota', 'Saudi Arabia': 'Saudijska Arabija', 'Save': 'Snimi', 'Save and add Items': 'Snimi i dodaj stavke', 'Save and add People': 'Snimi i dodaj ljude', 'Save and Continue Editing': 'Snimi i nastavi uređivanje', 'Save any Changes in the one you wish to keep': 'Snimi promjene među onim koje želite zadržati', 'Save as New Map?': 'Snimi kao novu mapu?', 'Save Changes': 'Snimi promjene', 'Save Map': 'Snimi mapu', 'Save model as...': 'Snimi model kao...', 'Save: Default Lat, Lon & Zoom for the Viewport': 'Snimi: Podrazumijevane geografska širina i dužina i promjena veličine za prostor pogleda', 'Saved': 'Snimljeno', 'Saved filters': 'Snimljeni filteri', 'Saved Filters': 'Snimljeni filteri', 'Saved Filters...': 'Snimljeni filteri...', 'Saved Maps': 'Snimljene mape', 'Saved search added': 'Snimljeno traženje dodano', 'Saved search deleted': 'Snimljeno traženje obrisano', 'Saved search details': 'Detalji snimljene pretrage', 'Saved search updated': 'Snimljeno traženje ažurirano', 'Saved Searches': 'Snimljene pretrage', 'Saved searches': 'Snimljene pretrage', 'Saved.': 'Spašeno.', 'Saving...': 'Snimam...', 'Scale of Results': 'Skala rezultata', 'Scanned Copy': 'Skenirana kopija', 'Scanned Forms Upload': 'Slanje skeniranih obrazaca', 'Scenario': 'Scenario', 'Scenario added': 'Scenarij dodat', 'Scenario deleted': 'Scenarij izbrisan', 'Scenario Details': 'Detalji scenarija', 'Scenario updated': 'Ažuriran scenario', 'Scenarios': 'Scenariji', 'Schedule': 'Raspored', 'Schedule synchronization jobs': 'Rasporedi poslove 
sinhronizacije', 'Scheduled Jobs': 'Zakazani poslovi', 'Schema': 'Shema', 'School': 'Škola', 'School activities': 'Školske aktivnosti', 'School assistance': 'Pomoć školi', 'School assistance received/expected': 'Nastavna pomoć primljena/očekivana', 'School attendance': 'Školsko prisustvo', 'School Closure': 'Zatvaranje škole', 'School Code': 'Šifra škole', 'School destroyed': 'Škola uništena', 'School District': 'Rejon škole', 'School District added': 'Dodat školski rejon', 'School District deleted': 'Školski rejon obrisan', 'School District Details': 'Detalji školskog rejona', 'School District updated': 'Ažuriran školski rejon', 'School Districts': 'Školski rejoni', 'School heavily damaged': 'Škola teško oštećena', 'School Holidays only': 'Samo školski praznici', 'School Lockdown': 'Škola je zaključana', 'School Report added': 'Dodat školski izvještaj', 'School Report deleted': 'Izvještaj o školi izbrisan', 'School Report Details': 'Detalji izvještaja o školama', 'School Report updated': 'Izvještaj o školama ažuriran', 'School Reports': 'Školski izvještaji', 'School Safety': 'Sigurnost škole', 'School Teacher': 'Školski nastavnik', 'School tents received': 'Primljeni školski šatori', 'School tents, source': 'Školski šatori, izvor', 'School used for other purpose': 'Škola koja se koristi u druge svrhe', 'School/studying': 'Škola/učenje', 'Schools': 'Škole', 'Scubadiving Support Vehicle': 'Vozilo za ronioce', 'Seaport': 'Luka', 'Seaport added': 'Luka dodana', 'Seaport deleted': 'Luka obrisana', 'Seaport Details': 'Detalji luke', 'Seaport updated': 'Luka ažurirana', 'Seaports': 'Luke', 'Search': 'Potraži', 'Search & List Bins': 'Pretraga i prikaz korpi', 'Search & List Catalog': 'Pretraga i prikaz kataloga', 'Search & List Items': 'Traži i izlistaj stavke', 'Search & List Site': 'Pretraga i prikaz mjesta', 'Search & List Sub-Category': 'Traži i prikaži podkategoriju', 'Search Activities': 'Pretraži aktivnosti', 'Search Activity Report': 'Pretraga izvještaja o aktivnostima', 'Search Addresses': 'Pretraga adresa', 'Search After Save?': 'Traži nakon snimanja?', 'Search Aid Requests': 'Traži zahtjeve za pomoć', 'Search All Requested Items': 'Traži sve potrebne stavke', 'Search All Requested Skills': 'Traži sve zahtjevane vještine', 'Search Alternative Items': 'Traži alternativne predmete', 'Search and Edit Group': 'Pretraži i uredi grupu', 'Search and Edit Individual': 'Pretraži i Uredi Pojedinca', 'Search Assessment Summaries': 'Pretraga sažetaka procjena', 'Search Assessments': 'Pretraži procjene', 'Search Asset Assignments': 'Traži dodjelu sredstava', 'Search Asset Log': 'Traži zapisnik o sredstvima', 'Search Assets': 'Traži sredstva', 'Search Baseline Type': 'Pretraga tipa referentne tačke', 'Search Baselines': 'Pretraži referentne tačke', 'Search Brands': 'Traži proizvođačke marke', 'Search Budgets': 'Pretraži budžete', 'Search Bundles': 'Pretraži pakete', 'Search by ID Tag': 'Pretraži po ID oznaci', 'Search by Skill Types': 'Pretraži po vrstama vještina', 'Search by skills': 'Traži po vještinama', 'Search by Skills': 'Traži po vještinama', 'Search Camp Services': 'Pretraži usluge kampa', 'Search Camp Types': 'Traži vrste kampova', 'Search Camps': 'Pretraži kampove', 'Search Catalog Items': 'Pretraži stavke kataloga', 'Search Catalogs': 'Pretraži kataloge', 'Search Category<>Sub-Category<>Catalog Relation': 'Traži kategorija<>Podkategorija<>kataloški odnos', 'Search Certificates': 'Traži certifikate', 'Search Certifications': 'Pretraga certifikata', 'Search Checklists': 'Pretraži kontrolnu 
liste', 'Search Cluster Subsectors': 'Pretraži podsektore skupa', 'Search Clusters': 'Pretraži skupove', 'Search Commitment Items': 'Pretraži stavke zaduženja', 'Search Commitments': 'Traži zaduženja', 'Search Committed People': 'Pretraži zadužene ljude', 'Search Competencies': 'Pretraga stručnosti', 'Search Competency Ratings': 'Pretraži rejtinge kompetencija', 'Search Configs': 'Traži konfiguracije', 'Search Contact Information': 'Pretraga informacija o kontaktu', 'Search Contacts': 'Pretraži kontakte', 'Search Course Certicates': 'Pretraga certifikata kursa', 'Search Courses': 'Pretraga kurseva', 'Search Credentials': 'Traži akreditive', 'Search Criteria': 'Kriterij pretrage', 'Search Distribution Items': 'Pretraži stavke raspodjele', 'Search Distributions': 'Traži raspodjele', 'Search Documents': 'Pretraži dokumente', 'Search Donors': 'Traženje donatora', 'Search Email InBox': 'Traži E-mail dolazne poruke', 'Search Email OutBox': 'Traži E-mail odlazne poruke', 'Search Entries': 'Traži stavke', 'Search Events': 'Pretraži događaje', 'Search Facilities': 'Pretraga Objekata', 'Search Feature Class': 'Traži klasu karakteristika', 'Search Feature Groups': 'Traži grupe karakteristika', 'Search Feature Layers': 'Pretraga slojeva s karakteristikama', 'Search Find Report': 'Traži izvještaj o pronalasku', 'Search Flood Reports': 'Pretraga izvještaja o poplavi', 'Search for a commitment by Committer name, Request ID, Site or Organization.': 'Traži zaduženje po imenu zadužioca, ID zahtjeva, mjestu ili organizaciji', 'Search for a Hospital': 'Traži bolnicu', 'Search for a Location': 'Traži lokaciju', 'Search for a Location by name, including local names.': 'Traži Lokaciju pomoću imena, uključujući lokalne nazive.', 'Search for a Person': 'Potraga za osobom', 'Search for a Project': 'Pretraživanje projekta', 'Search for a Project by name, code, location, or description.': 'Traži projekt po imenu, šifri, lokaciji ili opisu', 'Search for a Project by name, code, or description.': 'Traži projekt po imenu, šifri ili opisu', 'Search for a Project Community by name.': 'Traži zajednicu projekta po imenu', 'Search for a Request': 'Pretraži zahtjev', 'Search for a request by Site name, Requester name or free text.': 'Traži zahtjev po imenu mjesta, zahtjevaocu ili slobodnom tekstu.', 'Search for a shipment by looking for text in any field.': 'Pretraži isporuke traženjem teksta iz bilo kog polja.', 'Search for a shipment received between these dates': 'Pretraži pošiljku primljenu između ovih datuma', 'Search for a shipment sent between these dates.': 'Pretraži pošiljku poslanu između ovih datuma', 'Search for a shipment which has an estimated delivery between these dates.': 'Traži pošiljke čija se isporuka očekuje između ovih datuma.', 'Search for a vehicle by text.': 'Pretraži vozilo po tekstu', 'Search for an asset by text.': 'Pronađi sredstva uz pomoć teksta', 'Search for an item by category.': 'Tražite predmet po kategoriji', 'Search for an item by brand.': 'Pretraga predmeta po marki', 'Search for an item by catalog.': 'Kataloška pretraga stavki', 'Search for an item by its code, name, model and/or comment.': 'Pretraga predmeta po kodu, imenu, modelu i/ili komentaru', 'Search for an item by text.': 'Potraži stavku unosom teksta', 'Search for an item by Year of Manufacture.': 'Pretraga stavki po atributu: Godina proizvodnje', 'Search for an order by looking for text in any field.': 'Pretraži narudžbu traženjem teksta iz bilo kog polja.', 'Search 
for an order expected between these dates': 'Pretraži narudžbu očekivanu između ovih datuma', 'Search for an Organization by name or acronym': 'Potraži organizaciju po imenu ili akronimu', 'Search for an Organization by name or acronym.': 'Traži organizaciju po imenu ili akronimu', 'Search for asset by country.': 'Pretraga sredstava po državama', 'Search for asset by location.': 'Traženje sredstava po lokaciji.', 'Search for commitments available between these dates.': 'Traži obaveze dostupne između ovih datuma', 'Search for commitments made between these dates.': 'Traži obaveze načinjene između ovih datuma', 'Search for Items': 'Traži stavke', 'Search for items by donating organization.': 'Traži stavke po donatorskoj organizaciji', 'Search for items by owning organization.': 'Traži stavke po vlasničkoj organizaciji', 'Search for items with this text in the name.': 'Traži stavke s tim tekstom u imenu', 'Search for office by country.': 'Traži ured po zemlji', 'Search for office by location.': 'Pretraga ureda po lokaciji', 'Search for office by organization or branch.': 'Traži kancelariju po organizaciji ili ogranku.', 'Search for office by organization.': 'Pretraži kancelarije po organizaciji', 'Search for office by text.': 'Pretraga ureda po tekstu', 'Search for Persons': 'Traži po osobama', 'Search for requests made between these dates.': 'Traži zahtjeve napravljene između ovih datuma', 'Search for requests required between these dates.': 'Traži zahtjeve potrebne između ovih datuma', 'Search for Staff or Volunteers': 'Potraži osoblje ili volontere', 'Search for vehicle by location.': 'Traži vozilo po lokaciji', 'Search for warehouse by country.': 'Pretraga skladišta na osnovu atributa: Država', 'Search for warehouse by location.': 'Traži skladišta po lokaciji', 'Search for warehouse by organization.': 'Potraži skladište po organizaciji.', 'Search for warehouse by text.': 'Potraži skladište unosom teksta', 'Search GPS data': 'Pretraži GPS podatke', 'Search Groups': 'Traži grupe', 'Search here for a person record in order to:': 'Pretražite ovdje zapis osobe da biste:', "Search here for a person's record in order to:": 'Pretražite ovdje lični zapis da biste:', 'Search Homes': 'Pretraži domaćinstva', 'Search Hospitals': 'Traži bolnice', 'Search Human Resources': 'Pretraži ljudske resurse', 'Search Identity': 'Pretraži identitet', 'Search Images': 'Pretražuj slike', 'Search Impact Type': 'Traži tip utjecaja', 'Search Impacts': 'Pretraga utjecaja', 'Search Import Files': 'Pretražite uvezene datoteke', 'Search Incident Reports': 'Pretraži izvještaje o incidentima', 'Search Incidents': 'Traži incidente', 'Search Inventory Items': 'Pretraži artikle sa popisa', 'Search Inventory items': 'Pretraži predmete inventara', 'Search Inventory Stores': 'Pretraži skladišta inventara', 'Search Item Catalog(s)': 'Pretraži katalog(e) stavki', 'Search Item Categories': 'Pretraži kategorije stavki', 'Search Item Packets': 'Pretraga paketa stavki', 'Search Item Packs': 'Pretraga paketa predmeta', 'Search Item Sub-Category(s)': 'Traži podkategoriju stavke', 'Search Items': 'Pretraži stavke', 'Search Job Roles': 'Pretraži radna mjesta', 'Search Keys': 'Ključevi pretrage', 'Search Kits': 'Traži komplete', 'Search Layers': 'Pretraga slojeva', 'Search Level': 'Nivo pretrage', 'Search Level 1 Assessments': 'Traži procjene prvog nivoa', 'Search Level 2 Assessments': 'Pretraživanje procjena nivoa 2', 'Search location in Geonames': 'Traži lokaciju u Geonames', 'Search Locations': 'Traži lokacije', 
'Search Log Entry': 'Pretraži unose iz zapisnika', 'Search Map Configurations': 'Pretraži konfiguracije mape', 'Search Markers': 'Pretraži oznake', 'Search Member': 'Potraži člana', 'Search Members': 'Traži članove', 'Search Membership': 'Pretraži članstvo', 'Search Memberships': 'Pretraži članstva', 'Search messages': 'Pretraži poruke', 'Search Metadata': 'Traži metapodatke', 'Search Missions': 'Pretraži misije', 'Search Need Type': 'Pretraga tipa potrebe', 'Search Needs': 'Traži potrebe', 'Search Notes': 'Traži bilješke', 'Search Offices': 'Tražite kancelarije', 'Search Organisations': 'Traži organizacije', 'Search Organizations': 'Pretraži organizacije', 'Search Partners': 'Traži partnere', 'Search Patients': 'Pretraga pacijenata', 'Search Peer': 'Pretraži saradnika', 'Search Peers': 'Traži saradnike', 'Search Personal Effects': 'Pretraži osobne učinke', 'Search Persons': 'Pretraži osobe', 'Search Photos': 'Pretraga fotografija', 'Search Population Statistics': 'Pretraga statistike stanovništva', 'Search Positions': 'Pretraži pozicije', 'Search Problems': 'Pretraži probleme', 'Search Projections': 'Pretraga projekcija', 'Search Projects': 'Pretraži projekte', 'Search Queries': 'Traži upite', 'Search Query': 'Traži upit', 'Search Rapid Assessments': 'Pretraži brze procjene', 'Search Received Items': 'Pretraži primljene stavke', 'Search Received Shipments': 'Pretraži primljene isporuke', 'Search Records': 'Pretraga zapisa', 'Search Recovery Reports': 'Pretraga izvještaja o pronalaženjima', 'Search Registations': 'Pretraga registracija', 'Search Registration Request': 'Potraži zahtjev za registraciju', 'Search Relatives': 'Pretraži srodnike', 'Search Report': 'Izvještaj pretrage', 'Search Reports': 'Pretraga Izvještaja', 'Search Request': 'Potražite zahtjev', 'Search Request Items': 'Pretraži zahtijevane stavke', 'Search Requested Items': 'Pretraži tražene jedinice', 'Search Requested Skills': 'Pretraga traženih sposobnosti', 'Search Requests': 'Zahtjevi za pretragu', 'Search Resources': 'Pretraži resurse', 'Search Responses': 'Pretraga odgovora', 'Search Results': 'Rezultati pretrage', 'Search Rivers': 'Traži rijeke', 'Search Roles': 'Traži uloge', 'Search Rooms': 'Traži sobe', 'Search Scenarios': 'Pretraži scenarije', 'Search School Districts': 'Pretraga rejona škole', 'Search School Reports': 'Pretraga izvještaja o školama', 'Search Sections': 'Traži sekcije', 'Search Sectors': 'Pretraži Sektore', 'Search Sent Email': 'Traži poslanu elektronsku poštu', 'Search Sent Items': 'Pretraži poslane stavke', 'Search Sent Shipments': 'Pretraži poslane pošiljke', 'Search Sent SMS': 'Pretraži poslane SMS', 'Search Service Profiles': 'Pretraživanje profila usluge', 'Search Settings': 'Postavke pretrage', 'Search Shelter Services': 'Traži usluge skloništa', 'Search Shelter Types': 'Traži tipove skloništa', 'Search Shelters': 'Pretraži skloništa', 'Search Shipment<>Item Relation': 'Traži pošiljku<>Odnos predmeta', 'Search Shipped Items': 'Pretraži isporučene stavke', 'Search Site(s)': 'Traži mjesta', 'Search Skill Equivalences': 'Pretraži ekvivalenciju vještina', 'Search Skill Provisions': 'Pretraga provizija vještina', 'Search Skill Types': 'Pretraži tipove sposobnosti', 'Search Skills': 'Pretraži vještine', 'Search SMS InBox': 'Traži SMS dolazne poruke', 'Search SMS OutBox': 'Traži SMS odlazne poruke', 'Search Solutions': 'Traži rješenja', 'Search Sources': 'Traži izvore', 'Search Staff': 'Traži osoblje', 'Search Staff & Volunteers': 'Potraži osoblje i volontere', 'Search Staff or Volunteer': 
'Pretraži osoblje ili volontere', 'Search Staff Types': 'Traži tip osoblja', 'Search Status': 'Status pretrage', 'Search Storage Bin Type(s)': 'Traži vrste korpe za smještaj', 'Search Storage Bin(s)': 'Traži korpe za smještaj', 'Search Storage Location(s)': 'Traži lokacije smještaja', 'Search Subscriptions': 'Pretraži pretplate', 'Search Subsectors': 'Pretražite podsektore', 'Search Support Requests': 'Pretraži zahtjeve za podršku', 'Search Tasks': 'Pretraga zadataka', 'Search Teams': 'Traži timove', 'Search Themes': 'Pretraži teme', 'Search Tickets': 'Pretraži kartice', 'Search Tracks': 'Pretraži tragove', 'Search Training Participants': 'Traži učesnike obuke', 'Search Trainings': 'Traži treninge', 'Search Twitter Tags': 'Pretraži Twitter tagove', 'Search Units': 'Pretraži jedinice', 'Search Users': 'Pretražite korisnike', 'Search Vehicle Details': 'Pretraži detalje vozila', 'Search Vehicles': 'Pretraga vozila', 'Search Volunteer Availability': 'Pretraži mogućnosti volontiranja', 'Search Volunteers': 'Pretraži volontere', 'Search Warehouse Items': 'Pretraži stavke skladišta', 'Search Warehouses': 'Pretraži skladišta', 'Searched?': 'Traženo?', 'Searching for different groups and individuals': 'Traženje različitih grupa i pojedinaca', 'secondary effect': 'sekundarni efekat', 'Secondary Server (Optional)': 'Sekundarni server (opcionalno)', 'seconds': 'sekundi', 'Seconds must be a number between 0 and 60': 'Sekunde moraju biti broj između 0 i 60', 'Seconds must be a number.': 'Sekunde moraju biti broj', 'Seconds must be less than 60.': 'Sekunde moraju biti manje od 60.', 'Section': 'Odjeljak', 'Section deleted': 'Odjel izbrisan', 'Section Details': 'Detalji o odjelima', 'Section updated': 'Odjel ažuriran', 'Sections': 'Sekcije', 'Sections that are part of this template': 'Sekcije koje su dio ovog šablona', 'Sections that can be selected': 'Dijelovi koji mogu biti odabrani', 'Sector': 'Sektor', 'Sector added': 'Sektor dodan', 'Sector added to Organization': 'Sektor dodan u organizaciju', 'Sector added to Project': 'Sektor dodan u projekt', 'Sector added to Theme': 'Sektor dodan u temu', 'Sector deleted': 'Sektor obrisan', 'Sector Details': 'Detalji o sektoru', 'Sector removed from Organization': 'Sektor uklonjen iz organizacije', 'Sector removed from Project': 'Sektor uklonjen sa projekta', 'Sector removed from Theme': 'Sektor uklonjen iz teme', 'Sector updated': 'Sektor ažuriran', 'Sector(s)': 'Sektor(i)', 'Sectors': 'Sektori', 'Sectors to which this Activity Type can apply': 'Sektori na koje je ova vrsta aktivnosti primjenjiva', 'Sectors to which this Theme can apply': 'Sektori na koje je ova tema primjenjiva', 'Secure Storage Capacity': 'Sigurni kapacitet smještaja', 'Security': 'Sigurnost', 'Security Description': 'Sigurnosni opis', 'Security Policy': 'Politika sigurnosti', 'Security problems': 'Sigurnosni problemi', 'Security Required': 'Potrebna sigurnost', 'Security Status': 'Sigurnosni status', 'See a detailed description of the module on the Sahana Eden wiki': 'Vidi detaljan opis modula na Sahana Eden wiki', 'See all': 'Vidi sve', 'See All Entries': 'Pogledajte sve unose', 'see comment': 'prikaži komentar', 'see more': 'vidi više', 'See the universally unique identifier (UUID) of this repository': 'Pogledaj univerzalno jedinstveni identifikator (UUID) ovog repozitorija', 'See unassigned recovery requests': 'Pregledaj neraspoređene zahtjeve za oporavak', 'Seen': 'Viđeno', 'Select': 'Izaberi', 'Select %(location)s': 'Izaberi %(location)s', "Select 2 records from
this list, then click 'Merge'.": "Odaberite 2 sloga iz liste i kliknite 'Spoji'", 'Select a label question and at least one numeric question to display the chart.': 'Odaberite pitanje oznake i barem jedno brojčano pitanje za prikaz dijagrama.', 'Select a location': 'Izaberite mjesto', "Select a manager for status 'assigned'": "Odaberi menadžera za status 'dodijeljeno'", "Select a person in charge for status 'assigned'": "Odaberi osobu zaduženu za status 'dodijeljeno'", 'Select a question from the list': 'Označite pitanje sa liste', 'Select a range for the number of total beds': 'Označite opseg za ukupan broj kreveta', "Select a Room from the list or click 'Add Room'": 'Izaberite sobu sa spiska ili pritisnite "Dodaj sobu"', "Select a Room from the list or click 'Create Room'": 'Izaberite sobu sa spiska ili kliknite "Kreiraj sobu"', 'Select all': 'Izaberi sve', 'Select All': 'Izaberi sve', 'Select all templates (All modules included)': 'Odaberi sve predloške (Svi moduli uključeni)', 'Select all that apply': 'Označi sve što se odnosi na to', 'Select an existing bin': 'Odaberi postojeću korpu', 'Select an image to upload. You can crop this later by opening this record.': 'Odaberite sliku za postavljanje. Možete je izrezati kasnije otvaranjem ovog zapisa.', 'Select an Organisation to see a list of offices': 'Izaberi organizaciju za prikaz liste kancelarija', 'Select an Organization to see a list of offices': 'Izaberi organizaciju za prikaz liste kancelarija', 'Select Existing Location': 'Izaberi postojeću lokaciju', 'Select from registry': 'Izaberi iz registra', 'Select Items from the Request': 'Izaberite stavke iz zahtjeva', 'Select Items from this Inventory': 'Odaberite stavke iz ovog inventara', 'Select Label Question': 'Izaberi pitanje oznake', 'Select language code': 'Izaberi oznaku jezika', 'Select Modules for translation': 'Odaberite module za prevođenje', 'Select Modules which are to be translated': 'Odaberite module koji se trebaju prevesti', 'Select Numeric Questions (one or more):': 'Odaberite numerička pitanja (jedno ili više):', 'Select one or more option(s) that apply': 'Odaberite jednu ili više primjenjivih opcija', 'Select Photos': 'Izaberi fotografije', 'Select resources to import': 'Izaberite resurse za uvoz', 'Select Skills from the Request': 'Izaberi vještine iz Zahtjeva', 'Select Stock from this Warehouse': 'Odaberite zalihu iz ovog skladišta', 'Select the default site.': 'Izaberi podrazumijevano mjesto', 'Select the language file': 'Izaberi jezičku datoteku', 'Select the option that applies': 'Odaberite primjenjivu opciju', 'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Izaberite preklapanje za procjene i aktivnosti koje se odnose na svaku potrebu da se identifikuje propust.', 'Select the person assigned to this role for this project.': 'Odaberi osobu dodijeljenu za ovu ulogu za ovaj projekt', 'Select the person associated with this scenario.': 'Odaberi osobu povezanu s ovim scenarijem', 'Select the required modules': 'Izaberite potrebne module', "Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": 'Odaberite ovo ako sve određene lokacije trebaju roditelja na najdubljem nivou lokacijske hijerarhije.
Na primjer, ako je "distrikt" najmanja podjela u hijerarhiji, to znači da sve određene lokacije moraju imati distrikt kao roditelja.', "Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": "Odaberite ovo ako sve specifične lokacije trebaju imati nadređenu lokaciju u hijerarhiji lokacija. Ovo može pomoći u postavljanju 'regije' koja predstavlja pogođenu oblast", 'Select this if you need this resource to be mapped from site_id instead of location_id.': 'Odaberite ovo ako želite da se resurs mapira prema oznaci mjesta site_id umjesto lokacije location_id.', 'Select This Location': 'Odaberi ovu lokaciju', 'Select to show this configuration in the menu.': 'Odaberi prikaz ove konfiguracije u meniju', 'Select to show this configuration in the Regions menu.': 'Izaberite da vam se prikaže ova konfiguracija u meniju Regije', 'selected': 'odabran', 'Selected Jobs': 'Izabrani poslovi', 'Selected OCR Form has no pages. Use another revision of create a new revision by downloading a new Form.': 'Odabrani OCR formular nema stranice. Koristite drugu reviziju ili kreirajte novu reviziju preuzimanjem novog formulara.', 'Selected Questions for all Completed Assessment Forms': 'Odabrana pitanja za sve ispunjene formulare procjene', 'Selects what type of gateway to use for outbound SMS': 'Bira tip mrežnog izlaza za izlazni SMS', 'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Bira da li koristiti modem, tropo ili neki drugi način za slanje SMS', 'Selects whether to use the gateway or the Modem for sending out SMS': 'Bira da li koristiti mrežni izlaz ili modem za slanje SMS', 'Self Registration': 'Samoregistracija', 'Self-care': 'Vlastita briga', 'Self-registration': 'Samoregistracija', 'Send': 'Pošalji', 'Send & Receive Email messages (e.g. for alerting)': 'Pošalji i primi Email poruke (npr. za alarmiranje)', 'Send & Receive SMS messages (e.g. for alerting)': 'Pošalji i primi SMS poruke (npr.
za alarmiranje)', 'Send a message to this person': 'Pošalji poruku ovoj osobi', 'Send a message to this team': 'Pošalji poruku ovom timu', 'Send Alerts using Email &/or SMS': 'Poslati upozorenje koristeći Email i/ili SMS', 'Send batch': 'Paketno slanje', 'Send Commitment as Shipment': 'Pošalji zaduženje kao pošiljku', 'Send Dispatch Update': 'Pošalji ažuriranje raspodjele', 'Send Email': 'Pošalji e-mail', 'Send from %s': 'Pošalji sa %s', 'Send Message': 'Pošalji poruku', 'Send message': 'Pošalji poruku', 'Send new message': 'Pošalji novu poruku', 'Send New Shipment': 'Pošalji novu pošiljku', 'Send Notification': 'Pošalji obavijest', 'Send Shipment': 'Slanje pošiljke', 'Send SMS': 'Pošalji SMS', 'Send Task Notification': 'Obavijest o slanju zadatka', 'Sender': 'Pošiljalac', 'Sender deleted': 'Pošiljalac obrisan', 'Sender Priority updated': 'Prioritet pošiljaoca ažuriran', 'Sender Whitelisted': 'Pošiljalac na bijeloj listi', 'Sends & Receives Alerts via Email & SMS': 'Šalje i prima upozorenja putem emaila i SMS-a', 'Senior (50+)': 'Stariji (50+)', 'Sensitivity': 'Osjetljivost', 'Sent': 'Poslano', 'Sent By': 'Poslano od', 'Sent By Person': 'Poslano putem osobe', 'Sent date': 'Poslano dana', 'Sent Emails': 'Poslana elektronska pošta', 'Sent Item deleted': 'Poslana stavka obrisana', 'Sent Item Details': 'Detalji poslanog predmeta', 'Sent Item updated': 'Poslana stavka ažurirana', 'Sent Shipment canceled': 'Poslana pošiljka otkazana', 'Sent Shipment canceled and items returned to Inventory': 'Poslana pošiljka otkazana i predmeti vraćeni u inventar', 'Sent Shipment canceled and items returned to Warehouse': 'Poslana pošiljka otkazana i predmeti vraćeni u skladište', 'Sent Shipment Details': 'Detalji poslate pošiljke', 'Sent Shipment has returned, indicate how many items will be returned to Warehouse.': 'Poslana pošiljka je vraćena, navedite koliko će se stavki vratiti u skladište', 'Sent Shipment updated': 'Poslana pošiljka ažurirana', 'Sent Shipments': 'Poslate pošiljke', 'Sent SMS': 'Poslani SMS', 'Sent Tweets': 'Poslani Tweets', 'Separate latrines for women and men': 'Odvojeni zahodi za muškarce i žene', 'separated': 'razdvojeni', 'Separated children, caregiving arrangements': 'Djeca odvojena od roditelja, raspored skrbnika', 'separated from family': 'odvojen/a od porodice', 'Seraiki': 'Seraiki', 'Serbia': 'Srbija', 'Serial Number': 'Serijski broj', 'Series': 'Serije', 'Series added': 'Serija dodana', 'Series deleted': 'Serija obrisana', 'Series Details': 'Detalji serije', 'Series details missing': 'Nedostaju detalji serije', 'Series updated': 'Serija ažurirana', 'Server': 'Server', 'Service': 'Usluga', 'Service added': 'Usluga dodana', 'Service added to Organization': 'Usluga dodana organizaciji', 'Service Catalogue': 'Katalog usluga', 'Service deleted': 'Usluga obrisana', 'Service Details': 'Detalji usluge', 'Service Due': 'Rok usluge', 'Service or Facility': 'Usluga ili objekat', 'Service profile added': 'Dodat profil usluge', 'Service profile deleted': 'Obrisan profil usluge', 'Service profile updated': 'Ažuriran profil usluge', 'Service Record': 'Zapis usluge', 'Service removed from Organization': 'Usluga uklonjena iz organizacije', 'Service updated': 'Usluga ažurirana', 'Services': 'Usluge', 'Services Available': 'Dostupne usluge', 'Set as default Site': 'Postavi kao podrazumijevano mjesto', 'Set as my Default': 'Postavi kao moje podrazumijevano', 'Set Base Facility/Site': 'Postavi bazni objekat/mjesto', 'Set Base Site': 'Postavi osnovnu lokaciju', 'Set By': 'Postavio', 'Set True to
allow editing this level of the location hierarchy by users who are not MapAdmins.': 'Postaviti na True da se dozvoli uređivanje ovog nivoa hijerarhije lokacija korisnicima koji nisu MapAdmin.', 'Setting added': 'Podešavanje dodano', 'Setting deleted': 'Podešavanje obrisano', 'Setting Details': 'Detalji postavke', 'Setting updated': 'Postavka ažurirana', 'Settings': 'Postavke', 'Settings updated': 'Podešavanja ažurirana', 'Settings were reset because authenticating with Twitter failed': 'Postavke su resetovane zbog neuspjele autentifikacije sa Twitterom', 'Settings which can be configured through the web interface are available here.': 'Postavke, koje je moguće konfigurisati putem web sučelja, su dostupne ovdje.', 'Severe': 'Ozbiljno', 'Severity': 'Ozbiljnost', 'Severity:': 'Ozbiljnost:', 'Sex': 'Spol', 'Seychelles': 'Sejšeli', 'shallow': 'plitak', 'Shapefile Layer': 'Sloj Shapefile datoteke', 'Share': 'Dijeli', 'Share a common Marker (unless over-ridden at the Feature level)': 'Podijeli zajednički marker (osim ukoliko nije zamijenjeno na nivou karakteristika)', 'shaved': 'obrijano', 'Shelter': 'Sklonište', 'Shelter & Essential NFIs': 'Sklonište & osnovni neprehrambeni artikli', 'Shelter added': 'Sklonište dodato', 'Shelter deleted': 'Sklonište obrisano', 'Shelter Details': 'Detalji o skloništu', 'Shelter Manager': 'Menadžer skloništa', 'Shelter Name': 'Naziv skloništa', 'Shelter Registry': 'Registar skloništa', 'Shelter Service': 'Usluga skloništa', 'Shelter Service added': 'Usluga skloništa dodana', 'Shelter Service deleted': 'Usluga skloništa obrisana', 'Shelter Service Details': 'Detalji usluga skloništa', 'Shelter Service updated': 'Usluga skloništa ažurirana', 'Shelter Services': 'Usluge skloništa', 'Shelter Settings': 'Postavke skloništa', 'Shelter Status': 'Status skloništa', 'Shelter Status added': 'Status skloništa dodan', 'Shelter Status deleted': 'Status skloništa obrisan', 'Shelter Status Details': 'Detalji statusa skloništa', 'Shelter Status updated': 'Status skloništa ažuriran', 'Shelter Statuses': 'Statusi skloništa', 'Shelter Type': 'Tip skloništa', 'Shelter Type added': 'Tip skloništa dodan', 'Shelter Type deleted': 'Tip skloništa obrisan', 'Shelter Type Details': 'Detalji o tipu skloništa', 'Shelter Type updated': 'Tip skloništa ažuriran', 'Shelter Types': 'Tipovi skloništa', 'Shelter Types and Services': 'Vrste i usluge skloništa', 'Shelter updated': 'Sklonište ažurirano', 'Shelter/NFI Assistance': 'Sklonište/Pomoć u neprehrambenim artiklima', 'Shelter/NFI assistance received/expected': 'Sklonište/Pomoć u neprehrambenim artiklima/očekivano', 'Shelters': 'Skloništa', 'shift_start': 'početak_smjene', 'Shipment': 'Pošiljka', 'Shipment Created': 'Pošiljka napravljena', 'Shipment Details': 'Detalji pošiljke', 'Shipment Item deleted': 'Predmet dostave obrisan', 'Shipment Item Details': 'Detalji stavki pošiljke', 'Shipment Item updated': 'Stavka dostave ažurirana', 'Shipment Items': 'Stavke pošiljke', 'Shipment Items Received': 'Primljene stavke pošiljke', 'Shipment Items received by Inventory': 'Pošiljke primljene u skladište', 'Shipment Items sent from Inventory': 'Isporuke stavki poslatih iz inventara', 'Shipment Items sent from Warehouse': 'Isporuke stavki poslatih iz skladišta', 'Shipment received': 'Primljena pošiljka', 'Shipment to Receive': 'Pošiljka za prijem', 'Shipment to Send': 'Pošiljka za poslati', 'Shipment Type': 'Tip pošiljke', 'Shipment/Way Bills deleted': 'Dostavnica/putni nalog obrisana', 'Shipments': 'Pošiljke', 'Shipments To': 'Pošiljke do', 'Shipping
Organization': 'Organizacija dostave', 'Shooting': 'Pucnjava', 'short': 'kratki', 'Short Assessment': 'Kratka procjena', 'Short Description': 'Kratak opis', 'Short Description:': 'Kratak opis:', 'Short Text': 'Kratki tekst', 'Short Title / ID': 'Kratki naslov / ID', 'short<6cm': 'kratak<6cm', 'Show': 'Prikaži', 'Show _MENU_ entries': 'Prikaži _MENU_ članova', 'Show author picture?': 'Prikaži sliku autora?', 'Show Checklist': 'Prikaži kontrolnu listu', 'Show Details': 'Prikaži detalje', 'Show in Menu?': 'Prikazati u meniju?', 'Show Location?': 'Prikaži lokaciju?', 'Show Map': 'Prikaži kartu', 'Show on map': 'Prikaži na mapi', 'Show on Map': 'Prikaži na karti', 'Show Region in Menu?': 'Pokaži regiju u meniju?', 'Show Table': 'Prikaži tabelu', 'Show totals': 'Prikaži sumarne kolone', 'Showing 0 to 0 of 0 entries': 'Prikaz 0 do 0 od 0 unosa', 'Showing _START_ to _END_ of _TOTAL_ entries': 'Prikazujem _START_ do _END_ od _TOTAL_ zapisa', 'Showing latest entries first': 'Zadnji unosi se prvi prikazuju', 'sides': 'strane', 'Sierra Leone': 'Sierra Leone', 'Sign-up as a volunteer': 'Prijavi se kao volonter', 'Sign-up for Account': 'Prijavi se za račun', 'sign-up now': 'Prijavi se sada', 'Sign-up succesful - you should hear from us soon!': 'Prijava uspješna, uskoro ćemo Vam se javiti!', 'Signature': 'Potpis', 'Signature / Stamp': 'Potpis / Pečat', 'simple': 'jednostavan', 'Sindhi': 'Sindi', 'Singapore': 'Singapur', 'single': 'samac', 'Single PDF File': 'Jedan PDF dokument', 'Site': 'Lokacija', 'Site Administration': 'Administracija lokacije', 'Site Contact': 'Kontakt mjesta', 'Site ID': 'ID mjesta', 'Site Key': 'Ključ mjesta', 'Site Key which this site uses to authenticate at the remote site (if required for this type of repository).': 'Ključ sajta kojim se on prijavljuje na udaljeni sajt (ako je potrebno za ovu vrstu repozitorija).', 'Site Location Description': 'Opis lokacije mjesta', 'Site Location Name': 'Naziv lokacije mjesta', 'Site Manager': 'Menadžer mjesta', 'Site Name': 'Ime mjesta', 'Site Needs': 'Potrebe mjesta', 'Site Needs added': 'Potrebe mjesta dodane', 'Site Needs deleted': 'Potrebe mjesta obrisane', 'Site Needs updated': 'Potrebe mjesta ažurirane', 'Site/Warehouse': 'Mjesto/Skladište', 'Sites': 'Mjesta', 'SITUATION': 'SITUACIJA', 'Situation': 'Situacija', 'Situation Awareness': 'Svjesnost Situacije', 'Situation Awareness & Geospatial Analysis': 'Svjesnost Situacije & Geoprostorna Analiza', 'Situation Map': 'Mapa situacije', 'Size of cache:': 'Veličina keša:', 'Skeleton Example': 'Primjer skeleta', 'Sketch': 'Nacrt', 'Skill': 'Vještina', 'Skill added': 'Vještina dodana', 'Skill added to Request': 'Vještina dodana zahtjevu', 'Skill Catalog': 'Katalog vještina', 'Skill deleted': 'Vještina obrisana', 'Skill Details': 'Detalji vještine', 'Skill Equivalence': 'Ekvivalencija vještine', 'Skill Equivalence added': 'Dodata ekvivalencija vještine', 'Skill Equivalence deleted': 'Obrisana ekvivalencija vještine', 'Skill Equivalence Details': 'Detalji ekvivalentnosti vještina', 'Skill Equivalence updated': 'Ažurirana ekvivalencija vještine', 'Skill Equivalences': 'Ekvivalencije vještina', 'Skill Provision': 'Pružanje vještina', 'Skill Provision added': 'Pružanje vještine dodato', 'Skill Provision Catalog': 'Katalog pribavljanja vještina', 'Skill Provision deleted': 'Pružanje vještina obrisano', 'Skill Provision Details': 'Detalji o pružanju vještina', 'Skill Provision updated': 'Pružanje vještina ažurirano', 'Skill Provisions': 'Pružanja vještina', 'Skill removed': 'Uklonjena vještina', 'Skill
removed from Request': 'Uklonjena vještina iz Zahtjeva', 'Skill Status': 'Status vještina', 'Skill TYpe': 'Vrsta vještine', 'Skill Type': 'Tip sposobnosti', 'Skill Type added': 'Dodan tip vještine', 'Skill Type Catalog': 'Katalog vrsta vještina', 'Skill Type deleted': 'Obrisan tip vještine', 'Skill Type Details': 'Detalji tipa sposobnosti', 'Skill Type updated': 'Ažuriran tip vještine', 'Skill Types': 'Tipovi vještina', 'Skill updated': 'Vještina ažurirana', 'Skills': 'Vještine', 'Skills Catalog': 'Katalog vještina', 'Skills Management': 'Upravljanje vještinama', 'Skin Marks': 'Oznake na koži', 'slight': 'pomalo', 'slim': 'vitak', 'Slope failure, debris': 'Propast padine, krhotine', 'Slovakia': 'Slovačka', 'Slovenia': 'Slovenija', 'small': 'mali', 'Small scale mitigation': 'Smanjenje u malom stepenu', 'Small Trade': 'Mala trgovina', 'Smoke': 'Dim', 'Smoking habits': 'Pušačke navike', 'SMS added': 'SMS dodan', 'SMS deleted': 'SMS obrisan', 'SMS Details': 'Detalji o SMS', 'SMS Gateway Settings': 'Postavke SMS izlaza', 'SMS InBox': 'SMS dolazne poruke', 'SMS Modems (Inbound & Outbound)': 'SMS Modemi (ulazni i izlazni)', 'SMS Outbound': 'Odlazni SMS', 'SMS Outbound Gateway': 'SMS izlaz', 'SMS Outbound Gateway updated': 'SMS mrežni izlaz ažuriran', 'SMS OutBox': 'SMS odlazne poruke', 'SMS Settings': 'SMS Postavke', 'SMS settings updated': 'Postavke SMS-a su ažurirane', 'SMS updated': 'SMS ažuriran', 'SMS via SMTP (Outbound)': 'SMS preko SMTP (izlazni)', 'SMS WebAPI (Outbound)': 'SMS WebAPI (izlazni)', 'SMTP to SMS settings updated': 'SMTP u SMS postavke ažurirane', 'Snapshot': 'Snimak stanja', 'Snapshot Report': 'Kratko izvješće', 'Snow Fall': 'Sniježne padavine', 'Snow Squall': 'Snježna oluja', 'Social': 'Društveno', 'Soil bulging, liquefaction': 'Ispupčenje tla, rastapanje', 'Soliciting Cash Donations?': 'Prikupljanje donacija u gotovini?', 'Solicitudes': 'Zabrinutost', 'Solid waste': 'Kruti otpad', 'Solution': 'Rješenje', 'Solution added': 'Rješenje dodano', 'Solution deleted': 'Rješenje izbrisano', 'Solution Details': 'Detalji rješenja', 'Solution Item': 'Stavka rješenja', 'Solution updated': 'Rješenje izmijenjeno', 'Solutions': 'Rješenja', 'Somalia': 'Somalija', 'Some': 'Neki', 'Sorry - the server has a problem, please try again later.': 'Izvinjavamo se - problem sa serverom, molimo pokušajte kasnije.', 'Sorry location %(location)s appears to be outside the area of parent %(parent)s.': 'Žao nam je, ali izgleda da se lokacija %(location)s nalazi izvan oblasti roditelja %(parent)s.', 'Sorry location %(location)s appears to be outside the area supported by this deployment.': 'Izvinite ali lokacija %(location)s je izvan područja koje je podržano ovom instalacijom.', 'Sorry location appears to be outside the area of parent %(parent)s.': 'Žao nam je, ali izgleda da se lokacija nalazi izvan oblasti roditelja %(parent)s.', 'Sorry location appears to be outside the area supported by this deployment.': 'Izvinite ali ta lokacija je izvan područja koje je podržano ovom instalacijom', 'Sorry that location appears to be outside the area of the Parent.': 'Žao nam je, ali izgleda da se lokacija nalazi izvan oblasti roditelja.', 'Sorry that location appears to be outside the area supported by this deployment.': 'Nažalost ali ta lokacija je izvan područja koje je podržano ovim sistemom.', 'Sorry, I could not understand your request': 'Oprostite, ne mogu razumjeti vaš zahtjev.', 'Sorry, only users with the MapAdmin role are allowed to create location groups.': 'Nažalost samo korisnicima sa MapAdmin ulogom
je dozvoljeno kreiranje grupa lokacija.', 'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Žao nam je, samo korisnici sa ulogom administratora mape imaju dozvolu da uređuju ove lokacije', 'Sorry, something went wrong.': 'Žao nam je, dogodila se greška.', 'Sorry, that page is forbidden for some reason.': 'Nažalost, ta stranica je zabranjena iz nekog razloga.', 'Sorry, that service is temporary unavailable.': 'Žao nam je, ova usluga je trenutno nedostupna.', 'Sorry, there are no addresses to display': 'Izvinite, ne postoje adrese za prikaz', "Sorry, things didn't get done on time.": 'Žao nam je, stvari nisu završene na vrijeme', "Sorry, we couldn't find that page.": 'Izvinite, ta stranica nije pronađena.', 'Source': 'Izvor', 'source': 'izvor', 'Source deleted': 'Izvor izbrisan', 'Source ID': 'Identifikacija izvora', 'Source Link': 'Izvorna veza', 'Source Name': 'Ime izvora', 'Source of Information': 'Izvor informacije', 'Source Time': 'Izvorno vrijeme', 'Source updated': 'Izvor ažuriran', 'Source URL': 'URL izvora', 'Sources': 'Izvori', 'Sources of income': 'Izvori prihoda', 'South Africa': 'Južna Afrika', 'South Ossetia': 'Južna Osetija', 'Space Debris': 'Svemirski otpad', 'Spain': 'Španija', 'Spanish': 'Španski', 'Special Ice': 'Specijalni led', 'Special Marine': 'Posebna mornarica', 'Special Multirisk Protection Vehicle': 'Specijalno vozilo za zaštitu od višestrukog rizika', 'Special needs': 'Specijalne potrebe', 'Specialized Hospital': 'Specijalizovana bolnica', 'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Tačno mjesto (npr. zgrada / soba) u okviru lokacije na kojem je ta osoba/grupa viđena.', 'Specific locations need to have a parent of level': 'Specifične lokacije moraju imati roditelja nivoa', 'Specific Operations Vehicle': 'Vozilo za specifične poslove', 'specify': 'precizirati', 'Specify a descriptive title for the image.': 'Specificiraj opisni naslov za sliku', 'Specify the bed type of this unit.': 'Specifikuj tip kreveta za ovu jedinicu', 'Specify the minimum sustainability in weeks or days.': 'Navedi minimalnu održivost u sedmicama ili danima.', 'Specify the number of available sets': 'Specificiraj broj raspoloživih setova', 'Specify the number of available units (adult doses)': 'Specificiraj broj dostupnih jedinica (odrasle doze)', 'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Specificirajte broj dostupnih jedinica (litara) Ringer-Laktata ili ekvivalentnih rastvora', 'Specify the number of sets needed per 24h': 'Specificirati broj skupova potrebnih za 24h', 'Specify the number of units (adult doses) needed per 24h': 'Specificiraj broj jedinica (doza za odrasle) potrebnih u 24 sata', 'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Naznačite broj jedinica (litara) Ringer-laktata ili ekvivalentnih rastvora potrebnih za 24h', 'Speed': 'Brzina', 'Spherical Mercator (900913) is needed to use OpenStreetMap/Google/Bing base layers.': 'Sferni Mercator (900913) je potreban za upotrebu OpenStreetMap/Google/Bing baznih slojeva.', 'Spherical Mercator?': 'Sferna Merkatorova?', 'Spreadsheet': 'Tabela (spreadsheet)', 'Spreadsheet Importer': 'Uvoznik tabelarnog prikaza', 'Spreadsheet uploaded': 'Učitan tabelarni prikaz', 'Spring': 'Proljeće', 'Squall': 'Udar vjetra', 'squint-eyed': 'razrok', 'Sri Lanka': 'Šri Lanka', 'Staff': 'Osoblje', 'staff': 'osoblje', 'Staff & Volunteers': 'Osoblje i volonteri', 'Staff &
Volunteers (Combined)': 'Osoblje i volonteri (kombinovano)', 'Staff 2': 'Osoblje 2', 'Staff added': 'Član osoblja dodan', 'Staff and Volunteers': 'Osoblje i volonteri', 'Staff Assigned': 'Dodijeljeno osoblje', 'Staff Assignment Details': 'Detalji o dodjeli osoblja', 'Staff Assignment removed': 'Obrisana dodjela osoblja', 'Staff Assignment updated': 'Dodjela osoblja ažurirana', 'Staff Assignments': 'Dodjele osoblja', 'Staff deleted': 'Osoblje obrisano', 'Staff ID': 'ID osoblja', 'Staff Management': 'Upravljanje osobljem', 'Staff member added': 'Član osoblja dodan', 'Staff Member added': 'Član osoblja dodan', 'Staff Member deleted': 'Član osoblja obrisan', 'Staff Member Details': 'Detalji o članovima osoblja', 'Staff Member Details updated': 'Detalji člana osoblja ažurirani', 'Staff Members': 'Članovi osoblja', 'staff members': 'članovi osoblja', 'Staff present and caring for residents': 'Osoblje je prisutno i brine za stanovnike', 'Staff Record': 'Zapis o osoblju', 'Staff Report': 'Izvještaj o osoblju', 'Staff Type added': 'Tip osoblja dodan', 'Staff Type deleted': 'Tip osoblja obrisan', 'Staff Type Details': 'Detalji o tipu osoblja', 'Staff Type updated': 'Tip osoblja ažuriran', 'Staff Types': 'Vrste osoblja', 'Staff updated': 'Osoblje ažurirano', 'Staff with Contracts Expiring in the next Month': 'Osoblje čiji ugovor ističe sljedećeg mjeseca', 'Staff/Volunteer': 'Osoblje/volonteri', 'Staff/Volunteer Record': 'Zapis o osoblju/volonterima', 'Staff2': 'Osoblje2', 'Staffing': 'Zapošljavanje', 'Staffing Level': 'Nivo osoblja', 'Stairs': 'Stepenice', 'Start Date': 'Datum početka', 'Start date': 'Datum početka', 'Start of Period': 'Početak perioda', 'state': 'stanje', 'State': 'Država', 'State / Province': 'Entitet / Savezna država', 'state location': 'položaj države', 'Stationery': 'Kancelarijski materijal', 'Statistics': 'Statistika', 'Status': 'Status', "Status 'assigned' requires the %(fieldname)s to not be blank": "Status 'dodijeljen' zahtijeva da %(fieldname)s nije prazno", 'Status added': 'Dodat status', 'Status deleted': 'Obrisan status', 'Status Details': 'Detalji statusa', 'Status of clinical operation of the facility.': 'Status kliničkog rada objekta.', 'Status of general operation of the facility.': 'Status generalnih operacija objekta', 'Status of morgue capacity.': 'Status kapaciteta mrtvačnice', 'Status of operations of the emergency department of this hospital.': 'Operativni status hitnog odjela ove bolnice', 'Status of operations/availability of emergency medical services at this facility.': 'Status operacija/dostupnosti hitnih medicinskih usluga na ovom objektu.', 'Status of security procedures/access restrictions for the facility.': 'Status sigurnosnih procedura/ograničenja pristupa za ovaj objekat.', 'Status of security procedures/access restrictions in the hospital.': 'Status sigurnosnih procedura/zabrane pristupa u bolnici.', 'Status of the clinical departments.': 'Status kliničkih odjela', 'Status of the facility.': 'Stanje objekta', 'Status of the operating rooms of this facility.': 'Status operacionih sala ovog objekta', 'Status of the operating rooms of this hospital.': 'Status operacionih sala u ovoj bolnici.', 'Status Report': 'Izvještaj o stanju', 'Status Report added': 'Statusni izvještaj dodan', 'Status Report deleted': 'Statusni izvještaj obrisan', 'Status Report updated': 'Statusni izvještaj ažuriran', 'Status Updated': 'Status ažuriran', 'Status updated': 'Ažuriran status', 'Statuses': 'Statusi', 'Steel frame': 'Čelični okvir', 'Stock': 'Zaliha', 'Stock added to
Warehouse': 'Zaliha dodana u skladište', 'Stock Adjustment': 'Prilagođenje zalihe', 'Stock Adjustment Details': 'Detalji prilagođenja zalihe', 'Stock Adjustments': 'Prilagođenja zaliha', 'Stock Count created': 'Broj zaliha kreiran', 'Stock Count deleted': 'Količina zalihe obrisana', 'Stock Count Details': 'Detalji o broju zaliha', 'Stock Count modified': 'Broj zaliha izmijenjen', 'Stock Counts': 'Količine zaliha', 'Stock Expires %(date)s': 'Zaliha ističe %(date)s', 'Stock in Warehouse': 'Zaliha u skladištu', 'Stock removed from Warehouse': 'Zaliha uklonjena iz skladišta', 'Stolen': 'Ukradeno', 'Storage Bin': 'Korpa za čuvanje', 'Storage Bin added': 'Dodana korpa za smještaj', 'Storage Bin deleted': 'Obrisana smještajna korpa', 'Storage Bin Details': 'Detalji korpe za smještaj', 'Storage Bin Number': 'Broj smještajne korpe', 'Storage Bin Type added': 'Dodana vrsta korpe za smještaj', 'Storage Bin Type deleted': 'Tip korpe za smještaj obrisan', 'Storage Bin Type Details': 'Detalji vrste korpe za smještaj', 'Storage Bin Type updated': 'Vrsta korpe za smještaj ažurirana', 'Storage Bin Types': 'Vrste smještajnih korpi', 'Storage Bins': 'Korpe za smještaj', 'Storage Capacity (m3)': 'Kapacitet smještaja (m3)', 'Storage Location': 'Lokacija skladišta', 'Storage Location deleted': 'Lokacija skladišta obrisana', 'Storage Location ID': 'ID lokacije skladišta', 'Storage Location Name': 'Naziv lokacije skladišta', 'Storage Locations': 'Lokacije skladišta', 'Storage Type': 'Tip smještaja', 'Store spreadsheets in the Eden database': 'Sačuvaj proračunske tablice u Eden bazu podataka', 'Storeys at and above ground level': 'Spratovi na i iznad razine tla', 'Storm Force Wind': 'Olujni vjetar', 'Storm Surge': 'Olujni val', 'Stowaway': 'Slijepi putnik', 'straight': 'pravo', 'Strategy': 'Strategija', 'Street (add.)': 'Ulica (dodatak)', 'Street (continued)': 'Ulica (nastavljena)', 'Street Address': 'Adresa (ulica)', 'Street View': 'Prikaz ulica', 'Streetview Enabled?': 'Streetview omogućen?', 'String used to configure Proj4js. Can be found from %(url)s': 'String korišten za konfiguraciju Proj4js.
Može se naći na %(url)s', 'Strong': 'Jako', 'Strong Wind': 'Jak vjetar', 'Structural': 'Strukturalno', 'Structural Hazards': 'Strukturne opasnosti', 'Style': 'Stil', 'Style Field': 'Polje stila', 'Style invalid': 'Neispravan stil', 'Style Values': 'Vrijednosti stila', 'Sub Category': 'Potkategorija', 'Sub-type': 'Podtip', 'Subject': 'Tema', 'Submission Succesful': 'Predaja uspješna', 'Submission successful - please wait': 'Slanje uspješno - molimo pričekajte', 'Submission successful - please wait...': 'Podnesak uspješan - molimo pričekajte...', 'Submit': 'Unesi', 'submit': 'unesi', 'Submit a request for recovery': 'Podnijeti zahtjev za oporavak', 'Submit New': 'Predaj novi', 'Submit New (full form)': 'Podnesite novi (potpuna forma)', 'Submit New (triage)': 'Navedi novi (trijaža)', 'Submit new Level 1 assessment (full form)': 'Potvrdi novo procjenjivanje prvog nivoa (potpuna forma)', 'Submit new Level 1 assessment (triage)': 'Proslijedi novu procjenu nivoa 1 (trijaža)', 'Submit new Level 2 assessment': 'Podnesi novu procjenu Nivoa 2', 'Submitting information about the individual such as identification numbers, physical appearance, last seen location, status, etc': 'Slanje informacija o pojedincima, poput identifikacionih brojeva, fizičkog izgleda, mjesta gdje je zadnji put viđen, status itd', 'Subscribe': 'Pretplati se', 'Subscription added': 'Pretplata dodana', 'Subscription deleted': 'Pretplata obrisana', 'Subscription Details': 'Detalji pretplate', 'Subscription updated': 'Pretplata izmijenjena', 'Subscriptions': 'Pretplate', 'Subscriptions Status': 'Status pretplate', 'Subsector': 'Podsektor', 'Subsector added': 'Podsektor dodat', 'Subsector deleted': 'Podsektor izbrisan', 'Subsector Details': 'Detalji o podsektoru', 'Subsector updated': 'Ažuriran podsektor', 'Subsectors': 'Podsektori', 'Subsistence Cost': 'Trošak opstanka', 'SubType of': 'Podtip od', 'Suburb': 'Predgrađe', 'Successfully registered at the repository.': 'Uspješno registrovano na repozitoriju.', 'suffered financial losses': 'pretrpljeni finansijski gubici', 'Sufficient care/assistance for chronically ill': 'Dovoljna briga/pomoć za hronično bolesne', 'Suggest not changing this field unless you know what you are doing.': 'Predlažemo da ne vršite nikakve izmjene ovog polja, osim ako ne znate šta radite.', 'Summary': 'Sažetak', 'Summary by Administration Level': 'Sažetak na administrativnom nivou', 'Summary by Question Type - (The fewer text questions the better the analysis can be)': 'Sumarno po vrsti pitanja - (Što je manje tekstualnih pitanja, bolja je analiza)', 'Summary Details': 'Detalji sažetka', 'Summary of Completed Assessment Forms': 'Sažetak završenih formulara procjene', 'Summary of Incoming Supplies': 'Sumarno ulaz zaliha', 'Summary of Releases': 'Sumarno izlaz', 'Sunday': 'Nedjelja', 'Supervisor': 'Nadglednik', 'Supplier': 'Dobavljač', 'Supplier added': 'Dobavljač dodan', 'Supplier deleted': 'Dobavljač obrisan', 'Supplier Details': 'Detalji dobavljača', 'Supplier updated': 'Dobavljač ažuriran', 'Supplier/Donor': 'Dobavljač/donator', 'Suppliers': 'Dobavljači', 'Supply Chain Management': 'Upravljanje lancom zaliha', 'Supply Item Categories': 'Kategorije predmeta za snabdijevanje', 'Support Request': 'Zahtjev za podršku', 'Support Requests': 'Zahtjevi za podršku', 'supports nurses in the field to assess the situation, report on their activities and keep oversight.': 'podrška medicinskim sestrama vezano za procjenu situacije, praćenje aktivnosti i nadzor', 'Supports the decision making of large groups of Crisis Management Experts
by helping the groups create ranked list.': 'Podržava odlučivanje velikih grupa eksperata kriznog menadžmenta pomažući grupama da kreiraju rangirane liste', 'Sure you want to delete this object?': 'Da li ste sigurni da želite da obrišete ovaj objekat?', 'Surgery': 'Operacija', 'Suriname': 'Surinam', 'Surplus': 'Višak vrijednosti', 'Survey Answer': 'Anketni odgovor', 'Survey Answer added': 'Dodat anketni odgovor', 'Survey Answer deleted': 'Izbrisan odgovor na istraživanje', 'Survey Answer Details': 'Detalji odgovora upitnika', 'Survey Answer updated': 'Odgovori anketa ažurirani', 'Survey Module': 'Modul istraživanja', 'Survey Name': 'Naziv istraživanja', 'Survey Question': 'Anketno pitanje', 'Survey Question added': 'Anketno pitanje dodano', 'Survey Question deleted': 'Pitanje ankete obrisano', 'Survey Question Details': 'Detalji pitanja ankete', 'Survey Question Display Name': 'Naslovno Ime pitanja ankete', 'Survey Question updated': 'Anketno pitanje ažurirano', 'Survey Section': 'Anketna sekcija', 'Survey Section deleted': 'Izbrisan odjeljak istraživanja', 'Survey Section Details': 'Detalji odjeljka ankete', 'Survey Section Display Name': 'Naslovno ime odjeljka ankete', 'Survey Section updated': 'Odjeljak ankete ažuriran', 'Survey Series': 'Niz anketa', 'Survey Series added': 'Niz anketa dodan', 'Survey Series deleted': 'Serija anketa obrisana', 'Survey Series Details': 'Detalji toka ankete', 'Survey Series Name': 'Naziv niza anketa', 'Survey Series updated': 'Serija istraživanja ažurirana', 'Survey Template': 'Šablon za anketu', 'Survey Template added': 'Obrazac za upitnik dodan', 'Survey Template deleted': 'Predložak ankete obrisan', 'Survey Template Details': 'Detalji predloška ankete', 'Survey Template updated': 'Šablon za anketu ažuriran', 'Survey Templates': 'Šabloni anketa', 'Surveys': 'Istraživanja', 'Swaziland': 'Svazilend', 'Sweden': 'Švedska', 'Switch to 3D': 'Prebaci na 3D', 'Switzerland': 'Švajcarska', 'Symbologies': 'Značenja simbola', 'Symbology': 'Značenje simbola', 'Symbology added': 'Značenje simbola dodano', 'Symbology deleted': 'Značenje simbola obrisano', 'Symbology Details': 'Detalji značenja simbola', 'Symbology removed from Layer': 'Značenje simbola uklonjeno iz sloja', 'Symbology updated': 'Značenje simbola ažurirano', 'Sync Conflicts': 'Konflikti sinkronizacije', 'Sync History': 'Historija sinhronizovanja', 'Sync Now': 'Sinhroniziraj sad', 'Sync Partners': 'Sinhronizuj partnere', 'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'Sync Partneri su instance ili saradnici (SahanaEden, SahanaAgasti, Ushahidi, itd.) s kojima želite usklađivati podatke.
Kliknite na link na desnoj strani da odete na stranicu na kojoj možete dodati sinhronizacijskog partnera, tražiti sinhronizacijske partnere i mijenjati ih.', 'Sync Password': 'Lozinka za sinhronizaciju', 'Sync Policy': 'Politika sinhronizacije', 'Sync Pools': 'Sinhronizacija grupisanja', 'Sync process already started on': 'Proces sinhronizacije je već započeo', 'Sync process already started on ': 'Proces sinhronizacije je već započeo ', 'Sync Schedule': 'Raspored sinhronizacije', 'Sync Schedules': 'Rasporedi sinhronizacije', 'Sync Settings': 'Postavke sinhronizacije', 'Sync Settings updated': 'Postavke sinhronizacije su ažurirane', 'Sync Username': 'Korisničko ime za sinhronizaciju', 'Synchronisation': 'Sinhronizacija', 'Synchronisation - Sync Now': 'Sinhronizacija - sinhronizuj sada', 'Synchronisation History': 'Historija sinhronizacije', 'Synchronization': 'Usklađivanje', 'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Sinhronizacija vam omogućuje da dijelite vaše podatke sa ostalima i ažurirate vlastitu bazu podataka sa najnovijim podacima od ostalih učesnika. Ova stranica vam pruža informacije o tome kako koristiti pogodnosti Sahana Eden sinhronizacije.', 'Synchronization Conflicts': 'Sinhronizacijski konflikti', 'Synchronization currently active - refresh page to update status.': 'Sinhronizacija trenutno aktivna - osvježite stranicu da ažurirate status.', 'Synchronization Details': 'Detalji sinhronizacije', 'Synchronization History': 'Historija sinhronizacije', 'Synchronization Job': 'Posao sinhronizacije', 'Synchronization Log': 'Zapisnik sinhronizacije', 'Synchronization mode': 'Režim sinhronizacije', 'Synchronization not configured': 'Sinhronizacija nije konfigurisana', 'Synchronization not configured.': 'Sinhronizacija nije konfigurisana.', 'Synchronization Peers': 'Sinhronizacijski saradnici', 'Synchronization Schedule': 'Raspored sinhronizacije', 'Synchronization Settings': 'Postavke za sinhronizaciju', 'Synchronization settings updated': 'Sinhronizacijske postavke ažurirane', 'Syncronisation History': 'Historija sinhronizacije', 'Syria': 'Sirija', 'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': 'Sistem prati sve volontere koji rade u području katastrofe.
Snima ne samo mjesta gdje su aktivni, već također snima podatke o dometu usluga koje se pružaju u svakom području.', "System's Twitter account updated": 'Twitter nalog sistema je ažuriran', 'São Tomé and Príncipe': 'São Tomé i Príncipe', 'Table': 'Tabela', 'table': 'tabela', 'Table name of the resource to synchronize': 'Ime tabele s resursom za sinhronizaciju', 'Table Permissions': 'Dozvole tabele', 'table_name': 'naziv_tabele', 'Tablename': 'Ime tabele', 'Tag': 'Oznaka', 'Tag added': 'Oznaka dodana', 'Tag deleted': 'Oznaka obrisana', 'Tag Details': 'Detalji oznake', 'Tag Post': 'Stavljanje oznake', 'Tag removed': 'Oznaka uklonjena', 'Tag updated': 'Oznaka ažurirana', 'Tags': 'Oznake', 'Taiwan': 'Tajvan', 'Tajikistan': 'Tadžikistan', 'Take shelter in place or per <instruction>': 'Pronađi utočište na mjestu ili prema <instruction>', 'tall': 'visok', 'Tanzania': 'Tanzanija', 'Task': 'Zadatak', 'Task added': 'Dodan zadatak', 'Task deleted': 'Obrisan zadatak', 'Task Details': 'Detalji o zadatku', 'Task List': 'Lista zadataka', 'Task removed': 'Zadatak uklonjen', 'Task Status': 'Status zadatka', 'Task updated': 'Ažuriran zadatak', 'Tasks': 'Zaduženja', 'tattooed': 'tetoviran', 'Team': 'Tim', 'Team added': 'Dodan tim', 'Team deleted': 'Obrisan tim', 'Team Description': 'Opis tima', 'Team Details': 'Detalji tima', 'Team Head': 'Vođa tima', 'Team ID': 'ID tima', 'Team Leader': 'Vođa tima', 'Team Member added': 'Član grupe dodan', 'Team Members': 'Članovi tima', 'Team Name': 'Naziv tima', 'Team Type': 'Tip tima', 'Team updated': 'Ažuriran tim', 'Teams': 'Timovi', 'technical failure': 'tehnički kvar', 'Technical Support Vehicle': 'Vozila za tehničku podršku', 'Technical testing only, all recipients disregard': 'Samo tehničko testiranje, svi primaoci neka zanemare', 'Teeth': 'Zubi', 'Teeth, Dentures': 'Zubi, proteze', 'Teeth, Gaps between front teeth': 'Zubi, razmak između prednjih zuba', 'Teeth, Missing teeth': 'Zubi, nedostajući zubi', 'Teeth, Toothless': 'Zubi, bezub', 'Telecommunications': 'Telekomunikacije', 'Telephone': 'Telefon', 'Telephone Details': 'Telefonski detalji', 'Telephony': 'Telefonija', 'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.': 'Naređuje GeoServeru da uradi MetaTiling što smanjuje broj dupliciranih labela.', 'Temp folder %s not writable - unable to apply theme!': 'Privremeni direktorij %s nije za pisanje - nemoguće primijeniti temu!', 'Template': 'Predložak', 'Template file %s not readable - unable to apply theme!': 'Datoteka predložaka %s nije čitljiva - ne može se primijeniti tema!', 'Template Name': 'Naziv predloška', 'Template Section added': 'Dodan odjeljak predloška', 'Template Section deleted': 'Obrisan odjeljak predloška', 'Template Section Details': 'Detalji odjeljka predloška', 'Template Section updated': 'Ažuriran odjeljak predloška', 'Template Sections': 'Odjeljci predloška', 'Template Summary': 'Rezime predloška', 'Templates': 'Predlošci', 'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'Termin za peti nivo administrativne podjele unutar zemlje (npr. glasačko mjesto). Ovaj nivo se ne koristi često.', 'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'Termin za administrativnu podjelu unutar zemlje na četvrtom nivou (Mjesna zajednica)', 'Term for the primary within-country administrative division (e.g.
State or Province).': 'Termin koji se koristi za prvi nivo administrativne podjele (Entitet/Distrikt)', 'Term for the secondary within-country administrative division (e.g. District or County).': 'Termin za administrativnu podjelu drugog nivoa (Kanton/Regija)', 'Term for the third-level within-country administrative division (e.g. City or Town).': 'Termin za administrativnu jedinicu trećeg nivoa (Općina/Opština).', 'Term for the top-level administrative division (i.e. Country).': 'Termin za administrativnu podjelu najvišeg nivoa (Država)', 'Terms of Service': 'Uslovi korištenja', 'Terms of Service\n\nYou have to be eighteen or over to register as a volunteer.': 'Uvjeti korištenja\n\nMorate imati osamnaest ili više godina da biste se registrovali kao volonter.', 'Terms of Service\r\n\r\nYou have to be eighteen or over to register as a volunteer.': 'Uvjeti korištenja\r\n\r\nMorate imati osamnaest ili više godina da biste se registrovali kao volonter.', 'Terms of Service:': 'Uslovi korištenja:', 'Territorial Authority': 'Teritorijalni autoritet', 'Terrorism': 'Terorizam', 'Tertiary Server (Optional)': 'Tercijarni server (Nije obavezno)', 'Text': 'Tekst', 'Text before each Text Field (One per line)': 'Tekst ispred svakog tekstualnog polja (jedan po redu)', 'Text Colour for Text blocks': 'Boja teksta za tekst blokova', 'Text Direction': 'Smjer teksta', 'Text in Message:': 'Tekst u poruci:', 'Thailand': 'Tajland', 'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': 'Hvala na potvrdi svog email-a. Vaš korisnički račun čeka na odobrenje administratora (%s). Dobit ćete obavještenje email-om kad vam račun bude aktiviran.', 'Thanks for your assistance': 'Hvala na Vašoj pomoći', 'The': '!', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': '"Upit" je uslov poput "db.tabela1.polje1==\'vrijednost\'". Nešto poput "db.tabela1.polje1 == db.tabela2.polje2" kao rezultat daje SQL JOIN (spajanje).', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Upit" je uslov poput "db.tabela1.polje1==\'vrijednost\'".
Nešto poput "db.tabela1.polje1 == db.tabela2.polje2" kao rezultat daje SQL JOIN (spajanje).', 'The answers are missing': 'Nedostaju odgovori', 'The area is': 'Površina je', 'The Area which this Site is located within.': 'Područje u kojem se nalazi zadano mjesto', 'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyzed': 'Modul procjene čuva predloške procjene i omogućava odgovore na procjene za specifične događaje da se sakupe i analiziraju', 'The Assessments module allows field workers to send in assessments.': 'Modul procjena omogućava radnicima na terenu da pošalju procjene.', 'The asset must be assigned to a site OR location.': 'Sredstvo mora biti dodijeljeno mjestu ILI lokaciji', 'The attribute used to determine which features to cluster together (optional).': 'Atribut koji se koristi za određivanje koje se karakteristike zajedno spajaju (opciono).', 'The attribute which is used for the title of popups.': 'Atribut koji se koristi za naslove popup-a.', 'The attribute within the KML which is used for the title of popups.': 'Atribut unutar KML koji se koristi za titulu iskočnih prozora.', 'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'KML atribut(i) korišteni za tijelo iskočnih prozora. (atribute razdvojiti praznim znakom)', 'The Author of this Document (optional)': 'Autor ovog dokumenta (opcionalno)', 'The Bin in which the Item is being stored (optional).': 'Korpa u kojoj je stavka smještena (opciono)', 'The body height (crown to heel) in cm.': 'Visina (od glave do pete) u cm.', 'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'Modul za procjenu zgrada omogućava da se ocijeni sigurnost zgrade, npr. poslije zemljotresa.', 'The Camp this person is checking into.': 'Kamp u koji se ova osoba prijavljuje.', 'The Camp this Request is from': 'Kamp iz koga zahtjev potiče', 'The category of the Item.': 'Kategorija stavke', 'The client ID to use for authentication at the remote site (if required for this type of repository).': 'Klijentski ID za autentifikaciju na udaljenom sajtu (ako je potrebno za ovu vrstu repozitorija).', 'The client secret to use for authentication at the remote site (if required for this type of repository).': 'Klijentska tajna šifra potrebna za autentifikaciju na udaljenom sajtu (ako je potrebno za ovu vrstu repozitorija).', 'The country the person usually lives in.': 'Država u kojoj osoba živi.', 'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Trenutna lokacija osobe/grupe, koja može biti generalna (za izvještaje) ili precizna (za prikazivanje na mapi).
Unesite nekoliko znakova da pretražite dostupne lokacije.', 'The default Facility for which this person is acting.': 'Zadani objekt za koji data osoba djeluje.', 'The default Facility for which you are acting.': 'Podrazumijevani objekat za koji djelujete.', 'The default Organization for whom this person is acting.': 'Predefinirana organizacija za koju ova osoba djeluje.', 'The default Organization for whom you are acting.': 'Podrazumijevana organizacija za koju djelujete', 'The default policy for data import from this peer.': 'Predefinirana pravila za uvoz podataka od ovog suradnika', 'The descriptive name of the peer.': 'Opisni naziv suradnika', 'The District for this Report.': 'Geografsko područje za ovaj izvještaj', "The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "Donator(i) za ovaj projekat. Mogu se odabrati višestruke vrijednosti, držanjem pritisnute 'Control' ('Ctrl') tipke.", 'The duplicate record will be deleted': 'Dupli zapis će biti obrisan', 'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'E-mail adresa na koju su poslani zahtjevi za odobrenjem (obično će ovo biti grupni mail umjesto individualnog). Ako je polje prazno, onda se zahtjevi automatski odobravaju ako se domena podudara.', 'The facility where this position is based.': 'Objekat na kom je ova pozicija bazirana.', 'The first or only name of the person (mandatory).': 'Ime ili jedino ime osobe (obavezno)', 'The following %(new)s %(resource)s have been added': 'Sljedeći %(new)s %(resource)s su dodani', 'The following %(upd)s %(resource)s have been updated': 'Sljedeći %(upd)s %(resource)s su ažurirani', 'The following %s have been added': 'Sljedeće %s je dodano', 'The following %s have been updated': 'Sljedeće %s je ažurirano', 'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'Forma URL-a je http://your/web/map/service?service=WMS&request=GetCapabilities gdje your/web/map/service predstavlja URL stazu do WMS-a', 'The Gambia': 'Gambija', 'The Group whose members can edit data in this record.': 'Grupa čiji članovi mogu uređivati podatke u ovom zapisu', 'The hospital this record is associated with.': 'Bolnica s kojom je zapis povezan', 'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'Sistem za prijavu incidenata omogućuje javnosti da prijave incidente i da ih prati.', 'The language to use for notifications.': 'Jezik koji se koristi za obavještenja.', 'The language you wish the site to be displayed in.': 'Jezik u kojem želite da stranica bude prikazana.', 'The last known location of the missing person.': 'Zadnja poznata lokacija za nedostajuću osobu', 'The length is': 'Dužina je', 'The level at which Searches are filtered.': 'Nivo na kom su filtrirane pretrage', 'The list of Brands are maintained by the Administrators.': 'Listu marki održavaju administratori.', 'The list of Catalogs are maintained by the Administrators.': 'Listu kataloga održavaju administratori.', 'The list of Item categories are maintained by the Administrators.': 'Listu kategorija stavki održavaju administratori.', 'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).': 'Lokacija ovog mjesta, koja može biti općenita (za izvještaje) ili precizna (za prikaz na
mapi). ', 'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Lokacija odakle osoba dolazi, koja može biti generalna (za izvještavanje) ili precizna (za prikaz na mapi). Unesite nekoliko početnih karaktera za pretragu dostupnih lokacija.', 'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Lokacija na koju osoba ide, koja može biti općenita (za izvještaje) ili precizna (za prikaz na mapi). Unesi nekoliko karaktera za pretragu dostupnih lokacija.', 'The map will be displayed initially with this latitude at the center.': 'Mapa će biti prikazana inicijalno, sa ovom geografskom širinom u centru.', 'The map will be displayed initially with this longitude at the center.': 'Karta će biti prvobitno predstavljena sa ovom geografskom dužinom u centru.', 'The Maximum valid bounds, in projected coordinates': 'Maksimalne važeće granice u projektovanim koordinatama', 'The Media Library provides a catalog of digital media.': 'Media Library pruža katalog digitalnih medija', 'The Media Library provides a catalogue of digital media.': 'Media Library pruža katalog digitalnih medija', 'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'Modul za slanje poruka je glavno komunikacijsko čvorište Sahana sistema. Koristi se za slanje upozorenja i/ili poruka korištenjem SMS-a i e-maila različitim grupama i pojedincima prije, tokom i poslije nepogode.', 'The minimum number of features to form a cluster.': 'Najmanji broj karakteristika za formiranje skupa.', 'The minimum number of features to form a cluster. 0 to disable.': 'Najmanji broj karakteristika za formiranje skupa. 0 za isključiti.', 'The name to be used when calling for or directly addressing the person (optional).': 'Naziv koji se koristi kada se poziva ili neposredno obraća osobi (opcionalno).', 'The next screen will allow you to detail the number of people here & their needs.': 'Sljedeći ekran će vam omogućiti da opišete broj ljudi ovdje i njihove potrebe.', 'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...': 'Sljedeći ekran će vam omogućiti da unesete detaljan spisak objekata i količina, ako odgovara...', 'The number of pixels apart that features need to be before they are clustered.': 'Koliko piksela oznake karakteristika trebaju biti razdvojene prije njihovog grupisanja', 'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'Broj pločica oko vidljive karte za preuzimanje. Nula znači da se 1.
stranica učita brže, veće brojke čine da je naknadno pomjeranje brže.', 'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': 'Broj mjernih jedinica alternativnih stavki koji je jednak jednoj mjernoj jedinici stavke', 'The Office this record is associated with.': 'Ured s kojim je zapis povezan', 'The Organisation which is funding this Activity.': 'Organizacija koja finansira ovu aktivnost', 'The Organization Registry keeps track of all the relief organizations working in the area.': 'Registar organizacija zadržava zapise svih potpornih organizacija u radnoj oblasti.', 'The Organization this record is associated with.': 'Organizacija s kojom je zapis povezan', 'The Organization which is funding this Activity.': 'Organizacija koja finansira ovu aktivnost', 'The parse request has been submitted': 'Zahtjev za analizu je podnesen', 'The Patient Tracking system keeps track of all the evacuated patients & their relatives.': 'Sistem Praćenja Pacijenta prati sve evakuisane pacijente i njihove porodice.', 'The person at the location who is reporting this incident (optional)': 'Osoba na lokaciji koja prijavljuje ovaj incident (neobavezno)', 'The Person currently filling this Role.': 'Osoba koja trenutno obavlja ovu ulogu', 'The person reporting about the missing person.': 'Osoba koja je prijavila nestanak osobe', 'The person reporting the missing person.': 'Osoba koja je prijavila nestalu osobu', "The person's manager within this Office/Project.": 'Rukovodilac osobe u ovom uredu/projektu', 'The poll request has been submitted, so new messages should appear shortly - refresh to see them': 'Zahtjev za anketom je podnesen, pa bi se nove poruke uskoro trebale pojaviti - osvježite da ih vidite', 'The POST variable containing the phone number': 'POST varijabla koja sadrži telefonski broj', 'The post variable containing the phone number': 'Post varijabla koja sadrži telefonski broj', 'The post variable on the URL used for sending messages': 'Post varijabla u URL koja se koristi za slanje poruka', 'The POST variable on the URL used for sending messages': 'POST varijabla u URL koja se koristi za slanje poruka', 'The post variables other than the ones containing the message and the phone number': 'Varijable objave različite od onih koje sadrže poruku i broj telefona', 'The POST variables other than the ones containing the message and the phone number': 'POST varijable različite od onih koje sadrže poruku i broj telefona', "The Project module can be used to record Project Information and generate Who's Doing What Where reports.": 'Projektni modul se može koristiti za zapis projektnih informacija i generisanje izvještaja "Ko šta radi gdje?".', 'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'Modul za praćenje projekta omogućuje stvaranje aktivnosti kako bi se ispunile praznine u procjeni potreba.', "The provided 'formuuid' is invalid. You have selected a Form revision which does not exist on this server.": "Navedeni 'formuuid' je nevažeći. Odabrali ste reviziju forme koja ne postoji na ovom serveru.", "The provided 'jobuuid' is invalid. The session of Form upload is invalid. You should retry uploading.": "Navedeni 'jobuuid' je nevažeći. Sesija postavljanja formulara je nevažeća.
Trebate ponoviti postavljanje.", 'The Rapid Assessments Module stores structured reports done by Professional Organisations.': 'Modul brze procjene čuva strukturirane izvještaje koje obavljaju profesionalne organizacije', 'The Rapid Assessments Module stores structured reports done by Professional Organizations.': 'Modul brze procjene čuva strukturirane izvještaje koje obavljaju profesionalne organizacije', 'The request this record is associated with.': 'Zahtjev s kojim je ovaj zapis povezan', 'The Request this record is associated with.': 'Zahtjev s kojim je ovaj zapis povezan', 'The Role this person plays within this hospital.': 'Uloga koju ova osoba ima u ovoj bolnici.', 'The Role this person plays within this Office/Project.': 'Uloga koju ova osoba ima u ovom uredu/projektu', 'The Role to which this Role reports.': 'Uloga za koju se ova uloga izvještava.', 'The scanned copy of this document.': 'Skenirana kopija ovog dokumenta', 'The search request has been submitted, so new messages should appear shortly - refresh to see them': 'Zahtjev za pretragom je podnesen, pa bi se nove poruke ubrzo trebale pojaviti. Osvježite da ih vidite.', 'The search results are now being processed with KeyGraph': 'Rezultate pretrage trenutno obrađuje KeyGraph', 'The search results should appear shortly - refresh to see them': 'Rezultati pretrage će se uskoro pojaviti - osvježite da ih vidite', 'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'Serijski port na koji je modem priključen - npr. /dev/ttyUSB0 na linuxu i com1, com2 na Windowsu', 'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'Server nije primio pravovremeni odgovor od drugog servera, kojem je pristupao da bi popunio zahtjev od strane pretraživača.', 'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'Server je dobio pogrešan odgovor od drugog servera kojem je pristupao da bi popunio zahtjev od strane browsera.', 'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'Registar skloništa prati sva skloništa i pohranjuje osnovne detalje o njima. U saradnji sa ostalim modulima prati ljude u skloništu, dostupne usluge itd.', 'The Shelter this person is checking into.': 'Sklonište u koje se prijavljuje ova osoba', 'The Shelter this Request is from': 'Sklonište iz kog je ovaj zahtjev', 'The Shelter this Request is from (optional).': 'Sklonište iz kog je ovaj zahtjev (opciono)', 'The site where this position is based.': 'Mjesto na kojem je ova pozicija bazirana.', 'The Source this information came from.': 'Izvor odakle je došla ova informacija', "The staff member's official job title": 'Zvanično radno mjesto člana osoblja', 'The staff responsibile for Facilities can make Requests for assistance.': 'Osoblje odgovorno za ustanove može načiniti zahtjeve za pomoć.', 'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'Osoblje zaduženo za ustanove može zahtijevati pomoć. 
Obaveze se mogu vršiti nesaglasno sa tim zahtjevima, ali oni ostaju otvoreni sve dok onaj ko je izdao zahtjev ne potvrdi da je on ispunjen.', 'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'Dotični događaj ne predstavlja više prijetnju niti zabrinjava i svaka sljedeća akcija je objašnjena u <instruction>', 'The subject of the alert (optional)': 'Tema upozorenja (opciono)', 'The synchronization module allows the synchronization of data resources between Sahana Eden instances.': 'Sinhronizacijski modul omogućava sinhronizaciju podataka između kopija Sahana Eden.', 'The system supports 2 projections by default:': 'Sistem podržava 2 projekcije podrazumijevano:', 'The time at which the Event started.': 'Vrijeme u koje je događaj počeo.', 'The time at which the Incident started.': 'Vrijeme u koje je incident počeo.', 'The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones.': 'Vremenska razlika između UTC i vaše vremenske zone, navesti kao +HHMM za istočne ili -HHMM za zapadne vremenske zone.', 'The title of the page, as seen in the browser (optional)': 'Naslov stranice kakav se vidi u browser programu (opciono)', 'The token associated with this application on': 'Token povezan s ovom aplikacijom na', 'The Tracking Number %s ""is already used by %s.': 'Broj praćenja %s "" je već u upotrebi od strane %s.', 'The Unique Identifier (UUID) as assigned to this facility by the government.': 'Jedinstveni identifikator koji je pridružen ovom objektu od strane vlade', 'The unique identifier of the peer. Leave blank if the peer is no Sahana Eden instance, it will be auto-assigned in that case.': 'Jedinstveni identifikator saradnika. Ostavite prazno ako saradnik nije Sahana Eden instanca, u tom slučaju će biti automatski dodijeljeno.', 'The unique identifier which identifies this instance to other instances.': 'Jedinstveni identifikator koji razlikuje ovu instancu od ostalih.', 'The uploaded Form is unreadable, please do manual data entry.': 'Poslani formular je nečitljiv, molim obavite ručni unos podataka.', 'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': 'URL za GetCapabilities stranicu Web Map Service (WMS) čiji slojevi su dostupni na mapi.', 'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'URL za GetCapabilities stranicu Web Map Service (WMS) čiji slojevi su dostupni na pregledničkom panelu mape.', "The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'URL datoteke slike. 
Ako ne dodate sliku, morate specificirati lokaciju ovdje.', 'The URL of your web gateway without the POST parameters': 'URL Vašeg web prolaza bez POST parametara', 'The URL of your web gateway without the post parameters': 'URL Vašeg web prolaza bez post parametara', 'The URL to access the service.': 'URL za pristup usluzi.', "The volunteer's role": 'Volonterska uloga', 'The way in which an item is normally distributed': 'Način na koji je stavka normalno distribuirana', 'The weight in kg.': 'Težina u kilogramima.', 'Theme': 'Tema', 'Theme added': 'Tema dodana', 'Theme added to Activity': 'Tema dodana u aktivnost', 'Theme added to Project': 'Tema dodana u projekat', 'Theme added to Project Location': 'Tema dodana u lokaciju projekta', 'Theme Data': 'Podaci teme', 'Theme Data deleted': 'Podaci teme obrisani', 'Theme Data updated': 'Podaci teme ažurirani', 'Theme deleted': 'Tema obrisana', 'Theme Details': 'Tematski detalji', 'Theme Layer': 'Sloj teme', 'Theme removed from Activity': 'Tema uklonjena iz aktivnosti', 'Theme removed from Project': 'Tema uklonjena iz projekta', 'Theme removed from Project Location': 'Tema uklonjena iz lokacije projekta', 'Theme updated': 'Tema ažurirana', 'Themes': 'Teme', 'There are errors': 'Postoje greške', 'There are insufficient items in the Inventory to send this shipment': 'Nema dovoljno artikala u skladištu kako bi se poslala dostava', 'There are more than %(max)s results, please input more characters.': 'Ima više od %(max)s rezultata, molim unesite više znakova.', 'There are multiple records at this location': 'Ima više zapisa na ovoj lokaciji', 'There are no contacts available for this person!': 'Nema kontakata dostupnih za ovu osobu!', "There are no details for this person yet. Add Person's Details.": 'Nema detalja za ovu osobu. Dodajte detalje osobe', 'There are not sufficient items in the Inventory to send this shipment': 'Nema dovoljno artikala u skladištu kako bi se poslala dostava', 'There are not sufficient items in the store to send this shipment': 'Nema dovoljno artikala u skladištu kako bi se poslala dostava', 'There are too many features, please Zoom In or Filter': 'Ima previše karakteristika, uvećajte sliku ili filtrirajte', 'There is insufficient data to draw a chart from the questions selected': 'Nedovoljno je podataka za iscrtavanje dijagrama iz izabranih pitanja', 'There is no address for this person yet. Add new address.': 'Još ne postoji adresa za ovu osobu. Dodaj novu adresu.', 'There is no status for this %(site_label)s yet. Add %(site_label)s Status.': 'Još nema statusa za %(site_label)s. 
Dodajte %(site_label)s status.', 'There was a problem, sorry, please try again later.': 'Došlo je do problema, ispričavamo se, pokušajte ponovno kasnije.', 'These are settings for Inbound Mail.': 'Ovo su postavke za Inbound Mail', 'These are the filters being used by the search.': 'Ovo su filteri koji se koriste u pretrazi.', 'These are the Incident Categories visible to normal End-Users': 'Ovo su kategorije slučajeva, vidljive običnim krajnjim korisnicima.', 'These need to be added in Decimal Degrees.': 'Moraju biti uneseni u decimalnim stepenima.', 'They': 'Oni', 'thick': 'debelo', 'thin': 'tanko', 'this': 'ova', 'This adjustment has already been closed.': 'Ovo prilagođenje je već zatvoreno', 'This appears to be a duplicate of': 'Ovo je duplikat od', 'This appears to be a duplicate of ': 'Ovo izgleda kao duplikat od ', 'This email address is already in use': 'Ova email adresa je već u upotrebi', 'This email-address is already registered.': 'Ova email adresa je već registrovana', 'This file already exists on the server as': 'Ovaj fajl već postoji na serveru kao', 'This form allows the administrator to remove a duplicate location.': 'Ovaj formular omogućava administratoru da ukloni duple lokacije.', 'This Group has no Members yet': 'Ova grupa još nema članova', 'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': 'Ovo je prikladno ako je ovaj nivo u izgradnji. Da bi se spriječile slučajne modifikacije nakon što se ovaj nivo završi, ovo se može postaviti na Netačno', 'This is normally edited using the Widget in the Style Tab in the Layer Properties on the Map.': 'Ovo se normalno uređuje koristeći grafičke kontrole u kartici stilova, svojstava sloja na mapi.', 'This is required if analyzing with KeyGraph.': 'Ovo je potrebno ako se analizira s KeyGraph.', 'This is the full name of the language and will be displayed to the user when selecting the template language.': 'Ovo je puno ime jezika i bit će prikazano korisniku kada se odabira jezik predloška.', 'This is the short code of the language and will be used as the name of the file. This should be the ISO 639 code.': 'Ovo je kratko ime jezika i bit će korišteno kao ime datoteke. Ovo treba biti ISO 639 šifra.', 'This is the way to transfer data between machines as it maintains referential integrity.': 'Ovo je način za prenos podataka između mašina, jer održava referencijalni integritet.', 'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'Ovo je način prenosa podataka između mašina, jer održava referencijalni integritet... 
Duplicirane podatke bi prvo trebalo ručno ukloniti!', "This isn't visible to the published site, but is used to allow menu items to point to the page": 'Ovo nije vidljivo objavljenom sajtu, ali se koristi da se dopusti stavkama menija da pokazuju na stranicu', "This isn't visible to the recipients": 'Ovo nije vidljivo primaocima', 'This job has already been finished successfully.': 'Ovaj posao je već uspješno završen', 'This level is not open for editing.': 'Ovaj nivo nije otvoren za izmjene.', 'This might be due to a temporary overloading or maintenance of the server.': 'Ovo može biti zbog privremenog opterećenja ili održavanja servera.', 'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': 'Ovaj modul omogućava da stavke iz inventara budu zahtijevane i dostavljene između različitih objekata.', 'This module allows Warehouse Stock to be managed, requested & shipped between the Warehouses and Other Inventories': 'Ovaj modul omogućava da se zalihe skladišta održavaju, zahtijevaju i isporučuju između skladišta i drugih mjesta zaliha', 'This module allows you to manage Events - whether pre-planned (e.g. exercises) or Live Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Ovaj modul omogućuje upravljanje događajima - bilo da su prethodno planirani (npr. vježbe) ili incidenti koji se trenutno odvijaju. Možete dodijeliti odgovarajuća sredstva (ljude, alate i postrojenja), tako da oni mogu biti lako mobilizirani.', 'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Ovaj modul dopušta planiranje scenarija za vježbe i događaje. Možete alocirati prikladne resurse (ljudstvo, sredstva i objekte) tako da se mogu lako mobilizirati.', 'This page shows you logs of past syncs. Click on the link below to go to this page.': 'Ova stranica prikazuje zapisnike prethodnih sinhronizacija. 
Kliknite na link ispod, kako biste ušli na ovu stranicu.', 'This resource is already configured for this repository': 'Resurs je već konfigurisan za ovaj repozitorij', 'This role can not be assigned to users.': 'Ova uloga se ne može dodijeliti korisnicima', 'This screen allows you to upload a collection of photos to the server.': 'Ovaj prozor Vam dozvoljava da uploadujete kolekciju slika na server.', 'This setting can only be controlled by the Administrator.': 'Ovo podešavanje može kontrolisati samo administrator.', 'This shipment contains %s items': 'Ova dostava sadrži %s stavki', 'This shipment contains one line item': 'Ova isporuka ima stavku od jedne linije', 'This shipment has already been received & subsequently canceled.': 'Ova isporuka je već bila primljena i naknadno otkazana.', 'This shipment has already been received.': 'Ova dostava je već primljena.', 'This shipment has already been sent.': 'Ova dostava je već poslana.', 'This shipment has not been received - it has NOT been canceled because can still be edited.': 'Ova isporuka nije primljena - NIJE otkazana zato što još uvijek može biti preuređena.', 'This shipment has not been returned.': 'Ova isporuka nije vraćena', 'This shipment has not been sent - it cannot be returned because it can still be edited.': 'Ova isporuka nije bila poslana - ne može se vratiti jer ju je još moguće mijenjati.', 'This shipment has not been sent - it has NOT been canceled because can still be edited.': 'Ova isporuka nije poslana - NIJE otkazana zato što još uvijek može biti preuređena.', 'This shipment will be confirmed as received.': 'Ova isporuka bit će potvrđena prilikom prijema.', 'This should be an export service URL, see': 'Ovo treba biti izvozni URL servisa, vidi', 'This Team has no Members yet': 'Ovaj tim još nema članova', 'Thunderstorm': 'Grmljavina', 'Thursday': 'Četvrtak', 'Ticket': 'Kartica', 'Ticket added': 'Dodana kartica', 'Ticket deleted': 'Kartica je poništena', 'Ticket Details': 'Pojedinosti kartica', 'Ticket ID': 'ID kartice', 'Ticket updated': 'Kartica izmijenjena', 'Ticketing Module': 'Modul sa karticama', 'Tickets': 'Kartice', 'Tiled': 'popločano', 'Tilt-up concrete': 'Ispupčen beton', 'Timber frame': 'Okvir od dasaka', 'Time': 'Vrijeme', 'Time Actual': 'Stvarno vrijeme', 'Time at which data was exchanged.': 'Vrijeme u kome su podaci razmijenjeni', 'Time Estimate': 'Procjena vremena', 'Time Estimated': 'Potrebno vrijeme', 'Time Frame': 'Vremenski okvir', 'Time In': 'Vrijeme unutra', 'Time in Cache (h:m:s)': 'Vrijeme u kešu (h:m:s)', 'Time Log': 'Vremenski zapis', 'Time Log Deleted': 'Vremenski zapis izbrisan', 'Time Log Updated': 'Vremenski zapis ažuriran', 'Time Logged': 'Vrijeme prijave', 'Time needed to collect water': 'Vrijeme potrebno za sakupljanje vode', 'Time of Request': 'Vrijeme zahtjeva', 'Time Out': 'Vrijeme vani', 'Time Question': 'Vremensko pitanje', 'Time Taken': 'Potrošeno vrijeme', 'Timeline': 'Vremenska crta', 'Timeline Report': 'Izvještaj o vremenskom okviru', 'times': 'puta', 'times (0 = unlimited)': 'puta (0=neograničeno)', 'times and it is still not working. We give in. Sorry.': 'puta i još uvijek ne radi. Odustajemo. 
Žao nam je.', 'Times Completed': 'Puta završen', 'Timestamp': 'Vremenska oznaka', 'Timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Vremenske oznake se mogu povezati sa oznakama na fotografijama kako bi ih locirali na karti.', 'Title': 'Naslov', 'Title to show for the Web Map Service panel in the Tools panel.': 'Naziv koji će se prikazati za panel Usluge Web Mape u panelu Alati.', 'TMS Layer': 'TMS sloj', 'To': 'Za', 'To %(site)s': 'Za %(site)s', 'To access Sahana documentation, go to': 'Da pristupite Sahana dokumentaciji, idite na', 'to access the system': 'da pristupite sistemu', 'To begin the sync process, click the button on the right =>': 'Da se započne proces sinhronizacije, pritisnite dugme desno =>', 'To begin the sync process, click the button on the right => ': 'Da biste započeli proces sinhronizacije, kliknite na dugme desno => ', 'To begin the sync process, click this button =>': 'Da biste započeli proces sinhronizacije, pritisnite ovo dugme =>', 'To begin the sync process, click this button => ': 'Da biste započeli proces sinhronizacije, pritisnite ovo dugme => ', 'To create a personal map configuration, click': 'Da kreirate konfiguraciju lične mape, kliknite', 'To create a personal map configuration, click ': 'Za kreiranje konfiguracije lične mape, pritisnite ', 'to download a OCR Form.': 'da bi se skinula OCR forma.', 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'Za uređivanje OpenStreetMap, potrebno je urediti OpenStreetMap opcije u models/000_config.py', 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in your Map Config': 'Da uredite OpenStreetMap, trebate promijeniti OpenStreetMap postavke u konfiguraciji mape', 'To Location': 'Prema lokaciji', 'To move the Timeline: use the mouse scroll wheel, the arrow keys or grab and drag the Timeline.': 'Da pomjerite vremensku liniju: koristite točkić miša, tastere sa strelicama ili zgrabite i prevucite vremensku liniju', 'To Organization': 'Za organizaciju', 'To Person': 'Za osobu', 'To Print or Share the Map you will have to take a screenshot. If you need help taking a screen shot, have a look at these instructions for %(windows)s or %(mac)s': 'Da štampate ili dijelite mapu trebate uzeti sliku ekrana. Ako vam treba pomoć oko uzimanja slike ekrana pogledajte instrukcije za %(windows)s ili %(mac)s', 'to reset your password': 'da resetujete lozinku', 'To search by job title, enter any portion of the title. You may use % as wildcard.': 'Za pretraživanje po nazivu posla, unesi bilo koji dio naziva. Možete koristiti % kao džoker znak', "To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Da biste izvršili pretragu po imenu osobe, unesite bilo koje od imena, srednjih imena ili prezimena, razdvojenih razmacima. Možete koristiti % kao znak koji će zamijeniti bilo koji karakter ili niz karaktera. Pritisnite 'Traži' bez ikakvog unosa da biste izlistali sve osobe.", 'To search for a body, enter the ID ""tag number of the body. You may use ""% as wildcard.': 'Da tražite tijelo, unesite ID "" broj oznake tijela. Možete koristiti ""% kao džoker.', "To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": 'Za traženje tijela, unesite ID tag broj tijela. Možete koristiti % kao džoker. 
Pritisnite "Traži" bez unosa da izlistate sva tijela.', "To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": 'Za traženje tijela, unesite ID oznaku tijela. Možete koristiti % kao džoker. Pritisnite "Traži" bez unosa da izlistate sva tijela.', "To search for a hospital, enter any of the names or IDs of the hospital, or the organisation name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Da biste potražili bolnicu, unesite bilo koje od imena ili IDova bolnice, ime organizacije ili njen akronim razdvojeno razmakom. Možete koristiti % kao zamjenske karaktere. Pritisnite 'Pretraži' bez unesenih stavki da izlistate sve bolnice.", "To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Za pretragu bolnica, unesite bilo koje od imena ili pripadni broj bolnice, sa razmacima. Možete koristiti % kao džoker. Pritisnite 'Search' (traži) bez unosa da izlistate sve bolnice.", "To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "Da tražite lokaciju, ukucajte ime. Možete koristiti % kao zamjenu. Pritisnite 'Search' bez unosa da izlistate sve lokacije", "To search for a member, enter any portion of the name of the person or group. You may use % as wildcard. Press 'Search' without input to list all members.": "Da tražite člana unesite neki dio imena osobe ili grupu. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svih članova.", "To search for a patient, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all patients.": "Za pretragu pacijenta unesite ime, prezime ili srednje ime odvojene razmakom. Možete koristiti % kao zamjenu. Pritisnite 'Pretraga' bez unesenih vrijednosti za ispis svih pacijenata.", 'To search for a person, enter any of the ""first, middle or last names and/or an ID ""number of a person, separated by spaces. ""You may use % as wildcard.': 'Da tražite osobu unesite "" prvo, srednje ime ili prezime i/ili ID "" broj osobe, razdvojen razmacima. ""Možete koristiti % kao džoker.', "To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": 'Da biste tragali za osobom, unesite bilo koje - ime, srednje ime ili prezime i broj lične karte osobe, odvojeno razmacima. Možete koristiti znak % kao džoker. Pritisnite "Traži" bez unosa da vam izlista sve osobe.', "To search for a person, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Da biste izvršili pretragu po imenu osobe, unesite bilo koje od imena, srednjih imena ili prezimena, razdvojenih razmacima. Možete koristiti % kao znak koji će zamijeniti bilo koji karakter ili niz karaktera. Pritisnite 'Search' (Pretraga) bez ikakvog unosa da biste izlistali sve osobe.", "To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": 'Za traženje zahtjeva unesite neki tekst koji tražite. Možete koristiti % kao džoker. 
Pritisnite "Traži" bez unosa da izlistate sve zahtjeve.', "To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": 'Za traženje procjene ukucajte bilo koji dio broja kartice za procjenu. Možete koristiti % kao džoker. Pritisnite "Traži" bez unosa da izlistate sve procjene.', 'To Site': 'Za mjesto', 'To submit a new job, use the': 'Da unesete novi posao, koristite', 'To variable': 'Do varijable', 'to verify your email': 'da potvrdite vaš email', 'ton': 'tona', 'tonsure': 'ćela s vijencom kose', 'Tools': 'Alatke', 'Tools & Equipment': 'Alati i oprema', 'total': 'ukupno', 'Total': 'Ukupno', 'Total # of Beneficiaries Reached': 'Ukupno # ciljanih korisnika dosegnuto', 'Total # of households of site visited': 'Ukupan broj posjećenih domaćinstava mjesta', 'Total # of Target Beneficiaries': 'Ukupno # ciljanih korisnika', 'Total Affected': 'Ukupno pogođenih', 'Total Annual Budget': 'Ukupni godišnji budžet', 'Total Beds': 'Ukupno kreveta', 'Total Beneficiaries': 'Ukupno korisnika', 'Total Cost': 'Ukupni trošak', 'Total Cost per Megabyte': 'Ukupan trošak po megabajtu', 'Total Cost per Minute': 'Ukupni troškovi po minuti', 'Total Dead': 'Ukupno mrtvih', 'Total Funding (Local Currency)': 'Ukupni fondovi (lokalna valuta)', 'Total Funding Amount': 'Ukupan iznos fonda', 'Total gross floor area (square meters)': 'Ukupna površina poda (u kvadratnim metrima)', 'Total Households': 'Ukupan broj domaćinstava', 'Total Injured': 'Ukupno povrijeđenih', 'Total Locations': 'Ukupno lokacija', 'Total Monthly': 'Ukupno mjesečno', 'Total Monthly Cost': 'Ukupni mjesečni trošak', 'Total Monthly Cost:': 'Ukupni mjesečni trošak:', 'Total Monthly Cost: ': 'Ukupni mjesečni trošak: ', 'Total No of Affectees (Including Students, Teachers & Others)': 'Ukupan broj obuhvaćenih (uključujući učenike, nastavnike i ostale)', 'Total No of Students (Primary To Higher Secondary) in the Total Affectees': 'Ukupan broj učenika (osnovne i srednje škole) od ukupno pogođenih', 'Total No of Teachers & Other Govt Servants in the Total Affectees': 'Ukupno u nastavi i drugim vladinim uslugama u ukupnom broju pogođenih', 'Total number of beds in this facility. Automatically updated from daily reports.': 'Ukupan broj kreveta u ovom objektu. Automatski se ažurira iz dnevnih izvještaja', 'Total number of beds in this hospital. Automatically updated from daily reports.': 'Ukupan broj kreveta u ovoj bolnici. 
Automatski se ažurira iz dnevnih izvještaja.', 'Total number of houses in the area': 'Ukupan broj kuća u području', 'Total Number of Resources': 'Ukupan broj resursa', 'Total number of schools in affected area': 'Ukupni broj škola u zahvaćenim područjima', 'Total One-time Costs': 'Ukupni jednokratni troškovi', 'Total Persons': 'Ukupan broj osoba', 'Total Population': 'Ukupna populacija', 'Total population of site visited': 'Ukupan broj stanovnika posjećenog mjesta', 'Total Records: %(numrows)s': 'Ukupno zapisa: %(numrows)s', 'Total Recurring Costs': 'Ukupni ponavljajući troškovi', 'Total Unit Cost': 'Ukupni jedinični trošak', 'Total Unit Cost:': 'Ukupni jedinični trošak:', 'Total Unit Cost: ': 'Ukupni jedinični trošak: ', 'Total Units': 'Ukupno jedinica', 'Total Value': 'Ukupna vrijednost', 'Totals for Budget:': 'Ukupni iznos Budžeta:', 'Totals for Bundle:': 'Ukupno po paketu:', 'Totals for Kit:': 'Ukupni iznosi za komplet:', 'Tour added': 'Tura dodana', 'Tour Configuration': 'Konfiguracija ture', 'Tour deleted': 'Tura obrisana', 'Tour Details': 'Detalji ture', 'Tour Name': 'Ime ture', 'Tour updated': 'Tura ažurirana', 'Tour User': 'Korisnik ture', 'Tourist Group': 'Grupa turista', 'Tours': 'Ture', 'Town': 'Grad', 'Traceback': 'Praćenje', 'Traces internally displaced people (IDPs) and their needs': 'Prati interno raseljene osobe i njihove potrebe', 'Tracing': 'Praćenje', 'Track': 'Praćenje', 'Track deleted': 'Praćenje obrisano', 'Track Details': 'Prati detalje', 'Track Shipment': 'Prati pošiljku', 'Track updated': 'Praćenje ažurirano', 'Track uploaded': 'Praćenje učitano', 'Track with this Person?': 'Pratiti sa ovom Osobom?', 'Trackable': 'Moguće pratiti', 'Tracking and Tracing of Persons and Groups': 'Praćenje osoba i grupa', 'Tracking of basic information on the location, facilities and size of the Shelters': 'Praćenje osnovnih informacija o lokaciji, ustanovama i veličini skloništa.', 'Tracking of Patients': 'Praćenje pacijenata', 'Tracking of Projects, Activities and Tasks': 'Praćenje projekata, aktivnosti i dešavanja', 'Tracks': 'Staze', 'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Prati lokaciju, distribuciju, kapacitet i podjelu žrtava u skloništima', 'Traffic Report': 'Izvještaj o prometu', 'Training': 'Obuka', 'Training added': 'Dodan trening', 'Training Course Catalog': 'Katalog o kursu treniranja', 'Training deleted': 'Obrisan trening', 'Training Details': 'Detalji treninga', 'Training Event': 'Događaj obuke', 'Training Event added': 'Događaj obuke dodan', 'Training Event deleted': 'Događaj obuke obrisan', 'Training Event Details': 'Detalji o događaju obuke', 'Training Event updated': 'Događaj obuke ažuriran', 'Training Events': 'Događaji obuke', 'Training Facility': 'Lokacija obuke', 'Training Hours (Month)': 'Sati obuke (mjesečno)', 'Training Hours (Year)': 'Sati obuke (godišnje)', 'Training Report': 'Izvještaj obuke', 'Training updated': 'Ažuriran trening', 'Trainings': 'Treninzi', 'Transfer': 'Prijenos', 'Transfer Ownership': 'Prijenos vlasništva', 'Transfer Ownership To (Organization/Branch)': 'Prebaci vlasništvo na (organizacija/ogranak)', 'Transit': 'Tranzit', 'Transit Status': 'Status tranzita', 'Transit. 
Status': 'Status tranzita', 'Transition Effect': 'Efekat tranzicije', 'Translate': 'Prevedi', 'Translated File': 'Prevedena datoteka', 'Translation': 'Prevod', 'Translation Functionality': 'Funkcionalnost prijevoda', 'Transnistria': 'Pridnjestrovska', 'Transparent?': 'Providno?', 'Transport Reference': 'Transportna referenca', 'Transportation assistance, Rank': 'Prevozna pomoć, stepen', 'Transportation Required': 'Prijevoz je potreban', 'Transported By': 'Prevoznik', 'Transported by': 'Prevoznik', 'Trauma Center': 'Centar za traume', 'Travel Cost': 'Troškovi putovanja', 'Treatments': 'Tretmani', 'Tree': 'Stablo', 'Trinidad and Tobago': 'Trinidad i Tobago', 'Tropical Storm': 'Tropska Oluja', 'Tropo Messaging Token': 'Tropo token za poruke', 'Tropo Settings': 'Tropo postavke', 'Tropo settings updated': 'Tropo postavke ažurirane', 'Tropo Voice Token': 'Tropo simbol glasa', 'Truck': 'Kamion', 'Try checking the URL for errors, maybe it was mistyped.': 'Pokušajte provjeriti greške u URL-u, možda je pogrešno napisan.', 'Try hitting refresh/reload button or trying the URL from the address bar again.': 'Pokušajte sa pritiskom na dugme za osvježavanje/ponovo učitavanje ili ponovnim unosom URL u adresnoj traci.', 'Try refreshing the page or hitting the back button on your browser.': 'Pokušajte osvježiti stranicu ili pritisnuti dugme za povratak nazad u Vašem pregledniku.', 'Tsunami': 'Cunami', 'Tuesday': 'Utorak', 'Tugboat Capacity': 'Kapacitet skele', 'Tunisia': 'Tunis', 'Turkey': 'Turska', 'turned up': 'okrenut gore', 'turning grey': 'postaje sivo', 'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'tweepy modul nije dostupan u radu sa tekućom verzijom Pythona - potrebna je instalacija za ne-Tropo Twitter podršku!', 'Tweet deleted': 'Tweet izbrisan', 'Tweet Details': 'Tweet detalji', 'Tweeted By': 'Tweet obavio', 'Tweeted by': 'Tweet obavio', 'Tweeted on': 'Tweet datum', 'Tweeted On': 'Tweet datum', 'Twilio (Inbound)': 'Twilio (dolazni)', 'Twilio Setting added': 'Twilio postavke dodane', 'Twilio Setting deleted': 'Twilio postavke obrisane', 'Twilio Setting Details': 'Detalji Twilio postavki', 'Twilio Settings': 'Twilio postavke', 'Twilio settings updated': 'Twilio postavke ažurirane', 'Twilio SMS Settings': 'Twilio SMS postavke', 'Twilio SMS settings': 'Twilio SMS postavke', 'Twitter account updated': 'Twitter nalog ažuriran', 'Twitter ID or #hashtag': 'Twitter ID ili #hashtag', 'Twitter InBox': 'Twitter ulazni sandučić', 'Twitter Search': 'Pretraži Twitter', 'Twitter Search Queries': 'Opcije Twitter pretrage', 'Twitter Search Results': 'Rezultati Twitter pretrage', 'Twitter Settings': 'Postavke Twittera', 'Twitter Timeline': 'Twitter vremenska linija', 'Type': 'Tip', 'Type of cause': 'Tip uzroka', 'Type of Construction': 'Vrsta izgradnje', 'Type of place for defecation': 'Vrsta mjesta za obavljanje nužde', 'Type of Transport': 'Vrsta transporta', 'Type of water source before the disaster': 'Tip izvora vode prije nepogode', "Type the first few characters of one of the Participant's names.": 'Upiši prvih nekoliko slova imena jedne od osoba.', "Type the first few characters of one of the Person's names.": 'Upiši prvih nekoliko slova imena jedne od osoba.', "Type the name of an existing catalog item OR Click 'Create Item' to add an item which is not in the catalog.": "Navedite ime postojeće stavke kataloga ili kliknite 'Kreiraj stavku' da dodate novu stavku koja nije u katalogu.", 'Type the name of an existing catalog kit': 'Unesite ime 
postojećeg kompleta iz kataloga', "Type the name of an existing site OR Click 'Create Warehouse' to add a new warehouse.": "Unesite ime postojećeg mjesta ili kliknite na 'Kreiraj skladište' da dodate novo skladište.", 'Types': 'Tipovi', 'Types of health services available': 'Dostupni tipovi zdravstvene zaštite', 'Types of water storage containers available': 'Dostupni tipovi spremnika za vodu', 'UID': 'JIB', 'Ukraine': 'Ukrajina', 'UN agency': 'UN agencija', 'Un-Repairable': 'Nepopravljiv', 'Unable to find sheet %(sheet_name)s in uploaded spreadsheet': 'Ne mogu naći list %(sheet_name)s u postavljenoj tablici', 'Unable to open spreadsheet': 'Ne mogu da otvorim tablicu', 'unable to parse csv file': 'ne mogu analizirati csv datoteku', 'Unable to parse CSV file or file contains invalid data': 'Ne mogu analizirati CSV datoteku ili datoteka sadrži nevažeće podatke', 'Unable to parse CSV file!': 'Nije moguće analizirati CSV dokument!', 'unapproved': 'neodobreno', 'Unassigned': 'Nedodijeljeno', 'Uncheck all': 'Skini sve oznake', 'uncheck all': 'poništi sve oznake', 'uncovered': 'nepokriveno', 'Under which condition a local record shall be updated if it also has been modified locally since the last synchronization': 'Pod kojim uslovima bi lokalni zapisi trebali biti ažurirani ako su također lokalno mijenjani nakon zadnje sinhronizacije', 'Under which conditions local records shall be updated': 'Pod kojim uslovima bi lokalni zapisi trebali biti ažurirani', 'Understaffed': 'nema dovoljno zaposlenih', 'Unidentified': 'Neidentifikovano', 'unidentified': 'neidentificiran', 'uninhabitable = foundation and structure destroyed': 'nenaseljivo = temelji i struktura uništeni', 'Union Council': 'Vijeće saveza', 'Unique code': 'Jedinstveni kôd', 'Unique identifier which THIS repository identifies itself with when sending synchronization requests.': 'Jedinstveni identifikator kojim OVAJ repozitorij definiše sebe samog slanjem sinhronizacijskih zahtjeva.', 'Unique Locations': 'Jedinstvene lokacije', 'Unit': 'Jedinica', 'Unit added': 'Jedinica dodana', 'Unit Bed Capacity': 'Kapacitet kreveta po jedinici', 'Unit Cost': 'Troškovi jedinice', 'Unit deleted': 'Jedinica obrisana', 'Unit Details': 'Detalji jedinice', 'Unit of Measure': 'Jedinica mjere', 'Unit Set': 'Jedinica postavljena', 'Unit Short Code for e.g. 
m for meter.': 'Kratko ime jedinice, npr. m za metar', 'Unit updated': 'Jedinica ažurirana', 'Unit Value': 'Vrijednost jedinice', 'United Arab Emirates': 'Ujedinjeni Arapski Emirati', 'United Kingdom': 'Ujedinjeno Kraljevstvo', 'United States Dollars': 'američki dolari', 'Units': 'Jedinice', 'Units of Measure': 'Mjerna jedinica', 'Unknown': 'Nepoznato', 'unknown': 'nepoznato', 'Unknown Locations': 'Nepoznate lokacije', 'Unknown Peer': 'Nepoznati saradnik', 'Unknown question code': 'Nepoznata šifra pitanja', 'Unknown type of facility': 'Nepoznata vrsta objekta', 'unlimited': 'neograničeno', 'Unloading': 'Pražnjenje', 'Unmark as duplicate': 'Ukloni oznaku kao duplo', 'Unreinforced masonry': 'Zid bez armature', 'Unresolved Conflicts': 'Neriješeni konflikti', 'Unsafe': 'Nesiguran', 'Unselect to disable the modem': 'Uklonite oznaku da biste isključili modem', 'Unselect to disable this API service': 'Izbriši oznaku da onemogućiš ovu API uslugu', 'Unselect to disable this SMTP service': 'Poništite odabir da bi onemogućili ovu SMTP uslugu', 'Unsent': 'Nije poslano', 'Unskilled': 'Neiskusan', 'unspecified': 'nije navedeno', 'Unsubscribe': 'Otkaži pretplatu', 'Unsupported data format!': 'Nepodržan format podataka!', 'Unsupported method!': 'Nepodržana metoda!', 'unverified': 'nepotvrđeno', 'Update': 'Ažuriranje', 'update': 'ažuriraj', 'Update Activity Report': 'Ažuriraj izvještaj o aktivnostima', 'Update Base Location': 'Ažuriraj baznu lokaciju', 'Update Cholera Treatment Capability Information': 'Ažuriraj informacije o sposobnosti liječenja kolere', 'Update Coalition': 'Ažuriraj koaliciju', 'Update if Master': 'Ažuriraj ako je glavno', 'update if master': 'ažuriraj ako je glavno', 'update if newer': 'ažurirajte ako je novije', 'Update if Newer': 'Ažurirajte ako je novije', 'Update Import Job': 'Ažuriraj posao za uvoz', 'Update Location': 'Ažuriraj lokaciju', 'Update Map': 'Ažuriraj mapu', 'Update Master file': 'Ažuriraj glavnu datoteku', 'Update Method': 'Metod ažuriranja', 'Update Morgue Details': 'Ažuriraj detalje mrtvačnice', 'Update Notification': 'Ažuriraj napomenu', 'Update Policy': 'Pravila ažuriranja', 'Update Report': 'Ažuriraj izvještaj', 'Update Request': 'Ažuriraj zahtjev', 'Update Service Profile': 'Ažuriraj profil usluge', 'Update Status': 'Ažuriraj status', 'Update Task Status': 'Ažuriraj status zadatka', 'Update this entry': 'Ažuriraj ovaj unos', 'Update Unit': 'Ažuriranje jedinice', 'Update your current ordered list': 'Ažuriraj trenutni uređeni spisak', 'Update/Newer': 'Ažuriraj/novije', 'Update:': 'Ažuriraj:', 'updated': 'ažurirano', 'Updated By': 'Ažurirano od', 'updates only': 'samo ažuriranja', 'Upload': 'Pošalji', 'Upload a (completely or partially) translated CSV file': 'Postavi (djelomično ili potpuno) prevedenu CSV datoteku', 'Upload a CSV file': 'Dodaj CSV datoteku', 'Upload a CSV file formatted according to the Template.': 'Učitaj fajl formata CSV prema šablonu.', 'Upload a Question List import file': 'Postavi uvoznu datoteku s listom pitanja', 'Upload a Spreadsheet': 'Slanje proračunskih tablica (spreadsheet)', 'Upload a text file containing new-line separated strings:': 'Postavi tekstualnu datoteku koja sadrži nizove znakova razdvojene novim redovima', 'Upload an Assessment Template import file': 'Postavi uvoznu datoteku za predložak procjene', 'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': 'Učitajte sliku (bmp, gif, jpeg ili png), max. 300x300 piksela!', 'Upload an image file (png or jpeg), max. 400x400 pixels!': 'Učitajte sliku (jpeg ili png), maks. 
400x400 piksela!', 'Upload an image file here.': 'Stavite sliku ovdje.', "Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'Ovdje učitajte slikovnu datoteku. Ako ne učitate slikovnu datoteku, onda morate specificirati njenu lokaciju u URL polju.', 'Upload an image, such as a photo': 'Postavi sliku, kao što je fotografija', 'Upload Comma Separated Value File': 'Uploaduj datoteku vrijednosti odvojenih zarezom', 'Upload Completed Assessment Form': 'Postavi završen formular ocjene', 'Upload file': 'Postavi datoteku', 'Upload Format': 'Dodati format', 'Upload OCR Form': 'Pošalji OCR formu (optičko prepoznavanje karaktera)', 'Upload Photos': 'Učitaj fotografije', 'Upload Scanned OCR Form': 'Pošalji skenirani OCR formular', 'Upload Shapefile': 'Postavi datoteku s likovima', 'Upload Spreadsheet': 'Pošaljite tabelu proračuna', 'Upload the Completed Assessment Form': 'Postavi završen formular ocjene', 'Upload Track': 'Pošalji praćenje', 'Upload translated files': 'Pošalji prevedene datoteke', 'Upload Web2py portable build as a zip file': 'Pošalji Web2py portabilni build kao zip datoteku', 'Uploaded': 'Postavljeno', 'Uploaded file is not a PDF file. Provide a Form in valid PDF Format.': 'Postavljena datoteka nije PDF datoteka. Navedite formular u važećem PDF formatu.', "Uploaded file(s) are not Image(s). Supported image formats are '.png', '.jpg', '.bmp', '.gif'.": "Postavljena datoteka ili datoteke ne predstavljaju slike. Podržani formati slika su '.png', '.jpg', '.bmp', '.gif'.", 'Uploaded PDF file has more/less number of page(s) than required. Check if you have provided appropriate revision for your Form as well as check the Form contains appropriate number of pages.': 'Postavljena PDF datoteka ima više/manje brojeva stranica nego što je potrebno. Provjerite da li ste naveli pravu reviziju za vaš formular i provjerite da li formular sadrži potreban broj stranica.', 'Urban area': 'Urbano područje', 'Urban Fire': 'Gradski požar', 'Urban Tank Tactical Vehicle': 'Gradska taktička pokretna cisterna', 'Urgent': 'Hitno', 'urgent': 'hitno', 'URL for the Mobile Commons API': 'URL za Mobile Commons API', 'URL for the twilio API.': 'URL za twilio API.', 'URL of the default proxy server to connect to remote repositories (if required). If only some of the repositories require the use of a proxy server, you can configure this in the respective repository configurations.': 'URL podrazumijevanog Proxy servera za vezu s udaljenim repozitorijima (ako je potrebno). Ako samo neki repozitoriji zahtijevaju proxy server, možete to konfigurisati.', 'URL of the proxy server to connect to the repository (leave empty for default proxy)': 'URL Proxy servera za vezu s repozitorijima (prazno za podrazumijevani proxy server)', 'URL of the Ushahidi instance': 'URL Ushahidi instance', 'URL to a Google Calendar to display on the project timeline.': 'URL za Google Calendar za prikaz projektne vremenske linije', 'URL to resume tour': 'URL da se nastavi tura', 'Uruguay': 'Urugvaj', 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Koristite (...) & (...) za I, (...) | (...) za ILI, i ~ (...) 
za NE za izgradnju složenijih upita.', 'Use decimal': 'Koristi decimalno', 'Use default': 'Koristi podrazumijevano', 'Use default from feature class': 'Koristi podrazumijevano iz klase karakteristika', 'Use deg, min, sec': 'Koristi Stepeni Minute Sekunde', 'Use Geocoder for address lookups?': 'Koristi Geocoder za traženje adrese?', 'Use Site?': 'Da li koristiti mjesto?', 'Use these links to download data that is currently in the database.': 'Koristi ove linkove za skidanje podataka koji su trenutno u bazi.', 'Use this link to review the situation.': 'Koristite ovaj link za pregled situacije', 'Use this to set the starting location for the Location Selector.': 'Koristite ovo da podesite početnu lokaciju za Odabirač Lokacije', 'Used by IRS & Assess': 'Korišteno od strane IRS & Assess', 'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Koristi se u onHover Tooltip & iskočnim prozorima skupova pri razlikovanju tipova', 'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Korišteno za izgradnju onHover Tooltip i prvo polje je korišteno u iskočnim prozorima skupova za razlikovanje zapisa.', 'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Koristi se za provjeru razumnosti geografske širine unesene lokacije. Može se koristiti za filter liste resursa koji imaju lokacije', 'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Koristi se za provjeru geografske dužine unesenih lokacija. Može se koristiti kao filter lista izvora koje posjeduju lokacije.', 'Used to import data from spreadsheets into the database': 'Korišteno da se unesu podaci iz Tabele u bazu podataka', 'Used to populate feature attributes which can be used for Styling.': 'Korišteno za punjenje atributa karakteristika korištenih za stilove.', 'Used within Inventory Management, Request Management and Asset Management': 'Korišteno za vođenje inventara, pri upravljanju zahtjevima i upravljanju sredstvima', 'User': 'Korisnik', 'User %(id)s Logged-in': 'Korisnik %(id)s prijavljen', 'User Account': 'Korisnički nalog', 'User Account has been Approved': 'Korisnički nalog je potvrđen', 'User Account has been Disabled': 'Korisnički nalog je onemogućen', 'User added': 'Korisnik dodan', 'User added to Role': 'Korisnik dodan u ulogu', 'User already has this role': 'Korisnik već ima datu ulogu', 'User already in Group!': 'Korisnik već u grupi!', 'User deleted': 'Korisnik obrisan', 'User Details': 'Korisnički detalji', 'User Guidelines Synchronization': 'Sinhronizacija korisničkih smjernica rada', 'User has been (re)linked to Person and Human Resource record': 'Korisnik je ponovo vezan za zapis o osoblju i ljudskim resursima.', 'User has no Email address!': 'Korisnik nema e-mail adrese!', 'User has no SMS address!': 'Korisnik nema SMS adrese!', 'User ID': 'Korisnički ID', 'User Management': 'Upravljanje korisnicima', 'User Profile': 'Korisnički profil', 'User Requests': 'Korisnički zahtjevi', 'User Roles': 'Uloge korisnika', 'User Updated': 'Ažuriran korisnik', 'User updated': 'Korisnik ažuriran', 'User with Role': 'Korisnik s ulogom', "User's role": 'Korisnička uloga', 'Username': 'Korisničko ime', 'Username & Password': 'Korisničko ime i lozinka', 'Username to use for authentication at the remote site.': 'Korisničko ime za prijavu na udaljeni sajt.', 'Users': 'Korisnici', 'Users in my Organizations': 'Korisnici u 
mojim organizacijama', 'Users removed': 'Korisnici uklonjeni', 'Users with this Role': 'Korisnici s ovom ulogom', 'Uses the REST Query Format defined in': 'Koristi REST format upita definiran u', 'Ushahidi': 'Ushahidi', 'Ushahidi Import': 'Uvoz iz Ushahidi', 'using default': 'koristim podrazumijevani', 'Usual food sources in the area': 'Uobičajeni izvori hrane u području', 'UTC Offset': 'UTC pomak', 'Utilities': 'Usluge', 'Utility, telecommunication, other non-transport infrastructure': 'Uslužne, telekomunikacijske i ostale netransportne infrastrukture', 'Utilization Details': 'Detalji upotrebe', 'Utilization Report': 'Izvještaj o upotrebi', 'UUID of foreign Sahana server': 'UUID udaljenog Sahana servera', 'Valid': 'važeće', 'Valid From': 'Važi od', 'Valid Until': 'Važi do', 'Value': 'Vrijednost', 'Value per Pack': 'Vrijednost po paketu', 'Various Reporting functionalities': 'Razne funkcionalnosti izvještaja', 'Vatican City': 'Vatikan', 'VCA (Vulnerability and Capacity Assessment)': 'VCA (Procjena ranjivosti i kapaciteta)', 'Vehicle': 'Vozilo', 'Vehicle added': 'Dodano vozilo', 'Vehicle assigned': 'Vozilo dodijeljeno', 'Vehicle Assignment updated': 'Dodjela vozila ažurirana', 'Vehicle Assignments': 'Dodjele vozila', 'Vehicle Categories': 'Kategorije vozila', 'Vehicle Category': 'Kategorija vozila', 'Vehicle Crime': 'Zločin s vozilima', 'Vehicle deleted': 'Obrisano vozilo', 'Vehicle Details': 'Detalji o vozilu', 'Vehicle Details added': 'Dodani detalji vozila', 'Vehicle Details deleted': 'Obrisani detalji vozila', 'Vehicle Details updated': 'Ažurirani detalji vozila', 'Vehicle Management': 'Vođenje vozila', 'Vehicle Plate Number': 'Registarski broj vozila', 'Vehicle Type': 'Vrsta vozila', 'Vehicle Type added': 'Vrsta vozila dodana', 'Vehicle Type deleted': 'Vrsta vozila obrisana', 'Vehicle Type Details': 'Detalji o vrsti vozila', 'Vehicle Type updated': 'Vrsta vozila ažurirana', 'Vehicle Types': 'Vrste vozila', 'Vehicle unassigned': 'Vozilo nedodijeljeno', 'Vehicle updated': 'Ažurirano vozilo', 'Vehicles': 'Vozila', 'Vehicles are assets with some extra details.': 'Vozila su sredstva sa nekim dodatnim detaljima', 'Vendor': 'Dobavljač', 'Venezuela': 'Venecuela', 'Venue': 'Mjesto održavanja', 'Verification Status': 'Status provjere', 'verified': 'provjereno', 'Verified': 'Potvrđeno', 'Verified?': 'Potvrđeno?', 'Verify password': 'Potvrdite lozinku', 'Verify Password': 'Potvrdite lozinku', 'Version': 'Verzija', 'vertical': 'vertikalno', 'Very Good': 'Veoma dobro', 'Very High': 'Veoma visok', 'Very Strong': 'Veoma jako', 'Vessel Max Length': 'Max. 
dužina čamca', 'Victim': 'Žrtva', 'Video Tutorials': 'Video lekcije', 'Vietnam': 'Vijetnam', 'View': 'Pogled', 'view': 'pogled', 'View & Edit Pledges': 'Pregled/Uređivanje ponuda za pomoć', 'View Alerts received using either Email or SMS': 'Pregledaj upozorenja primljena korištenjem Email-a ili SMS-a', 'View All': 'Prikaži sve', 'View all log entries': 'Pogledaj sve unose u zapisniku', 'View All Tickets': 'Pogledaj sve kartice', "View and/or update details of the person's record": 'Prikažite i/ili ažurirajte detalje zapisa za ovu osobu', 'View and/or update their details': 'Prikažite i/ili ažurirajte njihove detalje', 'View as Pages': 'Pogledaj kao stranice', 'View Email Accounts': 'Pogledaj naloge elektronske pošte', 'View Email InBox': 'Pogledaj E-mail dolazne poruke', 'View Error Tickets': 'Pregledaj kartice grešaka', 'View full screen': 'Pogledaj preko cijelog ekrana', 'View Fullscreen Map': 'Vidi mapu cijelog ekrana', 'View Image': 'Pogledaj sliku', 'View InBox': 'Pogledaj dolazne poruke', 'View Items': 'Prikaz stavki', 'View Location Details': 'Pogledaj detalje lokacije', 'View log entries per repository': 'Pogledaj stavke zapisnika po repozitoriju', 'View Message Log': 'Prikaži zapisnik poruka', 'View Mobile Commons Settings': 'Pogledaj Mobile Commons postavke', 'View On Map': 'Pogledaj na mapi', 'View on Map': 'Pogledaj na Mapi', 'View or update the status of a hospital.': 'Pregledanje ili ažuriranje statusa bolnice.', 'View Outbox': 'Pogledaj izlazno sanduče', 'View Parser Connections': 'Pogledaj parserske konekcije', 'View pending requests and pledge support.': 'Pregled zahtjeva na čekanju i ponuda podrške', 'View Picture': 'Pogledaj sliku', 'View Queries': 'Pogledaj upite', 'View Requests & Pledge Aid': 'Pogledaj zahtjeve i ponude za pomoć', 'View Requests for Aid': 'Pogledaj zahtjeve za pomoć', 'View Results of completed and/or partially completed assessments': 'Pogledaj rezultat završenih i/ili djelimično završenih procjena', 'View RSS Posts': 'Pogledaj RSS poruke', 'View RSS Settings': 'Pogledaj RSS Postavke', 'View Sender Priority': 'Pogledaj prioritet pošiljaoca', 'View Sent Emails': 'Pogledaj poslane E-mail poruke', 'View Sent SMS': 'Pogledaj poslane SMS poruke', 'View Sent Tweets': 'Pogledaj poslane Tweet poruke', 'View Settings': 'Prikaz postavki', 'View SMS InBox': 'Pogledaj SMS dolazne poruke', 'View SMS OutBox': 'Pogledaj SMS odlazne poruke', 'View Test Result Reports': 'Pogledaj izvještaj o rezultatima testiranja', 'View the hospitals on a map.': 'Pogledaj bolnice na mapi.', 'View the module-wise percentage of translated strings': 'Pogledaj procenat prevedenosti stringova po modulu', 'View Tickets': 'Vidi kartice', 'View Translation Percentage': 'Pogledaj procenat prijevoda', 'View Tweet': 'Pogledaj tweet', 'View Twilio Settings': 'Pogledaj Twilio postavke', 'View Twitter InBox': 'Pogledaj Twitter dolazne poruke', 'View/Edit Person Details': 'Pogledaj/uredi detalje osobe', 'View/Edit the Database directly': 'Pogledaj/Uredi Bazu podataka direktno', "View/Edit the Database directly (caution: doesn't respect the framework rules!)": 'Pogledaj/uredi bazu podataka direktno (Upozorenje: nepoštivanje okvirnih pravila!)', 'Village': 'Selo', 'Village / Suburb': 'Selo / predgrađe', 'Village Leader': 'Vođa sela', 'Visible?': 'Vidljivo?', 'Visual Recognition': 'Vizuelno prepoznavanje', 'vm_action': 'vm_akcija', 'Volcanic Ash Cloud': 'Oblak vulkanskog pepela', 'Volcanic Event': 'Vulkanski događaj', 'Volume (m3)': 'Zapremina (m3)', 'Volume - Fluids': 'Zapremina - tečnosti', 'Volume - 
Solids': 'Zapremina - čvrste materije', 'Volume/Dimensions': 'Zapremina/Dimenzije', 'Voluntarios': 'Volonterski', 'Volunteer': 'Volonter', 'volunteer': 'volonter', 'Volunteer added': 'Volonter dodan', 'Volunteer Availability': 'Dostupnost volontera', 'Volunteer availability added': 'Dodana dostupnost volontera', 'Volunteer availability deleted': 'Dostupnost volontera obrisana', 'Volunteer availability updated': 'Ažurirana dostupnost volontera', 'Volunteer Cluster': 'Skup volontera', 'Volunteer Cluster added': 'Skup volontera dodan', 'Volunteer Cluster deleted': 'Skup volontera obrisan', 'Volunteer Cluster Position': 'Pozicija skupa volontera', 'Volunteer Cluster Position added': 'Pozicija skupa volontera dodana', 'Volunteer Cluster Position deleted': 'Pozicija skupa volontera obrisana', 'Volunteer Cluster Position updated': 'Pozicija skupa volontera ažurirana', 'Volunteer Cluster Type': 'Vrsta skupa volontera', 'Volunteer Cluster Type added': 'Vrsta skupa volontera dodana', 'Volunteer Cluster Type deleted': 'Vrsta skupa volontera obrisana', 'Volunteer Cluster Type updated': 'Vrsta skupa volontera ažurirana', 'Volunteer Cluster updated': 'Skup volontera ažuriran', 'Volunteer Contact': 'Kontakt volontera', 'Volunteer Data': 'Podaci o volonterima', 'Volunteer deleted': 'Volonter obrisan', 'Volunteer Details': 'Detalji o volonteru', 'Volunteer details updated': 'Detalji o volonterima ažurirani', 'Volunteer Details updated': 'Detalji o volonteru ažurirani', 'Volunteer Hours': 'Volonterski sati', 'Volunteer ID': 'ID volontera', 'Volunteer Information': 'Informacije Volontera', 'Volunteer Management': 'Koordinacija volontera', 'Volunteer Project': 'Volonterski projekat', 'Volunteer Record': 'Volonterski zapis', 'Volunteer Report': 'Izvještaj o volonterima', 'Volunteer Request': 'Volonterski zahtjev', 'Volunteer Role': 'Volonterska uloga', 'Volunteer Role added': 'Uloga volontera dodana', 'Volunteer Role Catalog': 'Katalog volonterskih uloga', 'Volunteer Role deleted': 'Uloga volontera obrisana', 'Volunteer Role Details': 'Detalji volonterske uloge', 'Volunteer Role updated': 'Uloga volontera ažurirana', 'Volunteer Service Record': 'Izvještaj o volonterskoj usluzi', 'volunteers': 'volonteri', 'Volunteers': 'Volonteri', 'Volunteers were notified!': 'Volonteri su obaviješteni!', 'Vote': 'Glasati', 'Votes': 'Glasovi', 'Vulnerability Document': 'Dokument o ranjivosti', 'Walking Only': 'Samo hodanje', 'Walking time to the health service': 'Potrebno vrijeme hoda do zdravstvenih usluga', 'Wall or other structural damage': 'Zid ili druga strukturna oštećenja', 'Warehouse': 'Skladište', 'Warehouse added': 'Skladište dodano', 'Warehouse deleted': 'Skladište obrisano', 'Warehouse Details': 'Detalji o skladištu', 'Warehouse Item added': 'Dodata stavka skladišta', 'Warehouse Item deleted': 'Stavka skladišta obrisana', 'Warehouse Item Details': 'Detalji o stavci skladišta', 'Warehouse Items': 'Stavke skladišta', 'Warehouse Management': 'Upravljanje skladištem', 'Warehouse Stock': 'Zaliha u skladištu', 'Warehouse Stock Details': 'Detalji o zalihi skladišta', 'Warehouse Stock Report': 'Izvještaj zaliha u skladištu', 'Warehouse Stock updated': 'Ažurirana zaliha skladišta', 'Warehouse updated': 'Skladište ažurirano', 'Warehouse/Sites Registry': 'Registar skladišta/mjesta', 'Warehouses': 'Skladišta', 'Warehousing Storage Capacity': 'Kapacitet skladišta', 'WARNING': 'UPOZORENJE', 'WASH': 'WASH', 'Water': 'Voda', 'Water collection': 'Skupljanje vode', 'Water gallon': 'Kanister vode', 'Water Sanitation Hygiene': 'Voda, sanitacija i higijena', 'Water 
storage containers in households': 'Kontejneri za vodu u domaćinstvima', 'Water storage containers sufficient per HH': 'Kontejneri za vodu dovoljni za domaćinstvo', 'Water Supply': 'Dostava vode', 'Water supply': 'Dostava vode', 'Waterspout': 'Vodena pijavica', 'WatSan': 'WatSan', 'wavy': 'valovito', 'Way Bill(s)': 'Tovarni list(ovi)', 'Waybill': 'Tovarni list', 'WAYBILL': 'TOVARNI LIST', 'Waybill Number': 'Broj tovarnog lista', "We have no active problem. That's great!": 'Nemamo aktivnog problema. Odlično!', 'We have tried': 'Pokušavali smo', 'Weak': 'Slabo', 'Web API settings updated': 'Web API postavke ažurirane', 'Web Form': 'Web formular', 'Web Map Service Browser Name': 'Ime usluge za pregled mape preko web-a', 'Web Map Service Browser URL': 'URL izbornika Web Map servisa', 'Web2py executable zip file found - Upload to replace the existing file': 'Web2py izvršna zip datoteka nađena - Pošaljite da zamijenite postojeću datoteku', 'Web2py executable zip file needs to be uploaded first to use this function.': 'Web2py izvršna zip datoteka treba da se prvo pošalje da biste koristili ovu funkcionalnost.', 'Web2py executable zip file needs to be uploaded to use this function.': 'Web2py izvršna zip datoteka treba da se prvo pošalje da biste koristili ovu funkcionalnost.', 'Website': 'Web stranica', 'Wednesday': 'Srijeda', 'Week': 'Sedmica', 'Weekends only': 'Samo vikendima', 'weekly': 'sedmično', 'Weekly': 'Sedmično', 'Weight': 'Težina', 'Weight (kg)': 'Težina (kg)', 'Welcome to the Sahana Eden Disaster Management System': 'Dobrodošli na Sahana Eden, sistem za upravljanje u slučaju katastrofa', 'Welcome to the Sahana FOSS Disaster Management System': 'Dobrodošli na Sahana FOSS, sistem za upravljanje u slučaju katastrofa', 'Welcome to the Sahana Portal at': 'Dobrodošli na Sahana Portal u', 'Well-Known Text': 'Dobro poznat tekst', 'Were breast milk substitutes used prior to the disaster?': 'Da li su korištene zamjene za majčino mlijeko prije katastrofe?', 'WFS Layer': 'WFS sloj', 'WGS84 (EPSG 4236) is required for many WMS servers.': 'WGS84 (EPSG 4236) je potreban za mnoge WMS servere.', 'What are the factors affecting school attendance?': 'Koji faktori utiču na pohađanje škole?', 'What are your main sources of cash to restart your business?': 'Koji su vaši glavni izvori novca za ponovni početak posla?', 'What are your main sources of income now?': 'Koji su vaši izvori primanja sada?', 'What do you spend most of your income on now?': 'Na šta sada trošite najveći dio prihoda?', 'What food stocks exist? (main dishes)': 'Koje zalihe hrane postoje? (glavne namirnice)', 'What food stocks exist? 
(side dishes)': 'Koje zalihe hrane postoje (dodatne namirnice)', 'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?': 'Koji je glavni izvor čiste vode za svakodnevnu upotrebu (pranje, kuhanje, kupanje)?', 'What is your major source of drinking water?': 'Koji je glavni izvor pitke vode?', 'What order to be contacted in.': 'Redoslijed kontaktiranja.', "What should be done to reduce women and children's vulnerability to violence?": 'Šta uraditi za smanjiti ranjivost žena i djece zbog nasilja?', 'What the Items will be used for': 'Za što će se koristiti ove stavke?', 'What type of latrines are available in the village/IDP centre/Camp?': 'Koji tip zahoda je dostupan u selu/centru/kampu?', 'What type of salvage material can be used from destroyed houses?': 'Koji tip spašenog materijala se može koristiti iz uništenih kuća?', 'What type of salvage material can be used from destroyed schools?': 'Koji tip spašenog materijala se može koristiti iz uništenih škola?', 'What types of health problems do children currently have?': 'Koju vrstu zdravstvenih problema djeca trenutno imaju?', 'What types of household water storage containers are available?': 'Koji tipovi kućnih spremnika za vodu su dostupni?', 'What were your main sources of income before the disaster?': 'Koji su bili vaši izvori primanja prije nepogode?', 'Wheat': 'Žito', 'When reports were entered': 'Kada su izvještaji uneseni', "When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": 'Kada se podaci usklađuju, dolazi do konflikta u slučaju kada dvije (ili više) stranaka želi da sinhronizira informacije koje su izmjenili, tj. protivne informacije. Sync modul pokušava riješiti ovakve konflikte ali ne uspijeva u nekim slučajevima. 
Tada je do Vas da riješite konflikte ručno, kliknite na link sa desne strane koji će Vas uputiti na tu stranicu.', 'When this search was last checked for changes.': 'Kada je ova pretraga zadnji put provjerena za promjene.', 'Where are the alternative places for studying?': 'Gdje su alternativna mjesta za studiranje', 'Where are the separated children originally from?': 'Koje je porijeklo odvojene djece?', 'Where do the majority of people defecate?': 'Gdje većina ljudi vrši nuždu?', 'Where have the children been sent?': 'Gdje su djeca poslana?', 'Where is solid waste disposed in the village/camp?': 'Gdje se smeće ostavlja u selu/kampu?', 'Where reached': 'Gdje je dosegnut', 'Whether calls to this resource should use this configuration as the default one': 'Da li bi pozivi na ovaj resurs trebali koristiti ovu konfiguraciju kao podrazumijevanu', 'Whether the Latitude & Longitude are inherited from a higher level in the location hierarchy rather than being a separately-entered figure.': 'Da li su geografska širina i dužina naslijeđeni iz višeg nivoa u hijerarhiji lokacija, umjesto da su posebno navedeni.', 'Whether the resource should be tracked using S3Track rather than just using the Base Location': 'Da li se resurs treba pratiti koristeći S3Track umjesto da se samo koriste iz bazne lokacije', 'Whether this is a Sahana Eden, Sahana Agasti, Ushahidi or Other instance.': 'Da li je ovo kopija Sahana Eden, Sahana Agasti, Ushahidi ili drugo', 'Which methods to apply when importing data to the local repository': 'Koje metode primijeniti pri uvozu podataka u lokalni repozitorij', 'Whiskers': 'Brkovi', 'white': 'bijela', 'Whitelist a Sender': 'Stavi pošiljaoca na bijelu listu', 'Whitelisted Senders': 'Pošiljaoci na bijeloj listi', 'Who is doing what and where': 'Ko šta radi i gdje', 'Who is doing What Where': 'Ko šta radi i gdje', 'Who usually collects water for the family?': 'Ko obično u porodici sakuplja vodu?', 'wide': 'širok', 'wider area, longer term, usually contain multiple Activities': 'veće područje, na duže vrijeme, obično sadrži više aktivnosti', 'widowed': 'udovac/udovica', 'Width': 'širina', 'Width (m)': 'Širina (m)', 'Wikipedia': 'Wikipedia', 'Wild Fire': 'Požar', 'Will be filled automatically when the Item has been Repacked': 'Bit će popunjeno automatski pri prepakovanju stavke', 'Will create and link your user account to the following records': 'Kreiraće i povezati korisnički nalog s sljedećim zapisima', 'Wind Chill': 'Hladni Vjetar', 'window': 'prozor', 'Window frame': 'Okvir prozora', 'windows broken, cracks in walls, roof slightly damaged': 'prozori razbijeni, pukotine u zidovima, krov blago oštećen', 'Winter Storm': 'Zimska oluja', 'within human habitat': 'unutar prebivališta', 'WKT is Invalid!': 'WKT nije validan', 'WMS Browser URL': 'URL za WMS pretraživač', 'WMS Layer': 'WMS sloj', 'Women of Child Bearing Age': 'Žena u reproduktivnom dobu', 'Women participating in coping activities': 'Žene učesnici u aktivnostima prilagođavanja', 'Women who are Pregnant or in Labour': 'Trudnice i porodilje', 'Womens Focus Groups': 'Ženske fokus grupe', 'Wooden plank': 'drvena daska', 'Wooden poles': 'Drveni stubovi', 'Work': 'Posao', 'Work on Program': 'Rad na programu', 'Work phone': 'Telefon na poslu', 'Working hours end': 'Radna satnica završena', 'Working hours start': 'Početak radnog vremena', 'Working or other to provide money/food': 'Radi, ili nešto drugo, da bi osigurao novac/hranu', 'Would you like to display the photos on the map?': 'Želite li prikazati fotografije na mapi', 'X-Ray': 
'X-zraci', 'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt modul nije dostupan uz tekuću verziju Pythona - to se treba instalirati za XLS izlaz!', 'xlwt module not available within the running Python - this needs installing to do XLS Reporting!': 'Modul xlwt nije dostupan unutar pokrenutog Python-a, ovo zahtijeva instalaciju XLS izvještaja!', 'xlwt not installed, so cannot export as a Spreadsheet': 'xlwt nije instaliran pa ne mogu izvesti kao tablicu', 'XSL Template Not Found:': 'XSL šablon nije pronađen:', 'XSL Transformation Error:': 'Greška u XSL transformaciji', 'XSLT Template Not Found:': 'XSLT šablon nije pronađen:', 'XSLT Transformation Error:': 'Greška u XSLT transformaciji', 'XYZ Layer': 'XYZ sloj', "Yahoo Layers cannot be displayed if there isn't a valid API Key": 'Yahoo slojevi ne mogu biti prikazani ako ne postoji ispravan API ključ', 'Year': 'Godina', 'Year built': 'Godina izgradnje', 'Year of Manufacture': 'Godina proizvodnje', 'Year that the organization was founded': 'Godina osnivanja organizacije', 'Yellow': 'Žuta', 'Yemen': 'Jemen', 'YES': 'DA', 'yes': 'da', 'Yes': 'Da', 'Yes, No': 'Da, ne', "Yes, No, Don't Know": 'Da, Ne, Ne znam', 'You are a recovery team?': 'Vi ste ekipa za sanaciju?', 'You are attempting to delete your own account - are you sure you want to proceed?': 'Pokušavate izbrisati svoj vlastiti račun - da li ste sigurni da želite da nastavite?', 'You are currently reported missing!': 'Vi ste trenutno prijavljeni kao nestali!', 'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'Možete promijeniti konfiguraciju modula sinhronizacije u sekciji Postavke. Ova konfiguracija uključuje vaš UUID (jedinstveni identifikacijski broj), sinhronizaciju rasporeda, upravljački servis itd. Idite na sljedeći link da biste otišli na stranicu Postavke sinhronizacije.', 'You can click on the map below to select the Lat/Lon fields': 'Pritisnite na mapu ispod da selektirate Lat/Lon polja', 'You can only make %d kit(s) with the available stock': 'Možete napraviti %d komplet(a) s dostupnom zalihom', "You can search by asset number, item description or comments. You may use % as wildcard. Press 'Search' without input to list all assets.": "Možete tražiti po broju sredstva, opisu stavke ili komentarima. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za spisak svih sredstava.", "You can search by by group name, description or comments and by organization name or acronym. You may use % as wildcard. Press 'Search' without input to list all.": "Možete tražiti po imenu grupe, opisu ili komentarima i po imenu organizacije ili akronimu. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svega.", "You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events.": "Možete tražiti po imenu kursa, mjestu održavanja ili komentarima događaja. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svih događaja.", "You can search by description. You may use % as wildcard. Press 'Search' without input to list all incidents.": "Možete tražiti po opisu. Možete koristiti % kao džoker. 
Pritisnite 'Traži' bez unosa za prikaz svih incidenata.", "You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Možete tražiti po radnom mjestu ili ličnom imenu, unesite ime, prezime ili srednje ime razdvojeno razmacima. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svih osoba.", 'You can search by name, acronym or comments': 'Možete tražiti po imenu, akronimu ili komentarima', 'You can search by name, acronym, comments or parent name or acronym.': 'Možete tražiti po imenu, akronimu, komentarima ili imenu/akronimu nadređenog zapisa.', "You can search by person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Možete tražiti po ličnom imenu, unesite ime, prezime ili srednje ime razdvojeno razmacima. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svih osoba.", "You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees.": "Možete tražiti po imenu kursiste, imenu kursa ili komentarima. Možete koristiti % kao džoker. Pritisnite 'Traži' bez unosa za prikaz svih kursista.", 'You can select an area on the image and save to crop it.': 'Možete odabrati područje slike za njeno snimanje i izrezivanje.', 'You can select the Draw tool': 'Možete odabrati alat za crtanje', 'You can select the Draw tool (': 'Možete odabrati alat za crtanje (', 'You can set the modem settings for SMS here.': 'Možete postaviti postavke modema za SMS ovdje.', 'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'Možete koristiti sredstvo za konverziju kako biste pretvorili iz GPS koordinata ili iz Stepeni/Minute/Sekunde.', 'You do no have permission to cancel this received shipment.': 'Nemate dozvolu da otkažete primljenu isporuku.', 'You do no have permission to cancel this sent shipment.': 'Nemate dozvolu da otkažete ovu poslanu pošiljku.', 'You do no have permission to make this commitment.': 'Ne posjedujete dozvolu da se obavežete za takvo nešto', 'You do no have permission to receive this shipment.': 'Nemate dozvolu da primite ovu pošiljku.', 'You do no have permission to send this shipment.': 'Nemate dozvolu da pošaljete ovu pošiljku', 'You do not have permission for any facility to add an order.': 'Nemate odobrenja ni za jedan objekat da dodate narudžbu.', 'You do not have permission for any facility to make a commitment.': 'Nemate dozvolu za angažovanje za neki objekt.', 'You do not have permission for any facility to make a request.': 'Nemate dozvolu za podnošenje zahtjeva bilo kojem objektu.', 'You do not have permission for any facility to perform this action.': 'Nemate odobrenja ni za jedan objekat da obavite ovu akciju.', 'You do not have permission for any facility to receive a shipment.': 'Nemate dozvolu ni za jedan objekat da primite pošiljku.', 'You do not have permission for any facility to send a shipment.': 'Nemate dozvolu da pošaljete pošiljku bilo kojem objektu.', 'You do not have permission for any organization to perform this action.': 'Nemate odobrenja ni za jednu organizaciju da obavite ovu akciju', 'You do not have permission for any site to add an inventory item.': 'Nemate dozvolu da dodate stavku inventara ni na jednom mjestu', 'You do not have permission for any site to receive a shipment.': 
'Nemate dozvolu da ijedna lokacija dobije pošiljku', 'You do not have permission for any site to send a shipment.': 'Nemate dozvolu za bilo koju stranicu za slanje pošiljke', 'You do not have permission to adjust the stock level in this warehouse.': 'Nemate odobrenja da prilagodite nivo zalihe za ovo skladište', 'You do not have permission to cancel this received shipment.': 'Nemate dozvolu da otkažete primljenu isporuku.', 'You do not have permission to cancel this sent shipment.': 'Nemate dozvolu da otkažete ovu poslanu pošiljku.', 'You do not have permission to make this commitment.': 'Ne posjedujete dozvolu da napravite ovo zaduženje', 'You do not have permission to receive this shipment.': 'Nemate dozvolu da primite ovu pošiljku.', 'You do not have permission to return this sent shipment.': 'Nemate dozvolu da vratite ovu poslanu pošiljku.', 'You do not have permission to send a shipment from this site.': 'Nemate dozvolu da šaljete pošiljku sa ovog mjesta.', 'You do not have permission to send messages': 'Nemate dozvolu da pošaljete poruke', 'You do not have permission to send this shipment.': 'Nemate dozvolu da pošaljete ovu pošiljku', 'You have a personal map configuration. To change your personal configuration, click': 'Imate ličnu konfiguraciju mape. Ukoliko želite promijeniti ličnu konfiguraciju, kliknite', 'You have a personal map configuration. To change your personal configuration, click ': 'Imate ličnu konfiguraciju mape. Za promjenu, kliknite ', 'You have committed for all people in this Request. Please check that all details are correct and update as-required.': 'Zadužili ste za sve ljude u ovom zahtjevu. Molim provjerite da su svi detalji ispravni i ažurirajte kako je potrebno.', 'You have committed to all items in this Request. Please check that all details are correct and update as-required.': 'Zadužili ste sve stavke u ovom zahtjevu. Molim provjerite da su svi detalji ispravni i ažurirajte kako je potrebno.', 'You have committed to this Request. Please check that all details are correct and update as-required.': 'Napravili ste zaduženje po ovom zahtjevu. Molim provjerite da su svi detalji ispravni i ažurirajte kako je potrebno.', 'You have found a dead body?': 'Pronašli ste mrtvo tijelo?', "You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click": 'Imate lične postavke, pa promjene ovdje načinjene vam neće biti vidljive. Ukoliko želite promijeniti ličnu konfiguraciju, kliknite', "You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "Postoje promjene koje nisu spašene. Pritisnite 'Odustani', zatim pritisnite 'Snimi' da biste ih sačuvali. Pritisnite OK da biste ih odbacili.", 'You have unsaved changes. You need to press the Save button to save them': 'Imate nesnimljenih promjena. 
Možete kliknuti dugme za snimanje da ih snimite', "You haven't made any calculations": 'Niste napravili nikakve proračune', 'You must agree to the Terms of Service': 'Morate se složiti s uslovima upotrebe', 'You must be logged in to register volunteers.': 'Morate biti prijavljeni da registrujete volontere.', 'You must be logged in to report persons missing or found.': 'Morate biti ulogovani da biste prijavili nestanak ili pronalazak osobe.', 'You must enter a minimum of %d characters': 'Morate unijeti najmanje %d znakova', 'You must enter a minimum of 4 characters': 'Morate unijeti najmanje 4 znaka', 'You must provide a series id to proceed.': 'Morate obezbijediti ID serije da nastavite.', 'You need to check all item quantities and allocate to bins before you can receive the shipment': 'Morate provjeriti sve količine stavki i dodijeliti ih u korpe prije prijema pošiljke', 'You need to check all item quantities before you can complete the return process': 'Morate provjeriti sve količine stavki prije završetka procesa vraćanja', 'You need to create a template before you can create a series': 'Trebate kreirati predložak prije nego možete kreirati seriju', 'You need to have at least 2 records in this list in order to merge them.': 'Trebate imati bar 2 zapisa u ovoj listi da ih možete spojiti.', 'You need to use the spreadsheet which you can download from this page': 'Trebate koristiti tablicu koju možete preuzeti s ove stranice', 'You should edit Twitter settings in models/000_config.py': 'Trebali biste izmijeniti postavke na Twitteru u models/000_config.py', 'Your action is required. Please approve user': 'Vaša akcija je potrebna, molim potvrdite korisnika', 'Your action is required. Please approve user %s asap:': 'Potrebna je vaša akcija. Potvrdite korisnika %s što prije moguće:', 'Your current ordered list ... (#TODO [String])': 'Vaša trenutna lista narudžbi ... (#TODO [String])', 'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Vaša trenutno naručena lista riješenih predmeta je prikazana ispod. Možete je promijeniti tako što ćete opet glasati.', 'Your name for this search. Notifications will use this name.': 'Vaše ime za ovu pretragu. Napomene će koristiti ovo ime.', 'Your post was added successfully.': 'Vaša poruka je uspješno dodata.', 'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Vašem sistemu je dodijeljen jedinstveni identifikacijski broj (UUID), kojeg ostali računari oko vas mogu koristiti da vas identifikuju. Da biste vidjeli svoj UUID, možete otići na Sinhronizacija -> Postavke sinhronizacije. 
Na toj stranici možete vidjeti i ostala podešavanja.', 'Zambia': 'Zambija', 'Zero Hour': 'Početni trenutak', 'Zeroconf Description': 'Opis bez potrebe za konfiguracijom', 'Zimbabwe': 'Zimbabve', 'Zinc roof': 'Krov od cinka', 'ZIP Code': 'Poštanski broj', 'ZIP/Postcode': 'Poštanski broj', 'Zone': 'Zona', 'Zoom': 'Uvećaj', 'Zoom In': 'Uvećaj', 'Zoom in closer to Edit OpenStreetMap layer': 'Približi za uređivanje OpenStreetMap sloja', 'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'Uvećanje: kliknite unutar mape ili koristite lijevu tipku miša da napravite pravougaonik', 'Zoom Levels': 'Nivoi zumiranja', 'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'Umanjenje: kliknite unutar mape ili koristite lijevu tipku miša da napravite pravougaonik', 'Zoom to Current Location': 'Uvećaj na trenutnu lokaciju', 'Zoom to maximum map extent': 'Uvećaj na maksimalnu veličinu mape', }
devinbalkind/eden
languages/bs.py
Python
mit
538,172
[ "VisIt" ]
53424b02d0abbd85a65f2eaaaaef896b7e859688aba104200880d836e32a0860
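The record above (languages/bs.py) is a web2py-style translation file: one large Python dict literal mapping source strings to Bosnian translations, which is why the lookup keys must stay byte-identical to the strings in the application code. A minimal sketch of how such a file can be consumed is below; load_translations() and translate() are illustrative names, not Sahana Eden's actual API (web2py has its own T()/languages machinery), and the sketch assumes the file contains only the dict literal.

import ast

def load_translations(path):
    # The language file is a single dict literal, so it can be parsed
    # safely without executing it as code.
    with open(path, encoding='utf-8') as f:
        return ast.literal_eval(f.read())

def translate(table, message):
    # Fall back to the source (English) string when no entry exists,
    # mirroring the usual behaviour of web2py's T() operator.
    return table.get(message, message)

translations = load_translations('languages/bs.py')
print(translate(translations, 'Warehouse added'))       # -> 'Skladište dodano'
print(translate(translations, 'Some unknown message'))  # -> unchanged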
################################################################################
# Copyright (C) 2015 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################

"""
Unit tests for `gamma` module.
"""

import numpy as np
from scipy import special

from numpy import testing

from .. import gaussian
from bayespy.nodes import (Gaussian,
                           GaussianARD,
                           GaussianGamma,
                           Gamma,
                           Wishart)

from ...vmp import VB

from bayespy.utils import misc
from bayespy.utils import linalg
from bayespy.utils import random

from bayespy.utils.misc import TestCase


class TestGamma(TestCase):

    def test_lower_bound_contribution(self):

        a = 15
        b = 21
        y = 4
        x = Gamma(a, b)
        x.observe(y)
        testing.assert_allclose(
            x.lower_bound_contribution(),
            (
                a * np.log(b) + (a - 1) * np.log(y) - b * y - special.gammaln(a)
            )
        )

        # Just one latent node so we'll get exact marginal likelihood
        #
        # p(Y) = p(Y,X)/p(X|Y) = p(Y|X) * p(X) / p(X|Y)
        a = 2.3
        b = 4.1
        x = 1.9
        y = 4.8
        tau = Gamma(a, b)
        Y = GaussianARD(x, tau)
        Y.observe(y)
        mu = x
        nu = 2 * a
        s2 = b / a
        a_post = a + 0.5
        b_post = b + 0.5*(y - x)**2
        tau.update()
        testing.assert_allclose(
            [-b_post, a_post],
            tau.phi
        )
        testing.assert_allclose(
            Y.lower_bound_contribution() + tau.lower_bound_contribution(),  # + tau.g,
            (
                special.gammaln((nu+1)/2)
                - special.gammaln(nu/2)
                - 0.5 * np.log(nu)
                - 0.5 * np.log(np.pi)
                - 0.5 * np.log(s2)
                - 0.5 * (nu + 1) * np.log(
                    1 + (y - mu)**2 / (nu * s2)
                )
            )
        )

        return


class TestGammaGradient(TestCase):
    """Numerically check Riemannian gradient of several nodes.

    Using VB-EM update equations will take a unit length step to the
    Riemannian gradient direction. Thus, the change caused by a VB-EM
    update and the Riemannian gradient should be equal.
    """

    def test_riemannian_gradient(self):
        """Test Riemannian gradient of a Gamma node."""

        #
        # Without observations
        #

        # Construct model
        a = np.random.rand()
        b = np.random.rand()
        tau = Gamma(a, b)
        # Random initialization
        tau.initialize_from_parameters(np.random.rand(),
                                       np.random.rand())
        # Initial parameters
        phi0 = tau.phi
        # Gradient
        g = tau.get_riemannian_gradient()
        # Parameters after VB-EM update
        tau.update()
        phi1 = tau.phi
        # Check
        self.assertAllClose(g[0],
                            phi1[0] - phi0[0])
        self.assertAllClose(g[1],
                            phi1[1] - phi0[1])

        #
        # With observations
        #

        # Construct model
        a = np.random.rand()
        b = np.random.rand()
        tau = Gamma(a, b)
        mu = np.random.randn()
        Y = GaussianARD(mu, tau)
        Y.observe(np.random.randn())
        # Random initialization
        tau.initialize_from_parameters(np.random.rand(),
                                       np.random.rand())
        # Initial parameters
        phi0 = tau.phi
        # Gradient
        g = tau.get_riemannian_gradient()
        # Parameters after VB-EM update
        tau.update()
        phi1 = tau.phi
        # Check
        self.assertAllClose(g[0],
                            phi1[0] - phi0[0])
        self.assertAllClose(g[1],
                            phi1[1] - phi0[1])

        pass

    def test_gradient(self):
        """Test standard gradient of a Gamma node."""
        D = 3

        np.random.seed(42)

        #
        # Without observations
        #

        # Construct model
        a = np.random.rand(D)
        b = np.random.rand(D)
        tau = Gamma(a, b)
        Q = VB(tau)
        # Random initialization
        tau.initialize_from_parameters(np.random.rand(D),
                                       np.random.rand(D))
        # Initial parameters
        phi0 = tau.phi
        # Gradient
        rg = tau.get_riemannian_gradient()
        g = tau.get_gradient(rg)
        # Numerical gradient
        eps = 1e-8
        p0 = tau.get_parameters()
        l0 = Q.compute_lowerbound(ignore_masked=False)
        g_num = [np.zeros(D), np.zeros(D)]
        for i in range(D):
            e = np.zeros(D)
            e[i] = eps
            p1 = p0[0] + e
            tau.set_parameters([p1, p0[1]])
            l1 = Q.compute_lowerbound(ignore_masked=False)
            g_num[0][i] = (l1 - l0) / eps
        for i in range(D):
            e = np.zeros(D)
            e[i] = eps
            p1 = p0[1] + e
            tau.set_parameters([p0[0], p1])
            l1 = Q.compute_lowerbound(ignore_masked=False)
            g_num[1][i] = (l1 - l0) / eps

        # Check
        self.assertAllClose(g[0],
                            g_num[0])
        self.assertAllClose(g[1],
                            g_num[1])

        #
        # With observations
        #

        # Construct model
        a = np.random.rand(D)
        b = np.random.rand(D)
        tau = Gamma(a, b)
        mu = np.random.randn(D)
        Y = GaussianARD(mu, tau)
        Y.observe(np.random.randn(D))
        Q = VB(Y, tau)
        # Random initialization
        tau.initialize_from_parameters(np.random.rand(D),
                                       np.random.rand(D))
        # Initial parameters
        phi0 = tau.phi
        # Gradient
        rg = tau.get_riemannian_gradient()
        g = tau.get_gradient(rg)
        # Numerical gradient
        eps = 1e-8
        p0 = tau.get_parameters()
        l0 = Q.compute_lowerbound(ignore_masked=False)
        g_num = [np.zeros(D), np.zeros(D)]
        for i in range(D):
            e = np.zeros(D)
            e[i] = eps
            p1 = p0[0] + e
            tau.set_parameters([p1, p0[1]])
            l1 = Q.compute_lowerbound(ignore_masked=False)
            g_num[0][i] = (l1 - l0) / eps
        for i in range(D):
            e = np.zeros(D)
            e[i] = eps
            p1 = p0[1] + e
            tau.set_parameters([p0[0], p1])
            l1 = Q.compute_lowerbound(ignore_masked=False)
            g_num[1][i] = (l1 - l0) / eps

        # Check
        self.assertAllClose(g[0],
                            g_num[0])
        self.assertAllClose(g[1],
                            g_num[1])

        pass
bayespy/bayespy
bayespy/inference/vmp/nodes/tests/test_gamma.py
Python
mit
6,897
[ "Gaussian" ]
f58c3f750e9df37f25c94670fca0f770c1928efdd56d8a4d3326e7a7a7bd01ab
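The test file above validates gradients two ways: a VB-EM update should move the parameters by exactly the Riemannian gradient, and the standard gradient should match a finite-difference estimate of the lower bound. The finite-difference pattern from test_gradient(), stripped of the bayespy machinery, looks roughly like this; the quadratic objective is a stand-in chosen for illustration, not taken from the tests.

import numpy as np

def numerical_gradient(f, x, eps=1e-8):
    # Forward differences, the same (l1 - l0) / eps pattern as above.
    g = np.zeros_like(x)
    f0 = f(x)
    for i in range(len(x)):
        e = np.zeros_like(x)
        e[i] = eps
        g[i] = (f(x + e) - f0) / eps
    return g

# Example: f(x) = -0.5 * ||x||^2 has the analytic gradient -x.
f = lambda x: -0.5 * np.dot(x, x)
x = np.random.randn(3)
assert np.allclose(numerical_gradient(f, x), -x, atol=1e-4)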
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Implements ProcessPoolExecutor.

The following diagram and text describe the data-flow through the system:

|======================= In-process =====================|== Out-of-process ==|

+----------+     +----------+       +--------+     +-----------+    +---------+
|          |  => | Work Ids |    => |        |  => | Call Q    | => |         |
|          |     +----------+       |        |     +-----------+    |         |
|          |     | ...      |       |        |     | ...       |    |         |
|          |     | 6        |       |        |     | 5, call() |    |         |
|          |     | 7        |       |        |     | ...       |    |         |
| Process  |     | ...      |       | Local  |     +-----------+    | Process |
|  Pool    |     +----------+       | Worker |                      |  #1..n  |
| Executor |                        | Thread |                      |         |
|          |     +----------- +     |        |     +-----------+    |         |
|          | <=> | Work Items | <=> |        | <=  | Result Q  | <= |         |
|          |     +------------+     |        |     +-----------+    |         |
|          |     | 6: call()  |     |        |     | ...       |    |         |
|          |     |    future  |     |        |     | 4, result |    |         |
|          |     | ...        |     |        |     | 3, except |    |         |
+----------+     +------------+     +--------+     +-----------+    +---------+

Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue

Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
  WorkItem from the "Work Items" dict: if the work item has been cancelled then
  it is simply removed from the dict, otherwise it is repackaged as a
  _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
  until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
  calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
  "Work Items" dict and deletes the dict entry

Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
  _ResultItems in "Result Q"
"""

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

import atexit
from concurrent.futures import _base
import queue
import multiprocessing
import threading
import weakref

# Workers are created as daemon threads and processes. This is done to allow
# the interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
#   - The workers would still be running during interpreter shutdown,
#     meaning that they would fail in unpredictable ways.
#   - The workers could be killed while evaluating a work item, which could
#     be bad if the callable being evaluated has external side-effects e.g.
#     writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.

_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False

def _python_exit():
    global _shutdown
    _shutdown = True
    items = list(_threads_queues.items())
    for t, q in items:
        q.put(None)
    for t, q in items:
        t.join()

# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1

class _WorkItem(object):
    def __init__(self, future, fn, args, kwargs):
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

class _ResultItem(object):
    def __init__(self, work_id, exception=None, result=None):
        self.work_id = work_id
        self.exception = exception
        self.result = result

class _CallItem(object):
    def __init__(self, work_id, fn, args, kwargs):
        self.work_id = work_id
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

def _process_worker(call_queue, result_queue):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A multiprocessing.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will be
            written to by the worker.
    """
    while True:
        call_item = call_queue.get(block=True)
        if call_item is None:
            # Wake up queue management thread
            result_queue.put(None)
            return
        try:
            r = call_item.fn(*call_item.args, **call_item.kwargs)
        except BaseException as e:
            result_queue.put(_ResultItem(call_item.work_id,
                                         exception=e))
        else:
            result_queue.put(_ResultItem(call_item.work_id,
                                         result=r))

def _add_call_item_to_queue(pending_work_items,
                            work_ids,
                            call_queue):
    """Fills call_queue with _WorkItems from pending_work_items.

    This function never blocks.

    Args:
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
            are consumed and the corresponding _WorkItems from
            pending_work_items are transformed into _CallItems and put in
            call_queue.
        call_queue: A multiprocessing.Queue that will be filled with _CallItems
            derived from _WorkItems.
    """
    while True:
        if call_queue.full():
            return
        try:
            work_id = work_ids.get(block=False)
        except queue.Empty:
            return
        else:
            work_item = pending_work_items[work_id]

            if work_item.future.set_running_or_notify_cancel():
                call_queue.put(_CallItem(work_id,
                                         work_item.fn,
                                         work_item.args,
                                         work_item.kwargs),
                               block=True)
            else:
                del pending_work_items[work_id]
                continue

def _queue_management_worker(executor_reference,
                             processes,
                             pending_work_items,
                             work_ids_queue,
                             call_queue,
                             result_queue):
    """Manages the communication between this process and the worker processes.

    This function is run in a local thread.

    Args:
        executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
            this thread. Used to determine if the ProcessPoolExecutor has been
            garbage collected and that this function can exit.
        processes: A list of the multiprocessing.Process instances used as
            workers.
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
        call_queue: A multiprocessing.Queue that will be filled with _CallItems
            derived from _WorkItems for processing by the process workers.
        result_queue: A multiprocessing.Queue of _ResultItems generated by the
            process workers.
    """
    nb_shutdown_processes = 0
    def shutdown_one_process():
        """Tell a worker to terminate, which will in turn wake us again"""
        nonlocal nb_shutdown_processes
        call_queue.put(None)
        nb_shutdown_processes += 1
    while True:
        _add_call_item_to_queue(pending_work_items,
                                work_ids_queue,
                                call_queue)

        result_item = result_queue.get(block=True)
        if result_item is not None:
            work_item = pending_work_items[result_item.work_id]
            del pending_work_items[result_item.work_id]

            if result_item.exception:
                work_item.future.set_exception(result_item.exception)
            else:
                work_item.future.set_result(result_item.result)
            continue
        # If we come here, we either got a timeout or were explicitly woken up.
        # In either case, check whether we should start shutting down.
        executor = executor_reference()
        # No more work items can be added if:
        #   - The interpreter is shutting down OR
        #   - The executor that owns this worker has been collected OR
        #   - The executor that owns this worker has been shutdown.
        if _shutdown or executor is None or executor._shutdown_thread:
            # Since no new work items can be added, it is safe to shutdown
            # this thread if there are no pending work items.
            if not pending_work_items:
                while nb_shutdown_processes < len(processes):
                    shutdown_one_process()
                # If .join() is not called on the created processes then
                # some multiprocessing.Queue methods may deadlock on Mac OS
                # X.
                for p in processes:
                    p.join()
                return
            else:
                # Start shutting down by telling a process it can exit.
                shutdown_one_process()
        del executor

_system_limits_checked = False
_system_limited = None
def _check_system_limits():
    global _system_limits_checked, _system_limited
    if _system_limits_checked:
        if _system_limited:
            raise NotImplementedError(_system_limited)
    _system_limits_checked = True
    try:
        import os
        nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
    except (AttributeError, ValueError):
        # sysconf not available or setting not available
        return
    if nsems_max == -1:
        # indeterminate limit, assume that limit is determined
        # by available memory only
        return
    if nsems_max >= 256:
        # minimum number of semaphores available
        # according to POSIX
        return
    _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
    raise NotImplementedError(_system_limited)

class ProcessPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = multiprocessing.cpu_count()
        else:
            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        self._result_queue = multiprocessing.Queue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        self._processes = set()

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._queue_count = 0
        self._pending_work_items = {}

    def _start_queue_management_thread(self):
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)
        if self._queue_management_thread is None:
            self._queue_management_thread = threading.Thread(
                    target=_queue_management_worker,
                    args=(weakref.ref(self, weakref_cb),
                          self._processes,
                          self._pending_work_items,
                          self._work_ids,
                          self._call_queue,
                          self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue

    def _adjust_process_count(self):
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(
                    target=_process_worker,
                    args=(self._call_queue,
                          self._result_queue))
            p.start()
            self._processes.add(p)

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._shutdown_thread:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)

            self._start_queue_management_thread()
            self._adjust_process_count()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
        self._result_queue = None
        self._processes = None
    shutdown.__doc__ = _base.Executor.shutdown.__doc__

atexit.register(_python_exit)
edmundgentle/schoolscript
SchoolScript/bin/Debug/pythonlib/Lib/concurrent/futures/process.py
Python
gpl-2.0
15,316
[ "Brian" ]
f3ff94a128d107c4575db7814756601aeecdfff1616a72a8083bc4c84dfb75e6
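The module docstring above describes the internal flow: submit() files a numbered _WorkItem, a local thread feeds the call queue, and worker processes push _ResultItems back through the result queue into each Future. From the caller's side all of that machinery reduces to the standard executor API; a minimal usage sketch, using only the public concurrent.futures interface shown above:

import concurrent.futures

def square(n):
    return n * n

if __name__ == '__main__':  # worker processes re-import this module, so guard it
    with concurrent.futures.ProcessPoolExecutor(max_workers=2) as pool:
        futures = [pool.submit(square, n) for n in range(5)]
        for fut in concurrent.futures.as_completed(futures):
            print(fut.result())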
# -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 ## ## Copyright (C) 2005-2014 Async Open Source <http://www.async.com.br> ## All rights reserved ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU Lesser General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU Lesser General Public License for more details. ## ## You should have received a copy of the GNU Lesser General Public License ## along with this program; if not, write to the Free Software ## Foundation, Inc., or visit: http://www.gnu.org/. ## ## Author(s): Stoq Team <stoq-devel@async.com.br> ## """ Domain objects related to the sale process in Stoq. Sale object and related objects implementation """ # pylint: enable=E1101 import collections from decimal import Decimal from kiwi.currency import currency from kiwi.python import Settable from stoqdrivers.enum import TaxType from storm.expr import (And, Avg, Count, LeftJoin, Join, Max, Or, Sum, Alias, Select, Cast, Eq, Coalesce) from storm.info import ClassAlias from storm.references import Reference, ReferenceSet from zope.interface import implementer from stoqlib.database.expr import (Concat, Date, Distinct, Field, NullIf, TransactionTimestamp) from stoqlib.database.properties import (UnicodeCol, DateTimeCol, IntCol, PriceCol, QuantityCol, IdentifierCol, IdCol, BoolCol, EnumCol) from stoqlib.database.runtime import (get_current_user, get_current_branch) from stoqlib.database.viewable import Viewable from stoqlib.domain.address import Address, CityLocation from stoqlib.domain.base import Domain from stoqlib.domain.costcenter import CostCenter from stoqlib.domain.event import Event from stoqlib.domain.events import (SaleStatusChangedEvent, SaleCanCancelEvent, SaleIsExternalEvent, SaleItemBeforeDecreaseStockEvent, SaleItemBeforeIncreaseStockEvent, SaleItemAfterSetBatchesEvent, DeliveryStatusChangedEvent) from stoqlib.domain.fiscal import FiscalBookEntry, Invoice from stoqlib.domain.interfaces import IContainer, IInvoice, IInvoiceItem from stoqlib.domain.payment.payment import Payment from stoqlib.domain.person import (Person, Client, Branch, LoginUser, SalesPerson, Company, Individual, ClientCategory) from stoqlib.domain.product import (Product, ProductHistory, Storable, StockTransactionHistory, StorableBatch) from stoqlib.domain.returnedsale import ReturnedSale, ReturnedSaleItem from stoqlib.domain.sellable import Sellable, SellableCategory from stoqlib.domain.service import Service from stoqlib.domain.taxes import InvoiceItemIcms, InvoiceItemIpi from stoqlib.exceptions import SellError, StockError, DatabaseInconsistency from stoqlib.lib.dateutils import localnow from stoqlib.lib.defaults import quantize from stoqlib.lib.formatters import format_quantity from stoqlib.lib.parameters import sysparam from stoqlib.lib.translation import stoqlib_gettext _ = stoqlib_gettext # pyflakes: Reference requires that CostCenter is imported at least once CostCenter # pylint: disable=W0104 # # Base Domain Classes # @implementer(IInvoiceItem) class SaleItem(Domain): """An item of a |sellable| within a |sale|. 
Different from |sellable| which contains information about the base price, tax, etc, this contains the price in which *self* was sold, its taxes, the quantity, etc. Note that objects of this type should never be created manually, only by calling :meth:`Sale.add_sellable` See also: `schema <http://doc.stoq.com.br/schema/tables/sale_item.html>`__ """ __storm_table__ = 'sale_item' repr_fields = ['sale_id'] #: the quantity of the sold item in this sale quantity = QuantityCol() #: the quantity already decreased from stock. quantity_decreased = QuantityCol(default=0) #: original value the |sellable| had when adding the sale item base_price = PriceCol() #: average cost of the items in this item average_cost = PriceCol(default=0) #: price of this item price = PriceCol() sale_id = IdCol() #: |sale| for this item sale = Reference(sale_id, 'Sale.id') sellable_id = IdCol() #: |sellable| for this item sellable = Reference(sellable_id, 'Sellable.id') batch_id = IdCol() #: If the sellable is a storable, the |batch| that it was removed from batch = Reference(batch_id, 'StorableBatch.id') delivery_id = IdCol(default=None) #: |delivery| or None delivery = Reference(delivery_id, 'Delivery.id') cfop_id = IdCol(default=None) #: :class:`fiscal entry <stoqlib.domain.fiscal.CfopData>` cfop = Reference(cfop_id, 'CfopData.id') #: user defined notes, currently only used by services notes = UnicodeCol(default=None) #: estimated date that *self* will be fixed, currently #: only used by services estimated_fix_date = DateTimeCol(default_factory=localnow) # FIXME: This doesn't appear to be used anywhere. Maybe we # should remove it from the database completion_date = DateTimeCol(default=None) icms_info_id = IdCol() #: the :class:`stoqlib.domain.taxes.InvoiceItemIcms` tax for *self* icms_info = Reference(icms_info_id, 'InvoiceItemIcms.id') ipi_info_id = IdCol() #: the :class:`stoqlib.domain.taxes.InvoiceItemIpi` tax for *self* ipi_info = Reference(ipi_info_id, 'InvoiceItemIpi.id') def __init__(self, store=None, **kw): if not 'kw' in kw: if not 'sellable' in kw: raise TypeError('You must provide a sellable argument') base_price = kw['sellable'].price kw['base_price'] = base_price if not kw.get('cfop'): kw['cfop'] = kw['sellable'].default_sale_cfop if not kw.get('cfop'): kw['cfop'] = sysparam.get_object(store, 'DEFAULT_SALES_CFOP') store = kw.get('store', store) kw['ipi_info'] = InvoiceItemIpi(store=store) kw['icms_info'] = InvoiceItemIcms(store=store) Domain.__init__(self, store=store, **kw) product = self.sellable.product if product: # Set ipi details before icms, since icms may depend on the ipi self.ipi_info.set_item_tax(self) self.icms_info.set_item_tax(self) # # Properties # @property def returned_quantity(self): # FIXME: Check the ReturnedSale status return self.store.find(ReturnedSaleItem, sale_item=self).sum(ReturnedSaleItem.quantity) or Decimal('0') @property def sale_discount(self): """The discount percentage (relative to the original price when the item was sold) :returns: the discount amount """ if self.price > 0 and self.price < self.base_price: return (1 - (self.price / self.base_price)) * 100 return 0 @property def price_with_discount(self): """Applies the sale discount to this item. This will apply the discount given in the sale proportionally to this item. This value should be used when returning or trading this item, since the user should not receive more than what he paid for. 
Please note that this may result in rounding problems, since precision may be lost when applying the discount in the items. :returns: price with discount/surcharge """ diff = self.sale.discount_value - self.sale.surcharge_value if diff == 0: return currency(self.price) # Don't use self.sale.(surcharge/discount)_percentage here since they are # already quantized, and we may lose even more precision. percentage = diff / self.sale.get_sale_subtotal() return currency(quantize(self.price * (1 - percentage))) # # Invoice implementation # @property def item_discount(self): if self.price < self.base_price: return self.base_price - self.price return Decimal('0') @property def parent(self): return self.sale @property def nfe_cfop_code(self): """Returns the cfop code to be used on the NF-e If the sale was also printed on an ECF, then the cfop should be: * 5.929: if sold to a |Client| in the same state or * 6.929: if sold to a |Client| in a different state. :returns: the cfop code """ if self.sale.coupon_id: # find out if the client is in the same state as we are. client_address = self.sale.client.person.get_main_address() our_address = self.sale.branch.person.get_main_address() same_state = True if (our_address.city_location.state != client_address.city_location.state): same_state = False if same_state: return u'5929' else: return u'6929' if self.cfop: return self.cfop.code.replace(u'.', u'') # FIXME: remove sale cfop? return self.sale.cfop.code.replace(u'.', u'') # # Public API # def sell(self, branch): store = self.store if not (branch and branch.id == get_current_branch(store).id): raise SellError(_(u"Stoq still doesn't support sales for " u"branch companies different than the " u"current one")) if not self.sellable.is_available(): raise SellError(_(u"%s is not available for sale. Try making it " u"available first and then try again.") % ( self.sellable.get_description())) # This is emitted here instead of inside the if below because one can # connect on it and change this item in a way that, if it wasn't going # to decrease stock before, it will after SaleItemBeforeDecreaseStockEvent.emit(self) quantity_to_decrease = self.quantity - self.quantity_decreased storable = self.sellable.product_storable if storable and quantity_to_decrease: try: item = storable.decrease_stock( quantity_to_decrease, branch, StockTransactionHistory.TYPE_SELL, self.id, cost_center=self.sale.cost_center, batch=self.batch) except StockError as err: raise SellError(str(err)) self.average_cost = item.stock_cost self.quantity_decreased += quantity_to_decrease def cancel(self, branch): # This is emitted here instead of inside the if below because one can # connect on it and change this item in a way that, if it wasn't going # to increase stock before, it will after SaleItemBeforeIncreaseStockEvent.emit(self) storable = self.sellable.product_storable if storable and self.quantity_decreased: storable.increase_stock(self.quantity_decreased, branch, StockTransactionHistory.TYPE_CANCELED_SALE, self.id, batch=self.batch) self.quantity_decreased = Decimal(0) def reserve(self, quantity): """Reserve some quantity of this item from stock This will remove the informed quantity from the stock. 
""" assert 0 < quantity <= (self.quantity - self.quantity_decreased) storable = self.sellable.product_storable if storable: storable.decrease_stock(quantity, self.sale.branch, StockTransactionHistory.TYPE_SALE_RESERVED, self.id, batch=self.batch) self.quantity_decreased += quantity def return_to_stock(self, quantity): """Return some reserved quantity to stock This will return a previously reserved quantity to stock, so that it can be sold in any other sale. """ assert 0 < quantity <= self.quantity_decreased storable = self.sellable.product_storable if storable: storable.increase_stock(quantity, self.sale.branch, StockTransactionHistory.TYPE_SALE_RESERVED, self.id, batch=self.batch) self.quantity_decreased -= quantity def set_batches(self, batches): """Set batches for this sale item Set how much quantity of each |batch| this sale item represents. Note that this will replicate this item and create others, since the batch reference is one per sale item. At the end, this sale item will contain the quantity not used by any batch yet or, if the sum of quantities on batches are equal to :obj:`.quantity`, it will be used for one of the batches :param batches: a dict mapping the batch to it's quantity :returns: a list of the new created items :raises: :exc:`ValueError` if this item already has a batch :raises: :exc:`ValueError` if the sum of the batches quantities is greater than this item's original quantity """ # Make a copy since we are going to modify this dict batches = batches.copy() if self.batch is not None: raise ValueError("This item already has a batch") quantities_sum = sum(quantity for quantity in batches.values()) if quantities_sum > self.quantity: raise ValueError("The sum of batch quantities needs to be equal " "or less than the item's original quantity") missing = self.quantity - quantities_sum # If there's some quantity missing batch information, leave self # with that missing quantity so it can be set again in the future if missing: self.quantity = missing else: self.batch, self.quantity = batches.popitem() self.icms_info.update_values(self) self.ipi_info.update_values(self) new_sale_items = [] for batch, quantity in batches.items(): new_item = self.__class__( store=self.store, sellable=self.sellable, sale=self.sale, quantity=quantity, batch=batch, cfop=self.cfop, base_price=self.base_price, price=self.price, notes=self.notes) new_item.icms_info.update_values(new_item) new_item.ipi_info.update_values(new_item) new_sale_items.append(new_item) SaleItemAfterSetBatchesEvent.emit(self, new_sale_items) return new_sale_items def set_discount(self, discount): """Apply *discount* on this item Note that the discount will be applied based on :obj:`.base_price` and then substitute :obj:`.price`, making any previous discount/surcharge being lost :param decimal.Decimal discount: the discount to be applied as a percentage, e.g. 10.0, 22.5 """ discount_value = quantize(self.base_price * discount / 100) # The value cannot be <= 0 self.price = max(self.base_price - discount_value, Decimal('0.01')) def get_total(self): # Sale items are suposed to have only 2 digits, but the value price # * quantity may have more than 2, so we need to round it. 
if self.ipi_info: return currency(quantize(self.price * self.quantity + self.ipi_info.v_ipi)) return currency(quantize(self.price * self.quantity)) def get_quantity_unit_string(self): return u"%s %s" % (format_quantity(self.quantity), self.sellable.unit_description) def get_description(self): return self.sellable.get_description() def is_totally_returned(self): """If this sale item was totally returned :returns: ``True`` if it was totally returned, ``False`` otherwise. """ return self.quantity == self.returned_quantity def is_service(self): """If this sale item contains a |service|. :returns: ``True`` if it's a service """ service = self.store.find(Service, sellable=self.sellable).one() return service is not None def get_sale_surcharge(self): """The surcharge percentage (relative to the original price when the item was sold) :returns: the surcharge amount """ if self.price > self.base_price: return ((self.price / self.base_price) - 1) * 100 return 0 @implementer(IContainer) class Delivery(Domain): """Delivery, transporting a set of sale items for sale. Involves a |transporter| transporting a set of |saleitems| to a receival |address|. Optionally a :obj:`.tracking_code` can be set to track the items. See also: `schema <http://doc.stoq.com.br/schema/tables/delivery.html>`__ """ __storm_table__ = 'delivery' #: The delivery was created STATUS_INITIAL = u'initial' #: sent to deliver STATUS_SENT = u'sent' #: received by the |client| STATUS_RECEIVED = u'received' statuses = {STATUS_INITIAL: _(u"Waiting"), STATUS_SENT: _(u"Sent"), STATUS_RECEIVED: _(u"Received")} #: the delivery status status = EnumCol(allow_none=False, default=STATUS_INITIAL) #: the date which the delivery was created open_date = DateTimeCol(default=None) #: the date which the delivery sent to deliver deliver_date = DateTimeCol(default=None) #: the date which the delivery received by the |client| receive_date = DateTimeCol(default=None) #: the delivery tracking code, a transporter specific identifier that #: can be used to look up the status of the delivery tracking_code = UnicodeCol(default=u'') address_id = IdCol(default=None) #: the |address| to deliver to address = Reference(address_id, 'Address.id') transporter_id = IdCol(default=None) #: the |transporter| for this delivery transporter = Reference(transporter_id, 'Transporter.id') service_item_id = IdCol(default=None) #: the |saleitem| for the delivery itself service_item = Reference(service_item_id, 'SaleItem.id') #: the |saleitems| for the items to deliver delivery_items = ReferenceSet('id', 'SaleItem.delivery_id') def __init__(self, store=None, **kwargs): if not 'open_date' in kwargs: kwargs['open_date'] = TransactionTimestamp() super(Delivery, self).__init__(store=store, **kwargs) # # Properties # @property def status_str(self): return self.statuses[self.status] @property def address_str(self): if self.address: return self.address.get_address_string() return u'' @property def client_str(self): client = self.service_item.sale.client if client: return client.get_description() return u'' # # Public API # def set_initial(self): self._set_delivery_status(self.STATUS_INITIAL) def set_sent(self): self._set_delivery_status(self.STATUS_SENT) def set_received(self): self._set_delivery_status(self.STATUS_RECEIVED) # # IContainer implementation # def add_item(self, item): item.delivery = self def get_items(self): return list(self.delivery_items) def remove_item(self, item): item.delivery = None # # Private # def _set_delivery_status(self, status): old_status = self.status 
DeliveryStatusChangedEvent.emit(self, old_status) self.status = status @implementer(IContainer) @implementer(IInvoice) class Sale(Domain): """Sale logic, the process of selling a |sellable| to a |client|. * calculates the sale price including discount/interest/markup * creates payments * decreases the stock for products * creates a delivery (optional) * verifies that the client is suitable * creates commissions to the sales person * add money to the till (if paid with money) * calculate taxes and fiscal book entries +----------------------------+----------------------------+ | **Status** | **Can be set to** | +----------------------------+----------------------------+ | :obj:`STATUS_QUOTE` | :obj:`STATUS_INITIAL` | +----------------------------+----------------------------+ | :obj:`STATUS_INITIAL` | :obj:`STATUS_ORDERED`, | +----------------------------+----------------------------+ | :obj:`STATUS_ORDERED` | :obj:`STATUS_CONFIRMED` | | | :obj:`STATUS_CANCELLED` | +----------------------------+----------------------------+ | :obj:`STATUS_CONFIRMED` | :obj:`STATUS_RENEGOTIATED` | +----------------------------+----------------------------+ | :obj:`STATUS_CANCELLED` | None | +----------------------------+----------------------------+ | :obj:`STATUS_RENEGOTIATED` | None | +----------------------------+----------------------------+ | :obj:`STATUS_RETURNED` | None | +----------------------------+----------------------------+ .. graphviz:: digraph sale_status { STATUS_QUOTE -> STATUS_INITIAL; STATUS_INITIAL -> STATUS_ORDERED; STATUS_ORDERED -> STATUS_CONFIRMED; STATUS_ORDERED -> STATUS_CANCELLED; STATUS_CONFIRMED -> STATUS_CANCELLED; STATUS_CONFIRMED -> STATUS_RENEGOTIATED; } See also: `schema <http://doc.stoq.com.br/schema/tables/sale.html>`__ """ __storm_table__ = 'sale' repr_fields = ['identifier', 'status'] #: The sale is opened, products or other |sellable| items might have #: been added. STATUS_INITIAL = u'initial' #: When asking for sale quote this is the initial state that is set before #: reaching the initial state STATUS_QUOTE = u'quote' #: This state means the order was left the quoting state, but cant just yet #: go to the confirmed state. This may happen for various reasons, #: like when there is not enough stock to confirm the sale; when the sale #: has pending work orders; or when the confirmation should happen on #: the till app (because of the CONFIRM_SALES_AT_TILL parameter) STATUS_ORDERED = u'ordered' #: The sale has been confirmed and all payments have been registered, #: but not necessarily paid. STATUS_CONFIRMED = u'confirmed' #: The sale has been canceled, this can only happen #: to an sale which has not yet reached the SALE_CONFIRMED status. STATUS_CANCELLED = u'cancelled' #: The sale has been returned, all the payments made have been canceled #: and the |client| has been compensated for everything already paid. STATUS_RETURNED = u'returned' #: A sale that is closed as renegotiated, all payments for this sale #: should be canceled at list point. Another new sale is created with #: the new, renegotiated payments. STATUS_RENEGOTIATED = u'renegotiated' statuses = collections.OrderedDict([ (STATUS_INITIAL, _(u'Opened')), (STATUS_QUOTE, _(u'Quoting')), (STATUS_ORDERED, _(u'Ordered')), (STATUS_CONFIRMED, _(u'Confirmed')), (STATUS_CANCELLED, _(u'Cancelled')), (STATUS_RETURNED, _(u'Returned')), (STATUS_RENEGOTIATED, _(u'Renegotiated')), ]) #: A numeric identifier for this object. 
This value should be used instead of #: :obj:`Domain.id` when displaying a numerical representation of this object to #: the user, in dialogs, lists, reports and such. identifier = IdentifierCol() #: status of the sale status = EnumCol(allow_none=False, default=STATUS_INITIAL) # FIXME: this doesn't really belong to the sale # FIXME: it should also be renamed and avoid *_id #: identifier for the coupon of this sale, used by a ECF printer coupon_id = IntCol() # FIXME: This doesn't appear to be used anywhere. # Maybe we should remove it from the database. service_invoice_number = IntCol(default=None) #: the date sale was created, this is always set open_date = DateTimeCol(default_factory=localnow) #: the date sale was confirmed, or None if it hasn't been confirmed confirm_date = DateTimeCol(default=None) #: the date sale was paid, or None if it hasn't be paid close_date = DateTimeCol(default=None) #: the date sale was confirmed, or None if it hasn't been cancelled cancel_date = DateTimeCol(default=None) #: the date sale was confirmed, or None if it hasn't been returned return_date = DateTimeCol(default=None) #: date when this sale expires, used by quotes expire_date = DateTimeCol(default=None) #: This flag indicates if the sale its completely paid and received paid = BoolCol(default=False) #: discount of the sale, in absolute value, for instance:: #: #: sale.total_sale_amount = 150 #: sale.discount_value = 18 #: # the price of the sale will now be 132 #: discount_value = PriceCol(default=0) #: surcharge of the sale, in absolute value, for instance:: #: #: sale.total_sale_amount = 150 #: sale.surcharge_value = 18 #: # the price of the sale will now be 168 #: surcharge_value = PriceCol(default=0) #: the total value of all the items in the same, this is set when #: a sale is confirmed, this is the same as calling #: :obj:`Sale.get_total_sale_amount()` at the time of confirming the sale, total_amount = PriceCol(default=0) #: invoice number for this sale, appears on bills etc. invoice_number = IntCol(default=None) operation_nature = UnicodeCol(default=u'') cfop_id = IdCol() #: the :class:`fiscal entry <stoqlib.domain.fiscal.CfopData>` cfop = Reference(cfop_id, 'CfopData.id') client_id = IdCol(default=None) #: the |client| who this sale was sold to client = Reference(client_id, 'Client.id') salesperson_id = IdCol() #: the |salesperson| who sold the sale salesperson = Reference(salesperson_id, 'SalesPerson.id') branch_id = IdCol() #: the |branch| this sale belongs to branch = Reference(branch_id, 'Branch.id') transporter_id = IdCol(default=None) # FIXME: transporter should only be used on Delivery. #: If we have a delivery, this is the |transporter| for this sale transporter = Reference(transporter_id, 'Transporter.id') group_id = IdCol() #: the |paymentgroup| of this sale group = Reference(group_id, 'PaymentGroup.id') client_category_id = IdCol(default=None) #: the |clientcategory| used for price determination. client_category = Reference(client_category_id, 'ClientCategory.id') cost_center_id = IdCol(default=None) #: the |costcenter| that the cost of the products sold in this sale should #: be accounted for. 
    #: When confirming a sale with a |costcenter| set, a |costcenterentry|
    #: will be created for each product
    cost_center = Reference(cost_center_id, 'CostCenter.id')

    #: list of :class:`comments <stoqlib.domain.sale.SaleComment>` for
    #: this sale
    comments = ReferenceSet('id', 'SaleComment.sale_id',
                            order_by='SaleComment.date')

    #: All returned sales of this sale
    returned_sales = ReferenceSet('id', 'ReturnedSale.sale_id',
                                  order_by='ReturnedSale.return_date')

    invoice_id = IdCol()

    #: The |sale_token| id
    sale_token_id = IdCol()

    #: Reference to |SaleToken|
    sale_token = Reference(sale_token_id, 'SaleToken.id')

    #: The |invoice| generated by the sale
    invoice = Reference(invoice_id, 'Invoice.id')

    def __init__(self, store=None, **kw):
        kw['invoice'] = Invoice(store=store, invoice_type=Invoice.TYPE_OUT)
        super(Sale, self).__init__(store=store, **kw)
        # Branch needs to be set before cfop, which triggers an
        # implicit flush.
        self.branch = kw.pop('branch', None)
        if 'cfop' not in kw:
            self.cfop = sysparam.get_object(store, 'DEFAULT_SALES_CFOP')

    #
    # Classmethods
    #

    @classmethod
    def get_status_name(cls, status):
        """The :obj:`Sale.status` as a translated string"""
        if status not in cls.statuses:
            raise DatabaseInconsistency(_(u"Invalid status %s") % status)
        return cls.statuses[status]

    @classmethod
    def get_last_invoice_number(cls, store):
        """Returns the last sale invoice number. If no invoice
        number has been used yet, the returned value will be zero.

        :param store: a store
        :returns: an integer representing the last sale invoice number
        """
        return store.find(cls).max(cls.invoice_number) or 0

    #
    # IContainer implementation
    #

    def add_item(self, sale_item):
        assert not sale_item.sale
        sale_item.sale = self

    def get_items(self):
        store = self.store
        return store.find(SaleItem, sale=self)

    def remove_item(self, sale_item):
        if sale_item.quantity_decreased > 0:
            sale_item.return_to_stock(sale_item.quantity_decreased)
        sale_item.sale = None
        self.store.maybe_remove(sale_item)

    #
    # IInvoice implementation
    #

    @property
    def recipient(self):
        if self.client:
            return self.client.person
        return None

    @property
    def invoice_subtotal(self):
        return self.get_sale_subtotal()

    @property
    def invoice_total(self):
        return self.get_total_sale_amount()

    @property
    def nfe_coupon_info(self):
        """Returns the coupon information for this sale, if any"""
        if not self.coupon_id:
            return None

        # FIXME: we still dont have the number of the ecf stored in stoq
        # (note: this is not the serial number)
        return Settable(number=u'',
                        coo=self.coupon_id)

    # Status

    def can_order(self):
        """Only newly created sales can be ordered

        :returns: ``True`` if the sale can be ordered
        """
        return (self.status == Sale.STATUS_INITIAL or
                self.status == Sale.STATUS_QUOTE)

    def can_confirm(self):
        """Only ordered sales can be confirmed

        :returns: ``True`` if the sale can be confirmed
        """
        if self.client:
            method_values = {}
            for p in self.payments:
                # We should ignore already paid payments
                if p.is_paid():
                    continue
                method_values.setdefault(p.method, 0)
                method_values[p.method] += p.value

            for method, value in method_values.items():
                assert self.client.can_purchase(method, value)

        return (self.status == Sale.STATUS_ORDERED or
                self.status == Sale.STATUS_QUOTE)

    def can_set_paid(self):
        """Only confirmed sales can raise the flag paid. Also, the sale must
        have at least one payment and all the payments must be already paid.
        :returns: ``True`` if the sale can be set as paid
        """
        if self.paid:
            return False

        payments = list(self.payments)
        if not payments:
            return False

        return all(p.is_paid() for p in payments)

    def can_set_not_paid(self):
        """Only paid sales can be set back as not paid

        :returns: ``True`` if the sale can be set as not paid
        """
        return self.paid

    def can_set_renegotiated(self):
        """Only sales with status confirmed can be renegotiated.

        :returns: ``True`` if the sale can be renegotiated
        """
        # This should be as simple as:
        # return self.status == Sale.STATUS_CONFIRMED
        # But due to bug 3890 we have to check every payment.
        return self.payments.find(
            Payment.status == Payment.STATUS_PENDING).count() > 0

    def can_cancel(self):
        """Only ordered, confirmed, paid and quoting sales can be cancelled.

        :returns: ``True`` if the sale can be cancelled
        """
        # None is acceptable as it means no one caught the event
        if SaleCanCancelEvent.emit(self) is False:
            return False

        # If ALLOW_CANCEL_CONFIRMED_SALES is not set, we can only cancel
        # quoting sales
        if not sysparam.get_bool("ALLOW_CANCEL_CONFIRMED_SALES"):
            return self.status == self.STATUS_QUOTE

        return self.status in (Sale.STATUS_CONFIRMED,
                               Sale.STATUS_ORDERED,
                               Sale.STATUS_QUOTE)

    def can_return(self):
        """Only confirmed (with or without payment) sales can be returned

        :returns: ``True`` if the sale can be returned
        """
        return self.status == Sale.STATUS_CONFIRMED

    def can_edit(self):
        """Check if the sale can be edited.

        Only quoting and ordered sales can be edited, as long as they
        are not external.

        :returns: ``True`` if the sale can be edited
        """
        if self.is_external():
            return False

        return (self.status == Sale.STATUS_QUOTE or
                self.status == Sale.STATUS_ORDERED)

    def is_external(self):
        """Check if this is an external sale.

        :rtype: bool
        """
        return bool(SaleIsExternalEvent.emit(self))

    def is_returned(self):
        return self.status == Sale.STATUS_RETURNED

    def order(self):
        """Orders the sale

        Ordering a sale is the first step done after creating it.
        The state of the sale will change to Sale.STATUS_ORDERED.
        To order a sale you need to add sale items to it.
        A |client| might also be set for the sale, but it is not necessary.
        """
        assert self.can_order()

        if self.get_items().is_empty():
            raise SellError(_('The sale must have sellable items'))
        if self.client and not self.client.is_active:
            raise SellError(_('Unable to make sales for clients with status '
                              '%s') % self.client.get_status_string())

        self._set_sale_status(Sale.STATUS_ORDERED)

    def confirm(self, till=None):
        """Confirms the sale

        Confirming a sale means that the customer has confirmed the sale.
        Sale items containing products are physically received and
        the payments are agreed upon but not necessarily received.
        All money payments will be set as paid.

        :param till: the |till| where this sale was confirmed. Can be
            `None` in case the process was automated (e.g. a virtual store)
        """
        assert self.can_confirm()
        assert self.branch

        # FIXME: We should use self.branch, but it's not supported yet
        store = self.store
        branch = get_current_branch(store)
        for item in self.get_items():
            self.validate_batch(item.batch, sellable=item.sellable)
            if item.sellable.product:
                ProductHistory.add_sold_item(store, branch, item)
            item.sell(branch)

        self.total_amount = self.get_total_sale_amount()

        self.group.confirm()
        self._add_inpayments(till=till)
        self._create_fiscal_entries()

        # Save invoice number, operation_nature and branch in Invoice table.
        self.invoice.invoice_number = self.invoice_number
        self.invoice.operation_nature = self.operation_nature
        self.invoice.branch = branch

        if self._create_commission_at_confirm():
            for payment in self.payments:
                self.create_commission(payment)

        if self.client:
            self.group.payer = self.client.person

        self.confirm_date = TransactionTimestamp()

        # When confirming a sale, all credit and money payments are
        # automatically paid.
        # Since some plugins may listen to the sale status change event, we
        # should set payments as paid before the status change.
        for method in (u'money', u'credit'):
            self.group.pay_method_payments(method)

        self._set_sale_status(Sale.STATUS_CONFIRMED)

        if self.sale_token:
            self.sale_token.close_token()

        # do not log money payments twice
        if not self.only_paid_with_money():
            if self.client:
                msg = _(u"Sale {sale_number} to client {client_name} was "
                        u"confirmed with value {total_value:.2f}.").format(
                    sale_number=self.identifier,
                    client_name=self.client.person.name,
                    total_value=self.get_total_sale_amount())
            else:
                msg = _(u"Sale {sale_number} without a client was "
                        u"confirmed with value {total_value:.2f}.").format(
                    sale_number=self.identifier,
                    total_value=self.get_total_sale_amount())
            Event.log(self.store, Event.TYPE_SALE, msg)

    def set_paid(self):
        """Mark the sale as paid

        Marking a sale as paid means that all the payments have been received.
        """
        assert self.can_set_paid()

        # Right now commissions are created when the payment is confirmed
        # (if the parameter is set) or when the payment is paid.
        # This code is still here for the users that have some payments
        # created (and paid) but no commission created yet.
        # This can be removed sometime in the future.
        for payment in self.payments:
            self.create_commission(payment)

        self.close_date = TransactionTimestamp()
        self.paid = True

        if self.only_paid_with_money():
            # Money payments are confirmed and paid, so log them that way
            if self.client:
                msg = _(u"Sale {sale_number} to client {client_name} was paid "
                        u"and confirmed with value {total_value:.2f}.").format(
                    sale_number=self.identifier,
                    client_name=self.client.person.name,
                    total_value=self.get_total_sale_amount())
            else:
                msg = _(u"Sale {sale_number} without a client was paid "
                        u"and confirmed with value {total_value:.2f}.").format(
                    sale_number=self.identifier,
                    total_value=self.get_total_sale_amount())
        else:
            if self.client:
                msg = _(u"Sale {sale_number} to client {client_name} was paid "
                        u"with value {total_value:.2f}.").format(
                    sale_number=self.identifier,
                    client_name=self.client.person.name,
                    total_value=self.get_total_sale_amount())
            else:
                msg = _(u"Sale {sale_number} without a client was paid "
                        u"with value {total_value:.2f}.").format(
                    sale_number=self.identifier,
                    total_value=self.get_total_sale_amount())
        Event.log(self.store, Event.TYPE_SALE, msg)

    def set_not_paid(self):
        """Mark a sale as not paid. This happens when the user sets a
        previously paid payment as not paid.
        """
        assert self.can_set_not_paid()

        self.close_date = None
        self.paid = False

    def set_renegotiated(self):
        """Set the sale as renegotiated. The sale payments have been
        renegotiated and the operations will be done in another
        |paymentgroup|."""
        assert self.can_set_renegotiated()

        self.close_date = TransactionTimestamp()
        self._set_sale_status(Sale.STATUS_RENEGOTIATED)

    def set_not_returned(self):
        """Sets a sale as not returned

        This will reset the sale status to confirmed (since you can only
        return a confirmed sale). Also, the return_date will be reset.
""" self._set_sale_status(Sale.STATUS_CONFIRMED) self.return_date = None def cancel(self, force=False): """Cancel the sale You can only cancel an ordered sale. This will also cancel all the payments related to it. :param force: if ``True``, :meth:`.can_cancel` will not be asserted. Only use this if you really need to (for example, when canceling the last sale on the ecf) """ if not force: assert self.can_cancel() branch = get_current_branch(self.store) for item in self.get_items(): item.cancel(branch) self.cancel_date = TransactionTimestamp() self._set_sale_status(Sale.STATUS_CANCELLED) self.paid = False # Cancel payments for payment in self.payments: if payment.can_cancel(): payment.cancel() def return_(self, returned_sale): """Returns a sale Returning a sale means that all the items are returned to the stock. A renegotiation object needs to be supplied which contains the invoice number and the eventual penalty :param returned_sale: a :class:`stoqlib.domain.returnedsale.ReturnedSale` object. It can be created by :meth:`create_sale_return_adapter` """ assert self.can_return() assert isinstance(returned_sale, ReturnedSale) totally_returned = all([sale_item.is_totally_returned() for sale_item in self.get_items()]) if totally_returned: self.return_date = TransactionTimestamp() self._set_sale_status(Sale.STATUS_RETURNED) self.paid = False if self.client: if totally_returned: msg = _(u"Sale {sale_number} to client {client_name} was " u"totally returned with value {total_value:.2f}. " u"Reason: {reason}") else: msg = _(u"Sale {sale_number} to client {client_name} was " u"partially returned with value {total_value:.2f}. " u"Reason: {reason}") msg = msg.format(sale_number=self.identifier, client_name=self.client.person.name, total_value=returned_sale.returned_total, reason=returned_sale.reason) else: if totally_returned: msg = _(u"Sale {sale_number} without a client was " u"totally returned with value {total_value:.2f}. " u"Reason: {reason}") else: msg = _(u"Sale {sale_number} without a client was " u"partially returned with value {total_value:.2f}. " u"Reason: {reason}") msg = msg.format(sale_number=self.identifier, total_value=returned_sale.returned_total, reason=returned_sale.reason) Event.log(self.store, Event.TYPE_SALE, msg) def set_items_discount(self, discount): """Apply discount on this sale's items :param decimal.Decimal discount: the discount to be applied as a percentage, e.g. 10.0, 22.5 """ new_total = currency(0) items = self.get_items() for item in items: item.set_discount(discount) new_total += item.price * item.quantity # Since we apply the discount percentage above, items can generate a # 3rd decimal place, that will be rounded to the 2nd, making the value # differ. Find that difference and apply it to a sale item. The sale # item that will be used for this rounding is the first one that the # quantity can divide the diff. discount_value = quantize((self.get_sale_base_subtotal() * discount) / 100) diff = new_total - self.get_sale_base_subtotal() + discount_value if diff: # The value cannot be <= 0 # Note that we should use price instead of base_price, since the # for above may have changed the price already for item in items: if (diff * 100) % item.quantity == 0: item.price = max(item.price - diff / item.quantity, Decimal('0.01')) break # # Accessors # def get_total_sale_amount(self, subtotal=None): """ Fetches the total value paid by the |client|. 
        It can be calculated as::

            Sale total = Sum(product and service prices) + surcharge +
                         interest - discount

        :param subtotal: pre-calculated subtotal; pass this in to avoid
            querying the database again
        :returns: the total value
        """
        if subtotal is None:
            subtotal = self.get_sale_subtotal()

        surcharge_value = self.surcharge_value or Decimal(0)
        discount_value = self.discount_value or Decimal(0)
        total_amount = subtotal + surcharge_value - discount_value
        return currency(total_amount)

    def get_sale_subtotal(self):
        """Fetch the subtotal for the sale, i.e. the sum of the prices of
        all items.

        :returns: subtotal
        """
        total = 0
        for i in self.get_items():
            total += i.get_total()

        return currency(total)

    def get_sale_base_subtotal(self):
        """Get the base subtotal of items

        Just a helper that, unlike :meth:`.get_sale_subtotal`, will
        return the total based on each item's base price.

        :returns: the base subtotal
        """
        subtotal = self.get_items().sum(SaleItem.quantity *
                                        SaleItem.base_price)
        return currency(subtotal)

    def get_items_total_quantity(self):
        """Fetches the total number of items in the sale

        :returns: number of items
        """
        return self.get_items().sum(SaleItem.quantity) or Decimal(0)

    def get_total_paid(self):
        """Return the total amount already paid for this sale

        :returns: the total amount paid
        """
        total_paid = 0
        for payment in self.group.get_valid_payments():
            if payment.is_inpayment() and payment.is_paid():
                # Already paid by the client. Use value instead of paid_value,
                # as the latter might include penalties and discounts not
                # applicable here
                total_paid += payment.value
            elif payment.is_outpayment():
                # Already returned to the client
                total_paid -= payment.value

        return currency(total_paid)

    def get_total_to_pay(self):
        """Missing payment value for this sale.

        Returns the value the client still needs to pay for this sale.
        This is the same as
        :meth:`.get_total_sale_amount` - :meth:`.get_total_paid`
        """
        return currency(self.get_total_sale_amount() -
                        self.get_total_paid())

    def get_returned_value(self):
        """The total value returned from this sale.

        This will return the sum of all returned sales of this sale.
        """
        return currency(sum(i.returned_total for i in self.returned_sales))

    def get_available_discount_for_items(self, user=None, exclude_item=None):
        """Get available discount for items in this sale

        The available items discount is the total discount not used
        by items in this sale. For instance, if we have 2 products
        with a price of 100 and they can have 10% of discount, we have
        20 of discount available. If one of those products' price is
        set to 98, that is, using 2 of its discount, the available
        discount is now 18.

        :param user: passed to
            :meth:`stoqlib.domain.sellable.Sellable.get_maximum_discount`
            together with :obj:`.client_category` to check for the max
            discount for sellables on this sale
        :param exclude_item: a |saleitem| to exclude from the calculations.
            Useful if you are trying to get some extra discount for that item
            and you don't want its discount to be considered here
        :returns: the available discount
        """
        available_discount = currency(0)
        used_discount = currency(0)

        for item in self.get_items():
            if item == exclude_item:
                continue
            # Don't put surcharges on the discount, or it can end up negative
            if item.price > item.base_price:
                continue

            used_discount += item.base_price - item.price
            max_discount = item.sellable.get_maximum_discount(
                category=self.client_category, user=user) / 100
            available_discount += item.base_price * max_discount

        return available_discount - used_discount

    def get_details_str(self):
        """Returns the sale details

        The details are composed of the item notes, the delivery
        address and the estimated fix date

        Note that there might be some extra comments on :obj:`.comments`

        :returns: the sale details string.
        """
        details = []
        delivery_added = False
        for sale_item in self.get_items():
            if delivery_added is False:
                # FIXME: Adding the delivery info just once can lead to an
                # error. It's possible that some item went to delivery X
                # while some went to delivery Y.
                delivery = sale_item.delivery
                if delivery is not None:
                    details.append(_(u'Delivery Address: %s') %
                                   delivery.address.get_address_string())
                    # At the moment, we just support one delivery per sale.
                    delivery_added = True
                    delivery = None
            else:
                if sale_item.notes:
                    details.append(_(u'"%s" Notes: %s') % (
                        sale_item.get_description(), sale_item.notes))
            if sale_item.is_service() and sale_item.estimated_fix_date:
                details.append(_(u'"%s" Estimated Fix Date: %s') % (
                    sale_item.get_description(),
                    sale_item.estimated_fix_date.strftime('%x')))
        return u'\n'.join(sorted(details))

    def get_salesperson_name(self):
        """
        :returns: the salesperson name
        """
        return self.salesperson.get_description()

    def get_client_name(self):
        """Returns the client name, if a |client| has been provided for
        this sale

        :returns: the client name, or a placeholder string for sales
            without a client set.
        """
        if not self.client:
            return _(u'Not Specified')
        return self.client.get_name()

    def get_client_document(self):
        """Returns the client document for this sale

        This could be either its cnpj or cpf.
        """
        if not self.client_id:
            return None
        return self.client.person.get_cnpj_or_cpf()

    # FIXME: move over to client or person
    def get_client_role(self):
        """Fetches the client role

        :returns: the client role (an |individual| or a |company|) instance
            or None if the sale doesn't have a |client| set.
        """
        if not self.client:
            return None
        client_role = self.client.person.has_individual_or_company_facets()
        if client_role is None:
            raise DatabaseInconsistency(
                _(u"The sale %r has a client but no "
                  u"client_role defined.") % self)

        return client_role

    def get_items_missing_batch(self):
        """Get all |saleitems| missing a |batch|

        This usually happens when we create a quote. Since we are not
        removing the items from the stock, they probably were not set
        on the |saleitem|.

        :returns: a result set of |saleitems| that need the batch
            information set
        """
        return self.store.find(
            SaleItem,
            And(SaleItem.sale_id == self.id,
                SaleItem.sellable_id == Sellable.id,
                Product.sellable_id == Sellable.id,
                Storable.product_id == Product.id,
                Eq(Storable.is_batch, True),
                Eq(SaleItem.batch_id, None)))

    def need_adjust_batches(self):
        """Checks if we need to set |batches| for this sale's |saleitems|

        This usually happens when we create a quote. Since we are not
        removing the items from the stock, they probably were not set
        on the |saleitem|.
        :returns: ``True`` if any |saleitem| needs a |batch|,
            ``False`` otherwise.
        """
        return not self.get_items_missing_batch().is_empty()

    def check_and_adjust_batches(self):
        """Check batches and perform a first adjustment when a sale item
        has only one available batch.

        :returns: ``True`` if all items that need a batch were adjusted,
            or ``False`` if there are items that could not be adjusted.
        """
        # No batches, or batches already adjusted
        if not self.need_adjust_batches():
            return True

        all_adjusted = True
        sale_items = self.get_items_missing_batch()
        # Set the batch when there is only one available.
        for item in sale_items:
            storable = item.sellable.product_storable
            available_batches = list(
                storable.get_available_batches(self.branch))
            if len(available_batches) == 1:
                item.batch = available_batches[0]
            else:
                all_adjusted = False

        return all_adjusted

    def only_paid_with_money(self):
        """Find out if the sale is paid using money

        :returns: ``True`` if the sale was paid with money
        """
        if self.payments.is_empty():
            return False
        return all(payment.is_of_method(u'money')
                   for payment in self.payments)

    def add_sellable(self, sellable, quantity=1, price=None,
                     quantity_decreased=0, batch=None):
        """Adds a new item to a sale.

        :param sellable: the |sellable|
        :param quantity: quantity to add, defaults to 1
        :param price: optional; if not set, the price from the
            sellable will be used
        :param quantity_decreased: the quantity already decreased from
            stock. e.g. if the param quantity is 10 and that quantity was
            already decreased from stock, this param should be 10 too.
        :param batch: the |batch| this sellable comes from, if the sellable
            is a storable. Should be ``None`` if it is not a storable or if
            the storable does not have batches.
        :returns: a |saleitem| representing the sellable within this sale.
        """
        # Quotes can add items without batches, but they will be validated
        # later, on self.confirm
        if self.status not in (self.STATUS_QUOTE, self.STATUS_ORDERED):
            self.validate_batch(batch, sellable=sellable)
        price = price or sellable.price
        return SaleItem(store=self.store,
                        quantity=quantity,
                        quantity_decreased=quantity_decreased,
                        sale=self,
                        sellable=sellable,
                        batch=batch,
                        price=price)

    def create_sale_return_adapter(self):
        store = self.store
        current_user = get_current_user(store)
        returned_sale = ReturnedSale(
            store=store,
            sale=self,
            branch=get_current_branch(store),
            responsible=current_user,
        )
        for sale_item in self.get_items():
            if sale_item.is_totally_returned():
                # Exclude quantities already returned from this one
                continue
            ReturnedSaleItem(
                store=store,
                sale_item=sale_item,
                returned_sale=returned_sale,
                quantity=sale_item.quantity_decreased,
                batch=sale_item.batch,
                # XXX Please note that when applying the sale discount to the
                # items, there may be some rounding issues, leaving the total
                # value either greater or lower than the expected value.
                price=sale_item.price_with_discount,
            )
        return returned_sale

    def create_commission(self, payment):
        """Creates a commission for the *payment*

        This will create a |commission| for the given |payment|,
        :obj:`.sale` and :obj:`.sale.salesperson`. Note that, if the
        payment already has a commission, nothing will be done.
""" from stoqlib.domain.commission import Commission if payment.has_commission(): return commission = Commission( commission_type=self._get_commission_type(), sale=self, payment=payment, store=self.store) if payment.is_outpayment(): commission.value = -commission.value return commission def get_first_sale_comment(self): first_comment = self.comments.first() if first_comment: return first_comment.comment return u'' # # Properties # @property def status_str(self): return self.get_status_name(self.status) @property def products(self): """All |saleitems| of this sale containing a |product|. :returns: the result set containing the |saleitems|, ordered by :attr:`stoqlib.domain.sellable.Sellable.code` """ return self.store.find( SaleItem, And(SaleItem.sale_id == self.id, SaleItem.sellable_id == Product.sellable_id, SaleItem.sellable_id == Sellable.id)).order_by( Sellable.code) @property def services(self): """All |saleitems| of this sale containing a |service|. :returns: the result set containing the |saleitems|, ordered by :attr:`stoqlib.domain.sellable.Sellable.code` """ return self.store.find( SaleItem, And(SaleItem.sale_id == self.id, SaleItem.sellable_id == Service.sellable_id, SaleItem.sellable_id == Sellable.id)).order_by( Sellable.code) @property def payments(self): """Returns all valid payments for this sale ordered by open date This will return a list of valid payments for this sale, that is, all payments on the |paymentgroups| that were not cancelled. If you need to get the cancelled too, use :obj:`.group.payments`. :returns: an ordered iterable of |payment|. """ return self.group.get_valid_payments().order_by(Payment.open_date) @property def discount_percentage(self): """Sets a discount by percentage. Note that percentage must be added as an absolute value, in other words:: sale.total_sale_amount = 200 sale.discount_percentage = 5 # the price of the sale will now be be `190` """ discount_value = self.discount_value if not discount_value: return Decimal(0) subtotal = self.get_sale_subtotal() assert subtotal > 0, ('the sale subtotal should not be zero ' 'at this point') total = subtotal - discount_value percentage = (1 - total / subtotal) * 100 return quantize(percentage) @discount_percentage.setter def discount_percentage(self, value): self.discount_value = self._get_percentage_value(value) @property def surcharge_percentage(self): """Sets a discount by percentage. 
        Note that the percentage must be given as an absolute value, in
        other words::

            sale.total_sale_amount = 200
            sale.surcharge_percentage = 5
            # the price of the sale will now be `210`
        """
        surcharge_value = self.surcharge_value
        if not surcharge_value:
            return Decimal(0)
        subtotal = self.get_sale_subtotal()
        assert subtotal > 0, ('the sale subtotal should not be zero '
                              'at this point')
        total = subtotal + surcharge_value
        percentage = ((total / subtotal) - 1) * 100
        return quantize(percentage)

    @surcharge_percentage.setter
    def surcharge_percentage(self, value):
        self.surcharge_value = self._get_percentage_value(value)

    #
    # Private API
    #

    def _set_sale_status(self, status):
        old_status = self.status
        self.status = status
        SaleStatusChangedEvent.emit(self, old_status)

    def _get_percentage_value(self, percentage):
        if not percentage:
            return currency(0)
        subtotal = self.get_sale_subtotal()
        percentage = Decimal(percentage)
        perc_value = subtotal * (percentage / Decimal(100))
        # discount/surcharge cannot have more than 2 decimal places
        return quantize(currency(perc_value))

    def _add_inpayments(self, till=None):
        payments = self.payments
        if not payments.count():
            raise ValueError(
                _('You must have at least one payment for each payment group'))

        if till is None:
            return

        for payment in payments:
            if not payment.is_inpayment():
                # There may be a change payment if the client has overpaid
                # the sale; those should not be added to the till.
                continue
            till.add_entry(payment)

    def _create_commission_at_confirm(self):
        return sysparam.get_bool('SALE_PAY_COMMISSION_WHEN_CONFIRMED')

    def _get_commission_type(self):
        from stoqlib.domain.commission import Commission

        nitems = 0
        for item in self.payments:
            if not item.is_outpayment():
                nitems += 1

        if nitems <= 1:
            return Commission.DIRECT
        return Commission.INSTALLMENTS

    def _get_pm_commission_total(self):
        """Return the payment method commission total. Usually the credit
        card payment method is the most common method which uses commission
        """
        return currency(0)

    def _get_icms_total(self, av_difference):
        """A Brazil-specific method

        Calculates the ICMS total value

        :param av_difference: the average difference for the sale items,
            i.e. the average discount or surcharge applied over all
            sale items
        """
        icms_total = Decimal(0)
        for item in self.products:
            price = item.price + av_difference
            sellable = item.sellable
            tax_constant = sellable.get_tax_constant()
            if tax_constant is None or tax_constant.tax_type != TaxType.CUSTOM:
                continue
            icms_tax = tax_constant.tax_value / Decimal(100)
            icms_total += icms_tax * (price * item.quantity)

        return icms_total

    def _get_iss_total(self, av_difference):
        """A Brazil-specific method

        Calculates the ISS total value

        :param av_difference: the average difference for the sale items,
            i.e. the average discount or surcharge applied over all
            sale items
        """
        iss_total = Decimal(0)
        iss_tax = sysparam.get_decimal('ISS_TAX') / Decimal(100)
        for item in self.services:
            price = item.price + av_difference
            iss_total += iss_tax * (price * item.quantity)
        return iss_total

    def _get_average_difference(self):
        if self.get_items().is_empty():
            raise DatabaseInconsistency(
                _(u"Sale orders must have items, which means products or "
                  u"services"))
        total_quantity = self.get_items_total_quantity()
        if not total_quantity:
            raise DatabaseInconsistency(
                _(u"Sale total quantity should never be zero"))
        # If there is a discount or a surcharge applied to the whole total
        # sale amount, we must share it between all the item values,
        # otherwise the ICMS and ISS won't be calculated properly
        total = (self.get_total_sale_amount() -
                 self._get_pm_commission_total())
        subtotal = self.get_sale_subtotal()
        return (total - subtotal) / total_quantity

    def _get_iss_entry(self):
        return FiscalBookEntry.get_entry_by_payment_group(
            self.store, self.group,
            FiscalBookEntry.TYPE_SERVICE)

    def _create_fiscal_entries(self):
        """A Brazil-specific method

        Create new ICMS and ISS entries in the fiscal book for a given sale.

        Important: freight and interest are not part of the base value for
        ICMS. Only product values and surcharges which increase the product
        totals are considered here.
        """
        av_difference = self._get_average_difference()

        if not self.products.is_empty():
            FiscalBookEntry.create_product_entry(
                self.store,
                self.group, self.cfop, self.coupon_id,
                self._get_icms_total(av_difference))

        if not self.services.is_empty() and self.service_invoice_number:
            FiscalBookEntry.create_service_entry(
                self.store,
                self.group, self.cfop,
                self.service_invoice_number,
                self._get_iss_total(av_difference))


class SaleToken(Domain):
    """A token to help with sales in restaurants and hotels

    This will be attached to a |sale| to identify where it is being
    served, e.g. table 1, table 2, room 12, room 334
    """

    __storm_table__ = 'sale_token'

    STATUS_AVAILABLE = u'available'
    STATUS_OCCUPIED = u'occupied'

    statuses = {STATUS_AVAILABLE: _(u'Available'),
                STATUS_OCCUPIED: _(u'Occupied')}

    #: the status of the sale_token
    status = EnumCol(allow_none=False, default=STATUS_AVAILABLE)

    #: the code used to identify the token
    code = UnicodeCol()

    #
    # Class methods
    #

    @classmethod
    def get_status_name(cls, status):
        return cls.statuses[status]

    #
    # Properties
    #

    @property
    def status_str(self):
        return self.get_status_name(self.status)

    #
    # Public API
    #

    def open_token(self):
        assert self._can_open()
        self.status = SaleToken.STATUS_OCCUPIED

    def close_token(self):
        assert self._can_close()
        self.status = SaleToken.STATUS_AVAILABLE

    #
    # Private API
    #

    def _can_open(self):
        return self.status == SaleToken.STATUS_AVAILABLE

    def _can_close(self):
        return self.status == SaleToken.STATUS_OCCUPIED


class SaleComment(Domain):
    """A simple holder for |sale| comments

    See also:
    `schema <http://doc.stoq.com.br/schema/tables/sale_comment.html>`__
    """

    __storm_table__ = 'sale_comment'

    #: When this comment was created
    date = DateTimeCol(default_factory=localnow)

    #: The comment itself.
    comment = UnicodeCol()

    author_id = IdCol()
    #: The author of the comment
    author = Reference(author_id, 'LoginUser.id')

    sale_id = IdCol()
    #: The |sale| that was commented
    sale = Reference(sale_id, 'Sale.id')


#
# Views
#

class ReturnedSaleItemsView(Viewable):
    branch = Branch
    returned_sale = ReturnedSale

    # returned and original sale item
    id = ReturnedSaleItem.id
    quantity = ReturnedSaleItem.quantity
    price = ReturnedSaleItem.price

    # returned and original sale
    _sale_id = Sale.id
    _new_sale_id = ReturnedSale.new_sale_id
    returned_identifier = ReturnedSale.identifier
    invoice_number = ReturnedSale.invoice_number
    return_date = ReturnedSale.return_date
    reason = ReturnedSale.reason

    # sellable
    sellable_id = ReturnedSaleItem.sellable_id
    code = Sellable.code
    description = Sellable.description

    batch_number = Coalesce(StorableBatch.batch_number, u'')
    batch_date = StorableBatch.create_date

    # summaries
    total = ReturnedSaleItem.price * ReturnedSaleItem.quantity

    tables = [
        ReturnedSaleItem,
        LeftJoin(StorableBatch, StorableBatch.id == ReturnedSaleItem.batch_id),
        Join(SaleItem, SaleItem.id == ReturnedSaleItem.sale_item_id),
        Join(Sellable, Sellable.id == ReturnedSaleItem.sellable_id),
        Join(ReturnedSale,
             ReturnedSale.id == ReturnedSaleItem.returned_sale_id),
        Join(Sale, Sale.id == ReturnedSale.sale_id),
        # Note that the sale branch may be different than the returned sale
        # branch
        Join(Branch, Branch.id == ReturnedSale.branch_id),
    ]

    @property
    def new_sale(self):
        if not self._new_sale_id:
            return None
        return self.store.get(Sale, self._new_sale_id)

    #
    # Classmethods
    #

    @classmethod
    def find_by_sale(cls, store, sale):
        return store.find(cls,
                          _sale_id=sale.id).order_by(ReturnedSale.return_date)


_SaleItemSummary = Select(
    columns=[SaleItem.sale_id,
             Alias(Sum(InvoiceItemIpi.v_ipi), 'v_ipi'),
             Alias(Sum(SaleItem.quantity), 'total_quantity'),
             Alias(Sum(SaleItem.quantity * SaleItem.price), 'subtotal')],
    tables=[SaleItem,
            LeftJoin(InvoiceItemIpi,
                     InvoiceItemIpi.id == SaleItem.ipi_info_id)],
    group_by=[SaleItem.sale_id])

SaleItemSummary = Alias(_SaleItemSummary, '_sale_item')


class SaleView(Viewable):
    """Stores general information about sales"""

    Person_Branch = ClassAlias(Person, 'person_branch')
    Person_Client = ClassAlias(Person, 'person_client')
    Person_SalesPerson = ClassAlias(Person, 'person_sales_person')

    #: the |sale| of the view
    sale = Sale

    #: the |client| of the view
    client = Client

    #: the |branch| this sale was sold at
    branch = Branch

    #: the id of the sale table
    id = Sale.id

    #: unique numeric identifier for the sale
    identifier = Sale.identifier

    #: unique numeric identifier for the sale, text representation
    identifier_str = Cast(Sale.identifier, 'text')

    #: the sale invoice number
    invoice_number = Sale.invoice_number

    #: the id generated by the fiscal printer
    coupon_id = Sale.coupon_id

    #: the date when the sale was started
    open_date = Sale.open_date

    #: the date when the sale was closed
    close_date = Sale.close_date

    #: the date when the sale was confirmed
    confirm_date = Sale.confirm_date

    #: the date when the sale was cancelled
    cancel_date = Sale.cancel_date

    #: the date when the sale was returned
    return_date = Sale.return_date

    #: the date when the sale will expire
    expire_date = Sale.expire_date

    #: the sale status
    status = Sale.status

    #: the flag that indicates if the sale is completely paid
    paid = Sale.paid

    #: the sale surcharge value
    surcharge_value = Sale.surcharge_value

    #: the sale discount value
    discount_value = Sale.discount_value

    #: the |branch| where this |sale| was sold
    branch_id = Sale.branch_id

    #: the id of the |client| table
client_id = Client.id #: the salesperson name salesperson_name = Coalesce(Person_SalesPerson.name, u'') #: the |sale| salesperson id salesperson_id = SalesPerson.id #: the |sale| client name client_name = Coalesce(Person_Client.name, u'') #: name of the |branch| this |sale| was sold branch_name = Coalesce(NullIf(Company.fancy_name, u''), Person_Branch.name) # Summaries v_ipi = Coalesce(Field('_sale_item', 'v_ipi'), 0) #: the sum of all items in the sale _subtotal = Coalesce(Field('_sale_item', 'subtotal'), 0) + v_ipi #: the items total quantity for the sale total_quantity = Coalesce(Field('_sale_item', 'total_quantity'), 0) #: the subtotal - discount + charge _total = Coalesce(Field('_sale_item', 'subtotal'), 0) - \ Sale.discount_value + Sale.surcharge_value + v_ipi tables = [ Sale, LeftJoin(SaleItemSummary, Field('_sale_item', 'sale_id') == Sale.id), LeftJoin(Branch, Sale.branch_id == Branch.id), LeftJoin(Client, Sale.client_id == Client.id), LeftJoin(SalesPerson, Sale.salesperson_id == SalesPerson.id), LeftJoin(Person_Branch, Branch.person_id == Person_Branch.id), LeftJoin(Company, Company.person_id == Person_Branch.id), LeftJoin(Person_Client, Client.person_id == Person_Client.id), LeftJoin(Person_SalesPerson, SalesPerson.person_id == Person_SalesPerson.id), ] @classmethod def post_search_callback(cls, sresults): select = sresults.get_select_expr(Count(1), Sum(cls._total)) return ('count', 'sum'), select # # Class methods # @classmethod def find_by_branch(cls, store, branch): if branch: return store.find(cls, Sale.branch == branch) return store.find(cls) # # Properties # @property def returned_sales(self): return self.store.find(ReturnedSale, sale_id=self.id) @property def return_total(self): store = self.store returned_items = store.find(ReturnedSaleItemsView, Sale.id == self.id) return currency(returned_items.sum(ReturnedSaleItemsView.total) or 0) # # Public API # def can_return(self): return self.sale.can_return() def can_confirm(self): return self.sale.can_confirm() def can_cancel(self): return self.sale.can_cancel() def can_edit(self): return self.sale.can_edit() @property def subtotal(self): # The editor requires the model to be a currency, but _subtotal is a # decimal. 
        # So we need to convert it
        return currency(self._subtotal)

    @property
    def total(self):
        return currency(self._total)

    @property
    def open_date_as_string(self):
        return self.open_date.strftime("%x")

    @property
    def status_name(self):
        return Sale.get_status_name(self.status)


class SaleCommentsView(Viewable):
    """A view for |salecomments|

    This is used to get the most information of a |salecomment|
    without doing lots of database queries
    """

    #: the |salecomment| object
    comment = SaleComment

    # SaleComment
    id = SaleComment.id
    comment = SaleComment.comment
    date = SaleComment.date

    # Sale
    sale_id = Sale.id
    sale_identifier = Sale.identifier

    # Person
    author_name = Person.name

    tables = [
        SaleComment,
        Join(Sale, SaleComment.sale_id == Sale.id),
        Join(LoginUser, SaleComment.author_id == LoginUser.id),
        Join(Person, LoginUser.person_id == Person.id),
    ]

    @classmethod
    def find_by_sale(cls, store, sale):
        """Find results for this view for *sale*

        :param store: a store
        :param sale: the |sale| used to filter the results
        :returns: the matching views
        :rtype: a sequence of :class:`SaleCommentsView`
        """
        return store.find(cls, sale_id=sale.id)


class ReturnedSaleView(Viewable):
    """Stores general information about returned sales."""

    #: alias for the Branch Person object
    Person_Branch = ClassAlias(Person, 'person_branch')

    #: alias for the Client Person object
    Person_Client = ClassAlias(Person, 'person_client')

    #: alias for the SalesPerson Person object
    Person_SalesPerson = ClassAlias(Person, 'person_sales_person')

    #: alias for the LoginUser Person object
    Person_LoginUser = ClassAlias(Person, 'person_login_user')

    #: the |sale|
    sale = Sale

    #: the |client|
    client = Client

    branch = Branch

    #: the |returnedsale|
    returned_sale = ReturnedSale

    #: the id of the returned_sale table
    id = ReturnedSale.id

    #: a unique numeric identifier for the returned sale
    identifier = ReturnedSale.identifier

    #: the identifier as a string
    identifier_str = Cast(ReturnedSale.identifier, 'text')

    #: invoice of the returned sale
    invoice_number = ReturnedSale.invoice_number

    #: date of the product return
    return_date = ReturnedSale.return_date

    #: reason for returning the sale
    reason = ReturnedSale.reason

    #: person_id of the person responsible for returning the sale
    responsible_id = ReturnedSale.responsible_id

    #: id of the sales branch
    branch_id = ReturnedSale.branch_id

    #: id of the new sale, if one exists (e.g. an exchange of product)
    new_sale_id = ReturnedSale.new_sale_id

    #: Sale id
    sale_id = Sale.id

    #: total value of the returned sale
    total = Sum(ReturnedSaleItem.price * ReturnedSaleItem.quantity)

    #: Client id
    client_id = Client.id

    #: name of the salesperson
    salesperson_name = Person_SalesPerson.name

    #: name of the client person
    client_name = Person_Client.name

    #: name of the branch person
    branch_name = Coalesce(NullIf(Company.fancy_name, u''), Person_Branch.name)

    #: name of the person responsible for the returned sale
    responsible_name = Person_LoginUser.name

    # Table queries
    tables = [
        ReturnedSale,
        Join(Sale, Sale.id == ReturnedSale.sale_id),
        LeftJoin(ReturnedSaleItem,
                 ReturnedSaleItem.returned_sale_id == ReturnedSale.id),
        LeftJoin(Sellable, Sellable.id == ReturnedSaleItem.sellable_id),
        LeftJoin(Branch, ReturnedSale.branch_id == Branch.id),
        LeftJoin(Client, Sale.client_id == Client.id),
        LeftJoin(SalesPerson, Sale.salesperson_id == SalesPerson.id),
        LeftJoin(LoginUser, LoginUser.id == ReturnedSale.responsible_id),
        LeftJoin(Person_Branch, Branch.person_id == Person_Branch.id),
        LeftJoin(Company, Company.person_id == Person_Branch.id),
        LeftJoin(Person_Client, Client.person_id == Person_Client.id),
        LeftJoin(Person_SalesPerson,
                 SalesPerson.person_id == Person_SalesPerson.id),
        LeftJoin(Person_LoginUser, Person_LoginUser.id == LoginUser.person_id)
    ]

    group_by = [id, sale_id, Sellable.id, client_id, branch_id, branch_name,
                salesperson_name, client_name, LoginUser.id,
                Person_LoginUser.name, branch]

    #: name of the sellable product
    product_name = Sellable.description


class SalePaymentMethodView(SaleView):
    # If a sale has more than one payment, it will appear as many times in
    # the search. Must always be used with select(distinct=True).
    tables = SaleView.tables[:]
    tables.append(LeftJoin(Payment, Sale.group_id == Payment.group_id))

    #
    # Class Methods
    #

    @classmethod
    def find_by_payment_method(cls, store, method):
        if method:
            results = store.find(cls, Payment.method == method)
        else:
            results = store.find(cls)

        results.config(distinct=True)
        return results


class SoldSellableView(Viewable):
    Person_Client = ClassAlias(Person, 'person_client')
    Person_SalesPerson = ClassAlias(Person, 'person_sales_person')

    id = Sellable.id
    code = Sellable.code
    description = Sellable.description

    client_id = Sale.client_id
    client_name = Person_Client.name

    # Aggregates
    total_quantity = Sum(SaleItem.quantity)
    subtotal = Sum(SaleItem.quantity * SaleItem.price)

    group_by = [id, code, description, client_id, client_name]

    tables = [
        Sellable,
        LeftJoin(SaleItem, SaleItem.sellable_id == Sellable.id),
        LeftJoin(Sale, Sale.id == SaleItem.sale_id),
        LeftJoin(Client, Sale.client_id == Client.id),
        LeftJoin(SalesPerson, Sale.salesperson_id == SalesPerson.id),
        LeftJoin(Person_Client, Client.person_id == Person_Client.id),
        LeftJoin(Person_SalesPerson,
                 SalesPerson.person_id == Person_SalesPerson.id),
        LeftJoin(InvoiceItemIpi, InvoiceItemIpi.id == SaleItem.ipi_info_id),
    ]


class SoldServicesView(SoldSellableView):
    estimated_fix_date = SaleItem.estimated_fix_date

    group_by = SoldSellableView.group_by[:]
    group_by.append(estimated_fix_date)

    tables = SoldSellableView.tables[:]
    tables.append(Join(Service, Sellable.id == Service.sellable_id))


class SoldProductsView(SoldSellableView):
    # Aggregates
    last_date = Max(Sale.open_date)
    avg_value = Avg(SaleItem.price)
    quantity = Sum(SaleItem.quantity)
    total_value = Sum(SaleItem.quantity * SaleItem.price)

    tables = SoldSellableView.tables[:]
    tables.append(Join(Product, Sellable.id == Product.sellable_id))


# FIXME: This needs some more work, as currently, this viewable is:
# * Not filtering the payments correctly given a date.
# * Not ignoring payments from returned sales # Get the total amount already paid in a sale and group it by sales person _PaidSale = Select(columns=[Sale.salesperson_id, Alias(Sum(Payment.paid_value), 'paid_value')], tables=[Sale, LeftJoin(Payment, Payment.group_id == Sale.group_id)], group_by=[Sale.salesperson_id]) PaidSale = Alias(_PaidSale, '_paid_sale') class SalesPersonSalesView(Viewable): id = SalesPerson.id name = Person.name # aggregates total_amount = Sum(Sale.total_amount) total_quantity = Sum(Field('_sale_item', 'total_quantity')) total_sales = Count(Sale.id) #paid_value = Field('_paid_sale', 'paid_value') group_by = [id, name] tables = [ SalesPerson, LeftJoin(Sale, Sale.salesperson_id == SalesPerson.id), LeftJoin(SaleItemSummary, Field('_sale_item', 'sale_id') == Sale.id), LeftJoin(Person, Person.id == SalesPerson.person_id), #LeftJoin(PaidSale, Field('_paid_sale', 'salesperson_id') == SalesPerson.id), ] clause = Sale.status == Sale.STATUS_CONFIRMED @classmethod def find_by_date(cls, store, date): if date: if isinstance(date, tuple): date_query = And(Date(Sale.confirm_date) >= date[0], Date(Sale.confirm_date) <= date[1]) else: date_query = Date(Sale.confirm_date) == date results = store.find(cls, date_query) else: results = store.find(cls) results.config(distinct=True) return results class ClientsWithSaleView(Viewable): main_address = Address city_location = CityLocation id = Person.id person_name = Person.name phone = Person.phone_number email = Person.email cpf = Individual.cpf birth_date = Individual.birth_date cnpj = Company.cnpj category = ClientCategory.name sales = Count(Distinct(Sale.id)) sale_items = Sum(SaleItem.quantity) total_amount = Sum(SaleItem.price * SaleItem.quantity) last_purchase = Max(Sale.confirm_date) tables = [ Person, Join(Client, Person.id == Client.person_id), LeftJoin(ClientCategory, ClientCategory.id == Client.category_id), LeftJoin(Individual, Individual.person_id == Person.id), LeftJoin(Company, Company.person_id == Person.id), Join(Sale, Client.id == Sale.client_id), Join(SaleItem, SaleItem.sale_id == Sale.id), Join(Sellable, Sellable.id == SaleItem.sellable_id), LeftJoin(SellableCategory, SellableCategory.id == Sellable.category_id), LeftJoin(Address, And(Address.person_id == Person.id, Eq(Address.is_main_address, True))), LeftJoin(CityLocation, Address.city_location_id == CityLocation.id), ] group_by = [id, Individual.id, Company.id, ClientCategory.id, Address.id, CityLocation.id] clause = Sale.status == Sale.STATUS_CONFIRMED # # Public API # @property def address_string(self): return self.main_address.get_address_string() @property def details_string(self): return self.main_address.get_details_string() @property def cnpj_or_cpf(self): return self.cnpj or self.cpf class SoldItemsByClient(Viewable): product = Product sellable = Sellable id = Concat(Sellable.id, Person.name) # Sellable code = Sellable.code description = Sellable.description sellable_category = SellableCategory.description # Client client_name = Person.name email = Person.email phone_number = Person.phone_number # Aggregates base_price = Avg(SaleItem.base_price) quantity = Sum(SaleItem.quantity) price = Avg(SaleItem.price) total = Sum(SaleItem.quantity * SaleItem.price) tables = [ Sellable, Join(SellableCategory, SellableCategory.id == Sellable.category_id), Join(Product, Product.sellable_id == Sellable.id), Join(SaleItem, SaleItem.sellable_id == Sellable.id), Join(Sale, SaleItem.sale_id == Sale.id), LeftJoin(Client, Client.id == Sale.client_id), LeftJoin(Person, Person.id == 
Client.person_id), ] clause = Or(Sale.status == Sale.STATUS_CONFIRMED, Sale.status == Sale.STATUS_ORDERED) group_by = [id, Person.id, Product, sellable_category, Sellable.id]
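
# --- Illustrative worked example (editor's addition, not part of the
# original module): a minimal, self-contained check of the percentage math
# used by Sale.discount_percentage and Sale._get_percentage_value above,
# assuming a subtotal of 200 and a 5% discount. Only the standard library
# is used; the names below are local to this sketch.
if __name__ == '__main__':  # pragma: no cover
    from decimal import Decimal

    subtotal = Decimal('200')
    percentage = Decimal('5')
    # Mirrors _get_percentage_value: the percentage is stored as an
    # absolute value, quantized to 2 decimal places
    discount_value = (subtotal * (percentage / Decimal(100))).quantize(
        Decimal('0.01'))
    assert discount_value == Decimal('10.00')
    # Mirrors the discount_percentage getter: reading the property back
    # recovers the original 5%
    total = subtotal - discount_value
    assert (1 - total / subtotal) * 100 == percentage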
andrebellafronte/stoq
stoqlib/domain/sale.py
Python
gpl-2.0
86,939
[ "VisIt" ]
7cb370625b267198d2cae275aa86c324415aed897aa2b2c79b20908cd2efbe5a
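# --- Editor's note: a hedged usage sketch for the Sale class in the record
# above. The `store`, `branch`, `salesperson`, `sellable` and `till` names
# are assumed to come from an already-configured Stoq environment, and at
# least one inpayment must exist on sale.group before confirm(); this
# illustrates the documented status flow and is not runnable on its own.
#
#     sale = Sale(store=store, branch=branch, salesperson=salesperson)
#     sale.add_sellable(sellable, quantity=2)   # returns a SaleItem
#     sale.order()                              # -> STATUS_ORDERED
#     # ...register an inpayment on sale.group for
#     # sale.get_total_sale_amount() here...
#     sale.confirm(till=till)                   # -> STATUS_CONFIRMED
#     if sale.can_set_paid():
#         sale.set_paid()                       # sale.paid is now True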
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.layers.python import layers as tf_layers
from models.conv_lstm import basic_conv_lstm_cell, conv_lstm_cell_no_input

# Amount to use when lower bounding tensors
RELU_SHIFT = 1e-12
FC_LAYER_SIZE = 256
FC_LSTM_LAYER_SIZE = 128


def encoder_model(frames, sequence_length, initializer, scope='encoder', fc_conv_layer=False):
  """
  Args:
    frames: 5D array of batch with videos - shape(batch_size, num_frames, frame_width, frame_height, num_channels)
    sequence_length: number of frames that shall be encoded
    scope: tensorflow variable scope name
    initializer: specifies the initialization type (default: contrib.slim.layers uses Xavier init with uniform data)
    fc_conv_layer: adds a fully convolutional layer (4x4x32 --> 1x1xFC_LAYER_SIZE) at the end of the encoder
  Returns:
    hidden_repr: hidden state of the highest ConvLSTM layer
  """
  lstm_state1, lstm_state2, lstm_state3, lstm_state4, lstm_state5, lstm_state6 = None, None, None, None, None, None

  for i in range(sequence_length):
    frame = frames[:, i, :, :, :]

    reuse = (i > 0)

    with tf.variable_scope(scope, reuse=reuse):
      # LAYER 1: conv1
      conv1 = slim.layers.conv2d(frame, 16, [5, 5], stride=2, scope='conv1', normalizer_fn=tf_layers.layer_norm,
                                 weights_initializer=initializer, normalizer_params={'scope': 'layer_norm1'})

      # LAYER 2: convLSTM1
      hidden1, lstm_state1 = basic_conv_lstm_cell(conv1, lstm_state1, 16, initializer, filter_size=5, scope='convlstm1')
      hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2')

      # LAYER 3: conv2
      conv2 = slim.layers.conv2d(hidden1, hidden1.get_shape()[3], [5, 5], stride=2, scope='conv2',
                                 normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
                                 normalizer_params={'scope': 'layer_norm3'})

      # LAYER 4: convLSTM2
      hidden2, lstm_state2 = basic_conv_lstm_cell(conv2, lstm_state2, 16, initializer, filter_size=5, scope='convlstm2')
      hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm4')

      # LAYER 5: conv3
      conv3 = slim.layers.conv2d(hidden2, hidden2.get_shape()[3], [5, 5], stride=2, scope='conv3',
                                 normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
                                 normalizer_params={'scope': 'layer_norm5'})

      # LAYER 6: convLSTM3
      hidden3, lstm_state3 = basic_conv_lstm_cell(conv3, lstm_state3, 16, initializer, filter_size=3, scope='convlstm3')
      hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm6')

      # LAYER 7: conv4
      conv4 = slim.layers.conv2d(hidden3, hidden3.get_shape()[3], [3, 3], stride=2, scope='conv4',
                                 normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
                                 normalizer_params={'scope': 'layer_norm7'})

      # LAYER 8: convLSTM4 (8x8 featuremap size)
      hidden4, lstm_state4 = basic_conv_lstm_cell(conv4, lstm_state4, 32, initializer, filter_size=3, scope='convlstm4')
      hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm8')

      # LAYER 9: conv5
      conv5 = slim.layers.conv2d(hidden4, hidden4.get_shape()[3], [3, 3], stride=2, scope='conv5',
                                 normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
                                 normalizer_params={'scope': 'layer_norm9'})

      # LAYER 10: convLSTM5 (4x4 featuremap size)
      hidden5, lstm_state5 = basic_conv_lstm_cell(conv5, lstm_state5, 32, initializer, filter_size=3, scope='convlstm5')
      hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm10')

      # LAYER 11: Fully Convolutional Layer (4x4x32 --> 1x1xFC_LAYER_SIZE)
      fc_conv = slim.layers.conv2d(hidden5, FC_LAYER_SIZE, [4, 4], stride=1, scope='fc_conv', padding='VALID',
                                   weights_initializer=initializer)

      # LAYER 12: Fully Convolutional LSTM
      # (1x1x256 -> 1x1x128)
      hidden6, lstm_state6 = basic_conv_lstm_cell(fc_conv, lstm_state6, FC_LSTM_LAYER_SIZE, initializer,
                                                  filter_size=1, scope='convlstm6')

      hidden_repr = lstm_state6

  return hidden_repr


def decoder_model(hidden_repr, sequence_length, initializer, num_channels=3, scope='decoder', fc_conv_layer=False):
  """
  Args:
    hidden_repr: Tensor of latent space representation
    sequence_length: number of frames that shall be decoded from the hidden_repr
    num_channels: number of channels for generated frames
    initializer: specifies the initialization type (default: contrib.slim.layers uses Xavier init with uniform data)
    fc_conv_layer: indicates whether hidden_repr is a 1x1xdepth tensor and a fully convolutional layer shall be added
  Returns:
    frame_gen: array of generated frames (Tensors)
  """
  frame_gen = []

  lstm_state1, lstm_state2, lstm_state3, lstm_state4, lstm_state5, lstm_state0 = None, None, None, None, None, None

  assert (not fc_conv_layer) or (hidden_repr.get_shape()[1] == hidden_repr.get_shape()[2] == 1)

  lstm_state0 = hidden_repr

  for i in range(sequence_length):
    reuse = (i > 0)  # reuse variables (recurrence) after the first time step

    with tf.variable_scope(scope, reuse=reuse):
      # Fully Convolutional Layer (1x1xFC_LAYER_SIZE -> 4x4x32)
      hidden0, lstm_state0 = conv_lstm_cell_no_input(lstm_state0, FC_LSTM_LAYER_SIZE, initializer,
                                                     filter_size=1, scope='convlstm0')
      fc_conv = slim.layers.conv2d_transpose(hidden0, 32, [4, 4], stride=1, scope='fc_conv', padding='VALID',
                                             weights_initializer=initializer)

      # LAYER 1: convLSTM1
      hidden1, lstm_state1 = basic_conv_lstm_cell(fc_conv, lstm_state1, 32, initializer, filter_size=3,
                                                  scope='convlstm1')
      hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm1')

      # LAYER 2: upconv1 (4x4 -> 8x8)
      upconv1 = slim.layers.conv2d_transpose(hidden1, hidden1.get_shape()[3], 3, stride=2, scope='upconv1',
                                             weights_initializer=initializer, normalizer_fn=tf_layers.layer_norm,
                                             normalizer_params={'scope': 'layer_norm2'})

      # LAYER 3: convLSTM2
      hidden2, lstm_state2 = basic_conv_lstm_cell(upconv1, lstm_state2, 32, initializer, filter_size=3,
                                                  scope='convlstm2')
      hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm3')

      # LAYER 4: upconv2 (8x8 -> 16x16)
      upconv2 = slim.layers.conv2d_transpose(hidden2, hidden2.get_shape()[3], 3, stride=2, scope='upconv2',
                                             weights_initializer=initializer, normalizer_fn=tf_layers.layer_norm,
                                             normalizer_params={'scope': 'layer_norm4'})

      # LAYER 5: convLSTM3
      hidden3, lstm_state3 = basic_conv_lstm_cell(upconv2, lstm_state3, 16, initializer, filter_size=3,
                                                  scope='convlstm3')
      hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm5')

      # LAYER 6: upconv3 (16x16 -> 32x32)
      upconv3 = slim.layers.conv2d_transpose(hidden3, hidden3.get_shape()[3], 5, stride=2, scope='upconv3',
                                             weights_initializer=initializer, normalizer_fn=tf_layers.layer_norm,
                                             normalizer_params={'scope': 'layer_norm6'})

      # LAYER 7: convLSTM4
      hidden4, lstm_state4 = basic_conv_lstm_cell(upconv3, lstm_state4, 16, initializer, filter_size=5,
                                                  scope='convlstm4')
      hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm7')

      # LAYER 8: upconv4 (32x32 -> 64x64)
      upconv4 = slim.layers.conv2d_transpose(hidden4, 16, 5, stride=2, scope='upconv4',
                                             normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
                                             normalizer_params={'scope': 'layer_norm8'})

      # LAYER 9: convLSTM5
      hidden5, lstm_state5 = basic_conv_lstm_cell(upconv4, lstm_state5, 16, initializer, filter_size=5,
                                                  scope='convlstm5')
      hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm9')

      # LAYER 10: upconv5 (64x64 -> 128x128)
      upconv5 =
 slim.layers.conv2d_transpose(hidden5, num_channels, 5, stride=2, scope='upconv5',
                                             weights_initializer=initializer)

      frame_gen.append(upconv5)

  assert len(frame_gen) == sequence_length
  return frame_gen


def composite_model(frames, encoder_len=5, decoder_future_len=5, decoder_reconst_len=5, uniform_init=True,
                    num_channels=3, fc_conv_layer=True):
  """
  Args:
    frames: 5D array of batch with videos - shape(batch_size, num_frames, frame_width, frame_height, num_channels)
    encoder_len: number of frames that shall be encoded
    decoder_future_len: number of future frames that shall be decoded from the hidden_repr
    decoder_reconst_len: number of frames that shall be reconstructed from the hidden_repr
    uniform_init: specifies if the weight initialization should be drawn from gaussian or uniform distribution (default: uniform)
    num_channels: number of channels for generated frames
    fc_conv_layer: indicates whether a fully convolutional layer shall be added between encoder and decoder
  Returns:
    frames_pred, frames_reconst, hidden_repr: lists of generated frames (Tensors) and the latent representation
  """
  assert all([length > 0 for length in [encoder_len, decoder_future_len, decoder_reconst_len]])
  initializer = tf_layers.xavier_initializer(uniform=uniform_init)
  hidden_repr = encoder_model(frames, encoder_len, initializer, fc_conv_layer=fc_conv_layer)
  frames_pred = decoder_model(hidden_repr, decoder_future_len, initializer, num_channels=num_channels,
                              scope='decoder_pred', fc_conv_layer=fc_conv_layer)
  frames_reconst = decoder_model(hidden_repr, decoder_reconst_len, initializer, num_channels=num_channels,
                                 scope='decoder_reconst', fc_conv_layer=fc_conv_layer)
  return frames_pred, frames_reconst, hidden_repr
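
# --- Illustrative usage sketch (editor's addition, not part of the original
# module): wiring the composite model into a TF 1.x graph. The shapes are
# assumptions consistent with the five stride-2 stages above: 128x128 RGB
# clips, a batch of 16, with 5 frames encoded and 5 decoded per decoder.
if __name__ == '__main__':
  frames = tf.placeholder(tf.float32, shape=(16, 15, 128, 128, 3))
  frames_pred, frames_reconst, hidden_repr = composite_model(
      frames, encoder_len=5, decoder_future_len=5, decoder_reconst_len=5,
      num_channels=3, fc_conv_layer=True)
  # frames_pred and frames_reconst are lists of five (16, 128, 128, 3)
  # tensors; hidden_repr is the 1x1 state of the fully convolutional LSTM.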
jonasrothfuss/DeepEpisodicMemory
models/model_zoo/model_conv5_fc_lstm2_128.py
Python
mit
9,772
[ "Gaussian" ]
6cb3d4b3822b8903fe7faa09594110480dd710c9b3b5bbca2a84e97747db72b7
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from datetime import datetime
import math
import numpy as np
import tensorflow as tf
import time

from differential_privacy.multiple_teachers import utils

FLAGS = tf.app.flags.FLAGS

# Basic model parameters.
tf.app.flags.DEFINE_integer('dropout_seed', 123, """seed for dropout.""")
tf.app.flags.DEFINE_integer('batch_size', 128, """Nb of images in a batch.""")
tf.app.flags.DEFINE_integer('epochs_per_decay', 350, """Nb epochs per decay""")
tf.app.flags.DEFINE_integer('learning_rate', 5, """100 * learning rate""")
tf.app.flags.DEFINE_boolean('log_device_placement', False, """see TF doc""")

# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999  # The decay to use for the moving average.
LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.


def _variable_on_cpu(name, shape, initializer):
  """Helper to create a Variable stored on CPU memory.

  Args:
    name: name of the variable
    shape: list of ints
    initializer: initializer for Variable

  Returns:
    Variable Tensor
  """
  with tf.device('/cpu:0'):
    var = tf.get_variable(name, shape, initializer=initializer)
  return var


def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var


def inference(images, dropout=False):
  """Build the CNN model.

  Args:
    images: Images returned from distorted_inputs() or inputs().
    dropout: Boolean controlling whether to use dropout or not

  Returns:
    Logits
  """
  if FLAGS.dataset == 'mnist':
    first_conv_shape = [5, 5, 1, 64]
  else:
    first_conv_shape = [5, 5, 3, 64]

  # conv1
  with tf.variable_scope('conv1') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=first_conv_shape,
                                         stddev=1e-4,
                                         wd=0.0)
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(bias, name=scope.name)
    if dropout:
      conv1 = tf.nn.dropout(conv1, 0.3, seed=FLAGS.dropout_seed)

  # pool1
  pool1 = tf.nn.max_pool(conv1,
                         ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1],
                         padding='SAME',
                         name='pool1')

  # norm1
  norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm1')

  # conv2
  with tf.variable_scope('conv2') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 64, 128],
                                         stddev=1e-4,
                                         wd=0.0)
    conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.1))
    bias = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(bias, name=scope.name)
    if dropout:
      conv2 = tf.nn.dropout(conv2, 0.3, seed=FLAGS.dropout_seed)

  # norm2
  norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm2')

  # pool2
  pool2 = tf.nn.max_pool(norm2,
                         ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1],
                         padding='SAME',
                         name='pool2')

  # local3
  with tf.variable_scope('local3') as scope:
    # Move everything into depth so we can perform a single matrix multiply.
    reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
    dim = reshape.get_shape()[1].value
    weights = _variable_with_weight_decay('weights',
                                          shape=[dim, 384],
                                          stddev=0.04,
                                          wd=0.004)
    biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
    local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
    if dropout:
      local3 = tf.nn.dropout(local3, 0.5, seed=FLAGS.dropout_seed)

  # local4
  with tf.variable_scope('local4') as scope:
    weights = _variable_with_weight_decay('weights',
                                          shape=[384, 192],
                                          stddev=0.04,
                                          wd=0.004)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
    if dropout:
      local4 = tf.nn.dropout(local4, 0.5, seed=FLAGS.dropout_seed)

  # compute logits
  with tf.variable_scope('softmax_linear') as scope:
    weights = _variable_with_weight_decay('weights',
                                          [192, FLAGS.nb_labels],
                                          stddev=1/192.0,
                                          wd=0.0)
    biases = _variable_on_cpu('biases',
                              [FLAGS.nb_labels],
                              tf.constant_initializer(0.0))
    logits = tf.add(tf.matmul(local4, weights), biases, name=scope.name)

  return logits


def inference_deeper(images, dropout=False):
  """Build a deeper CNN model.

  Args:
    images: Images returned from distorted_inputs() or inputs().
    dropout: Boolean controlling whether to use dropout or not

  Returns:
    Logits
  """
  if FLAGS.dataset == 'mnist':
    first_conv_shape = [3, 3, 1, 96]
  else:
    first_conv_shape = [3, 3, 3, 96]

  # conv1
  with tf.variable_scope('conv1') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=first_conv_shape,
                                         stddev=0.05,
                                         wd=0.0)
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [96], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(bias, name=scope.name)

  # conv2
  with tf.variable_scope('conv2') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[3, 3, 96, 96],
                                         stddev=0.05,
                                         wd=0.0)
    conv = tf.nn.conv2d(conv1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [96], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(bias, name=scope.name)

  # conv3
  with tf.variable_scope('conv3') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[3, 3, 96, 96],
                                         stddev=0.05,
                                         wd=0.0)
    conv = tf.nn.conv2d(conv2, kernel, [1, 2, 2, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [96], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv3 = tf.nn.relu(bias, name=scope.name)
    if dropout:
      conv3 = tf.nn.dropout(conv3, 0.5, seed=FLAGS.dropout_seed)

  # conv4
  with tf.variable_scope('conv4') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[3, 3, 96, 192],
                                         stddev=0.05,
                                         wd=0.0)
    conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv4 = tf.nn.relu(bias, name=scope.name)

  # conv5
  with tf.variable_scope('conv5') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[3, 3, 192, 192],
                                         stddev=0.05,
                                         wd=0.0)
    conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv5 = tf.nn.relu(bias, name=scope.name)

  # conv6
  with tf.variable_scope('conv6') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[3, 3, 192, 192],
                                         stddev=0.05,
                                         wd=0.0)
    conv = tf.nn.conv2d(conv5, kernel, [1, 2, 2, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv6 = tf.nn.relu(bias, name=scope.name)
    if dropout:
      conv6 = tf.nn.dropout(conv6, 0.5, seed=FLAGS.dropout_seed)

  # conv7
  with tf.variable_scope('conv7') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 192, 192],
                                         stddev=1e-4,
                                         wd=0.0)
    conv = tf.nn.conv2d(conv6, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    bias = tf.nn.bias_add(conv, biases)
    conv7 = tf.nn.relu(bias, name=scope.name)

  # local1
  with tf.variable_scope('local1') as scope:
    # Move everything into depth so we can perform a single matrix multiply.
    reshape = tf.reshape(conv7, [FLAGS.batch_size, -1])
    dim = reshape.get_shape()[1].value
    weights = _variable_with_weight_decay('weights',
                                          shape=[dim, 192],
                                          stddev=0.05,
                                          wd=0)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local1 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

  # local2
  with tf.variable_scope('local2') as scope:
    weights = _variable_with_weight_decay('weights',
                                          shape=[192, 192],
                                          stddev=0.05,
                                          wd=0)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local2 = tf.nn.relu(tf.matmul(local1, weights) + biases, name=scope.name)
    if dropout:
      local2 = tf.nn.dropout(local2, 0.5, seed=FLAGS.dropout_seed)

  # compute logits
  with tf.variable_scope('softmax_linear') as scope:
    weights = _variable_with_weight_decay('weights',
                                          [192, FLAGS.nb_labels],
                                          stddev=0.05,
                                          wd=0.0)
    biases = _variable_on_cpu('biases',
                              [FLAGS.nb_labels],
                              tf.constant_initializer(0.0))
    logits = tf.add(tf.matmul(local2, weights), biases, name=scope.name)

  return logits


def loss_fun(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".

  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor of shape
        [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the cross entropy between labels and predictions
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels, name='cross_entropy_per_example')

  # Calculate the average cross entropy loss across the batch.
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')

  # Add to TF collection for losses
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')


def moving_av(total_loss):
  """Generates moving average for all losses.

  Args:
    total_loss: Total loss from loss().

  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Compute the moving average of all individual losses and the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  losses = tf.get_collection('losses')
  loss_averages_op = loss_averages.apply(losses + [total_loss])

  return loss_averages_op


def train_op_fun(total_loss, global_step):
  """Train model.

  Create an optimizer and apply to all trainable variables. Add moving
  average for all trainable variables.

  Args:
    total_loss: Total loss from loss().
    global_step: Integer Variable counting the number of training steps
        processed.

  Returns:
    train_op: op for training.
  """
  # Variables that affect learning rate.
  nb_ex_per_train_epoch = int(60000 / FLAGS.nb_teachers)

  num_batches_per_epoch = nb_ex_per_train_epoch / FLAGS.batch_size
  decay_steps = int(num_batches_per_epoch * FLAGS.epochs_per_decay)

  initial_learning_rate = float(FLAGS.learning_rate) / 100.0

  # Decay the learning rate exponentially based on the number of steps.
  lr = tf.train.exponential_decay(initial_learning_rate,
                                  global_step,
                                  decay_steps,
                                  LEARNING_RATE_DECAY_FACTOR,
                                  staircase=True)
  tf.scalar_summary('learning_rate', lr)

  # Generate moving averages of all losses and associated summaries.
  loss_averages_op = moving_av(total_loss)

  # Compute gradients.
  with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)

  # Apply gradients.
  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

  # Add histograms for trainable variables.
  for var in tf.trainable_variables():
    tf.histogram_summary(var.op.name, var)

  # Track the moving averages of all trainable variables.
  variable_averages = tf.train.ExponentialMovingAverage(
      MOVING_AVERAGE_DECAY, global_step)
  variables_averages_op = variable_averages.apply(tf.trainable_variables())

  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
    train_op = tf.no_op(name='train')

  return train_op


def _input_placeholder():
  """This helper function declares a TF placeholder for the graph input data.

  :return: TF placeholder for the graph input data
  """
  if FLAGS.dataset == 'mnist':
    image_size = 28
    num_channels = 1
  else:
    image_size = 32
    num_channels = 3

  # Declare data placeholder
  train_node_shape = (FLAGS.batch_size, image_size, image_size, num_channels)
  return tf.placeholder(tf.float32, shape=train_node_shape)


def train(images, labels, ckpt_path, dropout=False):
  """This function contains the loop that actually trains the model.

  :param images: a numpy array with the input data
  :param labels: a numpy array with the output labels
  :param ckpt_path: a path (including name) where model checkpoints are saved
  :param dropout: Boolean, whether to use dropout or not
  :return: True if everything went well
  """
  # Check training data
  assert len(images) == len(labels)
  assert images.dtype == np.float32
  assert labels.dtype == np.int32

  # Set default TF graph
  with tf.Graph().as_default():
    global_step = tf.Variable(0, trainable=False)

    # Declare data placeholder
    train_data_node = _input_placeholder()

    # Create a placeholder to hold labels
    train_labels_shape = (FLAGS.batch_size,)
    train_labels_node = tf.placeholder(tf.int32, shape=train_labels_shape)

    print("Done Initializing Training Placeholders")

    # Build a Graph that computes the logits predictions from the placeholder
    if FLAGS.deeper:
      logits = inference_deeper(train_data_node, dropout=dropout)
    else:
      logits = inference(train_data_node, dropout=dropout)

    # Calculate loss
    loss = loss_fun(logits, train_labels_node)

    # Build a Graph that trains the model with one batch of examples and
    # updates the model parameters.
    train_op = train_op_fun(loss, global_step)

    # Create a saver.
    saver = tf.train.Saver(tf.all_variables())

    print("Graph constructed and saver created")

    # Build an initialization operation to run below.
    init = tf.global_variables_initializer()

    # Create and init sessions
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=FLAGS.log_device_placement))  #NOLINT(long-line)
    sess.run(init)

    print("Session ready, beginning training loop")

    # Initialize the number of batches
    data_length = len(images)
    nb_batches = math.ceil(data_length / FLAGS.batch_size)

    for step in xrange(FLAGS.max_steps):
      # for debug, save start time
      start_time = time.time()

      # Current batch number
      batch_nb = step % nb_batches

      # Current batch start and end indices
      start, end = utils.batch_indices(batch_nb, data_length, FLAGS.batch_size)

      # Prepare dictionary to feed the session with
      feed_dict = {train_data_node: images[start:end],
                   train_labels_node: labels[start:end]}

      # Run training step
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)

      # Compute duration of training step
      duration = time.time() - start_time

      # Sanity check
      assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

      # Echo loss once in a while
      if step % 100 == 0:
        num_examples_per_step = FLAGS.batch_size
        examples_per_sec = num_examples_per_step / duration
        sec_per_batch = float(duration)

        format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                      'sec/batch)')
        print(format_str % (datetime.now(), step, loss_value,
                            examples_per_sec, sec_per_batch))

      # Save the model checkpoint periodically.
      if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        saver.save(sess, ckpt_path, global_step=step)

  return True


def softmax_preds(images, ckpt_path, return_logits=False):
  """Compute softmax activations (probabilities) with the model saved in the
  path specified as an argument.

  :param images: a np array of images
  :param ckpt_path: a TF model checkpoint
  :param return_logits: if set to True, return logits instead of probabilities
  :return: probabilities (or logits if return_logits is set to True)
  """
  # Compute nb samples and deduce nb of batches
  data_length = len(images)
  nb_batches = math.ceil(len(images) / FLAGS.batch_size)

  # Declare data placeholder
  train_data_node = _input_placeholder()

  # Build a Graph that computes the logits predictions from the placeholder
  if FLAGS.deeper:
    logits = inference_deeper(train_data_node)
  else:
    logits = inference(train_data_node)

  if return_logits:
    # We are returning the logits directly (no need to apply softmax)
    output = logits
  else:
    # Add softmax predictions to graph: will return probabilities
    output = tf.nn.softmax(logits)

  # Restore the moving average version of the learned variables for eval.
  variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
  variables_to_restore = variable_averages.variables_to_restore()
  saver = tf.train.Saver(variables_to_restore)

  # Will hold the result
  preds = np.zeros((data_length, FLAGS.nb_labels), dtype=np.float32)

  # Create TF session
  with tf.Session() as sess:
    # Restore TF session from checkpoint file
    saver.restore(sess, ckpt_path)

    # Parse data by batch
    for batch_nb in xrange(0, int(nb_batches + 1)):
      # Compute batch start and end indices
      start, end = utils.batch_indices(batch_nb, data_length, FLAGS.batch_size)

      # Prepare feed dictionary
      feed_dict = {train_data_node: images[start:end]}

      # Run session ([0] because run returns a batch with len 1st dim == 1)
      preds[start:end, :] = sess.run([output], feed_dict=feed_dict)[0]

  # Reset graph to allow multiple calls
  tf.reset_default_graph()

  return preds
aakashsinha19/Aspectus
Image Classification/models/differential_privacy/multiple_teachers/deep_cnn.py
Python
apache-2.0
21,830
[ "Gaussian" ]
9765483f7e3a0398f7ed9eabe55ca90f5ca79990bed9be715679066dc63c2f2e
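A self-contained re-run of the learning-rate schedule arithmetic from train_op_fun above (Python 3 division). The flag values are assumptions chosen for illustration; only the constants 60000, the /100 scaling and the staircase decay come from the file.

# Assumed flag values, not taken from a real run.
nb_teachers = 10
batch_size = 128
epochs_per_decay = 350
learning_rate_flag = 5  # the flag stores 100 * learning rate

nb_ex_per_train_epoch = int(60000 / nb_teachers)              # 6000
num_batches_per_epoch = nb_ex_per_train_epoch / batch_size    # 46.875
decay_steps = int(num_batches_per_epoch * epochs_per_decay)   # 16406
initial_lr = learning_rate_flag / 100.0                       # 0.05

def lr_at(global_step, decay_factor=0.1):
    # Staircase schedule: the exponent only increases on whole multiples of
    # decay_steps, mirroring tf.train.exponential_decay(staircase=True).
    return initial_lr * decay_factor ** (global_step // decay_steps)

for step in (0, decay_steps, 2 * decay_steps):
    print(step, lr_at(step))  # 0.05, 0.005, 0.0005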
import sys

tests = [
  ("testExecs/testFeatures.exe", "", {}),
  ("python", "test_list.py", {'dir': 'Wrap'}),
]

longTests = []

if __name__ == '__main__':
  from rdkit import TestRunner
  failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
  sys.exit(len(failed))
jandom/rdkit
Code/GraphMol/MolChemicalFeatures/test_list.py
Python
bsd-3-clause
286
[ "RDKit" ]
b3d514d50c5fc27a479630b8c52686ec503f3c2e799a8ba6e4a3af745a133db9
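A sketch of how a runner might consume the (command, argument string, options) tuples in the tests list above. The 'dir' convention and exit-code handling are guesses; the real logic lives in rdkit.TestRunner, which is not shown here.

import subprocess

def run_tests(tests):
    failed = []
    for cmd, args, opts in tests:
        # Build the argv list; an optional 'dir' entry selects the
        # working directory for that test.
        argv = [cmd] + (args.split() if args else [])
        result = subprocess.call(argv, cwd=opts.get('dir'))
        if result != 0:
            failed.append((cmd, args))
    return failed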
from __future__ import (absolute_import, division, print_function)

# pylint: disable=no-init,invalid-name,too-few-public-methods,unused-import
from mantid.kernel import *
from mantid.simpleapi import *
from mantid.api import *
from mantid.geometry import *

import os


class PoldiCompound(object):
    """Small helper class to handle the results from PoldiCrystalFileParser."""

    def __init__(self, name, elements):
        self._spacegroup = ""
        self._atomString = ""
        self._cellDict = ""
        self._name = name

        self.assign(elements)

    def assign(self, elements):
        for c in elements:
            if c[0] == "atoms":
                self._atomString = ';'.join(c[1:])
            elif c[0] == "lattice":
                cellNames = ['a', 'b', 'c', 'alpha', 'beta', 'gamma']
                self._cellDict = dict(list(zip(cellNames, c[1:])))
            elif c[0] == "spacegroup":
                self._spacegroup = c[1]

    def getAtomString(self):
        return self._atomString

    def getCellParameters(self):
        return self._cellDict

    def getSpaceGroup(self):
        return self._spacegroup

    def getName(self):
        return self._name


def raiseParseErrorException(message):
    raise ParseException(message)


# pylint: disable=too-many-instance-attributes
class PoldiCrystalFileParser(object):
    """Small parser for crystal structure files used at POLDI

    This class encapsulates a small parser for crystal structure files that
    are used at POLDI. The files contain information about the lattice, the
    space group and the basis (atoms in the asymmetric unit).

    The file format is defined as follows:

        Compound_1 {
            Lattice: [1 - 6 floats] => a, b, c, alpha, beta, gamma
            Spacegroup: [valid space group symbol]
            Atoms: {
                Element x y z [occupancy [U_eq]]
                Element x y z [occupancy [U_eq]]
            }
        }

        Compound_2 {
            ...
        }

    The parser returns a list of PoldiCompound objects with the compounds
    that were found in the file. These are then processed by
    PoldiCreatePeaksFromFile to generate arguments for calling
    PoldiCreatePeaksFromCell.
    """

    def __init__(self):
        self.elementSymbol = Word(alphas, min=1, max=2).setFailAction(
            lambda o, s, loc, token: raiseParseErrorException("Element symbol must be one or two characters."))

        self.integerNumber = Word(nums)
        self.decimalSeparator = Word('./', max=1)
        self.floatNumber = Combine(
            self.integerNumber +
            Optional(self.decimalSeparator +
                     Optional(self.integerNumber))
        )

        self.whiteSpace = Suppress(White())

        self.atomLine = Combine(
            self.elementSymbol + self.whiteSpace +
            delimitedList(self.floatNumber, delim=White()),
            joinString=' '
        )

        self.keyValueSeparator = Suppress(Literal(":"))

        self.groupOpener = Suppress(Literal('{'))
        self.groupCloser = Suppress(Literal('}'))

        self.atomsGroup = Group(CaselessLiteral("atoms") + self.keyValueSeparator +
                                self.groupOpener + delimitedList(self.atomLine, delim=lineEnd) + self.groupCloser)

        self.unitCell = Group(CaselessLiteral("lattice") + self.keyValueSeparator + delimitedList(
            self.floatNumber, delim=White()))

        self.spaceGroup = Group(CaselessLiteral("spacegroup") + self.keyValueSeparator + Word(
            alphanums + "-" + ' ' + '/'))

        self.compoundContent = Each([self.atomsGroup, self.unitCell, self.spaceGroup]).setFailAction(
            lambda o, s, loc, token: raiseParseErrorException(
                "One of 'Lattice', 'SpaceGroup', 'Atoms' is missing or contains errors."))

        self.compoundName = Word(alphanums + '_')

        self.compound = Group(self.compoundName + Optional(self.whiteSpace) +
                              self.groupOpener + self.compoundContent + self.groupCloser)

        self.comment = Suppress(Literal('#') + restOfLine)

        self.compounds = Optional(self.comment) + OneOrMore(self.compound).ignore(self.comment) + stringEnd

    def __call__(self, contentString):
        parsedContent = None

        if os.path.isfile(contentString):
            parsedContent = self._parseFile(contentString)
        else:
            parsedContent = self._parseString(contentString)

        return [PoldiCompound(x[0], x[1:]) for x in parsedContent]

    def _parseFile(self, filename):
        return self.compounds.parseFile(filename)

    def _parseString(self, stringContent):
        return self.compounds.parseString(stringContent)


class PoldiCreatePeaksFromFile(PythonAlgorithm):
    _parser = None

    def category(self):
        return "SINQ\\Poldi"

    def name(self):
        return "PoldiLoadCrystalData"

    def summary(self):
        return ("The algorithm reads a POLDI crystal structure file and creates a WorkspaceGroup that contains "
                "tables with the expected reflections.")

    def PyInit(self):
        self.declareProperty(
            FileProperty(name="InputFile",
                         defaultValue="",
                         action=FileAction.Load,
                         extensions=["dat"]),
            doc="A file with POLDI crystal data.")

        self.declareProperty("LatticeSpacingMin", 0.5,
                             direction=Direction.Input,
                             doc="Lowest allowed lattice spacing.")

        self.declareProperty("LatticeSpacingMax", 0.0,
                             direction=Direction.Input,
                             doc="Largest allowed lattice spacing.")

        self.declareProperty(
            WorkspaceProperty(name="OutputWorkspace",
                              defaultValue="", direction=Direction.Output),
            doc="WorkspaceGroup with reflection tables.")

        self._parser = PoldiCrystalFileParser()

    def PyExec(self):
        crystalFileName = self.getProperty("InputFile").value
        try:
            # Try parsing the supplied file using PoldiCrystalFileParser
            compounds = self._parser(crystalFileName)

            dMin = self.getProperty("LatticeSpacingMin").value
            dMax = self.getProperty("LatticeSpacingMax").value

            workspaces = []

            # Go through found compounds and run "_createPeaksFromCell" for each of them.
            # If two compounds have the same name, a warning is written to the log.
            for compound in compounds:
                if compound.getName() in workspaces:
                    self.log().warning("A compound with the name '" + compound.getName() +
                                       "' has already been created. Please check the file '" +
                                       crystalFileName + "'")
                else:
                    workspaces.append(self._createPeaksFromCell(compound, dMin, dMax))

            self.setProperty("OutputWorkspace", GroupWorkspaces(workspaces))

        # All parse errors are caught here and logged as errors
        except ParseException as error:
            errorString = "Could not parse input file '" + crystalFileName + "'.\n"
            errorString += "The parser reported the following error:\n\t" + str(error)

            self.log().error(errorString)

    def _createPeaksFromCell(self, compound, dMin, dMax):
        if not SpaceGroupFactory.isSubscribedSymbol(compound.getSpaceGroup()):
            raise RuntimeError("SpaceGroup '" + compound.getSpaceGroup() + "' is not registered.")

        PoldiCreatePeaksFromCell(SpaceGroup=compound.getSpaceGroup(),
                                 Atoms=compound.getAtomString(),
                                 LatticeSpacingMin=dMin,
                                 LatticeSpacingMax=dMax,
                                 OutputWorkspace=compound.getName(),
                                 **compound.getCellParameters())

        return compound.getName()


try:
    from pyparsing import *

    AlgorithmFactory.subscribe(PoldiCreatePeaksFromFile)
except ImportError:
    logger.debug('Failed to subscribe algorithm PoldiCreatePeaksFromFile; Python package pyparsing'
                 ' may be missing (https://pypi.python.org/pypi/pyparsing)')
dymkowsk/mantid
Framework/PythonInterface/plugins/algorithms/PoldiCreatePeaksFromFile.py
Python
gpl-3.0
8,311
[ "CRYSTAL" ]
112188c14351cce75e9c826f1aece2ab3e06e76909138929791c8acdc61086d7
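A stripped-down pyparsing demo of the grammar technique used above: Suppress drops structural tokens, Group keeps related matches together, and Combine glues adjacent characters into one number token. It only handles a named block with a Lattice entry, not the full Atoms/Spacegroup grammar.

from pyparsing import (Word, alphanums, nums, Combine, Optional, Suppress,
                       Literal, Group, OneOrMore)

# A float is digits, optionally followed by '.' and more digits, with no
# whitespace in between (Combine enforces adjacency).
number = Combine(Word(nums) + Optional('.' + Optional(Word(nums))))

lattice = Group(Suppress(Literal('Lattice')) + Suppress(Literal(':')) +
                OneOrMore(number))
block = Group(Word(alphanums + '_') + Suppress(Literal('{')) +
              lattice + Suppress(Literal('}')))

result = block.parseString("Silicon { Lattice: 5.43 5.43 5.43 90 90 90 }")
print(result[0][0], list(result[0][1]))  # Silicon ['5.43', '5.43', '5.43', '90', '90', '90']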
# Name:    mapper_hirlam2nc.py
# Purpose: Mapper for Hirlam wind data converted from felt to netCDF
# Authors: Knut-Frode Dagestad
# Licence: This file is part of NANSAT. You can redistribute it or modify
#          under the terms of GNU General Public License, v.3
#          http://www.gnu.org/licenses/gpl-3.0.html
import datetime

from nansat.utils import gdal, ogr
from nansat.exceptions import WrongMapperError
from nansat.vrt import VRT


class Mapper(VRT):
    def __init__(self, filename, gdalDataset, gdalMetadata, logLevel=30,
                 **kwargs):
        if not gdalMetadata:
            raise WrongMapperError

        isHirlam = False
        for key in gdalMetadata.keys():
            if 'creation by fimex from file' in gdalMetadata[key]:
                isHirlam = True

        if not isHirlam:
            raise WrongMapperError

        #GeolocMetaDict = [{'src':
        #                   {'SourceFilename': 'NETCDF:"' + filename + '":longitude',
        #                    'SourceBand': 1,
        #                    'ScaleRatio': 1,
        #                    'ScaleOffset': 0},
        #                   'dst': {}},
        #                  {'src':
        #                   {'SourceFilename': 'NETCDF:"' + filename + '":latitude',
        #                    'SourceBand': 1,
        #                    'ScaleRatio': 1,
        #                    'ScaleOffset': 0},
        #                   'dst': {}}]

        subDataset = gdal.Open('NETCDF:"' + filename + '":x_wind_10m')

        #self.GeolocVRT = VRT(srcRasterXSize=subDataset.RasterXSize,
        #                     srcRasterYSize=subDataset.RasterYSize)
        #self.GeolocVRT.create_bands(GeolocMetaDict)

        #GeolocObject = GeolocationArray(xVRT=self.GeolocVRT,
        #                                yVRT=self.GeolocVRT,
        #                                xBand=1, yBand=2,
        #                                lineOffset=0, pixelOffset=0,
        #                                lineStep=1, pixelStep=1)

        ## create empty VRT dataset with geolocation only
        #VRT.__init__(self, srcRasterXSize = subDataset.RasterXSize,
        #             srcRasterYSize = subDataset.RasterYSize,
        #             geolocationArray = GeolocObject,
        #             srcProjection = GeolocObject.d['SRS'])

        lon = gdal.Open('NETCDF:"' + filename + '":longitude').ReadAsArray()
        lat = gdal.Open('NETCDF:"' + filename + '":latitude').ReadAsArray()
        self._init_from_lonlat(lon, lat)

        # Add bands with wind components
        metaDict = [{'src': {'SourceFilename': ('NETCDF:"' + filename +
                                                '":x_wind_10m'),
                             'NODATA': -32767},
                     'dst': {'name': 'U',
                             'wkv': 'eastward_wind'}},
                    {'src': {'SourceFilename': ('NETCDF:"' + filename +
                                                '":y_wind_10m'),
                             'NODATA': -32767},
                     'dst': {'name': 'V',
                             'wkv': 'northward_wind'}}]

        # Add pixel function with wind speed
        metaDict.append({'src': [{'SourceFilename': ('NETCDF:"' + filename +
                                                     '":x_wind_10m'),
                                  'SourceBand': 1,
                                  'DataType': 6},
                                 {'SourceFilename': ('NETCDF:"' + filename +
                                                     '":y_wind_10m'),
                                  'SourceBand': 1,
                                  'DataType': 6}],
                         'dst': {'wkv': 'wind_speed',
                                 'name': 'windspeed',
                                 'height': '10 m',
                                 'PixelFunctionType': 'UVToMagnitude',
                                 'NODATA': 9999}})

        # add bands with metadata and corresponding values
        # to the empty VRT
        self.create_bands(metaDict)

        # Add valid time
        validTime = datetime.datetime.utcfromtimestamp(
            int(subDataset.GetRasterBand(1).GetMetadata()['NETCDF_DIM_time']))
        self.dataset.SetMetadataItem('time_coverage_start',
                                     validTime.isoformat())
nansencenter/nansat
nansat/mappers/mapper_hirlam_wind_netcdf.py
Python
gpl-3.0
4,217
[ "NetCDF" ]
e44459220e232e256fc6cf339b32b6e0d43864d3a7945fac6b28f95604c1c7f0
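Numerically, the 'UVToMagnitude' pixel function referenced above is just the Euclidean norm of the (u, v) wind components. A NumPy sketch of the equivalent computation; the exact NODATA propagation rule is an assumption, not taken from the GDAL pixel-function source.

import numpy as np

def uv_to_magnitude(u, v, src_nodata=-32767, dst_nodata=9999):
    # Wind speed is hypot(u, v); pixels where either component is missing
    # are mapped to the destination NODATA value.
    speed = np.hypot(u, v)
    speed[(u == src_nodata) | (v == src_nodata)] = dst_nodata
    return speed

u = np.array([[3.0, -32767.0]])
v = np.array([[4.0, 2.0]])
print(uv_to_magnitude(u, v))  # [[   5. 9999.]]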
import numpy as np
import scipy.stats
import galsim

def render(model,scale,size=None,stamp=None):
    """
    Returns a rendering of the specified GalSim model into a square postage stamp with
    the specified pixel scale (arcsecs). Either a stamp pixel size or an existing stamp
    (with its scale attribute set) must be provided. Note that the model will be
    convolved with the pixel response, so should not already be convolved with the
    pixel response.
    """
    if stamp is None:
        stamp = galsim.Image(size,size)
        stamp.scale = scale
    return model.drawImage(image = stamp)

def padPeriodic(x,n,offset=0.):
    """
    Extend the array x[i] with 0 <= i < N to cover -n <= i < N+n+1 assuming periodicity
    x[n+N] = x[n]+offset to duplicate existing entries. Returns a new array that
    contains a copy of the original array. Note that an additional copy is needed on
    the high side to cover the interval x[N-1] - x[N].
    """
    nlo,nhi = n,n+1
    shape = list(x.shape)
    shape[0] += nlo+nhi
    padded = np.resize(x,tuple(shape))
    padded[:nlo] = x[-nlo:] - offset
    padded[nlo:-nhi] = x
    padded[-nhi:] = x[:nhi] + offset
    return padded

def getDeltaChiSq(CL=(0.6827,0.9543,0.9973),dof=2):
    """
    Returns a numpy array of delta(chisq) values corresponding to the specified list of
    confidence levels. The default CL values correspond to the enclosed probabilities
    of 1,2,3-sigmas for a 1D Gaussian pdf.
    """
    return scipy.stats.chi2.isf(1-np.array(CL),df=dof)

def getBinEdges(*binCenters,**options):
    """
    Returns a numpy array of bin edges corresponding to the input array of bin centers,
    which will have a length one greater than the input length. The outer edges will be
    the same as the outer bin centers unless expand=True is passed as a keyword option.
    This function is normally used with the matplotlib pcolormesh function, e.g.

        ex,ey = getBinEdges(x,y)
        plt.pcolormesh(ex,ey,z,cmap='rainbow')
        levels = np.min(z) + getDeltaChiSq(dof=2)
        plt.contour(x,y,z,levels=levels,colors='w',linestyles=('-','--',':'))
    """
    expand = 'expand' in options and options['expand']
    binEdges = [ ]
    for centers in binCenters:
        edges = np.empty((len(centers)+1,),dtype=centers.dtype)
        edges[1:-1] = 0.5*(centers[1:] + centers[:-1])
        if expand:
            edges[0] = 0.5*(3*centers[0] - centers[1])
            edges[-1] = 0.5*(3*centers[-1] - centers[-2])
        else:
            edges[0] = centers[0]
            edges[-1] = centers[-1]
        binEdges.append(edges)
    return binEdges if len(binEdges) > 1 else binEdges[0]
deepzot/bashes
bashes/utility.py
Python
mit
2,652
[ "Gaussian" ]
1b106e26cf8a1c9bda42451552ca0b79d132f7651524364187014e26ed202dbb
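A usage sketch for getBinEdges above, with the default (no expand) convention made explicit by re-running the same arithmetic inline; the input centers are made up.

import numpy as np

centers = np.array([0.0, 1.0, 2.0, 3.0])
# getBinEdges(centers) is expected to give [0.0, 0.5, 1.5, 2.5, 3.0]:
# interior edges are midpoints, outer edges coincide with the outer
# centers, so the two outer bins are half-width.
inner = 0.5 * (centers[1:] + centers[:-1])
edges = np.concatenate(([centers[0]], inner, [centers[-1]]))
print(edges)  # [0.  0.5 1.5 2.5 3. ]
# With expand=True the outer edges would instead be extrapolated to
# [-0.5, ..., 3.5], giving all bins equal width.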
# # @BEGIN LICENSE # # Psi4: an open-source quantum chemistry software package # # Copyright (c) 2007-2018 The Psi4 Developers. # # The copyrights for code used from other parties are included in # the corresponding files. # # This file is part of Psi4. # # Psi4 is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, version 3. # # Psi4 is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License along # with Psi4; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # @END LICENSE # """ | Database of Hydrogen transfer reactions. | Geometries from Bozkaya and Sherrill. | Reference energies from Bozkaya and Sherrill. - **benchmark** - ``'<benchmark_name>'`` <Reference>. - |dl| ``'<default_benchmark_name>'`` |dr| <Reference>. - **subset** - ``'small'`` <members_description> - ``'large'`` <members_description> - ``'<subset>'`` <members_description> """ import re import qcdb # <<< HTR40 Database Module >>> dbse = 'HTR40' isOS = 'True' # <<< Database Members >>> HRXN = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40] HRXN_SM = [] HRXN_LG = [] HTR15 = [1, 2, 3, 4, 11, 12, 13, 20, 21, 34, 35, 36, 37, 38, 39] # <<< Chemical Systems Involved >>> RXNM = {} # reaction matrix of reagent contributions per reaction ACTV = {} # order of active reagents per reaction ACTV['%s-%s' % (dbse, 1 )] = ['%s-%s-reagent' % (dbse, 'ch3'), '%s-%s-reagent' % (dbse, 'h2'), '%s-%s-reagent' % (dbse, 'ch4'), '%s-%s-reagent' % (dbse, 'h') ] RXNM['%s-%s' % (dbse, 1 )] = dict(zip(ACTV['%s-%s' % (dbse, 1)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 2 )] = ['%s-%s-reagent' % (dbse, 'c2h'), '%s-%s-reagent' % (dbse, 'h2'), '%s-%s-reagent' % (dbse, 'c2h2'), '%s-%s-reagent' % (dbse, 'h') ] RXNM['%s-%s' % (dbse, 2 )] = dict(zip(ACTV['%s-%s' % (dbse, 2)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 3 )] = ['%s-%s-reagent' % (dbse, 'c2h3'), '%s-%s-reagent' % (dbse, 'h2'), '%s-%s-reagent' % (dbse, 'c2h4'), '%s-%s-reagent' % (dbse, 'h') ] RXNM['%s-%s' % (dbse, 3 )] = dict(zip(ACTV['%s-%s' % (dbse, 3)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 4 )] = ['%s-%s-reagent' % (dbse, 't-butyl'), '%s-%s-reagent' % (dbse, 'h2'), '%s-%s-reagent' % (dbse, 'isobutane'), '%s-%s-reagent' % (dbse, 'h') ] RXNM['%s-%s' % (dbse, 4 )] = dict(zip(ACTV['%s-%s' % (dbse, 4)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 5 )] = ['%s-%s-reagent' % (dbse, 'cfch2'), '%s-%s-reagent' % (dbse, 'h2'), '%s-%s-reagent' % (dbse, 'chfch2'), '%s-%s-reagent' % (dbse, 'h') ] RXNM['%s-%s' % (dbse, 5 )] = dict(zip(ACTV['%s-%s' % (dbse, 5)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 6 )] = ['%s-%s-reagent' % (dbse, 'ch2cho'), '%s-%s-reagent' % (dbse, 'h2'), '%s-%s-reagent' % (dbse, 'ch3cho'), '%s-%s-reagent' % (dbse, 'h') ] RXNM['%s-%s' % (dbse, 6 )] = dict(zip(ACTV['%s-%s' % (dbse, 6)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 7 )] = ['%s-%s-reagent' % (dbse, 'ch2cn'), '%s-%s-reagent' % (dbse, 'h2'), '%s-%s-reagent' % (dbse, 'ch3cn'), '%s-%s-reagent' % (dbse, 'h') ] RXNM['%s-%s' % (dbse, 7 )] = dict(zip(ACTV['%s-%s' % (dbse, 7)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 8 )] = ['%s-%s-reagent' % (dbse, 
'ch2cch'), '%s-%s-reagent' % (dbse, 'h2'), '%s-%s-reagent' % (dbse, 'ch3cch'), '%s-%s-reagent' % (dbse, 'h') ] RXNM['%s-%s' % (dbse, 8 )] = dict(zip(ACTV['%s-%s' % (dbse, 8)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 9 )] = ['%s-%s-reagent' % (dbse, 'ch2ccn'), '%s-%s-reagent' % (dbse, 'h2'), '%s-%s-reagent' % (dbse, 'ch2chcn'), '%s-%s-reagent' % (dbse, 'h') ] RXNM['%s-%s' % (dbse, 9 )] = dict(zip(ACTV['%s-%s' % (dbse, 9)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 10 )] = ['%s-%s-reagent' % (dbse, 'allyl'), '%s-%s-reagent' % (dbse, 'h2'), '%s-%s-reagent' % (dbse, 'ch3chch2'), '%s-%s-reagent' % (dbse, 'h') ] RXNM['%s-%s' % (dbse, 10 )] = dict(zip(ACTV['%s-%s' % (dbse, 10)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 11 )] = ['%s-%s-reagent' % (dbse, 'c2h'), '%s-%s-reagent' % (dbse, 'ch4'), '%s-%s-reagent' % (dbse, 'c2h2'), '%s-%s-reagent' % (dbse, 'ch3') ] RXNM['%s-%s' % (dbse, 11 )] = dict(zip(ACTV['%s-%s' % (dbse, 11)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 12 )] = ['%s-%s-reagent' % (dbse, 'c2h3'), '%s-%s-reagent' % (dbse, 'ch4'), '%s-%s-reagent' % (dbse, 'c2h4'), '%s-%s-reagent' % (dbse, 'ch3') ] RXNM['%s-%s' % (dbse, 12 )] = dict(zip(ACTV['%s-%s' % (dbse, 12)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 13 )] = ['%s-%s-reagent' % (dbse, 't-butyl'), '%s-%s-reagent' % (dbse, 'ch4'), '%s-%s-reagent' % (dbse, 'isobutane'), '%s-%s-reagent' % (dbse, 'ch3') ] RXNM['%s-%s' % (dbse, 13 )] = dict(zip(ACTV['%s-%s' % (dbse, 13)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 14 )] = ['%s-%s-reagent' % (dbse, 'cfch2'), '%s-%s-reagent' % (dbse, 'ch4'), '%s-%s-reagent' % (dbse, 'chfch2'), '%s-%s-reagent' % (dbse, 'ch3') ] RXNM['%s-%s' % (dbse, 14 )] = dict(zip(ACTV['%s-%s' % (dbse, 14)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 15 )] = ['%s-%s-reagent' % (dbse, 'ch2cho'), '%s-%s-reagent' % (dbse, 'ch4'), '%s-%s-reagent' % (dbse, 'ch3cho'), '%s-%s-reagent' % (dbse, 'ch3') ] RXNM['%s-%s' % (dbse, 15 )] = dict(zip(ACTV['%s-%s' % (dbse, 15)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 16 )] = ['%s-%s-reagent' % (dbse, 'ch2cn'), '%s-%s-reagent' % (dbse, 'ch4'), '%s-%s-reagent' % (dbse, 'ch3cn'), '%s-%s-reagent' % (dbse, 'ch3') ] RXNM['%s-%s' % (dbse, 16 )] = dict(zip(ACTV['%s-%s' % (dbse, 16)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 17 )] = ['%s-%s-reagent' % (dbse, 'ch2cch'), '%s-%s-reagent' % (dbse, 'ch4'), '%s-%s-reagent' % (dbse, 'ch3cch'), '%s-%s-reagent' % (dbse, 'ch3') ] RXNM['%s-%s' % (dbse, 17 )] = dict(zip(ACTV['%s-%s' % (dbse, 17)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 18 )] = ['%s-%s-reagent' % (dbse, 'ch2ccn'), '%s-%s-reagent' % (dbse, 'ch4'), '%s-%s-reagent' % (dbse, 'ch2chcn'), '%s-%s-reagent' % (dbse, 'ch3') ] RXNM['%s-%s' % (dbse, 18 )] = dict(zip(ACTV['%s-%s' % (dbse, 18)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 19 )] = ['%s-%s-reagent' % (dbse, 'allyl'), '%s-%s-reagent' % (dbse, 'ch4'), '%s-%s-reagent' % (dbse, 'ch3chch2'), '%s-%s-reagent' % (dbse, 'ch3') ] RXNM['%s-%s' % (dbse, 19 )] = dict(zip(ACTV['%s-%s' % (dbse, 19)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 20 )] = ['%s-%s-reagent' % (dbse, 'c2h'), '%s-%s-reagent' % (dbse, 'c2h4'), '%s-%s-reagent' % (dbse, 'c2h2'), '%s-%s-reagent' % (dbse, 'c2h3') ] RXNM['%s-%s' % (dbse, 20 )] = dict(zip(ACTV['%s-%s' % (dbse, 20)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 21 )] = ['%s-%s-reagent' % (dbse, 't-butyl'), '%s-%s-reagent' % (dbse, 'c2h4'), '%s-%s-reagent' % (dbse, 'isobutane'), '%s-%s-reagent' % (dbse, 'c2h3') ] RXNM['%s-%s' % (dbse, 21 )] = dict(zip(ACTV['%s-%s' % (dbse, 21)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 22 )] = ['%s-%s-reagent' % (dbse, 'cfch2'), '%s-%s-reagent' % (dbse, 'c2h4'), 
'%s-%s-reagent' % (dbse, 'chfch2'), '%s-%s-reagent' % (dbse, 'c2h3') ] RXNM['%s-%s' % (dbse, 22 )] = dict(zip(ACTV['%s-%s' % (dbse, 22)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 23 )] = ['%s-%s-reagent' % (dbse, 'ch2cho'), '%s-%s-reagent' % (dbse, 'c2h4'), '%s-%s-reagent' % (dbse, 'ch3cho'), '%s-%s-reagent' % (dbse, 'c2h3') ] RXNM['%s-%s' % (dbse, 23 )] = dict(zip(ACTV['%s-%s' % (dbse, 23)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 24 )] = ['%s-%s-reagent' % (dbse, 'ch2cn'), '%s-%s-reagent' % (dbse, 'c2h4'), '%s-%s-reagent' % (dbse, 'ch3cn'), '%s-%s-reagent' % (dbse, 'c2h3') ] RXNM['%s-%s' % (dbse, 24 )] = dict(zip(ACTV['%s-%s' % (dbse, 24)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 25 )] = ['%s-%s-reagent' % (dbse, 'ch2cch'), '%s-%s-reagent' % (dbse, 'c2h4'), '%s-%s-reagent' % (dbse, 'ch3cch'), '%s-%s-reagent' % (dbse, 'c2h3') ] RXNM['%s-%s' % (dbse, 25 )] = dict(zip(ACTV['%s-%s' % (dbse, 25)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 26 )] = ['%s-%s-reagent' % (dbse, 'ch2ccn'), '%s-%s-reagent' % (dbse, 'c2h4'), '%s-%s-reagent' % (dbse, 'ch2chcn'), '%s-%s-reagent' % (dbse, 'c2h3') ] RXNM['%s-%s' % (dbse, 26 )] = dict(zip(ACTV['%s-%s' % (dbse, 26)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 27 )] = ['%s-%s-reagent' % (dbse, 'allyl'), '%s-%s-reagent' % (dbse, 'c2h4'), '%s-%s-reagent' % (dbse, 'ch3chch2'), '%s-%s-reagent' % (dbse, 'h') ] RXNM['%s-%s' % (dbse, 27 )] = dict(zip(ACTV['%s-%s' % (dbse, 27)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 28 )] = ['%s-%s-reagent' % (dbse, 'cfch2'), '%s-%s-reagent' % (dbse, 'c2h2'), '%s-%s-reagent' % (dbse, 'chfch2'), '%s-%s-reagent' % (dbse, 'c2h') ] RXNM['%s-%s' % (dbse, 28 )] = dict(zip(ACTV['%s-%s' % (dbse, 28)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 29 )] = ['%s-%s-reagent' % (dbse, 'ch2cho'), '%s-%s-reagent' % (dbse, 'c2h2'), '%s-%s-reagent' % (dbse, 'ch3cho'), '%s-%s-reagent' % (dbse, 'c2h') ] RXNM['%s-%s' % (dbse, 29 )] = dict(zip(ACTV['%s-%s' % (dbse, 29)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 30 )] = ['%s-%s-reagent' % (dbse, 'ch2cn'), '%s-%s-reagent' % (dbse, 'c2h2'), '%s-%s-reagent' % (dbse, 'ch3cn'), '%s-%s-reagent' % (dbse, 'c2h') ] RXNM['%s-%s' % (dbse, 30 )] = dict(zip(ACTV['%s-%s' % (dbse, 30)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 31 )] = ['%s-%s-reagent' % (dbse, 'ch2cch'), '%s-%s-reagent' % (dbse, 'c2h2'), '%s-%s-reagent' % (dbse, 'ch3cch'), '%s-%s-reagent' % (dbse, 'c2h') ] RXNM['%s-%s' % (dbse, 31 )] = dict(zip(ACTV['%s-%s' % (dbse, 31)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 32 )] = ['%s-%s-reagent' % (dbse, 'ch2ccn'), '%s-%s-reagent' % (dbse, 'c2h2'), '%s-%s-reagent' % (dbse, 'ch2chcn'), '%s-%s-reagent' % (dbse, 'c2h') ] RXNM['%s-%s' % (dbse, 32 )] = dict(zip(ACTV['%s-%s' % (dbse, 32)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 33 )] = ['%s-%s-reagent' % (dbse, 'allyl'), '%s-%s-reagent' % (dbse, 'c2h2'), '%s-%s-reagent' % (dbse, 'ch3chch2'), '%s-%s-reagent' % (dbse, 'c2h') ] RXNM['%s-%s' % (dbse, 33 )] = dict(zip(ACTV['%s-%s' % (dbse, 33)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 34 )] = ['%s-%s-reagent' % (dbse, 'c2h'), '%s-%s-reagent' % (dbse, 'isobutane'), '%s-%s-reagent' % (dbse, 'c2h2'), '%s-%s-reagent' % (dbse, 't-butyl') ] RXNM['%s-%s' % (dbse, 34 )] = dict(zip(ACTV['%s-%s' % (dbse, 34)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 35 )] = ['%s-%s-reagent' % (dbse, 'c6h5'), '%s-%s-reagent' % (dbse, 'h2'), '%s-%s-reagent' % (dbse, 'c6h6'), '%s-%s-reagent' % (dbse, 'h') ] RXNM['%s-%s' % (dbse, 35 )] = dict(zip(ACTV['%s-%s' % (dbse, 35)], [-1,-1,1,1])) ACTV['%s-%s' % (dbse, 36 )] = ['%s-%s-reagent' % (dbse, 'c6h5'), '%s-%s-reagent' % (dbse, 'c2h4'), '%s-%s-reagent' % (dbse, 
'c6h6'), '%s-%s-reagent' % (dbse, 'c2h3') ]
RXNM['%s-%s' % (dbse, 36 )] = dict(zip(ACTV['%s-%s' % (dbse, 36)], [-1,-1,1,1]))

ACTV['%s-%s' % (dbse, 37 )] = ['%s-%s-reagent' % (dbse, 'c6h5'),
                               '%s-%s-reagent' % (dbse, 'isobutane'),
                               '%s-%s-reagent' % (dbse, 'c6h6'),
                               '%s-%s-reagent' % (dbse, 't-butyl') ]
RXNM['%s-%s' % (dbse, 37 )] = dict(zip(ACTV['%s-%s' % (dbse, 37)], [-1,-1,1,1]))

ACTV['%s-%s' % (dbse, 38 )] = ['%s-%s-reagent' % (dbse, 'c2h'),
                               '%s-%s-reagent' % (dbse, 'c6h6'),
                               '%s-%s-reagent' % (dbse, 'c2h2'),
                               '%s-%s-reagent' % (dbse, 'c6h5') ]
RXNM['%s-%s' % (dbse, 38 )] = dict(zip(ACTV['%s-%s' % (dbse, 38)], [-1,-1,1,1]))

ACTV['%s-%s' % (dbse, 39 )] = ['%s-%s-reagent' % (dbse, 'c6h5'),
                               '%s-%s-reagent' % (dbse, 'ch4'),
                               '%s-%s-reagent' % (dbse, 'c6h6'),
                               '%s-%s-reagent' % (dbse, 'ch3') ]
RXNM['%s-%s' % (dbse, 39 )] = dict(zip(ACTV['%s-%s' % (dbse, 39)], [-1,-1,1,1]))

ACTV['%s-%s' % (dbse, 40 )] = ['%s-%s-reagent' % (dbse, 'c6h5'),
                               '%s-%s-reagent' % (dbse, 'ch3cn'),
                               '%s-%s-reagent' % (dbse, 'c6h6'),
                               '%s-%s-reagent' % (dbse, 'ch2cn') ]
RXNM['%s-%s' % (dbse, 40 )] = dict(zip(ACTV['%s-%s' % (dbse, 40)], [-1,-1,1,1]))

# <<< Reference Values [kcal/mol] >>>
BIND = {}
BIND['%s-%s' % (dbse, 1 )] = 0.000
BIND['%s-%s' % (dbse, 2 )] = 0.000
BIND['%s-%s' % (dbse, 3 )] = 0.000
BIND['%s-%s' % (dbse, 4 )] = 0.000
BIND['%s-%s' % (dbse, 5 )] = 0.000
BIND['%s-%s' % (dbse, 6 )] = 0.000
BIND['%s-%s' % (dbse, 7 )] = 0.000
BIND['%s-%s' % (dbse, 8 )] = 0.000
BIND['%s-%s' % (dbse, 9 )] = 0.000
BIND['%s-%s' % (dbse, 10 )] = 0.000
BIND['%s-%s' % (dbse, 11 )] = 0.000
BIND['%s-%s' % (dbse, 12 )] = 0.000
BIND['%s-%s' % (dbse, 13 )] = 0.000
BIND['%s-%s' % (dbse, 14 )] = 0.000
BIND['%s-%s' % (dbse, 15 )] = 0.000
BIND['%s-%s' % (dbse, 16 )] = 0.000
BIND['%s-%s' % (dbse, 17 )] = 0.000
BIND['%s-%s' % (dbse, 18 )] = 0.000
BIND['%s-%s' % (dbse, 19 )] = 0.000
BIND['%s-%s' % (dbse, 20 )] = 0.000
BIND['%s-%s' % (dbse, 21 )] = 0.000
BIND['%s-%s' % (dbse, 22 )] = 0.000
BIND['%s-%s' % (dbse, 23 )] = 0.000
BIND['%s-%s' % (dbse, 24 )] = 0.000
BIND['%s-%s' % (dbse, 25 )] = 0.000
BIND['%s-%s' % (dbse, 26 )] = 0.000
BIND['%s-%s' % (dbse, 27 )] = 0.000
BIND['%s-%s' % (dbse, 28 )] = 0.000
BIND['%s-%s' % (dbse, 29 )] = 0.000
BIND['%s-%s' % (dbse, 30 )] = 0.000
BIND['%s-%s' % (dbse, 31 )] = 0.000
BIND['%s-%s' % (dbse, 32 )] = 0.000
BIND['%s-%s' % (dbse, 33 )] = 0.000
BIND['%s-%s' % (dbse, 34 )] = 0.000
BIND['%s-%s' % (dbse, 35 )] = 0.000
BIND['%s-%s' % (dbse, 36 )] = 0.000
BIND['%s-%s' % (dbse, 37 )] = 0.000
BIND['%s-%s' % (dbse, 38 )] = 0.000
BIND['%s-%s' % (dbse, 39 )] = 0.000
BIND['%s-%s' % (dbse, 40 )] = 0.000

# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, 1 )] = """Reaction 1 """
TAGL['%s-%s' % (dbse, 2 )] = """Reaction 2 """
TAGL['%s-%s' % (dbse, 3 )] = """Reaction 3 """
TAGL['%s-%s' % (dbse, 4 )] = """Reaction 4 """
TAGL['%s-%s' % (dbse, 5 )] = """Reaction 5 """
TAGL['%s-%s' % (dbse, 6 )] = """Reaction 6 """
TAGL['%s-%s' % (dbse, 7 )] = """Reaction 7 """
TAGL['%s-%s' % (dbse, 8 )] = """Reaction 8 """
TAGL['%s-%s' % (dbse, 9 )] = """Reaction 9 """
TAGL['%s-%s' % (dbse, 10 )] = """Reaction 10 """
TAGL['%s-%s' % (dbse, 11 )] = """Reaction 11 """
TAGL['%s-%s' % (dbse, 12 )] = """Reaction 12 """
TAGL['%s-%s' % (dbse, 13 )] = """Reaction 13 """
TAGL['%s-%s' % (dbse, 14 )] = """Reaction 14 """
TAGL['%s-%s' % (dbse, 15 )] = """Reaction 15 """
TAGL['%s-%s' % (dbse, 16 )] = """Reaction 16 """
TAGL['%s-%s' % (dbse, 17 )] = """Reaction 17 """
TAGL['%s-%s' % (dbse, 18 )] = """Reaction 18 """
TAGL['%s-%s' % (dbse, 19 )] = """Reaction 19 """
TAGL['%s-%s' % (dbse, 20 )] = 
"""Reaction 20 """ TAGL['%s-%s' % (dbse, 21 )] = """Reaction 21 """ TAGL['%s-%s' % (dbse, 22 )] = """Reaction 22 """ TAGL['%s-%s' % (dbse, 23 )] = """Reaction 23 """ TAGL['%s-%s' % (dbse, 24 )] = """Reaction 24 """ TAGL['%s-%s' % (dbse, 25 )] = """Reaction 25 """ TAGL['%s-%s' % (dbse, 26 )] = """Reaction 26 """ TAGL['%s-%s' % (dbse, 27 )] = """Reaction 27 """ TAGL['%s-%s' % (dbse, 28 )] = """Reaction 28 """ TAGL['%s-%s' % (dbse, 29 )] = """Reaction 29 """ TAGL['%s-%s' % (dbse, 30 )] = """Reaction 30 """ TAGL['%s-%s' % (dbse, 31 )] = """Reaction 31 """ TAGL['%s-%s' % (dbse, 32 )] = """Reaction 32 """ TAGL['%s-%s' % (dbse, 33 )] = """Reaction 33 """ TAGL['%s-%s' % (dbse, 34 )] = """Reaction 34 """ TAGL['%s-%s' % (dbse, 35 )] = """Reaction 35 """ TAGL['%s-%s' % (dbse, 36 )] = """Reaction 36 """ TAGL['%s-%s' % (dbse, 37 )] = """Reaction 37 """ TAGL['%s-%s' % (dbse, 38 )] = """Reaction 38 """ TAGL['%s-%s' % (dbse, 39 )] = """Reaction 39 """ TAGL['%s-%s' % (dbse, 40 )] = """Reaction 40 """ TAGL['%s-%s-reagent' % (dbse, 't-butyl' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'cfch2' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'c2h2' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'ch3cho' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'ch2cn' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'c2h4' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'ch2chcn' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'c2h' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'ch4' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'c2h3' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'ch3cn' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'allyl' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'ch3cch' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'ch3' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'h2' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'ch3chch2' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'chfch2' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'h' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'isobutane' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'ch2cho' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'ch2cch' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'ch2ccn' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'c6h6' )] = """ """ TAGL['%s-%s-reagent' % (dbse, 'c6h5' )] = """ """ # <<< Geometry Specification Strings >>> GEOS = {} GEOS['%s-%s-%s' % (dbse, 't-butyl', 'reagent')] = qcdb.Molecule(""" 0 2 C 0.00000073 -0.17324401 0.00000000 C -1.49313671 0.03293053 0.00000000 C 0.74656816 0.03293930 1.29309432 C 0.74656816 0.03293930 -1.29309432 H -1.75412591 1.11750215 0.00000000 H 0.87705536 1.11751245 1.51911455 H 0.87705536 1.11751245 -1.51911455 H -1.96447858 -0.41104829 0.89724346 H -1.96447858 -0.41104829 -0.89724346 H 1.75927771 -0.41103277 1.25266784 H 0.20520649 -0.41104076 2.14991136 H 0.20520649 -0.41104076 -2.14991136 H 1.75927771 -0.41103277 -1.25266784 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'cfch2', 'reagent')] = qcdb.Molecule(""" 0 2 F 1.18236000 0.12888000 0.00008000 C -0.00743000 -0.42937000 -0.00017000 C -1.20008000 0.11938000 -0.00004000 H -2.08423000 -0.50156000 0.00065000 H -1.31194000 1.20159000 -0.00015000 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'c2h2', 'reagent')] = qcdb.Molecule(""" 0 1 C -0.00000000 0.00000000 0.60249005 C 0.00000000 0.00000000 -0.60249005 H 0.00000000 -0.00000000 1.66141025 H 0.00000000 -0.00000000 -1.66141025 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'ch3cho', 'reagent')] = qcdb.Molecule(""" 0 1 H 1.15548000 -1.23749000 -0.00007000 C 1.16885000 -0.14776000 -0.00001000 C -0.23563000 0.39721000 -0.00001000 H -0.30509000 1.50872000 
0.00003000 H 1.70764000 0.22227000 0.87912000 H 1.70778000 0.22245000 -0.87897000 O -1.23314000 -0.27658000 0.00000000 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'ch2cn', 'reagent')] = qcdb.Molecule(""" 0 2 C 0.18728000 0.00000000 -0.00001000 N 1.35587000 0.00000000 0.00000000 C -1.19103000 0.00000000 0.00000000 H -1.73431000 0.93522000 0.00001000 H -1.73432000 -0.93521000 0.00001000 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'c2h4', 'reagent')] = qcdb.Molecule(""" 0 1 C 0.00000000 0.00000000 0.66741206 C 0.00000000 0.00000000 -0.66741206 H -0.00000000 0.92046521 1.22998610 H 0.00000000 -0.92046521 1.22998610 H -0.00000000 0.92046521 -1.22998610 H 0.00000000 -0.92046521 -1.22998610 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'ch2chcn', 'reagent')] = qcdb.Molecule(""" 0 1 H -2.62698000 0.00343000 0.00128000 C -1.60538000 -0.35565000 0.00019000 C -0.58353000 0.50215000 -0.00012000 C 0.78296000 0.08953000 -0.00091000 N 1.89484000 -0.22376000 0.00060000 H -1.45042000 -1.42771000 -0.00098000 H -0.75076000 1.57438000 0.00050000 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'c2h', 'reagent')] = qcdb.Molecule(""" 0 2 C 0.00000000 0.00000000 0.00000000 C 1.21283562 0.00000000 0.00000000 H -1.05818189 0.00000000 0.00000000 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'ch4', 'reagent')] = qcdb.Molecule(""" 0 1 C 0.00000000 0.00000000 0.00000000 H 1.08613677 0.00000000 0.00000000 H -0.36204538 1.02401965 0.00000000 H -0.36204538 -0.51200982 -0.88682703 H -0.36204538 -0.51200982 0.88682703 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'c2h3', 'reagent')] = qcdb.Molecule(""" 0 2 C 0.02607811 0.69573733 0.00000000 C 0.02857044 -0.62089246 0.00000000 H -0.70268837 1.48374496 0.00000000 H -0.89678124 -1.18847800 0.00000000 H 0.94877872 -1.18643193 0.00000000 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'ch3cn', 'reagent')] = qcdb.Molecule(""" 0 1 H 1.55303000 -0.19362000 1.00615000 C 1.17602000 0.00000000 0.00000000 C -0.28096000 0.00000000 -0.00001000 N -1.43280000 0.00000000 0.00000000 H 1.55307000 0.96815000 -0.33536000 H 1.55311000 -0.77453000 -0.67072000 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'allyl', 'reagent')] = qcdb.Molecule(""" 0 2 H 1.29620000 1.27815000 0.00032000 C 1.22748000 0.19570000 0.00008000 C 0.00002000 -0.44151000 -0.00012000 C -1.22747000 0.19573000 -0.00006000 H -2.15453000 -0.36307000 0.00092000 H 2.15460000 -0.36295000 -0.00001000 H -0.00013000 -1.52978000 -0.00012000 H -1.29630000 1.27818000 -0.00046000 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'ch3cch', 'reagent')] = qcdb.Molecule(""" 0 1 H 1.62987000 -0.22419000 0.99609000 C 1.23812000 0.00000000 -0.00001000 C -0.21922000 -0.00003000 0.00000000 C -1.42012000 0.00006000 0.00007000 H -2.48217000 -0.00018000 -0.00029000 H 1.62978000 0.97478000 -0.30390000 H 1.62986000 -0.75054000 -0.69223000 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'ch3', 'reagent')] = qcdb.Molecule(""" 0 2 C 0.00000000 0.00000000 0.00000031 H -0.00000000 0.00000000 1.07554864 H -0.00000000 0.93145124 -0.53777618 H 0.00000000 -0.93145124 -0.53777618 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'h2', 'reagent')] = qcdb.Molecule(""" 0 1 H 0.00000000 0.00000000 -0.37169941 H 0.00000000 0.00000000 0.37169941 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'ch3chch2', 'reagent')] = qcdb.Molecule(""" 0 1 H -1.80765000 -0.15368000 -0.87813000 C -1.23352000 0.16234000 0.00003000 C 0.13465000 -0.45362000 -0.00010000 C 1.28048000 0.22043000 -0.00006000 H 2.23888000 -0.28641000 0.00025000 H -1.18210000 1.25365000 -0.00053000 H -1.80711000 -0.15289000 0.87881000 H 
0.16668000 -1.54201000 0.00023000 H 1.30167000 1.30642000 0.00021000 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'chfch2', 'reagent')] = qcdb.Molecule(""" 0 1 F 1.15776000 -0.22307000 0.00001000 C -0.02076000 0.43302000 -0.00002000 C -1.18005000 -0.19843000 0.00000000 H 0.11660000 1.50810000 0.00003000 H -2.09896000 0.37141000 0.00003000 H -1.23262000 -1.27944000 -0.00001000 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'h', 'reagent')] = qcdb.Molecule(""" 0 2 H 0.00000000 0.00000000 0.00000000 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'isobutane', 'reagent')] = qcdb.Molecule(""" 0 1 C 0.00000090 -0.36984652 0.00000000 H 0.00000133 -1.47997543 0.00000000 C -1.46235985 0.10584355 0.00000000 C 0.73117989 0.10584959 1.26644108 C 0.73117989 0.10584959 -1.26644108 H -1.50901508 1.21298165 0.00000000 H 0.75449342 1.21298769 1.30684890 H 0.75449342 1.21298769 -1.30684890 H -2.00230292 -0.25604001 0.89527086 H -2.00230292 -0.25604001 -0.89527086 H 1.77648250 -0.25602315 1.28640747 H 0.22582896 -0.25604150 2.18168063 H 0.22582896 -0.25604150 -2.18168063 H 1.77648250 -0.25602315 -1.28640747 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'ch2cho', 'reagent')] = qcdb.Molecule(""" 0 2 H 1.26423000 -1.25075000 -0.00022000 C 1.16786000 -0.17142000 -0.00004000 C -0.13387000 0.40647000 -0.00006000 H -0.18501000 1.51210000 -0.00018000 H 2.05914000 0.44514000 0.00051000 O -1.16779000 -0.26460000 0.00006000 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'ch2cch', 'reagent')] = qcdb.Molecule(""" 0 2 C 0.11561000 -0.00003000 -0.00001000 C 1.33791000 0.00003000 -0.00001000 H 2.40011000 -0.00005000 0.00008000 C -1.25132000 0.00001000 -0.00001000 H -1.80663000 0.93004000 0.00004000 H -1.80669000 -0.93000000 0.00004000 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'ch2ccn', 'reagent')] = qcdb.Molecule(""" 0 2 C -1.80740000 0.05630000 0.00027000 C -0.52077000 -0.14829000 -0.00057000 C 0.80722000 -0.01017000 0.00028000 N 1.98158000 0.04542000 0.00000000 H -2.22720000 1.06314000 -0.00029000 H -2.51811000 -0.76812000 0.00035000 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'c6h6', 'reagent')] = qcdb.Molecule(""" 0 1 C -0.00000000 1.41066086 0.00000000 C 1.22166790 0.70533038 -0.00000000 C 1.22166790 -0.70533038 0.00000000 C 0.00000000 -1.41066086 0.00000000 C -1.22166790 -0.70533038 0.00000000 C -1.22166790 0.70533038 0.00000000 H -0.00000000 2.50726822 0.00000000 H 2.17135800 1.25363362 0.00000000 H 2.17135800 -1.25363362 0.00000000 H 0.00000000 -2.50726822 0.00000000 H -2.17135800 -1.25363362 0.00000000 H -2.17135800 1.25363362 0.00000000 units angstrom """) GEOS['%s-%s-%s' % (dbse, 'c6h5', 'reagent')] = qcdb.Molecule(""" 0 2 C -0.02911138 1.44968932 0.00000000 C 1.19145514 0.72918056 0.00000000 C 1.18256095 -0.68275137 0.00000000 C -0.03576037 -1.39642075 0.00000000 C -1.27002298 -0.69963430 0.00000000 C -1.20569095 0.69610546 0.00000000 H -0.03893619 2.54557277 0.00000000 H 2.14438286 1.27284078 0.00000000 H 2.13198633 -1.23090189 0.00000000 H -0.03011941 -2.49351046 0.00000000 H -2.22399795 -1.23906809 0.00000000 units angstrom """)
amjames/psi4
psi4/share/psi4/databases/HTR40.py
Python
lgpl-3.0
41,501
[ "Psi4" ]
7821b04e2d2537c333403dfd90236d498774c51eb373c4fab535b0e8f1c0d734
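A sketch of how the ACTV/RXNM tables above are meant to be consumed: RXNM maps each reagent of a reaction to its stoichiometric weight (-1 for reactants, +1 for products), so a reaction energy is a weighted sum of per-reagent energies. The energies below are placeholders, not real reference data.

# Placeholder per-reagent energies for reaction 1 (ch3 + h2 -> ch4 + h).
reagent_energies = {
    'HTR40-ch3-reagent': -39.76,
    'HTR40-h2-reagent': -1.17,
    'HTR40-ch4-reagent': -40.51,
    'HTR40-h-reagent': -0.50,
}

rxnm_1 = {'HTR40-ch3-reagent': -1, 'HTR40-h2-reagent': -1,
          'HTR40-ch4-reagent': 1, 'HTR40-h-reagent': 1}

delta_e = sum(weight * reagent_energies[reagent]
              for reagent, weight in rxnm_1.items())
print(delta_e)  # products minus reactants, in the units of the inputs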
#!/usr/bin/env python

__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = [
    "Rob Knight", "Greg Caporaso", "Jeremy Widmann", "Kyle Bittinger"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"

"""Contains code for aligning sequences, using several techniques.

This module has the responsibility for taking a set of sequences and
returning an alignment. Mostly, it will be thin wrappers for code already in
cogent.app.*, to which wrappers for e.g. PyNAST need to be added.
"""
import warnings
warnings.filterwarnings('ignore', 'Not using MPI as mpi4py not found')
from os import remove

from numpy import median

import bfillings
from bfillings.infernal import cmalign_from_alignment
import bfillings.clustalw
import bfillings.muscle_v38
import bfillings.mafft
from cogent.parse.rfam import MinimalRfamParser, ChangedSequence

from burrito.util import ApplicationNotFoundError
from skbio.io import RecordError
from skbio.parse.sequences import parse_fasta

from qiime.util import (FunctionWithParams, get_qiime_temp_dir)
from skbio.alignment import SequenceCollection, Alignment
from skbio.sequence import DNASequence

# Load PyNAST if it's available. If it's not, set up to raise errors if the
# user tries to use it.
try:
    from pynast.util import pynast_seqs, pairwise_alignment_methods
    from pynast.logger import NastLogger

except ImportError:
    def raise_pynast_not_found_error(*args, **kwargs):
        raise ApplicationNotFoundError(
            "PyNAST cannot be found.\nIs PyNAST installed? Is it in your $PYTHONPATH?" +
            "\nYou can obtain PyNAST from http://qiime.org/pynast/.")
    # set functions which cannot be imported to raise_pynast_not_found_error
    pynast_seqs = NastLogger = raise_pynast_not_found_error
    pairwise_alignment_methods = {}


class Aligner(FunctionWithParams):
    """An Aligner takes an unaligned set of sequences and returns an alignment.

    This is an abstract class: subclasses should implement the __call__
    method.

    Note: sequence ids should be preserved during this process, i.e. the
    description lines should be saved/restored if the alignment app is
    destructive to them.
    """
    Name = 'Aligner'

    def __init__(self, params):
        """Return new Aligner object with specified params.

        Note: expect params to contain both generic and per-method (e.g. for
        infernal vs. PyNAST vs. whatever) params, so leaving it as a dict
        rather than setting attributes. Some standard entries in params are:

        Application: 3rd-party application used, if any, e.g. infernal
        [can't actually think of any other params that apply to all of e.g.
        PyNAST, infernal, and muscle]
        """
        self.Params = params

    def __call__(self, seq_path, result_path=None, log_path=None):
        """Returns alignment from sequences.

        Parameters:
        seq_path: path to file of sequences
        result_path: path to file of results. If specified, should dump the
            result to the desired path as fasta, otherwise should return
            skbio.core.alignment.Alignment object.
        log_path: path to log, which should include dump of params.
        """
        raise NotImplementedError("Aligner is an abstract class")


class CogentAligner(Aligner):
    """Generic aligner using Cogent multiple alignment methods."""

    Name = 'CogentAligner'

    def getResult(self, seq_path):
        """Returns alignment from sequences.

        By convention, app parameters begin with a '-'. Key-value pairs in
        self.Params following this convention will be passed as parameters
        to the module's alignment function.
        """
        module = self.Params['Module']
        seqs = self.getData(seq_path)
        params = dict(
            [(k, v) for (k, v) in self.Params.items() if k.startswith('-')])

        result = module.align_unaligned_seqs(seqs, params=params)
        return result

    def __call__(self, result_path=None, log_path=None, *args, **kwargs):
        """Calls superclass method to align seqs"""
        return FunctionWithParams.__call__(self, result_path=result_path,
                                           log_path=log_path, *args, **kwargs)


class InfernalAligner(Aligner):
    Name = 'InfernalAligner'

    def __init__(self, params):
        """Return new InfernalAligner object with specified params.
        """
        _params = {
            'Application': 'Infernal',
        }
        _params.update(params)
        Aligner.__init__(self, _params)

    def __call__(self, seq_path, result_path=None, log_path=None,
                 failure_path=None, cmbuild_params=None, cmalign_params=None):

        log_params = []
        # load candidate sequences
        candidate_sequences = dict(parse_fasta(open(seq_path, 'U')))

        # load template sequences
        try:
            info, template_alignment, struct = list(MinimalRfamParser(open(
                self.Params['template_filepath'], 'U'),
                seq_constructor=ChangedSequence))[0]
        except RecordError:
            raise ValueError(
                "Template alignment must be in Stockholm format with corresponding secondary structure annotation when using InfernalAligner.")

        # Need to make separate mapping for unaligned sequences
        unaligned = SequenceCollection.from_fasta_records(
            candidate_sequences.iteritems(), DNASequence)
        mapped_seqs, new_to_old_ids = unaligned.int_map(prefix='unaligned_')
        mapped_seq_tuples = [(k, str(v)) for k, v in mapped_seqs.iteritems()]

        # Turn on --gapthresh option in cmbuild to force alignment to full
        # model
        if cmbuild_params is None:
            cmbuild_params = {}
        cmbuild_params.update({'--gapthresh': 1.0})

        # record cmbuild parameters
        log_params.append('cmbuild parameters:')
        log_params.append(str(cmbuild_params))

        # Turn on --sub option in Infernal, since we know the unaligned
        # sequences are fragments.
        # Also turn on --gapthresh to use same gapthresh as was used to build
        # model
        if cmalign_params is None:
            cmalign_params = {}
        cmalign_params.update({'--sub': True,
                               '--gapthresh': 1.0})

        # record cmalign parameters
        log_params.append('cmalign parameters:')
        log_params.append(str(cmalign_params))

        # Align sequences to alignment including alignment gaps.
        aligned, struct_string = cmalign_from_alignment(
            aln=template_alignment,
            structure_string=struct,
            seqs=mapped_seq_tuples,
            include_aln=True,
            params=cmalign_params,
            cmbuild_params=cmbuild_params)

        # Pull out original sequences from full alignment.
        infernal_aligned = []
        # Get a dict of the ids to sequences (note that this is a
        # cogent alignment object, hence the call to NamedSeqs)
        aligned_dict = aligned.NamedSeqs
        for n, o in new_to_old_ids.iteritems():
            aligned_seq = aligned_dict[n]
            infernal_aligned.append((o, aligned_seq))

        # Create an Alignment object from alignment dict
        infernal_aligned = Alignment.from_fasta_records(
            infernal_aligned, DNASequence)

        if log_path is not None:
            log_file = open(log_path, 'w')
            log_file.write('\n'.join(log_params))
            log_file.close()

        if result_path is not None:
            result_file = open(result_path, 'w')
            result_file.write(infernal_aligned.to_fasta())
            result_file.close()
            return None
        else:
            try:
                return infernal_aligned
            except ValueError:
                return {}


class PyNastAligner(Aligner):
    Name = 'PyNastAligner'

    def __init__(self, params):
        """Return new PyNastAligner object with specified params.
        """
        _params = {
            'min_pct': 75.0,
            'min_len': 150,
            'blast_db': None,
            'template_filepath': None,
            'pairwise_alignment_method': 'blast',
            'Application': 'PyNAST',
            'Algorithm': 'NAST',
        }
        _params.update(params)
        Aligner.__init__(self, _params)

    def __call__(self, seq_path, result_path=None, log_path=None,
                 failure_path=None):
        # load candidate sequences
        seq_file = open(seq_path, 'U')
        candidate_sequences = parse_fasta(seq_file)

        # load template sequences
        template_alignment = []
        template_alignment_fp = self.Params['template_filepath']
        for seq_id, seq in parse_fasta(open(template_alignment_fp)):
            # replace '.' characters with '-' characters
            template_alignment.append((seq_id, seq.replace('.', '-').upper()))
        template_alignment = Alignment.from_fasta_records(
            template_alignment, DNASequence, validate=True)

        # initialize_logger
        logger = NastLogger(log_path)

        # get function for pairwise alignment method
        pairwise_alignment_f = pairwise_alignment_methods[
            self.Params['pairwise_alignment_method']]

        pynast_aligned, pynast_failed = pynast_seqs(
            candidate_sequences,
            template_alignment,
            min_pct=self.Params['min_pct'],
            min_len=self.Params['min_len'],
            align_unaligned_seqs_f=pairwise_alignment_f,
            logger=logger,
            temp_dir=get_qiime_temp_dir())

        logger.record(str(self))

        for i, seq in enumerate(pynast_failed):
            skb_seq = DNASequence(str(seq), id=seq.Name)
            pynast_failed[i] = skb_seq
        pynast_failed = SequenceCollection(pynast_failed)

        for i, seq in enumerate(pynast_aligned):
            skb_seq = DNASequence(str(seq), id=seq.Name)
            pynast_aligned[i] = skb_seq
        pynast_aligned = Alignment(pynast_aligned)

        if failure_path is not None:
            fail_file = open(failure_path, 'w')
            fail_file.write(pynast_failed.to_fasta())
            fail_file.close()

        if result_path is not None:
            result_file = open(result_path, 'w')
            result_file.write(pynast_aligned.to_fasta())
            result_file.close()
            return None
        else:
            return pynast_aligned


def compute_min_alignment_length(seqs_f, fraction=0.75):
    """Return the minimum alignment length as a fraction of the median
    input sequence length.
    """
    med_length = median([len(s) for _, s in parse_fasta(seqs_f)])
    return int(med_length * fraction)


alignment_method_constructors = {'pynast': PyNastAligner,
                                 'infernal': InfernalAligner}

alignment_module_names = {
    'muscle': bfillings.muscle_v38,
    'clustalw': bfillings.clustalw,
    'mafft': bfillings.mafft,
    'infernal': bfillings.infernal,
}
squirrelo/qiime
qiime/align_seqs.py
Python
gpl-2.0
11,369
[ "BLAST" ]
e648f2e960bae90ab77d6d748e7c079c9b8db89acaad5e4e392fce25546f074d
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2006 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012 Paul Franklin, Nicolas Adenis-Lamarre, Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$

#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
from gramps.gen.ggettext import gettext as _
from math import pi, cos, sin, log10, acos

def log2(val):
    """
    Calculate the log base 2 of a value.
    """
    return int(log10(val)/log10(2))

#------------------------------------------------------------------------
#
# gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.errors import ReportError
from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle, GraphicsStyle,
                                    FONT_SANS_SERIF, PARA_ALIGN_CENTER,
                                    IndexMark, INDEX_TYPE_TOC)
from gramps.gen.plug.menu import (EnumeratedListOption, NumberOption,
                                  PersonOption, BooleanOption)
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.config import config
from gramps.gen.utils.db import get_birth_or_fallback, get_death_or_fallback
from gramps.gen.lib import EventType

#------------------------------------------------------------------------
#
# private constants
#
#------------------------------------------------------------------------
FULL_CIRCLE = 0
HALF_CIRCLE = 1
QUAR_CIRCLE = 2

BACKGROUND_WHITE = 0
BACKGROUND_GEN = 1

RADIAL_UPRIGHT = 0
RADIAL_ROUNDABOUT = 1

# minor offset, only useful for generation 11,
# to avoid a slight offset between the text and the polygon;
# this can be considered as a bad hack
WEDGE_TEXT_BARRE_OFFSET = 0.0016
pt2cm = utils.pt2cm

cal = config.get('preferences.calendar-format-report')

#------------------------------------------------------------------------
#
# private functions
#
#------------------------------------------------------------------------
def draw_wedge(doc, style, centerx, centery, radius, start_angle,
               end_angle, do_rendering, short_radius=0):
    """
    Draw a wedge shape.
""" while end_angle < start_angle: end_angle += 360 p = [] degreestoradians = pi / 180.0 radiansdelta = degreestoradians / 2 sangle = start_angle * degreestoradians eangle = end_angle * degreestoradians while eangle < sangle: eangle = eangle + 2 * pi angle = sangle if short_radius == 0: if (end_angle - start_angle) != 360: p.append((centerx, centery)) else: origx = (centerx + cos(angle) * short_radius) origy = (centery + sin(angle) * short_radius) p.append((origx, origy)) while angle < eangle: x = centerx + cos(angle) * radius y = centery + sin(angle) * radius p.append((x, y)) angle = angle + radiansdelta x = centerx + cos(eangle) * radius y = centery + sin(eangle) * radius p.append((x, y)) if short_radius: x = centerx + cos(eangle) * short_radius y = centery + sin(eangle) * short_radius p.append((x, y)) angle = eangle while angle >= sangle: x = centerx + cos(angle) * short_radius y = centery + sin(angle) * short_radius p.append((x, y)) angle -= radiansdelta if do_rendering: doc.draw_path(style, p) delta = (eangle - sangle) / 2.0 rad = short_radius + (radius - short_radius) / 2.0 return ( (centerx + cos(sangle + delta + WEDGE_TEXT_BARRE_OFFSET) * rad), (centery + sin(sangle + delta + WEDGE_TEXT_BARRE_OFFSET) * rad)) #------------------------------------------------------------------------ # # FanChart # #------------------------------------------------------------------------ class FanChart(Report): def __init__(self, database, options, user): """ Create the FanChart object that produces the report. The arguments are: database - the GRAMPS database instance options - instance of the Options class for this report user - a gen.user.User instance This report needs the following parameters (class variables) that come in the options class. maxgen - Maximum number of generations to include. circle - Draw a full circle, half circle, or quarter circle. background - Background color is generation dependent or white. radial - Print radial texts roundabout or as upright as possible. 
draw_empty - draw background when there is no information same_style - use the same style for all generation """ menu = options.menu self.max_generations = menu.get_option_by_name('maxgen').get_value() self.circle = menu.get_option_by_name('circle').get_value() self.background = menu.get_option_by_name('background').get_value() self.radial = menu.get_option_by_name('radial').get_value() pid = menu.get_option_by_name('pid').get_value() self.draw_empty = menu.get_option_by_name('draw_empty').get_value() self.same_style = menu.get_option_by_name('same_style').get_value() self.center_person = database.get_person_from_gramps_id(pid) if (self.center_person == None) : raise ReportError(_("Person %s is not in the Database") % pid ) self.graphic_style = [] self.text_style = [] for i in range (0, self.max_generations): self.graphic_style.append('FC-Graphic' + '%02d' % i) self.text_style.append('FC-Text' + '%02d' % i) self.calendar = 0 Report.__init__(self, database, options, user) self.height = 0 self.map = [None] * 2**self.max_generations self.text = {} def apply_filter(self,person_handle,index): """traverse the ancestors recursively until either the end of a line is found, or until we reach the maximum number of generations that we want to deal with""" if (not person_handle) or (index >= 2**self.max_generations): return self.map[index-1] = person_handle self.text[index-1] = self.get_info(person_handle, log2(index)) person = self.database.get_person_from_handle(person_handle) family_handle = person.get_main_parents_family_handle() if family_handle: family = self.database.get_family_from_handle(family_handle) self.apply_filter(family.get_father_handle(),index*2) self.apply_filter(family.get_mother_handle(),(index*2)+1) def write_report(self): self.doc.start_page() self.apply_filter(self.center_person.get_handle(),1) n = self.center_person.get_primary_name().get_regular_name() if self.circle == FULL_CIRCLE: max_angle = 360.0 start_angle = 90 max_circular = 5 x = self.doc.get_usable_width() / 2.0 y = self.doc.get_usable_height() / 2.0 min_xy = min (x, y) elif self.circle == HALF_CIRCLE: max_angle = 180.0 start_angle = 180 max_circular = 3 x = (self.doc.get_usable_width()/2.0) y = self.doc.get_usable_height() min_xy = min (x, y) else: # quarter circle max_angle = 90.0 start_angle = 270 max_circular = 2 x = 0 y = self.doc.get_usable_height() min_xy = min (self.doc.get_usable_width(), y) # choose one line or two lines translation according to the width title = _("%(generations)d Generation Fan Chart for %(person)s" ) % \ { 'generations' : self.max_generations, 'person' : n } title_nb_lines = 1 style_sheet = self.doc.get_style_sheet() if style_sheet: paragraph_style = style_sheet.get_paragraph_style('FC-Title') if paragraph_style: font = paragraph_style.get_font() if font: title_width = pt2cm(self.doc.string_width(font, title)) if title_width > self.doc.get_usable_width(): title = _("%(generations)d Generation Fan Chart for\n%(person)s" ) % \ { 'generations' : self.max_generations, 'person' : n } title_nb_lines = 2 if self.circle == FULL_CIRCLE or self.circle == QUAR_CIRCLE: # adjust only if full circle or 1/4 circle in landscape mode if self.doc.get_usable_height() <= self.doc.get_usable_width(): # Should be in Landscape now style_sheet = self.doc.get_style_sheet() paragraph_style = style_sheet.get_paragraph_style('FC-Title') if paragraph_style: font = paragraph_style.get_font() if font: fontsize = pt2cm(font.get_size()) # y is vertical distance to center of circle, move center down 1 fontsize y += 
fontsize*title_nb_lines # min_XY is the diameter of the circle, subtract two fontsize # so we dont draw outside bottom of the paper min_xy = min(min_xy, y - 2*fontsize*title_nb_lines) if self.max_generations > max_circular: block_size = min_xy / (self.max_generations * 2 - max_circular) else: block_size = min_xy / self.max_generations # adaptation of the fonts (title and others) optimized_style_sheet = self.get_optimized_style_sheet(title, max_circular, block_size, self.same_style, not self.same_style, # if same_style, use default generated colors self.background == BACKGROUND_WHITE) if optimized_style_sheet: self.doc.set_style_sheet(optimized_style_sheet) # title mark = IndexMark(title, INDEX_TYPE_TOC, 1) self.doc.center_text ('FC-Graphic-title', title, self.doc.get_usable_width() / 2, 0, mark) #wheel for generation in range (0, min (max_circular, self.max_generations)): self.draw_circular (x, y, start_angle, max_angle, block_size, generation) for generation in range (max_circular, self.max_generations): self.draw_radial (x, y, start_angle, max_angle, block_size, generation) self.doc.end_page() def get_info(self,person_handle,generation): person = self.database.get_person_from_handle(person_handle) pn = person.get_primary_name() self.calendar = config.get('preferences.calendar-format-report') birth = get_birth_or_fallback(self.database, person) b = "" if birth: b = str(birth.get_date_object().to_calendar(self.calendar).get_year()) if b == 0: b = "" elif birth.get_type() != EventType.BIRTH: b += '*' death = get_death_or_fallback(self.database, person) d = "" if death: d = str(death.get_date_object().to_calendar(self.calendar).get_year()) if d == 0: d = "" elif death.get_type() != EventType.DEATH: d += '*' if b and d: val = "%s - %s" % (str(b),str(d)) elif b: val = "* %s" % (str(b)) elif d: val = "+ %s" % (str(d)) else: val = "" if generation > 7: if (pn.get_first_name() != "") and (pn.get_surname() != ""): name = pn.get_first_name() + " " + pn.get_surname() else: name = pn.get_first_name() + pn.get_surname() if (name != "") and (val != ""): string = name + ", " + val else: string = name + val return [ string ] elif generation == 7: if (pn.get_first_name() != "") and (pn.get_surname() != ""): name = pn.get_first_name() + " " + pn.get_surname() else: name = pn.get_first_name() + pn.get_surname() if self.circle == FULL_CIRCLE: return [ name, val ] elif self.circle == HALF_CIRCLE: return [ name, val ] else: if (name != "") and (val != ""): string = name + ", " + val else: string = name + val return [string] elif generation == 6: if self.circle == FULL_CIRCLE: return [ pn.get_first_name(), pn.get_surname(), val ] elif self.circle == HALF_CIRCLE: return [ pn.get_first_name(), pn.get_surname(), val ] else: if (pn.get_first_name() != "") and (pn.get_surname() != ""): name = pn.get_first_name() + " " + pn.get_surname() else: name = pn.get_first_name() + pn.get_surname() return [ name, val ] else: return [ pn.get_first_name(), pn.get_surname(), val ] def get_max_width_for_circles(self, rad1, rad2, max_centering_proportion): """ __ /__\ <- compute the line width which is drawable between 2 circles. 
/ _ \ max_centering_proportion : 0, touching the circle1, 1, | |_| | touching the circle2, 0.5 : middle between the 2 circles | | \ / \__/ basically, max_centering_proportion is max_centering_proportion/nb_lines """ # radius at the center of the 2 circles rmid = rad2 - (rad2-rad1)*max_centering_proportion return sin(acos(rmid/rad2)) * rad2 * 2 def get_max_width_for_circles_line(self, rad1, rad2, line, nb_lines, centering = False): """ __ /__\ <- compute the line width which is drawable between 2 circles. / _ \ instead of a max_centering_proportion, you get a line/nb_lines position. | |_| | (we suppose that lines have the same heights) | | for example, if you've 2 lines to draw, \ / line 2 max width is at the 2/3 between the 2 circles \__/ """ if centering: return self.get_max_width_for_circles(rad1, rad2, 1.0) else: return self.get_max_width_for_circles(rad1, rad2, line/float(nb_lines+1)) def get_optimized_font_size_for_text(self, rad1, rad2, text, font, centering = False): """ a text can be several lines find the font size equals or lower than font.get_size() which fit between rad1 and rad2 to display the text. centering is a special case when you've the full circle available to draw the text in the middle of it """ min_font_size = font.get_size() i = 1 nb_lines = len(text) for line in text: font_size = self.get_optimized_font_size(line, font, self.get_max_width_for_circles_line(rad1, rad2, i, nb_lines, centering)) i += 1 if min_font_size > font_size: min_font_size = font_size return min_font_size def get_optimized_font_size(self, line, font, max_width): """ for a given width, guess the best font size which is equals or smaller than font which make line fit into max_width """ test_font = FontStyle(font) w = pt2cm(self.doc.string_width(test_font, line)) while w > max_width and test_font.get_size() > 1: test_font.set_size(test_font.get_size() -1) w = pt2cm(self.doc.string_width(test_font, line)) return test_font.get_size() def get_optimized_style_sheet(self, title, max_circular, block_size, map_style_from_single, map_paragraphs_colors_to_graphics, make_background_white): """ returns an optimized (modified) style sheet which make fanchart look nicer """ redefined_style_sheet = self.doc.get_style_sheet() if not redefined_style_sheet: return self.doc.get_style_sheet() # update title font size pstyle_name = 'FC-Title' paragraph_style = redefined_style_sheet.get_paragraph_style(pstyle_name) if paragraph_style: title_font = paragraph_style.get_font() if title_font: title_width = pt2cm(self.doc.string_multiline_width(title_font, title)) while (title_width > self.doc.get_usable_width() and title_font.get_size() > 1): title_font.set_size(title_font.get_size()-1) title_width = pt2cm(self.doc.string_multiline_width( title_font, title)) redefined_style_sheet.add_paragraph_style(pstyle_name, paragraph_style) # biggest font allowed is the one of the fist generation, after, # always lower than the previous one paragraph_style = redefined_style_sheet.get_paragraph_style(self.text_style[0]) font = None if paragraph_style: font = paragraph_style.get_font() if font: previous_generation_font_size = font.get_size() for generation in range (0, self.max_generations): gstyle_name = self.graphic_style[generation] pstyle_name = self.text_style [generation] g = redefined_style_sheet.get_draw_style(gstyle_name) # paragraph_style is a copy of 'FC-Text' - use different style # to be able to auto change some fonts for some generations if map_style_from_single: paragraph_style = 
redefined_style_sheet.get_paragraph_style('FC-Text') else: paragraph_style = redefined_style_sheet.get_paragraph_style(pstyle_name) if g and paragraph_style: # set graphic colors to paragraph colors, while it's fonctionnaly # the same for fanchart or make backgrounds white if make_background_white: g.set_fill_color((255,255,255)) redefined_style_sheet.add_draw_style(gstyle_name, g) elif map_paragraphs_colors_to_graphics: pstyle = redefined_style_sheet.get_paragraph_style(pstyle_name) if pstyle: g.set_fill_color(pstyle.get_background_color()) redefined_style_sheet.add_draw_style(gstyle_name, g) # adapt font size if too big segments = 2**generation if generation < min (max_circular, self.max_generations): # adpatation for circular fonts rad1, rad2 = self.get_circular_radius(block_size, generation, self.circle) font = paragraph_style.get_font() if font: min_font_size = font.get_size() # find the smallest font required for index in range(segments - 1, 2*segments - 1): if self.map[index]: font_size = \ self.get_optimized_font_size_for_text( rad1, rad2, self.text[index], paragraph_style.get_font(), (self.circle == FULL_CIRCLE and generation == 0) ) if font_size < min_font_size: min_font_size = font_size font.set_size(min(previous_generation_font_size, min_font_size)) else: # adaptation for radial fonts # find the largest string for the generation longest_line = "" longest_width = 0 for index in range(segments - 1, 2*segments - 1): if self.map[index]: for line in self.text[index]: width = pt2cm(self.doc.string_multiline_width( paragraph_style.get_font(), line)) if width > longest_width: longest_line = line longest_width = width # determine maximum width allowed for this generation rad1, rad2 = self.get_radial_radius(block_size, generation, self.circle) max_width = rad2 - rad1 # reduce the font so that longest_width fit into max_width font = paragraph_style.get_font() if font: font.set_size(min(previous_generation_font_size, self.get_optimized_font_size(longest_line, paragraph_style.get_font(), max_width)) ) # redefine the style redefined_style_sheet.add_paragraph_style(pstyle_name, paragraph_style) font = paragraph_style.get_font() if font: previous_generation_font_size = font.get_size() # finished return redefined_style_sheet def draw_circular(self, x, y, start_angle, max_angle, size, generation): segments = 2**generation delta = max_angle / segments end_angle = start_angle text_angle = start_angle - 270 + (delta / 2.0) rad1, rad2 = self.get_circular_radius(size, generation, self.circle) graphic_style = self.graphic_style[generation] for index in range(segments - 1, 2*segments - 1): start_angle = end_angle end_angle = start_angle + delta (xc,yc) = draw_wedge(self.doc, graphic_style, x, y, rad2, start_angle, end_angle, self.map[index] or self.draw_empty, rad1) if self.map[index]: if (generation == 0) and self.circle == FULL_CIRCLE: yc = y person = self.database.get_person_from_handle(self.map[index]) mark = utils.get_person_mark(self.database, person) self.doc.rotate_text(graphic_style, self.text[index], xc, yc, text_angle, mark) text_angle += delta def get_radial_radius(self, size, generation, circle): if circle == FULL_CIRCLE: rad1 = size * ((generation * 2) - 5) rad2 = size * ((generation * 2) - 3) elif circle == HALF_CIRCLE: rad1 = size * ((generation * 2) - 3) rad2 = size * ((generation * 2) - 1) else: # quarter circle rad1 = size * ((generation * 2) - 2) rad2 = size * (generation * 2) return rad1, rad2 def get_circular_radius(self, size, generation, circle): return size * generation, size * 
(generation + 1) def draw_radial(self, x, y, start_angle, max_angle, size, generation): segments = 2**generation delta = max_angle / segments end_angle = start_angle text_angle = start_angle - delta / 2.0 graphic_style = self.graphic_style[generation] rad1, rad2 = self.get_radial_radius(size, generation, self.circle) for index in range(segments - 1, 2*segments - 1): start_angle = end_angle end_angle = start_angle + delta (xc,yc) = draw_wedge(self.doc, graphic_style, x, y, rad2, start_angle, end_angle, self.map[index] or self.draw_empty, rad1) text_angle += delta if self.map[index]: person = self.database.get_person_from_handle(self.map[index]) mark = utils.get_person_mark(self.database, person) if self.radial == RADIAL_UPRIGHT and (start_angle >= 90) and (start_angle < 270): self.doc.rotate_text(graphic_style, self.text[index], xc, yc, text_angle + 180, mark) else: self.doc.rotate_text(graphic_style, self.text[index], xc, yc, text_angle, mark) #------------------------------------------------------------------------ # # # #------------------------------------------------------------------------ class FanChartOptions(MenuReportOptions): def __init__(self, name, dbase): self.MAX_GENERATIONS = 11 MenuReportOptions.__init__(self, name, dbase) def add_menu_options(self, menu): """ Add options to the menu for the fan chart. """ category_name = _("Report Options") pid = PersonOption(_("Center Person")) pid.set_help(_("The center person for the report")) menu.add_option(category_name, "pid", pid) max_gen = NumberOption(_("Generations"), 5, 1, self.MAX_GENERATIONS) max_gen.set_help(_("The number of generations to include in the report")) menu.add_option(category_name, "maxgen", max_gen) circle = EnumeratedListOption(_('Type of graph'), HALF_CIRCLE) circle.add_item(FULL_CIRCLE, _('full circle')) circle.add_item(HALF_CIRCLE, _('half circle')) circle.add_item(QUAR_CIRCLE, _('quarter circle')) circle.set_help( _("The form of the graph: full circle, half circle," " or quarter circle.")) menu.add_option(category_name, "circle", circle) background = EnumeratedListOption(_('Background color'), BACKGROUND_GEN) background.add_item(BACKGROUND_WHITE, _('white')) background.add_item(BACKGROUND_GEN, _('generation dependent')) background.set_help(_("Background color is either white or generation" " dependent")) menu.add_option(category_name, "background", background) radial = EnumeratedListOption( _('Orientation of radial texts'), RADIAL_UPRIGHT ) radial.add_item(RADIAL_UPRIGHT, _('upright')) radial.add_item(RADIAL_ROUNDABOUT, _('roundabout')) radial.set_help(_("Print radial texts upright or roundabout")) menu.add_option(category_name, "radial", radial) draw_empty = BooleanOption(_("Draw empty boxes"), True) draw_empty.set_help(_("Draw the background although there is no information")) menu.add_option(category_name, "draw_empty", draw_empty) same_style = BooleanOption(_("Use one font style for all generations"), True) same_style.set_help(_("You can customize font and color for each generation in the style editor")) menu.add_option(category_name, "same_style", same_style) def make_default_style(self,default_style): """Make the default output style for the Fan Chart report.""" BACKGROUND_COLORS = [ (255, 63, 0), (255,175, 15), (255,223, 87), (255,255,111), (159,255,159), (111,215,255), ( 79,151,255), (231, 23,255), (231, 23,221), (210,170,124), (189,153,112) ] #Paragraph Styles f = FontStyle() f.set_size(18) f.set_bold(1) f.set_type_face(FONT_SANS_SERIF) p = ParagraphStyle() p.set_font(f) 
p.set_alignment(PARA_ALIGN_CENTER) p.set_description(_('The style used for the title.')) default_style.add_paragraph_style("FC-Title",p) f = FontStyle() f.set_size(9) f.set_type_face(FONT_SANS_SERIF) p = ParagraphStyle() p.set_font(f) p.set_alignment(PARA_ALIGN_CENTER) p.set_description(_('The basic style used for the default text display.')) default_style.add_paragraph_style("FC-Text", p) for i in range (0, self.MAX_GENERATIONS): f = FontStyle() f.set_size(9) f.set_type_face(FONT_SANS_SERIF) p = ParagraphStyle() p.set_font(f) p.set_alignment(PARA_ALIGN_CENTER) p.set_description(_('The style used for the text display of generation ' + "%d" % i)) default_style.add_paragraph_style("FC-Text" + "%02d" % i, p) # GraphicsStyles g = GraphicsStyle() g.set_paragraph_style('FC-Title') default_style.add_draw_style('FC-Graphic-title', g) for i in range (0, self.MAX_GENERATIONS): g = GraphicsStyle() g.set_paragraph_style('FC-Text' + '%02d' % i) g.set_fill_color(BACKGROUND_COLORS[i]) default_style.add_draw_style('FC-Graphic' + '%02d' % i, g)
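
# --- Editor's illustrative sketch (not part of the original Gramps report). ---
# draw_wedge above builds each fan segment as a polygon sampled every half
# degree along two arcs, and returns the mid-arc point used to anchor the
# rotated text. This standalone sketch re-derives just that text-anchor
# computation (before the small WEDGE_TEXT_BARRE_OFFSET nudge) so the
# geometry can be checked without a document backend; `_wedge_text_anchor`
# is a hypothetical helper, not part of the Gramps API.

from math import pi, cos, sin

def _wedge_text_anchor(cx, cy, rad1, rad2, start_deg, end_deg):
    """Return the (x, y) point midway between the two arcs, halfway through the wedge."""
    sangle = start_deg * pi / 180.0
    eangle = end_deg * pi / 180.0
    delta = (eangle - sangle) / 2.0
    rad = rad1 + (rad2 - rad1) / 2.0
    return (cx + cos(sangle + delta) * rad, cy + sin(sangle + delta) * rad)

# A 90-270 degree wedge between radii 2 and 4 has its text anchor on the
# 180-degree axis at radius 3, i.e. at (-3.0, 0.0) relative to the centre.
_ax, _ay = _wedge_text_anchor(0.0, 0.0, 2.0, 4.0, 90.0, 270.0)
assert abs(_ax - (-3.0)) < 1e-9 and abs(_ay) < 1e-6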
arunkgupta/gramps
gramps/plugins/drawreport/fanchart.py
Python
gpl-2.0
31,716
[ "Brian" ]
10c13a6d6e47830a4ea3888284726088c8a3ecf91faa3c20a94f374ddb2f0454
from __future__ import print_function
import sys
import os
import copy
import random
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.grid.grid_search import H2OGridSearch
from h2o.estimators.deeplearning import H2ODeepLearningEstimator


def deeplearning_grid_cars():
    cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
    r = cars[0].runif(seed=42)
    train = cars[r > .2]

    validation_scheme = random.randint(1,3)  # 1:none, 2:cross-validation, 3:validation set
    print("Validation scheme: {0}".format(validation_scheme))
    if validation_scheme == 2:
        nfolds = 2
        print("Nfolds: 2")
    if validation_scheme == 3:
        valid = cars[r <= .2]

    grid_space = pyunit_utils.make_random_grid_space(algo="dl")
    print("Grid space: {0}".format(grid_space))

    predictors = ["displacement","power","weight","acceleration","year"]
    if grid_space['distribution'][0] == 'bernoulli':
        response_col = "economy_20mpg"
    elif grid_space['distribution'][0] == 'gaussian':
        response_col = "economy"
    else:
        response_col = "cylinders"

    print("Predictors: {0}".format(predictors))
    print("Response: {0}".format(response_col))

    if grid_space['distribution'][0] in ['bernoulli', 'multinomial']:
        print("Converting the response column to a factor...")
        train[response_col] = train[response_col].asfactor()
        if validation_scheme == 3:
            valid[response_col] = valid[response_col].asfactor()

    print("Constructing the grid of deep learning models...")
    cars_dl_grid = H2OGridSearch(H2ODeepLearningEstimator, hyper_params=grid_space)
    if validation_scheme == 1:
        cars_dl_grid.train(x=predictors, y=response_col, training_frame=train)
    elif validation_scheme == 2:
        cars_dl_grid.train(x=predictors, y=response_col, training_frame=train, nfolds=nfolds)
    else:
        cars_dl_grid.train(x=predictors, y=response_col, training_frame=train, validation_frame=valid)

    for model in cars_dl_grid:
        assert isinstance(model, H2ODeepLearningEstimator)

    print("Performing various checks of the constructed grid...")

    print("Check the cardinality of the grid, that is, that the correct number of models have been created...")
    size_of_grid_space = 1
    for v in list(grid_space.values()):
        size_of_grid_space = size_of_grid_space * len(v)
    actual_size = len(cars_dl_grid)
    assert size_of_grid_space == actual_size, "Expected size of grid to be {0}, but got {1}" \
                                              "".format(size_of_grid_space, actual_size)

    print("Duplicate-entries-in-grid-space check")
    new_grid_space = copy.deepcopy(grid_space)
    for name in list(grid_space.keys()):
        if not name == "distribution":
            new_grid_space[name] = grid_space[name] + grid_space[name]
    print("The new search space: {0}".format(new_grid_space))
    print("Constructing the new grid of deep learning models...")
    cars_dl_grid2 = H2OGridSearch(H2ODeepLearningEstimator, hyper_params=new_grid_space)
    if validation_scheme == 1:
        cars_dl_grid2.train(x=predictors, y=response_col, training_frame=train)
    elif validation_scheme == 2:
        cars_dl_grid2.train(x=predictors, y=response_col, training_frame=train, nfolds=nfolds)
    else:
        cars_dl_grid2.train(x=predictors, y=response_col, training_frame=train, validation_frame=valid)
    actual_size2 = len(cars_dl_grid2)
    assert actual_size == actual_size2, "Expected duplicates to be ignored. Without dups grid size: {0}. With dups " \
                                        "size: {1}".format(actual_size, actual_size2)

    print("Check that the hyper_params that were passed to the grid were used to construct the models...")
    for name in list(grid_space.keys()):
        pyunit_utils.expect_model_param(cars_dl_grid, name, grid_space[name])

    for model in cars_dl_grid2:
        assert isinstance(model, H2ODeepLearningEstimator)


if __name__ == "__main__":
    pyunit_utils.standalone_test(deeplearning_grid_cars)
else:
    deeplearning_grid_cars()
mathemage/h2o-3
h2o-py/tests/testdir_algos/deeplearning/pyunit_grid_cars_deeplearning.py
Python
apache-2.0
4,115
[ "Gaussian" ]
985f24e8c8920e4e67d39adaee3fde216d09bafab03bdc4ece52771c5aa000f0
################################################################################ # The Neural Network (NN) based Speech Synthesis System # https://github.com/CSTR-Edinburgh/merlin # # Centre for Speech Technology Research # University of Edinburgh, UK # Copyright (c) 2014-2015 # All Rights Reserved. # # The system as a whole and most of the files in it are distributed # under the following copyright and conditions # # Permission is hereby granted, free of charge, to use and distribute # this software and its documentation without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of this work, and to # permit persons to whom this work is furnished to do so, subject to # the following conditions: # # - Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # - Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # - The authors' names may not be used to endorse or promote products derived # from this software without specific prior written permission. # # THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK # DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT # SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE # FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN # AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, # ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF # THIS SOFTWARE. 
################################################################################ import pickle import gzip import os, sys, errno import time import math import subprocess import socket # only for socket.getfqdn() # numpy & theano imports need to be done in this order (only for some numpy installations, not sure why) import numpy #import gnumpy as gnp # we need to explicitly import this in some cases, not sure why this doesn't get imported with numpy itself import numpy.distutils.__config__ # and only after that can we import theano import theano from utils.providers import ListDataProvider from frontend.label_normalisation import HTSLabelNormalisation from frontend.silence_remover import SilenceRemover from frontend.silence_remover import trim_silence from frontend.min_max_norm import MinMaxNormalisation from frontend.acoustic_composition import AcousticComposition from frontend.parameter_generation import ParameterGeneration from frontend.mean_variance_norm import MeanVarianceNorm # the new class for label composition and normalisation from frontend.label_composer import LabelComposer from frontend.label_modifier import HTSLabelModification from frontend.merge_features import MergeFeat import configuration from models.deep_rnn import DeepRecurrentNetwork from utils.compute_distortion import DistortionComputation, IndividualDistortionComp from utils.generate import generate_wav from utils.learn_rates import ExpDecreaseLearningRate from io_funcs.binary_io import BinaryIOCollection # our custom logging class that can also plot from logplot.logging_plotting import LoggerPlotter, MultipleSeriesPlot, SingleWeightMatrixPlot import logging # as logging import logging.config import io from utils.file_paths import FilePaths from utils.utils import read_file_list, prepare_file_path_list def extract_file_id_list(file_list): file_id_list = [] for file_name in file_list: file_id = os.path.basename(os.path.splitext(file_name)[0]) file_id_list.append(file_id) return file_id_list def make_output_file_list(out_dir, in_file_lists): out_file_lists = [] for in_file_name in in_file_lists: file_id = os.path.basename(in_file_name) out_file_name = out_dir + '/' + file_id out_file_lists.append(out_file_name) return out_file_lists def visualize_dnn(dnn): plotlogger = logging.getLogger("plotting") # reference activation weights in layers W = list(); layer_name = list() for i in range(len(dnn.params)): aa = dnn.params[i].get_value(borrow=True).T print(aa.shape, aa.size) if aa.size > aa.shape[0]: W.append(aa) layer_name.append(dnn.params[i].name) ## plot activation weights including input and output layer_num = len(W) for i_layer in range(layer_num): fig_name = 'Activation weights W' + str(i_layer) + '_' + layer_name[i_layer] fig_title = 'Activation weights of W' + str(i_layer) xlabel = 'Neuron index of hidden layer ' + str(i_layer) ylabel = 'Neuron index of hidden layer ' + str(i_layer+1) if i_layer == 0: xlabel = 'Input feature index' if i_layer == layer_num-1: ylabel = 'Output feature index' logger.create_plot(fig_name, SingleWeightMatrixPlot) plotlogger.add_plot_point(fig_name, fig_name, W[i_layer]) plotlogger.save_plot(fig_name, title=fig_name, xlabel=xlabel, ylabel=ylabel) def load_covariance(var_file_dict, out_dimension_dict): var = {} io_funcs = BinaryIOCollection() for feature_name in list(var_file_dict.keys()): var_values, dimension = io_funcs.load_binary_file_frame(var_file_dict[feature_name], 1) var_values = numpy.reshape(var_values, (out_dimension_dict[feature_name], 1)) var[feature_name] = var_values 
return var def train_DNN(train_xy_file_list, valid_xy_file_list, \ nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False, var_dict=None, cmp_mean_vector = None, cmp_std_vector = None, init_dnn_model_file = None): # get loggers for this function # this one writes to both console and file logger = logging.getLogger("main.train_DNN") logger.debug('Starting train_DNN') if plot: # this one takes care of plotting duties plotlogger = logging.getLogger("plotting") # create an (empty) plot of training convergence, ready to receive data points logger.create_plot('training convergence',MultipleSeriesPlot) try: assert numpy.sum(ms_outs) == n_outs except AssertionError: logger.critical('the summation of multi-stream outputs does not equal to %d' %(n_outs)) raise ####parameters##### finetune_lr = float(hyper_params['learning_rate']) training_epochs = int(hyper_params['training_epochs']) batch_size = int(hyper_params['batch_size']) l1_reg = float(hyper_params['l1_reg']) l2_reg = float(hyper_params['l2_reg']) warmup_epoch = int(hyper_params['warmup_epoch']) momentum = float(hyper_params['momentum']) warmup_momentum = float(hyper_params['warmup_momentum']) hidden_layer_size = hyper_params['hidden_layer_size'] buffer_utt_size = buffer_size early_stop_epoch = int(hyper_params['early_stop_epochs']) hidden_activation = hyper_params['hidden_activation'] output_activation = hyper_params['output_activation'] model_type = hyper_params['model_type'] hidden_layer_type = hyper_params['hidden_layer_type'] ## use a switch to turn on pretraining ## pretraining may not help too much, if this case, we turn it off to save time do_pretraining = hyper_params['do_pretraining'] pretraining_epochs = int(hyper_params['pretraining_epochs']) pretraining_lr = float(hyper_params['pretraining_lr']) sequential_training = hyper_params['sequential_training'] dropout_rate = hyper_params['dropout_rate'] buffer_size = int(buffer_size / batch_size) * batch_size ################### (train_x_file_list, train_y_file_list) = train_xy_file_list (valid_x_file_list, valid_y_file_list) = valid_xy_file_list logger.debug('Creating training data provider') train_data_reader = ListDataProvider(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, sequential = sequential_training, shuffle = True) logger.debug('Creating validation data provider') valid_data_reader = ListDataProvider(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, sequential = sequential_training, shuffle = False) if cfg.rnn_batch_training: train_data_reader.set_rnn_params(training_algo=cfg.training_algo, batch_size=cfg.batch_size, seq_length=cfg.seq_length, merge_size=cfg.merge_size, bucket_range=cfg.bucket_range) valid_data_reader.reshape_input_output() shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_one_partition() train_set_x, train_set_y = shared_train_set_xy shared_valid_set_xy, temp_valid_set_x, temp_valid_set_y = valid_data_reader.load_one_partition() valid_set_x, valid_set_y = shared_valid_set_xy train_data_reader.reset() valid_data_reader.reset() ##temporally we use the training set as pretrain_set_x. 
##we need to support any data for pretraining # numpy random generator numpy_rng = numpy.random.RandomState(123) logger.info('building the model') dnn_model = None pretrain_fn = None ## not all the model support pretraining right now train_fn = None valid_fn = None valid_model = None ## valid_fn and valid_model are the same. reserve to computer multi-stream distortion if model_type == 'DNN': dnn_model = DeepRecurrentNetwork(n_in= n_ins, hidden_layer_size = hidden_layer_size, n_out = n_outs, L1_reg = l1_reg, L2_reg = l2_reg, hidden_layer_type = hidden_layer_type, dropout_rate = dropout_rate, optimizer = cfg.optimizer, rnn_batch_training = cfg.rnn_batch_training) else: logger.critical('%s type NN model is not supported!' %(model_type)) raise ## Model adaptation -- fine tuning the existing model ## We can't just unpickle the old model and use that because fine-tune functions ## depend on opt_l2e option used in construction of initial model. One way around this ## would be to unpickle, manually set unpickled_dnn_model.opt_l2e=True and then call ## unpickled_dnn_model.build_finetne_function() again. This is another way, construct ## new model from scratch with opt_l2e=True, then copy existing weights over: use_lhuc = cfg.use_lhuc if init_dnn_model_file != "_": logger.info('load parameters from existing model: %s' %(init_dnn_model_file)) if not os.path.isfile(init_dnn_model_file): sys.exit('Model file %s does not exist'%(init_dnn_model_file)) existing_dnn_model = pickle.load(open(init_dnn_model_file, 'rb')) if not use_lhuc and not len(existing_dnn_model.params) == len(dnn_model.params): sys.exit('Old and new models have different numbers of weight matrices') elif use_lhuc and len(dnn_model.params) < len(existing_dnn_model.params): sys.exit('In LHUC adaptation new model must have more parameters than old model.') # assign the existing dnn model parameters to the new dnn model k = 0 for i in range(len(dnn_model.params)): ## Added for LHUC ## # In LHUC, we keep all the old parameters intact and learn only a small set of new # parameters if dnn_model.params[i].name == 'c': continue else: old_val = existing_dnn_model.params[k].get_value() new_val = dnn_model.params[i].get_value() if numpy.shape(old_val) == numpy.shape(new_val): dnn_model.params[i].set_value(old_val) else: sys.exit('old and new weight matrices have different shapes') k = k + 1 train_fn, valid_fn = dnn_model.build_finetune_functions( (train_set_x, train_set_y), (valid_set_x, valid_set_y), use_lhuc) #, batch_size=batch_size logger.info('fine-tuning the %s model' %(model_type)) start_time = time.time() best_dnn_model = dnn_model best_validation_loss = sys.float_info.max previous_loss = sys.float_info.max lr_decay = cfg.lr_decay if lr_decay>0: early_stop_epoch *= lr_decay early_stop = 0 val_loss_counter = 0 previous_finetune_lr = finetune_lr epoch = 0 while (epoch < training_epochs): epoch = epoch + 1 if lr_decay==0: # fixed learning rate reduce_lr = False elif lr_decay<0: # exponential decay reduce_lr = False if epoch <= warmup_epoch else True elif val_loss_counter > 0: # linear decay reduce_lr = False if val_loss_counter%lr_decay==0: reduce_lr = True val_loss_counter = 0 else: # no decay reduce_lr = False if reduce_lr: current_finetune_lr = previous_finetune_lr * 0.5 current_momentum = momentum else: current_finetune_lr = previous_finetune_lr current_momentum = warmup_momentum previous_finetune_lr = current_finetune_lr train_error = [] sub_start_time = time.time() logger.debug("training params -- learning rate: %f, early_stop: %d/%d" % 
(current_finetune_lr, early_stop, early_stop_epoch)) while (not train_data_reader.is_finish()): _, temp_train_set_x, temp_train_set_y = train_data_reader.load_one_partition() # if sequential training, the batch size will be the number of frames in an utterance # batch_size for sequential training is considered only when rnn_batch_training is set to True if sequential_training == True: batch_size = temp_train_set_x.shape[0] n_train_batches = temp_train_set_x.shape[0] // batch_size for index in range(n_train_batches): ## send a batch to the shared variable, rather than pass the batch size and batch index to the finetune function train_set_x.set_value(numpy.asarray(temp_train_set_x[index*batch_size:(index + 1)*batch_size], dtype=theano.config.floatX), borrow=True) train_set_y.set_value(numpy.asarray(temp_train_set_y[index*batch_size:(index + 1)*batch_size], dtype=theano.config.floatX), borrow=True) this_train_error = train_fn(current_finetune_lr, current_momentum) train_error.append(this_train_error) train_data_reader.reset() logger.debug('calculating validation loss') validation_losses = [] while (not valid_data_reader.is_finish()): shared_valid_set_xy, temp_valid_set_x, temp_valid_set_y = valid_data_reader.load_one_partition() valid_set_x.set_value(numpy.asarray(temp_valid_set_x, dtype=theano.config.floatX), borrow=True) valid_set_y.set_value(numpy.asarray(temp_valid_set_y, dtype=theano.config.floatX), borrow=True) this_valid_loss = valid_fn() validation_losses.append(this_valid_loss) valid_data_reader.reset() this_validation_loss = numpy.mean(validation_losses) this_train_valid_loss = numpy.mean(numpy.asarray(train_error)) sub_end_time = time.time() loss_difference = this_validation_loss - previous_loss logger.info('epoch %i, validation error %f, train error %f time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time))) if plot: plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss)) plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss)) plotlogger.save_plot('training convergence',title='Progress of training and validation error',xlabel='epochs',ylabel='error') if this_validation_loss < best_validation_loss: pickle.dump(best_dnn_model, open(nnets_file_name, 'wb')) best_dnn_model = dnn_model best_validation_loss = this_validation_loss if this_validation_loss >= previous_loss: logger.debug('validation loss increased') val_loss_counter+=1 early_stop+=1 if epoch > 15 and early_stop > early_stop_epoch: logger.debug('stopping early') break if math.isnan(this_validation_loss): break previous_loss = this_validation_loss end_time = time.time() logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss)) if plot: plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error') return best_validation_loss def dnn_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list, reshape_io=False): logger = logging.getLogger("dnn_generation") logger.debug('Starting dnn_generation') plotlogger = logging.getLogger("plotting") dnn_model = pickle.load(open(nnets_file_name, 'rb')) file_number = len(valid_file_list) for i in range(file_number): #file_number logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) ) fid_lab = open(valid_file_list[i], 'rb') features = numpy.fromfile(fid_lab, dtype=numpy.float32) fid_lab.close() features = 
features[:(n_ins * (features.size // n_ins))] test_set_x = features.reshape((-1, n_ins)) n_rows = test_set_x.shape[0] if reshape_io: test_set_x = numpy.reshape(test_set_x, (1, test_set_x.shape[0], n_ins)) test_set_x = numpy.array(test_set_x, 'float32') predicted_parameter = dnn_model.parameter_prediction(test_set_x) predicted_parameter = predicted_parameter.reshape(-1, n_outs) predicted_parameter = predicted_parameter[0:n_rows] ### write to cmp file predicted_parameter = numpy.array(predicted_parameter, 'float32') temp_parameter = predicted_parameter fid = open(out_file_list[i], 'wb') predicted_parameter.tofile(fid) logger.debug('saved to %s' % out_file_list[i]) fid.close() ##generate bottleneck layer as features def dnn_hidden_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list, bottleneck_index): logger = logging.getLogger("dnn_generation") logger.debug('Starting dnn_generation') plotlogger = logging.getLogger("plotting") dnn_model = pickle.load(open(nnets_file_name, 'rb')) file_number = len(valid_file_list) for i in range(file_number): logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) ) fid_lab = open(valid_file_list[i], 'rb') features = numpy.fromfile(fid_lab, dtype=numpy.float32) fid_lab.close() features = features[:(n_ins * (features.size // n_ins))] features = features.reshape((-1, n_ins)) temp_set_x = features.tolist() test_set_x = theano.shared(numpy.asarray(temp_set_x, dtype=theano.config.floatX)) predicted_parameter = dnn_model.generate_hidden_layer(test_set_x, bottleneck_index) ### write to cmp file predicted_parameter = numpy.array(predicted_parameter, 'float32') temp_parameter = predicted_parameter fid = open(out_file_list[i], 'wb') predicted_parameter.tofile(fid) logger.debug('saved to %s' % out_file_list[i]) fid.close() def main_function(cfg): file_paths = FilePaths(cfg) # get a logger for this main function logger = logging.getLogger("main") # get another logger to handle plotting duties plotlogger = logging.getLogger("plotting") # later, we might do this via a handler that is created, attached and configured # using the standard config mechanism of the logging module # but for now we need to do it manually plotlogger.set_plot_path(cfg.plot_dir) # create plot dir if set to True if not os.path.exists(cfg.plot_dir) and cfg.plot: os.makedirs(cfg.plot_dir) #### parameter setting######## hidden_layer_size = cfg.hyper_params['hidden_layer_size'] ####prepare environment try: file_id_list = read_file_list(cfg.file_id_scp) logger.debug('Loaded file id list from %s' % cfg.file_id_scp) except IOError: # this means that open(...) 
threw an error logger.critical('Could not load file id list from %s' % cfg.file_id_scp) raise ###total file number including training, development, and testing total_file_number = len(file_id_list) assert cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number == total_file_number, 'check train, valid, test file number' data_dir = cfg.data_dir inter_data_dir = cfg.inter_data_dir nn_cmp_dir = file_paths.nn_cmp_dir nn_cmp_norm_dir = file_paths.nn_cmp_norm_dir model_dir = file_paths.model_dir gen_dir = file_paths.gen_dir in_file_list_dict = {} for feature_name in list(cfg.in_dir_dict.keys()): in_file_list_dict[feature_name] = prepare_file_path_list(file_id_list, cfg.in_dir_dict[feature_name], cfg.file_extension_dict[feature_name], False) nn_cmp_file_list = file_paths.get_nn_cmp_file_list() nn_cmp_norm_file_list = file_paths.get_nn_cmp_norm_file_list() ###normalisation information norm_info_file = file_paths.norm_info_file ### normalise input full context label # currently supporting two different forms of lingustic features # later, we should generalise this assert cfg.label_style == 'HTS', 'Only HTS-style labels are now supported as input to Merlin' label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name, add_frame_features=cfg.add_frame_features, subphone_feats=cfg.subphone_feats) add_feat_dim = sum(cfg.additional_features.values()) lab_dim = label_normaliser.dimension + add_feat_dim + cfg.appended_input_dim if cfg.VoiceConversion: lab_dim = cfg.cmp_dim logger.info('Input label dimension is %d' % lab_dim) suffix=str(lab_dim) if cfg.process_labels_in_work_dir: inter_data_dir = cfg.work_dir # the number can be removed file_paths.set_label_dir(label_normaliser.dimension, suffix, lab_dim) file_paths.set_label_file_list() binary_label_dir = file_paths.binary_label_dir nn_label_dir = file_paths.nn_label_dir nn_label_norm_dir = file_paths.nn_label_norm_dir in_label_align_file_list = file_paths.in_label_align_file_list binary_label_file_list = file_paths.binary_label_file_list nn_label_file_list = file_paths.nn_label_file_list nn_label_norm_file_list = file_paths.nn_label_norm_file_list min_max_normaliser = None label_norm_file = file_paths.label_norm_file test_id_list = file_paths.test_id_list if cfg.NORMLAB: # simple HTS labels logger.info('preparing label data (input) using standard HTS style labels') label_normaliser.perform_normalisation(in_label_align_file_list, binary_label_file_list, label_type=cfg.label_type) if cfg.additional_features: out_feat_file_list = file_paths.out_feat_file_list in_dim = label_normaliser.dimension for new_feature, new_feature_dim in cfg.additional_features.items(): new_feat_dir = os.path.join(data_dir, new_feature) new_feat_file_list = prepare_file_path_list(file_id_list, new_feat_dir, '.'+new_feature) merger = MergeFeat(lab_dim = in_dim, feat_dim = new_feature_dim) merger.merge_data(binary_label_file_list, new_feat_file_list, out_feat_file_list) in_dim += new_feature_dim binary_label_file_list = out_feat_file_list remover = SilenceRemover(n_cmp = lab_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type, remove_frame_features = cfg.add_frame_features, subphone_feats = cfg.subphone_feats) remover.remove_silence(binary_label_file_list, in_label_align_file_list, nn_label_file_list) min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99) ###use only training data to find min-max information, then apply on the whole dataset if cfg.GenTestList: 
min_max_normaliser.load_min_max_values(label_norm_file) else: min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number]) ### enforce silence such that the normalization runs without removing silence: only for final synthesis if cfg.GenTestList and cfg.enforce_silence: min_max_normaliser.normalise_data(binary_label_file_list, nn_label_norm_file_list) else: min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list) if min_max_normaliser != None and not cfg.GenTestList: ### save label normalisation information for unseen testing labels label_min_vector = min_max_normaliser.min_vector label_max_vector = min_max_normaliser.max_vector label_norm_info = numpy.concatenate((label_min_vector, label_max_vector), axis=0) label_norm_info = numpy.array(label_norm_info, 'float32') fid = open(label_norm_file, 'wb') label_norm_info.tofile(fid) fid.close() logger.info('saved %s vectors to %s' %(label_min_vector.size, label_norm_file)) ### make output duration data if cfg.MAKEDUR: logger.info('creating duration (output) features') label_normaliser.prepare_dur_data(in_label_align_file_list, file_paths.dur_file_list, cfg.label_type, cfg.dur_feature_type) ### make output acoustic data if cfg.MAKECMP: logger.info('creating acoustic (output) features') delta_win = cfg.delta_win #[-0.5, 0.0, 0.5] acc_win = cfg.acc_win #[1.0, -2.0, 1.0] if cfg.GenTestList: for feature_name in list(cfg.in_dir_dict.keys()): in_file_list_dict[feature_name] = prepare_file_path_list(test_id_list, cfg.in_dir_dict[feature_name], cfg.file_extension_dict[feature_name], False) nn_cmp_file_list = prepare_file_path_list(test_id_list, nn_cmp_dir, cfg.cmp_ext) nn_cmp_norm_file_list = prepare_file_path_list(test_id_list, nn_cmp_norm_dir, cfg.cmp_ext) acoustic_worker = AcousticComposition(delta_win = delta_win, acc_win = acc_win) if 'dur' in list(cfg.in_dir_dict.keys()) and cfg.AcousticModel: lf0_file_list = file_paths.get_lf0_file_list() acoustic_worker.make_equal_frames(dur_file_list, lf0_file_list, cfg.in_dimension_dict) acoustic_worker.prepare_nn_data(in_file_list_dict, nn_cmp_file_list, cfg.in_dimension_dict, cfg.out_dimension_dict) if cfg.remove_silence_using_binary_labels: ## do this to get lab_dim: label_composer = LabelComposer() label_composer.load_label_configuration(cfg.label_config_file) lab_dim=label_composer.compute_label_dimension() silence_feature = 0 ## use first feature in label -- hardcoded for now logger.info('Silence removal from CMP using binary label file') ## overwrite the untrimmed audio with the trimmed version: trim_silence(nn_cmp_file_list, nn_cmp_file_list, cfg.cmp_dim, binary_label_file_list, lab_dim, silence_feature) elif cfg.remove_silence_using_hts_labels: ## back off to previous method using HTS labels: remover = SilenceRemover(n_cmp = cfg.cmp_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type, remove_frame_features = cfg.add_frame_features, subphone_feats = cfg.subphone_feats) remover.remove_silence(nn_cmp_file_list, in_label_align_file_list, nn_cmp_file_list) # save to itself ### save acoustic normalisation information for normalising the features back var_dir = file_paths.var_dir var_file_dict = file_paths.get_var_dic() ### normalise output acoustic data if cfg.NORMCMP: logger.info('normalising acoustic (output) features using method %s' % cfg.output_feature_normalisation) cmp_norm_info = None if cfg.output_feature_normalisation == 'MVN': normaliser = MeanVarianceNorm(feature_dimension=cfg.cmp_dim) if cfg.GenTestList: # load mean std values 
global_mean_vector, global_std_vector = normaliser.load_mean_std_values(norm_info_file) else: ###calculate mean and std vectors on the training data, and apply on the whole dataset global_mean_vector = normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number], 0, cfg.cmp_dim) global_std_vector = normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector, 0, cfg.cmp_dim) # for hmpd vocoder we don't need to normalize the # pdd values if cfg.vocoder_type == 'hmpd': stream_start_index = {} dimension_index = 0 recorded_vuv = False vuv_dimension = None for feature_name in cfg.out_dimension_dict.keys(): if feature_name != 'vuv': stream_start_index[feature_name] = dimension_index else: vuv_dimension = dimension_index recorded_vuv = True dimension_index += cfg.out_dimension_dict[feature_name] logger.info('hmpd pdd values are not normalized since they are in 0 to 1') global_mean_vector[:,stream_start_index['pdd']: stream_start_index['pdd'] + cfg.out_dimension_dict['pdd']] = 0 global_std_vector[:,stream_start_index['pdd']: stream_start_index['pdd'] + cfg.out_dimension_dict['pdd']] = 1 normaliser.feature_normalisation(nn_cmp_file_list, nn_cmp_norm_file_list) cmp_norm_info = numpy.concatenate((global_mean_vector, global_std_vector), axis=0) elif cfg.output_feature_normalisation == 'MINMAX': min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim, min_value = 0.01, max_value = 0.99) if cfg.GenTestList: min_max_normaliser.load_min_max_values(norm_info_file) else: min_max_normaliser.find_min_max_values(nn_cmp_file_list[0:cfg.train_file_number]) min_max_normaliser.normalise_data(nn_cmp_file_list, nn_cmp_norm_file_list) cmp_min_vector = min_max_normaliser.min_vector cmp_max_vector = min_max_normaliser.max_vector cmp_norm_info = numpy.concatenate((cmp_min_vector, cmp_max_vector), axis=0) else: logger.critical('Normalisation type %s is not supported!\n' %(cfg.output_feature_normalisation)) raise if not cfg.GenTestList: cmp_norm_info = numpy.array(cmp_norm_info, 'float32') fid = open(norm_info_file, 'wb') cmp_norm_info.tofile(fid) fid.close() logger.info('saved %s vectors to %s' %(cfg.output_feature_normalisation, norm_info_file)) feature_index = 0 for feature_name in list(cfg.out_dimension_dict.keys()): feature_std_vector = numpy.array(global_std_vector[:,feature_index:feature_index+cfg.out_dimension_dict[feature_name]], 'float32') fid = open(var_file_dict[feature_name], 'w') feature_var_vector = feature_std_vector**2 feature_var_vector.tofile(fid) fid.close() logger.info('saved %s variance vector to %s' %(feature_name, var_file_dict[feature_name])) feature_index += cfg.out_dimension_dict[feature_name] train_x_file_list, train_y_file_list = file_paths.get_train_list_x_y() valid_x_file_list, valid_y_file_list = file_paths.get_valid_list_x_y() test_x_file_list, test_y_file_list = file_paths.get_test_list_x_y() # we need to know the label dimension before training the DNN # computing that requires us to look at the labels # label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name, add_frame_features=cfg.add_frame_features, subphone_feats=cfg.subphone_feats) add_feat_dim = sum(cfg.additional_features.values()) lab_dim = label_normaliser.dimension + add_feat_dim + cfg.appended_input_dim if cfg.VoiceConversion: lab_dim = cfg.cmp_dim logger.info('label dimension is %d' % lab_dim) combined_model_arch = str(len(hidden_layer_size)) for hid_size in hidden_layer_size: combined_model_arch += '_' + str(hid_size) nnets_file_name = 
file_paths.get_nnets_file_name() temp_dir_name = file_paths.get_temp_nn_dir_name() gen_dir = os.path.join(gen_dir, temp_dir_name) if cfg.switch_to_keras or cfg.switch_to_tensorflow: ### set configuration variables ### cfg.inp_dim = lab_dim cfg.out_dim = cfg.cmp_dim cfg.inp_feat_dir = nn_label_norm_dir cfg.out_feat_dir = nn_cmp_norm_dir cfg.pred_feat_dir = gen_dir if cfg.GenTestList and cfg.test_synth_dir!="None": cfg.inp_feat_dir = cfg.test_synth_dir cfg.pred_feat_dir = cfg.test_synth_dir if cfg.switch_to_keras: ### call kerasclass and use an instance ### from run_keras_with_merlin_io import KerasClass keras_instance = KerasClass(cfg) elif cfg.switch_to_tensorflow: ### call Tensorflowclass and use an instance ### from run_tensorflow_with_merlin_io import TensorflowClass tf_instance = TensorflowClass(cfg) ### DNN model training if cfg.TRAINDNN: var_dict = load_covariance(var_file_dict, cfg.out_dimension_dict) logger.info('training DNN') fid = open(norm_info_file, 'rb') cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32) fid.close() cmp_min_max = cmp_min_max.reshape((2, -1)) cmp_mean_vector = cmp_min_max[0, ] cmp_std_vector = cmp_min_max[1, ] try: os.makedirs(model_dir) except OSError as e: if e.errno == errno.EEXIST: # not an error - just means directory already exists pass else: logger.critical('Failed to create model directory %s' % model_dir) logger.critical(' OS error was: %s' % e.strerror) raise try: if cfg.switch_to_keras: keras_instance.train_keras_model() elif cfg.switch_to_tensorflow: tf_instance.train_tensorflow_model() else: train_DNN(train_xy_file_list = (train_x_file_list, train_y_file_list), \ valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \ nnets_file_name = nnets_file_name, \ n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \ hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot, var_dict = var_dict, cmp_mean_vector = cmp_mean_vector, cmp_std_vector = cmp_std_vector,init_dnn_model_file=cfg.start_from_trained_model) except KeyboardInterrupt: logger.critical('train_DNN interrupted via keyboard') # Could 'raise' the exception further, but that causes a deep traceback to be printed # which we don't care about for a keyboard interrupt. 
So, just bail out immediately sys.exit(1) except: logger.critical('train_DNN threw an exception') raise if cfg.GENBNFEA: # Please only tune on this step when you want to generate bottleneck features from DNN gen_dir = file_paths.bottleneck_features bottleneck_size = min(hidden_layer_size) bottleneck_index = 0 for i in range(len(hidden_layer_size)): if hidden_layer_size[i] == bottleneck_size: bottleneck_index = i logger.info('generating bottleneck features from DNN') try: os.makedirs(gen_dir) except OSError as e: if e.errno == errno.EEXIST: # not an error - just means directory already exists pass else: logger.critical('Failed to create generation directory %s' % gen_dir) logger.critical(' OS error was: %s' % e.strerror) raise gen_file_id_list = file_id_list[0:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] test_x_file_list = nn_label_norm_file_list[0:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext) dnn_hidden_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list, bottleneck_index) ### generate parameters from DNN gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] test_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] if cfg.GenTestList: gen_file_id_list = test_id_list test_x_file_list = nn_label_norm_file_list if cfg.test_synth_dir!="None": gen_dir = cfg.test_synth_dir if cfg.DNNGEN: logger.info('generating from DNN') try: os.makedirs(gen_dir) except OSError as e: if e.errno == errno.EEXIST: # not an error - just means directory already exists pass else: logger.critical('Failed to create generation directory %s' % gen_dir) logger.critical(' OS error was: %s' % e.strerror) raise gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext) if cfg.switch_to_keras: keras_instance.test_keras_model() elif cfg.switch_to_tensorflow: tf_instance.test_tensorflow_model() else: reshape_io = True if cfg.rnn_batch_training else False dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list, reshape_io) logger.debug('denormalising generated output using method %s' % cfg.output_feature_normalisation) fid = open(norm_info_file, 'rb') cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32) fid.close() cmp_min_max = cmp_min_max.reshape((2, -1)) cmp_min_vector = cmp_min_max[0, ] cmp_max_vector = cmp_min_max[1, ] if cfg.output_feature_normalisation == 'MVN': denormaliser = MeanVarianceNorm(feature_dimension = cfg.cmp_dim) denormaliser.feature_denormalisation(gen_file_list, gen_file_list, cmp_min_vector, cmp_max_vector) elif cfg.output_feature_normalisation == 'MINMAX': denormaliser = MinMaxNormalisation(cfg.cmp_dim, min_value = 0.01, max_value = 0.99, min_vector = cmp_min_vector, max_vector = cmp_max_vector) denormaliser.denormalise_data(gen_file_list, gen_file_list) else: logger.critical('denormalising method %s is not supported!\n' %(cfg.output_feature_normalisation)) raise if cfg.AcousticModel: ##perform MLPG to smooth parameter trajectory ## lf0 is included, the output features much have vuv. 
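            # Schematically, MLPG solves the maximum-likelihood trajectory
            # problem per stream:
            #   o* = argmax_o N(W o; mu, Sigma)  <=>  (W^T Sigma^-1 W) o* = W^T Sigma^-1 mu
            # where W stacks the static/delta/acc windows (cfg.delta_win,
            # cfg.acc_win) and mu, Sigma come from the network outputs and the
            # saved variances in var_file_dict.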
generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features, enforce_silence = cfg.enforce_silence) generator.acoustic_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict, var_file_dict, do_MLPG=cfg.do_MLPG, cfg=cfg) if cfg.DurationModel: ### Perform duration normalization(min. state dur set to 1) ### gen_dur_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.dur_ext) gen_label_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.lab_ext) in_gen_label_align_file_list = prepare_file_path_list(gen_file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False) generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features) generator.duration_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict) label_modifier = HTSLabelModification(silence_pattern = cfg.silence_pattern, label_type = cfg.label_type) label_modifier.modify_duration_labels(in_gen_label_align_file_list, gen_dur_list, gen_label_list) ### generate wav if cfg.GENWAV: logger.info('reconstructing waveform(s)') generate_wav(gen_dir, gen_file_id_list, cfg) # generated speech # generate_wav(nn_cmp_dir, gen_file_id_list, cfg) # reference copy synthesis speech ### setting back to original conditions before calculating objective scores ### if cfg.GenTestList: in_label_align_file_list = prepare_file_path_list(file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False) binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext) gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] ### evaluation: RMSE and CORR for duration if cfg.CALMCD and cfg.DurationModel: logger.info('calculating MCD') ref_data_dir = os.path.join(inter_data_dir, 'ref_data') ref_dur_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.dur_ext) in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] calculator = IndividualDistortionComp() valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number] test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] if cfg.remove_silence_using_binary_labels: untrimmed_reference_data = in_file_list_dict['dur'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] trim_silence(untrimmed_reference_data, ref_dur_list, cfg.dur_dim, \ untrimmed_test_labels, lab_dim, silence_feature) else: remover = SilenceRemover(n_cmp = cfg.dur_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type, remove_frame_features = cfg.add_frame_features) remover.remove_silence(in_file_list_dict['dur'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_dur_list) valid_dur_rmse, valid_dur_corr = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.dur_ext, cfg.dur_dim) test_dur_rmse, test_dur_corr = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.dur_ext, cfg.dur_dim) logger.info('Develop: DNN -- RMSE: %.3f frames/phoneme; CORR: %.3f; ' \ %(valid_dur_rmse, valid_dur_corr)) logger.info('Test: DNN -- RMSE: %.3f frames/phoneme; CORR: %.3f; ' \ %(test_dur_rmse, test_dur_corr)) ### evaluation: calculate distortion if cfg.CALMCD and cfg.AcousticModel: logger.info('calculating 
MCD') ref_data_dir = os.path.join(inter_data_dir, 'ref_data') ref_lf0_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.lf0_ext) # for straight or world vocoders ref_mgc_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.mgc_ext) ref_bap_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.bap_ext) # for GlottDNN vocoder ref_lsf_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.lsf_ext) ref_slsf_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.slsf_ext) ref_gain_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.gain_ext) ref_hnr_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.hnr_ext) # for pulsemodel vocoder ref_pdd_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.pdd_ext) in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] calculator = IndividualDistortionComp() spectral_distortion = 0.0 bap_mse = 0.0 f0_mse = 0.0 vuv_error = 0.0 valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number] test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] if cfg.remove_silence_using_binary_labels: ## get lab_dim: label_composer = LabelComposer() label_composer.load_label_configuration(cfg.label_config_file) lab_dim=label_composer.compute_label_dimension() ## use first feature in label -- hardcoded for now silence_feature = 0 ## Use these to trim silence: untrimmed_test_labels = binary_label_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] if 'mgc' in cfg.in_dimension_dict: if cfg.remove_silence_using_binary_labels: untrimmed_reference_data = in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] trim_silence(untrimmed_reference_data, ref_mgc_list, cfg.mgc_dim, \ untrimmed_test_labels, lab_dim, silence_feature) elif cfg.remove_silence_using_hts_labels: remover = SilenceRemover(n_cmp = cfg.mgc_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type) remover.remove_silence(in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_mgc_list) else: ref_data_dir = os.path.join(data_dir, 'mgc') valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim) test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim) valid_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD test_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD if 'bap' in cfg.in_dimension_dict: if cfg.remove_silence_using_binary_labels: untrimmed_reference_data = in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] trim_silence(untrimmed_reference_data, ref_bap_list, cfg.bap_dim, \ untrimmed_test_labels, lab_dim, silence_feature) elif cfg.remove_silence_using_hts_labels: remover = SilenceRemover(n_cmp = cfg.bap_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type) remover.remove_silence(in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_bap_list) else: 
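                # no silence removal configured: compare against the raw
                # extracted bap features straight from the data directory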
ref_data_dir = os.path.join(data_dir, 'bap') valid_bap_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim) test_bap_mse = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim) valid_bap_mse = valid_bap_mse / 10.0 ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC test_bap_mse = test_bap_mse / 10.0 ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC if 'lf0' in cfg.in_dimension_dict: if cfg.remove_silence_using_binary_labels: untrimmed_reference_data = in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] trim_silence(untrimmed_reference_data, ref_lf0_list, cfg.lf0_dim, \ untrimmed_test_labels, lab_dim, silence_feature) elif cfg.remove_silence_using_hts_labels: remover = SilenceRemover(n_cmp = cfg.lf0_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type) remover.remove_silence(in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_lf0_list) else: ref_data_dir = os.path.join(data_dir, 'lf0') valid_f0_mse, valid_f0_corr, valid_vuv_error = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim) test_f0_mse , test_f0_corr, test_vuv_error = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim) if 'lsf' in cfg.in_dimension_dict: if cfg.remove_silence_using_binary_labels: untrimmed_reference_data = in_file_list_dict['lsf'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] trim_silence(untrimmed_reference_data, ref_lsf_list, cfg.lsf_dim, \ untrimmed_test_labels, lab_dim, silence_feature) else: remover = SilenceRemover(n_cmp = cfg.lsf_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type) remover.remove_silence(in_file_list_dict['lsf'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_lsf_list) valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.lsf_ext, cfg.lsf_dim) test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.lsf_ext, cfg.lsf_dim) if 'slsf' in cfg.in_dimension_dict: if cfg.remove_silence_using_binary_labels: untrimmed_reference_data = in_file_list_dict['slsf'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] trim_silence(untrimmed_reference_data, ref_slsf_list, cfg.slsf_dim, \ untrimmed_test_labels, lab_dim, silence_feature) else: remover = SilenceRemover(n_cmp = cfg.slsf_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type) remover.remove_silence(in_file_list_dict['slsf'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_slsf_list) valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.slsf_ext, cfg.slsf_dim) test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.slsf_ext, cfg.slsf_dim) if 'hnr' in cfg.in_dimension_dict: if cfg.remove_silence_using_binary_labels: untrimmed_reference_data = in_file_list_dict['hnr'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] 
trim_silence(untrimmed_reference_data, ref_hnr_list, cfg.hnr_dim, \ untrimmed_test_labels, lab_dim, silence_feature) else: remover = SilenceRemover(n_cmp = cfg.hnr_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type) remover.remove_silence(in_file_list_dict['hnr'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_hnr_list) valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.hnr_ext, cfg.hnr_dim) test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.hnr_ext, cfg.hnr_dim) if 'gain' in cfg.in_dimension_dict: if cfg.remove_silence_using_binary_labels: untrimmed_reference_data = in_file_list_dict['gain'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] trim_silence(untrimmed_reference_data, ref_gain_list, cfg.gain_dim, \ untrimmed_test_labels, lab_dim, silence_feature) else: remover = SilenceRemover(n_cmp = cfg.gain_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type) remover.remove_silence(in_file_list_dict['gain'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_gain_list) valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.gain_ext, cfg.gain_dim) test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.gain_ext, cfg.gain_dim) if 'pdd' in cfg.in_dimension_dict: if cfg.remove_silence_using_binary_labels: untrimmed_reference_data = in_file_list_dict['pdd'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number] trim_silence(untrimmed_reference_data, ref_pdd_list, cfg.pdd_dim, \ untrimmed_test_labels, lab_dim, silence_feature) else: remover = SilenceRemover(n_cmp = cfg.pdd_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type) remover.remove_silence(in_file_list_dict['pdd'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_pdd_list) valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.pdd_ext, cfg.pdd_dim) test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.pdd_ext, cfg.pdd_dim) logger.info('Develop: DNN -- MCD: %.3f dB; BAP: %.3f dB; F0:- RMSE: %.3f Hz; CORR: %.3f; VUV: %.3f%%' \ %(valid_spectral_distortion, valid_bap_mse, valid_f0_mse, valid_f0_corr, valid_vuv_error*100.)) logger.info('Test : DNN -- MCD: %.3f dB; BAP: %.3f dB; F0:- RMSE: %.3f Hz; CORR: %.3f; VUV: %.3f%%' \ %(test_spectral_distortion , test_bap_mse , test_f0_mse , test_f0_corr, test_vuv_error*100.)) if __name__ == '__main__': # these things should be done even before trying to parse the command line # create a configuration instance # and get a short name for this instance cfg=configuration.cfg # set up logging to use our custom class logging.setLoggerClass(LoggerPlotter) # get a logger for this main function logger = logging.getLogger("main") if len(sys.argv) != 2: logger.critical('usage: run_merlin.sh [config file name]') sys.exit(1) config_file = sys.argv[1] config_file = os.path.abspath(config_file) cfg.configure(config_file) logger.info('Installation information:') logger.info(' Merlin directory: '+os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))) 
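    # record the runtime environment (paths, interpreter and library
    # versions, git state below) so a run can be traced back to an exact
    # code and setup later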
logger.info(' PATH:') env_PATHs = os.getenv('PATH') if env_PATHs: env_PATHs = env_PATHs.split(':') for p in env_PATHs: if len(p)>0: logger.info(' '+p) logger.info(' LD_LIBRARY_PATH:') env_LD_LIBRARY_PATHs = os.getenv('LD_LIBRARY_PATH') if env_LD_LIBRARY_PATHs: env_LD_LIBRARY_PATHs = env_LD_LIBRARY_PATHs.split(':') for p in env_LD_LIBRARY_PATHs: if len(p)>0: logger.info(' '+p) logger.info(' Python version: '+sys.version.replace('\n','')) logger.info(' PYTHONPATH:') env_PYTHONPATHs = os.getenv('PYTHONPATH') if env_PYTHONPATHs: env_PYTHONPATHs = env_PYTHONPATHs.split(':') for p in env_PYTHONPATHs: if len(p)>0: logger.info(' '+p) logger.info(' Numpy version: '+numpy.version.version) logger.info(' Theano version: '+theano.version.version) logger.info(' THEANO_FLAGS: '+os.getenv('THEANO_FLAGS')) logger.info(' device: '+theano.config.device) # Check for the presence of git ret = os.system('git status > /dev/null') if ret==0: logger.info(' Git is available in the working directory:') git_describe = subprocess.Popen(['git', 'describe', '--tags', '--always'], stdout=subprocess.PIPE).communicate()[0][:-1] logger.info(' Merlin version: {}'.format(git_describe)) git_branch = subprocess.Popen(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], stdout=subprocess.PIPE).communicate()[0][:-1] logger.info(' branch: {}'.format(git_branch)) git_diff = subprocess.Popen(['git', 'diff', '--name-status'], stdout=subprocess.PIPE).communicate()[0] if sys.version_info.major >= 3: git_diff = git_diff.decode('utf-8') git_diff = git_diff.replace('\t',' ').split('\n') logger.info(' diff to Merlin version:') for filediff in git_diff: if len(filediff)>0: logger.info(' '+filediff) logger.info(' (all diffs logged in '+os.path.basename(cfg.log_file)+'.gitdiff'+')') os.system('git diff > '+cfg.log_file+'.gitdiff') logger.info('Execution information:') logger.info(' HOSTNAME: '+socket.getfqdn()) logger.info(' USER: '+os.getenv('USER')) logger.info(' PID: '+str(os.getpid())) PBS_JOBID = os.getenv('PBS_JOBID') if PBS_JOBID: logger.info(' PBS_JOBID: '+PBS_JOBID) if cfg.profile: logger.info('profiling is activated') import cProfile, pstats cProfile.run('main_function(cfg)', 'mainstats') # create a stream for the profiler to write to profiling_output = io.StringIO() p = pstats.Stats('mainstats', stream=profiling_output) # print stats to that stream # here we just report the top 10 functions, sorted by total amount of time spent in each p.strip_dirs().sort_stats('tottime').print_stats(10) # print the result to the log logger.info('---Profiling result follows---\n%s' % profiling_output.getvalue() ) profiling_output.close() logger.info('---End of profiling result---') else: main_function(cfg) sys.exit(0)
bajibabu/merlin
src/run_merlin.py
Python
apache-2.0
60,558
[ "NEURON" ]
552119ae90ff4089aebc1a389544d424417821c7838e89b6350bdedbeb749394
from ase import Atoms
from gpaw import GPAW
from gpaw.test import equal
from ase.units import Bohr, Hartree

# single He atom in a periodic 7.5 Bohr cubic cell on a 16^3 grid, PBE
a = 7.5 * Bohr
n = 16
atoms = Atoms('He', [(0.0, 0.0, 0.0)], cell=(a, a, a), pbc=True)
calc = GPAW(gpts=(n, n, n), nbands=1, xc='PBE')
atoms.set_calculator(calc)
e1 = atoms.get_potential_energy()
niter1 = calc.get_number_of_iterations()
e1a = calc.get_reference_energy()

# reconverge the same system with the revPBE functional
calc.set(xc='revPBE')
e2 = atoms.get_potential_energy()
niter2 = calc.get_number_of_iterations()
e2a = calc.get_reference_energy()

# reference energies for PBE and revPBE; the two totals should agree
# to within a few meV for He
equal(e1a, -2.893 * Hartree, 8e-3)
equal(e2a, -2.908 * Hartree, 9e-3)
equal(e1, e2, 4e-3)

# regression checks on absolute energies and SCF iteration counts
energy_tolerance = 0.000001
niter_tolerance = 0
equal(e1, -0.07904951, energy_tolerance)
equal(niter1, 14, niter_tolerance)
equal(e2, -0.08147563, energy_tolerance)
equal(niter2, 11, niter_tolerance)
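# For reference, gpaw.test.equal is an absolute-tolerance check; a minimal
# stand-in with the same contract (an assumption, not a copy of the GPAW
# source) would be:
#
#     def equal(x, y, tolerance=0):
#         assert abs(x - y) <= tolerance, '%r != %r (tolerance %r)' % (x, y, tolerance)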
qsnake/gpaw
gpaw/test/revPBE.py
Python
gpl-3.0
803
[ "ASE", "GPAW" ]
a517bd6198c278752f96c8acf3aaa9b9567552c69a35d99d806f5e3f29ad190c
# -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 ## ## Copyright (C) 2012 Async Open Source <http://www.async.com.br> ## All rights reserved ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, write to the Free Software ## Foundation, Inc., or visit: http://www.gnu.org/. ## ## Author(s): Stoq Team <stoq-devel@async.com.br> ## from stoqlib.domain.taxes import ProductTaxTemplate from stoqlib.gui.search.taxclasssearch import TaxTemplatesSearch from stoqlib.gui.test.uitestutils import GUITest class TestTaxClassSearch(GUITest): def test_search(self): ProductTaxTemplate(name=u'Test ICMS Tax Template', tax_type=ProductTaxTemplate.TYPE_ICMS, store=self.store) ProductTaxTemplate(name=u'Test IPI Tax Template', tax_type=ProductTaxTemplate.TYPE_IPI, store=self.store) search = TaxTemplatesSearch(self.store) search.search.refresh() self.check_search(search, 'product-tax-template')
andrebellafronte/stoq
stoqlib/gui/test/test_taxclasssearch.py
Python
gpl-2.0
1,600
[ "VisIt" ]
7a0ee14975250e1177dacc999d8a145bb379842b3a0cdf5de450041238b62cb8
# LAMMPS Documentation Utilities # # Copyright (C) 2015 Richard Berger # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import re def detect_local_toc(paragraph): local_toc_pattern = re.compile(r"[0-9]+\.[0-9]*\s+.+<BR>", re.MULTILINE) m = local_toc_pattern.match(paragraph) if m: return "" return paragraph def indent(content): indented = "" for line in content.splitlines(): indented += " %s\n" % line return indented def detect_and_format_notes(paragraph): note_pattern = re.compile(r"(?P<type>(IMPORTANT )?NOTE):\s+(?P<content>.+)", re.MULTILINE | re.DOTALL) if note_pattern.match(paragraph): m = note_pattern.match(paragraph) content = m.group('content') content = indent(content.strip()) if m.group('type') == 'IMPORTANT NOTE': paragraph = '.. warning::\n\n' + content + '\n' else: paragraph = '.. note::\n\n' + content + '\n' return paragraph def detect_and_add_command_to_index(content): command_pattern = re.compile(r"^(?P<command>.+) command\s*\n") m = command_pattern.match(content) if m: cmd = m.group('command') index = ".. index:: %s\n\n" % cmd return index + content return content def filter_file_header_until_first_horizontal_line(content): hr = '----------\n\n' first_hr = content.find(hr) common_links = "\n.. _lws: http://lammps.sandia.gov\n" \ ".. _ld: Manual.html\n" \ ".. _lc: Section_commands.html#comm\n" if first_hr >= 0: return content[first_hr+len(hr):].lstrip() + common_links return content def promote_doc_keywords(content): content = content.replace('**Syntax:**\n', 'Syntax\n' '""""""\n') content = content.replace('**Examples:**\n', 'Examples\n' '""""""""\n') content = content.replace('**Description:**\n', 'Description\n' '"""""""""""\n') content = content.replace('**Restart, fix_modify, output, run start/stop, minimize info:**\n', 'Restart, fix_modify, output, run start/stop, minimize info\n' '""""""""""""""""""""""""""""""""""""""""""""""""""""""""""\n') content = content.replace('**Restrictions:**', 'Restrictions\n' '""""""""""""\n') content = content.replace('**Related commands:**\n', 'Related commands\n' '""""""""""""""""\n') content = content.replace('**Default:**\n', 'Default\n' '"""""""\n') return content def filter_multiple_horizontal_rules(content): return re.sub(r"----------[\s\n]+----------", '', content)
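# Minimal usage sketch (hypothetical driver, not part of the original
# module): paragraphs carrying "NOTE:" / "IMPORTANT NOTE:" prefixes are
# rewritten into reST note/warning directives.
if __name__ == "__main__":
    sample = "IMPORTANT NOTE: back up your data first."
    # prints a ".. warning::" directive with the note text indented under it
    print(detect_and_format_notes(sample))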
zimmermant/dlvo_lammps
doc/utils/converters/lammpsdoc/lammps_filters.py
Python
gpl-2.0
3,497
[ "LAMMPS" ]
bc7d1d3f20d4aa876e63efc96e3c0a8902934229b0ac2aeac558512643a4d214
import numpy
import pylab
import os

# NOTE: on a headless machine, select a non-interactive matplotlib backend
# (e.g. matplotlib.use('Agg')) before pylab is imported, so savefig works.

# TODO change directory
path = "/home/alex/GitHub/NEUCOGAR/nest/dopamine/test_results/test400_8/"
dpi_n = 120


def spike_make_diagram(ts, gids, name, title, hist):
    """Raster plot of spike events, optionally with a firing-rate histogram."""
    pylab.figure()
    color_marker = "."
    color_bar = "blue"
    color_edge = "black"
    ylabel = "Neuron ID"
    if hist == "True":
        hist_binwidth = 5.0
        ax1 = pylab.axes([0.1, 0.3, 0.85, 0.6])
        pylab.plot(ts, gids, color_marker)
        pylab.ylabel(ylabel)
        pylab.xticks([])
        xlim = pylab.xlim()
        pylab.axes([0.1, 0.1, 0.85, 0.17])
        t_bins = numpy.arange(numpy.amin(ts), numpy.amax(ts), hist_binwidth)
        n, bins = pylab.histogram(ts, bins=t_bins)
        num_neurons = len(numpy.unique(gids))
        # spikes per bin -> mean rate in Hz: counts / (binwidth in s * #neurons)
        heights = 1000.0 * n / (hist_binwidth * num_neurons)
        # histogram() returns len(bins) - 1 counts, so the bars must be
        # anchored at the left bin edges
        pylab.bar(t_bins[:-1], heights, width=hist_binwidth, color=color_bar, edgecolor=color_edge)
        pylab.yticks([int(a) for a in numpy.linspace(0.0, int(max(heights) * 1.1) + 5, 4)])
        pylab.ylabel("Rate (Hz)")
        pylab.xlabel("Time (ms)")
        pylab.xlim(xlim)
        pylab.axes(ax1)
    else:
        pylab.plot(ts, gids, color_marker)
        pylab.xlabel("Time (ms)")
        pylab.ylabel(ylabel)
    pylab.title(title)
    pylab.draw()
    pylab.savefig(path + name + ".png", dpi=dpi_n, format='png')
    pylab.close()


def voltage_make_diagram(times, voltages, name, title):
    """Membrane-potential trace of a recorded neuron."""
    timeunit = "ms"
    line_style = ""
    if not len(times):
        # plain ValueError: `nest` is not imported in this standalone script
        raise ValueError("No events recorded! Make sure that withtime and withgid are set to True.")
    pylab.plot(times, voltages, line_style, label=title)
    pylab.ylabel("Membrane potential (mV)")
    pylab.xlabel("Time (%s)" % timeunit)
    pylab.legend(loc="best")
    pylab.title(title)
    pylab.draw()
    pylab.savefig(path + name + ".png", dpi=dpi_n, format='png')
    pylab.close()


block = [filename for filename in os.listdir(path) if filename[0] == "@"]
for filename in block:
    x_vals = []
    y_vals = []
    if filename.startswith('@spikes'):
        with open(path + filename, 'r') as f:
            header = f.readline()
            log = list(v.strip() for k, v in (item.split(':') for item in header.split(',')))
            for line in f:
                for item in line[line.index("[") + 1:line.index("]")].split(","):
                    x_vals.append(float(line[:6]))
                    y_vals.append(int(item))
            spike_make_diagram(x_vals, y_vals, log[0], log[1], log[2])
    else:
        with open(path + filename, 'r') as f:
            header = f.readline()
            log = list(v.strip() for k, v in (item.split(':') for item in header.split(',')))
            for line in f:
                x, y = line.split()
                # convert to float so pylab plots numbers, not strings
                x_vals.append(float(x))
                y_vals.append(float(y))
            voltage_make_diagram(x_vals, y_vals, log[0], log[1])
    print filename + " diagram created"
vitaliykomarov/NEUCOGAR
nest/common_scripts/build_diagram.py
Python
gpl-2.0
3,299
[ "NEURON" ]
a131138438c5a26cd184627a5c8a642437eb1fcb7de70435ec365b155992820f
# Copyright 2004 by Frank Kauff. All rights reserved. # Revisions copyright 2008-2013 by Peter Cock. All rights reserved. # Revisions copyright 2009-2009 by Michiel de Hoon. All rights reserved. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. import unittest from Bio.Sequencing import Ace class AceTestOne(unittest.TestCase): def setUp(self): self.handle = open("Ace/contig1.ace") def tearDown(self): self.handle.close() def test_check_ACEParser(self): """Test to check that ACEParser can parse the whole file into one record.""" record = Ace.read(self.handle) self.assertEqual(record.ncontigs, 2) self.assertEqual(record.nreads, 16) self.assertEqual(len(record.wa), 1) self.assertEqual(record.wa[0].tag_type, "phrap_params") self.assertEqual(record.wa[0].program, "phrap") self.assertEqual(record.wa[0].date, "040203:114710") self.assertEqual(record.wa[0].info, ['phrap 304_nuclsu.fasta.screen -new_ace -retain_duplicates', 'phrap version 0.990329']) self.assertEqual(len(record.contigs), 2) self.assertEqual(len(record.contigs[0].reads), 2) self.assertEqual(record.contigs[0].name, "Contig1") self.assertEqual(record.contigs[0].nbases, 856) self.assertEqual(record.contigs[0].nreads, 2) self.assertEqual(record.contigs[0].nsegments, 31) self.assertEqual(record.contigs[0].uorc, 'U') center = len(record.contigs[0].sequence)//2 self.assertEqual(record.contigs[0].sequence[:10], "aatacgGGAT") self.assertEqual(record.contigs[0].sequence[center-5:center+5], "ACATCATCTG") self.assertEqual(record.contigs[0].sequence[-10:], "cATCTAGtac") center = len(record.contigs[0].quality)//2 self.assertEqual(record.contigs[0].quality[:10], [0, 0, 0, 0, 0, 0, 22, 23, 25, 28]) self.assertEqual(record.contigs[0].quality[center-5:center+5], [90, 90, 90, 90, 90, 90, 90, 90, 90, 90]) self.assertEqual(record.contigs[0].quality[-10:], [15, 22, 30, 24, 28, 22, 21, 15, 19, 0]) self.assertEqual(len(record.contigs[0].af), 2) self.assertEqual(record.contigs[0].af[1].name, "BL060c3-LR0R.b.ab1") self.assertEqual(record.contigs[0].af[1].coru, "U") self.assertEqual(record.contigs[0].af[1].padded_start, 1) self.assertEqual(len(record.contigs[0].bs), 31) self.assertEqual(record.contigs[0].bs[15].name, "BL060c3-LR5.g.ab1") self.assertEqual(record.contigs[0].bs[15].padded_start, 434) self.assertEqual(record.contigs[0].bs[15].padded_end, 438) self.assertEqual(record.contigs[0].bs[30].name, "BL060c3-LR0R.b.ab1") self.assertEqual(record.contigs[0].bs[30].padded_start, 823) self.assertEqual(record.contigs[0].bs[30].padded_end, 856) self.assertEqual(len(record.contigs[0].ct), 1) self.assertEqual(record.contigs[0].ct[0].name, "Contig1") self.assertEqual(record.contigs[0].ct[0].tag_type, "repeat") self.assertEqual(record.contigs[0].ct[0].program, "phrap") self.assertEqual(record.contigs[0].ct[0].padded_start, 52) self.assertEqual(record.contigs[0].ct[0].padded_end, 53) self.assertEqual(record.contigs[0].ct[0].date, "555456:555432") self.assertEqual(record.contigs[0].ct[0].info, ['This is the forst line of comment for c1', 'and this the second for c1']) self.assertEqual(record.contigs[0].wa, None) self.assertEqual(len(record.contigs[0].reads), 2) self.assertEqual(record.contigs[0].reads[0].rd.name, "BL060c3-LR5.g.ab1") self.assertEqual(record.contigs[0].reads[0].rd.padded_bases, 868) self.assertEqual(record.contigs[0].reads[0].rd.info_items, 0) self.assertEqual(record.contigs[0].reads[0].rd.read_tags, 0) center = 
len(record.contigs[0].reads[0].rd.sequence)//2 self.assertEqual(record.contigs[0].reads[0].rd.sequence[:10], "tagcgaggaa") self.assertEqual(record.contigs[0].reads[0].rd.sequence[center-5:center+5], "CCGAGGCCAA") self.assertEqual(record.contigs[0].reads[0].rd.sequence[-10:], "gaaccatcag") self.assertEqual(record.contigs[0].reads[0].qa.qual_clipping_start, 80) self.assertEqual(record.contigs[0].reads[0].qa.qual_clipping_end, 853) self.assertEqual(record.contigs[0].reads[0].qa.align_clipping_start, 22) self.assertEqual(record.contigs[0].reads[0].qa.align_clipping_end, 856) self.assertEqual(record.contigs[0].reads[0].ds, None) self.assertEqual(len(record.contigs[0].reads[0].rt), 4) self.assertEqual(record.contigs[0].reads[0].rt[0].name, "BL060c3-LR5.g.ab1") self.assertEqual(record.contigs[0].reads[0].rt[0].tag_type, "matchElsewhereHighQual") self.assertEqual(record.contigs[0].reads[0].rt[0].program, "phrap") self.assertEqual(record.contigs[0].reads[0].rt[0].padded_start, 590) self.assertEqual(record.contigs[0].reads[0].rt[0].padded_end, 607) self.assertEqual(record.contigs[0].reads[0].rt[0].date, "040217:110357") self.assertEqual(record.contigs[0].reads[0].rt[1].name, "BL060c3-LR5.g.ab1") self.assertEqual(record.contigs[0].reads[0].rt[1].tag_type, "matchElsewhereHighQual") self.assertEqual(record.contigs[0].reads[0].rt[1].program, "phrap") self.assertEqual(record.contigs[0].reads[0].rt[1].padded_start, 617) self.assertEqual(record.contigs[0].reads[0].rt[1].padded_end, 631) self.assertEqual(record.contigs[0].reads[0].rt[1].date, "040217:110357") self.assertEqual(record.contigs[0].reads[0].rt[2].name, "BL060c3-LR5.g.ab1") self.assertEqual(record.contigs[0].reads[0].rt[2].tag_type, "matchElsewhereHighQual") self.assertEqual(record.contigs[0].reads[0].rt[2].program, "phrap") self.assertEqual(record.contigs[0].reads[0].rt[2].padded_start, 617) self.assertEqual(record.contigs[0].reads[0].rt[2].padded_end, 631) self.assertEqual(record.contigs[0].reads[0].rt[2].date, "040217:110357") self.assertEqual(record.contigs[0].reads[0].rt[3].name, "BL060c3-LR5.g.ab1") self.assertEqual(record.contigs[0].reads[0].rt[3].tag_type, "matchElsewhereHighQual") self.assertEqual(record.contigs[0].reads[0].rt[3].program, "phrap") self.assertEqual(record.contigs[0].reads[0].rt[3].padded_start, 617) self.assertEqual(record.contigs[0].reads[0].rt[3].padded_end, 631) self.assertEqual(record.contigs[0].reads[0].rt[3].date, "040217:110357") self.assertEqual(len(record.contigs[0].reads[0].wr), 1) self.assertEqual(record.contigs[0].reads[0].wr[0].name, "BL060c3-LR5.g.ab1") self.assertEqual(record.contigs[0].reads[0].wr[0].aligned, "unaligned") self.assertEqual(record.contigs[0].reads[0].wr[0].program, "phrap") self.assertEqual(record.contigs[0].reads[0].wr[0].date, "040217:110357") self.assertEqual(record.contigs[0].reads[1].rd.name, "BL060c3-LR0R.b.ab1") self.assertEqual(record.contigs[0].reads[1].rd.padded_bases, 856) self.assertEqual(record.contigs[0].reads[1].rd.info_items, 0) self.assertEqual(record.contigs[0].reads[1].rd.read_tags, 0) center = len(record.contigs[0].reads[1].rd.sequence)//2 self.assertEqual(record.contigs[0].reads[1].rd.sequence[:10], "aatacgGGAT") self.assertEqual(record.contigs[0].reads[1].rd.sequence[center-5:center+5], "ACATCATCTG") self.assertEqual(record.contigs[0].reads[1].rd.sequence[-10:], "cATCTAGtac") self.assertEqual(record.contigs[0].reads[1].qa.qual_clipping_start, 7) self.assertEqual(record.contigs[0].reads[1].qa.qual_clipping_end, 778) 
self.assertEqual(record.contigs[0].reads[1].qa.align_clipping_start, 1) self.assertEqual(record.contigs[0].reads[1].qa.align_clipping_end, 856) self.assertEqual(record.contigs[0].reads[1].ds, None) self.assertEqual(record.contigs[0].reads[1].rt, None) self.assertEqual(record.contigs[0].reads[1].wr, None) self.assertEqual(len(record.contigs[1].reads), 14) self.assertEqual(record.contigs[1].name, "Contig2") self.assertEqual(record.contigs[1].nbases, 3296) self.assertEqual(record.contigs[1].nreads, 14) self.assertEqual(record.contigs[1].nsegments, 214) self.assertEqual(record.contigs[1].uorc, 'U') center = len(record.contigs[1].sequence) // 2 self.assertEqual(record.contigs[1].sequence[:10], "cacggatgat") self.assertEqual(record.contigs[1].sequence[center-5:center+5], "TTTGAATATT") self.assertEqual(record.contigs[1].sequence[-10:], "Atccttgtag") center = len(record.contigs[1].quality) // 2 self.assertEqual(record.contigs[1].quality[:10], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) self.assertEqual(record.contigs[1].quality[center-5:center+5], [90, 90, 90, 90, 90, 90, 90, 90, 90, 90]) self.assertEqual(record.contigs[1].quality[-10:], [24, 0, 0, 0, 0, 0, 0, 0, 0, 0]) self.assertEqual(len(record.contigs[1].af), 14) self.assertEqual(record.contigs[1].af[7].name, "BL060-LR3R.b.ab1") self.assertEqual(record.contigs[1].af[7].coru, "C") self.assertEqual(record.contigs[1].af[7].padded_start, 1601) self.assertEqual(record.contigs[1].af[13].name, "BL060c2-LR0R.b.ab1") self.assertEqual(record.contigs[1].af[13].coru, "C") self.assertEqual(record.contigs[1].af[13].padded_start, 2445) self.assertEqual(len(record.contigs[1].bs), 214) self.assertEqual(record.contigs[1].bs[107].name, "BL060-c1-LR3R.b.ab1") self.assertEqual(record.contigs[1].bs[107].padded_start, 2286) self.assertEqual(record.contigs[1].bs[107].padded_end, 2292) self.assertEqual(record.contigs[1].bs[213].name, "BL060c2-LR0R.b.ab1") self.assertEqual(record.contigs[1].bs[213].padded_start, 3236) self.assertEqual(record.contigs[1].bs[213].padded_end, 3296) self.assertEqual(len(record.contigs[1].ct), 1) self.assertEqual(record.contigs[1].ct[0].name, "Contig2") self.assertEqual(record.contigs[1].ct[0].tag_type, "repeat") self.assertEqual(record.contigs[1].ct[0].program, "phrap") self.assertEqual(record.contigs[1].ct[0].padded_start, 42) self.assertEqual(record.contigs[1].ct[0].padded_end, 43) self.assertEqual(record.contigs[1].ct[0].date, "123456:765432") self.assertEqual(record.contigs[1].ct[0].info, ['This is the forst line of comment for c2', 'and this the second for c2']) self.assertEqual(len(record.contigs[1].wa), 1) self.assertEqual(record.contigs[1].wa[0].tag_type, "phrap_params") self.assertEqual(record.contigs[1].wa[0].program, "phrap") self.assertEqual(record.contigs[1].wa[0].date, "040203:114710") self.assertEqual(record.contigs[1].wa[0].info, ['phrap 304_nuclsu.fasta.screen -new_ace -retain_duplicates', 'phrap version 0.990329']) self.assertEqual(len(record.contigs[1].reads), 14) # Read 0 self.assertEqual(record.contigs[1].reads[0].rd.name, "BL060-c1-LR12.g.ab1") self.assertEqual(record.contigs[1].reads[0].rd.padded_bases, 862) self.assertEqual(record.contigs[1].reads[0].rd.info_items, 0) self.assertEqual(record.contigs[1].reads[0].rd.read_tags, 0) center = len(record.contigs[1].reads[0].rd.sequence)//2 self.assertEqual(record.contigs[1].reads[0].rd.sequence[:10], "cacggatgat") self.assertEqual(record.contigs[1].reads[0].rd.sequence[center-5:center+5], "GTTCTCGTTG") self.assertEqual(record.contigs[1].reads[0].rd.sequence[-10:], "CGTTTACCcg") 
self.assertEqual(record.contigs[1].reads[0].qa.qual_clipping_start, 81) self.assertEqual(record.contigs[1].reads[0].qa.qual_clipping_end, 842) self.assertEqual(record.contigs[1].reads[0].qa.align_clipping_start, 1) self.assertEqual(record.contigs[1].reads[0].qa.align_clipping_end, 862) self.assertEqual(record.contigs[1].reads[0].ds.chromat_file, "BL060-c1-LR12.g.ab1") self.assertEqual(record.contigs[1].reads[0].ds.phd_file, "BL060-c1-LR12.g.ab1.phd.1") self.assertEqual(record.contigs[1].reads[0].ds.time, "Tue Feb 3 11:01:16 2004") self.assertEqual(record.contigs[1].reads[0].ds.chem, "term") self.assertEqual(record.contigs[1].reads[0].ds.dye, "big") self.assertEqual(record.contigs[1].reads[0].ds.template, "") self.assertEqual(record.contigs[1].reads[0].ds.direction, "") self.assertEqual(record.contigs[1].reads[0].rt, None) self.assertEqual(record.contigs[1].reads[0].wr, None) # Read 1 self.assertEqual(record.contigs[1].reads[1].rd.name, "BL060-c1-LR11.g.ab1") self.assertEqual(record.contigs[1].reads[1].rd.padded_bases, 880) self.assertEqual(record.contigs[1].reads[1].rd.info_items, 0) self.assertEqual(record.contigs[1].reads[1].rd.read_tags, 0) center = len(record.contigs[1].reads[1].rd.sequence)//2 self.assertEqual(record.contigs[1].reads[1].rd.sequence[:10], "ctttctgacC") self.assertEqual(record.contigs[1].reads[1].rd.sequence[center-5:center+5], "CTGTGGTTTC") self.assertEqual(record.contigs[1].reads[1].rd.sequence[-10:], "cggagttacg") self.assertEqual(record.contigs[1].reads[1].qa.qual_clipping_start, 11) self.assertEqual(record.contigs[1].reads[1].qa.qual_clipping_end, 807) self.assertEqual(record.contigs[1].reads[1].qa.align_clipping_start, 8) self.assertEqual(record.contigs[1].reads[1].qa.align_clipping_end, 880) self.assertEqual(record.contigs[1].reads[1].ds.chromat_file, "BL060-c1-LR11.g.ab1") self.assertEqual(record.contigs[1].reads[1].ds.phd_file, "BL060-c1-LR11.g.ab1.phd.1") self.assertEqual(record.contigs[1].reads[1].ds.time, "Tue Feb 3 11:01:16 2004") self.assertEqual(record.contigs[1].reads[1].ds.chem, "term") self.assertEqual(record.contigs[1].reads[1].ds.dye, "big") self.assertEqual(record.contigs[1].reads[1].ds.template, "") self.assertEqual(record.contigs[1].reads[1].ds.direction, "") self.assertEqual(len(record.contigs[1].reads[1].rt), 0) self.assertEqual(record.contigs[1].reads[1].wr, None) # Read 2 self.assertEqual(record.contigs[1].reads[2].rd.name, "BL060-c1-LR9.g.ab1") self.assertEqual(record.contigs[1].reads[2].rd.padded_bases, 864) self.assertEqual(record.contigs[1].reads[2].rd.info_items, 0) self.assertEqual(record.contigs[1].reads[2].rd.read_tags, 0) center = len(record.contigs[1].reads[2].rd.sequence)//2 self.assertEqual(record.contigs[1].reads[2].rd.sequence[:10], "cacccaCTTT") self.assertEqual(record.contigs[1].reads[2].rd.sequence[center-5:center+5], "ACCAAACATT") self.assertEqual(record.contigs[1].reads[2].rd.sequence[-10:], "GGTAGCACgc") self.assertEqual(record.contigs[1].reads[2].qa.qual_clipping_start, 7) self.assertEqual(record.contigs[1].reads[2].qa.qual_clipping_end, 840) self.assertEqual(record.contigs[1].reads[2].qa.align_clipping_start, 4) self.assertEqual(record.contigs[1].reads[2].qa.align_clipping_end, 864) self.assertEqual(record.contigs[1].reads[2].ds.chromat_file, "BL060-c1-LR9.g.ab1") self.assertEqual(record.contigs[1].reads[2].ds.phd_file, "BL060-c1-LR9.g.ab1.phd.1") self.assertEqual(record.contigs[1].reads[2].ds.time, "Tue Feb 3 11:01:16 2004") self.assertEqual(record.contigs[1].reads[2].ds.chem, "term") 
self.assertEqual(record.contigs[1].reads[2].ds.dye, "big") self.assertEqual(record.contigs[1].reads[2].ds.template, "") self.assertEqual(record.contigs[1].reads[2].ds.direction, "") self.assertEqual(record.contigs[1].reads[2].rt, None) self.assertEqual(record.contigs[1].reads[2].wr, None) # Read 3 self.assertEqual(record.contigs[1].reads[3].rd.name, "BL060-c1-LR17R.b.ab1") self.assertEqual(record.contigs[1].reads[3].rd.padded_bases, 863) self.assertEqual(record.contigs[1].reads[3].rd.info_items, 0) self.assertEqual(record.contigs[1].reads[3].rd.read_tags, 0) center = len(record.contigs[1].reads[3].rd.sequence)//2 self.assertEqual(record.contigs[1].reads[3].rd.sequence[:10], "ctaattggcc") self.assertEqual(record.contigs[1].reads[3].rd.sequence[center-5:center+5], "GGAACCTTTC") self.assertEqual(record.contigs[1].reads[3].rd.sequence[-10:], "CAACCTgact") self.assertEqual(record.contigs[1].reads[3].qa.qual_clipping_start, 63) self.assertEqual(record.contigs[1].reads[3].qa.qual_clipping_end, 857) self.assertEqual(record.contigs[1].reads[3].qa.align_clipping_start, 1) self.assertEqual(record.contigs[1].reads[3].qa.align_clipping_end, 861) self.assertEqual(record.contigs[1].reads[3].ds.chromat_file, "BL060-c1-LR17R.b.ab1") self.assertEqual(record.contigs[1].reads[3].ds.phd_file, "BL060-c1-LR17R.b.ab1.phd.1") self.assertEqual(record.contigs[1].reads[3].ds.time, "Tue Feb 3 11:01:16 2004") self.assertEqual(record.contigs[1].reads[3].ds.chem, "term") self.assertEqual(record.contigs[1].reads[3].ds.dye, "big") self.assertEqual(record.contigs[1].reads[3].ds.template, "") self.assertEqual(record.contigs[1].reads[3].ds.direction, "") self.assertEqual(record.contigs[1].reads[3].rt, []) self.assertEqual(record.contigs[1].reads[3].wr, None) # Read 4 self.assertEqual(record.contigs[1].reads[4].rd.name, "BL060-LR8.5.g.ab1") self.assertEqual(record.contigs[1].reads[4].rd.padded_bases, 877) self.assertEqual(record.contigs[1].reads[4].rd.info_items, 0) self.assertEqual(record.contigs[1].reads[4].rd.read_tags, 0) center = len(record.contigs[1].reads[4].rd.sequence)//2 self.assertEqual(record.contigs[1].reads[4].rd.sequence[:10], "tgCTGCGGTT") self.assertEqual(record.contigs[1].reads[4].rd.sequence[center-5:center+5], "GGCAGTTTCA") self.assertEqual(record.contigs[1].reads[4].rd.sequence[-10:], "tactcataaa") self.assertEqual(record.contigs[1].reads[4].qa.qual_clipping_start, 13) self.assertEqual(record.contigs[1].reads[4].qa.qual_clipping_end, 729) self.assertEqual(record.contigs[1].reads[4].qa.align_clipping_start, 1) self.assertEqual(record.contigs[1].reads[4].qa.align_clipping_end, 877) self.assertEqual(record.contigs[1].reads[4].ds.chromat_file, "BL060-LR8.5.g.ab1") self.assertEqual(record.contigs[1].reads[4].ds.phd_file, "BL060-LR8.5.g.ab1.phd.1") self.assertEqual(record.contigs[1].reads[4].ds.time, "Fri Nov 14 09:46:03 2003") self.assertEqual(record.contigs[1].reads[4].ds.chem, "term") self.assertEqual(record.contigs[1].reads[4].ds.dye, "big") self.assertEqual(record.contigs[1].reads[4].ds.template, "") self.assertEqual(record.contigs[1].reads[4].ds.direction, "") self.assertEqual(record.contigs[1].reads[4].rt, None) self.assertEqual(record.contigs[1].reads[4].wr, None) # Read 5 self.assertEqual(record.contigs[1].reads[5].rd.name, "BL060-LR3R.b.ab1") self.assertEqual(record.contigs[1].reads[5].rd.padded_bases, 874) self.assertEqual(record.contigs[1].reads[5].rd.info_items, 0) self.assertEqual(record.contigs[1].reads[5].rd.read_tags, 0) center = len(record.contigs[1].reads[5].rd.sequence)//2 
self.assertEqual(record.contigs[1].reads[5].rd.sequence[:10], "ctCTTAGGAT") self.assertEqual(record.contigs[1].reads[5].rd.sequence[center-5:center+5], "AACTCACATT") self.assertEqual(record.contigs[1].reads[5].rd.sequence[-10:], "*CACCCAAac") self.assertEqual(record.contigs[1].reads[5].qa.qual_clipping_start, 65) self.assertEqual(record.contigs[1].reads[5].qa.qual_clipping_end, 874) self.assertEqual(record.contigs[1].reads[5].qa.align_clipping_start, 1) self.assertEqual(record.contigs[1].reads[5].qa.align_clipping_end, 874) self.assertEqual(record.contigs[1].reads[5].ds.chromat_file, "BL060-LR3R.b.ab1") self.assertEqual(record.contigs[1].reads[5].ds.phd_file, "BL060-LR3R.b.ab1.phd.1") self.assertEqual(record.contigs[1].reads[5].ds.time, "Fri Nov 14 09:46:03 2003") self.assertEqual(record.contigs[1].reads[5].ds.chem, "term") self.assertEqual(record.contigs[1].reads[5].ds.dye, "big") self.assertEqual(record.contigs[1].reads[5].ds.template, "") self.assertEqual(record.contigs[1].reads[5].ds.direction, "") self.assertEqual(record.contigs[1].reads[5].rt, None) self.assertEqual(record.contigs[1].reads[5].wr, None) # Read 6 self.assertEqual(record.contigs[1].reads[6].rd.name, "BL060-c1-LR3R.b.ab1") self.assertEqual(record.contigs[1].reads[6].rd.padded_bases, 864) self.assertEqual(record.contigs[1].reads[6].rd.info_items, 0) self.assertEqual(record.contigs[1].reads[6].rd.read_tags, 0) center = len(record.contigs[1].reads[6].rd.sequence)//2 self.assertEqual(record.contigs[1].reads[6].rd.sequence[:10], "CCaTGTCCAA") self.assertEqual(record.contigs[1].reads[6].rd.sequence[center-5:center+5], "AAGGGTT*CA") self.assertEqual(record.contigs[1].reads[6].rd.sequence[-10:], "ACACTCGCga") self.assertEqual(record.contigs[1].reads[6].qa.qual_clipping_start, 73) self.assertEqual(record.contigs[1].reads[6].qa.qual_clipping_end, 862) self.assertEqual(record.contigs[1].reads[6].qa.align_clipping_start, 1) self.assertEqual(record.contigs[1].reads[6].qa.align_clipping_end, 863) self.assertEqual(record.contigs[1].reads[6].ds.chromat_file, "BL060-c1-LR3R.b.ab1") self.assertEqual(record.contigs[1].reads[6].ds.phd_file, "BL060-c1-LR3R.b.ab1.phd.1") self.assertEqual(record.contigs[1].reads[6].ds.time, "Tue Feb 3 11:01:16 2004") self.assertEqual(record.contigs[1].reads[6].ds.chem, "term") self.assertEqual(record.contigs[1].reads[6].ds.dye, "big") self.assertEqual(record.contigs[1].reads[6].ds.template, "") self.assertEqual(record.contigs[1].reads[6].ds.direction, "") self.assertEqual(record.contigs[1].reads[6].rt, None) self.assertEqual(record.contigs[1].reads[6].wr, None) # Read 7 self.assertEqual(record.contigs[1].reads[7].rd.name, "BL060-LR3R.b.ab1") self.assertEqual(record.contigs[1].reads[7].rd.padded_bases, 857) self.assertEqual(record.contigs[1].reads[7].rd.info_items, 0) self.assertEqual(record.contigs[1].reads[7].rd.read_tags, 0) center = len(record.contigs[1].reads[7].rd.sequence)//2 self.assertEqual(record.contigs[1].reads[7].rd.sequence[:10], "agaaagagga") self.assertEqual(record.contigs[1].reads[7].rd.sequence[center-5:center+5], "nnnannnnnn") self.assertEqual(record.contigs[1].reads[7].rd.sequence[-10:], "gtctttgctc") self.assertEqual(record.contigs[1].reads[7].qa.qual_clipping_start, 548) self.assertEqual(record.contigs[1].reads[7].qa.qual_clipping_end, 847) self.assertEqual(record.contigs[1].reads[7].qa.align_clipping_start, 442) self.assertEqual(record.contigs[1].reads[7].qa.align_clipping_end, 854) self.assertEqual(record.contigs[1].reads[7].ds.chromat_file, "BL060-LR3R.b.ab1") 
self.assertEqual(record.contigs[1].reads[7].ds.phd_file, "BL060-LR3R.b.ab1.phd.1") self.assertEqual(record.contigs[1].reads[7].ds.time, "Fri Jan 16 09:01:10 2004") self.assertEqual(record.contigs[1].reads[7].ds.chem, "term") self.assertEqual(record.contigs[1].reads[7].ds.dye, "big") self.assertEqual(record.contigs[1].reads[7].ds.template, "") self.assertEqual(record.contigs[1].reads[7].ds.direction, "") self.assertEqual(record.contigs[1].reads[7].rt, None) self.assertEqual(record.contigs[1].reads[7].wr, None) # Read 8 self.assertEqual(record.contigs[1].reads[8].rd.name, "BL060-c1-LR7.g.ab1") self.assertEqual(record.contigs[1].reads[8].rd.padded_bases, 878) self.assertEqual(record.contigs[1].reads[8].rd.info_items, 0) self.assertEqual(record.contigs[1].reads[8].rd.read_tags, 0) center = len(record.contigs[1].reads[8].rd.sequence)//2 self.assertEqual(record.contigs[1].reads[8].rd.sequence[:10], "agTttc*ctc") self.assertEqual(record.contigs[1].reads[8].rd.sequence[center-5:center+5], "TCATAAAACT") self.assertEqual(record.contigs[1].reads[8].rd.sequence[-10:], "xxxxxxxxxx") self.assertEqual(record.contigs[1].reads[8].qa.qual_clipping_start, 20) self.assertEqual(record.contigs[1].reads[8].qa.qual_clipping_end, 798) self.assertEqual(record.contigs[1].reads[8].qa.align_clipping_start, 1) self.assertEqual(record.contigs[1].reads[8].qa.align_clipping_end, 798) self.assertEqual(record.contigs[1].reads[8].ds.chromat_file, "BL060-c1-LR7.g.ab1") self.assertEqual(record.contigs[1].reads[8].ds.phd_file, "BL060-c1-LR7.g.ab1.phd.1") self.assertEqual(record.contigs[1].reads[8].ds.time, "Tue Feb 3 11:01:16 2004") self.assertEqual(record.contigs[1].reads[8].ds.chem, "term") self.assertEqual(record.contigs[1].reads[8].ds.dye, "big") self.assertEqual(record.contigs[1].reads[8].ds.template, "") self.assertEqual(record.contigs[1].reads[8].ds.direction, "") self.assertEqual(record.contigs[1].reads[8].rt, None) self.assertEqual(record.contigs[1].reads[8].wr, None) # Read 9 self.assertEqual(record.contigs[1].reads[9].rd.name, "BL060-LR7.g.ab1") self.assertEqual(record.contigs[1].reads[9].rd.padded_bases, 880) self.assertEqual(record.contigs[1].reads[9].rd.info_items, 0) self.assertEqual(record.contigs[1].reads[9].rd.read_tags, 0) center = len(record.contigs[1].reads[9].rd.sequence)//2 self.assertEqual(record.contigs[1].reads[9].rd.sequence[:10], "ggctaCGCCc") self.assertEqual(record.contigs[1].reads[9].rd.sequence[center-5:center+5], "ATTGAGTTTC") self.assertEqual(record.contigs[1].reads[9].rd.sequence[-10:], "tggcgttgcg") self.assertEqual(record.contigs[1].reads[9].qa.qual_clipping_start, 14) self.assertEqual(record.contigs[1].reads[9].qa.qual_clipping_end, 765) self.assertEqual(record.contigs[1].reads[9].qa.align_clipping_start, 4) self.assertEqual(record.contigs[1].reads[9].qa.align_clipping_end, 765) self.assertEqual(record.contigs[1].reads[9].ds.chromat_file, "BL060-LR7.g.ab1") self.assertEqual(record.contigs[1].reads[9].ds.phd_file, "BL060-LR7.g.ab1.phd.1") self.assertEqual(record.contigs[1].reads[9].ds.time, "Fri Nov 14 09:46:03 2003") self.assertEqual(record.contigs[1].reads[9].ds.chem, "term") self.assertEqual(record.contigs[1].reads[9].ds.dye, "big") self.assertEqual(record.contigs[1].reads[9].ds.template, "") self.assertEqual(record.contigs[1].reads[9].ds.direction, "") self.assertEqual(record.contigs[1].reads[9].rt, None) self.assertEqual(record.contigs[1].reads[9].wr, None) # Read 10 self.assertEqual(record.contigs[1].reads[10].rd.name, "BL060c5-LR5.g.ab1") 
self.assertEqual(record.contigs[1].reads[10].rd.padded_bases, 871) self.assertEqual(record.contigs[1].reads[10].rd.info_items, 0) self.assertEqual(record.contigs[1].reads[10].rd.read_tags, 0) center = len(record.contigs[1].reads[10].rd.sequence)//2 self.assertEqual(record.contigs[1].reads[10].rd.sequence[:10], "ggtTCGATTA") self.assertEqual(record.contigs[1].reads[10].rd.sequence[center-5:center+5], "ACCAATTGAC") self.assertEqual(record.contigs[1].reads[10].rd.sequence[-10:], "ACCACCCatt") self.assertEqual(record.contigs[1].reads[10].qa.qual_clipping_start, 12) self.assertEqual(record.contigs[1].reads[10].qa.qual_clipping_end, 767) self.assertEqual(record.contigs[1].reads[10].qa.align_clipping_start, 1) self.assertEqual(record.contigs[1].reads[10].qa.align_clipping_end, 871) self.assertEqual(record.contigs[1].reads[10].ds.chromat_file, "BL060c5-LR5.g.ab1") self.assertEqual(record.contigs[1].reads[10].ds.phd_file, "BL060c5-LR5.g.ab1.phd.1") self.assertEqual(record.contigs[1].reads[10].ds.time, "Fri Nov 14 09:46:03 2003") self.assertEqual(record.contigs[1].reads[10].ds.chem, "term") self.assertEqual(record.contigs[1].reads[10].ds.dye, "big") self.assertEqual(record.contigs[1].reads[10].ds.template, "") self.assertEqual(record.contigs[1].reads[10].ds.direction, "") self.assertEqual(record.contigs[1].reads[10].rt, None) self.assertEqual(record.contigs[1].reads[10].wr, None) # Read 11 self.assertEqual(record.contigs[1].reads[11].rd.name, "BL060c2-LR5.g.ab1") self.assertEqual(record.contigs[1].reads[11].rd.padded_bases, 839) self.assertEqual(record.contigs[1].reads[11].rd.info_items, 0) self.assertEqual(record.contigs[1].reads[11].rd.read_tags, 0) center = len(record.contigs[1].reads[11].rd.sequence)//2 self.assertEqual(record.contigs[1].reads[11].rd.sequence[:10], "ggttcatatg") self.assertEqual(record.contigs[1].reads[11].rd.sequence[center-5:center+5], "TAAAATCAGT") self.assertEqual(record.contigs[1].reads[11].rd.sequence[-10:], "TCTTGCaata") self.assertEqual(record.contigs[1].reads[11].qa.qual_clipping_start, 11) self.assertEqual(record.contigs[1].reads[11].qa.qual_clipping_end, 757) self.assertEqual(record.contigs[1].reads[11].qa.align_clipping_start, 10) self.assertEqual(record.contigs[1].reads[11].qa.align_clipping_end, 835) self.assertEqual(record.contigs[1].reads[11].ds, None) self.assertEqual(len(record.contigs[1].reads[11].rt), 1) self.assertEqual(record.contigs[1].reads[11].rt[0].name, "BL060c2-LR5.g.ab1") self.assertEqual(record.contigs[1].reads[11].rt[0].tag_type, "matchElsewhereHighQual") self.assertEqual(record.contigs[1].reads[11].rt[0].program, "phrap") self.assertEqual(record.contigs[1].reads[11].rt[0].padded_start, 617) self.assertEqual(record.contigs[1].reads[11].rt[0].padded_end, 631) self.assertEqual(record.contigs[1].reads[11].rt[0].date, "040217:110357") self.assertEqual(record.contigs[1].reads[11].wr, None) # Read 12 self.assertEqual(record.contigs[1].reads[12].rd.name, "BL060c5-LR0R.b.ab1") self.assertEqual(record.contigs[1].reads[12].rd.padded_bases, 855) self.assertEqual(record.contigs[1].reads[12].rd.info_items, 0) self.assertEqual(record.contigs[1].reads[12].rd.read_tags, 0) center = len(record.contigs[1].reads[12].rd.sequence)//2 self.assertEqual(record.contigs[1].reads[12].rd.sequence[:10], "cACTCGCGTA") self.assertEqual(record.contigs[1].reads[12].rd.sequence[center-5:center+5], "CTCGTAAAAT") self.assertEqual(record.contigs[1].reads[12].rd.sequence[-10:], "aacccctgca") self.assertEqual(record.contigs[1].reads[12].qa.qual_clipping_start, 94) 
        self.assertEqual(record.contigs[1].reads[12].qa.qual_clipping_end, 835)
        self.assertEqual(record.contigs[1].reads[12].qa.align_clipping_start, 1)
        self.assertEqual(record.contigs[1].reads[12].qa.align_clipping_end, 847)
        self.assertEqual(record.contigs[1].reads[12].ds.chromat_file, "BL060c5-LR0R.b.ab1")
        self.assertEqual(record.contigs[1].reads[12].ds.phd_file, "BL060c5-LR0R.b.ab1.phd.1")
        self.assertEqual(record.contigs[1].reads[12].ds.time, "Wed Nov 12 08:16:30 2003")
        self.assertEqual(record.contigs[1].reads[12].ds.chem, "term")
        self.assertEqual(record.contigs[1].reads[12].ds.dye, "big")
        self.assertEqual(record.contigs[1].reads[12].ds.template, "")
        self.assertEqual(record.contigs[1].reads[12].ds.direction, "")
        self.assertEqual(len(record.contigs[1].reads[12].rt), 1)
        self.assertEqual(record.contigs[1].reads[12].rt[0].name, "BL060c5-LR0R.b.ab1")
        self.assertEqual(record.contigs[1].reads[12].rt[0].tag_type, "matchElsewhereHighQual")
        self.assertEqual(record.contigs[1].reads[12].rt[0].program, "phrap")
        self.assertEqual(record.contigs[1].reads[12].rt[0].padded_start, 617)
        self.assertEqual(record.contigs[1].reads[12].rt[0].padded_end, 631)
        self.assertEqual(record.contigs[1].reads[12].rt[0].date, "040217:110357")
        self.assertEqual(record.contigs[1].reads[12].wr, None)
        # Read 13
        self.assertEqual(record.contigs[1].reads[13].rd.name, "BL060c2-LR0R.b.ab1")
        self.assertEqual(record.contigs[1].reads[13].rd.padded_bases, 852)
        self.assertEqual(record.contigs[1].reads[13].rd.info_items, 0)
        self.assertEqual(record.contigs[1].reads[13].rd.read_tags, 0)
        center = len(record.contigs[1].reads[13].rd.sequence)//2
        self.assertEqual(record.contigs[1].reads[13].rd.sequence[:10], "cgCGTa*tTG")
        self.assertEqual(record.contigs[1].reads[13].rd.sequence[center-5:center+5], "GTAAAATATT")
        self.assertEqual(record.contigs[1].reads[13].rd.sequence[-10:], "Atccttgtag")
        self.assertEqual(record.contigs[1].reads[13].qa.qual_clipping_start, 33)
        self.assertEqual(record.contigs[1].reads[13].qa.qual_clipping_end, 831)
        self.assertEqual(record.contigs[1].reads[13].qa.align_clipping_start, 1)
        self.assertEqual(record.contigs[1].reads[13].qa.align_clipping_end, 852)
        self.assertEqual(record.contigs[1].reads[13].ds.chromat_file, "BL060c2-LR0R.b.ab1")
        self.assertEqual(record.contigs[1].reads[13].ds.phd_file, "BL060c2-LR0R.b.ab1.phd.1")
        self.assertEqual(record.contigs[1].reads[13].ds.time, "Wed Nov 12 08:16:29 2003")
        self.assertEqual(record.contigs[1].reads[13].ds.chem, "term")
        self.assertEqual(record.contigs[1].reads[13].ds.dye, "big")
        self.assertEqual(record.contigs[1].reads[13].ds.template, "")
        self.assertEqual(record.contigs[1].reads[13].ds.direction, "")
        self.assertEqual(record.contigs[1].reads[13].rt, [])
        self.assertEqual(len(record.contigs[1].reads[13].wr), 1)
        self.assertEqual(record.contigs[1].reads[13].wr[0].name, "BL060c2-LR0R.b.ab1")
        self.assertEqual(record.contigs[1].reads[13].wr[0].aligned, "unaligned")
        self.assertEqual(record.contigs[1].reads[13].wr[0].program, "phrap")
        self.assertEqual(record.contigs[1].reads[13].wr[0].date, "040217:110357")

    def test_check_record_parser(self):
        """Test to check that record parser parses each contig into a record."""
        contigs = Ace.parse(self.handle)
        # First contig
        contig = next(contigs)
        self.assertEqual(len(contig.reads), 2)
        self.assertEqual(contig.name, "Contig1")
        self.assertEqual(contig.nbases, 856)
        self.assertEqual(contig.nreads, 2)
        self.assertEqual(contig.nsegments, 31)
        self.assertEqual(contig.uorc, 'U')
        center = len(contig.sequence)//2
        self.assertEqual(contig.sequence[:10], "aatacgGGAT")
self.assertEqual(contig.sequence[center-5:center+5], "ACATCATCTG") self.assertEqual(contig.sequence[-10:], "cATCTAGtac") center = len(contig.quality)//2 self.assertEqual(contig.quality[:10], [0, 0, 0, 0, 0, 0, 22, 23, 25, 28]) self.assertEqual(contig.quality[center-5:center+5], [90, 90, 90, 90, 90, 90, 90, 90, 90, 90]) self.assertEqual(contig.quality[-10:], [15, 22, 30, 24, 28, 22, 21, 15, 19, 0]) self.assertEqual(len(contig.af), 2) self.assertEqual(contig.af[1].name, "BL060c3-LR0R.b.ab1") self.assertEqual(contig.af[1].coru, "U") self.assertEqual(contig.af[1].padded_start, 1) self.assertEqual(len(contig.bs), 31) self.assertEqual(contig.bs[15].name, "BL060c3-LR5.g.ab1") self.assertEqual(contig.bs[15].padded_start, 434) self.assertEqual(contig.bs[15].padded_end, 438) self.assertEqual(contig.bs[30].name, "BL060c3-LR0R.b.ab1") self.assertEqual(contig.bs[30].padded_start, 823) self.assertEqual(contig.bs[30].padded_end, 856) self.assertEqual(contig.ct, None) self.assertEqual(contig.wa, None) self.assertEqual(len(contig.reads), 2) self.assertEqual(contig.reads[0].rd.name, "BL060c3-LR5.g.ab1") self.assertEqual(contig.reads[0].rd.padded_bases, 868) self.assertEqual(contig.reads[0].rd.info_items, 0) self.assertEqual(contig.reads[0].rd.read_tags, 0) center = len(contig.reads[0].rd.sequence)//2 self.assertEqual(contig.reads[0].rd.sequence[:10], "tagcgaggaa") self.assertEqual(contig.reads[0].rd.sequence[center-5:center+5], "CCGAGGCCAA") self.assertEqual(contig.reads[0].rd.sequence[-10:], "gaaccatcag") self.assertEqual(contig.reads[0].qa.qual_clipping_start, 80) self.assertEqual(contig.reads[0].qa.qual_clipping_end, 853) self.assertEqual(contig.reads[0].qa.align_clipping_start, 22) self.assertEqual(contig.reads[0].qa.align_clipping_end, 856) self.assertEqual(contig.reads[0].ds, None) self.assertEqual(len(contig.reads[0].rt), 2) self.assertEqual(contig.reads[0].rt[0].name, "BL060c3-LR5.g.ab1") self.assertEqual(contig.reads[0].rt[0].tag_type, "matchElsewhereHighQual") self.assertEqual(contig.reads[0].rt[0].program, "phrap") self.assertEqual(contig.reads[0].rt[0].padded_start, 590) self.assertEqual(contig.reads[0].rt[0].padded_end, 607) self.assertEqual(contig.reads[0].rt[0].date, "040217:110357") self.assertEqual(contig.reads[0].rt[1].name, "BL060c3-LR5.g.ab1") self.assertEqual(contig.reads[0].rt[1].tag_type, "matchElsewhereHighQual") self.assertEqual(contig.reads[0].rt[1].program, "phrap") self.assertEqual(contig.reads[0].rt[1].padded_start, 617) self.assertEqual(contig.reads[0].rt[1].padded_end, 631) self.assertEqual(contig.reads[0].rt[1].date, "040217:110357") self.assertEqual(len(contig.reads[0].wr), 1) self.assertEqual(contig.reads[0].wr[0].name, "BL060c3-LR5.g.ab1") self.assertEqual(contig.reads[0].wr[0].aligned, "unaligned") self.assertEqual(contig.reads[0].wr[0].program, "phrap") self.assertEqual(contig.reads[0].wr[0].date, "040217:110357") self.assertEqual(contig.reads[1].rd.name, "BL060c3-LR0R.b.ab1") self.assertEqual(contig.reads[1].rd.padded_bases, 856) self.assertEqual(contig.reads[1].rd.info_items, 0) self.assertEqual(contig.reads[1].rd.read_tags, 0) center = len(contig.reads[1].rd.sequence)//2 self.assertEqual(contig.reads[1].rd.sequence[:10], "aatacgGGAT") self.assertEqual(contig.reads[1].rd.sequence[center-5:center+5], "ACATCATCTG") self.assertEqual(contig.reads[1].rd.sequence[-10:], "cATCTAGtac") self.assertEqual(contig.reads[1].qa.qual_clipping_start, 7) self.assertEqual(contig.reads[1].qa.qual_clipping_end, 778) self.assertEqual(contig.reads[1].qa.align_clipping_start, 1) 
self.assertEqual(contig.reads[1].qa.align_clipping_end, 856) self.assertEqual(contig.reads[1].ds, None) self.assertEqual(contig.reads[1].rt, None) self.assertEqual(contig.reads[1].wr, None) # Second contig contig = next(contigs) self.assertEqual(len(contig.reads), 14) self.assertEqual(contig.name, "Contig2") self.assertEqual(contig.nbases, 3296) self.assertEqual(contig.nreads, 14) self.assertEqual(contig.nsegments, 214) self.assertEqual(contig.uorc, 'U') center = len(contig.sequence) // 2 self.assertEqual(contig.sequence[:10], "cacggatgat") self.assertEqual(contig.sequence[center-5:center+5], "TTTGAATATT") self.assertEqual(contig.sequence[-10:], "Atccttgtag") center = len(contig.quality) // 2 self.assertEqual(contig.quality[:10], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) self.assertEqual(contig.quality[center-5:center+5], [90, 90, 90, 90, 90, 90, 90, 90, 90, 90]) self.assertEqual(contig.quality[-10:], [24, 0, 0, 0, 0, 0, 0, 0, 0, 0]) self.assertEqual(len(contig.af), 14) self.assertEqual(contig.af[7].name, "BL060-LR3R.b.ab1") self.assertEqual(contig.af[7].coru, "C") self.assertEqual(contig.af[7].padded_start, 1601) self.assertEqual(contig.af[13].name, "BL060c2-LR0R.b.ab1") self.assertEqual(contig.af[13].coru, "C") self.assertEqual(contig.af[13].padded_start, 2445) self.assertEqual(len(contig.bs), 214) self.assertEqual(contig.bs[107].name, "BL060-c1-LR3R.b.ab1") self.assertEqual(contig.bs[107].padded_start, 2286) self.assertEqual(contig.bs[107].padded_end, 2292) self.assertEqual(contig.bs[213].name, "BL060c2-LR0R.b.ab1") self.assertEqual(contig.bs[213].padded_start, 3236) self.assertEqual(contig.bs[213].padded_end, 3296) self.assertEqual(len(contig.ct), 3) self.assertEqual(contig.ct[0].name, "Contig2") self.assertEqual(contig.ct[0].tag_type, "repeat") self.assertEqual(contig.ct[0].program, "phrap") self.assertEqual(contig.ct[0].padded_start, 42) self.assertEqual(contig.ct[0].padded_end, 43) self.assertEqual(contig.ct[0].date, "123456:765432") self.assertEqual(contig.ct[0].info, ['This is the forst line of comment for c2', 'and this the second for c2']) self.assertEqual(contig.ct[1].name, "unrelated_Contig") self.assertEqual(contig.ct[1].tag_type, "repeat") self.assertEqual(contig.ct[1].program, "phrap") self.assertEqual(contig.ct[1].padded_start, 1142) self.assertEqual(contig.ct[1].padded_end, 143) self.assertEqual(contig.ct[1].date, "122226:722232") self.assertEqual(contig.ct[1].info, ['This is the forst line of comment for the unrelated ct tag', 'and this the second']) self.assertEqual(contig.ct[2].name, "Contig1") self.assertEqual(contig.ct[2].tag_type, "repeat") self.assertEqual(contig.ct[2].program, "phrap") self.assertEqual(contig.ct[2].padded_start, 52) self.assertEqual(contig.ct[2].padded_end, 53) self.assertEqual(contig.ct[2].date, "555456:555432") self.assertEqual(contig.ct[2].info, ['This is the forst line of comment for c1', 'and this the second for c1']) self.assertEqual(len(contig.wa), 1) self.assertEqual(contig.wa[0].tag_type, "phrap_params") self.assertEqual(contig.wa[0].program, "phrap") self.assertEqual(contig.wa[0].date, "040203:114710") self.assertEqual(contig.wa[0].info, ['phrap 304_nuclsu.fasta.screen -new_ace -retain_duplicates', 'phrap version 0.990329']) self.assertEqual(len(contig.reads), 14) # Read 0 self.assertEqual(contig.reads[0].rd.name, "BL060-c1-LR12.g.ab1") self.assertEqual(contig.reads[0].rd.padded_bases, 862) self.assertEqual(contig.reads[0].rd.info_items, 0) self.assertEqual(contig.reads[0].rd.read_tags, 0) center = len(contig.reads[0].rd.sequence)//2 
self.assertEqual(contig.reads[0].rd.sequence[:10], "cacggatgat") self.assertEqual(contig.reads[0].rd.sequence[center-5:center+5], "GTTCTCGTTG") self.assertEqual(contig.reads[0].rd.sequence[-10:], "CGTTTACCcg") self.assertEqual(contig.reads[0].qa.qual_clipping_start, 81) self.assertEqual(contig.reads[0].qa.qual_clipping_end, 842) self.assertEqual(contig.reads[0].qa.align_clipping_start, 1) self.assertEqual(contig.reads[0].qa.align_clipping_end, 862) self.assertEqual(contig.reads[0].ds.chromat_file, "BL060-c1-LR12.g.ab1") self.assertEqual(contig.reads[0].ds.phd_file, "BL060-c1-LR12.g.ab1.phd.1") self.assertEqual(contig.reads[0].ds.time, "Tue Feb 3 11:01:16 2004") self.assertEqual(contig.reads[0].ds.chem, "term") self.assertEqual(contig.reads[0].ds.dye, "big") self.assertEqual(contig.reads[0].ds.template, "") self.assertEqual(contig.reads[0].ds.direction, "") self.assertEqual(contig.reads[0].rt, None) self.assertEqual(contig.reads[0].wr, None) # Read 1 self.assertEqual(contig.reads[1].rd.name, "BL060-c1-LR11.g.ab1") self.assertEqual(contig.reads[1].rd.padded_bases, 880) self.assertEqual(contig.reads[1].rd.info_items, 0) self.assertEqual(contig.reads[1].rd.read_tags, 0) center = len(contig.reads[1].rd.sequence)//2 self.assertEqual(contig.reads[1].rd.sequence[:10], "ctttctgacC") self.assertEqual(contig.reads[1].rd.sequence[center-5:center+5], "CTGTGGTTTC") self.assertEqual(contig.reads[1].rd.sequence[-10:], "cggagttacg") self.assertEqual(contig.reads[1].qa.qual_clipping_start, 11) self.assertEqual(contig.reads[1].qa.qual_clipping_end, 807) self.assertEqual(contig.reads[1].qa.align_clipping_start, 8) self.assertEqual(contig.reads[1].qa.align_clipping_end, 880) self.assertEqual(contig.reads[1].ds.chromat_file, "BL060-c1-LR11.g.ab1") self.assertEqual(contig.reads[1].ds.phd_file, "BL060-c1-LR11.g.ab1.phd.1") self.assertEqual(contig.reads[1].ds.time, "Tue Feb 3 11:01:16 2004") self.assertEqual(contig.reads[1].ds.chem, "term") self.assertEqual(contig.reads[1].ds.dye, "big") self.assertEqual(contig.reads[1].ds.template, "") self.assertEqual(contig.reads[1].ds.direction, "") self.assertEqual(len(contig.reads[1].rt), 1) self.assertEqual(contig.reads[1].rt[0].name, "BL060c3-LR5.g.ab1") self.assertEqual(contig.reads[1].rt[0].tag_type, "matchElsewhereHighQual") self.assertEqual(contig.reads[1].rt[0].program, "phrap") self.assertEqual(contig.reads[1].rt[0].padded_start, 617) self.assertEqual(contig.reads[1].rt[0].padded_end, 631) self.assertEqual(contig.reads[1].rt[0].date, "040217:110357") self.assertEqual(contig.reads[1].wr, None) # Read 2 self.assertEqual(contig.reads[2].rd.name, "BL060-c1-LR9.g.ab1") self.assertEqual(contig.reads[2].rd.padded_bases, 864) self.assertEqual(contig.reads[2].rd.info_items, 0) self.assertEqual(contig.reads[2].rd.read_tags, 0) center = len(contig.reads[2].rd.sequence)//2 self.assertEqual(contig.reads[2].rd.sequence[:10], "cacccaCTTT") self.assertEqual(contig.reads[2].rd.sequence[center-5:center+5], "ACCAAACATT") self.assertEqual(contig.reads[2].rd.sequence[-10:], "GGTAGCACgc") self.assertEqual(contig.reads[2].qa.qual_clipping_start, 7) self.assertEqual(contig.reads[2].qa.qual_clipping_end, 840) self.assertEqual(contig.reads[2].qa.align_clipping_start, 4) self.assertEqual(contig.reads[2].qa.align_clipping_end, 864) self.assertEqual(contig.reads[2].ds.chromat_file, "BL060-c1-LR9.g.ab1") self.assertEqual(contig.reads[2].ds.phd_file, "BL060-c1-LR9.g.ab1.phd.1") self.assertEqual(contig.reads[2].ds.time, "Tue Feb 3 11:01:16 2004") self.assertEqual(contig.reads[2].ds.chem, "term") 
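        # DS metadata: chem "term" together with dye "big" indicates BigDye
        # terminator chemistry, carried over from the read's PHD file.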
self.assertEqual(contig.reads[2].ds.dye, "big") self.assertEqual(contig.reads[2].ds.template, "") self.assertEqual(contig.reads[2].ds.direction, "") self.assertEqual(contig.reads[2].rt, None) self.assertEqual(contig.reads[2].wr, None) # Read 3 self.assertEqual(contig.reads[3].rd.name, "BL060-c1-LR17R.b.ab1") self.assertEqual(contig.reads[3].rd.padded_bases, 863) self.assertEqual(contig.reads[3].rd.info_items, 0) self.assertEqual(contig.reads[3].rd.read_tags, 0) center = len(contig.reads[3].rd.sequence)//2 self.assertEqual(contig.reads[3].rd.sequence[:10], "ctaattggcc") self.assertEqual(contig.reads[3].rd.sequence[center-5:center+5], "GGAACCTTTC") self.assertEqual(contig.reads[3].rd.sequence[-10:], "CAACCTgact") self.assertEqual(contig.reads[3].qa.qual_clipping_start, 63) self.assertEqual(contig.reads[3].qa.qual_clipping_end, 857) self.assertEqual(contig.reads[3].qa.align_clipping_start, 1) self.assertEqual(contig.reads[3].qa.align_clipping_end, 861) self.assertEqual(contig.reads[3].ds.chromat_file, "BL060-c1-LR17R.b.ab1") self.assertEqual(contig.reads[3].ds.phd_file, "BL060-c1-LR17R.b.ab1.phd.1") self.assertEqual(contig.reads[3].ds.time, "Tue Feb 3 11:01:16 2004") self.assertEqual(contig.reads[3].ds.chem, "term") self.assertEqual(contig.reads[3].ds.dye, "big") self.assertEqual(contig.reads[3].ds.template, "") self.assertEqual(contig.reads[3].ds.direction, "") self.assertEqual(len(contig.reads[3].rt), 1) self.assertEqual(contig.reads[3].rt[0].name, "BL060c3-LR5.g.ab1") self.assertEqual(contig.reads[3].rt[0].tag_type, "matchElsewhereHighQual") self.assertEqual(contig.reads[3].rt[0].program, "phrap") self.assertEqual(contig.reads[3].rt[0].padded_start, 617) self.assertEqual(contig.reads[3].rt[0].padded_end, 631) self.assertEqual(contig.reads[3].rt[0].date, "040217:110357") self.assertEqual(contig.reads[3].wr, None) # Read 4 self.assertEqual(contig.reads[4].rd.name, "BL060-LR8.5.g.ab1") self.assertEqual(contig.reads[4].rd.padded_bases, 877) self.assertEqual(contig.reads[4].rd.info_items, 0) self.assertEqual(contig.reads[4].rd.read_tags, 0) center = len(contig.reads[4].rd.sequence)//2 self.assertEqual(contig.reads[4].rd.sequence[:10], "tgCTGCGGTT") self.assertEqual(contig.reads[4].rd.sequence[center-5:center+5], "GGCAGTTTCA") self.assertEqual(contig.reads[4].rd.sequence[-10:], "tactcataaa") self.assertEqual(contig.reads[4].qa.qual_clipping_start, 13) self.assertEqual(contig.reads[4].qa.qual_clipping_end, 729) self.assertEqual(contig.reads[4].qa.align_clipping_start, 1) self.assertEqual(contig.reads[4].qa.align_clipping_end, 877) self.assertEqual(contig.reads[4].ds.chromat_file, "BL060-LR8.5.g.ab1") self.assertEqual(contig.reads[4].ds.phd_file, "BL060-LR8.5.g.ab1.phd.1") self.assertEqual(contig.reads[4].ds.time, "Fri Nov 14 09:46:03 2003") self.assertEqual(contig.reads[4].ds.chem, "term") self.assertEqual(contig.reads[4].ds.dye, "big") self.assertEqual(contig.reads[4].ds.template, "") self.assertEqual(contig.reads[4].ds.direction, "") self.assertEqual(contig.reads[4].rt, None) self.assertEqual(contig.reads[4].wr, None) # Read 5 self.assertEqual(contig.reads[5].rd.name, "BL060-LR3R.b.ab1") self.assertEqual(contig.reads[5].rd.padded_bases, 874) self.assertEqual(contig.reads[5].rd.info_items, 0) self.assertEqual(contig.reads[5].rd.read_tags, 0) center = len(contig.reads[5].rd.sequence)//2 self.assertEqual(contig.reads[5].rd.sequence[:10], "ctCTTAGGAT") self.assertEqual(contig.reads[5].rd.sequence[center-5:center+5], "AACTCACATT") self.assertEqual(contig.reads[5].rd.sequence[-10:], "*CACCCAAac") 
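        # QA fields give quality- and alignment-clipping coordinates, expressed in
        # padded read positions.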
self.assertEqual(contig.reads[5].qa.qual_clipping_start, 65) self.assertEqual(contig.reads[5].qa.qual_clipping_end, 874) self.assertEqual(contig.reads[5].qa.align_clipping_start, 1) self.assertEqual(contig.reads[5].qa.align_clipping_end, 874) self.assertEqual(contig.reads[5].ds.chromat_file, "BL060-LR3R.b.ab1") self.assertEqual(contig.reads[5].ds.phd_file, "BL060-LR3R.b.ab1.phd.1") self.assertEqual(contig.reads[5].ds.time, "Fri Nov 14 09:46:03 2003") self.assertEqual(contig.reads[5].ds.chem, "term") self.assertEqual(contig.reads[5].ds.dye, "big") self.assertEqual(contig.reads[5].ds.template, "") self.assertEqual(contig.reads[5].ds.direction, "") self.assertEqual(contig.reads[5].rt, None) self.assertEqual(contig.reads[5].wr, None) # Read 6 self.assertEqual(contig.reads[6].rd.name, "BL060-c1-LR3R.b.ab1") self.assertEqual(contig.reads[6].rd.padded_bases, 864) self.assertEqual(contig.reads[6].rd.info_items, 0) self.assertEqual(contig.reads[6].rd.read_tags, 0) center = len(contig.reads[6].rd.sequence)//2 self.assertEqual(contig.reads[6].rd.sequence[:10], "CCaTGTCCAA") self.assertEqual(contig.reads[6].rd.sequence[center-5:center+5], "AAGGGTT*CA") self.assertEqual(contig.reads[6].rd.sequence[-10:], "ACACTCGCga") self.assertEqual(contig.reads[6].qa.qual_clipping_start, 73) self.assertEqual(contig.reads[6].qa.qual_clipping_end, 862) self.assertEqual(contig.reads[6].qa.align_clipping_start, 1) self.assertEqual(contig.reads[6].qa.align_clipping_end, 863) self.assertEqual(contig.reads[6].ds.chromat_file, "BL060-c1-LR3R.b.ab1") self.assertEqual(contig.reads[6].ds.phd_file, "BL060-c1-LR3R.b.ab1.phd.1") self.assertEqual(contig.reads[6].ds.time, "Tue Feb 3 11:01:16 2004") self.assertEqual(contig.reads[6].ds.chem, "term") self.assertEqual(contig.reads[6].ds.dye, "big") self.assertEqual(contig.reads[6].ds.template, "") self.assertEqual(contig.reads[6].ds.direction, "") self.assertEqual(contig.reads[6].rt, None) self.assertEqual(contig.reads[6].wr, None) # Read 7 self.assertEqual(contig.reads[7].rd.name, "BL060-LR3R.b.ab1") self.assertEqual(contig.reads[7].rd.padded_bases, 857) self.assertEqual(contig.reads[7].rd.info_items, 0) self.assertEqual(contig.reads[7].rd.read_tags, 0) center = len(contig.reads[7].rd.sequence)//2 self.assertEqual(contig.reads[7].rd.sequence[:10], "agaaagagga") self.assertEqual(contig.reads[7].rd.sequence[center-5:center+5], "nnnannnnnn") self.assertEqual(contig.reads[7].rd.sequence[-10:], "gtctttgctc") self.assertEqual(contig.reads[7].qa.qual_clipping_start, 548) self.assertEqual(contig.reads[7].qa.qual_clipping_end, 847) self.assertEqual(contig.reads[7].qa.align_clipping_start, 442) self.assertEqual(contig.reads[7].qa.align_clipping_end, 854) self.assertEqual(contig.reads[7].ds.chromat_file, "BL060-LR3R.b.ab1") self.assertEqual(contig.reads[7].ds.phd_file, "BL060-LR3R.b.ab1.phd.1") self.assertEqual(contig.reads[7].ds.time, "Fri Jan 16 09:01:10 2004") self.assertEqual(contig.reads[7].ds.chem, "term") self.assertEqual(contig.reads[7].ds.dye, "big") self.assertEqual(contig.reads[7].ds.template, "") self.assertEqual(contig.reads[7].ds.direction, "") self.assertEqual(contig.reads[7].rt, None) self.assertEqual(contig.reads[7].wr, None) # Read 8 self.assertEqual(contig.reads[8].rd.name, "BL060-c1-LR7.g.ab1") self.assertEqual(contig.reads[8].rd.padded_bases, 878) self.assertEqual(contig.reads[8].rd.info_items, 0) self.assertEqual(contig.reads[8].rd.read_tags, 0) center = len(contig.reads[8].rd.sequence)//2 self.assertEqual(contig.reads[8].rd.sequence[:10], "agTttc*ctc") 
self.assertEqual(contig.reads[8].rd.sequence[center-5:center+5], "TCATAAAACT") self.assertEqual(contig.reads[8].rd.sequence[-10:], "xxxxxxxxxx") self.assertEqual(contig.reads[8].qa.qual_clipping_start, 20) self.assertEqual(contig.reads[8].qa.qual_clipping_end, 798) self.assertEqual(contig.reads[8].qa.align_clipping_start, 1) self.assertEqual(contig.reads[8].qa.align_clipping_end, 798) self.assertEqual(contig.reads[8].ds.chromat_file, "BL060-c1-LR7.g.ab1") self.assertEqual(contig.reads[8].ds.phd_file, "BL060-c1-LR7.g.ab1.phd.1") self.assertEqual(contig.reads[8].ds.time, "Tue Feb 3 11:01:16 2004") self.assertEqual(contig.reads[8].ds.chem, "term") self.assertEqual(contig.reads[8].ds.dye, "big") self.assertEqual(contig.reads[8].ds.template, "") self.assertEqual(contig.reads[8].ds.direction, "") self.assertEqual(contig.reads[8].rt, None) self.assertEqual(contig.reads[8].wr, None) # Read 9 self.assertEqual(contig.reads[9].rd.name, "BL060-LR7.g.ab1") self.assertEqual(contig.reads[9].rd.padded_bases, 880) self.assertEqual(contig.reads[9].rd.info_items, 0) self.assertEqual(contig.reads[9].rd.read_tags, 0) center = len(contig.reads[9].rd.sequence)//2 self.assertEqual(contig.reads[9].rd.sequence[:10], "ggctaCGCCc") self.assertEqual(contig.reads[9].rd.sequence[center-5:center+5], "ATTGAGTTTC") self.assertEqual(contig.reads[9].rd.sequence[-10:], "tggcgttgcg") self.assertEqual(contig.reads[9].qa.qual_clipping_start, 14) self.assertEqual(contig.reads[9].qa.qual_clipping_end, 765) self.assertEqual(contig.reads[9].qa.align_clipping_start, 4) self.assertEqual(contig.reads[9].qa.align_clipping_end, 765) self.assertEqual(contig.reads[9].ds.chromat_file, "BL060-LR7.g.ab1") self.assertEqual(contig.reads[9].ds.phd_file, "BL060-LR7.g.ab1.phd.1") self.assertEqual(contig.reads[9].ds.time, "Fri Nov 14 09:46:03 2003") self.assertEqual(contig.reads[9].ds.chem, "term") self.assertEqual(contig.reads[9].ds.dye, "big") self.assertEqual(contig.reads[9].ds.template, "") self.assertEqual(contig.reads[9].ds.direction, "") self.assertEqual(contig.reads[9].rt, None) self.assertEqual(contig.reads[9].wr, None) # Read 10 self.assertEqual(contig.reads[10].rd.name, "BL060c5-LR5.g.ab1") self.assertEqual(contig.reads[10].rd.padded_bases, 871) self.assertEqual(contig.reads[10].rd.info_items, 0) self.assertEqual(contig.reads[10].rd.read_tags, 0) center = len(contig.reads[10].rd.sequence)//2 self.assertEqual(contig.reads[10].rd.sequence[:10], "ggtTCGATTA") self.assertEqual(contig.reads[10].rd.sequence[center-5:center+5], "ACCAATTGAC") self.assertEqual(contig.reads[10].rd.sequence[-10:], "ACCACCCatt") self.assertEqual(contig.reads[10].qa.qual_clipping_start, 12) self.assertEqual(contig.reads[10].qa.qual_clipping_end, 767) self.assertEqual(contig.reads[10].qa.align_clipping_start, 1) self.assertEqual(contig.reads[10].qa.align_clipping_end, 871) self.assertEqual(contig.reads[10].ds.chromat_file, "BL060c5-LR5.g.ab1") self.assertEqual(contig.reads[10].ds.phd_file, "BL060c5-LR5.g.ab1.phd.1") self.assertEqual(contig.reads[10].ds.time, "Fri Nov 14 09:46:03 2003") self.assertEqual(contig.reads[10].ds.chem, "term") self.assertEqual(contig.reads[10].ds.dye, "big") self.assertEqual(contig.reads[10].ds.template, "") self.assertEqual(contig.reads[10].ds.direction, "") self.assertEqual(contig.reads[10].rt, None) self.assertEqual(contig.reads[10].wr, None) # Read 11 self.assertEqual(contig.reads[11].rd.name, "BL060c2-LR5.g.ab1") self.assertEqual(contig.reads[11].rd.padded_bases, 839) self.assertEqual(contig.reads[11].rd.info_items, 0) 
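        # info_items and read_tags mirror the counts on the RD header line.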
self.assertEqual(contig.reads[11].rd.read_tags, 0) center = len(contig.reads[11].rd.sequence)//2 self.assertEqual(contig.reads[11].rd.sequence[:10], "ggttcatatg") self.assertEqual(contig.reads[11].rd.sequence[center-5:center+5], "TAAAATCAGT") self.assertEqual(contig.reads[11].rd.sequence[-10:], "TCTTGCaata") self.assertEqual(contig.reads[11].qa.qual_clipping_start, 11) self.assertEqual(contig.reads[11].qa.qual_clipping_end, 757) self.assertEqual(contig.reads[11].qa.align_clipping_start, 10) self.assertEqual(contig.reads[11].qa.align_clipping_end, 835) self.assertEqual(contig.reads[11].ds, None) self.assertEqual(len(contig.reads[11].rt), 1) self.assertEqual(contig.reads[11].rt[0].name, "BL060c2-LR5.g.ab1") self.assertEqual(contig.reads[11].rt[0].tag_type, "matchElsewhereHighQual") self.assertEqual(contig.reads[11].rt[0].program, "phrap") self.assertEqual(contig.reads[11].rt[0].padded_start, 617) self.assertEqual(contig.reads[11].rt[0].padded_end, 631) self.assertEqual(contig.reads[11].rt[0].date, "040217:110357") self.assertEqual(contig.reads[11].wr, None) # Read 12 self.assertEqual(contig.reads[12].rd.name, "BL060c5-LR0R.b.ab1") self.assertEqual(contig.reads[12].rd.padded_bases, 855) self.assertEqual(contig.reads[12].rd.info_items, 0) self.assertEqual(contig.reads[12].rd.read_tags, 0) center = len(contig.reads[12].rd.sequence)//2 self.assertEqual(contig.reads[12].rd.sequence[:10], "cACTCGCGTA") self.assertEqual(contig.reads[12].rd.sequence[center-5:center+5], "CTCGTAAAAT") self.assertEqual(contig.reads[12].rd.sequence[-10:], "aacccctgca") self.assertEqual(contig.reads[12].qa.qual_clipping_start, 94) self.assertEqual(contig.reads[12].qa.qual_clipping_end, 835) self.assertEqual(contig.reads[12].qa.align_clipping_start, 1) self.assertEqual(contig.reads[12].qa.align_clipping_end, 847) self.assertEqual(contig.reads[12].ds.chromat_file, "BL060c5-LR0R.b.ab1") self.assertEqual(contig.reads[12].ds.phd_file, "BL060c5-LR0R.b.ab1.phd.1") self.assertEqual(contig.reads[12].ds.time, "Wed Nov 12 08:16:30 2003") self.assertEqual(contig.reads[12].ds.chem, "term") self.assertEqual(contig.reads[12].ds.dye, "big") self.assertEqual(contig.reads[12].ds.template, "") self.assertEqual(contig.reads[12].ds.direction, "") self.assertEqual(contig.reads[12].rt, None) self.assertEqual(contig.reads[12].wr, None) # Read 13 self.assertEqual(contig.reads[13].rd.name, "BL060c2-LR0R.b.ab1") self.assertEqual(contig.reads[13].rd.padded_bases, 852) self.assertEqual(contig.reads[13].rd.info_items, 0) self.assertEqual(contig.reads[13].rd.read_tags, 0) center = len(contig.reads[13].rd.sequence)//2 self.assertEqual(contig.reads[13].rd.sequence[:10], "cgCGTa*tTG") self.assertEqual(contig.reads[13].rd.sequence[center-5:center+5], "GTAAAATATT") self.assertEqual(contig.reads[13].rd.sequence[-10:], "Atccttgtag") self.assertEqual(contig.reads[13].qa.qual_clipping_start, 33) self.assertEqual(contig.reads[13].qa.qual_clipping_end, 831) self.assertEqual(contig.reads[13].qa.align_clipping_start, 1) self.assertEqual(contig.reads[13].qa.align_clipping_end, 852) self.assertEqual(contig.reads[13].ds.chromat_file, "BL060c2-LR0R.b.ab1") self.assertEqual(contig.reads[13].ds.phd_file, "BL060c2-LR0R.b.ab1.phd.1") self.assertEqual(contig.reads[13].ds.time, "Wed Nov 12 08:16:29 2003") self.assertEqual(contig.reads[13].ds.chem, "term") self.assertEqual(contig.reads[13].ds.dye, "big") self.assertEqual(contig.reads[13].ds.template, "") self.assertEqual(contig.reads[13].ds.direction, "") self.assertEqual(len(contig.reads[13].rt), 1) 
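        # RT (read tag) entries hold per-read annotations; here phrap recorded a
        # "matchElsewhereHighQual" cross-match tag.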
self.assertEqual(contig.reads[13].rt[0].name, "BL060c5-LR0R.b.ab1") self.assertEqual(contig.reads[13].rt[0].tag_type, "matchElsewhereHighQual") self.assertEqual(contig.reads[13].rt[0].program, "phrap") self.assertEqual(contig.reads[13].rt[0].padded_start, 617) self.assertEqual(contig.reads[13].rt[0].padded_end, 631) self.assertEqual(contig.reads[13].rt[0].date, "040217:110357") self.assertEqual(len(contig.reads[13].wr), 1) self.assertEqual(contig.reads[13].wr[0].name, "BL060c2-LR0R.b.ab1") self.assertEqual(contig.reads[13].wr[0].aligned, "unaligned") self.assertEqual(contig.reads[13].wr[0].program, "phrap") self.assertEqual(contig.reads[13].wr[0].date, "040217:110357") # Make sure there are no more contigs self.assertRaises(StopIteration, next, contigs) class AceTestTwo(unittest.TestCase): """Test parsing example output from CAP3. The sample input file seq.cap.ace was downloaded from: http://genome.cs.mtu.edu/cap/data/seq.cap.ace """ def setUp(self): self.handle = open("Ace/seq.cap.ace") def tearDown(self): self.handle.close() def test_check_ACEParser(self): """Test to check that ACEParser can parse the whole file into one record.""" record=Ace.read(self.handle) self.assertEqual(record.ncontigs, 1) self.assertEqual(record.nreads, 6) self.assertEqual(record.wa, None) self.assertEqual(len(record.contigs), 1) self.assertEqual(len(record.contigs[0].reads), 6) self.assertEqual(record.contigs[0].name, "Contig1") self.assertEqual(record.contigs[0].nbases, 1222) self.assertEqual(record.contigs[0].nreads, 6) self.assertEqual(record.contigs[0].nsegments, 0) self.assertEqual(record.contigs[0].uorc, "U") center = len(record.contigs[0].sequence)//2 self.assertEqual(record.contigs[0].sequence[:10], "AGTTTTAGTT") self.assertEqual(record.contigs[0].sequence[center-5:center+5], "TGTGCGCGCA") self.assertEqual(record.contigs[0].sequence[-10:], "ATATCACATT") center = len(record.contigs[0].quality)//2 self.assertEqual(record.contigs[0].quality[:10], [61, 66, 67, 70, 71, 73, 73, 77, 77, 87]) self.assertEqual(record.contigs[0].quality[center-5:center+5], [97, 97, 97, 97, 97, 97, 97, 97, 97, 97]) self.assertEqual(record.contigs[0].quality[-10:], [56, 51, 49, 41, 38, 39, 45, 44, 49, 46]) self.assertEqual(len(record.contigs[0].af), 6) self.assertEqual(len(record.contigs[0].bs), 0) self.assertEqual(record.contigs[0].af[3].name, "R5") self.assertEqual(record.contigs[0].af[3].coru, "C") self.assertEqual(record.contigs[0].af[3].padded_start, 320) self.assertEqual(record.contigs[0].af[5].name, "R6") self.assertEqual(record.contigs[0].af[5].coru, "C") self.assertEqual(record.contigs[0].af[5].padded_start, 517) self.assertEqual(record.contigs[0].bs, []) self.assertEqual(record.contigs[0].ct, None) self.assertEqual(record.contigs[0].wa, None) self.assertEqual(len(record.contigs[0].reads), 6) self.assertEqual(record.contigs[0].reads[0].rd.name, "R3") self.assertEqual(record.contigs[0].reads[0].rd.padded_bases, 919) self.assertEqual(record.contigs[0].reads[0].rd.info_items, 0) self.assertEqual(record.contigs[0].reads[0].rd.read_tags, 0) center = len(record.contigs[0].reads[0].rd.sequence)//2 self.assertEqual(record.contigs[0].reads[0].rd.sequence[:10], "NNNNNNNNNN") self.assertEqual(record.contigs[0].reads[0].rd.sequence[center-5:center+5], "ATGTGCGCTC") self.assertEqual(record.contigs[0].reads[0].rd.sequence[-10:], "CAGCTCACCA") self.assertEqual(record.contigs[0].reads[0].qa.qual_clipping_start, 55) self.assertEqual(record.contigs[0].reads[0].qa.qual_clipping_end, 916) 
self.assertEqual(record.contigs[0].reads[0].qa.align_clipping_start, 55) self.assertEqual(record.contigs[0].reads[0].qa.align_clipping_end, 916) self.assertEqual(record.contigs[0].reads[0].ds.chromat_file, "") self.assertEqual(record.contigs[0].reads[0].ds.phd_file, "") self.assertEqual(record.contigs[0].reads[0].ds.time, "") self.assertEqual(record.contigs[0].reads[0].ds.chem, "") self.assertEqual(record.contigs[0].reads[0].ds.dye, "") self.assertEqual(record.contigs[0].reads[0].ds.template, "") self.assertEqual(record.contigs[0].reads[0].ds.direction, "") self.assertEqual(record.contigs[0].reads[0].rt, None) self.assertEqual(record.contigs[0].reads[0].wr, None) self.assertEqual(record.contigs[0].reads[1].rd.name, "R1") self.assertEqual(record.contigs[0].reads[1].rd.padded_bases, 864) self.assertEqual(record.contigs[0].reads[1].rd.info_items, 0) self.assertEqual(record.contigs[0].reads[1].rd.read_tags, 0) center = len(record.contigs[0].reads[1].rd.sequence)//2 self.assertEqual(record.contigs[0].reads[1].rd.sequence[:10], "AGCCGGTACC") self.assertEqual(record.contigs[0].reads[1].rd.sequence[center-5:center+5], "GGGATGGCAC") self.assertEqual(record.contigs[0].reads[1].rd.sequence[-10:], "GGGCTGGGAG") self.assertEqual(record.contigs[0].reads[1].qa.qual_clipping_start, 12) self.assertEqual(record.contigs[0].reads[1].qa.qual_clipping_end, 863) self.assertEqual(record.contigs[0].reads[1].qa.align_clipping_start, 12) self.assertEqual(record.contigs[0].reads[1].qa.align_clipping_end, 863) self.assertEqual(record.contigs[0].reads[1].ds.chromat_file, "") self.assertEqual(record.contigs[0].reads[1].ds.phd_file, "") self.assertEqual(record.contigs[0].reads[1].ds.time, "") self.assertEqual(record.contigs[0].reads[1].ds.chem, "") self.assertEqual(record.contigs[0].reads[1].ds.dye, "") self.assertEqual(record.contigs[0].reads[1].ds.template, "") self.assertEqual(record.contigs[0].reads[1].ds.direction, "") self.assertEqual(record.contigs[0].reads[1].rt, None) self.assertEqual(record.contigs[0].reads[1].wr, None) self.assertEqual(record.contigs[0].reads[2].rd.name, "R2") self.assertEqual(record.contigs[0].reads[2].rd.padded_bases, 1026) self.assertEqual(record.contigs[0].reads[2].rd.info_items, 0) self.assertEqual(record.contigs[0].reads[2].rd.read_tags, 0) center = len(record.contigs[0].reads[2].rd.sequence)//2 self.assertEqual(record.contigs[0].reads[2].rd.sequence[:10], "NNNNNNNNNN") self.assertEqual(record.contigs[0].reads[2].rd.sequence[center-5:center+5], "GGATGCCTGG") self.assertEqual(record.contigs[0].reads[2].rd.sequence[-10:], "GGTTGAGGCC") self.assertEqual(record.contigs[0].reads[2].qa.qual_clipping_start, 55) self.assertEqual(record.contigs[0].reads[2].qa.qual_clipping_end, 1000) self.assertEqual(record.contigs[0].reads[2].qa.align_clipping_start, 55) self.assertEqual(record.contigs[0].reads[2].qa.align_clipping_end, 1000) self.assertEqual(record.contigs[0].reads[2].ds.chromat_file, "") self.assertEqual(record.contigs[0].reads[2].ds.phd_file, "") self.assertEqual(record.contigs[0].reads[2].ds.time, "") self.assertEqual(record.contigs[0].reads[2].ds.chem, "") self.assertEqual(record.contigs[0].reads[2].ds.dye, "") self.assertEqual(record.contigs[0].reads[2].ds.template, "") self.assertEqual(record.contigs[0].reads[2].ds.direction, "") self.assertEqual(record.contigs[0].reads[2].rt, None) self.assertEqual(record.contigs[0].reads[2].wr, None) self.assertEqual(record.contigs[0].reads[3].rd.name, "R5") self.assertEqual(record.contigs[0].reads[3].rd.padded_bases, 925) 
self.assertEqual(record.contigs[0].reads[3].rd.info_items, 0) self.assertEqual(record.contigs[0].reads[3].rd.read_tags, 0) center = len(record.contigs[0].reads[3].rd.sequence)//2 self.assertEqual(record.contigs[0].reads[3].rd.sequence[:10], "NNNNNNNNNN") self.assertEqual(record.contigs[0].reads[3].rd.sequence[center-5:center+5], "CCTCCCTACA") self.assertEqual(record.contigs[0].reads[3].rd.sequence[-10:], "GCCCCCGGNN") self.assertEqual(record.contigs[0].reads[3].qa.qual_clipping_start, 293) self.assertEqual(record.contigs[0].reads[3].qa.qual_clipping_end, 874) self.assertEqual(record.contigs[0].reads[3].qa.align_clipping_start, 293) self.assertEqual(record.contigs[0].reads[3].qa.align_clipping_end, 874) self.assertEqual(record.contigs[0].reads[3].ds.chromat_file, "") self.assertEqual(record.contigs[0].reads[3].ds.phd_file, "") self.assertEqual(record.contigs[0].reads[3].ds.time, "") self.assertEqual(record.contigs[0].reads[3].ds.chem, "") self.assertEqual(record.contigs[0].reads[3].ds.dye, "") self.assertEqual(record.contigs[0].reads[3].ds.template, "") self.assertEqual(record.contigs[0].reads[3].ds.direction, "") self.assertEqual(record.contigs[0].reads[3].rt, None) self.assertEqual(record.contigs[0].reads[3].wr, None) self.assertEqual(record.contigs[0].reads[4].rd.name, "R4") self.assertEqual(record.contigs[0].reads[4].rd.padded_bases, 816) self.assertEqual(record.contigs[0].reads[4].rd.info_items, 0) self.assertEqual(record.contigs[0].reads[4].rd.read_tags, 0) center = len(record.contigs[0].reads[4].rd.sequence)//2 self.assertEqual(record.contigs[0].reads[4].rd.sequence[:10], "CACTCAGCTC") self.assertEqual(record.contigs[0].reads[4].rd.sequence[center-5:center+5], "TCCAAAGGGT") self.assertEqual(record.contigs[0].reads[4].rd.sequence[-10:], "AGCTGAATCG") self.assertEqual(record.contigs[0].reads[4].qa.qual_clipping_start, 1) self.assertEqual(record.contigs[0].reads[4].qa.qual_clipping_end, 799) self.assertEqual(record.contigs[0].reads[4].qa.align_clipping_start, 1) self.assertEqual(record.contigs[0].reads[4].qa.align_clipping_end, 799) self.assertEqual(record.contigs[0].reads[4].ds.chromat_file, "") self.assertEqual(record.contigs[0].reads[4].ds.phd_file, "") self.assertEqual(record.contigs[0].reads[4].ds.time, "") self.assertEqual(record.contigs[0].reads[4].ds.chem, "") self.assertEqual(record.contigs[0].reads[4].ds.dye, "") self.assertEqual(record.contigs[0].reads[4].ds.template, "") self.assertEqual(record.contigs[0].reads[4].ds.direction, "") self.assertEqual(record.contigs[0].reads[4].rt, None) self.assertEqual(record.contigs[0].reads[4].wr, None) self.assertEqual(record.contigs[0].reads[5].rd.name, "R6") self.assertEqual(record.contigs[0].reads[5].rd.padded_bases, 857) self.assertEqual(record.contigs[0].reads[5].rd.info_items, 0) self.assertEqual(record.contigs[0].reads[5].rd.read_tags, 0) center = len(record.contigs[0].reads[5].rd.sequence)//2 self.assertEqual(record.contigs[0].reads[5].rd.sequence[:10], "CCGGCAGTGA") self.assertEqual(record.contigs[0].reads[5].rd.sequence[center-5:center+5], "AAAAAAAACC") self.assertEqual(record.contigs[0].reads[5].rd.sequence[-10:], "NNNNNNNNNN") self.assertEqual(record.contigs[0].reads[5].qa.qual_clipping_start, 24) self.assertEqual(record.contigs[0].reads[5].qa.qual_clipping_end, 706) self.assertEqual(record.contigs[0].reads[5].qa.align_clipping_start, 24) self.assertEqual(record.contigs[0].reads[5].qa.align_clipping_end, 706) self.assertEqual(record.contigs[0].reads[5].ds.chromat_file, "") 
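        # CAP3 writes DS lines with empty fields, so every attribute should come
        # back as "" rather than the DS object being None. A minimal usage sketch
        # (illustrative comment only, not executed by this test):
        #     record = Ace.read(open("Ace/seq.cap.ace"))
        #     ds = record.contigs[0].reads[0].ds
        #     ds.chromat_file, ds.phd_file, ds.time   # all "" for CAP3 output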
self.assertEqual(record.contigs[0].reads[5].ds.phd_file, "") self.assertEqual(record.contigs[0].reads[5].ds.time, "") self.assertEqual(record.contigs[0].reads[5].ds.chem, "") self.assertEqual(record.contigs[0].reads[5].ds.dye, "") self.assertEqual(record.contigs[0].reads[5].ds.template, "") self.assertEqual(record.contigs[0].reads[5].ds.direction, "") self.assertEqual(record.contigs[0].reads[5].rt, None) self.assertEqual(record.contigs[0].reads[5].wr, None) def test_check_record_parser(self): """Test to check that record parser parses each contig into a record.""" contigs=Ace.parse(self.handle) # First (and only) contig contig = next(contigs) self.assertEqual(len(contig.reads), 6) self.assertEqual(contig.name, "Contig1") self.assertEqual(contig.nbases, 1222) self.assertEqual(contig.nreads, 6) self.assertEqual(contig.nsegments, 0) self.assertEqual(contig.uorc, "U") center = len(contig.sequence)//2 self.assertEqual(contig.sequence[:10], "AGTTTTAGTT") self.assertEqual(contig.sequence[center-5:center+5], "TGTGCGCGCA") self.assertEqual(contig.sequence[-10:], "ATATCACATT") center = len(contig.quality)//2 self.assertEqual(contig.quality[:10], [61, 66, 67, 70, 71, 73, 73, 77, 77, 87]) self.assertEqual(contig.quality[center-5:center+5], [97, 97, 97, 97, 97, 97, 97, 97, 97, 97]) self.assertEqual(contig.quality[-10:], [56, 51, 49, 41, 38, 39, 45, 44, 49, 46]) self.assertEqual(len(contig.af), 6) self.assertEqual(len(contig.bs), 0) self.assertEqual(contig.af[3].name, "R5") self.assertEqual(contig.af[3].coru, "C") self.assertEqual(contig.af[3].padded_start, 320) self.assertEqual(contig.af[5].name, "R6") self.assertEqual(contig.af[5].coru, "C") self.assertEqual(contig.af[5].padded_start, 517) self.assertEqual(contig.bs, []) self.assertEqual(contig.ct, None) self.assertEqual(contig.wa, None) self.assertEqual(len(contig.reads), 6) self.assertEqual(contig.reads[0].rd.name, "R3") self.assertEqual(contig.reads[0].rd.padded_bases, 919) self.assertEqual(contig.reads[0].rd.info_items, 0) self.assertEqual(contig.reads[0].rd.read_tags, 0) center = len(contig.reads[0].rd.sequence)//2 self.assertEqual(contig.reads[0].rd.sequence[:10], "NNNNNNNNNN") self.assertEqual(contig.reads[0].rd.sequence[center-5:center+5], "ATGTGCGCTC") self.assertEqual(contig.reads[0].rd.sequence[-10:], "CAGCTCACCA") self.assertEqual(contig.reads[0].qa.qual_clipping_start, 55) self.assertEqual(contig.reads[0].qa.qual_clipping_end, 916) self.assertEqual(contig.reads[0].qa.align_clipping_start, 55) self.assertEqual(contig.reads[0].qa.align_clipping_end, 916) self.assertEqual(contig.reads[0].ds.chromat_file, "") self.assertEqual(contig.reads[0].ds.phd_file, "") self.assertEqual(contig.reads[0].ds.time, "") self.assertEqual(contig.reads[0].ds.chem, "") self.assertEqual(contig.reads[0].ds.dye, "") self.assertEqual(contig.reads[0].ds.template, "") self.assertEqual(contig.reads[0].ds.direction, "") self.assertEqual(contig.reads[0].rt, None) self.assertEqual(contig.reads[0].wr, None) self.assertEqual(contig.reads[1].rd.name, "R1") self.assertEqual(contig.reads[1].rd.padded_bases, 864) self.assertEqual(contig.reads[1].rd.info_items, 0) self.assertEqual(contig.reads[1].rd.read_tags, 0) center = len(contig.reads[1].rd.sequence)//2 self.assertEqual(contig.reads[1].rd.sequence[:10], "AGCCGGTACC") self.assertEqual(contig.reads[1].rd.sequence[center-5:center+5], "GGGATGGCAC") self.assertEqual(contig.reads[1].rd.sequence[-10:], "GGGCTGGGAG") self.assertEqual(contig.reads[1].qa.qual_clipping_start, 12) self.assertEqual(contig.reads[1].qa.qual_clipping_end, 
863) self.assertEqual(contig.reads[1].qa.align_clipping_start, 12) self.assertEqual(contig.reads[1].qa.align_clipping_end, 863) self.assertEqual(contig.reads[1].ds.chromat_file, "") self.assertEqual(contig.reads[1].ds.phd_file, "") self.assertEqual(contig.reads[1].ds.time, "") self.assertEqual(contig.reads[1].ds.chem, "") self.assertEqual(contig.reads[1].ds.dye, "") self.assertEqual(contig.reads[1].ds.template, "") self.assertEqual(contig.reads[1].ds.direction, "") self.assertEqual(contig.reads[1].rt, None) self.assertEqual(contig.reads[1].wr, None) self.assertEqual(contig.reads[2].rd.name, "R2") self.assertEqual(contig.reads[2].rd.padded_bases, 1026) self.assertEqual(contig.reads[2].rd.info_items, 0) self.assertEqual(contig.reads[2].rd.read_tags, 0) center = len(contig.reads[2].rd.sequence)//2 self.assertEqual(contig.reads[2].rd.sequence[:10], "NNNNNNNNNN") self.assertEqual(contig.reads[2].rd.sequence[center-5:center+5], "GGATGCCTGG") self.assertEqual(contig.reads[2].rd.sequence[-10:], "GGTTGAGGCC") self.assertEqual(contig.reads[2].qa.qual_clipping_start, 55) self.assertEqual(contig.reads[2].qa.qual_clipping_end, 1000) self.assertEqual(contig.reads[2].qa.align_clipping_start, 55) self.assertEqual(contig.reads[2].qa.align_clipping_end, 1000) self.assertEqual(contig.reads[2].ds.chromat_file, "") self.assertEqual(contig.reads[2].ds.phd_file, "") self.assertEqual(contig.reads[2].ds.time, "") self.assertEqual(contig.reads[2].ds.chem, "") self.assertEqual(contig.reads[2].ds.dye, "") self.assertEqual(contig.reads[2].ds.template, "") self.assertEqual(contig.reads[2].ds.direction, "") self.assertEqual(contig.reads[2].rt, None) self.assertEqual(contig.reads[2].wr, None) self.assertEqual(contig.reads[3].rd.name, "R5") self.assertEqual(contig.reads[3].rd.padded_bases, 925) self.assertEqual(contig.reads[3].rd.info_items, 0) self.assertEqual(contig.reads[3].rd.read_tags, 0) center = len(contig.reads[3].rd.sequence)//2 self.assertEqual(contig.reads[3].rd.sequence[:10], "NNNNNNNNNN") self.assertEqual(contig.reads[3].rd.sequence[center-5:center+5], "CCTCCCTACA") self.assertEqual(contig.reads[3].rd.sequence[-10:], "GCCCCCGGNN") self.assertEqual(contig.reads[3].qa.qual_clipping_start, 293) self.assertEqual(contig.reads[3].qa.qual_clipping_end, 874) self.assertEqual(contig.reads[3].qa.align_clipping_start, 293) self.assertEqual(contig.reads[3].qa.align_clipping_end, 874) self.assertEqual(contig.reads[3].ds.chromat_file, "") self.assertEqual(contig.reads[3].ds.phd_file, "") self.assertEqual(contig.reads[3].ds.time, "") self.assertEqual(contig.reads[3].ds.chem, "") self.assertEqual(contig.reads[3].ds.dye, "") self.assertEqual(contig.reads[3].ds.template, "") self.assertEqual(contig.reads[3].ds.direction, "") self.assertEqual(contig.reads[3].rt, None) self.assertEqual(contig.reads[3].wr, None) self.assertEqual(contig.reads[4].rd.name, "R4") self.assertEqual(contig.reads[4].rd.padded_bases, 816) self.assertEqual(contig.reads[4].rd.info_items, 0) self.assertEqual(contig.reads[4].rd.read_tags, 0) center = len(contig.reads[4].rd.sequence)//2 self.assertEqual(contig.reads[4].rd.sequence[:10], "CACTCAGCTC") self.assertEqual(contig.reads[4].rd.sequence[center-5:center+5], "TCCAAAGGGT") self.assertEqual(contig.reads[4].rd.sequence[-10:], "AGCTGAATCG") self.assertEqual(contig.reads[4].qa.qual_clipping_start, 1) self.assertEqual(contig.reads[4].qa.qual_clipping_end, 799) self.assertEqual(contig.reads[4].qa.align_clipping_start, 1) self.assertEqual(contig.reads[4].qa.align_clipping_end, 799) 
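        # A minimal sketch of the streaming interface this method exercises
        # (illustrative comment only):
        #     with open("Ace/seq.cap.ace") as handle:
        #         for contig in Ace.parse(handle):   # yields one contig at a time
        #             print(contig.name, contig.nreads)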
self.assertEqual(contig.reads[4].ds.chromat_file, "") self.assertEqual(contig.reads[4].ds.phd_file, "") self.assertEqual(contig.reads[4].ds.time, "") self.assertEqual(contig.reads[4].ds.chem, "") self.assertEqual(contig.reads[4].ds.dye, "") self.assertEqual(contig.reads[4].ds.template, "") self.assertEqual(contig.reads[4].ds.direction, "") self.assertEqual(contig.reads[4].rt, None) self.assertEqual(contig.reads[4].wr, None) self.assertEqual(contig.reads[5].rd.name, "R6") self.assertEqual(contig.reads[5].rd.padded_bases, 857) self.assertEqual(contig.reads[5].rd.info_items, 0) self.assertEqual(contig.reads[5].rd.read_tags, 0) center = len(contig.reads[5].rd.sequence)//2 self.assertEqual(contig.reads[5].rd.sequence[:10], "CCGGCAGTGA") self.assertEqual(contig.reads[5].rd.sequence[center-5:center+5], "AAAAAAAACC") self.assertEqual(contig.reads[5].rd.sequence[-10:], "NNNNNNNNNN") self.assertEqual(contig.reads[5].qa.qual_clipping_start, 24) self.assertEqual(contig.reads[5].qa.qual_clipping_end, 706) self.assertEqual(contig.reads[5].qa.align_clipping_start, 24) self.assertEqual(contig.reads[5].qa.align_clipping_end, 706) self.assertEqual(contig.reads[5].ds.chromat_file, "") self.assertEqual(contig.reads[5].ds.phd_file, "") self.assertEqual(contig.reads[5].ds.time, "") self.assertEqual(contig.reads[5].ds.chem, "") self.assertEqual(contig.reads[5].ds.dye, "") self.assertEqual(contig.reads[5].ds.template, "") self.assertEqual(contig.reads[5].ds.direction, "") self.assertEqual(contig.reads[5].rt, None) self.assertEqual(contig.reads[5].wr, None) # Make sure there are no more contigs self.assertRaises(StopIteration, next, contigs) class AceTestThree(unittest.TestCase): """Test parsing example ACE input file for CONSED. The sample input file was downloaded from: http://bozeman.mbt.washington.edu/consed/distributions/README.16.0.txt """ def setUp(self): self.handle = open("Ace/consed_sample.ace") def tearDown(self): self.handle.close() def test_check_ACEParser(self): """Test to check that ACEParser can parse the whole file into one record.""" record=Ace.read(self.handle) self.assertEqual(record.ncontigs, 1) self.assertEqual(record.nreads, 8) self.assertEqual(len(record.wa), 1) self.assertEqual(record.wa[0].tag_type, "phrap_params") self.assertEqual(record.wa[0].program, "phrap") self.assertEqual(record.wa[0].date, "990621:161947") self.assertEqual(record.wa[0].info, ['/usr/local/genome/bin/phrap standard.fasta.screen -new_ace -view', 'phrap version 0.990319']) self.assertEqual(len(record.contigs), 1) self.assertEqual(len(record.contigs[0].reads), 8) self.assertEqual(record.contigs[0].name, "Contig1") self.assertEqual(record.contigs[0].nbases, 1475) self.assertEqual(record.contigs[0].nreads, 8) self.assertEqual(record.contigs[0].nsegments, 156) self.assertEqual(record.contigs[0].uorc, "U") center = len(record.contigs[0].sequence)//2 self.assertEqual(record.contigs[0].sequence[:10], "agccccgggc") self.assertEqual(record.contigs[0].sequence[center-5:center+5], "CTTCCCCAGG") self.assertEqual(record.contigs[0].sequence[-10:], "gttgggtttg") center = len(record.contigs[0].quality)//2 self.assertEqual(record.contigs[0].quality[:10], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) self.assertEqual(record.contigs[0].quality[center-5:center+5], [90, 90, 90, 90, 90, 90, 90, 90, 89, 89]) self.assertEqual(record.contigs[0].quality[-10:], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) self.assertEqual(len(record.contigs[0].af), 8) self.assertEqual(len(record.contigs[0].bs), 156) self.assertEqual(record.contigs[0].af[4].name, "K26-291s") 
self.assertEqual(record.contigs[0].af[4].coru, "U") self.assertEqual(record.contigs[0].af[4].padded_start, 828) self.assertEqual(record.contigs[0].af[7].name, "K26-766c") self.assertEqual(record.contigs[0].af[7].coru, "C") self.assertEqual(record.contigs[0].af[7].padded_start, 408) self.assertEqual(record.contigs[0].bs[78].name, "K26-394c") self.assertEqual(record.contigs[0].bs[78].padded_start, 987) self.assertEqual(record.contigs[0].bs[78].padded_end, 987) self.assertEqual(record.contigs[0].bs[155].name, "K26-822c") self.assertEqual(record.contigs[0].bs[155].padded_start, 1303) self.assertEqual(record.contigs[0].bs[155].padded_end, 1475) self.assertEqual(len(record.contigs[0].ct), 3) self.assertEqual(record.contigs[0].ct[0].name, "Contig1") self.assertEqual(record.contigs[0].ct[0].tag_type, "repeat") self.assertEqual(record.contigs[0].ct[0].program, "consed") self.assertEqual(record.contigs[0].ct[0].padded_start, 976) self.assertEqual(record.contigs[0].ct[0].padded_end, 986) self.assertEqual(record.contigs[0].ct[0].date, "971218:180623") self.assertEqual(record.contigs[0].ct[0].info, []) self.assertEqual(record.contigs[0].ct[1].name, "Contig1") self.assertEqual(record.contigs[0].ct[1].tag_type, "comment") self.assertEqual(record.contigs[0].ct[1].program, "consed") self.assertEqual(record.contigs[0].ct[1].padded_start, 996) self.assertEqual(record.contigs[0].ct[1].padded_end, 1007) self.assertEqual(record.contigs[0].ct[1].date, "971218:180623") self.assertEqual(record.contigs[0].ct[1].info, ['This is line 1 of a comment', 'There may be any number of lines']) self.assertEqual(record.contigs[0].ct[2].name, "Contig1") self.assertEqual(record.contigs[0].ct[2].tag_type, "oligo") self.assertEqual(record.contigs[0].ct[2].program, "consed") self.assertEqual(record.contigs[0].ct[2].padded_start, 963) self.assertEqual(record.contigs[0].ct[2].padded_end, 987) self.assertEqual(record.contigs[0].ct[2].date, "971218:180623") self.assertEqual(record.contigs[0].ct[2].info, ['standard.1 acataagacattctaaatttttact 50 U', 'seq from clone']) self.assertEqual(len(record.contigs[0].wa), 1) self.assertEqual(record.contigs[0].wa[0].tag_type, "phrap_params") self.assertEqual(record.contigs[0].wa[0].program, "phrap") self.assertEqual(record.contigs[0].wa[0].date, "990621:161947") self.assertEqual(record.contigs[0].wa[0].info, ['/usr/local/genome/bin/phrap standard.fasta.screen -new_ace -view', 'phrap version 0.990319']) self.assertEqual(len(record.contigs[0].reads), 8) self.assertEqual(record.contigs[0].reads[0].rd.name, "K26-217c") self.assertEqual(record.contigs[0].reads[0].rd.padded_bases, 563) self.assertEqual(record.contigs[0].reads[0].rd.info_items, 0) self.assertEqual(record.contigs[0].reads[0].rd.read_tags, 0) center = len(record.contigs[0].reads[0].rd.sequence)//2 self.assertEqual(record.contigs[0].reads[0].rd.sequence[:10], "tcccCgtgag") self.assertEqual(record.contigs[0].reads[0].rd.sequence[center-5:center+5], "CTCCTGcctg") self.assertEqual(record.contigs[0].reads[0].rd.sequence[-10:], "ggcccccctc") self.assertEqual(record.contigs[0].reads[0].qa.qual_clipping_start, 19) self.assertEqual(record.contigs[0].reads[0].qa.qual_clipping_end, 349) self.assertEqual(record.contigs[0].reads[0].qa.align_clipping_start, 19) self.assertEqual(record.contigs[0].reads[0].qa.align_clipping_end, 424) self.assertEqual(record.contigs[0].reads[0].ds.chromat_file, "K26-217c") self.assertEqual(record.contigs[0].reads[0].ds.phd_file, "K26-217c.phd.1") self.assertEqual(record.contigs[0].reads[0].ds.time, "Thu Sep 12 15:42:38 
1996") self.assertEqual(record.contigs[0].reads[0].ds.chem, "") self.assertEqual(record.contigs[0].reads[0].ds.dye, "") self.assertEqual(record.contigs[0].reads[0].ds.template, "") self.assertEqual(record.contigs[0].reads[0].ds.direction, "") self.assertEqual(record.contigs[0].reads[0].rt, None) self.assertEqual(record.contigs[0].reads[0].wr, None) self.assertEqual(record.contigs[0].reads[1].rd.name, "K26-526t") self.assertEqual(record.contigs[0].reads[1].rd.padded_bases, 687) self.assertEqual(record.contigs[0].reads[1].rd.info_items, 0) self.assertEqual(record.contigs[0].reads[1].rd.read_tags, 0) center = len(record.contigs[0].reads[1].rd.sequence)//2 self.assertEqual(record.contigs[0].reads[1].rd.sequence[:10], "ccgtcctgag") self.assertEqual(record.contigs[0].reads[1].rd.sequence[center-5:center+5], "cacagcccT*") self.assertEqual(record.contigs[0].reads[1].rd.sequence[-10:], "Ttttgtttta") self.assertEqual(record.contigs[0].reads[1].qa.qual_clipping_start, 12) self.assertEqual(record.contigs[0].reads[1].qa.qual_clipping_end, 353) self.assertEqual(record.contigs[0].reads[1].qa.align_clipping_start, 9) self.assertEqual(record.contigs[0].reads[1].qa.align_clipping_end, 572) self.assertEqual(record.contigs[0].reads[1].ds.chromat_file, "K26-526t") self.assertEqual(record.contigs[0].reads[1].ds.phd_file, "K26-526t.phd.1") self.assertEqual(record.contigs[0].reads[1].ds.time, "Thu Sep 12 15:42:33 1996") self.assertEqual(record.contigs[0].reads[1].ds.chem, "") self.assertEqual(record.contigs[0].reads[1].ds.dye, "") self.assertEqual(record.contigs[0].reads[1].ds.template, "") self.assertEqual(record.contigs[0].reads[1].ds.direction, "") self.assertEqual(record.contigs[0].reads[1].rt, None) self.assertEqual(record.contigs[0].reads[1].wr, None) self.assertEqual(record.contigs[0].reads[2].rd.name, "K26-961c") self.assertEqual(record.contigs[0].reads[2].rd.padded_bases, 517) self.assertEqual(record.contigs[0].reads[2].rd.info_items, 0) self.assertEqual(record.contigs[0].reads[2].rd.read_tags, 0) center = len(record.contigs[0].reads[2].rd.sequence)//2 self.assertEqual(record.contigs[0].reads[2].rd.sequence[:10], "aatattaccg") self.assertEqual(record.contigs[0].reads[2].rd.sequence[center-5:center+5], "CAGATGGGTT") self.assertEqual(record.contigs[0].reads[2].rd.sequence[-10:], "ctattcaggg") self.assertEqual(record.contigs[0].reads[2].qa.qual_clipping_start, 20) self.assertEqual(record.contigs[0].reads[2].qa.qual_clipping_end, 415) self.assertEqual(record.contigs[0].reads[2].qa.align_clipping_start, 26) self.assertEqual(record.contigs[0].reads[2].qa.align_clipping_end, 514) self.assertEqual(record.contigs[0].reads[2].ds.chromat_file, "K26-961c") self.assertEqual(record.contigs[0].reads[2].ds.phd_file, "K26-961c.phd.1") self.assertEqual(record.contigs[0].reads[2].ds.time, "Thu Sep 12 15:42:37 1996") self.assertEqual(record.contigs[0].reads[2].ds.chem, "") self.assertEqual(record.contigs[0].reads[2].ds.dye, "") self.assertEqual(record.contigs[0].reads[2].ds.template, "") self.assertEqual(record.contigs[0].reads[2].ds.direction, "") self.assertEqual(record.contigs[0].reads[2].rt, None) self.assertEqual(record.contigs[0].reads[2].wr, None) self.assertEqual(record.contigs[0].reads[3].rd.name, "K26-394c") self.assertEqual(record.contigs[0].reads[3].rd.padded_bases, 628) self.assertEqual(record.contigs[0].reads[3].rd.info_items, 0) self.assertEqual(record.contigs[0].reads[3].rd.read_tags, 0) center = len(record.contigs[0].reads[3].rd.sequence)//2 self.assertEqual(record.contigs[0].reads[3].rd.sequence[:10], 
"ctgcgtatcg") self.assertEqual(record.contigs[0].reads[3].rd.sequence[center-5:center+5], "AGGATTGCTT") self.assertEqual(record.contigs[0].reads[3].rd.sequence[-10:], "aaccctgggt") self.assertEqual(record.contigs[0].reads[3].qa.qual_clipping_start, 18) self.assertEqual(record.contigs[0].reads[3].qa.qual_clipping_end, 368) self.assertEqual(record.contigs[0].reads[3].qa.align_clipping_start, 11) self.assertEqual(record.contigs[0].reads[3].qa.align_clipping_end, 502) self.assertEqual(record.contigs[0].reads[3].ds.chromat_file, "K26-394c") self.assertEqual(record.contigs[0].reads[3].ds.phd_file, "K26-394c.phd.1") self.assertEqual(record.contigs[0].reads[3].ds.time, "Thu Sep 12 15:42:32 1996") self.assertEqual(record.contigs[0].reads[3].ds.chem, "") self.assertEqual(record.contigs[0].reads[3].ds.dye, "") self.assertEqual(record.contigs[0].reads[3].ds.template, "") self.assertEqual(record.contigs[0].reads[3].ds.direction, "") self.assertEqual(record.contigs[0].reads[3].rt, None) self.assertEqual(record.contigs[0].reads[3].wr, None) self.assertEqual(record.contigs[0].reads[4].rd.name, "K26-291s") self.assertEqual(record.contigs[0].reads[4].rd.padded_bases, 556) self.assertEqual(record.contigs[0].reads[4].rd.info_items, 0) self.assertEqual(record.contigs[0].reads[4].rd.read_tags, 0) center = len(record.contigs[0].reads[4].rd.sequence)//2 self.assertEqual(record.contigs[0].reads[4].rd.sequence[:10], "gaggatcgct") self.assertEqual(record.contigs[0].reads[4].rd.sequence[center-5:center+5], "GTgcgaggat") self.assertEqual(record.contigs[0].reads[4].rd.sequence[-10:], "caggcagatg") self.assertEqual(record.contigs[0].reads[4].qa.qual_clipping_start, 11) self.assertEqual(record.contigs[0].reads[4].qa.qual_clipping_end, 373) self.assertEqual(record.contigs[0].reads[4].qa.align_clipping_start, 11) self.assertEqual(record.contigs[0].reads[4].qa.align_clipping_end, 476) self.assertEqual(record.contigs[0].reads[4].ds.chromat_file, "K26-291s") self.assertEqual(record.contigs[0].reads[4].ds.phd_file, "K26-291s.phd.1") self.assertEqual(record.contigs[0].reads[4].ds.time, "Thu Sep 12 15:42:31 1996") self.assertEqual(record.contigs[0].reads[4].ds.chem, "") self.assertEqual(record.contigs[0].reads[4].ds.dye, "") self.assertEqual(record.contigs[0].reads[4].ds.template, "") self.assertEqual(record.contigs[0].reads[4].ds.direction, "") self.assertEqual(record.contigs[0].reads[4].rt, None) self.assertEqual(record.contigs[0].reads[4].wr, None) self.assertEqual(record.contigs[0].reads[5].rd.name, "K26-822c") self.assertEqual(record.contigs[0].reads[5].rd.padded_bases, 593) self.assertEqual(record.contigs[0].reads[5].rd.info_items, 0) self.assertEqual(record.contigs[0].reads[5].rd.read_tags, 0) center = len(record.contigs[0].reads[5].rd.sequence)//2 self.assertEqual(record.contigs[0].reads[5].rd.sequence[:10], "ggggatccg*") self.assertEqual(record.contigs[0].reads[5].rd.sequence[center-5:center+5], "GCaAgacCCt") self.assertEqual(record.contigs[0].reads[5].rd.sequence[-10:], "gttgggtttg") self.assertEqual(record.contigs[0].reads[5].qa.qual_clipping_start, 25) self.assertEqual(record.contigs[0].reads[5].qa.qual_clipping_end, 333) self.assertEqual(record.contigs[0].reads[5].qa.align_clipping_start, 16) self.assertEqual(record.contigs[0].reads[5].qa.align_clipping_end, 593) self.assertEqual(record.contigs[0].reads[5].ds.chromat_file, "K26-822c") self.assertEqual(record.contigs[0].reads[5].ds.phd_file, "K26-822c.phd.1") self.assertEqual(record.contigs[0].reads[5].ds.time, "Thu Sep 12 15:42:36 1996") 
self.assertEqual(record.contigs[0].reads[5].ds.chem, "") self.assertEqual(record.contigs[0].reads[5].ds.dye, "") self.assertEqual(record.contigs[0].reads[5].ds.template, "") self.assertEqual(record.contigs[0].reads[5].ds.direction, "") self.assertEqual(record.contigs[0].reads[5].rt, None) self.assertEqual(record.contigs[0].reads[5].wr, None) self.assertEqual(record.contigs[0].reads[6].rd.name, "K26-572c") self.assertEqual(record.contigs[0].reads[6].rd.padded_bases, 594) self.assertEqual(record.contigs[0].reads[6].rd.info_items, 0) self.assertEqual(record.contigs[0].reads[6].rd.read_tags, 0) center = len(record.contigs[0].reads[6].rd.sequence)//2 self.assertEqual(record.contigs[0].reads[6].rd.sequence[:10], "agccccgggc") self.assertEqual(record.contigs[0].reads[6].rd.sequence[center-5:center+5], "ggatcACATA") self.assertEqual(record.contigs[0].reads[6].rd.sequence[-10:], "aatagtaaca") self.assertEqual(record.contigs[0].reads[6].qa.qual_clipping_start, 249) self.assertEqual(record.contigs[0].reads[6].qa.qual_clipping_end, 584) self.assertEqual(record.contigs[0].reads[6].qa.align_clipping_start, 1) self.assertEqual(record.contigs[0].reads[6].qa.align_clipping_end, 586) self.assertEqual(record.contigs[0].reads[6].ds.chromat_file, "K26-572c") self.assertEqual(record.contigs[0].reads[6].ds.phd_file, "K26-572c.phd.1") self.assertEqual(record.contigs[0].reads[6].ds.time, "Thu Sep 12 15:42:34 1996") self.assertEqual(record.contigs[0].reads[6].ds.chem, "") self.assertEqual(record.contigs[0].reads[6].ds.dye, "") self.assertEqual(record.contigs[0].reads[6].ds.template, "") self.assertEqual(record.contigs[0].reads[6].ds.direction, "") self.assertEqual(record.contigs[0].reads[6].rt, None) self.assertEqual(record.contigs[0].reads[6].wr, None) self.assertEqual(record.contigs[0].reads[7].rd.name, "K26-766c") self.assertEqual(record.contigs[0].reads[7].rd.padded_bases, 603) self.assertEqual(record.contigs[0].reads[7].rd.info_items, 0) self.assertEqual(record.contigs[0].reads[7].rd.read_tags, 0) center = len(record.contigs[0].reads[7].rd.sequence)//2 self.assertEqual(record.contigs[0].reads[7].rd.sequence[:10], "gaataattgg") self.assertEqual(record.contigs[0].reads[7].rd.sequence[center-5:center+5], "TggCCCATCT") self.assertEqual(record.contigs[0].reads[7].rd.sequence[-10:], "gaaccacacg") self.assertEqual(record.contigs[0].reads[7].qa.qual_clipping_start, 240) self.assertEqual(record.contigs[0].reads[7].qa.qual_clipping_end, 584) self.assertEqual(record.contigs[0].reads[7].qa.align_clipping_start, 126) self.assertEqual(record.contigs[0].reads[7].qa.align_clipping_end, 583) self.assertEqual(record.contigs[0].reads[7].ds.chromat_file, "K26-766c") self.assertEqual(record.contigs[0].reads[7].ds.phd_file, "K26-766c.phd.1") self.assertEqual(record.contigs[0].reads[7].ds.time, "Thu Sep 12 15:42:35 1996") self.assertEqual(record.contigs[0].reads[7].ds.chem, "") self.assertEqual(record.contigs[0].reads[7].ds.dye, "") self.assertEqual(record.contigs[0].reads[7].ds.template, "") self.assertEqual(record.contigs[0].reads[7].ds.direction, "") self.assertEqual(record.contigs[0].reads[7].rt, None) self.assertEqual(record.contigs[0].reads[7].wr, None) def test_check_record_parser(self): """Test to check that record parser parses each contig into a record.""" contigs=Ace.parse(self.handle) # First (and only) contig contig = next(contigs) self.assertEqual(len(contig.reads), 8) self.assertEqual(contig.name, "Contig1") self.assertEqual(contig.nbases, 1475) self.assertEqual(contig.nreads, 8) self.assertEqual(contig.nsegments, 
156) self.assertEqual(contig.uorc, "U") center = len(contig.sequence)//2 self.assertEqual(contig.sequence[:10], "agccccgggc") self.assertEqual(contig.sequence[center-5:center+5], "CTTCCCCAGG") self.assertEqual(contig.sequence[-10:], "gttgggtttg") center = len(contig.quality)//2 self.assertEqual(contig.quality[:10], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) self.assertEqual(contig.quality[center-5:center+5], [90, 90, 90, 90, 90, 90, 90, 90, 89, 89]) self.assertEqual(contig.quality[-10:], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) self.assertEqual(len(contig.af), 8) self.assertEqual(len(contig.bs), 156) self.assertEqual(contig.af[4].name, "K26-291s") self.assertEqual(contig.af[4].coru, "U") self.assertEqual(contig.af[4].padded_start, 828) self.assertEqual(contig.af[7].name, "K26-766c") self.assertEqual(contig.af[7].coru, "C") self.assertEqual(contig.af[7].padded_start, 408) self.assertEqual(contig.bs[78].name, "K26-394c") self.assertEqual(contig.bs[78].padded_start, 987) self.assertEqual(contig.bs[78].padded_end, 987) self.assertEqual(contig.bs[155].name, "K26-822c") self.assertEqual(contig.bs[155].padded_start, 1303) self.assertEqual(contig.bs[155].padded_end, 1475) self.assertEqual(len(contig.ct), 3) self.assertEqual(contig.ct[0].name, "Contig1") self.assertEqual(contig.ct[0].tag_type, "repeat") self.assertEqual(contig.ct[0].program, "consed") self.assertEqual(contig.ct[0].padded_start, 976) self.assertEqual(contig.ct[0].padded_end, 986) self.assertEqual(contig.ct[0].date, "971218:180623") self.assertEqual(contig.ct[0].info, []) self.assertEqual(contig.ct[1].name, "Contig1") self.assertEqual(contig.ct[1].tag_type, "comment") self.assertEqual(contig.ct[1].program, "consed") self.assertEqual(contig.ct[1].padded_start, 996) self.assertEqual(contig.ct[1].padded_end, 1007) self.assertEqual(contig.ct[1].date, "971218:180623") self.assertEqual(contig.ct[1].info, ['This is line 1 of a comment', 'There may be any number of lines']) self.assertEqual(contig.ct[2].name, "Contig1") self.assertEqual(contig.ct[2].tag_type, "oligo") self.assertEqual(contig.ct[2].program, "consed") self.assertEqual(contig.ct[2].padded_start, 963) self.assertEqual(contig.ct[2].padded_end, 987) self.assertEqual(contig.ct[2].date, "971218:180623") self.assertEqual(contig.ct[2].info, ['standard.1 acataagacattctaaatttttact 50 U', 'seq from clone']) self.assertEqual(len(contig.wa), 1) self.assertEqual(contig.wa[0].tag_type, "phrap_params") self.assertEqual(contig.wa[0].program, "phrap") self.assertEqual(contig.wa[0].date, "990621:161947") self.assertEqual(contig.wa[0].info, ['/usr/local/genome/bin/phrap standard.fasta.screen -new_ace -view', 'phrap version 0.990319']) self.assertEqual(len(contig.reads), 8) self.assertEqual(contig.reads[0].rd.name, "K26-217c") self.assertEqual(contig.reads[0].rd.padded_bases, 563) self.assertEqual(contig.reads[0].rd.info_items, 0) self.assertEqual(contig.reads[0].rd.read_tags, 0) center = len(contig.reads[0].rd.sequence)//2 self.assertEqual(contig.reads[0].rd.sequence[:10], "tcccCgtgag") self.assertEqual(contig.reads[0].rd.sequence[center-5:center+5], "CTCCTGcctg") self.assertEqual(contig.reads[0].rd.sequence[-10:], "ggcccccctc") self.assertEqual(contig.reads[0].qa.qual_clipping_start, 19) self.assertEqual(contig.reads[0].qa.qual_clipping_end, 349) self.assertEqual(contig.reads[0].qa.align_clipping_start, 19) self.assertEqual(contig.reads[0].qa.align_clipping_end, 424) self.assertEqual(contig.reads[0].ds.chromat_file, "K26-217c") self.assertEqual(contig.reads[0].ds.phd_file, "K26-217c.phd.1") 
self.assertEqual(contig.reads[0].ds.time, "Thu Sep 12 15:42:38 1996") self.assertEqual(contig.reads[0].ds.chem, "") self.assertEqual(contig.reads[0].ds.dye, "") self.assertEqual(contig.reads[0].ds.template, "") self.assertEqual(contig.reads[0].ds.direction, "") self.assertEqual(contig.reads[0].rt, None) self.assertEqual(contig.reads[0].wr, None) self.assertEqual(contig.reads[1].rd.name, "K26-526t") self.assertEqual(contig.reads[1].rd.padded_bases, 687) self.assertEqual(contig.reads[1].rd.info_items, 0) self.assertEqual(contig.reads[1].rd.read_tags, 0) center = len(contig.reads[1].rd.sequence)//2 self.assertEqual(contig.reads[1].rd.sequence[:10], "ccgtcctgag") self.assertEqual(contig.reads[1].rd.sequence[center-5:center+5], "cacagcccT*") self.assertEqual(contig.reads[1].rd.sequence[-10:], "Ttttgtttta") self.assertEqual(contig.reads[1].qa.qual_clipping_start, 12) self.assertEqual(contig.reads[1].qa.qual_clipping_end, 353) self.assertEqual(contig.reads[1].qa.align_clipping_start, 9) self.assertEqual(contig.reads[1].qa.align_clipping_end, 572) self.assertEqual(contig.reads[1].ds.chromat_file, "K26-526t") self.assertEqual(contig.reads[1].ds.phd_file, "K26-526t.phd.1") self.assertEqual(contig.reads[1].ds.time, "Thu Sep 12 15:42:33 1996") self.assertEqual(contig.reads[1].ds.chem, "") self.assertEqual(contig.reads[1].ds.dye, "") self.assertEqual(contig.reads[1].ds.template, "") self.assertEqual(contig.reads[1].ds.direction, "") self.assertEqual(contig.reads[1].rt, None) self.assertEqual(contig.reads[1].wr, None) self.assertEqual(contig.reads[2].rd.name, "K26-961c") self.assertEqual(contig.reads[2].rd.padded_bases, 517) self.assertEqual(contig.reads[2].rd.info_items, 0) self.assertEqual(contig.reads[2].rd.read_tags, 0) center = len(contig.reads[2].rd.sequence)//2 self.assertEqual(contig.reads[2].rd.sequence[:10], "aatattaccg") self.assertEqual(contig.reads[2].rd.sequence[center-5:center+5], "CAGATGGGTT") self.assertEqual(contig.reads[2].rd.sequence[-10:], "ctattcaggg") self.assertEqual(contig.reads[2].qa.qual_clipping_start, 20) self.assertEqual(contig.reads[2].qa.qual_clipping_end, 415) self.assertEqual(contig.reads[2].qa.align_clipping_start, 26) self.assertEqual(contig.reads[2].qa.align_clipping_end, 514) self.assertEqual(contig.reads[2].ds.chromat_file, "K26-961c") self.assertEqual(contig.reads[2].ds.phd_file, "K26-961c.phd.1") self.assertEqual(contig.reads[2].ds.time, "Thu Sep 12 15:42:37 1996") self.assertEqual(contig.reads[2].ds.chem, "") self.assertEqual(contig.reads[2].ds.dye, "") self.assertEqual(contig.reads[2].ds.template, "") self.assertEqual(contig.reads[2].ds.direction, "") self.assertEqual(contig.reads[2].rt, None) self.assertEqual(contig.reads[2].wr, None) self.assertEqual(contig.reads[3].rd.name, "K26-394c") self.assertEqual(contig.reads[3].rd.padded_bases, 628) self.assertEqual(contig.reads[3].rd.info_items, 0) self.assertEqual(contig.reads[3].rd.read_tags, 0) center = len(contig.reads[3].rd.sequence)//2 self.assertEqual(contig.reads[3].rd.sequence[:10], "ctgcgtatcg") self.assertEqual(contig.reads[3].rd.sequence[center-5:center+5], "AGGATTGCTT") self.assertEqual(contig.reads[3].rd.sequence[-10:], "aaccctgggt") self.assertEqual(contig.reads[3].qa.qual_clipping_start, 18) self.assertEqual(contig.reads[3].qa.qual_clipping_end, 368) self.assertEqual(contig.reads[3].qa.align_clipping_start, 11) self.assertEqual(contig.reads[3].qa.align_clipping_end, 502) self.assertEqual(contig.reads[3].ds.chromat_file, "K26-394c") self.assertEqual(contig.reads[3].ds.phd_file, "K26-394c.phd.1") 
self.assertEqual(contig.reads[3].ds.time, "Thu Sep 12 15:42:32 1996") self.assertEqual(contig.reads[3].ds.chem, "") self.assertEqual(contig.reads[3].ds.dye, "") self.assertEqual(contig.reads[3].ds.template, "") self.assertEqual(contig.reads[3].ds.direction, "") self.assertEqual(contig.reads[3].rt, None) self.assertEqual(contig.reads[3].wr, None) self.assertEqual(contig.reads[4].rd.name, "K26-291s") self.assertEqual(contig.reads[4].rd.padded_bases, 556) self.assertEqual(contig.reads[4].rd.info_items, 0) self.assertEqual(contig.reads[4].rd.read_tags, 0) center = len(contig.reads[4].rd.sequence)//2 self.assertEqual(contig.reads[4].rd.sequence[:10], "gaggatcgct") self.assertEqual(contig.reads[4].rd.sequence[center-5:center+5], "GTgcgaggat") self.assertEqual(contig.reads[4].rd.sequence[-10:], "caggcagatg") self.assertEqual(contig.reads[4].qa.qual_clipping_start, 11) self.assertEqual(contig.reads[4].qa.qual_clipping_end, 373) self.assertEqual(contig.reads[4].qa.align_clipping_start, 11) self.assertEqual(contig.reads[4].qa.align_clipping_end, 476) self.assertEqual(contig.reads[4].ds.chromat_file, "K26-291s") self.assertEqual(contig.reads[4].ds.phd_file, "K26-291s.phd.1") self.assertEqual(contig.reads[4].ds.time, "Thu Sep 12 15:42:31 1996") self.assertEqual(contig.reads[4].ds.chem, "") self.assertEqual(contig.reads[4].ds.dye, "") self.assertEqual(contig.reads[4].ds.template, "") self.assertEqual(contig.reads[4].ds.direction, "") self.assertEqual(contig.reads[4].rt, None) self.assertEqual(contig.reads[4].wr, None) self.assertEqual(contig.reads[5].rd.name, "K26-822c") self.assertEqual(contig.reads[5].rd.padded_bases, 593) self.assertEqual(contig.reads[5].rd.info_items, 0) self.assertEqual(contig.reads[5].rd.read_tags, 0) center = len(contig.reads[5].rd.sequence)//2 self.assertEqual(contig.reads[5].rd.sequence[:10], "ggggatccg*") self.assertEqual(contig.reads[5].rd.sequence[center-5:center+5], "GCaAgacCCt") self.assertEqual(contig.reads[5].rd.sequence[-10:], "gttgggtttg") self.assertEqual(contig.reads[5].qa.qual_clipping_start, 25) self.assertEqual(contig.reads[5].qa.qual_clipping_end, 333) self.assertEqual(contig.reads[5].qa.align_clipping_start, 16) self.assertEqual(contig.reads[5].qa.align_clipping_end, 593) self.assertEqual(contig.reads[5].ds.chromat_file, "K26-822c") self.assertEqual(contig.reads[5].ds.phd_file, "K26-822c.phd.1") self.assertEqual(contig.reads[5].ds.time, "Thu Sep 12 15:42:36 1996") self.assertEqual(contig.reads[5].ds.chem, "") self.assertEqual(contig.reads[5].ds.dye, "") self.assertEqual(contig.reads[5].ds.template, "") self.assertEqual(contig.reads[5].ds.direction, "") self.assertEqual(contig.reads[5].rt, None) self.assertEqual(contig.reads[5].wr, None) self.assertEqual(contig.reads[6].rd.name, "K26-572c") self.assertEqual(contig.reads[6].rd.padded_bases, 594) self.assertEqual(contig.reads[6].rd.info_items, 0) self.assertEqual(contig.reads[6].rd.read_tags, 0) center = len(contig.reads[6].rd.sequence)//2 self.assertEqual(contig.reads[6].rd.sequence[:10], "agccccgggc") self.assertEqual(contig.reads[6].rd.sequence[center-5:center+5], "ggatcACATA") self.assertEqual(contig.reads[6].rd.sequence[-10:], "aatagtaaca") self.assertEqual(contig.reads[6].qa.qual_clipping_start, 249) self.assertEqual(contig.reads[6].qa.qual_clipping_end, 584) self.assertEqual(contig.reads[6].qa.align_clipping_start, 1) self.assertEqual(contig.reads[6].qa.align_clipping_end, 586) self.assertEqual(contig.reads[6].ds.chromat_file, "K26-572c") self.assertEqual(contig.reads[6].ds.phd_file, "K26-572c.phd.1") 
self.assertEqual(contig.reads[6].ds.time, "Thu Sep 12 15:42:34 1996") self.assertEqual(contig.reads[6].ds.chem, "") self.assertEqual(contig.reads[6].ds.dye, "") self.assertEqual(contig.reads[6].ds.template, "") self.assertEqual(contig.reads[6].ds.direction, "") self.assertEqual(contig.reads[6].rt, None) self.assertEqual(contig.reads[6].wr, None) self.assertEqual(contig.reads[7].rd.name, "K26-766c") self.assertEqual(contig.reads[7].rd.padded_bases, 603) self.assertEqual(contig.reads[7].rd.info_items, 0) self.assertEqual(contig.reads[7].rd.read_tags, 0) center = len(contig.reads[7].rd.sequence)//2 self.assertEqual(contig.reads[7].rd.sequence[:10], "gaataattgg") self.assertEqual(contig.reads[7].rd.sequence[center-5:center+5], "TggCCCATCT") self.assertEqual(contig.reads[7].rd.sequence[-10:], "gaaccacacg") self.assertEqual(contig.reads[7].qa.qual_clipping_start, 240) self.assertEqual(contig.reads[7].qa.qual_clipping_end, 584) self.assertEqual(contig.reads[7].qa.align_clipping_start, 126) self.assertEqual(contig.reads[7].qa.align_clipping_end, 583) self.assertEqual(contig.reads[7].ds.chromat_file, "K26-766c") self.assertEqual(contig.reads[7].ds.phd_file, "K26-766c.phd.1") self.assertEqual(contig.reads[7].ds.time, "Thu Sep 12 15:42:35 1996") self.assertEqual(contig.reads[7].ds.chem, "") self.assertEqual(contig.reads[7].ds.dye, "") self.assertEqual(contig.reads[7].ds.template, "") self.assertEqual(contig.reads[7].ds.direction, "") self.assertEqual(contig.reads[7].rt, None) self.assertEqual(contig.reads[7].wr, None) # Make sure there are no more contigs self.assertRaises(StopIteration, next, contigs) if __name__ == "__main__": runner = unittest.TextTestRunner(verbosity=2) unittest.main(testRunner=runner)
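# A minimal usage sketch (not part of the test suite) of the Bio.Sequencing.Ace
# API these tests exercise; the ACE file name below is an assumption, and any
# phrap/consed-produced ACE file would do.
def _usage_sketch(path="example.ace"):
    from Bio.Sequencing import Ace
    with open(path) as handle:
        record = Ace.read(handle)  # the whole ACE file as a single record
    print(len(record.contigs))
    with open(path) as handle:
        for contig in Ace.parse(handle):  # or stream one contig at a time
            print(contig.name, contig.nreads)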
updownlife/multipleK
dependencies/biopython-1.65/Tests/test_Ace.py
Python
gpl-2.0
118,129
[ "Biopython" ]
5419cee7ddbc56968ecfbe327bb63d2e4f7e8df3386a6f9f18841088b24d629d
# pylint: disable=bad-continuation """ Certificate HTML webview. """ import logging import urllib from datetime import datetime import pytz from uuid import uuid4 from django.conf import settings from django.contrib.auth.models import User from django.http import Http404, HttpResponse from django.template import RequestContext from django.utils.encoding import smart_str from django.utils.translation import ugettext as _ from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from badges.events.course_complete import get_completion_badge from badges.utils import badges_enabled from certificates.api import ( emit_certificate_event, get_active_web_certificate, get_certificate_footer_context, get_certificate_header_context, get_certificate_template, get_certificate_url, has_html_certificates_enabled ) from certificates.models import ( CertificateHtmlViewConfiguration, CertificateSocialNetworks, CertificateStatuses, GeneratedCertificate ) from courseware.access import has_access from edxmako.shortcuts import render_to_response from edxmako.template import Template from eventtracking import tracker from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers from openedx.core.lib.courses import course_image_url from student.models import LinkedInAddToProfileConfiguration from util import organizations_helpers as organization_api from util.views import handle_500 from xmodule.modulestore.django import modulestore from xmodule.modulestore.exceptions import ItemNotFoundError log = logging.getLogger(__name__) def get_certificate_description(mode, certificate_type, platform_name): """ :return certificate_type_description on the basis of current mode """ certificate_type_description = None if mode == 'honor': # Translators: This text describes the 'Honor' course certificate type. certificate_type_description = _("An {cert_type} certificate signifies that a " "learner has agreed to abide by the honor code established by {platform_name} " "and has completed all of the required tasks for this course under its " "guidelines.").format(cert_type=certificate_type, platform_name=platform_name) elif mode == 'verified': # Translators: This text describes the 'ID Verified' course certificate type, which is a higher level of # verification offered by edX. This type of verification is useful for professional education/certifications certificate_type_description = _("A {cert_type} certificate signifies that a " "learner has agreed to abide by the honor code established by {platform_name} " "and has completed all of the required tasks for this course under its " "guidelines. A {cert_type} certificate also indicates that the " "identity of the learner has been checked and " "is valid.").format(cert_type=certificate_type, platform_name=platform_name) elif mode == 'xseries': # Translators: This text describes the 'XSeries' course certificate type. 
An XSeries is a collection of # courses related to each other in a meaningful way, such as a specific topic or theme, or even an organization certificate_type_description = _("An {cert_type} certificate demonstrates a high level of " "achievement in a program of study, and includes verification of " "the student's identity.").format(cert_type=certificate_type) return certificate_type_description def _update_certificate_context(context, user_certificate, platform_name): """ Build up the certificate web view context using the provided values (Helper method to keep the view clean) """ # Populate dynamic output values using the course/certificate data loaded above certificate_type = context.get('certificate_type') # Override the defaults with any mode-specific static values context['certificate_id_number'] = user_certificate.verify_uuid context['certificate_verify_url'] = "{prefix}{uuid}{suffix}".format( prefix=context.get('certificate_verify_url_prefix'), uuid=user_certificate.verify_uuid, suffix=context.get('certificate_verify_url_suffix') ) # Translators: The format of the date includes the full name of the month context['certificate_date_issued'] = _('{month} {day}, {year}').format( month=user_certificate.modified_date.strftime("%B"), day=user_certificate.modified_date.day, year=user_certificate.modified_date.year ) # Translators: This text represents the verification of the certificate context['document_meta_description'] = _('This is a valid {platform_name} certificate for {user_name}, ' 'who participated in {partner_short_name} {course_number}').format( platform_name=platform_name, user_name=context['accomplishment_copy_name'], partner_short_name=context['organization_short_name'], course_number=context['course_number'] ) # Translators: This text is bound to the HTML 'title' element of the page and appears in the browser title bar context['document_title'] = _("{partner_short_name} {course_number} Certificate | {platform_name}").format( partner_short_name=context['organization_short_name'], course_number=context['course_number'], platform_name=platform_name ) # Translators: This text fragment appears after the student's name (displayed in a large font) on the certificate # screen. The text describes the accomplishment represented by the certificate information displayed to the user context['accomplishment_copy_description_full'] = _("successfully completed, received a passing grade, and was " "awarded this {platform_name} {certificate_type} " "Certificate of Completion in ").format( platform_name=platform_name, certificate_type=context.get("certificate_type")) certificate_type_description = get_certificate_description(user_certificate.mode, certificate_type, platform_name) if certificate_type_description: context['certificate_type_description'] = certificate_type_description # Translators: This text describes the purpose (and therefore, value) of a course certificate context['certificate_info_description'] = _("{platform_name} acknowledges achievements through " "certificates, which are awarded for course activities " "that {platform_name} students complete.").format( platform_name=platform_name, tos_url=context.get('company_tos_url'), verified_cert_url=context.get('company_verified_certificate_url')) def _update_context_with_basic_info(context, course_id, platform_name, configuration): """ Updates context dictionary with basic info required before rendering simplest certificate templates. 
""" context['platform_name'] = platform_name context['course_id'] = course_id # Update the view context with the default ConfigurationModel settings context.update(configuration.get('default', {})) # Translators: 'All rights reserved' is a legal term used in copyrighting to protect published content reserved = _("All rights reserved") context['copyright_text'] = u'&copy; {year} {platform_name}. {reserved}.'.format( year=datetime.now(pytz.timezone(settings.TIME_ZONE)).year, platform_name=platform_name, reserved=reserved ) # Translators: This text is bound to the HTML 'title' element of the page and appears # in the browser title bar when a requested certificate is not found or recognized context['document_title'] = _("Invalid Certificate") # Translators: The &amp; characters represent an ampersand character and can be ignored context['company_tos_urltext'] = _("Terms of Service &amp; Honor Code") # Translators: A 'Privacy Policy' is a legal document/statement describing a website's use of personal information context['company_privacy_urltext'] = _("Privacy Policy") # Translators: This line appears as a byline to a header image and describes the purpose of the page context['logo_subtitle'] = _("Certificate Validation") # Translators: Accomplishments describe the awards/certifications obtained by students on this platform context['accomplishment_copy_about'] = _('About {platform_name} Accomplishments').format( platform_name=platform_name ) # Translators: This line appears on the page just before the generation date for the certificate context['certificate_date_issued_title'] = _("Issued On:") # Translators: The Certificate ID Number is an alphanumeric value unique to each individual certificate context['certificate_id_number_title'] = _('Certificate ID Number') context['certificate_info_title'] = _('About {platform_name} Certificates').format( platform_name=platform_name ) context['certificate_verify_title'] = _("How {platform_name} Validates Student Certificates").format( platform_name=platform_name ) # Translators: This text describes the validation mechanism for a certificate file (known as GPG security) context['certificate_verify_description'] = _('Certificates issued by {platform_name} are signed by a gpg key so ' 'that they can be validated independently by anyone with the ' '{platform_name} public key. 
For independent verification, ' '{platform_name} uses what is called a ' '"detached signature".').format(platform_name=platform_name) context['certificate_verify_urltext'] = _("Validate this certificate for yourself") # Translators: This text describes (at a high level) the mission and charter of the edX platform and organization context['company_about_description'] = _("{platform_name} offers interactive online classes and MOOCs.").format( platform_name=platform_name) context['company_about_title'] = _("About {platform_name}").format(platform_name=platform_name) context['company_about_urltext'] = _("Learn more about {platform_name}").format(platform_name=platform_name) context['company_courselist_urltext'] = _("Learn with {platform_name}").format(platform_name=platform_name) context['company_careers_urltext'] = _("Work at {platform_name}").format(platform_name=platform_name) context['company_contact_urltext'] = _("Contact {platform_name}").format(platform_name=platform_name) # Translators: This text appears near the top of the certificate and describes the guarantee provided by edX context['document_banner'] = _("{platform_name} acknowledges the following student accomplishment").format( platform_name=platform_name ) def _update_course_context(request, context, course, platform_name): """ Updates context dictionary with course info. """ context['full_course_image_url'] = request.build_absolute_uri(course_image_url(course)) course_title_from_cert = context['certificate_data'].get('course_title', '') accomplishment_copy_course_name = course_title_from_cert if course_title_from_cert else course.display_name context['accomplishment_copy_course_name'] = accomplishment_copy_course_name course_number = course.display_coursenumber if course.display_coursenumber else course.number context['course_number'] = course_number if context['organization_long_name']: # Translators: This text represents the description of the course context['accomplishment_copy_course_description'] = _('a course of study offered by {partner_short_name}, ' 'an online learning initiative of ' '{partner_long_name}.').format( partner_short_name=context['organization_short_name'], partner_long_name=context['organization_long_name'], platform_name=platform_name) else: # Translators: This text represents the description of the course context['accomplishment_copy_course_description'] = _('a course of study offered by ' '{partner_short_name}.').format( partner_short_name=context['organization_short_name'], platform_name=platform_name) def _update_social_context(request, context, course, user, user_certificate, platform_name): """ Updates context dictionary with info required for social sharing. """ share_settings = configuration_helpers.get_value("SOCIAL_SHARING_SETTINGS", settings.SOCIAL_SHARING_SETTINGS) context['facebook_share_enabled'] = share_settings.get('CERTIFICATE_FACEBOOK', False) context['facebook_app_id'] = configuration_helpers.get_value("FACEBOOK_APP_ID", settings.FACEBOOK_APP_ID) context['facebook_share_text'] = share_settings.get( 'CERTIFICATE_FACEBOOK_TEXT', _("I completed the {course_title} course on {platform_name}.").format( course_title=context['accomplishment_copy_course_name'], platform_name=platform_name ) ) context['twitter_share_enabled'] = share_settings.get('CERTIFICATE_TWITTER', False) context['twitter_share_text'] = share_settings.get( 'CERTIFICATE_TWITTER_TEXT', _("I completed a course at {platform_name}.
Take a look at my certificate.").format( platform_name=platform_name ) ) share_url = request.build_absolute_uri(get_certificate_url(course_id=course.id, uuid=user_certificate.verify_uuid)) context['share_url'] = share_url twitter_url = '' if context.get('twitter_share_enabled', False): twitter_url = 'https://twitter.com/intent/tweet?text={twitter_share_text}&url={share_url}'.format( twitter_share_text=smart_str(context['twitter_share_text']), share_url=urllib.quote_plus(smart_str(share_url)) ) context['twitter_url'] = twitter_url context['linked_in_url'] = None # If enabled, show the LinkedIn "add to profile" button # Clicking this button sends the user to LinkedIn where they # can add the certificate information to their profile. linkedin_config = LinkedInAddToProfileConfiguration.current() linkedin_share_enabled = share_settings.get('CERTIFICATE_LINKEDIN', linkedin_config.enabled) if linkedin_share_enabled: context['linked_in_url'] = linkedin_config.add_to_profile_url( course.id, course.display_name, user_certificate.mode, smart_str(share_url) ) def _update_context_with_user_info(context, user, user_certificate): """ Updates context dictionary with user related info. """ user_fullname = user.profile.name context['username'] = user.username context['course_mode'] = user_certificate.mode context['accomplishment_user_id'] = user.id context['accomplishment_copy_name'] = user_fullname context['accomplishment_copy_username'] = user.username context['accomplishment_more_title'] = _("More Information About {user_name}'s Certificate:").format( user_name=user_fullname ) # Translators: This line is displayed to a user who has completed a course and achieved a certification context['accomplishment_banner_opening'] = _("{fullname}, you earned a certificate!").format( fullname=user_fullname ) # Translators: This line congratulates the user and instructs them to share their accomplishment on social networks context['accomplishment_banner_congrats'] = _("Congratulations! This page summarizes what " "you accomplished. Show it off to family, friends, and colleagues " "in your social and professional networks.") # Translators: This line leads the reader to understand more about the certificate that a student has been awarded context['accomplishment_copy_more_about'] = _("More about {fullname}'s accomplishment").format( fullname=user_fullname ) def _get_user_certificate(request, user, course_key, course, preview_mode=None): """ Retrieves user's certificate from db. Creates one in case of preview mode. Returns None if there is no certificate generated for given user otherwise returns `GeneratedCertificate` instance. """ user_certificate = None if preview_mode: # certificate is being previewed from studio if has_access(request.user, 'instructor', course) or has_access(request.user, 'staff', course): user_certificate = GeneratedCertificate( mode=preview_mode, verify_uuid=unicode(uuid4().hex), modified_date=datetime.now().date() ) else: # certificate is being viewed by learner or public try: user_certificate = GeneratedCertificate.eligible_certificates.get( user=user, course_id=course_key, status=CertificateStatuses.downloadable ) except GeneratedCertificate.DoesNotExist: pass return user_certificate def _track_certificate_events(request, context, course, user, user_certificate): """ Tracks web certificate view related events. 
""" # Badge Request Event Tracking Logic course_key = course.location.course_key if 'evidence_visit' in request.GET: badge_class = get_completion_badge(course_key, user) if not badge_class: log.warning('Visit to evidence URL for badge, but badges not configured for course "%s"', course_key) badges = [] else: badges = badge_class.get_for_user(user) if badges: # There should only ever be one of these. badge = badges[0] tracker.emit( 'edx.badge.assertion.evidence_visited', { 'badge_name': badge.badge_class.display_name, 'badge_slug': badge.badge_class.slug, 'badge_generator': badge.backend, 'issuing_component': badge.badge_class.issuing_component, 'user_id': user.id, 'course_id': unicode(course_key), 'enrollment_mode': badge.badge_class.mode, 'assertion_id': badge.id, 'assertion_image_url': badge.image_url, 'assertion_json_url': badge.assertion_url, 'issuer': badge.data.get('issuer'), } ) else: log.warn( "Could not find badge for %s on course %s.", user.id, course_key, ) # track certificate evidence_visited event for analytics when certificate_user and accessing_user are different if request.user and request.user.id != user.id: emit_certificate_event('evidence_visited', user, unicode(course.id), course, { 'certificate_id': user_certificate.verify_uuid, 'enrollment_mode': user_certificate.mode, 'social_network': CertificateSocialNetworks.linkedin }) def _render_certificate_template(request, context, course, user_certificate): """ Picks appropriate certificate templates and renders it. """ if settings.FEATURES.get('CUSTOM_CERTIFICATE_TEMPLATES_ENABLED', False): custom_template = get_certificate_template(course.id, user_certificate.mode) if custom_template: template = Template( custom_template, output_encoding='utf-8', input_encoding='utf-8', default_filters=['decode.utf8'], encoding_errors='replace', ) context = RequestContext(request, context) return HttpResponse(template.render(context)) return render_to_response("certificates/valid.html", context) def _update_configuration_context(context, configuration): """ Site Configuration will need to be able to override any hard coded content that was put into the context in the _update_certificate_context() call above. For example the 'company_about_description' talks about edX, which we most likely do not want to keep in configurations. So we need to re-apply any configuration/content that we are sourcing from the database. This is somewhat duplicative of the code at the beginning of this method, but we need the configuration at the top as some error code paths require that to be set up early on in the pipeline """ config_key = configuration_helpers.get_value('domain_prefix') config = configuration.get("microsites", {}) if config_key and config: context.update(config.get(config_key, {})) def _update_badge_context(context, course, user): """ Updates context with badge info. """ badge = None if badges_enabled() and course.issue_badges: badges = get_completion_badge(course.location.course_key, user).get_for_user(user) if badges: badge = badges[0] context['badge'] = badge def _update_organization_context(context, course): """ Updates context with organization related info. """ partner_long_name, organization_logo = None, None partner_short_name = course.display_organization if course.display_organization else course.org organizations = organization_api.get_course_organizations(course_id=course.id) if organizations: #TODO Need to add support for multiple organizations, Currently we are interested in the first one. 
organization = organizations[0] partner_long_name = organization.get('name', partner_long_name) partner_short_name = organization.get('short_name', partner_short_name) organization_logo = organization.get('logo', None) context['organization_long_name'] = partner_long_name context['organization_short_name'] = partner_short_name context['accomplishment_copy_course_org'] = partner_short_name context['organization_logo'] = organization_logo def render_cert_by_uuid(request, certificate_uuid): """ This public view generates an HTML representation of the specified certificate """ try: certificate = GeneratedCertificate.eligible_certificates.get( verify_uuid=certificate_uuid, status=CertificateStatuses.downloadable ) return render_html_view(request, certificate.user.id, unicode(certificate.course_id)) except GeneratedCertificate.DoesNotExist: raise Http404 @handle_500( template_path="certificates/server-error.html", test_func=lambda request: request.GET.get('preview', None) ) def render_html_view(request, user_id, course_id): """ This public view generates an HTML representation of the specified user and course If a certificate is not available, we display a "Sorry!" screen instead """ try: user_id = int(user_id) except ValueError: raise Http404 preview_mode = request.GET.get('preview', None) platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME) configuration = CertificateHtmlViewConfiguration.get_config() # Create the initial view context, bootstrapping with Django settings and passed-in values context = {} _update_context_with_basic_info(context, course_id, platform_name, configuration) invalid_template_path = 'certificates/invalid.html' # Kick the user back to the "Invalid" screen if the feature is disabled if not has_html_certificates_enabled(course_id): log.info( "Invalid cert: HTML certificates disabled for %s. User id: %d", course_id, user_id, ) return render_to_response(invalid_template_path, context) # Load the course and user objects try: course_key = CourseKey.from_string(course_id) user = User.objects.get(id=user_id) course = modulestore().get_course(course_key) # For any other expected exceptions, kick the user back to the "Invalid" screen except (InvalidKeyError, ItemNotFoundError, User.DoesNotExist) as exception: error_str = ( "Invalid cert: error finding course %s or user with id " "%d. Specific error: %s" ) log.info(error_str, course_id, user_id, str(exception)) return render_to_response(invalid_template_path, context) # Load user's certificate user_certificate = _get_user_certificate(request, user, course_key, course, preview_mode) if not user_certificate: log.info( "Invalid cert: User %d does not have eligible cert for %s.", user_id, course_id, ) return render_to_response(invalid_template_path, context) # Get the active certificate configuration for this course # If we do not have an active certificate, we'll need to send the user to the "Invalid" screen # Passing in the 'preview' parameter, if specified, will return a configuration, if defined active_configuration = get_active_web_certificate(course, preview_mode) if active_configuration is None: log.info( "Invalid cert: course %s does not have an active configuration. 
User id: %d", course_id, user_id, ) return render_to_response(invalid_template_path, context) context['certificate_data'] = active_configuration # Append/Override the existing view context values with any mode-specific ConfigurationModel values context.update(configuration.get(user_certificate.mode, {})) # Append organization info _update_organization_context(context, course) # Append course info _update_course_context(request, context, course, platform_name) # Append user info _update_context_with_user_info(context, user, user_certificate) # Append social sharing info _update_social_context(request, context, course, user, user_certificate, platform_name) # Append/Override the existing view context values with certificate specific values _update_certificate_context(context, user_certificate, platform_name) # Append badge info _update_badge_context(context, course, user) # Append site configuration overrides _update_configuration_context(context, configuration) # Add certificate header/footer data to current context context.update(get_certificate_header_context(is_secure=request.is_secure())) context.update(get_certificate_footer_context()) # Append/Override the existing view context values with any course-specific static values from Advanced Settings context.update(course.cert_html_view_overrides) # Track certificate view events _track_certificate_events(request, context, course, user, user_certificate) # FINALLY, render appropriate certificate return _render_certificate_template(request, context, course, user_certificate)
miptliot/edx-platform
lms/djangoapps/certificates/views/webview.py
Python
agpl-3.0
28,089
[ "VisIt" ]
47b3a11a0ec6ab5e7f65122e7ae64c7a963f1e5b49d2ec4761b64219d421ad8c
#!/usr/bin/env python3 #* This file is part of the MOOSE framework #* https://www.mooseframework.org #* #* All rights reserved, see COPYRIGHT for full restrictions #* https://github.com/idaholab/moose/blob/master/COPYRIGHT #* #* Licensed under LGPL 2.1, please see LICENSE for details #* https://www.gnu.org/licenses/lgpl-2.1.html from dbclass import ThermoDB import readers.reader_utils as reader_utils def readDatabase(dblist): """ Read an EQ3/6 database file """ dataset = [] activity_model = None fugacity_model = None logk_model = 'maier-kelly' logk_model_eqn = 'a_0 ln(T) + a_1 + a_2 T + a_3 / T + a_4 / T^2' temperatures = [] pressures = [] adh = [] bdh = [] bdot = [] elements = {} neutral_species = {} basis_species = {} redox_couples = {} secondary_species = {} free_electron = {} mineral_species = {} gas_species = {} oxides = {} missing_value = '500.0000' line = 0 parsing_header = True # Helper function for interpolating missing array values # (to reduce the number of parameters required during use) def fillValues(vals): return reader_utils.fillMissingValues(temperatures, vals, logk_model, missing_value) while line < len(dblist): # Read the header information while parsing_header: dataset.append(dblist[line]) # if dblist[line].split(':')[0] == 'Output package': if dblist[line].strip().startswith('Output package'): assert dblist[line].split(':')[1].strip() == 'eq3', 'EQ3/6 reader detected database in wrong format' if dblist[line].strip().startswith('Data set'): if dblist[line].split(':')[1].strip() in ['com', 'sup']: activity_model = 'debye-huckel' else: print('Activity model in supplied database not supported') exit() line = line+1 if dblist[line].strip().startswith('+--'): parsing_header = False line = line+1 # Read the miscellaneous data if dblist[line].strip().startswith('Miscellaneous') and dblist[line+1].strip().startswith('+--'): reading_miscellaneous = True line = line+2 while not dblist[line].strip().startswith('+--'): if dblist[line].strip().startswith('temperatures'): line = line+1 while dblist[line].strip()[0].isnumeric(): temperatures.extend([float(item) for item in dblist[line].split()]) line = line+1 if dblist[line].strip().startswith('pressures'): line = line+1 while dblist[line].strip()[0].isnumeric(): pressures.extend([float(item) for item in dblist[line].split()]) line = line+1 if activity_model == 'debye-huckel': # Read Debye-Huckel activity coefficients if 'debye huckel a' in dblist[line]: line = line+1 while len(adh) < len(temperatures): adh.extend([float(item) for item in dblist[line].split()]) line = line+1 if 'debye huckel b' in dblist[line]: line = line+1 while len(bdh) < len(temperatures): bdh.extend([float(item) for item in dblist[line].split()]) line = line+1 if 'bdot' in dblist[line]: line = line+1 while len(bdot) < len(temperatures): bdot.extend([float(item) for item in dblist[line].split()]) line = line+1 # Read the neutral species activity coefficients if 'co2' in dblist[line]: if 'co2' not in neutral_species: neutral_species['co2'] = {} key = dblist[line].split()[1].strip() line = line+1 vals = [] while not dblist[line].split()[0].isalpha(): vals.extend([float(item) for item in dblist[line].split()]) line = line+1 neutral_species['co2'][key] = vals line = line-1 if 'h2o' in dblist[line]: if 'h2o' not in neutral_species: neutral_species['h2o'] = {} key = dblist[line].split()[1].strip() line = line+1 vals = [] while len(vals) < len(temperatures): vals.extend([float(item) for item in dblist[line].split()]) line = line+1 neutral_species['h2o'][key] = vals line = 
line-1 line = line+1 # Read the elements if dblist[line].strip().startswith('elements') and dblist[line+1].strip().startswith('+--'): line = line+2 while not dblist[line].strip().startswith('+--'): if dblist[line].split(): element, molecular_weight = dblist[line].split() elements[element] = {} elements[element]['molecular weight'] = float(molecular_weight) line = line+1 # Read the basis species if dblist[line].strip().startswith('basis species') and dblist[line+1].strip().startswith('+--'): line = line+2 # This section ends when it finds a line like # +-------- # Some text # +-------- parsing_basis = True while parsing_basis: species = dblist[line].split()[0] basis_species[species] = {} basis_species[species]['elements'] = {} line = line+1 while not dblist[line].strip().startswith('+--'): # Read molar weight if 'mol.wt.' in dblist[line]: basis_species[species]['molecular weight'] = float(dblist[line].split()[3]) # Read radius if 'DHazero' in dblist[line]: basis_species[species]['radius'] = float(dblist[line].split()[3]) # Read charge if 'charge' in dblist[line]: basis_species[species]['charge'] = float(dblist[line].split()[2]) # Read elements if 'element(s)' in dblist[line]: num_elements = int(dblist[line].split()[0]) while len(basis_species[species]['elements']) < num_elements: line = line+1 data = dblist[line].split() for i in range(0, len(data), 2): basis_species[species]['elements'][data[i+1]] = float(data[i]) line = line+1 if dblist[line].strip().startswith('+--') and dblist[line+2].strip().startswith('+--'): parsing_basis = False line = line+1 # Read the auxiliary species (redox couples) if dblist[line].strip().startswith('auxiliary basis species') and dblist[line+1].strip().startswith('+--'): line = line+2 # This section ends when it finds a line like # +-------- # Some text # +-------- parsing_auxiliary = True while parsing_auxiliary: species = dblist[line].split()[0] redox_couples[species] = {} redox_couples[species]['elements'] = {} redox_couples[species]['species'] = {} line = line+1 while not dblist[line].strip().startswith('+--'): # Read molecular weight if 'mol.wt.' 
in dblist[line]: redox_couples[species]['molecular weight'] = float(dblist[line].split()[3]) # Read radius if 'DHazero' in dblist[line]: redox_couples[species]['radius'] = float(dblist[line].split()[3]) # Read charge if 'charge' in dblist[line]: redox_couples[species]['charge'] = float(dblist[line].split()[2]) # Read elements if 'element' in dblist[line]: num_elements = int(dblist[line].split()[0]) while len(redox_couples[species]['elements']) < num_elements: line = line+1 data = dblist[line].split() for i in range(0, len(data), 2): redox_couples[species]['elements'][data[i+1]] = float(data[i]) # Species in redox couples # (note: EQ3/6 includes species in this list, so we remove it) if 'species' in dblist[line]: num_species = int(dblist[line].split()[0]) while len(redox_couples[species]['species']) < num_species-1: line = line+1 data = dblist[line].split() for i in range(0, len(data), 2): if data[i+1] != species: redox_couples[species]['species'][data[i+1]] = float(data[i]) # Equilibrium constant values if 'logK grid' in dblist[line]: line = line+1 vals = [] while len(vals) < len(temperatures): vals.extend(dblist[line].split()) line = line+1 vals, note = fillValues(vals) redox_couples[species]['logk'] = vals if note: redox_couples[species]['note'] = note line = line+1 if dblist[line].strip().startswith('+--') and dblist[line+2].strip().startswith('+--'): parsing_auxiliary = False line = line+1 # Read the aqueous species if dblist[line].strip().startswith('aqueous species') and dblist[line+1].strip().startswith('+--'): line = line+2 # This section ends when it finds a line like # +-------- # Some text # +-------- parsing_secondary = True while parsing_secondary: species = dblist[line].split()[0] secondary_species[species] = {} secondary_species[species]['elements'] = {} secondary_species[species]['species'] = {} line = line+1 while not dblist[line].strip().startswith('+--'): # Read molecular weight if 'mol.wt.' 
in dblist[line]: secondary_species[species]['molecular weight'] = float(dblist[line].split()[3]) # Read radius if 'DHazero' in dblist[line]: secondary_species[species]['radius'] = float(dblist[line].split()[3]) # Read charge if 'charge' in dblist[line]: secondary_species[species]['charge'] = float(dblist[line].split()[2]) # Read elements if 'element' in dblist[line]: num_elements = int(dblist[line].split()[0]) while len(secondary_species[species]['elements']) < num_elements: line = line+1 data = dblist[line].split() for i in range(0, len(data), 2): secondary_species[species]['elements'][data[i+1]] = float(data[i]) # Basis species in aqueous species # (note: EQ3/6 includes aqueous species in this list, so we remove it) if 'species' in dblist[line]: num_species = int(dblist[line].split()[0]) while len(secondary_species[species]['species']) < num_species-1: line = line+1 data = dblist[line].split() for i in range(0, len(data), 2): if data[i+1] != species: secondary_species[species]['species'][data[i+1]] = float(data[i]) # Equilibrium constant values if 'logK grid' in dblist[line]: line = line+1 vals = [] while len(vals) < len(temperatures): vals.extend(dblist[line].split()) line = line+1 vals, note = fillValues(vals) secondary_species[species]['logk'] = vals if note: secondary_species[species]['note'] = note line = line+1 if dblist[line].strip().startswith('+--') and dblist[line+2].strip().startswith('+--'): parsing_secondary = False line = line+1 # Read the mineral (solid) species if dblist[line].strip().startswith('solids') and dblist[line+1].strip().startswith('+--'): line = line+2 # This section ends when it finds a line like # +-------- # Some text # +-------- parsing_mineral = True while parsing_mineral: species = dblist[line].split()[0] mineral_species[species] = {} mineral_species[species]['elements'] = {} mineral_species[species]['species'] = {} line = line+1 while not dblist[line].strip().startswith('+--'): # Read molecular weight if 'mol.wt.' 
in dblist[line]: mineral_species[species]['molecular weight'] = float(dblist[line].split()[3]) # Read molar volume if 'V0PrTr' in dblist[line]: mineral_species[species]['molar volume'] = float(dblist[line].split()[2]) # Read elements if 'element' in dblist[line]: num_elements = int(dblist[line].split()[0]) while len(mineral_species[species]['elements']) < num_elements: line = line+1 data = dblist[line].split() for i in range(0, len(data), 2): mineral_species[species]['elements'][data[i+1]] = float(data[i]) # Basis species in solid species # (note: EQ3/6 includes solid species in this list, so we remove it) if 'species' in dblist[line]: num_species = int(dblist[line].split()[0]) while len(mineral_species[species]['species']) < num_species-1: line = line+1 data = dblist[line].split() for i in range(0, len(data), 2): if data[i+1] != species: mineral_species[species]['species'][data[i+1]] = float(data[i]) # Equilibrium constant values if 'logK grid' in dblist[line]: line = line+1 vals = [] while len(vals) < len(temperatures): vals.extend(dblist[line].split()) line = line+1 vals, note = fillValues(vals) mineral_species[species]['logk'] = vals if note: mineral_species[species]['note'] = note line = line+1 if dblist[line].strip().startswith('+--') and dblist[line+2].strip().startswith('+--'): parsing_mineral = False line = line+1 # Read the gas species if dblist[line].strip().startswith('gases') and dblist[line+1].strip().startswith('+--'): line = line+2 # This section ends when it finds a line like # +-------- # Some text # +-------- parsing_gas = True while parsing_gas: species = dblist[line].split()[0] gas_species[species] = {} gas_species[species]['elements'] = {} gas_species[species]['species'] = {} line = line+1 while not dblist[line].strip().startswith('+--'): # Read molecular weight if 'mol.wt.' 
in dblist[line]: gas_species[species]['molecular weight'] = float(dblist[line].split()[3]) # Read elements if 'element' in dblist[line]: num_elements = int(dblist[line].split()[0]) while len(gas_species[species]['elements']) < num_elements: line = line+1 data = dblist[line].split() for i in range(0, len(data), 2): gas_species[species]['elements'][data[i+1]] = float(data[i]) # Basis species in gas species # (note: EQ3/6 includes gas species in this list, so we remove it) if 'species in' in dblist[line]: num_species = int(dblist[line].split()[0]) while len(gas_species[species]['species']) < num_species-1: line = line+1 data = dblist[line].split() for i in range(0, len(data), 2): if data[i+1] != species or not data[i].startswith('-1'): gas_species[species]['species'][data[i+1]] = float(data[i]) # Equilibrium constant values if 'logK grid' in dblist[line]: line = line+1 vals = [] while len(vals) < len(temperatures): vals.extend(dblist[line].split()) line = line+1 vals, note = fillValues(vals) gas_species[species]['logk'] = vals if note: gas_species[species]['note'] = note line = line+1 if dblist[line].strip().startswith('+--') and dblist[line+2].strip().startswith('+--'): parsing_gas = False line = line+1 line = line+1 # After parsing all of the data, save it in a ThermoDB class db = ThermoDB() db.format = 'eq36' db.header = dataset db.activity_model = activity_model if activity_model == 'debye-huckel': db.adh = adh db.bdh = bdh db.bdot = bdot db.fugacity_model = fugacity_model db.logk_model = logk_model db.logk_model_eqn = logk_model_eqn db.temperatures = temperatures db.pressures = pressures db.neutral_species = neutral_species db.elements = elements db.basis_species = basis_species db.secondary_species = secondary_species db.mineral_species = mineral_species db.gas_species = gas_species db.redox_couples = redox_couples db.oxides = oxides return db
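# Hedged usage sketch: hand readDatabase() the lines of an EQ3/6 database
# file. The file name below is an assumption (EQ3/6 databases are
# conventionally named data0.xxx).
if __name__ == "__main__":
    with open("data0.com") as f:
        db = readDatabase(f.read().splitlines())
    print(db.format, len(db.temperatures), len(db.basis_species), len(db.mineral_species))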
nuclear-wizard/moose
modules/geochemistry/python/readers/eq36_reader.py
Python
lgpl-2.1
20,307
[ "MOOSE" ]
b4415c23d1e859a6c357b263ef0d141159f18506451d8e8d4003431d85ff3d16
#!/usr/bin/python # -*- coding:utf-8-*- """ Created on Wed Apr 12 09:38:44 2017 @author: etienne cuierrier This script will do a least-square fitting of Gay-Berne parameters from ab initio results The rotationally averaged results from the 4 orthogonal configurations are required (average_X.txt,average_ll.txt,average_z.txt,average_T.txt) with the first columns as the intermolecular distance in angstrom and the second the energy in kJ/mol. ****IMPORTANT****: In this script, the T configuration well depth had to be fitted independently, since the least-square algorithm could not converge otherwise. For the graph, it uses the same l and d as the other configurations Usage: from gayberne_fit import gayberne_fit fit_gb = gayberne_fit() # Fitting the parameters fit_gb.fit() # To calculate the GB potential with LAMMPS fit_gb.lammps_df() # To visualize with the desired methods fit_gb.visualize(methods=["MP2", "average", "GB", "MD"]) Requires: numpy matplotlib pandas scipy lammps as a Python library gayberne.py (see theory folder) References: Berardi, Roberto, C. Fava, and Claudio Zannoni. "A generalized Gay-Berne intermolecular potential for biaxial particles." Chemical Physics Letters 236.4-5 (1995): 462-468. The fitting procedure is modified from https://mail.scipy.org/pipermail/scipy-user/2013-April/034406.html TODO: Reduce the number of err_ functions to 1 Simpler/shorter move_molecule functions Calculate the average from the rotated molecules """ import numpy as np import math import matplotlib.pyplot as plt import glob import pandas as pd import scipy.optimize from collections import OrderedDict from lammps import lammps from gayberne import gb class gayberne_fit(): def __init__(self, temperature=800, initial_global=[20, 4.69, 19.91, 12, 1, 5.22], initial_T=[10, 4, 15, 5.44], cutoff=30, initial_mu=-0.5): """ Reads the files and initializes the parameters """ #%% loads all the files with .txt extension to create a data frame # Create a list with all files with glob self.fnames = glob.glob("*.txt") # Pandas dataframe with the filename as column name. In the # .txt files, x is the distance in angstrom and y the energy in kJ/mol self.df = pd.concat([pd.read_csv(fname, names=[fname.split(".")[0] + "_distance", fname.split(".")[0] + "_energy"], sep=' |\t', engine='python') for fname in self.fnames], axis=1) # The separators in the files are spaces or tabs # temperature for the Boltzmann weight for the fit self.temp = temperature # initial global parameters to optimize for the curve fit self.initial_global = initial_global self.initial_T = initial_T # configuration with the color for the graph: red, green, blue, yellow self.colors = {"ll": "r", "X": "g", "z": "b", "T": "y"} # marker with the size if there are different types of graphs (ex: # average results, all the results, lammps... 
self.marker = OrderedDict([("o", 1), ("*", 6), ("-", 1), ("x", 3)]) # cutoff for lammps interaction self.cutoff = cutoff # Estimate for mu which will be solved numerically self.initial_mu = initial_mu #%% Functions for the orthogonal configuration of the Gay-Berne potential def configuration(self, x, p, config): """ The Gay-Berne potential for the orthogonal configuration Parameters ---- x: array of distances p: list of parameters for the Gay-Berne potential config: string of the desired orthogonal configuration Returns --- The calculated potential """ if config == "ll": es, d, l, c = p return 4 * es * ((c / (x - d + c))**12 - (c / (x - d + c))**6) if config == "X": eo, d, l, c = p return 4 * eo * ((c / (x - d + c))**12 - (c / (x - d + c))**6) if config == "z": ee, d, l, c = p return 4 * ee * ((c / (x - l + c))**12 - (c / (x - l + c))**6) if config == "T": et, d, l, c = p return 4 * et * ((c / (x - np.sqrt((d**2 + l**2) / 2) + c))**12 - (c / (x - np.sqrt((d**2 + l**2) / 2) + c))**6) #%% individual error function to optimize by least-square fitting with Boltzmann weight def err_ll(self, p, x, y): return (self.configuration(x, p, "ll") - y) * np.exp(-y / 8.31 * 1000 / self.temp) / np.sum(-np.array(self.df["average_ll_energy"].dropna()) / 8.31 * 1000 / self.temp) def err_X(self, p, x, y): return (self.configuration(x, p, "X") - y) * np.exp(-y / 8.31 * 1000 / self.temp) / np.sum(-np.array(self.df["average_X_energy"].dropna()) / 8.31 * 1000 / self.temp) def err_z(self, p, x, y): return (self.configuration(x, p, "z") - y) * np.exp(-y / 8.31 * 1000 / self.temp) / np.sum(-np.array(self.df["average_z_energy"].dropna()) / 8.31 * 1000 / self.temp) def err_T(self, p, x, y): return (self.configuration(x, p, "T") - y) * np.exp(-y / 8.31 * 1000 / self.temp) / np.sum(-np.array(self.df["average_T_energy"].dropna()) / 8.31 * 1000 / self.temp) #%% global error function def err_global(self, p, x1, x2, x3, y1, y2, y3): """ Global error function to optimize by least-square fitting. The T configuration is commented out because of convergence problems. 
Parameters ---- p: list of the Gay-Berne parameters to optimize [epsilon_ll,d,l,epsilon_X,epsilon_z,sigma_c] x1,x2,x3: arrays of distances for the configurations y1,y2,y3: arrays of energies for the configurations Returns ---- The concatenated error for each configuration """ # Shared and independent parameters for each configuration : # epsilon_ll, d, l, epsilon_X,epsilon_z, sigma_c ll_parameter = p[0], p[1], p[2], p[5] X_parameter = p[3], p[1], p[2], p[5] z_parameter = p[4], p[1], p[2], p[5] # T_parameter = p[6], p[1], p[2], p[5] err_ll = self.err_ll(ll_parameter, x1, y1) err_X = self.err_X(X_parameter, x2, y2) err_z = self.err_z(z_parameter, x3, y3) # err_T = err_T(p4,x4,y4) return np.concatenate((err_ll, err_X, err_z)) #%% Function to do the least-square fitting def fit(self): """ Least-square fitting of the Gay-Berne potential Returns: ---- Prints the optimized Gay-Berne parameters """ best_global, ier = scipy.optimize.leastsq(self.err_global, self.initial_global, args=(np.array(self.df["average_ll_distance"].dropna()), np.array( self.df["average_X_distance"].dropna()), np.array( self.df["average_z_distance"].dropna()), np.array( self.df["average_ll_energy"].dropna()), np.array( self.df["average_X_energy"].dropna()), np.array(self.df["average_z_energy"].dropna()))) best_T, ier = scipy.optimize.leastsq(self.err_T, self.initial_T, args=(np.array(self.df["average_T_distance"].dropna()), np.array(self.df["average_T_energy"].dropna()))) # Optimized Gay-Berne parameters self.ll_depth = best_global[0] self.X_depth = best_global[3] self.z_depth = best_global[4] self.T_depth = best_T[0] self.width = best_global[1] self.length = best_global[2] self.sigma = best_global[5] # Nu parameter in the Gay-Berne potential logbase = (self.width**2 + self.length**2) / \ (2 * self.length * self.width) self.nu = math.log(self.ll_depth / self.X_depth, logbase) # Epsilon_z in the Gay-Berne potential self.epsilon_z = self.z_depth / \ (self.width / logbase)**self.nu # Function to optimize the mu parameter in the Gay-Berne potential def mu_equation(mu): return -self.T_depth + (2. 
/ ((1 / self.X_depth)**(1 / mu) + (1 / self.epsilon_z)**(1 / mu)))**mu self.mu = scipy.optimize.fsolve(mu_equation, self.initial_mu)[0] print("Global fit results") print("ll-by-ll well depth: ", self.ll_depth, "X well depth: ", self.X_depth, "z to z well depth: ", self.z_depth, "T well depth:", self.T_depth, "epsilon z: ", self.epsilon_z, "width d: ", self.width, "length l: ", self.length, "sigma: ", self.sigma, "nu: ", self.nu, "mu: ", self.mu) # Assign each parameter to the corresponding orthogonal configuration def configuration_parameter(self, config): """ Assignment of the parameters to each orthogonal configuration Parameters: ---- config: string of the configuration Returns: list of the parameters """ if config == "ll": return [self.ll_depth, self.width, self.length, self.sigma] if config == "X": return [self.X_depth, self.width, self.length, self.sigma] if config == "z": return [self.z_depth, self.width, self.length, self.sigma] if config == "T": return [self.T_depth, self.width, self.length, self.sigma] # LAMMPS results from the optimized parameters def lammps_df(self): """ Function to calculate the Gay-Berne potential with LAMMPS """ gb_ini = gb(self.width, self.length, self.sigma, self.X_depth, self.epsilon_z, self.nu, self.mu, self.cutoff, x_range=1000) lammps_df = gb_ini.lammps_gb() self.df = pd.concat([self.df, lammps_df], axis=1) def visualize(self, methods=["MP2", "average", "GB", "MD"]): """ Function to visualize with matplotlib the ab initio results, the fitted curves and the LAMMPS potential """ # add to dataframe the GB potential with the defined parameters, it # will use the lammps intermolecular distance as x since the increment # is small for configuration in self.colors: self.df["GB_" + str(configuration) + "_distance"] = self.df[ "MD_" + str(configuration) + "_distance"] self.df["GB_" + str(configuration) + "_energy"] = self.configuration(self.df["GB_" + str( configuration) + "_distance"], self.configuration_parameter(str(configuration)), str(configuration)) i = 0 for method in methods: for configuration in self.colors: # Remove values with an energy greater than 20 kJ/mol from the # graph df_graph = self.df.where( self.df[str(method) + "_" + str(configuration) + "_energy"] < 20) plt.plot(df_graph[str(method) + "_" + str(configuration) + "_distance"].dropna() / 10., df_graph[str(method) + "_" + str(configuration) + "_energy"].dropna(), list(self.marker.keys())[i], label=method + " " + str(configuration), color=self.colors[configuration], ms=list(self.marker.values())[i]) plt.ylabel("Potential (kJ/mol)") plt.xlabel("Distance (nm)") i += 1 plt.legend(loc="lower right", ncol=2) plt.show()
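# A minimal usage sketch (an editorial addition, not part of the original
# script): it assumes the four averaged files (average_X.txt, average_ll.txt,
# average_z.txt, average_T.txt) sit in the working directory, and that the
# `lammps` Python library and gayberne.py are importable, as stated above.
if __name__ == "__main__":
    fit_gb = gayberne_fit()
    fit_gb.fit()        # least-square fit of the Gay-Berne parameters
    fit_gb.lammps_df()  # tabulate the fitted potential with LAMMPS
    fit_gb.visualize(methods=["MP2", "average", "GB", "MD"])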
EtiCui/Msc-UdeS
dataAnalysis/gayberne_fit.py
Python
mit
11,778
[ "LAMMPS" ]
8cc6ceb77895a779a5d6c8a434100b803efaf3074107bad78de28ce481ff7476
# Copyright (C) 2013 Oskar Maier # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # author Oskar Maier # version r0.1.0 # since 2014-03-20 # status Release # built-in modules # third-party modules import numpy from scipy.ndimage import _ni_support from scipy.ndimage.filters import convolve1d # own modules # code def immerkaer_local(input, size, output=None, mode="reflect", cval=0.0): r""" Estimate the local noise. The input image is assumed to have additive zero mean Gaussian noise. The Immerkaer noise estimation is applied to the image locally over an N-dimensional cube of side-length size. The size of the region should be sufficiently large for a stable noise estimate. Parameters ---------- input : array_like Array of which to estimate the noise. size : integer The local region's side length. output : ndarray, optional The `output` parameter passes an array in which to store the filter output. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0 Returns ------- sigmas : array_like Map of the estimated standard deviation of the image's Gaussian noise per voxel. Notes ----- Does not take the voxel spacing into account. Works well with medium to strong noise. Tends to underestimate for low noise levels. See also -------- immerkaer """ output = _ni_support._get_output(output, input) footprint = numpy.asarray([1] * size) # build nd-kernel to acquire square root of sum of squared elements kernel = [1, -2, 1] for _ in range(input.ndim - 1): kernel = numpy.tensordot(kernel, [1, -2, 1], 0) divider = numpy.square(numpy.abs(kernel)).sum() # 36 for 1d, 216 for 3D, etc. # compute laplace of input laplace = separable_convolution(input, [1, -2, 1], numpy.double, mode, cval) # compute factor factor = numpy.sqrt(numpy.pi / 2.) * 1. / ( numpy.sqrt(divider) * numpy.power(footprint.size, laplace.ndim) ) # locally sum laplacian values separable_convolution(numpy.abs(laplace), footprint, output, mode, cval) output *= factor return output def immerkaer(input, mode="reflect", cval=0.0): r""" Estimate the global noise. The input image is assumed to have additive zero mean Gaussian noise. Using a convolution with a Laplacian operator and a subsequent averaging, the standard deviation sigma of this noise is estimated. This estimation is global, i.e. the noise is assumed to be globally homogeneous over the image. Implementation based on [1]_. Immerkaer suggested a Laplacian-based 2D kernel:: [[ 1, -2, 1], [-2, 4, -2], [ 1, -2, 1]] , which is separable and can therefore be applied by consecutive convolutions with the one-dimensional kernel [1, -2, 1]. We generalize from this 1D-kernel to an ND-kernel by applying N consecutive convolutions with the 1D-kernel along all N dimensions. 
This is equivalent to convolving the image with an ND-kernel constructed by calling >>> kernel1d = numpy.asarray([1, -2, 1]) >>> kernel = kernel1d.copy() >>> for _ in range(input.ndim - 1): ... kernel = numpy.tensordot(kernel, kernel1d, 0) Parameters ---------- input : array_like Array of which to estimate the noise. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0 Returns ------- sigma : float The estimated standard deviation of the image's Gaussian noise. Notes ----- Does not take the voxel spacing into account. Works well with medium to strong noise. Tends to underestimate for low noise levels. See also -------- immerkaer_local References ---------- .. [1] John Immerkaer, "Fast Noise Variance Estimation", Computer Vision and Image Understanding, Volume 64, Issue 2, September 1996, Pages 300-302, ISSN 1077-3142 """ # build nd-kernel to acquire square root of sum of squared elements kernel = [1, -2, 1] for _ in range(input.ndim - 1): kernel = numpy.tensordot(kernel, [1, -2, 1], 0) divider = numpy.square(numpy.abs(kernel)).sum() # 36 for 1d, 216 for 3D, etc. # compute laplace of input and derive noise sigma laplace = separable_convolution(input, [1, -2, 1], None, mode, cval) factor = numpy.sqrt(numpy.pi / 2.) * 1. / ( numpy.sqrt(divider) * numpy.prod(laplace.shape) ) sigma = factor * numpy.abs(laplace).sum() return sigma def separable_convolution(input, weights, output=None, mode="reflect", cval=0.0, origin=0): r""" Calculate an n-dimensional convolution of a separable kernel with an n-dimensional input. Achieved by calling convolve1d along the first axis, obtaining an intermediate image, on which the next convolve1d along the second axis is called and so on. Parameters ---------- input : array_like Array to which to apply the convolution. weights : ndarray One-dimensional sequence of numbers. output : array, optional The `output` parameter passes an array in which to store the filter output. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0 origin : scalar, optional The `origin` parameter controls the placement of the filter. Default 0.0. Returns ------- output : ndarray Input image convolved with the supplied kernel. """ input = numpy.asarray(input) output = _ni_support._get_output(output, input) axes = list(range(input.ndim)) if len(axes) > 0: convolve1d(input, weights, axes[0], output, mode, cval, origin) for ii in range(1, len(axes)): convolve1d(output, weights, axes[ii], output, mode, cval, origin) else: output[...] = input[...] return output
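# A small self-check sketch (an editorial addition, not part of the original
# module): it estimates the noise of a synthetic image carrying additive zero
# mean Gaussian noise of known standard deviation (here sigma = 5.0), so both
# estimators above can be compared against the ground truth.
if __name__ == "__main__":
    rng = numpy.random.RandomState(0)
    # flat zero image plus Gaussian noise with sigma = 5.0
    noisy = rng.normal(0.0, 5.0, (64, 64))
    print("global sigma estimate:", immerkaer(noisy))
    print("mean of local sigma map:", immerkaer_local(noisy, 15).mean())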
loli/medpy
medpy/filter/noise.py
Python
gpl-3.0
7,547
[ "Gaussian" ]
cc1660c03298a47088f7886402d5c0b75824fa8536c1ad7460aea48d35587a74
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) class QuantumEspresso(CMakePackage): """Quantum ESPRESSO is an integrated suite of Open-Source computer codes for electronic-structure calculations and materials modeling at the nanoscale. It is based on density-functional theory, plane waves, and pseudopotentials. """ homepage = 'http://quantum-espresso.org' url = 'https://gitlab.com/QEF/q-e/-/archive/qe-6.6/q-e-qe-6.6.tar.gz' git = 'https://gitlab.com/QEF/q-e.git' maintainers = ['ye-luo'] version('develop', branch='develop') version('7.0', sha256='85beceb1aaa1678a49e774c085866d4612d9d64108e0ac49b23152c8622880ee') version('6.8', sha256='654855c69864de7ece5ef2f2c0dea2d32698fe51192a8646b1555b0c57e033b2') version('6.7', sha256='fe0ce74ff736b10d2a20c9d59025c01f88f86b00d229c123b1791f1edd7b4315', url='https://gitlab.com/QEF/q-e/-/archive/qe-6.7MaX-Release/q-e-qe-6.7MaX-Release.tar.gz' ) version('6.6', sha256='924656cb083f52e5d2fe71ade05881389dac64b45316f1bdd6dee1c6170a672c') version('6.5', sha256='258b2a8a6280e86dad779e5c56356d8b35dc96d12ff33dabeee914bc03d6d602') version('6.4.1', sha256='b0d7e9f617b848753ad923d8c6ca5490d5d82495f82b032b71a0ff2f2e9cfa08') version('6.4', sha256='781366d03da75516fdcf9100a1caadb26ccdd1dedd942a6f8595ff0edca74bfe') version('6.3', sha256='4067c8fffa957aabbd5cf2439e2fcb6cf3752325393c67a17d99fd09edf8689c') version('6.2.1', sha256='11fe24b4a9d85834f8b6d429baebed8b360a685ecfae222887ed451e118a9156') version('6.2.0', sha256='e204df367c8ea1a50c7534b44481841d835a542a23ae71c3e33ad712fc636c8b') version('6.1.0', sha256='fd2c2eb346b3ca8f08138df5ef3f69b466c256d2119db40eea1b578b0a42c66e') version('6.0.0', sha256='bc77d9553bf5a9253ae74058dffb1d6e5fb61093188e78d3b8d8564755136f19') version('5.4', sha256='e3993fccae9cea04a5c6492e8b961a053a63727051cb5c4eb6008f62cda8f335') version('5.3', sha256='3b26038efb9e3f8ac7a2b950c31d8c29169a3556c0b68c299eb88a4be8dc9048') resource(name='environ', git='https://github.com/environ-developers/Environ.git', tag='v1.1', when='@6.3:6.4 +environ', destination='.' ) resource(name='environ', git='https://github.com/environ-developers/Environ.git', tag='v1.0', when='@6.2.1:6.2 +environ', destination='.' ) variant('cmake', default=True, description='Builds via CMake') with when('+cmake'): depends_on("cmake@3.14.0:", type="build") conflicts('@:6.7', msg='+cmake works since QE v6.8') variant('libxc', default=False, description='Uses libxc') depends_on('libxc@5.1.2:', when='+libxc') # TODO # variant( # 'gpu', default='none', description='Builds with GPU support', # values=('nvidia', 'none'), multi=False # ) variant('openmp', default=False, description='Enables openMP support') # Need OpenMP threaded FFTW and BLAS libraries when configured # with OpenMP support with when('+openmp'): conflicts('^fftw~openmp') conflicts('^amdfftw~openmp') conflicts('^openblas threads=none') conflicts('^openblas threads=pthreads') # Apply upstream patches by default. Variant useful for 3rd party # patches which are incompatible with upstream patches desc = 'Apply recommended upstream patches. 
May need to be set ' desc = desc + 'to False for third party patches or plugins' variant('patch', default=True, description=desc) variant('mpi', default=True, description='Builds with mpi support') with when('+mpi'): depends_on('mpi') variant('scalapack', default=True, description='Enables scalapack support') with when('+scalapack'): depends_on('scalapack') variant('elpa', default=False, description='Uses elpa as an eigenvalue solver') with when('+elpa'): # CMake builds only support elpa without openmp depends_on('elpa~openmp', when='+cmake') depends_on('elpa+openmp', when='+openmp~cmake') depends_on('elpa~openmp', when='~openmp~cmake') # Elpa is formally supported by @:5.4.0, but QE configure searches # for it in the wrong folders (or tries to download it within # the build directory). Instead of patching Elpa to provide the # folder QE expects as a link, we issue a conflict here. conflicts('@:5.4.0', msg='+elpa requires QE >= 6.0') # Support for HDF5 has been added starting in version 6.1.0 and is # still experimental, therefore we default to 'none' for the variant variant( 'hdf5', default='none', description='Builds with HDF5 support', values=('parallel', 'serial', 'none'), multi=False ) # Versions of HDF5 prior to 1.8.16 lead to QE runtime errors depends_on('hdf5@1.8.16:+fortran+hl+mpi', when='hdf5=parallel') depends_on('hdf5@1.8.16:+fortran+hl~mpi', when='hdf5=serial') # HDF5 support introduced in 6.1.0, but the configure had some limitations. # In recent tests (Oct 2019), GCC and Intel work with the HDF5 Spack # package for the default variant. This is only for hdf5=parallel variant. # Support for hdf5=serial was introduced in 6.4.1 but required a patch # for the serial (no MPI) case. This patch was to work around an issue # that only manifested itself inside the Spack environment. conflicts( 'hdf5=parallel', when='@:6.0', msg='parallel HDF5 support only in QE 6.1.0 and later' ) conflicts( 'hdf5=serial', when='@:6.4.0', msg='serial HDF5 support only in QE 6.4.1 and later' ) conflicts( 'hdf5=parallel', when='~mpi', msg='parallel HDF5 requires MPI support' ) # QMCPACK converter patch # https://github.com/QMCPACK/qmcpack/tree/develop/external_codes/quantum_espresso variant('qmcpack', default=False, description='Build QE-to-QMCPACK wave function converter') with when('+qmcpack'): # Some QMCPACK converters are incompatible with upstream patches. # HDF5 is a hard requirement. Need to do two HDF5 cases explicitly # since Spack lacks support for expressing NOT operation. conflicts( '@6.4+patch', msg='QE-to-QMCPACK wave function converter requires ' 'deactivation of upstream patches' ) conflicts( '@6.3:6.4.0 hdf5=serial', msg='QE-to-QMCPACK wave function converter only ' 'supported with parallel HDF5' ) conflicts( 'hdf5=none', msg='QE-to-QMCPACK wave function converter requires HDF5' ) # Enables building Electron-phonon Wannier 'epw.x' executable # http://epw.org.uk/Main/About variant('epw', default=False, description='Builds Electron-phonon Wannier executable') conflicts('~epw', when='+cmake', msg='epw cannot be turned off when using CMake') with when('+epw'): # The first version of Q-E to feature integrated EPW is 6.0.0, # as per http://epw.org.uk/Main/DownloadAndInstall . # Complain if trying to install a version older than this. conflicts('@:5', msg='EPW only available from version 6.0.0 and on') # Below are some constraints, as shown in the link above. 
# Constraints may be relaxed as successful reports # of different compiler+mpi combinations arrive # TODO: enable building EPW when ~mpi and ~cmake conflicts('~mpi', when='~cmake', msg='EPW needs MPI when ~cmake') # EPW doesn't get along well with OpenMPI 2.x.x conflicts('^openmpi@2.0.0:2', msg='OpenMPI version incompatible with EPW') # EPW also doesn't get along well with PGI 17.x + OpenMPI 1.10.7 conflicts('^openmpi@1.10.7%pgi@17.0:17.12', msg='PGI+OpenMPI version combo incompatible with EPW') variant('environ', default=False, description='Enables support for introducing environment effects ' 'into atomistic first-principles simulations. ' 'See http://quantum-environ.org/about.html') conflicts('+environ', when='+cmake', msg='environ doesn\'t work with CMake') # Dependencies not affected by variants depends_on('blas') depends_on('lapack') depends_on('fftw-api@3') # CONFLICTS SECTION # Omitted for now due to concretizer bug # MKL with 64-bit integers not supported. # conflicts( # '^mkl+ilp64', # msg='Quantum ESPRESSO does not support MKL 64-bit integer variant' # ) # PATCHES SECTION # THIRD-PARTY PATCHES # NOTE: *SOME* third-party patches will require deactivation of # upstream patches using `~patch` variant # QMCPACK converter patches for QE 6.8, 6.7, 6.4.1, 6.4, and 6.3 conflicts('@:6.2,6.5:6.6', when='+qmcpack', msg='QMCPACK converter NOT available for this version of QE') # Internal compiler error with gcc8 and a64fx; only checked for 6.5 and 6.6 conflicts('@5.3:', when='%gcc@8 target=a64fx', msg='Internal compiler error with gcc8 and a64fx') conflicts('@6.5:', when='+environ', msg='6.4.x is the latest QE series supported by Environ') # 7.0 patch_url = 'https://raw.githubusercontent.com/QMCPACK/qmcpack/develop/external_codes/quantum_espresso/add_pw2qmcpack_to_qe-7.0.diff' patch_checksum = 'ef60641d8b953b4ba21d9c662b172611305bb63786996ad6e81e7609891677ff' patch(patch_url, sha256=patch_checksum, when='@7.0+qmcpack') # 6.8 patch_url = 'https://raw.githubusercontent.com/QMCPACK/qmcpack/develop/external_codes/quantum_espresso/add_pw2qmcpack_to_qe-6.8.diff' patch_checksum = '69f7fbd72aba810c35a0b034188e45bea8f9f11d3150c0715e1b3518d5c09248' patch(patch_url, sha256=patch_checksum, when='@6.8+qmcpack') # 6.7 patch_url = 'https://raw.githubusercontent.com/QMCPACK/qmcpack/develop/external_codes/quantum_espresso/add_pw2qmcpack_to_qe-6.7.0.diff' patch_checksum = '72564c168231dd4a1279a74e76919af701d47cee9a851db6e205753004fe9bb5' patch(patch_url, sha256=patch_checksum, when='@6.7+qmcpack') # 6.4.1 patch_url = 'https://raw.githubusercontent.com/QMCPACK/qmcpack/develop/external_codes/quantum_espresso/add_pw2qmcpack_to_qe-6.4.1.diff' patch_checksum = '57cb1b06ee2653a87c3acc0dd4f09032fcf6ce6b8cbb9677ae9ceeb6a78f85e2' patch(patch_url, sha256=patch_checksum, when='@6.4.1+qmcpack') # 6.4 patch_url = 'https://raw.githubusercontent.com/QMCPACK/qmcpack/develop/external_codes/quantum_espresso/add_pw2qmcpack_to_qe-6.4.diff' patch_checksum = 'ef08f5089951be902f0854a4dbddaa7b01f08924cdb27decfade6bef0e2b8994' patch(patch_url, sha256=patch_checksum, when='@6.4:6.4.0+qmcpack') # 6.3 patch_url = 'https://raw.githubusercontent.com/QMCPACK/qmcpack/develop/external_codes/quantum_espresso/add_pw2qmcpack_to_qe-6.3.diff' patch_checksum = '2ee346e24926479f5e96f8dc47812173a8847a58354bbc32cf2114af7a521c13' patch(patch_url, sha256=patch_checksum, when='@6.3+qmcpack') # ELPA patch('dspev_drv_elpa.patch', when='@6.1.0:+elpa ^elpa@2016.05.004') patch('dspev_drv_elpa.patch', when='@6.1.0:+elpa ^elpa@2016.05.003') # QE UPSTREAM PATCHES # QE 
6.6: fix compile error when FFT_LIBS is specified. patch('https://gitlab.com/QEF/q-e/-/commit/cf1fedefc20d39f5cd7551ded700ea4c77ad6e8f.diff', sha256='8f179663a8d031aff9b1820a32449942281195b6e7b1ceaab1f729651b43fa58', when='+patch@6.6') # QE 6.5 INTENT(OUT) without setting value in tetra_weights_only(..., ef): # For Fujitsu compiler patch('https://gitlab.com/QEF/q-e/-/commit/8f096b53e75026701c681c508e2c24a9378c0950.diff', sha256='f4f1cce4182b57ac797c8f6ec8460fe375ee96385fcd8f6a61e1460bc957eb67', when='+patch@6.5') # QE 6.5 Fix INTENT # For Fujitsu compiler patch('https://gitlab.com/QEF/q-e/-/commit/c2a86201ed72693ffa50cc99b22f5d3365ae2c2b.diff', sha256='b2dadc0bc008a3ad4b74ae85cc380dd2b63f2ae43a634e6f9d8db8077efcea6c', when='+patch@6.5') # QE 6.3 requires multiple patches to fix MKL detection # There may still be problems on Mac with MKL detection patch('https://gitlab.com/QEF/q-e/commit/0796e1b7c55c9361ecb6515a0979280e78865e36.diff', sha256='bc8c5b8523156cee002d97dab42a5976dffae20605da485a427b902a236d7e6b', when='+patch@6.3:6.3.0') # QE 6.3 `make install` broken and a patch must be applied patch('https://gitlab.com/QEF/q-e/commit/88e6558646dbbcfcafa5f3fa758217f6062ab91c.diff', sha256='b776890d008e16cca28c31299c62f47de0ba606b900b17cbc27c041f45e564ca', when='+patch@6.3:6.3.0') # QE 6.4.1 patch to work around configure issues that only appear in the # Spack environment. We are now able to support: # `spack install qe~mpi~scalapack hdf5=serial` patch('https://gitlab.com/QEF/q-e/commit/5fb1195b0844e1052b7601f18ab5c700f9cbe648.diff', sha256='b1aa3179ee1c069964fb9c21f3b832aebeae54947ce8d3cc1a74e7b154c3c10f', when='+patch@6.4.1:6.5.0') # QE 6.4.1 Fix intent for Fujitsu compiler patch('fj-intent.6.4.1.patch', when='+patch@6.4.1') # QE 6.4.1 Fix intent patch('https://gitlab.com/QEF/q-e/-/commit/c2a86201ed72693ffa50cc99b22f5d3365ae2c2b.diff', sha256='b2dadc0bc008a3ad4b74ae85cc380dd2b63f2ae43a634e6f9d8db8077efcea6c', when='+patch@6.4.1') # QE 6.4.1 Small fixes for XLF compilation patch('https://gitlab.com/QEF/q-e/-/commit/cf088926d68792cbaea48960c222e336a3965df6.diff', sha256='bbceba1fb08d01d548d4393bbcaeae966def13f75884268a0f84448457b8eaa3', when='+patch@6.4.1:6.5.0') # Configure updated to work with AOCC compilers patch('configure_aocc.patch', when='@6.7:6.8 %aocc') # Configure updated to work with NVIDIA compilers patch('nvhpc.patch', when='@6.5 %nvhpc') # Configure updated to work with Fujitsu compilers patch('fj.6.5.patch', when='@6.5+patch %fj') patch('fj.6.6.patch', when='@6.6:6.7+patch %fj') # extlibs_makefile updated to work with fujitsu compilers patch('fj-fox.patch', when='+patch %fj') def cmake_args(self): spec = self.spec cmake_args = [ self.define_from_variant('QE_ENABLE_MPI', 'mpi'), self.define_from_variant('QE_ENABLE_OPENMP', 'openmp'), self.define_from_variant('QE_ENABLE_SCALAPACK', 'scalapack'), self.define_from_variant('QE_ENABLE_ELPA', 'elpa'), self.define_from_variant('QE_ENABLE_LIBXC', 'libxc'), ] # QE prefers taking MPI compiler wrappers as CMake compilers. 
if '+mpi' in spec: cmake_args.append(self.define('CMAKE_C_COMPILER', spec['mpi'].mpicc)) cmake_args.append(self.define('CMAKE_Fortran_COMPILER', spec['mpi'].mpifc)) if not spec.satisfies('hdf5=none'): cmake_args.append(self.define('QE_ENABLE_HDF5', True)) if '+qmcpack' in spec: cmake_args.append(self.define('QE_ENABLE_PW2QMCPACK', True)) return cmake_args @when("~cmake") def cmake(self, spec, prefix): print("Bypass cmake stage when building via configure") @when("~cmake") def build(self, spec, prefix): print("Bypass build stage when building via configure") @when("~cmake") def install(self, spec, prefix): print("Override install stage when building via configure") prefix_path = prefix.bin if '@:5.4.0' in spec else prefix options = ['-prefix={0}'.format(prefix_path)] # This additional flag is needed anytime the target architecture # does not match the host architecture, which results in a binary that # configure cannot execute on the login node. This is how we detect # cross compilation: if the platform is NOT either Linux or Darwin # and target=backend, then we are in the cross-compile scenario. # This should cover Cray, BG/Q, and other custom platforms. # The other option is to list out all the platforms where you would be # cross compiling explicitly. if not (spec.satisfies('platform=linux') or spec.satisfies('platform=darwin')): if spec.satisfies('target=backend'): options.append('--host') # QE autoconf compiler variables have some limitations: # 1. There is no explicit MPICC variable so we must re-purpose # CC for the case of MPI. # 2. F90 variable is set to be consistent with MPIF90 wrapper # 3. If an absolute path for F90 is set, the build system breaks. # # Thus, due to 2. and 3. the F90 variable is not explicitly set # because it would be mostly pointless and could lead to erroneous # behaviour. if '+mpi' in spec: mpi = spec['mpi'] options.append('--enable-parallel=yes') options.append('MPIF90={0}'.format(mpi.mpifc)) options.append('CC={0}'.format(mpi.mpicc)) else: options.append('--enable-parallel=no') options.append('CC={0}'.format(env['SPACK_CC'])) options.append('F77={0}'.format(env['SPACK_F77'])) options.append('F90={0}'.format(env['SPACK_FC'])) if '+openmp' in spec: options.append('--enable-openmp') # QE external BLAS, FFT, SCALAPACK detection is a bit tricky. # More predictable to pass in the correct link line to QE. # If external detection of BLAS, LAPACK and FFT fails, QE # is supposed to revert to internal versions of these libraries # instead -- but more likely it will pick up versions of these # libraries found in the system path, e.g. Red Hat or # Ubuntu's FFTW3 package. # FFT # FFT detection gets derailed if you pass into the CPPFLAGS, instead # you need to pass it in the FFTW_INCLUDE and FFT_LIBS variables. # QE supports an internal FFTW2, but only an external FFTW3 interface. 
if '^mkl' in spec: # A separate FFT library is not needed when linking against MKL options.append( 'FFTW_INCLUDE={0}'.format(join_path(env['MKLROOT'], 'include/fftw'))) if '^fftw@3:' in spec: fftw_prefix = spec['fftw'].prefix options.append('FFTW_INCLUDE={0}'.format(fftw_prefix.include)) if '+openmp' in spec: fftw_ld_flags = spec['fftw:openmp'].libs.ld_flags else: fftw_ld_flags = spec['fftw'].libs.ld_flags options.append('FFT_LIBS={0}'.format(fftw_ld_flags)) if '^amdfftw' in spec: fftw_prefix = spec['amdfftw'].prefix options.append('FFTW_INCLUDE={0}'.format(fftw_prefix.include)) if '+openmp' in spec: fftw_ld_flags = spec['amdfftw:openmp'].libs.ld_flags else: fftw_ld_flags = spec['amdfftw'].libs.ld_flags options.append('FFT_LIBS={0}'.format(fftw_ld_flags)) # External BLAS and LAPACK require the correct link line into # BLAS_LIBS; do not use LAPACK_LIBS, as the autoconf scripts indicate # that this variable is largely ignored/obsolete. # For many Spack packages, lapack.libs = blas.libs, hence it will # appear twice in the link line but this is harmless lapack_blas = spec['lapack'].libs + spec['blas'].libs # qe-6.5 fails to detect MKL for FFT if BLAS_LIBS is set due to # an unfortunate upstream change in their autoconf/configure: # - qe-6.5/install/m4/x_ac_qe_blas.m4 only sets 'have_blas' # but no 'have_mkl' if BLAS_LIBS is set (which seems to be o.k.) # - however, qe-6.5/install/m4/x_ac_qe_fft.m4 in 6.5 unfortunately # relies on x_ac_qe_blas.m4 to detect MKL and set 'have_mkl' # - qe-5.4 up to 6.4.1 had a different logic and worked fine with # BLAS_LIBS being set # However, MKL is correctly picked up by qe-6.5 for BLAS and FFT if # MKLROOT is set (which SPACK does automatically for ^mkl) if spec.satisfies('@:6.4'): # set even if MKL is selected options.append('BLAS_LIBS={0}'.format(lapack_blas.ld_flags)) else: # behavior changed at 6.5 and later if not spec.satisfies('^mkl'): options.append('BLAS_LIBS={0}'.format(lapack_blas.ld_flags)) if '+scalapack' in spec: if '^mkl' in spec: if '^openmpi' in spec: scalapack_option = 'yes' else: # mpich, intel-mpi scalapack_option = 'intel' else: scalapack_option = 'yes' options.append('--with-scalapack={0}'.format(scalapack_option)) scalapack_lib = spec['scalapack'].libs options.append('SCALAPACK_LIBS={0}'.format(scalapack_lib.ld_flags)) if '+elpa' in spec: # Spec for elpa elpa = spec['elpa'] # Compute the include directory from there: versions # of espresso prior to 6.1 requires -I in front of the directory elpa_include = '' if '@6.1:' in spec else '-I' elpa_include += join_path( elpa.headers.directories[0], 'modules' ) options.extend([ '--with-elpa-include={0}'.format(elpa_include), '--with-elpa-version={0}'.format(elpa.version.version[0]), ]) elpa_suffix = '_openmp' if '+openmp' in elpa else '' # Currently AOCC support only static libraries of ELPA if '%aocc' in spec: options.extend([ '--with-elpa-lib={0}'.format( join_path(elpa.prefix.lib, 'libelpa{elpa_suffix}.a' .format(elpa_suffix=elpa_suffix))) ]) else: options.extend([ '--with-elpa-lib={0}'.format(elpa.libs[0]) ]) if spec.variants['hdf5'].value != 'none': options.append('--with-hdf5={0}'.format(spec['hdf5'].prefix)) if spec.satisfies('@6.4.1,6.5'): options.extend([ '--with-hdf5-include={0}'.format( spec['hdf5'].headers.directories[0] ), '--with-hdf5-libs={0}'.format( spec['hdf5:hl,fortran'].libs.ld_flags ) ]) configure(*options) # Filter file must be applied after configure executes # QE 6.1.0 to QE 6.4 have `-L` missing in front of zlib library # This issue is backported through an internal patch 
in 6.4.1, but # can't be applied to the '+qmcpack' variant if spec.variants['hdf5'].value != 'none': if (spec.satisfies('@6.1.0:6.4.0') or (spec.satisfies('@6.4.1') and '+qmcpack' in spec)): make_inc = join_path(self.stage.source_path, 'make.inc') zlib_libs = spec['zlib'].prefix.lib + ' -lz' filter_file( zlib_libs, format(spec['zlib'].libs.ld_flags), make_inc ) # QE 6.6 and later has parallel builds fixed if spec.satisfies('@:6.5'): parallel_build_on = False else: parallel_build_on = True if '+epw' in spec: make('all', 'epw', parallel=parallel_build_on) else: make('all', parallel=parallel_build_on) if '+environ' in spec: addsonpatch = Executable('./install/addsonpatch.sh') environpatch = Executable('./Environ/patches/environpatch.sh') makedeps = Executable('./install/makedeps.sh') addsonpatch('Environ', 'Environ/src', 'Modules', '-patch') environpatch('-patch') makedeps() make('pw', parallel=parallel_build_on) if 'platform=darwin' in spec: mkdirp(prefix.bin) install('bin/*.x', prefix.bin) else: make('install')
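# Illustrative installation specs (an editorial addition; these only combine
# variants and versions declared in this recipe, and are not exhaustive):
#
#   spack install quantum-espresso@6.8 +mpi+scalapack hdf5=parallel
#   spack install quantum-espresso ~cmake+elpa       # autotools build path
#   spack install quantum-espresso@6.7 +qmcpack hdf5=parallel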
LLNL/spack
var/spack/repos/builtin/packages/quantum-espresso/package.py
Python
lgpl-2.1
24,677
[ "EPW", "ESPResSo", "QMCPACK", "Quantum ESPRESSO" ]
5d308dd661db9e7ff856d01687c7155ed405c938c34c850de4515d505d3b5aeb
"""\ Main class and helper functions. """ import warnings import collections.abc as cabc from collections import OrderedDict from copy import copy, deepcopy from enum import Enum from functools import partial, singledispatch from pathlib import Path from os import PathLike from textwrap import dedent from typing import Any, Union, Optional # Meta from typing import Iterable, Sequence, Mapping, MutableMapping # Generic ABCs from typing import Tuple, List # Generic import h5py from natsort import natsorted import numpy as np from numpy import ma import pandas as pd from pandas.api.types import infer_dtype, is_string_dtype, is_categorical_dtype from scipy import sparse from scipy.sparse import issparse, csr_matrix from anndata._warnings import ImplicitModificationWarning from .raw import Raw from .index import _normalize_indices, _subset, Index, Index1D, get_vector from .file_backing import AnnDataFileManager, to_memory from .access import ElementRef from .aligned_mapping import ( AxisArrays, AxisArraysView, PairwiseArrays, PairwiseArraysView, Layers, LayersView, ) from .views import ( ArrayView, DictView, DataFrameView, as_view, _resolve_idxs, ) from .sparse_dataset import SparseDataset from .. import utils from ..utils import convert_to_dict, ensure_df_homogeneous from ..logging import anndata_logger as logger from ..compat import ( ZarrArray, ZappyArray, DaskArray, Literal, _slice_uns_sparse_matrices, _move_adj_mtx, _overloaded_uns, OverloadedDict, ) class StorageType(Enum): Array = np.ndarray Masked = ma.MaskedArray Sparse = sparse.spmatrix ZarrArray = ZarrArray ZappyArray = ZappyArray DaskArray = DaskArray @classmethod def classes(cls): return tuple(c.value for c in cls.__members__.values()) # for backwards compat def _find_corresponding_multicol_key(key, keys_multicol): """Find the corresponding multicolumn key.""" for mk in keys_multicol: if key.startswith(mk) and "of" in key: return mk return None # for backwards compat def _gen_keys_from_multicol_key(key_multicol, n_keys): """Generates single-column keys from multicolumn key.""" keys = [f"{key_multicol}{i + 1:03}of{n_keys:03}" for i in range(n_keys)] return keys def _check_2d_shape(X): """\ Check shape of array or sparse matrix. Assure that X is always 2D: Unlike numpy we always deal with 2D arrays. """ if X.dtype.names is None and len(X.shape) != 2: raise ValueError( f"X needs to be 2-dimensional, not {len(X.shape)}-dimensional." ) @singledispatch def _gen_dataframe(anno, length, index_names): if anno is None or len(anno) == 0: return pd.DataFrame(index=pd.RangeIndex(0, length, name=None).astype(str)) for index_name in index_names: if index_name in anno: return pd.DataFrame( anno, index=anno[index_name], columns=[k for k in anno.keys() if k != index_name], ) return pd.DataFrame(anno, index=pd.RangeIndex(0, length, name=None).astype(str)) @_gen_dataframe.register(pd.DataFrame) def _(anno, length, index_names): anno = anno.copy(deep=False) if not is_string_dtype(anno.index): warnings.warn("Transforming to str index.", ImplicitModificationWarning) anno.index = anno.index.astype(str) return anno @_gen_dataframe.register(pd.Series) @_gen_dataframe.register(pd.Index) def _(anno, length, index_names): raise ValueError(f"Cannot convert {type(anno)} to DataFrame") class AnnData(metaclass=utils.DeprecationMixinMeta): """\ An annotated data matrix. 
:class:`~anndata.AnnData` stores a data matrix :attr:`X` together with annotations of observations :attr:`obs` (:attr:`obsm`, :attr:`obsp`), variables :attr:`var` (:attr:`varm`, :attr:`varp`), and unstructured annotations :attr:`uns`. .. figure:: ../_static/img/anndata_schema.svg :width: 260px An :class:`~anndata.AnnData` object `adata` can be sliced like a :class:`~pandas.DataFrame`, for instance `adata_subset = adata[:, list_of_variable_names]`. :class:`~anndata.AnnData`’s basic structure is similar to R’s ExpressionSet [Huber15]_. If setting an `.h5ad`-formatted HDF5 backing file `.filename`, data remains on the disk but is automatically loaded into memory if needed. Parameters ---------- X A #observations × #variables data matrix. A view of the data is used if the data type matches, otherwise, a copy is made. obs Key-indexed one-dimensional observations annotation of length #observations. var Key-indexed one-dimensional variables annotation of length #variables. uns Key-indexed unstructured annotation. obsm Key-indexed multi-dimensional observations annotation of length #observations. If passing a :class:`~numpy.ndarray`, it needs to have a structured datatype. varm Key-indexed multi-dimensional variables annotation of length #variables. If passing a :class:`~numpy.ndarray`, it needs to have a structured datatype. layers Key-indexed multi-dimensional arrays aligned to dimensions of `X`. dtype Data type used for storage. shape Shape tuple (#observations, #variables). Can only be provided if `X` is `None`. filename Name of backing file. See :class:`h5py.File`. filemode Open mode of backing file. See :class:`h5py.File`. See Also -------- read_h5ad read_csv read_excel read_hdf read_loom read_zarr read_mtx read_text read_umi_tools Notes ----- :class:`~anndata.AnnData` stores observations (samples) of variables/features in the rows of a matrix. This is the convention of the modern classics of statistics [Hastie09]_ and machine learning [Murphy12]_, the convention of dataframes both in R and Python and the established statistics and machine learning packages in Python (statsmodels_, scikit-learn_). Single dimensional annotations of the observation and variables are stored in the :attr:`obs` and :attr:`var` attributes as :class:`~pandas.DataFrame`\\ s. This is intended for metrics calculated over their axes. Multi-dimensional annotations are stored in :attr:`obsm` and :attr:`varm`, which are aligned to the objects observation and variable dimensions respectively. Square matrices representing graphs are stored in :attr:`obsp` and :attr:`varp`, with both of their own dimensions aligned to their associated axis. Additional measurements across both observations and variables are stored in :attr:`layers`. Indexing into an AnnData object can be performed by relative position with numeric indices (like pandas’ :meth:`~pandas.DataFrame.iloc`), or by labels (like :meth:`~pandas.DataFrame.loc`). To avoid ambiguity with numeric indexing into observations or variables, indexes of the AnnData object are converted to strings by the constructor. Subsetting an AnnData object by indexing into it will also subset its elements according to the dimensions they were aligned to. This means an operation like `adata[list_of_obs, :]` will also subset :attr:`obs`, :attr:`obsm`, and :attr:`layers`. Subsetting an AnnData object returns a view into the original object, meaning very little additional memory is used upon subsetting. This is achieved lazily, meaning that the constituent arrays are subset on access. 
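    For example (a minimal editorial illustration, using only the constructor
    and the slicing behaviour described above)::

        import numpy as np
        from anndata import AnnData

        adata = AnnData(np.ones((4, 3), dtype=np.float32))
        view = adata[:2, :]   # a lightweight view; nothing is copied yet
        real = view.copy()    # materializes an independent AnnData object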
Copying a view causes an equivalent “real” AnnData object to be generated. Attempting to modify a view (at any attribute except X) is handled in a copy-on-modify manner, meaning the object is initialized in place. Here’s an example:: batch1 = adata[adata.obs["batch"] == "batch1", :] batch1.obs["value"] = 0 # This makes batch1 a “real” AnnData object At the end of this snippet: `adata` was not modified, and `batch1` is its own AnnData object with its own data. Similar to Bioconductor’s `ExpressionSet` and :mod:`scipy.sparse` matrices, subsetting an AnnData object retains the dimensionality of its constituent arrays. Therefore, unlike with the classes exposed by :mod:`pandas`, :mod:`numpy`, and `xarray`, there is no concept of a one dimensional AnnData object. AnnDatas always have two inherent dimensions, :attr:`obs` and :attr:`var`. Additionally, maintaining the dimensionality of the AnnData object allows for consistent handling of :mod:`scipy.sparse` matrices and :mod:`numpy` arrays. .. _statsmodels: http://www.statsmodels.org/stable/index.html .. _scikit-learn: http://scikit-learn.org/ """ _BACKED_ATTRS = ["X", "raw.X"] # backwards compat _H5_ALIASES = dict( X={"X", "_X", "data", "_data"}, obs={"obs", "_obs", "smp", "_smp"}, var={"var", "_var"}, uns={"uns"}, obsm={"obsm", "_obsm", "smpm", "_smpm"}, varm={"varm", "_varm"}, layers={"layers", "_layers"}, ) _H5_ALIASES_NAMES = dict( obs={"obs_names", "smp_names", "row_names", "index"}, var={"var_names", "col_names", "index"}, ) def __init__( self, X: Optional[Union[np.ndarray, sparse.spmatrix, pd.DataFrame]] = None, obs: Optional[Union[pd.DataFrame, Mapping[str, Iterable[Any]]]] = None, var: Optional[Union[pd.DataFrame, Mapping[str, Iterable[Any]]]] = None, uns: Optional[Mapping[str, Any]] = None, obsm: Optional[Union[np.ndarray, Mapping[str, Sequence[Any]]]] = None, varm: Optional[Union[np.ndarray, Mapping[str, Sequence[Any]]]] = None, layers: Optional[Mapping[str, Union[np.ndarray, sparse.spmatrix]]] = None, raw: Optional[Mapping[str, Any]] = None, dtype: Optional[Union[np.dtype, str]] = None, shape: Optional[Tuple[int, int]] = None, filename: Optional[PathLike] = None, filemode: Optional[Literal["r", "r+"]] = None, asview: bool = False, *, obsp: Optional[Union[np.ndarray, Mapping[str, Sequence[Any]]]] = None, varp: Optional[Union[np.ndarray, Mapping[str, Sequence[Any]]]] = None, oidx: Index1D = None, vidx: Index1D = None, ): if asview: if not isinstance(X, AnnData): raise ValueError("`X` has to be an AnnData object.") self._init_as_view(X, oidx, vidx) else: self._init_as_actual( X=X, obs=obs, var=var, uns=uns, obsm=obsm, varm=varm, raw=raw, layers=layers, dtype=dtype, shape=shape, obsp=obsp, varp=varp, filename=filename, filemode=filemode, ) def _init_as_view(self, adata_ref: "AnnData", oidx: Index, vidx: Index): if adata_ref.isbacked and adata_ref.is_view: raise ValueError( "Currently, you cannot index repeatedly into a backed AnnData, " "that is, you cannot make a view of a view." 
) self._is_view = True if isinstance(oidx, (int, np.integer)): if not (-adata_ref.n_obs <= oidx < adata_ref.n_obs): raise IndexError(f"Observation index `{oidx}` is out of range.") oidx += adata_ref.n_obs * (oidx < 0) oidx = slice(oidx, oidx + 1, 1) if isinstance(vidx, (int, np.integer)): if not (-adata_ref.n_vars <= vidx < adata_ref.n_vars): raise IndexError(f"Variable index `{vidx}` is out of range.") vidx += adata_ref.n_vars * (vidx < 0) vidx = slice(vidx, vidx + 1, 1) if adata_ref.is_view: prev_oidx, prev_vidx = adata_ref._oidx, adata_ref._vidx adata_ref = adata_ref._adata_ref oidx, vidx = _resolve_idxs((prev_oidx, prev_vidx), (oidx, vidx), adata_ref) # self._adata_ref is never a view self._adata_ref = adata_ref self._oidx = oidx self._vidx = vidx # the file is the same as of the reference object self.file = adata_ref.file # views on attributes of adata_ref obs_sub = adata_ref.obs.iloc[oidx] var_sub = adata_ref.var.iloc[vidx] self._obsm = adata_ref.obsm._view(self, (oidx,)) self._varm = adata_ref.varm._view(self, (vidx,)) self._layers = adata_ref.layers._view(self, (oidx, vidx)) self._obsp = adata_ref.obsp._view(self, oidx) self._varp = adata_ref.varp._view(self, vidx) # Special case for old neighbors, backwards compat. Remove in anndata 0.8. uns_new = _slice_uns_sparse_matrices( copy(adata_ref._uns), self._oidx, adata_ref.n_obs ) # fix categories self._remove_unused_categories(adata_ref.obs, obs_sub, uns_new) self._remove_unused_categories(adata_ref.var, var_sub, uns_new) # set attributes self._obs = DataFrameView(obs_sub, view_args=(self, "obs")) self._var = DataFrameView(var_sub, view_args=(self, "var")) self._uns = uns_new self._n_obs = len(self.obs) self._n_vars = len(self.var) # set data if self.isbacked: self._X = None # set raw, easy, as it’s immutable anyways... if adata_ref._raw is not None: # slicing along variables axis is ignored self._raw = adata_ref.raw[oidx] self._raw._adata = self else: self._raw = None def _init_as_actual( self, X=None, obs=None, var=None, uns=None, obsm=None, varm=None, varp=None, obsp=None, raw=None, layers=None, dtype=None, shape=None, filename=None, filemode=None, ): # view attributes self._is_view = False self._adata_ref = None self._oidx = None self._vidx = None # ---------------------------------------------------------------------- # various ways of initializing the data # ---------------------------------------------------------------------- # If X is a data frame, we store its indices for verification x_indices = [] # init from file if filename is not None: self.file = AnnDataFileManager(self, filename, filemode) else: self.file = AnnDataFileManager(self, None) # init from AnnData if isinstance(X, AnnData): if any((obs, var, uns, obsm, varm, obsp, varp)): raise ValueError( "If `X` is an AnnData object, no further arguments must be provided." 
) X, obs, var, uns, obsm, varm, obsp, varp, layers, raw = ( X._X, X.obs, X.var, X.uns, X.obsm, X.varm, X.obsp, X.varp, X.layers, X.raw, ) # init from DataFrame elif isinstance(X, pd.DataFrame): # to verify index matching, we wait until obs and var are DataFrames if obs is None: obs = pd.DataFrame(index=X.index) elif not isinstance(X.index, pd.RangeIndex): x_indices.append(("obs", "index", X.index)) if var is None: var = pd.DataFrame(index=X.columns) elif not isinstance(X.columns, pd.RangeIndex): x_indices.append(("var", "columns", X.columns)) X = ensure_df_homogeneous(X, "X") # ---------------------------------------------------------------------- # actually process the data # ---------------------------------------------------------------------- # check data type of X if X is not None: for s_type in StorageType: if isinstance(X, s_type.value): break else: class_names = ", ".join(c.__name__ for c in StorageType.classes()) raise ValueError( f"`X` needs to be of one of {class_names}, not {type(X)}." ) if shape is not None: raise ValueError("`shape` needs to be `None` if `X` is not `None`.") _check_2d_shape(X) # if type doesn’t match, a copy is made, otherwise, use a view if dtype is None and X.dtype != np.float32: warnings.warn( f"X.dtype being converted to np.float32 from {X.dtype}. In the next " "version of anndata (0.9) conversion will not be automatic. Pass " "dtype explicitly to avoid this warning. Pass " "`AnnData(X, dtype=X.dtype, ...)` to get the future behaviour.", FutureWarning, stacklevel=3, ) dtype = np.float32 elif dtype is None: dtype = np.float32 if issparse(X) or isinstance(X, ma.MaskedArray): # TODO: maybe use view on data attribute of sparse matrix # as in readwrite.read_10x_h5 if X.dtype != np.dtype(dtype): X = X.astype(dtype) elif isinstance(X, ZarrArray): X = X.astype(dtype) else: # is np.ndarray or a subclass, convert to true np.ndarray X = np.array(X, dtype, copy=False) # data matrix and shape self._X = X self._n_obs, self._n_vars = self._X.shape else: self._X = None self._n_obs = len([] if obs is None else obs) self._n_vars = len([] if var is None else var) # check consistency with shape if shape is not None: if self._n_obs == 0: self._n_obs = shape[0] else: if self._n_obs != shape[0]: raise ValueError("`shape` is inconsistent with `obs`") if self._n_vars == 0: self._n_vars = shape[1] else: if self._n_vars != shape[1]: raise ValueError("`shape` is inconsistent with `var`") # annotations self._obs = _gen_dataframe(obs, self._n_obs, ["obs_names", "row_names"]) self._var = _gen_dataframe(var, self._n_vars, ["var_names", "col_names"]) # now we can verify if indices match! for attr_name, x_name, idx in x_indices: attr = getattr(self, attr_name) if isinstance(attr.index, pd.RangeIndex): attr.index = idx elif not idx.equals(attr.index): raise ValueError(f"Index of {attr_name} must match {x_name} of X.") # unstructured annotations self.uns = uns or OrderedDict() # TODO: Think about consequences of making obsm a group in hdf self._obsm = AxisArrays(self, 0, vals=convert_to_dict(obsm)) self._varm = AxisArrays(self, 1, vals=convert_to_dict(varm)) self._obsp = PairwiseArrays(self, 0, vals=convert_to_dict(obsp)) self._varp = PairwiseArrays(self, 1, vals=convert_to_dict(varp)) # Backwards compat for connectivities matrices in uns["neighbors"] _move_adj_mtx({"uns": self._uns, "obsp": self._obsp}) self._check_dimensions() self._check_uniqueness() if self.filename: assert not isinstance( raw, Raw ), "got raw from other adata but also filename?" 
if {"raw", "raw.X"} & set(self.file): raw = dict(X=None, **raw) if not raw: self._raw = None elif isinstance(raw, cabc.Mapping): self._raw = Raw(self, **raw) else: # is a Raw from another AnnData self._raw = Raw(self, raw._X, raw.var, raw.varm) # clean up old formats self._clean_up_old_format(uns) # layers self._layers = Layers(self, layers) def __sizeof__(self, show_stratified=None) -> int: def get_size(X): if issparse(X): X_csr = csr_matrix(X) return X_csr.data.nbytes + X_csr.indptr.nbytes + X_csr.indices.nbytes else: return X.__sizeof__() size = 0 attrs = list(["_X", "_obs", "_var"]) attrs_multi = list(["_uns", "_obsm", "_varm", "varp", "_obsp", "_layers"]) for attr in attrs + attrs_multi: if attr in attrs_multi: keys = getattr(self, attr).keys() s = sum([get_size(getattr(self, attr)[k]) for k in keys]) else: s = get_size(getattr(self, attr)) if s > 0 and show_stratified: str_attr = attr.replace("_", ".") + " " * (7 - len(attr)) print(f"Size of {str_attr}: {'%3.2f' % (s / (1024 ** 2))} MB") size += s return size def _gen_repr(self, n_obs, n_vars) -> str: if self.isbacked: backed_at = f" backed at {str(self.filename)!r}" else: backed_at = "" descr = f"AnnData object with n_obs × n_vars = {n_obs} × {n_vars}{backed_at}" for attr in [ "obs", "var", "uns", "obsm", "varm", "layers", "obsp", "varp", ]: keys = getattr(self, attr).keys() if len(keys) > 0: descr += f"\n {attr}: {str(list(keys))[1:-1]}" return descr def __repr__(self) -> str: if self.is_view: return "View of " + self._gen_repr(self.n_obs, self.n_vars) else: return self._gen_repr(self.n_obs, self.n_vars) def __eq__(self, other): """Equality testing""" raise NotImplementedError( "Equality comparisons are not supported for AnnData objects, " "instead compare the desired attributes." ) @property def shape(self) -> Tuple[int, int]: """Shape of data matrix (:attr:`n_obs`, :attr:`n_vars`).""" return self.n_obs, self.n_vars @property def X(self) -> Optional[Union[np.ndarray, sparse.spmatrix, ArrayView]]: """Data matrix of shape :attr:`n_obs` × :attr:`n_vars`.""" if self.isbacked: if not self.file.is_open: self.file.open() X = self.file["X"] if isinstance(X, h5py.Group): X = SparseDataset(X) # This is so that we can index into a backed dense dataset with # indices that aren’t strictly increasing if self.is_view: X = _subset(X, (self._oidx, self._vidx)) elif self.is_view and self._adata_ref.X is None: X = None elif self.is_view: X = as_view( _subset(self._adata_ref.X, (self._oidx, self._vidx)), ElementRef(self, "X"), ) else: X = self._X return X # if self.n_obs == 1 and self.n_vars == 1: # return X[0, 0] # elif self.n_obs == 1 or self.n_vars == 1: # if issparse(X): X = X.toarray() # return X.flatten() # else: # return X @X.setter def X(self, value: Optional[Union[np.ndarray, sparse.spmatrix]]): if value is None: if self.isbacked: raise NotImplementedError( "Cannot currently remove data matrix from backed object." ) if self.is_view: self._init_as_actual(self.copy()) self._X = None return if not isinstance(value, StorageType.classes()) and not np.isscalar(value): if hasattr(value, "to_numpy") and hasattr(value, "dtypes"): value = ensure_df_homogeneous(value, "X") else: # TODO: asarray? asanyarray? 
value = np.array(value) # If indices are both arrays, we need to modify them # so we don’t set values like coordinates # This can occur if there are successive views if ( self.is_view and isinstance(self._oidx, np.ndarray) and isinstance(self._vidx, np.ndarray) ): oidx, vidx = np.ix_(self._oidx, self._vidx) else: oidx, vidx = self._oidx, self._vidx if ( np.isscalar(value) or (hasattr(value, "shape") and (self.shape == value.shape)) or (self.n_vars == 1 and self.n_obs == len(value)) or (self.n_obs == 1 and self.n_vars == len(value)) ): if not np.isscalar(value) and self.shape != value.shape: # For assigning vector of values to 2d array or matrix # Not necessary for row of 2d array value = value.reshape(self.shape) if self.isbacked: if self.is_view: X = self.file["X"] if isinstance(X, h5py.Group): X = SparseDataset(X) X[oidx, vidx] = value else: self._set_backed("X", value) else: if self.is_view: if sparse.issparse(self._adata_ref._X) and isinstance( value, np.ndarray ): value = sparse.coo_matrix(value) self._adata_ref._X[oidx, vidx] = value else: self._X = value else: raise ValueError( f"Data matrix has wrong shape {value.shape}, " f"need to be {self.shape}." ) @X.deleter def X(self): self.X = None @property def layers(self) -> Union[Layers, LayersView]: """\ Dictionary-like object with values of the same dimensions as :attr:`X`. Layers in AnnData are inspired by loompy’s :ref:`loomlayers`. Return the layer named `"unspliced"`:: adata.layers["unspliced"] Create or replace the `"spliced"` layer:: adata.layers["spliced"] = ... Assign the 10th column of layer `"spliced"` to the variable a:: a = adata.layers["spliced"][:, 10] Delete the `"spliced"` layer:: del adata.layers["spliced"] Return layers’ names:: adata.layers.keys() """ return self._layers @layers.setter def layers(self, value): layers = Layers(self, vals=convert_to_dict(value)) if self.is_view: self._init_as_actual(self.copy()) self._layers = layers @layers.deleter def layers(self): self.layers = dict() @property def raw(self) -> Raw: """\ Store raw version of :attr:`X` and :attr:`var` as `.raw.X` and `.raw.var`. The :attr:`raw` attribute is initialized with the current content of an object by setting:: adata.raw = adata Its content can be deleted:: adata.raw = None # or del adata.raw Upon slicing an AnnData object along the obs (row) axis, :attr:`raw` is also sliced. Slicing an AnnData object along the vars (columns) axis leaves :attr:`raw` unaffected. Note that you can call:: adata.raw[:, 'orig_variable_name'].X to retrieve the data associated with a variable that might have been filtered out or "compressed away" in :attr:`X`. 
""" return self._raw @raw.setter def raw(self, value: "AnnData"): if value is None: del self.raw elif not isinstance(value, AnnData): raise ValueError("Can only init raw attribute with an AnnData object.") else: if self.is_view: self._init_as_actual(self.copy()) self._raw = Raw(value) @raw.deleter def raw(self): if self.is_view: self._init_as_actual(self.copy()) self._raw = None @property def n_obs(self) -> int: """Number of observations.""" return self._n_obs @property def n_vars(self) -> int: """Number of variables/features.""" return self._n_vars def _set_dim_df(self, value: pd.DataFrame, attr: str): if not isinstance(value, pd.DataFrame): raise ValueError(f"Can only assign pd.DataFrame to {attr}.") value_idx = self._prep_dim_index(value.index, attr) if self.is_view: self._init_as_actual(self.copy()) setattr(self, f"_{attr}", value) self._set_dim_index(value_idx, attr) def _prep_dim_index(self, value, attr: str) -> pd.Index: """Prepares index to be uses as obs_names or var_names for AnnData object.AssertionError If a pd.Index is passed, this will use a reference, otherwise a new index object is created. """ if self.shape[attr == "var"] != len(value): raise ValueError( f"Length of passed value for {attr}_names is {len(value)}, but this AnnData has shape: {self.shape}" ) if isinstance(value, pd.Index) and not isinstance( value.name, (str, type(None)) ): raise ValueError( f"AnnData expects .{attr}.index.name to be a string or None, " f"but you passed a name of type {type(value.name).__name__!r}" ) else: value = pd.Index(value) if not isinstance(value.name, (str, type(None))): value.name = None # fmt: off if ( not isinstance(value, pd.RangeIndex) and not infer_dtype(value) in ("string", "bytes") ): sample = list(value[: min(len(value), 5)]) warnings.warn(dedent( f""" AnnData expects .{attr}.index to contain strings, but got values like: {sample} Inferred to be: {infer_dtype(value)} """ ), # noqa stacklevel=2, ) # fmt: on return value def _set_dim_index(self, value: pd.Index, attr: str): # Assumes _prep_dim_index has been run if self.is_view: self._init_as_actual(self.copy()) getattr(self, attr).index = value for v in getattr(self, f"{attr}m").values(): if isinstance(v, pd.DataFrame): v.index = value @property def obs(self) -> pd.DataFrame: """One-dimensional annotation of observations (`pd.DataFrame`).""" return self._obs @obs.setter def obs(self, value: pd.DataFrame): self._set_dim_df(value, "obs") @obs.deleter def obs(self): self.obs = pd.DataFrame(index=self.obs_names) @property def obs_names(self) -> pd.Index: """Names of observations (alias for `.obs.index`).""" return self.obs.index @obs_names.setter def obs_names(self, names: Sequence[str]): names = self._prep_dim_index(names, "obs") self._set_dim_index(names, "obs") @property def var(self) -> pd.DataFrame: """One-dimensional annotation of variables/ features (`pd.DataFrame`).""" return self._var @var.setter def var(self, value: pd.DataFrame): self._set_dim_df(value, "var") @var.deleter def var(self): self.var = pd.DataFrame(index=self.var_names) @property def var_names(self) -> pd.Index: """Names of variables (alias for `.var.index`).""" return self.var.index @var_names.setter def var_names(self, names: Sequence[str]): names = self._prep_dim_index(names, "var") self._set_dim_index(names, "var") @property def uns(self) -> MutableMapping: """Unstructured annotation (ordered dictionary).""" uns = self._uns if self.is_view: uns = DictView(uns, view_args=(self, "_uns")) uns = _overloaded_uns(self, uns) return uns @uns.setter def 
uns(self, value: MutableMapping): if not isinstance(value, MutableMapping): raise ValueError( "Only mutable mapping types (e.g. dict) are allowed for `.uns`." ) if isinstance(value, (OverloadedDict, DictView)): value = value.copy() if self.is_view: self._init_as_actual(self.copy()) self._uns = value @uns.deleter def uns(self): self.uns = OrderedDict() @property def obsm(self) -> Union[AxisArrays, AxisArraysView]: """\ Multi-dimensional annotation of observations (mutable structured :class:`~numpy.ndarray`). Stores for each key a two or higher-dimensional :class:`~numpy.ndarray` of length `n_obs`. Is sliced with `data` and `obs` but behaves otherwise like a :term:`mapping`. """ return self._obsm @obsm.setter def obsm(self, value): obsm = AxisArrays(self, 0, vals=convert_to_dict(value)) if self.is_view: self._init_as_actual(self.copy()) self._obsm = obsm @obsm.deleter def obsm(self): self.obsm = dict() @property def varm(self) -> Union[AxisArrays, AxisArraysView]: """\ Multi-dimensional annotation of variables/features (mutable structured :class:`~numpy.ndarray`). Stores for each key a two or higher-dimensional :class:`~numpy.ndarray` of length `n_vars`. Is sliced with `data` and `var` but behaves otherwise like a :term:`mapping`. """ return self._varm @varm.setter def varm(self, value): varm = AxisArrays(self, 1, vals=convert_to_dict(value)) if self.is_view: self._init_as_actual(self.copy()) self._varm = varm @varm.deleter def varm(self): self.varm = dict() @property def obsp(self) -> Union[PairwiseArrays, PairwiseArraysView]: """\ Pairwise annotation of observations, a mutable mapping with array-like values. Stores for each key a two or higher-dimensional :class:`~numpy.ndarray` whose first two dimensions are of length `n_obs`. Is sliced with `data` and `obs` but behaves otherwise like a :term:`mapping`. """ return self._obsp @obsp.setter def obsp(self, value): obsp = PairwiseArrays(self, 0, vals=convert_to_dict(value)) if self.is_view: self._init_as_actual(self.copy()) self._obsp = obsp @obsp.deleter def obsp(self): self.obsp = dict() @property def varp(self) -> Union[PairwiseArrays, PairwiseArraysView]: """\ Pairwise annotation of variables/features, a mutable mapping with array-like values. Stores for each key a two or higher-dimensional :class:`~numpy.ndarray` whose first two dimensions are of length `n_vars`. Is sliced with `data` and `var` but behaves otherwise like a :term:`mapping`.
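        For example (editor's sketch; assumes a dense `X`, which yields an
        `n_vars` × `n_vars` matrix)::

            adata.varp["correlations"] = np.corrcoef(adata.X.T)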
""" return self._varp @varp.setter def varp(self, value): varp = PairwiseArrays(self, 1, vals=convert_to_dict(value)) if self.is_view: self._init_as_actual(self.copy()) self._varp = varp @varp.deleter def varp(self): self.varp = dict() def obs_keys(self) -> List[str]: """List keys of observation annotation :attr:`obs`.""" return self._obs.keys().tolist() def var_keys(self) -> List[str]: """List keys of variable annotation :attr:`var`.""" return self._var.keys().tolist() def obsm_keys(self) -> List[str]: """List keys of observation annotation :attr:`obsm`.""" return list(self._obsm.keys()) def varm_keys(self) -> List[str]: """List keys of variable annotation :attr:`varm`.""" return list(self._varm.keys()) def uns_keys(self) -> List[str]: """List keys of unstructured annotation.""" return sorted(list(self._uns.keys())) @property def isbacked(self) -> bool: """`True` if object is backed on disk, `False` otherwise.""" return self.filename is not None @property def is_view(self) -> bool: """`True` if object is view of another AnnData object, `False` otherwise.""" return self._is_view @property def filename(self) -> Optional[Path]: """\ Change to backing mode by setting the filename of a `.h5ad` file. - Setting the filename writes the stored data to disk. - Setting the filename when the filename was previously another name moves the backing file from the previous file to the new file. If you want to copy the previous file, use `copy(filename='new_filename')`. """ return self.file.filename @filename.setter def filename(self, filename: Optional[PathLike]): # convert early for later comparison filename = None if filename is None else Path(filename) # change from backing-mode back to full loading into memory if filename is None: if self.filename is not None: self.file._to_memory_mode() else: # both filename and self.filename are None # do nothing return else: if self.filename is not None: if self.filename != filename: # write the content of self to the old file # and close the file self.write() self.filename.rename(filename) else: # do nothing return else: # change from memory to backing-mode # write the content of self to disk self.write(filename, force_dense=True) # open new file for accessing self.file.open(filename, "r+") # as the data is stored on disk, we can safely set self._X to None self._X = None def _set_backed(self, attr, value): from .._io.utils import write_attribute write_attribute(self.file._file, attr, value) def _normalize_indices(self, index: Optional[Index]) -> Tuple[slice, slice]: return _normalize_indices(index, self.obs_names, self.var_names) # TODO: this is not quite complete... def __delitem__(self, index: Index): obs, var = self._normalize_indices(index) # TODO: does this really work? if not self.isbacked: del self._X[obs, var] else: X = self.file["X"] del X[obs, var] self._set_backed("X", X) if var == slice(None): del self._obs.iloc[obs, :] if obs == slice(None): del self._var.iloc[var, :] def __getitem__(self, index: Index) -> "AnnData": """Returns a sliced view of the object.""" oidx, vidx = self._normalize_indices(index) return AnnData(self, oidx=oidx, vidx=vidx, asview=True) def _remove_unused_categories(self, df_full, df_sub, uns): for k in df_full: if not is_categorical_dtype(df_full[k]): continue all_categories = df_full[k].cat.categories with pd.option_context("mode.chained_assignment", None): df_sub[k] = df_sub[k].cat.remove_unused_categories() # also correct the colors... 
color_key = f"{k}_colors" if color_key not in uns: continue color_vec = uns[color_key] if np.array(color_vec).ndim == 0: # Make 0D arrays into 1D ones uns[color_key] = np.array(color_vec)[(None,)] elif len(color_vec) != len(all_categories): # Reset colors del uns[color_key] else: idx = np.where(np.in1d(all_categories, df_sub[k].cat.categories))[0] uns[color_key] = np.array(color_vec)[(idx,)] def rename_categories(self, key: str, categories: Sequence[Any]): """\ Rename categories of annotation `key` in :attr:`obs`, :attr:`var`, and :attr:`uns`. Only supports passing a list/array-like `categories` argument. Besides calling `self.obs[key].cat.categories = categories` (and similarly for :attr:`var`), this also renames categories in unstructured annotation that uses the categorical annotation `key`. Parameters ---------- key Key for observations or variables annotation. categories New categories, the same number as the old categories. """ if isinstance(categories, Mapping): raise ValueError("Only list-like `categories` is supported.") if key in self.obs: old_categories = self.obs[key].cat.categories.tolist() self.obs[key].cat.rename_categories(categories, inplace=True) elif key in self.var: old_categories = self.var[key].cat.categories.tolist() self.var[key].cat.rename_categories(categories, inplace=True) else: raise ValueError(f"{key} is neither in `.obs` nor in `.var`.") # this is not a good solution # but depends on the scanpy conventions for storing the categorical key # as `groupby` in the `params` slot for k1, v1 in self.uns.items(): if not ( isinstance(v1, Mapping) and "params" in v1 and "groupby" in v1["params"] and v1["params"]["groupby"] == key ): continue for k2, v2 in v1.items(): # picks out the recarrays that are named according to the old # categories if isinstance(v2, np.ndarray) and v2.dtype.names is not None: if list(v2.dtype.names) == old_categories: self.uns[k1][k2].dtype.names = categories else: logger.warning( f"Omitting {k1}/{k2} as old categories do not match." ) def strings_to_categoricals(self, df: Optional[pd.DataFrame] = None): """\ Transform string annotations to categoricals. Only affects string annotations that lead to fewer categories than the total number of observations. Params ------ df If `df` is `None`, modifies both :attr:`obs` and :attr:`var`, otherwise modifies `df` inplace. Notes ----- Turns the view of an :class:`~anndata.AnnData` into an actual :class:`~anndata.AnnData`. """ dont_modify = False # only necessary for backed views if df is None: dfs = [self.obs, self.var] if self.is_view and self.isbacked: dont_modify = True else: dfs = [df] for df in dfs: string_cols = [ key for key in df.columns if infer_dtype(df[key]) == "string" ] for key in string_cols: c = pd.Categorical(df[key]) # TODO: We should only check if non-null values are unique, but # this would break cases where string columns with nulls could # be written as categorical, but not as string. # Possible solution: https://github.com/theislab/anndata/issues/504 if len(c.categories) >= len(c): continue # Ideally this could be done inplace sorted_categories = natsorted(c.categories) if not np.array_equal(c.categories, sorted_categories): c = c.reorder_categories(sorted_categories) if dont_modify: raise RuntimeError( "Please call `.strings_to_categoricals()` on full " "AnnData, not on this view. You might encounter this " "error message while copying or writing to disk." ) df[key] = c logger.info(f"... storing {key!r} as categorical") _sanitize = strings_to_categoricals # backwards compat def _inplace_subset_var(self, index: Index1D): """\ Inplace subsetting along variables dimension. Same as `adata = adata[:, index]`, but inplace. """ adata_subset = self[:, index].copy() if adata_subset._has_X(): dtype = adata_subset.X.dtype else: dtype = None self._init_as_actual(adata_subset, dtype=dtype) def _inplace_subset_obs(self, index: Index1D): """\ Inplace subsetting along observations dimension. Same as `adata = adata[index, :]`, but inplace. """ adata_subset = self[index].copy() if adata_subset._has_X(): dtype = adata_subset.X.dtype else: dtype = None self._init_as_actual(adata_subset, dtype=dtype) # TODO: Update, possibly remove def __setitem__( self, index: Index, val: Union[int, float, np.ndarray, sparse.spmatrix] ): if self.is_view: raise ValueError("Object is view and cannot be accessed with `[]`.") obs, var = self._normalize_indices(index) if not self.isbacked: self._X[obs, var] = val else: X = self.file["X"] X[obs, var] = val self._set_backed("X", X) def __len__(self) -> int: return self.shape[0] def transpose(self) -> "AnnData": """\ Transpose whole object. Data matrix is transposed, observations and variables are interchanged. Ignores `.raw`. """ if not self.isbacked: X = self.X else: X = self.file["X"] if self.is_view: raise ValueError( "You’re trying to transpose a view of an `AnnData`, " "which is currently not implemented. Call `.copy()` before transposing." ) def t_csr(m: sparse.spmatrix) -> sparse.csr_matrix: return m.T.tocsr() if sparse.isspmatrix_csr(m) else m.T return AnnData( X=t_csr(X) if X is not None else None, obs=self.var, var=self.obs, # we're taking private attributes here to be able to modify uns of the original object uns=self._uns, obsm=self.varm.flipped(), varm=self.obsm.flipped(), obsp=self.varp.copy(), varp=self.obsp.copy(), filename=self.filename, layers={k: t_csr(v) for k, v in self.layers.items()}, dtype=self.X.dtype.name if X is not None else "float32", ) T = property(transpose) def to_df(self, layer=None) -> pd.DataFrame: """\ Generate shallow :class:`~pandas.DataFrame`. The data matrix :attr:`X` is returned as :class:`~pandas.DataFrame`, where :attr:`obs_names` initializes the index, and :attr:`var_names` the columns. * No annotations are maintained in the returned object. * The data matrix is densified in case it is sparse. Params ------ layer : str Key for `.layers`. """ if layer is not None: X = self.layers[layer] elif not self._has_X(): raise ValueError("X is None, cannot convert to dataframe.") else: X = self.X if issparse(X): X = X.toarray() return pd.DataFrame(X, index=self.obs_names, columns=self.var_names) def _get_X(self, use_raw=False, layer=None): """\ Convenience method for getting expression values with common arguments and error handling. """ is_layer = layer is not None if use_raw and is_layer: raise ValueError( "Cannot use expression from both layer and raw. You provided: " f"`use_raw={use_raw}` and `layer={layer}`" ) if is_layer: return self.layers[layer] elif use_raw: if self.raw is None: raise ValueError("This AnnData doesn’t have a value in `.raw`.") return self.raw.X else: return self.X def obs_vector(self, k: str, *, layer: Optional[str] = None) -> np.ndarray: """\ Convenience function for returning a 1 dimensional ndarray of values from :attr:`X`, :attr:`layers`\\ `[k]`, or :attr:`obs`. Made for convenience, not performance. Intentionally permissive about arguments, for easy iterative use. Params ------ k Key to use.
Should be in :attr:`var_names` or :attr:`obs`\\ `.columns`. layer What layer values should be returned from. If `None`, :attr:`X` is used. Returns ------- A one dimensional nd array, with values for each obs in the same order as :attr:`obs_names`. """ if layer == "X": if "X" in self.layers: pass else: warnings.warn( "In a future version of AnnData, access to `.X` by passing" " `layer='X'` will be removed. Instead pass `layer=None`.", FutureWarning, ) layer = None return get_vector(self, k, "obs", "var", layer=layer) def var_vector(self, k, *, layer: Optional[str] = None) -> np.ndarray: """\ Convenience function for returning a 1 dimensional ndarray of values from :attr:`X`, :attr:`layers`\\ `[k]`, or :attr:`var`. Made for convenience, not performance. Intentionally permissive about arguments, for easy iterative use. Params ------ k Key to use. Should be in :attr:`obs_names` or :attr:`var`\\ `.columns`. layer What layer values should be returned from. If `None`, :attr:`X` is used. Returns ------- A one dimensional nd array, with values for each var in the same order as :attr:`var_names`. """ if layer == "X": if "X" in self.layers: pass else: warnings.warn( "In a future version of AnnData, access to `.X` by passing " "`layer='X'` will be removed. Instead pass `layer=None`.", FutureWarning, ) layer = None return get_vector(self, k, "var", "obs", layer=layer) @utils.deprecated("obs_vector") def _get_obs_array(self, k, use_raw=False, layer=None): """\ Get an array from the layer (default layer='X') along the :attr:`obs` dimension by first looking up `obs.keys` and then :attr:`obs_names`. """ if not use_raw or k in self.obs.columns: return self.obs_vector(k=k, layer=layer) else: return self.raw.obs_vector(k) @utils.deprecated("var_vector") def _get_var_array(self, k, use_raw=False, layer=None): """\ Get an array from the layer (default layer='X') along the :attr:`var` dimension by first looking up `var.keys` and then :attr:`var_names`. """ if not use_raw or k in self.var.columns: return self.var_vector(k=k, layer=layer) else: return self.raw.var_vector(k) def _mutated_copy(self, **kwargs): """Creating AnnData with attributes optionally specified via kwargs.""" if self.isbacked: if "X" not in kwargs or (self.raw is not None and "raw" not in kwargs): raise NotImplementedError( "This function does not currently handle backed objects " "internally, this should be dealt with before." ) new = {} for key in ["obs", "var", "obsm", "varm", "obsp", "varp", "layers"]: if key in kwargs: new[key] = kwargs[key] else: new[key] = getattr(self, key).copy() if "X" in kwargs: new["X"] = kwargs["X"] new["dtype"] = new["X"].dtype elif self._has_X(): new["X"] = self.X.copy() new["dtype"] = new["X"].dtype if "uns" in kwargs: new["uns"] = kwargs["uns"] else: new["uns"] = deepcopy(self._uns) if "raw" in kwargs: new["raw"] = kwargs["raw"] elif self.raw is not None: new["raw"] = self.raw.copy() return AnnData(**new) def to_memory(self) -> "AnnData": """Load backed AnnData object into memory. Example ------- .. code:: python import anndata backed = anndata.read_h5ad("file.h5ad", backed="r") mem = backed[backed.obs["cluster"] == "a", :].to_memory() """ if not self.isbacked: raise ValueError("Object is already in memory.") else: elems = {"X": to_memory(self.X)} if self.raw is not None: elems["raw"] = { "X": to_memory(self.raw.X), "var": self.raw.var, "varm": self.raw.varm, } adata = self._mutated_copy(**elems) self.file.close() return adata def copy(self, filename: Optional[PathLike] = None) -> "AnnData": """Full copy, optionally on disk.""" if not self.isbacked: if self.is_view and self._has_X(): # TODO: How do I unambiguously check if this is a copy? # Subsetting this way means we don’t have to have a view type # defined for the matrix, which is needed for some of the # current distributed backend. Specifically Dask. return self._mutated_copy( X=_subset(self._adata_ref.X, (self._oidx, self._vidx)).copy() ) else: return self._mutated_copy() else: from .._io import read_h5ad from .._io.write import _write_h5ad if filename is None: raise ValueError( "To copy an AnnData object in backed mode, " "pass a filename: `.copy(filename='myfilename.h5ad')`. " "To load the object into memory, use `.to_memory()`." ) mode = self.file._filemode _write_h5ad(filename, self) return read_h5ad(filename, backed=mode) def concatenate( self, *adatas: "AnnData", join: str = "inner", batch_key: str = "batch", batch_categories: Sequence[Any] = None, uns_merge: Optional[str] = None, index_unique: Optional[str] = "-", fill_value=None, ) -> "AnnData": """\ Concatenate along the observations axis. The :attr:`uns`, :attr:`varm` and :attr:`obsm` attributes are ignored. Currently, this works only in `'memory'` mode. .. note:: For more flexible and efficient concatenation, see: :func:`~anndata.concat`. Parameters ---------- adatas AnnData matrices to concatenate with. Each matrix is referred to as a “batch”. join Use intersection (`'inner'`) or union (`'outer'`) of variables. batch_key Add the batch annotation to :attr:`obs` using this key. batch_categories Use these as categories for the batch annotation. By default, use increasing numbers. uns_merge Strategy to use for merging entries of uns. These strategies are applied recursively. Currently implemented strategies include: * `None`: The default. The concatenated object will just have an empty dict for `uns`. * `"same"`: Only entries which have the same value in all AnnData objects are kept. * `"unique"`: Only entries which have one unique value in all AnnData objects are kept. * `"first"`: The first non-missing value is used. * `"only"`: A value is included if only one of the AnnData objects has a value at this path. index_unique Make the index unique by joining the existing index names with the batch category, using `index_unique='-'`, for instance. Provide `None` to keep existing indices. fill_value Scalar value to fill newly missing values in arrays with. Note: only applies to arrays and sparse matrices (not dataframes) and will only be used if `join="outer"`. .. note:: If not provided, the default value is `0` for sparse matrices and `np.nan` for numpy arrays. See the examples below for more information. Returns ------- :class:`~anndata.AnnData` The concatenated :class:`~anndata.AnnData`, where `adata.obs[batch_key]` stores a categorical variable labeling the batch. Notes ----- .. warning:: If you use `join='outer'` this fills 0s for sparse data when variables are absent in a batch. Use this with care. Dense data is filled with `NaN`. See the examples.
Examples -------- Joining on intersection of variables. >>> adata1 = AnnData( ... np.array([[1, 2, 3], [4, 5, 6]]), ... dict(obs_names=['s1', 's2'], anno1=['c1', 'c2']), ... dict(var_names=['a', 'b', 'c'], annoA=[0, 1, 2]), ... ) >>> adata2 = AnnData( ... np.array([[1, 2, 3], [4, 5, 6]]), ... dict(obs_names=['s3', 's4'], anno1=['c3', 'c4']), ... dict(var_names=['d', 'c', 'b'], annoA=[0, 1, 2]), ... ) >>> adata3 = AnnData( ... np.array([[1, 2, 3], [4, 5, 6]]), ... dict(obs_names=['s1', 's2'], anno2=['d3', 'd4']), ... dict(var_names=['d', 'c', 'b'], annoA=[0, 2, 3], annoB=[0, 1, 2]), ... ) >>> adata = adata1.concatenate(adata2, adata3) >>> adata AnnData object with n_obs × n_vars = 6 × 2 obs: 'anno1', 'anno2', 'batch' var: 'annoA-0', 'annoA-1', 'annoA-2', 'annoB-2' >>> adata.X array([[2., 3.], [5., 6.], [3., 2.], [6., 5.], [3., 2.], [6., 5.]], dtype=float32) >>> adata.obs anno1 anno2 batch s1-0 c1 NaN 0 s2-0 c2 NaN 0 s3-1 c3 NaN 1 s4-1 c4 NaN 1 s1-2 NaN d3 2 s2-2 NaN d4 2 >>> adata.var.T b c annoA-0 1 2 annoA-1 2 1 annoA-2 3 2 annoB-2 2 1 Joining on the union of variables. >>> outer = adata1.concatenate(adata2, adata3, join='outer') >>> outer AnnData object with n_obs × n_vars = 6 × 4 obs: 'anno1', 'anno2', 'batch' var: 'annoA-0', 'annoA-1', 'annoA-2', 'annoB-2' >>> outer.var.T a b c d annoA-0 0.0 1.0 2.0 NaN annoA-1 NaN 2.0 1.0 0.0 annoA-2 NaN 3.0 2.0 0.0 annoB-2 NaN 2.0 1.0 0.0 >>> outer.var_names Index(['a', 'b', 'c', 'd'], dtype='object') >>> outer.X array([[ 1., 2., 3., nan], [ 4., 5., 6., nan], [nan, 3., 2., 1.], [nan, 6., 5., 4.], [nan, 3., 2., 1.], [nan, 6., 5., 4.]], dtype=float32) >>> outer.X.sum(axis=0) array([nan, 25., 23., nan], dtype=float32) >>> import pandas as pd >>> Xdf = pd.DataFrame(outer.X, columns=outer.var_names) >>> Xdf a b c d 0 1.0 2.0 3.0 NaN 1 4.0 5.0 6.0 NaN 2 NaN 3.0 2.0 1.0 3 NaN 6.0 5.0 4.0 4 NaN 3.0 2.0 1.0 5 NaN 6.0 5.0 4.0 >>> Xdf.sum() a 5.0 b 25.0 c 23.0 d 10.0 dtype: float32 One way to deal with missing values is to use masked arrays: >>> from numpy import ma >>> outer.X = ma.masked_invalid(outer.X) >>> outer.X masked_array( data=[[1.0, 2.0, 3.0, --], [4.0, 5.0, 6.0, --], [--, 3.0, 2.0, 1.0], [--, 6.0, 5.0, 4.0], [--, 3.0, 2.0, 1.0], [--, 6.0, 5.0, 4.0]], mask=[[False, False, False, True], [False, False, False, True], [ True, False, False, False], [ True, False, False, False], [ True, False, False, False], [ True, False, False, False]], fill_value=1e+20, dtype=float32) >>> outer.X.sum(axis=0).data array([ 5., 25., 23., 10.], dtype=float32) The masked array is not saved but has to be reinstantiated after saving. >>> outer.write('./test.h5ad') >>> from anndata import read_h5ad >>> outer = read_h5ad('./test.h5ad') >>> outer.X array([[ 1., 2., 3., nan], [ 4., 5., 6., nan], [nan, 3., 2., 1.], [nan, 6., 5., 4.], [nan, 3., 2., 1.], [nan, 6., 5., 4.]], dtype=float32) For sparse data, everything behaves similarly, except that for `join='outer'`, zeros are added. >>> from scipy.sparse import csr_matrix >>> adata1 = AnnData( ... csr_matrix([[0, 2, 3], [0, 5, 6]], dtype=np.float32), ... dict(obs_names=['s1', 's2'], anno1=['c1', 'c2']), ... dict(var_names=['a', 'b', 'c']), ... ) >>> adata2 = AnnData( ... csr_matrix([[0, 2, 3], [0, 5, 6]], dtype=np.float32), ... dict(obs_names=['s3', 's4'], anno1=['c3', 'c4']), ... dict(var_names=['d', 'c', 'b']), ... ) >>> adata3 = AnnData( ... csr_matrix([[1, 2, 0], [0, 5, 6]], dtype=np.float32), ... dict(obs_names=['s5', 's6'], anno2=['d3', 'd4']), ... dict(var_names=['d', 'c', 'b']), ... 
) >>> adata = adata1.concatenate(adata2, adata3, join='outer') >>> adata.var_names Index(['a', 'b', 'c', 'd'], dtype='object') >>> adata.X.toarray() array([[0., 2., 3., 0.], [0., 5., 6., 0.], [0., 3., 2., 0.], [0., 6., 5., 0.], [0., 0., 2., 1.], [0., 6., 5., 0.]], dtype=float32) """ from .merge import concat, merge_outer, merge_dataframes, merge_same if self.isbacked: raise ValueError("Currently, concatenate only works in memory mode.") if len(adatas) == 0: return self.copy() elif len(adatas) == 1 and not isinstance(adatas[0], AnnData): adatas = adatas[0] # backwards compatibility all_adatas = (self,) + tuple(adatas) out = concat( all_adatas, axis=0, join=join, label=batch_key, keys=batch_categories, uns_merge=uns_merge, fill_value=fill_value, index_unique=index_unique, pairwise=False, ) # Backwards compat (some of this could be more efficient) # obs used to always be an outer join out.obs = concat( [AnnData(sparse.csr_matrix(a.shape), obs=a.obs) for a in all_adatas], axis=0, join="outer", label=batch_key, keys=batch_categories, index_unique=index_unique, ).obs # Removing varm del out.varm # Implementing old-style merging of var if batch_categories is None: batch_categories = np.arange(len(all_adatas)).astype(str) pat = rf"-({'|'.join(batch_categories)})$" out.var = merge_dataframes( [a.var for a in all_adatas], out.var_names, partial(merge_outer, batch_keys=batch_categories, merge=merge_same), ) out.var = out.var.iloc[ :, ( out.var.columns.str.extract(pat, expand=False) .fillna("") .argsort(kind="stable") ), ] return out def var_names_make_unique(self, join: str = "-"): # Important to go through the setter so varm dataframes are updated too self.var_names = utils.make_index_unique(self.var.index, join) var_names_make_unique.__doc__ = utils.make_index_unique.__doc__ def obs_names_make_unique(self, join: str = "-"): # Important to go through the setter so obsm dataframes are updated too self.obs_names = utils.make_index_unique(self.obs.index, join) obs_names_make_unique.__doc__ = utils.make_index_unique.__doc__ def _check_uniqueness(self): if not self.obs.index.is_unique: utils.warn_names_duplicates("obs") if not self.var.index.is_unique: utils.warn_names_duplicates("var") def __contains__(self, key: Any): raise AttributeError( "AnnData has no attribute __contains__, don’t check `in adata`." ) def _check_dimensions(self, key=None): if key is None: key = {"obs", "var", "obsm", "varm"} else: key = {key} if "obs" in key and len(self._obs) != self._n_obs: raise ValueError( "Observations annot. `obs` must have number of rows of `X`" f" ({self._n_obs}), but has {self._obs.shape[0]} rows." ) if "var" in key and len(self._var) != self._n_vars: raise ValueError( "Variables annot. `var` must have number of columns of `X`" f" ({self._n_vars}), but has {self._var.shape[0]} rows." ) if "obsm" in key: obsm = self._obsm if ( not all([o.shape[0] == self._n_obs for o in obsm.values()]) and len(obsm.dim_names) != self._n_obs ): raise ValueError( "Observations annot. `obsm` must have number of rows of `X`" f" ({self._n_obs}), but has {len(obsm)} rows." ) if "varm" in key: varm = self._varm if ( not all([v.shape[0] == self._n_vars for v in varm.values()]) and len(varm.dim_names) != self._n_vars ): raise ValueError( "Variables annot. `varm` must have number of columns of `X`" f" ({self._n_vars}), but has {len(varm)} rows."
) def write_h5ad( self, filename: Optional[PathLike] = None, compression: Optional[Literal["gzip", "lzf"]] = None, compression_opts: Union[int, Any] = None, force_dense: Optional[bool] = None, as_dense: Sequence[str] = (), ): """\ Write `.h5ad`-formatted hdf5 file. .. note:: Setting compression to `'gzip'` can save disk space but will slow down writing and subsequent reading. Prior to v0.6.16, this was the default for parameter `compression`. Generally, if you have sparse data that are stored as a dense matrix, you can dramatically improve performance and reduce disk space by converting to a :class:`~scipy.sparse.csr_matrix`:: from scipy.sparse import csr_matrix adata.X = csr_matrix(adata.X) Parameters ---------- filename Filename of data file. Defaults to backing file. compression See the h5py :ref:`dataset_compression`. compression_opts See the h5py :ref:`dataset_compression`. as_dense Sparse arrays in AnnData object to write as dense. Currently only supports `X` and `raw/X`. force_dense Write sparse data as a dense matrix. Defaults to `True` if object is backed, otherwise to `False`. """ from .._io.write import _write_h5ad if filename is None and not self.isbacked: raise ValueError("Provide a filename!") if filename is None: filename = self.filename _write_h5ad( Path(filename), self, compression=compression, compression_opts=compression_opts, force_dense=force_dense, as_dense=as_dense, ) if self.isbacked: self.file.filename = filename write = write_h5ad # a shortcut and backwards compat def write_csvs(self, dirname: PathLike, skip_data: bool = True, sep: str = ","): """\ Write annotation to `.csv` files. It is not possible to recover the full :class:`~anndata.AnnData` from these files. Use :meth:`write` for this. Parameters ---------- dirname Name of directory to which to export. skip_data Skip the data matrix :attr:`X`. sep Separator for the data. """ from .._io.write import write_csvs write_csvs(dirname, self, skip_data=skip_data, sep=sep) def write_loom(self, filename: PathLike, write_obsm_varm: bool = False): """\ Write `.loom`-formatted hdf5 file. Parameters ---------- filename The filename. """ from .._io.write import write_loom write_loom(filename, self, write_obsm_varm=write_obsm_varm) def write_zarr( self, store: Union[MutableMapping, PathLike], chunks: Union[bool, int, Tuple[int, ...], None] = None, ): """\ Write a hierarchical Zarr array store. Parameters ---------- store The filename, a :class:`~typing.MutableMapping`, or a Zarr storage class. chunks Chunk shape. """ from .._io.write import write_zarr write_zarr(store, self, chunks=chunks) def chunked_X(self, chunk_size: Optional[int] = None): """\ Return an iterator over the rows of the data matrix :attr:`X`. Parameters ---------- chunk_size Row size of a single chunk. """ if chunk_size is None: # Should be some adaptive code chunk_size = 6000 start = 0 n = self.n_obs for _ in range(int(n // chunk_size)): end = start + chunk_size yield (self.X[start:end], start, end) start = end if start < n: yield (self.X[start:n], start, n) def chunk_X( self, select: Union[int, Sequence[int], np.ndarray] = 1000, replace: bool = True, ): """\ Return a chunk of the data matrix :attr:`X` with random or specified indices. Parameters ---------- select Depending on the type: :class:`int` A random chunk with `select` rows will be returned. :term:`sequence` (e.g. a list, tuple or numpy array) of :class:`int` A chunk with these indices will be returned. 
replace If `select` is an integer then `True` means random sampling of indices with replacement, `False` without replacement. """ if isinstance(select, int): select = select if select < self.n_obs else self.n_obs choice = np.random.choice(self.n_obs, select, replace) elif isinstance(select, (np.ndarray, cabc.Sequence)): choice = np.asarray(select) else: raise ValueError("select should be int or array") reverse = None if self.isbacked: # h5py can only slice with a sorted list of unique index values # so random batch with indices [2, 2, 5, 3, 8, 10, 8] will fail # this fixes the problem indices, reverse = np.unique(choice, return_inverse=True) selection = self.X[indices.tolist()] else: selection = self.X[choice] selection = selection.toarray() if issparse(selection) else selection return selection if reverse is None else selection[reverse] def _has_X(self) -> bool: """ Check if X is not None. This is more efficient than trying `adata.X is not None` for views, since creating views (at least anndata's kind) can be expensive. """ if not self.is_view: return self.X is not None else: return self._adata_ref.X is not None # -------------------------------------------------------------------------- # all of the following is for backwards compat # -------------------------------------------------------------------------- @property @utils.deprecated("is_view") def isview(self): return self.is_view def _clean_up_old_format(self, uns): # multicolumn keys # all of the rest is only for backwards compat for bases in [["obs", "smp"], ["var"]]: axis = bases[0] for k in [f"{p}{base}_keys_multicol" for p in ["", "_"] for base in bases]: if uns and k in uns: keys = list(uns[k]) del uns[k] break else: keys = [] # now, for compat, fill the old multicolumn entries into obsm and varm # and remove them from obs and var m_attr = getattr(self, f"_{axis}m") for key in keys: m_attr[key] = self._get_and_delete_multicol_field(axis, key) def _get_and_delete_multicol_field(self, a, key_multicol): keys = [] for k in getattr(self, a).columns: if k.startswith(key_multicol): keys.append(k) values = getattr(self, a)[keys].values getattr(self, a).drop(keys, axis=1, inplace=True) return values
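# ----------------------------------------------------------------------------
# Editor's usage sketch (not part of the library; names are illustrative).
# It exercises the view/copy semantics implemented above: slicing yields a
# lightweight view, and `.copy()` materializes an independent object.
#
#     import numpy as np
#     import anndata
#
#     adata = anndata.AnnData(
#         X=np.arange(12, dtype=np.float32).reshape(4, 3),
#         layers={"counts": np.ones((4, 3), dtype=np.float32)},
#     )
#     view = adata[:2, :]      # no data copied yet
#     assert view.is_view
#     real = view.copy()       # independent AnnData
#     assert not real.is_view
# ----------------------------------------------------------------------------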
theislab/anndata
anndata/_core/anndata.py
Python
bsd-3-clause
75,713
[ "Bioconductor" ]
7f37e91b7e2d24a1dc3b8c2b729d41384a9183941edf88d3d2c4d3c8afc83149
""" Module holding function(s) creating the pilot wrapper. This is a DIRAC-free module, so it could possibly be used also outside of DIRAC installations. The main client of this module is the SiteDirector, that invokes the functions here more or less like this:: pilotFilesCompressedEncodedDict = getPilotFilesCompressedEncodedDict(pilotFiles) localPilot = pilotWrapperScript(pilotFilesCompressedEncodedDict, pilotOptions, pilotExecDir) _writePilotWrapperFile(localPilot=localPilot) """ import os import tempfile import base64 import bz2 pilotWrapperContent = """#!/bin/bash /usr/bin/env python << EOF # imports import os import stat import tempfile import sys import shutil import base64 import bz2 import logging import time import urllib2 import tarfile # setting up the logging formatter = logging.Formatter(fmt='%%(asctime)s UTC %%(levelname)-8s %%(message)s', datefmt='%%Y-%%m-%%d %%H:%%M:%%S') logging.Formatter.converter = time.gmtime try: screen_handler = logging.StreamHandler(stream=sys.stdout) except TypeError: # python2.6 screen_handler = logging.StreamHandler(strm=sys.stdout) screen_handler.setFormatter(formatter) logger = logging.getLogger('pilotLogger') logger.setLevel(logging.DEBUG) logger.addHandler(screen_handler) # just logging the environment as first thing print '===========================================================' logger.debug('Environment of execution host\\n') for key, val in os.environ.iteritems(): logger.debug(key + '=' + val) print '===========================================================\\n' # putting ourselves in the right directory pilotExecDir = '%(pilotExecDir)s' if not pilotExecDir: pilotExecDir = os.getcwd() pilotWorkingDirectory = tempfile.mkdtemp(suffix='pilot', prefix='DIRAC_', dir=pilotExecDir) pilotWorkingDirectory = os.path.realpath(pilotWorkingDirectory) os.chdir(pilotWorkingDirectory) logger.info("Launching dirac-pilot script from %%s" %%os.getcwd()) """ def pilotWrapperScript(pilotFilesCompressedEncodedDict=None, pilotOptions='', pilotExecDir='', envVariables=None, location=''): """ Returns the content of the pilot wrapper script. The pilot wrapper script is a bash script that invokes the system python. Linux only. :param pilotFilesCompressedEncodedDict: this is a possible dict of name:compressed+encoded content files. the proxy can be part of this, and of course the pilot files :type pilotFilesCompressedEncodedDict: dict :param pilotOptions: options with which to start the pilot :type pilotOptions: basestring :param pilotExecDir: pilot execution directory :type pilotExecDir: basestring :param envVariables: dictionary of environment variables :type envVariables: dict :param location: location where to get the pilot files :type location: basestring :returns: content of the pilot wrapper :rtype: basestring """ if pilotFilesCompressedEncodedDict is None: pilotFilesCompressedEncodedDict = {} if envVariables is None: envVariables = {} compressedString = "" for pfName, encodedPf in pilotFilesCompressedEncodedDict.iteritems(): # are there some pilot files to unpack? 
# then we create the unpacking string compressedString += """ try: with open('%(pfName)s', 'w') as fd: fd.write(bz2.decompress(base64.b64decode(\"\"\"%(encodedPf)s\"\"\"))) os.chmod('%(pfName)s', stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) except BaseException as x: print >> sys.stderr, x shutil.rmtree(pilotWorkingDirectory) sys.exit(-1) """ % {'encodedPf': encodedPf, 'pfName': pfName} envVariablesString = "" for name, value in envVariables.iteritems(): # are there some environment variables to add? envVariablesString += """ os.environ[\"%(name)s\"]=\"%(value)s\" """ % {'name': name, 'value': value} # add X509_USER_PROXY to establish the pilot env on cluster WNs if 'proxy' in pilotFilesCompressedEncodedDict: envVariablesString += """ os.environ['X509_USER_PROXY'] = os.path.join(pilotWorkingDirectory, 'proxy') """ # now building the actual pilot wrapper localPilot = pilotWrapperContent % {'pilotExecDir': pilotExecDir} if compressedString: localPilot += """ # unpacking lines logger.info("But first unpacking pilot files") %s """ % compressedString if envVariablesString: localPilot += """ # Modifying the environment %s """ % envVariablesString if location: localPilot += """ # Getting the pilot files logger.info("Getting the pilot files from %(location)s") # Getting the json file rJson = urllib2.urlopen('https://' + '%(location)s' + '/pilot/pilot.json') with open('pilot.json', 'wb') as pj: pj.write(rJson.read()) pj.close() # Getting the tar file rTar = urllib2.urlopen('https://' + '%(location)s' + '/pilot/pilot.tar') with open('pilot.tar', 'wb') as pt: pt.write(rTar.read()) pt.close() with tarfile.open('pilot.tar', 'r') as pt: pt.extractall() pt.close() """ % {'location': location} localPilot += """ # now finally launching the pilot script (which should be called dirac-pilot.py) cmd = "python dirac-pilot.py %s" logger.info('Executing: %%s' %% cmd) sys.stdout.flush() os.system(cmd) # and cleaning up shutil.rmtree(pilotWorkingDirectory) EOF """ % pilotOptions return localPilot def getPilotFilesCompressedEncodedDict(pilotFiles, proxy=None): """ this function returns the dictionary of pilot file names : encodedCompressedContent that we are going to send :param pilotFiles: list of pilot files (list of location on the disk) :type pilotFiles: list :param proxy: the proxy to send :type proxy: X509Chain """ pilotFilesCompressedEncodedDict = {} for pf in pilotFiles: with open(pf, "r") as fd: pfContent = fd.read() pfContentEncoded = base64.b64encode(bz2.compress(pfContent, 9)) pilotFilesCompressedEncodedDict[os.path.basename(pf)] = pfContentEncoded if proxy is not None: compressedAndEncodedProxy = base64.b64encode(bz2.compress(proxy.dumpAllToString()['Value'])) pilotFilesCompressedEncodedDict['proxy'] = compressedAndEncodedProxy return pilotFilesCompressedEncodedDict def _writePilotWrapperFile(workingDirectory=None, localPilot=''): """ write the localPilot string to a file, return the file name :param workingDirectory: the directory where to store the pilot wrapper file :type workingDirectory: basestring :param localPilot: content of the pilot wrapper :type localPilot: basestring :returns: file name of the pilot wrapper :rtype: basestring """ fd, name = tempfile.mkstemp(suffix='_pilotwrapper.py', prefix='DIRAC_', dir=workingDirectory) with os.fdopen(fd, 'w') as pilotWrapper: pilotWrapper.write(localPilot) return name
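# ----------------------------------------------------------------------------
# Editor's usage sketch (not part of the module; file names and options are
# illustrative assumptions), mirroring the SiteDirector flow from the module
# docstring above:
#
#     pilotFiles = ['dirac-pilot.py', 'pilotTools.py', 'pilotCommands.py']
#     encodedDict = getPilotFilesCompressedEncodedDict(pilotFiles)
#     localPilot = pilotWrapperScript(
#         pilotFilesCompressedEncodedDict=encodedDict,
#         pilotOptions='--debug',
#         pilotExecDir='/scratch')
#     wrapperPath = _writePilotWrapperFile(localPilot=localPilot)
# ----------------------------------------------------------------------------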
chaen/DIRAC
WorkloadManagementSystem/Utilities/PilotWrapper.py
Python
gpl-3.0
7,049
[ "DIRAC" ]
356bf5ce508f9a16cf27d3733e2d8d394122f4df8b01bfb46a5e595e05b24376
#!/usr/bin/env python # Copyright 2014-2018 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Author: Qiming Sun <osirpt.sun@gmail.com> # ''' A low-level interface to the libcint library. It's recommended to use the Mole.intor method to drive the integral evaluation functions. ''' import warnings import ctypes import numpy from pyscf import lib libcgto = lib.load_library('libcgto') ANG_OF = 1 NPRIM_OF = 2 NCTR_OF = 3 KAPPA_OF = 4 PTR_EXP = 5 PTR_COEFF = 6 BAS_SLOTS = 8 def getints(intor_name, atm, bas, env, shls_slice=None, comp=None, hermi=0, aosym='s1', ao_loc=None, cintopt=None, out=None): r'''1e and 2e integral generator. Args: intor_name : str ================================ ============= Function Expression ================================ ============= "int1e_ovlp" ( \| \) "int1e_nuc" ( \| nuc \| \) "int1e_kin" (.5 \| p dot p\) "int1e_ia01p" (#C(0 1) \| nabla-rinv \| cross p\) "int1e_giao_irjxp" (#C(0 1) \| r cross p\) "int1e_cg_irxp" (#C(0 1) \| rc cross p\) "int1e_giao_a11part" (-.5 \| nabla-rinv \| r\) "int1e_cg_a11part" (-.5 \| nabla-rinv \| rc\) "int1e_a01gp" (g \| nabla-rinv cross p \|\) "int1e_igkin" (#C(0 .5) g \| p dot p\) "int1e_igovlp" (#C(0 1) g \|\) "int1e_ignuc" (#C(0 1) g \| nuc \|\) "int1e_z" ( \| zc \| \) "int1e_zz" ( \| zc zc \| \) "int1e_r" ( \| rc \| \) "int1e_r2" ( \| rc dot rc \| \) "int1e_rr" ( \| rc rc \| \) "int1e_rrr" ( \| rc rc rc \| \) "int1e_rrrr" ( \| rc rc rc rc \| \) "int1e_pnucp" (p* \| nuc dot p \| \) "int1e_prinvxp" (p* \| rinv cross p \| \) "int1e_ipovlp" (nabla \|\) "int1e_ipkin" (.5 nabla \| p dot p\) "int1e_ipnuc" (nabla \| nuc \|\) "int1e_iprinv" (nabla \| rinv \|\) "int1e_rinv" (\| rinv \|\) "int1e_pnucxp" (p* \| nuc cross p \| \) "int1e_irp" ( \| rc nabla \| \) "int1e_irrp" ( \| rc rc nabla \| \) "int1e_irpr" ( \| rc nabla rc \| \) "int1e_ggovlp" ( \| g g \| \) "int1e_ggkin" (.5 \| g g p dot p \| \) "int1e_ggnuc" ( \| g g nuc \| \) "int1e_grjxp" ( \| g r cross p \| \) "ECPscalar" AREP ECP integrals, similar to int1e_nuc "ECPscalar_ipnuc" (nabla i | ECP | ), similar to int1e_ipnuc "ECPscalar_iprinv" similar to int1e_iprinv for a specific atom "ECPscalar_ignuc" similar to int1e_ignuc "ECPscalar_iprinvip" similar to int1e_iprinvip "ECPso" < | Spin-orbit ECP | > "int1e_ovlp_spinor" ( \| \) "int1e_nuc_spinor" ( \| nuc \|\) "int1e_srsr_spinor" (sigma dot r \| sigma dot r\) "int1e_sr_spinor" (sigma dot r \|\) "int1e_srsp_spinor" (sigma dot r \| sigma dot p\) "int1e_spsp_spinor" (sigma dot p \| sigma dot p\) "int1e_sp_spinor" (sigma dot p \|\) "int1e_spnucsp_spinor" (sigma dot p \| nuc \| sigma dot p\) "int1e_srnucsr_spinor" (sigma dot r \| nuc \| sigma dot r\) "int1e_govlp_spinor" (g \|\) "int1e_gnuc_spinor" (g \| nuc \|\) "int1e_cg_sa10sa01_spinor" (.5 sigma cross rc \| sigma cross nabla-rinv \|\) "int1e_cg_sa10sp_spinor" (.5 rc cross sigma \| sigma dot p\) "int1e_cg_sa10nucsp_spinor" (.5 rc cross sigma \| nuc \| sigma dot p\) "int1e_giao_sa10sa01_spinor" (.5 sigma cross r \| sigma cross nabla-rinv \|\)
"int1e_giao_sa10sp_spinor" (.5 r cross sigma \| sigma dot p\) "int1e_giao_sa10nucsp_spinor" (.5 r cross sigma \| nuc \| sigma dot p\) "int1e_sa01sp_spinor" (\| nabla-rinv cross sigma \| sigma dot p\) "int1e_spgsp_spinor" (g sigma dot p \| sigma dot p\) "int1e_spgnucsp_spinor" (g sigma dot p \| nuc \| sigma dot p\) "int1e_spgsa01_spinor" (g sigma dot p \| nabla-rinv cross sigma \|\) "int1e_spspsp_spinor" (sigma dot p \| sigma dot p sigma dot p\) "int1e_spnuc_spinor" (sigma dot p \| nuc \|\) "int1e_ipovlp_spinor" (nabla \|\) "int1e_ipkin_spinor" (.5 nabla \| p dot p\) "int1e_ipnuc_spinor" (nabla \| nuc \|\) "int1e_iprinv_spinor" (nabla \| rinv \|\) "int1e_ipspnucsp_spinor" (nabla sigma dot p \| nuc \| sigma dot p\) "int1e_ipsprinvsp_spinor" (nabla sigma dot p \| rinv \| sigma dot p\) "int2e" ( \, \| \, \) "int2e_ig1" (#C(0 1) g \, \| \, \) "int2e_gg1" (g g \, \| \, \) "int2e_g1g2" (g \, \| g \, \) "int2e_p1vxp1" ( p* \, cross p \| \, \) ; SSO "int2e_spinor" (, \| \, \) "int2e_spsp1_spinor" (sigma dot p \, sigma dot p \| \, \) "int2e_spsp1spsp2_spinor" (sigma dot p \, sigma dot p \| sigma dot p \, sigma dot p \) "int2e_srsr1_spinor" (sigma dot r \, sigma dot r \| \,\) "int2e_srsr1srsr2_spinor" (sigma dot r \, sigma dot r \| sigma dot r \, sigma dot r\) "int2e_cg_sa10sp1_spinor" (.5 rc cross sigma \, sigma dot p \| \,\) "int2e_cg_sa10sp1spsp2_spinor" (.5 rc cross sigma \, sigma dot p \| sigma dot p \, sigma dot p \) "int2e_giao_sa10sp1_spinor" (.5 r cross sigma \, sigma dot p \| \,\) "int2e_giao_sa10sp1spsp2_spinor" (.5 r cross sigma \, sigma dot p \| sigma dot p \, sigma dot p \) "int2e_g1_spinor" (g \, \| \,\) "int2e_spgsp1_spinor" (g sigma dot p \, sigma dot p \| \,\) "int2e_g1spsp2_spinor" (g \, \| sigma dot p \, sigma dot p\) "int2e_spgsp1spsp2_spinor" (g sigma dot p \, sigma dot p \| sigma dot p \, sigma dot p\) "int2e_spv1_spinor" (sigma dot p \, \| \,\) "int2e_vsp1_spinor" (\, sigma dot p \| \,\) "int2e_spsp2_spinor" (\, \| sigma dot p \, sigma dot p\) "int2e_spv1spv2_spinor" (sigma dot p \, \| sigma dot p \,\) "int2e_vsp1spv2_spinor" (\, sigma dot p \| sigma dot p \,\) "int2e_spv1vsp2_spinor" (sigma dot p \, \| \, sigma dot p\) "int2e_vsp1vsp2_spinor" (\, sigma dot p \| \, sigma dot p\) "int2e_spv1spsp2_spinor" (sigma dot p \, \| sigma dot p \, sigma dot p\) "int2e_vsp1spsp2_spinor" (\, sigma dot p \| sigma dot p \, sigma dot p\) "int2e_ig1" (#C(0 1) g \, \| \, \) "int2e_ip1" (nabla \, \| \,\) "int2e_ip1_spinor" (nabla \, \| \,\) "int2e_ipspsp1_spinor" (nabla sigma dot p \, sigma dot p \| \,\) "int2e_ip1spsp2_spinor" (nabla \, \| sigma dot p \, sigma dot p\) "int2e_ipspsp1spsp2_spinor" (nabla sigma dot p \, sigma dot p \| sigma dot p \, sigma dot p\) "int2e_ipsrsr1_spinor" (nabla sigma dot r \, sigma dot r \| \,\) "int2e_ip1srsr2_spinor" (nabla \, \| sigma dot r \, sigma dot r\) "int2e_ipsrsr1srsr2_spinor" (nabla sigma dot r \, sigma dot r \| sigma dot r \, sigma dot r\) "int2e_ip1" (nabla \, \| \,\) "int2e_ssp1ssp2_spinor" ( \, sigma dot p \| gaunt \| \, sigma dot p\) "int2e_cg_ssa10ssp2_spinor" (rc cross sigma \, \| gaunt \| \, sigma dot p\) "int2e_giao_ssa10ssp2_spinor" (r cross sigma \, \| gaunt \| \, sigma dot p\) "int2e_gssp1ssp2_spinor" (g \, sigma dot p \| gaunt \| \, sigma dot p\) "int2e_ipip1" ( nabla nabla \, \| \, \) "int2e_ipvip1" ( nabla \, nabla \| \, \) "int2e_ip1ip2" ( nabla \, \| nabla \, \) "int3c2e_ip1" (nabla \, \| \) "int3c2e_ip2" ( \, \| nabla\) "int2c2e_ip1" (nabla \| r12 \| \) "int3c2e_spinor" (nabla \, \| \) "int3c2e_spsp1_spinor" (nabla \, \| \) 
"int3c2e_ip1_spinor" (nabla \, \| \) "int3c2e_ip2_spinor" ( \, \| nabla\) "int3c2e_ipspsp1_spinor" (nabla sigma dot p \, sigma dot p \| \) "int3c2e_spsp1ip2_spinor" (sigma dot p \, sigma dot p \| nabla \) "ECPscalar_spinor" AREP ECP integrals, similar to int1e_nuc "ECPscalar_ipnuc_spinor" (nabla i | ECP | ), similar to int1e_ipnuc "ECPscalar_iprinv_spinor" similar to int1e_iprinv for a specific atom "ECPscalar_ignuc_spinor" similar to int1e_ignuc "ECPscalar_iprinvip_spinor" similar to int1e_iprinvip "ECPso_spinor" < | sigam dot Spin-orbit ECP | > ================================ ============= atm : int32 ndarray libcint integral function argument bas : int32 ndarray libcint integral function argument env : float64 ndarray libcint integral function argument Kwargs: shls_slice : 8-element list (ish_start, ish_end, jsh_start, jsh_end, ksh_start, ksh_end, lsh_start, lsh_end) comp : int Components of the integrals, e.g. int1e_ipovlp has 3 components. hermi : int (1e integral only) Symmetry of the 1e integrals | 0 : no symmetry assumed (default) | 1 : hermitian | 2 : anti-hermitian aosym : str (2e integral only) Symmetry of the 2e integrals | 4 or '4' or 's4': 4-fold symmetry (default) | '2ij' or 's2ij' : symmetry between i, j in (ij|kl) | '2kl' or 's2kl' : symmetry between k, l in (ij|kl) | 1 or '1' or 's1': no symmetry out : ndarray (2e integral only) array to store the 2e AO integrals Returns: ndarray of 1-electron integrals, can be either 2-dim or 3-dim, depending on comp Examples: >>> mol.build(atom='H 0 0 0; H 0 0 1.1', basis='sto-3g') >>> gto.getints('int1e_ipnuc_sph', mol._atm, mol._bas, mol._env, comp=3) # <nabla i | V_nuc | j> [[[ 0. 0. ] [ 0. 0. ]] [[ 0. 0. ] [ 0. 0. ]] [[ 0.10289944 0.48176097] [-0.48176097 -0.10289944]]] ''' intor_name, comp = _get_intor_and_comp(intor_name, comp) if (intor_name.startswith('int1e') or intor_name.startswith('ECP') or intor_name.startswith('int2c2e')): return getints2c(intor_name, atm, bas, env, shls_slice, comp, hermi, ao_loc, cintopt, out) elif intor_name.startswith('int2e') or intor_name.startswith('int4c1e'): return getints4c(intor_name, atm, bas, env, shls_slice, comp, aosym, ao_loc, cintopt, out) elif intor_name.startswith('int3c'): return getints3c(intor_name, atm, bas, env, shls_slice, comp, aosym, ao_loc, cintopt, out) else: raise KeyError('Unknown intor %s' % intor_name) def _get_intor_and_comp(intor_name, comp=None): intor_name = ascint3(intor_name) if comp is None: try: if '_spinor' in intor_name: fname = intor_name.replace('_spinor', '') comp = _INTOR_FUNCTIONS[fname][1] else: fname = intor_name.replace('_sph', '').replace('_cart', '') comp = _INTOR_FUNCTIONS[fname][0] except KeyError: warnings.warn('Function %s not found. 
Set its comp to 1' % intor_name) comp = 1 return intor_name, comp _INTOR_FUNCTIONS = { # Function name : (comp-for-scalar, comp-for-spinor) 'int1e_ovlp' : (1, 1), 'int1e_nuc' : (1, 1), 'int1e_kin' : (1, 1), 'int1e_ia01p' : (3, 3), 'int1e_giao_irjxp' : (3, 3), 'int1e_cg_irxp' : (3, 3), 'int1e_giao_a11part' : (9, 9), 'int1e_cg_a11part' : (9, 9), 'int1e_a01gp' : (9, 9), 'int1e_igkin' : (3, 3), 'int1e_igovlp' : (3, 3), 'int1e_ignuc' : (3, 3), 'int1e_pnucp' : (1, 1), 'int1e_z' : (1, 1), 'int1e_zz' : (1, 1), 'int1e_r' : (3, 3), 'int1e_r2' : (1, 1), 'int1e_rr' : (9, 9), 'int1e_rrr' : (27, 27), 'int1e_rrrr' : (81, 81), 'int1e_z_origj' : (1, 1), 'int1e_zz_origj' : (1, 1), 'int1e_r_origj' : (3, 3), 'int1e_rr_origj' : (9, 9), 'int1e_r2_origj' : (1, 1), 'int1e_r4_origj' : (1, 1), 'int1e_p4' : (1, 1), 'int1e_prinvp' : (1, 1), 'int1e_prinvxp' : (3, 3), 'int1e_pnucxp' : (3, 3), 'int1e_irp' : (9, 9), 'int1e_irrp' : (27, 27), 'int1e_irpr' : (27, 27), 'int1e_ggovlp' : (9, 9), 'int1e_ggkin' : (9, 9), 'int1e_ggnuc' : (9, 9), 'int1e_grjxp' : (9, 9), 'int2e' : (1, 1), 'int2e_ig1' : (3, 3), 'int2e_gg1' : (9, 9), 'int2e_g1g2' : (9, 9), 'int2e_ip1v_rc1' : (9, 9), 'int2e_ip1v_r1' : (9, 9), 'int2e_ipvg1_xp1' : (9, 9), 'int2e_ipvg2_xp1' : (9, 9), 'int2e_p1vxp1' : (3, 3), 'int1e_inuc_rcxp' : (3, 3), 'int1e_inuc_rxp' : (3, 3), 'int1e_sigma' : (12,3), 'int1e_spsigmasp' : (12,3), 'int1e_srsr' : (4, 1), 'int1e_sr' : (4, 1), 'int1e_srsp' : (4, 1), 'int1e_spsp' : (4, 1), 'int1e_sp' : (4, 1), 'int1e_spnucsp' : (4, 1), 'int1e_sprinvsp' : (4, 1), 'int1e_srnucsr' : (4, 1), 'int1e_sprsp' : (12,3), 'int1e_govlp' : (3, 3), 'int1e_gnuc' : (3, 3), 'int1e_cg_sa10sa01' : (36,9), 'int1e_cg_sa10sp' : (12,3), 'int1e_cg_sa10nucsp' : (12,3), 'int1e_giao_sa10sa01' : (36,9), 'int1e_giao_sa10sp' : (12,3), 'int1e_giao_sa10nucsp' : (12,3), 'int1e_sa01sp' : (12,3), 'int1e_spgsp' : (12,3), 'int1e_spgnucsp' : (12,3), 'int1e_spgsa01' : (36,9), 'int2e_spsp1' : (4, 1), 'int2e_spsp1spsp2' : (16,1), 'int2e_srsr1' : (4, 1), 'int2e_srsr1srsr2' : (16,1), 'int2e_cg_sa10sp1' : (12,3), 'int2e_cg_sa10sp1spsp2' : (48,3), 'int2e_giao_sa10sp1' : (12,3), 'int2e_giao_sa10sp1spsp2' : (48,3), 'int2e_g1' : (12,3), 'int2e_spgsp1' : (12,3), 'int2e_g1spsp2' : (12,3), 'int2e_spgsp1spsp2' : (48,3), 'int2e_pp1' : (1, 1), 'int2e_pp2' : (1, 1), 'int2e_pp1pp2' : (1, 1), 'int1e_spspsp' : (4, 1), 'int1e_spnuc' : (4, 1), 'int2e_spv1' : (4, 1), 'int2e_vsp1' : (4, 1), 'int2e_spsp2' : (4, 1), 'int2e_spv1spv2' : (16,1), 'int2e_vsp1spv2' : (16,1), 'int2e_spv1vsp2' : (16,1), 'int2e_vsp1vsp2' : (16,1), 'int2e_spv1spsp2' : (16,1), 'int2e_vsp1spsp2' : (16,1), 'int1e_ipovlp' : (3, 3), 'int1e_ipkin' : (3, 3), 'int1e_ipnuc' : (3, 3), 'int1e_iprinv' : (3, 3), 'int1e_rinv' : (1, 1), 'int1e_ipspnucsp' : (12,3), 'int1e_ipsprinvsp' : (12,3), 'int1e_ippnucp' : (3, 3), 'int1e_ipprinvp' : (3, 3), 'int2e_ip1' : (3, 3), 'int2e_ip2' : (3, 3), 'int2e_ipspsp1' : (12,3), 'int2e_ip1spsp2' : (12,3), 'int2e_ipspsp1spsp2' : (48,3), 'int2e_ipsrsr1' : (12,3), 'int2e_ip1srsr2' : (12,3), 'int2e_ipsrsr1srsr2' : (48,3), 'int2e_ssp1ssp2' : (16,1), 'int2e_ssp1sps2' : (16,1), 'int2e_sps1ssp2' : (16,1), 'int2e_sps1sps2' : (16,1), 'int2e_cg_ssa10ssp2' : (48,3), 'int2e_giao_ssa10ssp2' : (18,3), 'int2e_gssp1ssp2' : (18,3), 'int2e_gauge_r1_ssp1ssp2' : (None, 1), 'int2e_gauge_r1_ssp1sps2' : (None, 1), 'int2e_gauge_r1_sps1ssp2' : (None, 1), 'int2e_gauge_r1_sps1sps2' : (None, 1), 'int2e_gauge_r2_ssp1ssp2' : (None, 1), 'int2e_gauge_r2_ssp1sps2' : (None, 1), 'int2e_gauge_r2_sps1ssp2' : (None, 1), 'int2e_gauge_r2_sps1sps2' :
(None, 1), 'int1e_ipipovlp' : (9, 9), 'int1e_ipovlpip' : (9, 9), 'int1e_ipipkin' : (9, 9), 'int1e_ipkinip' : (9, 9), 'int1e_ipipnuc' : (9, 9), 'int1e_ipnucip' : (9, 9), 'int1e_ipiprinv' : (9, 9), 'int1e_iprinvip' : (9, 9), 'int2e_ipip1' : (9, 9), 'int2e_ipvip1' : (9, 9), 'int2e_ip1ip2' : (9, 9), 'int1e_ipippnucp' : (9, 9), 'int1e_ippnucpip' : (9, 9), 'int1e_ipipprinvp' : (9, 9), 'int1e_ipprinvpip' : (9, 9), 'int1e_ipipspnucsp' : (36,9), 'int1e_ipspnucspip' : (36,9), 'int1e_ipipsprinvsp' : (36,9), 'int1e_ipsprinvspip' : (36,9), 'int3c2e' : (1, 1), 'int3c2e_ip1' : (3, 3), 'int3c2e_ip2' : (3, 3), 'int3c2e_pvp1' : (1, 1), 'int3c2e_pvxp1' : (3, 3), 'int2c2e_ip1' : (3, 3), 'int2c2e_ip2' : (3, 3), 'int3c2e_ig1' : (3, 3), 'int3c2e_spsp1' : (4, 1), 'int3c2e_ipspsp1' : (12,3), 'int3c2e_spsp1ip2' : (12,3), 'int3c2e_ipip1' : (9, 9), 'int3c2e_ipip2' : (9, 9), 'int3c2e_ipvip1' : (9, 9), 'int3c2e_ip1ip2' : (9, 9), 'int2c2e_ip1ip2' : (9, 9), 'int2c2e_ipip1' : (9, 9), 'int3c1e' : (1, 1), 'int3c1e_p2' : (1, 1), 'int3c1e_iprinv' : (3, 3), 'int2c2e' : (1, 1), 'int2e_yp' : (1, 1), 'int2e_stg' : (1, 1), 'int2e_coulerf' : (1, 1), 'ECPscalar' : (1, None), 'ECPscalar_ipnuc' : (3, None), 'ECPscalar_iprinv' : (3, None), 'ECPscalar_ignuc' : (3, None), 'ECPscalar_iprinvip' : (9, None), 'ECPso' : (3, 1), } def getints2c(intor_name, atm, bas, env, shls_slice=None, comp=1, hermi=0, ao_loc=None, cintopt=None, out=None): atm = numpy.asarray(atm, dtype=numpy.int32, order='C') bas = numpy.asarray(bas, dtype=numpy.int32, order='C') env = numpy.asarray(env, dtype=numpy.double, order='C') natm = atm.shape[0] nbas = bas.shape[0] if shls_slice is None: shls_slice = (0, nbas, 0, nbas) else: assert(shls_slice[1] <= nbas and shls_slice[3] <= nbas) if ao_loc is None: ao_loc = make_loc(bas, intor_name) i0, i1, j0, j1 = shls_slice[:4] naoi = ao_loc[i1] - ao_loc[i0] naoj = ao_loc[j1] - ao_loc[j0] if intor_name.endswith('_cart') or intor_name.endswith('_sph'): mat = numpy.ndarray((naoi,naoj,comp), numpy.double, out, order='F') drv_name = 'GTOint2c' else: mat = numpy.ndarray((naoi,naoj,comp), numpy.complex, out, order='F') if '2c2e' in intor_name: assert(hermi != lib.HERMITIAN and hermi != lib.ANTIHERMI) drv_name = 'GTOint2c_spinor' if mat.size > 0: if cintopt is None: cintopt = make_cintopt(atm, bas, env, intor_name) fn = getattr(libcgto, drv_name) fn(getattr(libcgto, intor_name), mat.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(comp), ctypes.c_int(hermi), (ctypes.c_int*4)(*(shls_slice[:4])), ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(natm), bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(nbas), env.ctypes.data_as(ctypes.c_void_p)) mat = mat.transpose(2,0,1) if comp == 1: mat = mat[0] return mat def getints3c(intor_name, atm, bas, env, shls_slice=None, comp=1, aosym='s1', ao_loc=None, cintopt=None, out=None): atm = numpy.asarray(atm, dtype=numpy.int32, order='C') bas = numpy.asarray(bas, dtype=numpy.int32, order='C') env = numpy.asarray(env, dtype=numpy.double, order='C') natm = atm.shape[0] nbas = bas.shape[0] if shls_slice is None: shls_slice = (0, nbas, 0, nbas, 0, nbas) if 'ssc' in intor_name or 'spinor' in intor_name: bas = numpy.asarray(numpy.vstack((bas,bas)), dtype=numpy.int32) shls_slice = (0, nbas, 0, nbas, nbas, nbas*2) nbas = bas.shape[0] else: assert(shls_slice[1] <= nbas and shls_slice[3] <= nbas and shls_slice[5] <= nbas) i0, i1, j0, j1, k0, k1 = shls_slice[:6] if ao_loc is None: ao_loc = make_loc(bas, intor_name) if 'ssc' in intor_name: ao_loc[k0:] = ao_loc[k0] + 
make_loc(bas[k0:], 'cart') elif 'spinor' in intor_name: # The auxbasis for electron-2 is in real spherical representation ao_loc[k0:] = ao_loc[k0] + make_loc(bas[k0:], 'sph') naok = ao_loc[k1] - ao_loc[k0] if aosym in ('s1',): naoi = ao_loc[i1] - ao_loc[i0] naoj = ao_loc[j1] - ao_loc[j0] shape = (naoi, naoj, naok, comp) else: aosym = 's2ij' nij = ao_loc[i1]*(ao_loc[i1]+1)//2 - ao_loc[i0]*(ao_loc[i0]+1)//2 shape = (nij, naok, comp) if 'spinor' in intor_name: mat = numpy.ndarray(shape, numpy.complex, out, order='F') drv = libcgto.GTOr3c_drv fill = getattr(libcgto, 'GTOr3c_fill_'+aosym) else: mat = numpy.ndarray(shape, numpy.double, out, order='F') drv = libcgto.GTOnr3c_drv fill = getattr(libcgto, 'GTOnr3c_fill_'+aosym) if mat.size > 0: # Generating opt for all indices leads to large overhead and poor OMP # speedup for solvent model and COSX functions. In these methods, # the third index of the three center integrals corresponds to a # large number of grids. Initializing the opt for the third index is # not necessary. if cintopt is None: if '3c2e' in intor_name: # TODO: Libcint-3.14 and newer version support to compute # int3c2e without the opt for the 3rd index. #cintopt = make_cintopt(atm, bas[:max(i1, j1)], env, intor_name) cintopt = lib.c_null_ptr() else: cintopt = make_cintopt(atm, bas, env, intor_name) drv(getattr(libcgto, intor_name), fill, mat.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(comp), (ctypes.c_int*6)(*(shls_slice[:6])), ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(natm), bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(nbas), env.ctypes.data_as(ctypes.c_void_p)) mat = numpy.rollaxis(mat, -1, 0) if comp == 1: mat = mat[0] return mat def getints4c(intor_name, atm, bas, env, shls_slice=None, comp=1, aosym='s1', ao_loc=None, cintopt=None, out=None): aosym = _stand_sym_code(aosym) atm = numpy.asarray(atm, dtype=numpy.int32, order='C') bas = numpy.asarray(bas, dtype=numpy.int32, order='C') env = numpy.asarray(env, dtype=numpy.double, order='C') c_atm = atm.ctypes.data_as(ctypes.c_void_p) c_bas = bas.ctypes.data_as(ctypes.c_void_p) c_env = env.ctypes.data_as(ctypes.c_void_p) natm = atm.shape[0] nbas = bas.shape[0] ao_loc = make_loc(bas, intor_name) if '_spinor' in intor_name: assert(aosym == 's1') if aosym == 's8': assert(shls_slice is None) from pyscf.scf import _vhf nao = ao_loc[-1] nao_pair = nao*(nao+1)//2 out = numpy.ndarray((nao_pair*(nao_pair+1)//2), buffer=out) if nao_pair == 0: return out if cintopt is None: cintopt = make_cintopt(atm, bas, env, intor_name) drv = _vhf.libcvhf.GTO2e_cart_or_sph drv(getattr(libcgto, intor_name), cintopt, out.ctypes.data_as(ctypes.c_void_p), ao_loc.ctypes.data_as(ctypes.c_void_p), c_atm, ctypes.c_int(natm), c_bas, ctypes.c_int(nbas), c_env) return out else: if shls_slice is None: shls_slice = (0, nbas, 0, nbas, 0, nbas, 0, nbas) elif len(shls_slice) == 4: shls_slice = shls_slice + (0, nbas, 0, nbas) else: assert(shls_slice[1] <= nbas and shls_slice[3] <= nbas and shls_slice[5] <= nbas and shls_slice[7] <= nbas) i0, i1, j0, j1, k0, k1, l0, l1 = shls_slice naoi = ao_loc[i1] - ao_loc[i0] naoj = ao_loc[j1] - ao_loc[j0] naok = ao_loc[k1] - ao_loc[k0] naol = ao_loc[l1] - ao_loc[l0] if aosym in ('s4', 's2ij'): nij = [naoi * (naoi + 1) // 2] assert(numpy.all(ao_loc[i0:i1]-ao_loc[i0] == ao_loc[j0:j1]-ao_loc[j0])) else: nij = [naoi, naoj] if aosym in ('s4', 's2kl'): nkl = [naok * (naok + 1) // 2] assert(numpy.all(ao_loc[k0:k1]-ao_loc[k0] == ao_loc[l0:l1]-ao_loc[l0])) else: nkl = [naok, naol] 
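        # With 's4'/'s2ij' (or 's2kl') the i,j (or k,l) index pair is stored
        # triangular-packed as n*(n+1)//2 values, so nij/nkl above hold a
        # single packed length instead of two separate axis lengths; the
        # output shape is then assembled with the component axis first.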
shape = [comp] + nij + nkl if '_spinor' in intor_name: drv = libcgto.GTOr4c_drv fill = libcgto.GTOr4c_fill_s1 out = numpy.ndarray(shape[::-1], dtype=numpy.complex, buffer=out, order='F') out = numpy.rollaxis(out, -1, 0) else: drv = libcgto.GTOnr2e_fill_drv fill = getattr(libcgto, 'GTOnr2e_fill_'+aosym) out = numpy.ndarray(shape, buffer=out) if out.size > 0: if cintopt is None: cintopt = make_cintopt(atm, bas, env, intor_name) prescreen = lib.c_null_ptr() drv(getattr(libcgto, intor_name), fill, prescreen, out.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(comp), (ctypes.c_int*8)(*shls_slice), ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, c_atm, ctypes.c_int(natm), c_bas, ctypes.c_int(nbas), c_env) if comp == 1: out = out[0] return out def getints_by_shell(intor_name, shls, atm, bas, env, comp=1): r'''For given 2, 3 or 4 shells, interface for libcint to get 1e, 2e, 2-center-2e or 3-center-2e integrals Args: intor_name : str See also :func:`getints` for the supported intor_name shls : list of int The AO shell-ids of the integrals atm : int32 ndarray libcint integral function argument bas : int32 ndarray libcint integral function argument env : float64 ndarray libcint integral function argument Kwargs: comp : int Components of the integrals, e.g. int1e_ipovlp has 3 components. Returns: ndarray of 2-dim to 5-dim, depending on the integral type (1e, 2e, 3c-2e, 2c2e) and the value of comp Examples: The gradients of the spherical 2e integrals >>> mol.build(atom='H 0 0 0; H 0 0 1.1', basis='sto-3g') >>> gto.getints_by_shell('int2e_ip1_sph', (0,1,0,1), mol._atm, mol._bas, mol._env, comp=3) [[[[[-0. ]]]] [[[[-0. ]]]] [[[[-0.08760462]]]]] ''' intor_name, comp = _get_intor_and_comp(intor_name, comp) atm = numpy.asarray(atm, dtype=numpy.int32, order='C') bas = numpy.asarray(bas, dtype=numpy.int32, order='C') env = numpy.asarray(env, dtype=numpy.double, order='C') natm = ctypes.c_int(atm.shape[0]) nbas = ctypes.c_int(bas.shape[0]) if intor_name.endswith('_cart'): dtype = numpy.double def num_cgto_of(basid): l = bas[basid,ANG_OF] return (l+1)*(l+2)//2 * bas[basid,NCTR_OF] elif intor_name.endswith('_sph'): dtype = numpy.double def num_cgto_of(basid): l = bas[basid,ANG_OF] return (l*2+1) * bas[basid,NCTR_OF] else: from pyscf.gto.mole import len_spinor dtype = numpy.complex def num_cgto_of(basid): l = bas[basid,ANG_OF] k = bas[basid,KAPPA_OF] return len_spinor(l,k) * bas[basid,NCTR_OF] null = lib.c_null_ptr() if intor_name.startswith('int3c'): assert(len(shls) == 3) di = num_cgto_of(shls[0]) dj = num_cgto_of(shls[1]) l = bas[shls[2],ANG_OF] if intor_name.endswith('_ssc'): # mixed spherical-cartesian dk = (l+1)*(l+2)//2 * bas[shls[2],NCTR_OF] else: dk = (l*2+1) * bas[shls[2],NCTR_OF] buf = numpy.empty((di,dj,dk,comp), dtype, order='F') fintor = getattr(libcgto, intor_name) fintor(buf.ctypes.data_as(ctypes.c_void_p), null, (ctypes.c_int*3)(*shls), atm.ctypes.data_as(ctypes.c_void_p), natm, bas.ctypes.data_as(ctypes.c_void_p), nbas, env.ctypes.data_as(ctypes.c_void_p), null, null) if comp == 1: return buf.reshape(di,dj,dk) else: return buf.transpose(3,0,1,2) elif intor_name.startswith('int2e') or intor_name.startswith('int4c'): assert(len(shls) == 4) di, dj, dk, dl = [num_cgto_of(x) for x in shls] buf = numpy.empty((di,dj,dk,dl,comp), dtype, order='F') fintor = getattr(libcgto, intor_name) fintor(buf.ctypes.data_as(ctypes.c_void_p), null, (ctypes.c_int*4)(*shls), atm.ctypes.data_as(ctypes.c_void_p), natm, bas.ctypes.data_as(ctypes.c_void_p), nbas, env.ctypes.data_as(ctypes.c_void_p), null, null) if comp == 1: 
return buf.reshape(di,dj,dk,dl) else: return buf.transpose(4,0,1,2,3) elif (intor_name.startswith('int2c') or '1e' in intor_name or 'ECP' in intor_name): assert(len(shls) == 2) di = num_cgto_of(shls[0]) dj = num_cgto_of(shls[1]) buf = numpy.empty((di,dj,comp), dtype, order='F') fintor = getattr(libcgto, intor_name) fintor(buf.ctypes.data_as(ctypes.c_void_p), null, (ctypes.c_int*2)(*shls), atm.ctypes.data_as(ctypes.c_void_p), natm, bas.ctypes.data_as(ctypes.c_void_p), nbas, env.ctypes.data_as(ctypes.c_void_p), null, null) if comp == 1: return buf.reshape(di,dj) else: return buf.transpose(2,0,1) else: raise RuntimeError('Unknown intor %s' % intor_name) def make_loc(bas, key): if 'cart' in key: l = bas[:,ANG_OF] dims = (l+1)*(l+2)//2 * bas[:,NCTR_OF] elif 'sph' in key: dims = (bas[:,ANG_OF]*2+1) * bas[:,NCTR_OF] else: # spinor l = bas[:,ANG_OF] k = bas[:,KAPPA_OF] dims = (l*4+2) * bas[:,NCTR_OF] dims[k<0] = (l[k<0] * 2 + 2) * bas[k<0,NCTR_OF] dims[k>0] = (l[k>0] * 2 ) * bas[k>0,NCTR_OF] ao_loc = numpy.empty(len(dims)+1, dtype=numpy.int32) ao_loc[0] = 0 dims.cumsum(dtype=numpy.int32, out=ao_loc[1:]) return ao_loc def make_cintopt(atm, bas, env, intor): intor = intor.replace('_sph','').replace('_cart','').replace('_spinor','') c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C') c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C') c_env = numpy.asarray(env, dtype=numpy.double, order='C') natm = c_atm.shape[0] nbas = c_bas.shape[0] cintopt = lib.c_null_ptr() # TODO: call specific ECP optimizers for each intor. if intor[:3] == 'ECP': foptinit = libcgto.ECPscalar_optimizer foptinit(ctypes.byref(cintopt), c_atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(natm), c_bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(nbas), c_env.ctypes.data_as(ctypes.c_void_p)) return ctypes.cast(cintopt, _ecpoptHandler) else: foptinit = getattr(libcgto, intor+'_optimizer') foptinit(ctypes.byref(cintopt), c_atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(natm), c_bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(nbas), c_env.ctypes.data_as(ctypes.c_void_p)) return ctypes.cast(cintopt, _cintoptHandler) class _cintoptHandler(ctypes.c_void_p): def __del__(self): libcgto.CINTdel_optimizer(ctypes.byref(self)) class _ecpoptHandler(ctypes.c_void_p): def __del__(self): libcgto.ECPdel_optimizer(ctypes.byref(self)) def _stand_sym_code(sym): if isinstance(sym, int): return 's%d' % sym elif sym[0] in 'sS': return sym.lower() else: return 's' + sym.lower() def ascint3(intor_name): '''convert cint2 function name to cint3 function name''' if intor_name.startswith('cint'): intor_name = intor_name[1:] if not intor_name.endswith(('_sph', '_cart', '_spinor', '_ssc')): intor_name = intor_name + '_spinor' return intor_name if __name__ == '__main__': from pyscf import gto mol = gto.Mole() mol.verbose = 0 mol.output = None mol.atom.extend([ ["H", (0, 0, 0 )], ["H", (0, 0, 1 )], ]) mol.basis = {"H": 'cc-pvdz'} mol.build() mol.set_rinv_origin(mol.atom_coord(0)) for i in range(mol.nbas): for j in range(mol.nbas): print(i, j, getints_by_shell('int1e_prinvxp_sph', (i,j), mol._atm, mol._bas, mol._env, 3))
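    # A minimal sketch (editorial addition, not upstream code): a whole one-electron
    # matrix can also be assembled in a single call through the generic 2-center
    # driver defined above; 'int1e_ovlp_sph' (overlap) and hermi=1 are assumptions.
    s = getints2c('int1e_ovlp_sph', mol._atm, mol._bas, mol._env, hermi=1)
    print('full overlap matrix, shape:', s.shape)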
gkc1000/pyscf
pyscf/gto/moleintor.py
Python
apache-2.0
37,174
[ "PySCF" ]
9be72b94a5a2f192315ee08bb6ea3e3ac60b8bc7790638f19b8234d7394954bd
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html

"""
Deep learning via word2vec's "skip-gram and CBOW models", using either
hierarchical softmax or negative sampling [1]_ [2]_.

The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality.

For a blog tutorial on gensim word2vec, with an interactive web app trained on GoogleNews,
visit http://radimrehurek.com/2014/02/word2vec-tutorial/

**Make sure you have a C compiler before installing gensim, to use optimized (compiled)
word2vec training** (70x speedup compared to plain NumPy implementation [3]_).

Initialize a model with e.g.::

>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)

Persist a model to disk with::

>>> model.save(fname)
>>> model = Word2Vec.load(fname)  # you can continue training with the loaded model!

The model can also be instantiated from an existing file on disk in the word2vec C format::

  >>> model = Word2Vec.load_word2vec_format('/tmp/vectors.txt', binary=False)  # C text format
  >>> model = Word2Vec.load_word2vec_format('/tmp/vectors.bin', binary=True)  # C binary format

You can perform various syntactic/semantic NLP word tasks with the model. Some of them
are already built-in::

  >>> model.most_similar(positive=['woman', 'king'], negative=['man'])
  [('queen', 0.50882536), ...]

  >>> model.doesnt_match("breakfast cereal dinner lunch".split())
  'cereal'

  >>> model.similarity('woman', 'man')
  0.73723527

  >>> model['computer']  # raw numpy vector of a word
  array([-0.00449447, -0.00310097,  0.02421786, ...], dtype=float32)

and so on.

If you're finished training a model (=no more updates, only querying), you can do

  >>> model.init_sims(replace=True)

to trim unneeded model memory = use (much) less RAM.

Note that there is a :mod:`gensim.models.phrases` module which lets you automatically
detect phrases longer than one word. Using phrases, you can learn a word2vec model
where "words" are actually multiword expressions, such as `new_york_times` or `financial_crisis`:

>>> bigram_transformer = gensim.models.Phrases(sentences)
>>> model = Word2Vec(bigram_transformer[sentences], size=100, ...)

.. [1] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [2] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed Representations of Words and Phrases and their Compositionality. In Proceedings of NIPS, 2013.
..
[3] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/ """ from __future__ import division # py3 "true division" import logging import sys import os import heapq from timeit import default_timer from copy import deepcopy from collections import defaultdict import threading import itertools from gensim.utils import keep_vocab_item try: from queue import Queue, Empty except ImportError: from Queue import Queue, Empty from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\ uint32, seterr, array, uint8, vstack, fromstring, sqrt, newaxis,\ ndarray, empty, sum as np_sum, prod, ones, ascontiguousarray from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc from six import iteritems, itervalues, string_types from types import GeneratorType logger = logging.getLogger("gensim.models.word2vec") try: from gensim.models.word2vec_inner import train_sentence_sg, train_sentence_cbow, FAST_VERSION except ImportError: # failed... fall back to plain numpy (20-80x slower training than the above) FAST_VERSION = -1 def train_sentence_sg(model, sentence, alpha, work=None): """ Update skip-gram model by training on a single sentence. The sentence is a list of string tokens, which are looked up in the model's vocab dictionary. Called internally from `Word2Vec.train()`. This is the non-optimized, Python version. If you have cython installed, gensim will use the optimized version from word2vec_inner instead. """ # ------- if model.unknown_word is not None: word_vocabs = [model.vocab[w] for w in sentence if model.vocab[w].sample_int > model.random.rand() * 2 ** 32] else: word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab and model.vocab[w].sample_int > model.random.rand() * 2 ** 32] # ------- for pos, word in enumerate(word_vocabs): reduced_window = model.random.randint(model.window) # `b` in the original word2vec code # now go over all words from the (reduced) window, predicting each one in turn start = max(0, pos - model.window + reduced_window) for pos2, word2 in enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start): # don't train on the `word` itself if pos2 != pos: train_sg_pair(model, model.index2word[word.index], word2.index, alpha) return len(word_vocabs) def train_sentence_cbow(model, sentence, alpha, work=None, neu1=None): """ Update CBOW model by training on a single sentence. The sentence is a list of string tokens, which are looked up in the model's vocab dictionary. Called internally from `Word2Vec.train()`. This is the non-optimized, Python version. If you have cython installed, gensim will use the optimized version from word2vec_inner instead. 
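
        Returns the number of effective words in the sentence, i.e. ``len(word_vocabs)``:
        the words that are present in the vocabulary and survive downsampling.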
""" if model.unknown_word is not None: word_vocabs = [model.vocab[w] for w in sentence if model.vocab[w].sample_int > model.random.rand() * 2 ** 32] else: word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab and model.vocab[w].sample_int > model.random.rand() * 2 ** 32] for pos, word in enumerate(word_vocabs): reduced_window = model.random.randint(model.window) # `b` in the original word2vec code start = max(0, pos - model.window + reduced_window) window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start) word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)] l1 = np_sum(model.syn0[word2_indices], axis=0) # 1 x vector_size if word2_indices and model.cbow_mean: l1 /= len(word2_indices) train_cbow_pair(model, word, word2_indices, l1, alpha) return len(word_vocabs) def train_sg_pair(model, word, context_index, alpha, learn_vectors=True, learn_hidden=True, context_vectors=None, context_locks=None): if context_vectors is None: context_vectors = model.syn0 if context_locks is None: context_locks = model.syn0_lockf if word not in model.vocab: return predict_word = model.vocab[word] # target word (NN output) l1 = context_vectors[context_index] # input word (NN input/projection layer) lock_factor = context_locks[context_index] neu1e = zeros(l1.shape) if model.hs: # work on the entire tree at once, to push as much work into numpy's C routines as possible (performance) l2a = deepcopy(model.syn1[predict_word.point]) # 2d matrix, codelen x layer1_size fa = 1.0 / (1.0 + exp(-dot(l1, l2a.T))) # propagate hidden -> output ga = (1 - predict_word.code - fa) * alpha # vector of error gradients multiplied by the learning rate if learn_hidden: model.syn1[predict_word.point] += outer(ga, l1) # learn hidden -> output neu1e += dot(ga, l2a) # save error if model.negative: # use this word (label = 1) + `negative` other random words not from this sentence (label = 0) word_indices = [predict_word.index] while len(word_indices) < model.negative + 1: w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1])) if w != predict_word.index: word_indices.append(w) l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output gb = (model.neg_labels - fb) * alpha # vector of error gradients multiplied by the learning rate if learn_hidden: model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output neu1e += dot(gb, l2b) # save error if learn_vectors: l1 += neu1e * lock_factor # learn input -> hidden (mutates model.syn0[word2.index], if that is l1) return neu1e def train_cbow_pair(model, word, input_word_indices, l1, alpha, learn_vectors=True, learn_hidden=True): neu1e = zeros(l1.shape) if model.hs: l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size fa = 1. / (1. + exp(-dot(l1, l2a.T))) # propagate hidden -> output ga = (1. - word.code - fa) * alpha # vector of error gradients multiplied by the learning rate if learn_hidden: model.syn1[word.point] += outer(ga, l1) # learn hidden -> output neu1e += dot(ga, l2a) # save error if model.negative: # use this word (label = 1) + `negative` other random words not from this sentence (label = 0) word_indices = [word.index] while len(word_indices) < model.negative + 1: w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1])) if w != word.index: word_indices.append(w) l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size fb = 1. / (1. 
+ exp(-dot(l1, l2b.T)))  # propagate hidden -> output
        gb = (model.neg_labels - fb) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1neg[word_indices] += outer(gb, l1)  # learn hidden -> output
        neu1e += dot(gb, l2b)  # save error

    if learn_vectors:
        # learn input -> hidden, here for all words in the window separately
        if not model.cbow_mean and input_word_indices:
            neu1e /= len(input_word_indices)
        for i in input_word_indices:
            model.syn0[i] += neu1e * model.syn0_lockf[i]
    return neu1e


# could move this import up to where train_* is imported,
# but for now just do it separately in case there are unforeseen bugs in score_
try:
    from gensim.models.word2vec_inner import score_sentence_sg, score_sentence_cbow
except ImportError:
    def score_sentence_sg(model, sentence, work=None):
        """
        Obtain likelihood score for a single sentence in a fitted skip-gram representation.

        The sentence is a list of Vocab objects (or None, when the corresponding
        word is not in the vocabulary). Called internally from `Word2Vec.score()`.

        This is the non-optimized, Python version. If you have cython installed, gensim
        will use the optimized version from word2vec_inner instead.

        """
        log_prob_sentence = 0.0
        if model.negative:
            raise RuntimeError("scoring is only available for HS=True")

        word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab]
        for pos, word in enumerate(word_vocabs):
            if word is None:
                continue  # OOV word in the input sentence => skip

            # now go over all words from the window, predicting each one in turn
            start = max(0, pos - model.window)
            for pos2, word2 in enumerate(sentence[start:(pos + model.window + 1)], start):
                # don't train on OOV words and on the `word` itself
                if word2 and not (pos2 == pos):
                    log_prob_sentence += score_sg_pair(model, word, word2)

        return log_prob_sentence

    # note: no `alpha` parameter -- scoring never updates weights; the signature now
    # matches the `score_sentence_cbow(self, sentence, work, neu1)` call in
    # `Word2Vec._score_job_words()` (the unused `alpha` previously swallowed `work`)
    def score_sentence_cbow(model, sentence, work=None, neu1=None):
        """
        Obtain likelihood score for a single sentence in a fitted CBOW representation.

        The sentence is a list of Vocab objects (or None, where the corresponding
        word is not in the vocabulary). Called internally from `Word2Vec.score()`.

        This is the non-optimized, Python version. If you have cython installed, gensim
        will use the optimized version from word2vec_inner instead.
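
        Example (editorial, illustrative only -- assumes a CBOW model trained with hs=1)::

            >>> model = Word2Vec(sentences, sg=0, hs=1)
            >>> log_probs = model.score(sentences, total_sentences=len(sentences))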
""" log_prob_sentence = 0.0 if model.negative: raise RuntimeError("scoring is only available for HS=True") word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab] for pos, word in enumerate(word_vocabs): if word is None: continue # OOV word in the input sentence => skip start = max(0, pos - model.window) window_pos = enumerate(sentence[start:(pos + model.window + 1)], start) word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)] l1 = np_sum(model.syn0[word2_indices], axis=0) # 1 x layer1_size if word2_indices and model.cbow_mean: l1 /= len(word2_indices) log_prob_sentence += score_cbow_pair(model, word, word2_indices, l1) return log_prob_sentence def score_sg_pair(model, word, word2): l1 = model.syn0[word2.index] l2a = deepcopy(model.syn1[word.point]) # 2d matrix, codelen x layer1_size sgn = -1.0 ** word.code # ch function, 0-> 1, 1 -> -1 lprob = -log(1.0 + exp(-sgn * dot(l1, l2a.T))) return sum(lprob) def score_cbow_pair(model, word, word2_indices, l1): l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size sgn = -1.0 ** word.code # ch function, 0-> 1, 1 -> -1 lprob = -log(1.0 + exp(-sgn * dot(l1, l2a.T))) return sum(lprob) class CallableWord(object): def __init__(self, word): self.word = word def __call__(self, *args, **kwargs): return self.word class Vocab(object): """ A single vocabulary item, used internally for collecting per-word frequency/sampling info, and for constructing binary trees (incl. both word leaves and inner nodes). """ def __init__(self, **kwargs): self.count = 0 self.__dict__.update(kwargs) def __lt__(self, other): # used for sorting in a priority queue return self.count < other.count def __str__(self): vals = ['%s:%r' % (key, self.__dict__[key]) for key in sorted(self.__dict__) if not key.startswith('_')] return "%s(%s)" % (self.__class__.__name__, ', '.join(vals)) class Word2Vec(utils.SaveLoad): """ Class for training, using and evaluating neural networks described in https://code.google.com/p/word2vec/ The model can be stored/loaded via its `save()` and `load()` methods, or stored/loaded in a format compatible with the original word2vec implementation via `save_word2vec_format()` and `load_word2vec_format()`. """ def __init__( self, sentences=None, size=100, alpha=0.025, window=5, min_count=5, max_vocab_size=None, sample=0, seed=1, workers=1, min_alpha=0.0001, sg=1, hs=1, negative=0, cbow_mean=0, hashfxn=hash, iter=1, null_word=0, trim_rule=None,unknown_word=None): """ Initialize the model from an iterable of `sentences`. Each sentence is a list of words (unicode strings) that will be used for training. The `sentences` iterable can be simply a list, but for larger corpora, consider an iterable that streams the sentences directly from disk/network. See :class:`BrownCorpus`, :class:`Text8Corpus` or :class:`LineSentence` in this module for such examples. If you don't supply `sentences`, the model is left uninitialized -- use if you plan to initialize it in some other way. `sg` defines the training algorithm. By default (`sg=1`), skip-gram is used. Otherwise, `cbow` is employed. `size` is the dimensionality of the feature vectors. `window` is the maximum distance between the current and predicted word within a sentence. `alpha` is the initial learning rate (will linearly drop to zero as training progresses). `seed` = for the random number generator. Initial vectors for each word are seeded with a hash of the concatenation of word + str(seed). 
`min_count` = ignore all words with total frequency lower than this. `max_vocab_size` = limit RAM during vocabulary building; if there are more unique words than this, then prune the infrequent ones. Every 10 million word types need about 1GB of RAM. Set to `None` for no limit (default). `sample` = threshold for configuring which higher-frequency words are randomly downsampled; default is 0 (off), useful value is 1e-5. `workers` = use this many worker threads to train the model (=faster training with multicore machines). `hs` = if 1 (default), hierarchical sampling will be used for model training (else set to 0). `negative` = if > 0, negative sampling will be used, the int for negative specifies how many "noise words" should be drawn (usually between 5-20). `cbow_mean` = if 0 (default), use the sum of the context word vectors. If 1, use the mean. Only applies when cbow is used. `hashfxn` = hash function to use to randomly initialize weights, for increased training reproducibility. Default is Python's rudimentary built in hash function. `iter` = number of iterations (epochs) over the corpus. `trim_rule` = vocabulary trimming rule, specifies whether certain words should remain in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count). Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and returns either util.RULE_DISCARD, util.RULE_KEEP or util.RULE_DEFAULT. Note: The rule, if given, is only used prune vocabulary during build_vocab() and is not stored as part of the model. `unknown_word` : if provided, this string is used for words that appear less than min_count in the vocabulary. All queries to the Word2Vec object that target words that do not appear in the trained vocabulary are redirected to target the entry for the unknown_word. """ self.vocab = {} # mapping from a word (string) to a Vocab object self.index2word = [] # map from a word's matrix index (int) to word (string) self.sg = int(sg) self.cum_table = None # for negative sampling self.vector_size = int(size) self.layer1_size = int(size) if size % 4 != 0: logger.warning("consider setting layer size to a multiple of 4 for greater performance") self.alpha = float(alpha) self.window = int(window) self.max_vocab_size = max_vocab_size self.seed = seed self.random = random.RandomState(seed) self.min_count = min_count self.sample = sample self.workers = workers self.min_alpha = min_alpha self.hs = hs self.negative = negative self.cbow_mean = int(cbow_mean) self.hashfxn = hashfxn self.iter = iter self.null_word = null_word self.train_count = 0 self.total_train_time = 0 self.unknown_word = unknown_word if sentences is not None: if isinstance(sentences, GeneratorType): raise TypeError("You can't pass a generator as the sentences argument. Try an iterator.") self.build_vocab(sentences, trim_rule=trim_rule) self.train(sentences) def make_cum_table(self, power=0.75, domain=2 ** 31 - 1): """ Create a cumulative-distribution table using stored vocabulary word counts for drawing random words in the negative-sampling training routines. To draw a word index, choose a random integer up to the maximum value in the table (cum_table[-1]), then finding that integer's sorted insertion point (as if by bisect_left or ndarray.searchsorted()). That insertion point is the drawn index, coming up in proportion equal to the increment at that slot. Called internally from 'build_vocab()'. 
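
        Example (editorial, illustrative -- mirrors the draw used in train_sg_pair above)::

            >>> r = model.random.randint(model.cum_table[-1])
            >>> w = model.cum_table.searchsorted(r)  # index drawn with p ~ count**0.75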
""" vocab_size = len(self.index2word) self.cum_table = zeros(vocab_size, dtype=uint32) # compute sum of all power (Z in paper) train_words_pow = float(sum([self.vocab[word].count ** power for word in self.vocab])) cumulative = 0.0 for word_index in range(vocab_size): cumulative += self.vocab[self.index2word[word_index]].count ** power / train_words_pow self.cum_table[word_index] = round(cumulative * domain) if len(self.cum_table) > 0: assert self.cum_table[-1] == domain def create_binary_tree(self): """ Create a binary Huffman tree using stored vocabulary word counts. Frequent words will have shorter binary codes. Called internally from `build_vocab()`. """ logger.info("constructing a huffman tree from %i words", len(self.vocab)) # build the huffman tree heap = list(itervalues(self.vocab)) heapq.heapify(heap) for i in xrange(len(self.vocab) - 1): min1, min2 = heapq.heappop(heap), heapq.heappop(heap) heapq.heappush(heap, Vocab(count=min1.count + min2.count, index=i + len(self.vocab), left=min1, right=min2)) # recurse over the tree, assigning a binary code to each vocabulary word if heap: max_depth, stack = 0, [(heap[0], [], [])] while stack: node, codes, points = stack.pop() if node.index < len(self.vocab): # leaf node => store its path from the root node.code, node.point = codes, points max_depth = max(len(codes), max_depth) else: # inner node => continue recursion points = array(list(points) + [node.index - len(self.vocab)], dtype=uint32) stack.append((node.left, array(list(codes) + [0], dtype=uint8), points)) stack.append((node.right, array(list(codes) + [1], dtype=uint8), points)) logger.info("built huffman tree with maximum node depth %i", max_depth) def build_vocab(self, sentences, keep_raw_vocab=False, trim_rule=None): """ Build vocabulary from a sequence of sentences (can be a once-only generator stream). Each sentence must be a list of unicode strings. """ self.scan_vocab(sentences, trim_rule=trim_rule) # initial survey self.scale_vocab(keep_raw_vocab, trim_rule=trim_rule) # trim by min_count & precalculate downsampling self.finalize_vocab() # build tables & arrays def scan_vocab(self, sentences, progress_per=10000, trim_rule=None): """Do an initial scan of all words appearing in sentences.""" logger.info("collecting all words and their counts") sentence_no = -1 total_words = 0 min_reduce = 1 vocab = defaultdict(int) for sentence_no, sentence in enumerate(sentences): if sentence_no % progress_per == 0: logger.info("PROGRESS: at sentence #%i, processed %i words, keeping %i word types", sentence_no, sum(itervalues(vocab)) + total_words, len(vocab)) for word in sentence: vocab[word] += 1 if self.max_vocab_size and len(vocab) > self.max_vocab_size: total_words += utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule) min_reduce += 1 total_words += sum(itervalues(vocab)) logger.info("collected %i word types from a corpus of %i raw words and %i sentences", len(vocab), total_words, sentence_no + 1) self.corpus_count = sentence_no + 1 self.raw_vocab = vocab def scale_vocab(self, min_count=None, sample=None, dry_run=False, keep_raw_vocab=False, trim_rule=None): """ Apply vocabulary settings for `min_count` (discarding less-frequent words) and `sample` (controlling the downsampling of more-frequent words). Calling with `dry_run=True` will only simulate the provided settings and report the size of the retained vocabulary, effective corpus length, and estimated memory requirements. Results are both printed via logging and returned as a dict. 
Delete the raw vocabulary after the scaling is done to free up RAM, unless `keep_raw_vocab` is set. """ min_count = min_count or self.min_count sample = sample or self.sample # Discard words less-frequent than min_count if not dry_run: self.index2word = [] # make stored settings match these applied settings self.min_count = min_count self.sample = sample if self.unknown_word is not None: self.index2word.append(self.unknown_word) self.unknown_vocab = Vocab(count=0, index=0) self.vocab = defaultdict(CallableWord(self.unknown_vocab)) else: self.vocab = {} drop_unique, drop_total, retain_total, original_total = 0, 0, 0, 0 retain_words = [] if self.unknown_word is not None: retain_words.append(self.unknown_word) for word, v in iteritems(self.raw_vocab): if keep_vocab_item(word, v, min_count, trim_rule=trim_rule): retain_words.append(word) retain_total += v original_total += v if not dry_run: self.vocab[word] = Vocab(count=v, index=len(self.index2word)) self.index2word.append(word) else: if self.unknown_word is not None: retain_total += v original_total += v if not dry_run: self.unknown_vocab.count += v else: drop_unique += 1 drop_total += v original_total += v logger.info("min_count=%d retains %i unique words (drops %i)", min_count, len(retain_words), drop_unique) logger.info("min_count leaves %i word corpus (%i%% of original %i)", retain_total, retain_total * 100 / max(original_total, 1), original_total) if self.unknown_word is not None: self.raw_vocab[self.unknown_word] = self.unknown_vocab.count logger.info("unknown vocab item: count=%i, index=%i", self.unknown_vocab.count, self.unknown_vocab.index) # Precalculate each vocabulary item's threshold for sampling if not sample: # no words downsampled threshold_count = retain_total elif sample < 1.0: # traditional meaning: set parameter as proportion of total threshold_count = sample * retain_total else: # new shorthand: sample >= 1 means downsample all words with higher count than sample threshold_count = int(sample * (3 + sqrt(5)) / 2) downsample_total, downsample_unique = 0, 0 for w in retain_words: v = self.raw_vocab[w] word_probability = (sqrt(v / threshold_count) + 1) * (threshold_count / v) if word_probability < 1.0: downsample_unique += 1 downsample_total += word_probability * v else: word_probability = 1.0 downsample_total += v if not dry_run: self.vocab[w].sample_int = int(round(word_probability * 2 ** 32)) if not dry_run and not keep_raw_vocab: logger.info("deleting the raw counts dictionary of %i items", len(self.raw_vocab)) self.raw_vocab = defaultdict(int) logger.info("sample=%g downsamples %i most-common words", sample, downsample_unique) logger.info("downsampling leaves estimated %i word corpus (%.1f%% of prior %i)", downsample_total, downsample_total * 100.0 / max(retain_total, 1), retain_total) # return from each step: words-affected, resulting-corpus-size report_values = {'drop_unique': drop_unique, 'retain_total': retain_total, 'downsample_unique': downsample_unique, 'downsample_total': int(downsample_total)} # print extra memory estimates report_values['memory'] = self.estimate_memory(vocab_size=len(retain_words)) return report_values def finalize_vocab(self): """Build tables and model weights based on final vocabulary settings.""" if not self.index2word: self.scale_vocab() if self.hs: # add info about each word's Huffman encoding self.create_binary_tree() if self.negative: # build the table for drawing random words (for negative sampling) self.make_cum_table() if self.null_word: # create null pseudo-word for padding when 
using concatenative L1 (run-of-words) # this word is only ever input – never predicted – so count, huffman-point, etc doesn't matter word, v = '\0', Vocab(count=1, sample_int=0) v.index = len(self.vocab) self.index2word.append(word) self.vocab[word] = v # set initial input/projection and hidden weights self.reset_weights() def reset_from(self, other_model): """ Borrow shareable pre-built structures (like vocab) from the other_model. Useful if testing multiple models in parallel on the same corpus. """ self.vocab = other_model.vocab self.index2word = other_model.index2word self.cum_table = other_model.cum_table self.corpus_count = other_model.corpus_count self.reset_weights() def _do_train_job(self, job, alpha, inits): work, neu1 = inits tally = 0 raw_tally = 0 for sentence in job: if self.sg: tally += train_sentence_sg(self, sentence, alpha, work) else: tally += train_sentence_cbow(self, sentence, alpha, work, neu1) raw_tally += len(sentence) return (tally, raw_tally) def _raw_word_count(self, items): return sum(len(item) for item in items) def train(self, sentences, total_words=None, word_count=0, chunksize=100, total_examples=None, queue_factor=2, report_delay=1): """ Update the model's neural weights from a sequence of sentences (can be a once-only generator stream). For Word2Vec, each sentence must be a list of unicode strings. (Subclasses may accept other examples.) To support linear learning-rate decay from (initial) alpha to min_alpha, either total_examples (count of sentences) or total_words (count of raw words in sentences) should be provided, unless the sentences are the same as those that were used to initially build the vocabulary. """ if FAST_VERSION < 0: import warnings warnings.warn("C extension not loaded for Word2Vec, training will be slow. " "Install a C compiler and reinstall gensim for fast training.") self.neg_labels = [] if self.negative > 0: # precompute negative labels optimization for pure-python training self.neg_labels = zeros(self.negative + 1) self.neg_labels[0] = 1. 
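            # Editorial note: neg_labels is [1., 0., 0., ...] -- slot 0 marks the one
            # observed (positive) word and the remaining `negative` slots mark the
            # noise words; train_sg_pair/train_cbow_pair use it as the target vector.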
logger.info("training model with %i workers on %i vocabulary and %i features, " "using sg=%s hs=%s sample=%s and negative=%s", self.workers, len(self.vocab), self.layer1_size, self.sg, self.hs, self.sample, self.negative) if not self.vocab: raise RuntimeError("you must first build vocabulary before training the model") if not hasattr(self, 'syn0'): raise RuntimeError("you must first finalize vocabulary before training the model") if total_words is None and total_examples is None: if self.corpus_count: total_examples = self.corpus_count logger.info("expecting %i examples, matching count from corpus used for vocabulary survey", total_examples) else: raise ValueError( "you must provide either total_words or total_examples, to enable alpha and progress calculations") if self.iter > 1: sentences = utils.RepeatCorpusNTimes(sentences, self.iter) total_words = total_words and total_words * self.iter total_examples = total_examples and total_examples * self.iter def worker_init(): work = matutils.zeros_aligned(self.layer1_size, dtype=REAL) # per-thread private work memory neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL) return (work, neu1) def worker_one_job(job, inits): items, alpha = job if items is None: # signal to finish return False # train & return tally tally, raw_tally = self._do_train_job(items, alpha, inits) progress_queue.put((len(items), tally, raw_tally)) # report progress return True def worker_loop(): """Train the model, lifting lists of sentences from the jobs queue.""" init = worker_init() while True: job = job_queue.get() if not worker_one_job(job, init): break start, next_report = default_timer(), 1.0 # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :( if self.workers > 0: job_queue = Queue(maxsize=queue_factor * self.workers) else: job_queue = FakeJobQueue(worker_init, worker_one_job) progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers) workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)] for thread in workers: thread.daemon = True # make interrupting the process with ctrl+c easier thread.start() pushed_words = 0 pushed_examples = 0 example_count = 0 trained_word_count = 0 raw_word_count = word_count push_done = False done_jobs = 0 next_alpha = self.alpha jobs_source = enumerate(utils.grouper(sentences, chunksize)) # fill jobs queue with (sentence, alpha) job tuples while True: try: job_no, items = next(jobs_source) logger.debug("putting job #%i in the queue at alpha %.05f", job_no, next_alpha) job_queue.put((items, next_alpha)) # update the learning rate before every next job if self.min_alpha < next_alpha: if total_examples: # examples-based decay pushed_examples += len(items) next_alpha = self.alpha - (self.alpha - self.min_alpha) * (pushed_examples / total_examples) else: # words-based decay pushed_words += self._raw_word_count(items) next_alpha = self.alpha - (self.alpha - self.min_alpha) * (pushed_words / total_words) next_alpha = max(next_alpha, self.min_alpha) except StopIteration: logger.info("reached end of input; waiting to finish %i outstanding jobs", job_no - done_jobs + 1) for _ in xrange(self.workers): job_queue.put((None, 0)) # give the workers heads up that they can finish -- no more work! 
push_done = True try: while done_jobs < (job_no + 1) or not push_done: examples, trained_words, raw_words = progress_queue.get( push_done) # only block after all jobs pushed example_count += examples trained_word_count += trained_words # only words in vocab & sampled raw_word_count += raw_words done_jobs += 1 elapsed = default_timer() - start if elapsed >= next_report: if total_examples: # examples-based progress % logger.info("PROGRESS: at %.2f%% examples, %.0f words/s", 100.0 * example_count / total_examples, trained_word_count / elapsed) else: # words-based progress % logger.info("PROGRESS: at %.2f%% words, %.0f words/s", 100.0 * raw_word_count / total_words, trained_word_count / elapsed) next_report = elapsed + report_delay # don't flood log, wait report_delay seconds else: # loop ended by job count; really done break except Empty: pass # already out of loop; continue to next push elapsed = default_timer() - start logger.info("training on %i raw words took %.1fs, %.0f trained words/s", raw_word_count, elapsed, trained_word_count / elapsed if elapsed else 0.0) if total_examples and total_examples != example_count: logger.warn("supplied example count (%i) did not equal expected count (%i)", example_count, total_examples) if total_words and total_words != raw_word_count: logger.warn("supplied raw word count (%i) did not equal expected count (%i)", raw_word_count, total_words) self.train_count += 1 # number of times train() has been called self.total_train_time += elapsed self.clear_sims() return trained_word_count def _score_job_words(self, sentence, inits): work, neu1 = inits if self.sg: return score_sentence_sg(self, sentence, work) else: return score_sentence_cbow(self, sentence, work, neu1) # basics copied from the train() function def score(self, sentences, total_sentences=int(1e9), chunksize=100, queue_factor=2, report_delay=1): """ Score the log probability for a sequence of sentences (can be a once-only generator stream). Each sentence must be a list of unicode strings. This does not change the fitted model in any way (see Word2Vec.train() for that) Note that you should specify total_sentences; we'll run into problems if you ask to score more than the default See the article by Taddy [taddy]_ for examples of how to use such scores in document classification. .. [taddy] Taddy, Matt. Document Classification by Inversion of Distributed Language Representations, in Proceedings of the 2015 Conference of the Association of Computational Linguistics. """ if FAST_VERSION < 0: import warnings warnings.warn("C extension compilation failed, scoring will be slow. 
" "Install a C compiler and reinstall gensim for fastness.") logger.info("scoring sentences with %i workers on %i vocabulary and %i features, " "using sg=%s hs=%s sample=%s and negative=%s", self.workers, len(self.vocab), self.layer1_size, self.sg, self.hs, self.sample, self.negative) if not self.vocab: raise RuntimeError("you must first build vocabulary before scoring new data") if not self.hs: raise RuntimeError("we have only implemented score for hs") def worker_init(): work = zeros(1, dtype=REAL) # for sg hs, we actually only need one memory loc (running sum) neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL) return (work, neu1) def worker_one_job(job, inits): if job is None: # signal to finish return False ns = 0 for (id, sentence) in job: sentence_scores[id] = self._score_job_words(sentence, inits) ns += 1 progress_queue.put(ns) # report progress return True def worker_loop(): """Train the model, lifting lists of sentences from the jobs queue.""" init = worker_init() while True: job = job_queue.get() if not worker_one_job(job, init): break start, next_report = default_timer(), 1.0 # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :( if self.workers > 0: job_queue = Queue(maxsize=queue_factor * self.workers) else: job_queue = FakeJobQueue(worker_init, worker_one_job) progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers) workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)] for thread in workers: thread.daemon = True # make interrupting the process with ctrl+c easier thread.start() sentence_count = 0 sentence_scores = matutils.zeros_aligned(total_sentences, dtype=REAL) push_done = False done_jobs = 0 jobs_source = enumerate(utils.grouper(enumerate(sentences), chunksize)) # fill jobs queue with (id, sentence) job items while True: try: job_no, items = next(jobs_source) logger.debug("putting job #%i in the queue", job_no) job_queue.put(items) except StopIteration: logger.info( "reached end of input; waiting to finish %i outstanding jobs", job_no - done_jobs + 1) for _ in xrange(self.workers): job_queue.put(None) # give the workers heads up that they can finish -- no more work! 
push_done = True try: while done_jobs < (job_no+1) or not push_done: ns = progress_queue.get(push_done) # only block after all jobs pushed sentence_count += ns done_jobs += 1 elapsed = default_timer() - start if elapsed >= next_report: logger.info( "PROGRESS: at %.2f%% sentences, %.0f sentences/s", 100.0 * sentence_count, sentence_count / elapsed) next_report = elapsed + report_delay # don't flood log, wait report_delay seconds else: # loop ended by job count; really done break except Empty: pass # already out of loop; continue to next push elapsed = default_timer() - start self.clear_sims() logger.info("scoring %i sentences took %.1fs, %.0f sentences/s" % (sentence_count, elapsed, sentence_count / elapsed if elapsed else 0.0)) return sentence_scores[:sentence_count] def clear_sims(self): self.syn0norm = None def reset_weights(self): """Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary.""" logger.info("resetting layer weights") self.syn0 = empty((len(self.vocab), self.vector_size), dtype=REAL) # randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once for i in xrange(len(self.vocab)): # construct deterministic seed from word AND seed argument self.syn0[i] = self.seeded_vector(self.index2word[i] + str(self.seed)) if self.hs: self.syn1 = zeros((len(self.vocab), self.layer1_size), dtype=REAL) if self.negative: self.syn1neg = zeros((len(self.vocab), self.layer1_size), dtype=REAL) self.syn0norm = None self.syn0_lockf = ones(len(self.vocab), dtype=REAL) # zeros suppress learning def seeded_vector(self, seed_string): """Create one 'random' vector (but deterministic by seed_string)""" # Note: built-in hash() may vary by Python version or even (in Py3.x) per launch once = random.RandomState(uint32(self.hashfxn(seed_string))) return (once.rand(self.vector_size) - 0.5) / self.vector_size def save_word2vec_format(self, fname, fvocab=None, binary=False): """ Store the input-hidden weight matrix in the same format used by the original C word2vec-tool, for compatibility. """ if fvocab is not None: logger.info("storing vocabulary in %s" % (fvocab)) with utils.smart_open(fvocab, 'wb') as vout: for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count): vout.write(utils.to_utf8("%s %s\n" % (word, vocab.count))) logger.info("storing %sx%s projection weights into %s" % (len(self.vocab), self.vector_size, fname)) assert (len(self.vocab), self.vector_size) == self.syn0.shape with utils.smart_open(fname, 'wb') as fout: fout.write(utils.to_utf8("%s %s\n" % self.syn0.shape)) # store in sorted order: most frequent words at the top for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count): row = self.syn0[vocab.index] if binary: fout.write(utils.to_utf8(word) + b" " + row.tostring()) else: fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row)))) @classmethod def load_word2vec_format(cls, fname, fvocab=None, binary=False, norm_only=True, encoding='utf8'): """ Load the input-hidden weight matrix from the original C word2vec-tool format. Note that the information stored in the file is incomplete (the binary tree is missing), so while you can query for word similarity etc., you cannot continue training with a model loaded this way. `binary` is a boolean indicating whether the data is in binary word2vec format. `norm_only` is a boolean indicating whether to only store normalised word2vec vectors in memory. 
Word counts are read from `fvocab` filename, if set (this is the file generated by `-save-vocab` flag of the original C tool). If you trained the C model using non-utf8 encoding for words, specify that encoding in `encoding`. """ counts = None if fvocab is not None: logger.info("loading word counts from %s", fvocab) counts = {} with utils.smart_open(fvocab) as fin: for line in fin: word, count = utils.to_unicode(line).strip().split() counts[word] = int(count) logger.info("loading projection weights from %s", fname) with utils.smart_open(fname) as fin: header = utils.to_unicode(fin.readline(), encoding=encoding) vocab_size, vector_size = map(int, header.split()) # throws for invalid file format result = cls(size=vector_size) result.syn0 = zeros((vocab_size, vector_size), dtype=REAL) def add_word(word, weights): word_id = len(result.vocab) if word in result.vocab: logger.warning("duplicate word '%s' in %s, ignoring all but first", word, fname) return if counts is None: # most common scenario: no vocab file given. just make up some bogus counts, in descending order result.vocab[word] = Vocab(index=word_id, count=vocab_size - word_id) elif word in counts: # use count from the vocab file result.vocab[word] = Vocab(index=word_id, count=counts[word]) else: # vocab file given, but word is missing -- set count to None (TODO: or raise?) logger.warning("vocabulary file is incomplete: '%s' is missing", word) result.vocab[word] = Vocab(index=word_id, count=None) result.syn0[word_id] = weights result.index2word.append(word) if binary: binary_len = dtype(REAL).itemsize * vector_size for line_no in xrange(vocab_size): # mixed text and binary: read text first, then binary word = [] while True: ch = fin.read(1) if ch == b' ': break if ch != b'\n': # ignore newlines in front of words (some binary files have) word.append(ch) word = utils.to_unicode(b''.join(word), encoding=encoding) weights = fromstring(fin.read(binary_len), dtype=REAL) add_word(word, weights) else: for line_no, line in enumerate(fin): parts = utils.to_unicode(line.rstrip(), encoding=encoding).split(" ") if len(parts) != vector_size + 1: raise ValueError("invalid vector on line %s (is this really the text format?)" % (line_no)) word, weights = parts[0], list(map(REAL, parts[1:])) add_word(word, weights) if result.syn0.shape[0] != len(result.vocab): logger.info( "duplicate words detected, shrinking matrix size from %i to %i", result.syn0.shape[0], len(result.vocab) ) result.syn0 = ascontiguousarray(result.syn0[: len(result.vocab)]) assert (len(result.vocab), result.vector_size) == result.syn0.shape logger.info("loaded %s matrix from %s" % (result.syn0.shape, fname)) result.init_sims(norm_only) return result def intersect_word2vec_format(self, fname, binary=False, encoding='utf8'): """ Merge the input-hidden weight matrix from the original C word2vec-tool format given, where it intersects with the current vocabulary. (No words are added to the existing vocabulary, but intersecting words adopt the file's weights, and non-intersecting words are left alone.) `binary` is a boolean indicating whether the data is in binary word2vec format. 
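
        Example (editorial, illustrative; the path is hypothetical)::

            >>> model.intersect_word2vec_format('/tmp/vectors.bin', binary=True)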
""" overlap_count = 0 logger.info("loading projection weights from %s" % (fname)) with utils.smart_open(fname) as fin: header = utils.to_unicode(fin.readline(), encoding=encoding) vocab_size, vector_size = map(int, header.split()) # throws for invalid file format if not vector_size == self.vector_size: raise ValueError("incompatible vector size %d in file %s" % (vector_size, fname)) # TOCONSIDER: maybe mismatched vectors still useful enough to merge (truncating/padding)? if binary: binary_len = dtype(REAL).itemsize * vector_size for line_no in xrange(vocab_size): # mixed text and binary: read text first, then binary word = [] while True: ch = fin.read(1) if ch == b' ': break if ch != b'\n': # ignore newlines in front of words (some binary files have) word.append(ch) word = utils.to_unicode(b''.join(word), encoding=encoding) weights = fromstring(fin.read(binary_len), dtype=REAL) if word in self.vocab: overlap_count += 1 self.syn0[self.vocab[word].index] = weights self.syn0_lockf[self.vocab[word].index] = 0.0 # lock it else: for line_no, line in enumerate(fin): parts = utils.to_unicode(line.rstrip(), encoding=encoding).split(" ") if len(parts) != vector_size + 1: raise ValueError("invalid vector on line %s (is this really the text format?)" % (line_no)) word, weights = parts[0], list(map(REAL, parts[1:])) if word in self.vocab: overlap_count += 1 self.syn0[self.vocab[word].index] = weights logger.info("merged %d vectors into %s matrix from %s" % (overlap_count, self.syn0.shape, fname)) def most_similar(self, positive=[], negative=[], topn=10): """ Find the top-N most similar words. Positive words contribute positively towards the similarity, negative words negatively. This method computes cosine similarity between a simple mean of the projection weight vectors of the given words and the vectors for each word in the model. The method corresponds to the `word-analogy` and `distance` scripts in the original word2vec implementation. If topn is False, most_similar returns the vector of similarity scores. Example:: >>> trained_model.most_similar(positive=['woman', 'king'], negative=['man']) [('queen', 0.50882536), ...] 
""" self.init_sims() if isinstance(positive, string_types) and not negative: # allow calls like most_similar('dog'), as a shorthand for most_similar(['dog']) positive = [positive] # add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words positive = [(word, 1.0) if isinstance(word, string_types + (ndarray,)) else word for word in positive] negative = [(word, -1.0) if isinstance(word, string_types + (ndarray,)) else word for word in negative] # compute the weighted average of all words all_words, mean = set(), [] for word, weight in positive + negative: if isinstance(word, ndarray): mean.append(weight * word) elif word in self.vocab: mean.append(weight * self.syn0norm[self.vocab[word].index]) all_words.add(self.vocab[word].index) else: raise KeyError("word '%s' not in vocabulary" % word) if not mean: raise ValueError("cannot compute similarity with no input") mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL) dists = dot(self.syn0norm, mean) if not topn: return dists best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True) # ignore (don't return) words from the input result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words] return result[:topn] def most_similar_cosmul(self, positive=[], negative=[], topn=10): """ Find the top-N most similar words, using the multiplicative combination objective proposed by Omer Levy and Yoav Goldberg in [4]_. Positive words still contribute positively towards the similarity, negative words negatively, but with less susceptibility to one large distance dominating the calculation. In the common analogy-solving case, of two positive and one negative examples, this method is equivalent to the "3CosMul" objective (equation (4)) of Levy and Goldberg. Additional positive or negative examples contribute to the numerator or denominator, respectively – a potentially sensible but untested extension of the method. (With a single positive example, rankings will be the same as in the default most_similar.) Example:: >>> trained_model.most_similar_cosmul(positive=['baghdad', 'england'], negative=['london']) [(u'iraq', 0.8488819003105164), ...] .. [4] Omer Levy and Yoav Goldberg. Linguistic Regularities in Sparse and Explicit Word Representations, 2014. 
""" self.init_sims() if isinstance(positive, string_types) and not negative: # allow calls like most_similar_cosmul('dog'), as a shorthand for most_similar_cosmul(['dog']) positive = [positive] all_words = set() def word_vec(word): if isinstance(word, ndarray): return word elif word in self.vocab: all_words.add(self.vocab[word].index) return self.syn0norm[self.vocab[word].index] else: raise KeyError("word '%s' not in vocabulary" % word) positive = [word_vec(word) for word in positive] negative = [word_vec(word) for word in negative] if not positive: raise ValueError("cannot compute similarity with no input") # equation (4) of Levy & Goldberg "Linguistic Regularities...", # with distances shifted to [0,1] per footnote (7) pos_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in positive] neg_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in negative] dists = prod(pos_dists, axis=0) / (prod(neg_dists, axis=0) + 0.000001) if not topn: return dists best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True) # ignore (don't return) words from the input result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words] return result[:topn] def doesnt_match(self, words): """ Which word from the given list doesn't go with the others? Example:: >>> trained_model.doesnt_match("breakfast cereal dinner lunch".split()) 'cereal' """ self.init_sims() words = [word for word in words if word in self.vocab] # filter out OOV words logger.debug("using words %s" % words) if not words: raise ValueError("cannot select a word from an empty list") vectors = vstack(self.syn0norm[self.vocab[word].index] for word in words).astype(REAL) mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL) dists = dot(vectors, mean) return sorted(zip(dists, words))[0][1] def __getitem__(self, words): """ Accept a single word or a list of words as input. If a single word: returns the word's representations in vector space, as a 1D numpy array. Multiple words: return the words' representations in vector space, as a 2d numpy array: #words x #vector_size. Matrix rows are in the same order as in input. Example:: >>> trained_model['office'] array([ -1.40128313e-02, ...]) >>> trained_model[['office', 'products']] array([ -1.40128313e-02, ...] [ -1.70425311e-03, ...] ...) """ if isinstance(words, string_types): # allow calls like trained_model['office'], as a shorthand for trained_model[['office']] return self.syn0[self.vocab[words].index] return vstack([self.syn0[self.vocab[word].index] for word in words]) def __contains__(self, word): return word in self.vocab def similarity(self, w1, w2): """ Compute cosine similarity between two words. Example:: >>> trained_model.similarity('woman', 'man') 0.73723527 >>> trained_model.similarity('woman', 'woman') 1.0 """ return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2])) def n_similarity(self, ws1, ws2): """ Compute cosine similarity between two sets of words. Example:: >>> trained_model.n_similarity(['sushi', 'shop'], ['japanese', 'restaurant']) 0.61540466561049689 >>> trained_model.n_similarity(['restaurant', 'japanese'], ['japanese', 'restaurant']) 1.0000000000000004 >>> trained_model.n_similarity(['sushi'], ['restaurant']) == trained_model.similarity('sushi', 'restaurant') True """ v1 = [self[word] for word in ws1] v2 = [self[word] for word in ws2] return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0))) def init_sims(self, replace=False): """ Precompute L2-normalized vectors. 
If `replace` is set, forget the original vectors and only keep the normalized ones = saves lots of memory! Note that you **cannot continue training** after doing a replace. The model becomes effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`. """ if getattr(self, 'syn0norm', None) is None or replace: logger.info("precomputing L2-norms of word weight vectors") if replace: for i in xrange(self.syn0.shape[0]): self.syn0[i, :] /= sqrt((self.syn0[i, :] ** 2).sum(-1)) self.syn0norm = self.syn0 if hasattr(self, 'syn1'): del self.syn1 else: self.syn0norm = (self.syn0 / sqrt((self.syn0 ** 2).sum(-1))[..., newaxis]).astype(REAL) def estimate_memory(self, vocab_size=None, report=None): """Estimate required memory for a model using current settings and provided vocabulary size.""" vocab_size = vocab_size or len(self.vocab) report = report or {} report['vocab'] = vocab_size * (700 if self.hs else 500) report['syn0'] = vocab_size * self.vector_size * dtype(REAL).itemsize if self.hs: report['syn1'] = vocab_size * self.layer1_size * dtype(REAL).itemsize if self.negative: report['syn1neg'] = vocab_size * self.layer1_size * dtype(REAL).itemsize report['total'] = sum(report.values()) logger.info("estimated required memory for %i words and %i dimensions: %i bytes", vocab_size, self.vector_size, report['total']) return report @staticmethod def log_accuracy(section): correct, incorrect = len(section['correct']), len(section['incorrect']) if correct + incorrect > 0: logger.info("%s: %.1f%% (%i/%i)" % ( section['section'], 100.0 * correct / (correct + incorrect), correct, correct + incorrect)) def accuracy(self, questions, restrict_vocab=30000, most_similar=most_similar): """ Compute accuracy of the model. `questions` is a filename where lines are 4-tuples of words, split into sections by ": SECTION NAME" lines. See https://code.google.com/p/word2vec/source/browse/trunk/questions-words.txt for an example. The accuracy is reported (=printed to log and returned as a list) for each section separately, plus there's one aggregate summary at the end. Use `restrict_vocab` to ignore all questions containing a word whose frequency is not in the top-N most frequent words (default top 30,000). This method corresponds to the `compute-accuracy` script of the original C word2vec. """ ok_vocab = dict(sorted(iteritems(self.vocab), key=lambda item: -item[1].count)[:restrict_vocab]) ok_index = set(v.index for v in itervalues(ok_vocab)) sections, section = [], None for line_no, line in enumerate(utils.smart_open(questions)): # TODO: use level3 BLAS (=evaluate multiple questions at once), for speed line = utils.to_unicode(line) if line.startswith(': '): # a new section starts => store the old section if section: sections.append(section) self.log_accuracy(section) section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []} else: if not section: raise ValueError("missing section header before line #%i in %s" % (line_no, questions)) try: a, b, c, expected = [word.lower() for word in line.split()] # TODO assumes vocabulary preprocessing uses lowercase, too... 
except:
                    logger.info("skipping invalid line #%i in %s" % (line_no, questions))
                    continue  # skip malformed lines; a/b/c/expected would otherwise be stale
                if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
                    logger.debug("skipping line #%i with OOV words: %s" % (line_no, line.strip()))
                    continue

                ignore = set(self.vocab[v].index for v in [a, b, c])  # indexes of words to ignore
                predicted = None
                # find the most likely prediction, ignoring OOV words and input words
                sims = most_similar(self, positive=[b, c], negative=[a], topn=False)
                for index in matutils.argsort(sims, reverse=True):
                    if index in ok_index and index not in ignore:
                        predicted = self.index2word[index]
                        if predicted != expected:
                            logger.debug("%s: expected %s, predicted %s", line.strip(), expected, predicted)
                        break
                if predicted == expected:
                    section['correct'].append((a, b, c, expected))
                else:
                    section['incorrect'].append((a, b, c, expected))
        if section:
            # store the last section, too
            sections.append(section)
            self.log_accuracy(section)

        total = {
            'section': 'total',
            'correct': sum((s['correct'] for s in sections), []),
            'incorrect': sum((s['incorrect'] for s in sections), []),
        }
        self.log_accuracy(total)
        sections.append(total)
        return sections

    def __str__(self):
        return "%s(vocab=%s, size=%s, alpha=%s)" % (
            self.__class__.__name__, len(self.index2word), self.vector_size, self.alpha)

    def save(self, *args, **kwargs):
        # don't bother storing the cached normalized vectors, recalculable table
        kwargs['ignore'] = kwargs.get('ignore', ['syn0norm', 'table', 'cum_table'])
        super(Word2Vec, self).save(*args, **kwargs)
    save.__doc__ = utils.SaveLoad.save.__doc__

    @classmethod
    def load(cls, *args, **kwargs):
        model = super(Word2Vec, cls).load(*args, **kwargs)
        # update older models
        if hasattr(model, 'table'):
            delattr(model, 'table')  # discard in favor of cum_table
        if model.negative and hasattr(model, 'index2word'):
            model.make_cum_table()  # rebuild cum_table from vocabulary
        if not hasattr(model, 'corpus_count'):
            model.corpus_count = None
        for v in model.vocab.values():
            if hasattr(v, 'sample_int'):
                break  # already 0.12.0+ style int probabilities
            else:
                v.sample_int = int(round(v.sample_probability * 2 ** 32))
                del v.sample_probability
        if not hasattr(model, 'syn0_lockf') and hasattr(model, 'syn0'):
            model.syn0_lockf = ones(len(model.syn0), dtype=REAL)
        if not hasattr(model, 'random'):
            model.random = random.RandomState(model.seed)
        if not hasattr(model, 'train_count'):
            model.train_count = 0
            model.total_train_time = 0
        return model


class FakeJobQueue(object):
    """Pretends to be a Queue; does equivalent of work_loop in calling thread."""
    def __init__(self, init_fn, job_fn):
        self.inits = init_fn()
        self.job_fn = job_fn

    def put(self, job):
        self.job_fn(job, self.inits)


class BrownCorpus(object):
    """Iterate over sentences from the Brown corpus (part of NLTK data)."""
    def __init__(self, dirname):
        self.dirname = dirname

    def __iter__(self):
        for fname in os.listdir(self.dirname):
            fname = os.path.join(self.dirname, fname)
            if not os.path.isfile(fname):
                continue
            for line in utils.smart_open(fname):
                line = utils.to_unicode(line)
                # each file line is a single sentence in the Brown corpus
                # each token is WORD/POS_TAG
                token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]
                # ignore words with non-alphabetic tags like ",", "!"
etc (punctuation, weird stuff) words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()] if not words: # don't bother sending out empty sentences continue yield words class Text8Corpus(object): """Iterate over sentences from the "text8" corpus, unzipped from http://mattmahoney.net/dc/text8.zip .""" def __init__(self, fname, max_sentence_length=1000): self.fname = fname self.max_sentence_length = max_sentence_length def __iter__(self): # the entire corpus is one gigantic line -- there are no sentence marks at all # so just split the sequence of tokens arbitrarily: 1 sentence = 1000 tokens sentence, rest = [], b'' with utils.smart_open(self.fname) as fin: while True: text = rest + fin.read(8192) # avoid loading the entire file (=1 line) into RAM if text == rest: # EOF sentence.extend(rest.split()) # return the last chunk of words, too (may be shorter/longer) if sentence: yield sentence break last_token = text.rfind(b' ') # last token may have been split in two... keep for next iteration words, rest = ( utils.to_unicode(text[:last_token]).split(), text[last_token:].strip()) if last_token >= 0 else ( [], text) sentence.extend(words) while len(sentence) >= self.max_sentence_length: yield sentence[:self.max_sentence_length] sentence = sentence[self.max_sentence_length:] class LineSentence(object): """ Simple format: one sentence = one line; words already preprocessed and separated by whitespace. """ def __init__(self, source, max_sentence_length=10000, limit=None): """ `source` can be either a string or a file object. Clip the file to the first `limit` lines (or no clipped if limit is None, the default). Example:: sentences = LineSentence('myfile.txt') Or for compressed files:: sentences = LineSentence('compressed_text.txt.bz2') sentences = LineSentence('compressed_text.txt.gz') """ self.source = source self.max_sentence_length = max_sentence_length self.limit = limit def __iter__(self): """Iterate through the lines in the source.""" try: # Assume it is a file-like object and try treating it as such # Things that don't have seek will trigger an exception self.source.seek(0) for line in itertools.islice(self.source, self.limit): line = utils.to_unicode(line).split() i = 0 while i < len(line): yield line[i : i + self.max_sentence_length] i += self.max_sentence_length except AttributeError: # If it didn't work like a file, use it as a string filename with utils.smart_open(self.source) as fin: for line in itertools.islice(fin, self.limit): line = utils.to_unicode(line).split() i = 0 while i < len(line): yield line[i : i + self.max_sentence_length] i += self.max_sentence_length # Example: ./word2vec.py ~/workspace/word2vec/text8 ~/workspace/word2vec/questions-words.txt ./text8 if __name__ == "__main__": logging.basicConfig(format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s', level=logging.INFO) logging.info("running %s", " ".join(sys.argv)) logging.info("using optimization %s", FAST_VERSION) # check and process cmdline input program = os.path.basename(sys.argv[0]) if len(sys.argv) < 2: print(globals()['__doc__'] % locals()) sys.exit(1) infile = sys.argv[1] from gensim.models.word2vec import Word2Vec # avoid referencing __main__ in pickle seterr(all='raise') # don't ignore numpy errors # model = Word2Vec(LineSentence(infile), size=200, min_count=5, workers=4) model = Word2Vec(Text8Corpus(infile), size=200, min_count=5, workers=1) if len(sys.argv) > 3: outfile = sys.argv[3] model.save(outfile + '.model') model.save_word2vec_format(outfile + 
'.model.bin', binary=True) model.save_word2vec_format(outfile + '.model.txt', binary=False) if len(sys.argv) > 2: questions_file = sys.argv[2] model.accuracy(sys.argv[2]) logging.info("finished running %s", program)
sjebbara/gensim
gensim/models/word2vec.py
Python
gpl-3.0
74,528
[ "VisIt" ]
3977ff1895ac43fa4abbaf257268e2da5703b4d6fac46decde1cea7bd34ce9a9
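A minimal usage sketch for the query API defined in the word2vec.py file above (similarity, doesnt_match, most_similar_cosmul, init_sims). The corpus path, hyperparameters, and query words are placeholders, not values from the file, and the queries assume the words are in the trained vocabulary.

# Hedged sketch of the query API above; 'corpus.txt', the hyperparameters,
# and the query words are placeholders.
from gensim.models.word2vec import Word2Vec, LineSentence

model = Word2Vec(LineSentence('corpus.txt'), size=100, min_count=5, workers=2)

print(model.similarity('woman', 'man'))   # cosine similarity of two words
print(model.doesnt_match('breakfast cereal dinner lunch'.split()))
# multiplicative analogy (Levy & Goldberg), per most_similar_cosmul above
print(model.most_similar_cosmul(positive=['woman', 'king'], negative=['man'], topn=3))

model.init_sims(replace=True)  # L2-normalize in place; model becomes read-only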
''' synbiochem (c) University of Manchester 2015 synbiochem is licensed under the MIT License. To view a copy of this license, visit <http://opensource.org/licenses/MIT/>. @author: neilswainston ''' # pylint: disable=too-many-public-methods import unittest from synbiochemdev.utils import struct_utils class Test(unittest.TestCase): '''Test class for structure_utils.''' def test_get_seq_struct(self): '''Tests get_seq_struct method.''' seq_structs = struct_utils.get_seq_structs(['1DWI']) self.assertTrue(all([len(v[0]) == len(v[1]) for v in seq_structs.values()])) if __name__ == "__main__": # import sys;sys.argv = ['', 'Test.testName'] unittest.main()
neilswainston/development-py
synbiochemdev/utils/test/test_struct_utils.py
Python
mit
735
[ "VisIt" ]
5054b4c02fa8668162ec9ba0ea3b37280973a36a419e50771f816d16c3093d07
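The single assertion in the test above implies that get_seq_structs returns a mapping whose values are (sequence, secondary-structure) string pairs of equal length. A sketch restating that invariant as a plain helper, under that assumption:

# Sketch under the assumption implied by the test above: each value is a
# (sequence, structure) pair that must align residue-for-residue.
def seq_struct_lengths_match(seq_structs):
    """True when every sequence and its structure string have equal length."""
    return all(len(seq) == len(struct) for seq, struct in seq_structs.values())

# mirrors the unittest:
# assert seq_struct_lengths_match(struct_utils.get_seq_structs(['1DWI']))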
from epsilon.extime import Time from nevow.livetrial import testcase from nevow import tags, loaders from nevow.athena import expose from axiom.store import Store from axiom.userbase import LoginMethod from axiom.dependency import installOn from xmantissa.webtheme import getLoader from xmantissa.people import Person, EmailAddress from xquotient import compose, mimeutil, smtpout from xquotient.inbox import Inbox from xquotient.test.test_inbox import testMessageFactory class _ComposeTestMixin: def _getComposeFragment( self, composeFragFactory=compose.ComposeFragment): s = Store() LoginMethod(store=s, internal=False, protocol=u'email', localpart=u'default', domain=u'host', verified=True, account=s) installOn(Inbox(store=s), s) smtpout.FromAddress( store=s, address=u'moe@divmod.com').setAsDefault() composer = compose.Composer(store=s) installOn(composer, s) composeFrag = composeFragFactory(composer) composeFrag.jsClass = u'Quotient.Test.ComposeController' composeFrag.setFragmentParent(self) composeFrag.docFactory = getLoader(composeFrag.fragmentName) return (s, composeFrag) class ComposeTestCase(testcase.TestCase, _ComposeTestMixin): """ Tests for Quotient.Compose.Controller """ jsClass = u'Quotient.Test.ComposeTestCase' docFactory = loaders.stan(tags.div[ tags.div(render=tags.directive('liveTest'))[ tags.div(render=tags.directive('composer'), style='visibility: hidden'), tags.div(id='mantissa-footer')]]) def render_composer(self, ctx, data): """ Make a bunch of people and give them email addresses """ (s, composeFrag) = self._getComposeFragment() def makePerson(email, name): EmailAddress(store=s, address=email, person=Person(store=s, name=name)) makePerson(u'maboulkheir@divmod.com', u'Moe Aboulkheir') makePerson(u'localpart@domain', u'Tobias Knight') makePerson(u'madonna@divmod.com', u'Madonna') makePerson(u'kilroy@foo', u'') return ctx.tag[composeFrag] class AddrPassthroughComposeFragment(compose.ComposeFragment): """ L{xquotient.compose.ComposeFragment} subclass which overrides L{_sendOrSave} to return a list of the flattened recipient addresses that were submitted via the compose form """ def _sendOrSave(self, **k): """ @return: sequence of C{unicode} email addresses """ return [addr.pseudoFormat() for addr in k['toAddresses']] class ComposeToAddressTestCase(testcase.TestCase, _ComposeTestMixin): """ Tests for the behaviour of recipient addresses in L{xquotient.compose.ComposeFragment} """ jsClass = u'Quotient.Test.ComposeToAddressTestCase' def __init__(self): testcase.TestCase.__init__(self) self.perTestData = {} def getComposeWidget(self, key, toAddresses): """ @param key: unique identifier for the test method @param toAddresses: sequence of C{unicode} email addresses which should be wrapped in L{xquotient.mimeutil.EmailAddress} instances and passed to the L{ComposeFragment} constructor. These will be used as the initial content of the client-side toAddresses form input when the fragment is rendered @rtype: L{AddrPassthroughComposeFragment} """ def composeFragFactory(composer): return AddrPassthroughComposeFragment( composer, recipients={'to': [mimeutil.EmailAddress(e, False) for e in toAddresses]}) (s, frag) = self._getComposeFragment( composeFragFactory=composeFragFactory) self.perTestData[key] = (s, frag) return frag expose(getComposeWidget) class ComposeAutoCompleteTestCase(testcase.TestCase): """ Tests for compose autocomplete """ jsClass = u'Quotient.Test.ComposeAutoCompleteTestCase'
twisted/quotient
xquotient/test/livetest_compose.py
Python
mit
4,363
[ "MOE" ]
16ef69e229c301ffc59d9fe65771a26797a52d796e81ad01d93abf61c8d75f53
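The makePerson closure inside render_composer above is the fixture pattern these livetests rely on. A sketch lifting it into a reusable helper, using the same axiom/xmantissa imports as the file:

# Sketch of the person-fixture pattern from render_composer above,
# pulled out as a standalone helper (same imports as the original file).
from axiom.store import Store
from xmantissa.people import Person, EmailAddress

def make_person(store, email, name):
    """Create a Person with one associated EmailAddress in `store`."""
    person = Person(store=store, name=name)
    EmailAddress(store=store, address=email, person=person)
    return person

# e.g.: make_person(Store(), u'madonna@divmod.com', u'Madonna')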
"""Utility functions for plotting M/EEG data """ from __future__ import print_function # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Denis Engemann <denis.engemann@gmail.com> # Martin Luessi <mluessi@nmr.mgh.harvard.edu> # Eric Larson <larson.eric.d@gmail.com> # Mainak Jas <mainak@neuro.hut.fi> # # License: Simplified BSD import math from functools import partial import difflib import webbrowser from warnings import warn import tempfile import numpy as np from ..io import show_fiff from ..utils import verbose, set_config COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#473C8B', '#458B74', '#CD7F32', '#FF4040', '#ADFF2F', '#8E2323', '#FF1493'] def _setup_vmin_vmax(data, vmin, vmax, norm=False): """Aux function to handle vmin and vmax parameters""" if vmax is None and vmin is None: vmax = np.abs(data).max() if norm: vmin = 0. else: vmin = -vmax else: if callable(vmin): vmin = vmin(data) elif vmin is None: if norm: vmin = 0. else: vmin = np.min(data) if callable(vmax): vmax = vmax(data) elif vmax is None: vmax = np.max(data) return vmin, vmax def plt_show(show=True, **kwargs): """Helper to show a figure while suppressing warnings""" import matplotlib.pyplot as plt if show: plt.show(warn=False, **kwargs) def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None): """ Adjust subplot parameters to give specified padding. Note. For plotting please use this function instead of plt.tight_layout Parameters ---------- pad : float padding between the figure edge and the edges of subplots, as a fraction of the font-size. h_pad : float Padding height between edges of adjacent subplots. Defaults to `pad_inches`. w_pad : float Padding width between edges of adjacent subplots. Defaults to `pad_inches`. fig : instance of Figure Figure to apply changes to. """ import matplotlib.pyplot as plt fig = plt.gcf() if fig is None else fig fig.canvas.draw() try: # see https://github.com/matplotlib/matplotlib/issues/2654 fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad) except Exception: warn('Matplotlib function \'tight_layout\' is not supported.' ' Skipping subplot adjusment.') else: try: fig.set_tight_layout(dict(pad=pad, h_pad=h_pad, w_pad=w_pad)) except Exception: pass def _check_delayed_ssp(container): """ Aux function to be used for interactive SSP selection """ if container.proj is True or\ all(p['active'] for p in container.info['projs']): raise RuntimeError('Projs are already applied. Please initialize' ' the data with proj set to False.') elif len(container.info['projs']) < 1: raise RuntimeError('No projs found in evoked.') def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'): """Return a colormap similar to that used by mne_analyze Parameters ---------- limits : list (or array) of length 3 or 6 Bounds for the colormap, which will be mirrored across zero if length 3, or completely specified (and potentially asymmetric) if length 6. format : str Type of colormap to return. If 'matplotlib', will return a matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will return an RGBA array of shape (256, 4). Returns ------- cmap : instance of matplotlib.pyplot.colormap | array A teal->blue->gray->red->yellow colormap. Notes ----- For this will return a colormap that will display correctly for data that are scaled by the plotting function to span [-fmax, fmax]. 
Examples -------- The following code will plot a STC using standard MNE limits: colormap = mne.viz.mne_analyze_colormap(limits=[5, 10, 15]) brain = stc.plot('fsaverage', 'inflated', 'rh', colormap) brain.scale_data_colormap(fmin=-15, fmid=0, fmax=15, transparent=False) """ # Ensure limits is an array limits = np.asarray(limits, dtype='float') if len(limits) != 3 and len(limits) != 6: raise ValueError('limits must have 3 or 6 elements') if len(limits) == 3 and any(limits < 0.): raise ValueError('if 3 elements, limits must all be non-negative') if any(np.diff(limits) <= 0): raise ValueError('limits must be monotonically increasing') if format == 'matplotlib': from matplotlib import colors if len(limits) == 3: limits = (np.concatenate((-np.flipud(limits), limits)) + limits[-1]) / (2 * limits[-1]) else: limits = (limits - np.min(limits)) / np.max(limits - np.min(limits)) cdict = {'red': ((limits[0], 0.0, 0.0), (limits[1], 0.0, 0.0), (limits[2], 0.5, 0.5), (limits[3], 0.5, 0.5), (limits[4], 1.0, 1.0), (limits[5], 1.0, 1.0)), 'green': ((limits[0], 1.0, 1.0), (limits[1], 0.0, 0.0), (limits[2], 0.5, 0.5), (limits[3], 0.5, 0.5), (limits[4], 0.0, 0.0), (limits[5], 1.0, 1.0)), 'blue': ((limits[0], 1.0, 1.0), (limits[1], 1.0, 1.0), (limits[2], 0.5, 0.5), (limits[3], 0.5, 0.5), (limits[4], 0.0, 0.0), (limits[5], 0.0, 0.0))} return colors.LinearSegmentedColormap('mne_analyze', cdict) elif format == 'mayavi': if len(limits) == 3: limits = np.concatenate((-np.flipud(limits), [0], limits)) /\ limits[-1] else: limits = np.concatenate((limits[:3], [0], limits[3:])) limits /= np.max(np.abs(limits)) r = np.array([0, 0, 0, 0, 1, 1, 1]) g = np.array([1, 0, 0, 0, 0, 0, 1]) b = np.array([1, 1, 1, 0, 0, 0, 0]) a = np.array([1, 1, 0, 0, 0, 1, 1]) xp = (np.arange(256) - 128) / 128.0 colormap = np.r_[[np.interp(xp, limits, 255 * c) for c in [r, g, b, a]]].T return colormap else: raise ValueError('format must be either matplotlib or mayavi') def _toggle_options(event, params): """Toggle options (projectors) dialog""" import matplotlib.pyplot as plt if len(params['projs']) > 0: if params['fig_proj'] is None: _draw_proj_checkbox(event, params, draw_current_state=False) else: # turn off options dialog plt.close(params['fig_proj']) del params['proj_checks'] params['fig_proj'] = None def _toggle_proj(event, params): """Operation to perform when proj boxes clicked""" # read options if possible if 'proj_checks' in params: bools = [x[0].get_visible() for x in params['proj_checks'].lines] for bi, (b, p) in enumerate(zip(bools, params['projs'])): # see if they tried to deactivate an active one if not b and p['active']: bools[bi] = True else: bools = [True] * len(params['projs']) compute_proj = False if 'proj_bools' not in params: compute_proj = True elif not np.array_equal(bools, params['proj_bools']): compute_proj = True # if projectors changed, update plots if compute_proj is True: params['plot_update_proj_callback'](params, bools) def _get_help_text(params): """Aux function for customizing help dialogs text.""" text, text2 = list(), list() text.append(u'\u2190 : \n') text.append(u'\u2192 : \n') text.append(u'\u2193 : \n') text.append(u'\u2191 : \n') text.append(u'- : \n') text.append(u'+ or = : \n') text.append(u'Home : \n') text.append(u'End : \n') text.append(u'Page down : \n') text.append(u'Page up : \n') text.append(u'F11 : \n') text.append(u'? 
: \n') text.append(u'Esc : \n\n') text.append(u'Mouse controls\n') text.append(u'click on data :\n') text2.append('Navigate left\n') text2.append('Navigate right\n') text2.append('Scale down\n') text2.append('Scale up\n') text2.append('Toggle full screen mode\n') text2.append('Open help box\n') text2.append('Quit\n\n\n') if 'raw' in params: text2.insert(4, 'Reduce the time shown per view\n') text2.insert(5, 'Increase the time shown per view\n') text.append(u'click elsewhere in the plot :\n') if 'ica' in params: text.append(u'click component name :\n') text2.insert(2, 'Navigate components down\n') text2.insert(3, 'Navigate components up\n') text2.insert(8, 'Reduce the number of components per view\n') text2.insert(9, 'Increase the number of components per view\n') text2.append('Mark bad channel\n') text2.append('Vertical line at a time instant\n') text2.append('Show topography for the component\n') else: text.append(u'click channel name :\n') text2.insert(2, 'Navigate channels down\n') text2.insert(3, 'Navigate channels up\n') text2.insert(8, 'Reduce the number of channels per view\n') text2.insert(9, 'Increase the number of channels per view\n') text2.append('Mark bad channel\n') text2.append('Vertical line at a time instant\n') text2.append('Mark bad channel\n') elif 'epochs' in params: text.append(u'right click :\n') text2.insert(4, 'Reduce the number of epochs per view\n') text2.insert(5, 'Increase the number of epochs per view\n') if 'ica' in params: text.append(u'click component name :\n') text2.insert(2, 'Navigate components down\n') text2.insert(3, 'Navigate components up\n') text2.insert(8, 'Reduce the number of components per view\n') text2.insert(9, 'Increase the number of components per view\n') text2.append('Mark component for exclusion\n') text2.append('Vertical line at a time instant\n') text2.append('Show topography for the component\n') else: text.append(u'click channel name :\n') text.append(u'right click channel name :\n') text2.insert(2, 'Navigate channels down\n') text2.insert(3, 'Navigate channels up\n') text2.insert(8, 'Reduce the number of channels per view\n') text2.insert(9, 'Increase the number of channels per view\n') text.insert(10, u'b : \n') text2.insert(10, 'Toggle butterfly plot on/off\n') text.insert(11, u'h : \n') text2.insert(11, 'Show histogram of peak-to-peak values\n') text2.append('Mark bad epoch\n') text2.append('Vertical line at a time instant\n') text2.append('Mark bad channel\n') text2.append('Plot ERP/ERF image\n') text.append(u'middle click :\n') text2.append('Show channel name (butterfly plot)\n') text.insert(11, u'o : \n') text2.insert(11, 'View settings (orig. 
view only)\n') return ''.join(text), ''.join(text2) def _prepare_trellis(n_cells, max_col): """Aux function """ import matplotlib.pyplot as plt if n_cells == 1: nrow = ncol = 1 elif n_cells <= max_col: nrow, ncol = 1, n_cells else: nrow, ncol = int(math.ceil(n_cells / float(max_col))), max_col fig, axes = plt.subplots(nrow, ncol, figsize=(7.4, 1.5 * nrow + 1)) axes = [axes] if ncol == nrow == 1 else axes.flatten() for ax in axes[n_cells:]: # hide unused axes ax.set_visible(False) return fig, axes def _draw_proj_checkbox(event, params, draw_current_state=True): """Toggle options (projectors) dialog""" from matplotlib import widgets projs = params['projs'] # turn on options dialog labels = [p['desc'] for p in projs] actives = ([p['active'] for p in projs] if draw_current_state else [True] * len(params['projs'])) width = max([len(p['desc']) for p in projs]) / 6.0 + 0.5 height = len(projs) / 6.0 + 0.5 fig_proj = figure_nobar(figsize=(width, height)) fig_proj.canvas.set_window_title('SSP projection vectors') params['fig_proj'] = fig_proj # necessary for proper toggling ax_temp = fig_proj.add_axes((0, 0, 1, 1), frameon=False) proj_checks = widgets.CheckButtons(ax_temp, labels=labels, actives=actives) # change already-applied projectors to red for ii, p in enumerate(projs): if p['active'] is True: for x in proj_checks.lines[ii]: x.set_color('r') # make minimal size # pass key presses from option dialog over proj_checks.on_clicked(partial(_toggle_proj, params=params)) params['proj_checks'] = proj_checks # this should work for non-test cases try: fig_proj.canvas.draw() fig_proj.show(warn=False) except Exception: pass def _layout_figure(params): """Function for setting figure layout. Shared with raw and epoch plots""" size = params['fig'].get_size_inches() * params['fig'].dpi scroll_width = 25 hscroll_dist = 25 vscroll_dist = 10 l_border = 100 r_border = 10 t_border = 35 b_border = 40 # only bother trying to reset layout if it's reasonable to do so if size[0] < 2 * scroll_width or size[1] < 2 * scroll_width + hscroll_dist: return # convert to relative units scroll_width_x = scroll_width / size[0] scroll_width_y = scroll_width / size[1] vscroll_dist /= size[0] hscroll_dist /= size[1] l_border /= size[0] r_border /= size[0] t_border /= size[1] b_border /= size[1] # main axis (traces) ax_width = 1.0 - scroll_width_x - l_border - r_border - vscroll_dist ax_y = hscroll_dist + scroll_width_y + b_border ax_height = 1.0 - ax_y - t_border pos = [l_border, ax_y, ax_width, ax_height] params['ax'].set_position(pos) if 'ax2' in params: params['ax2'].set_position(pos) params['ax'].set_position(pos) # vscroll (channels) pos = [ax_width + l_border + vscroll_dist, ax_y, scroll_width_x, ax_height] params['ax_vscroll'].set_position(pos) # hscroll (time) pos = [l_border, b_border, ax_width, scroll_width_y] params['ax_hscroll'].set_position(pos) if 'ax_button' in params: # options button pos = [l_border + ax_width + vscroll_dist, b_border, scroll_width_x, scroll_width_y] params['ax_button'].set_position(pos) if 'ax_help_button' in params: pos = [l_border - vscroll_dist - scroll_width_x * 2, b_border, scroll_width_x * 2, scroll_width_y] params['ax_help_button'].set_position(pos) params['fig'].canvas.draw() @verbose def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent=' ', read_limit=np.inf, max_str=30, verbose=None): """Compare the contents of two fiff files using diff and show_fiff Parameters ---------- fname_1 : str First file to compare. fname_2 : str Second file to compare. 
fname_out : str | None Filename to store the resulting diff. If None, a temporary file will be created. show : bool If True, show the resulting diff in a new tab in a web browser. indent : str How to indent the lines. read_limit : int Max number of bytes of data to read from a tag. Can be np.inf to always read all data (helps test read completion). max_str : int Max number of characters of string representation to print for each tag's data. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- fname_out : str The filename used for storing the diff. Could be useful for when a temporary file is used. """ file_1 = show_fiff(fname_1, output=list, indent=indent, read_limit=read_limit, max_str=max_str) file_2 = show_fiff(fname_2, output=list, indent=indent, read_limit=read_limit, max_str=max_str) diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2) if fname_out is not None: f = open(fname_out, 'wb') else: f = tempfile.NamedTemporaryFile('wb', delete=False, suffix='.html') fname_out = f.name with f as fid: fid.write(diff.encode('utf-8')) if show is True: webbrowser.open_new_tab(fname_out) return fname_out def figure_nobar(*args, **kwargs): """Make matplotlib figure with no toolbar""" from matplotlib import rcParams, pyplot as plt old_val = rcParams['toolbar'] try: rcParams['toolbar'] = 'none' fig = plt.figure(*args, **kwargs) # remove button press catchers (for toolbar) cbs = list(fig.canvas.callbacks.callbacks['key_press_event'].keys()) for key in cbs: fig.canvas.callbacks.disconnect(key) except Exception as ex: raise ex finally: rcParams['toolbar'] = old_val return fig def _helper_raw_resize(event, params): """Helper for resizing""" size = ','.join([str(s) for s in params['fig'].get_size_inches()]) set_config('MNE_BROWSE_RAW_SIZE', size) _layout_figure(params) def _plot_raw_onscroll(event, params, len_channels=None): """Interpret scroll events""" if len_channels is None: len_channels = len(params['info']['ch_names']) orig_start = params['ch_start'] if event.step < 0: params['ch_start'] = min(params['ch_start'] + params['n_channels'], len_channels - params['n_channels']) else: # event.key == 'up': params['ch_start'] = max(params['ch_start'] - params['n_channels'], 0) if orig_start != params['ch_start']: _channels_changed(params, len_channels) def _channels_changed(params, len_channels): """Helper function for dealing with the vertical shift of the viewport.""" if params['ch_start'] + params['n_channels'] > len_channels: params['ch_start'] = len_channels - params['n_channels'] if params['ch_start'] < 0: params['ch_start'] = 0 params['plot_fun']() def _plot_raw_time(value, params): """Deal with changed time value""" info = params['info'] max_times = params['n_times'] / float(info['sfreq']) - params['duration'] if value > max_times: value = params['n_times'] / info['sfreq'] - params['duration'] if value < 0: value = 0 if params['t_start'] != value: params['t_start'] = value params['hsel_patch'].set_x(value) def _plot_raw_onkey(event, params): """Interpret key presses""" import matplotlib.pyplot as plt if event.key == 'escape': plt.close(params['fig']) elif event.key == 'down': params['ch_start'] += params['n_channels'] _channels_changed(params, len(params['info']['ch_names'])) elif event.key == 'up': params['ch_start'] -= params['n_channels'] _channels_changed(params, len(params['info']['ch_names'])) elif event.key == 'right': value = params['t_start'] + params['duration'] _plot_raw_time(value, params) 
params['update_fun']() params['plot_fun']() elif event.key == 'left': value = params['t_start'] - params['duration'] _plot_raw_time(value, params) params['update_fun']() params['plot_fun']() elif event.key in ['+', '=']: params['scale_factor'] *= 1.1 params['plot_fun']() elif event.key == '-': params['scale_factor'] /= 1.1 params['plot_fun']() elif event.key == 'pageup': n_channels = params['n_channels'] + 1 offset = params['ax'].get_ylim()[0] / n_channels params['offsets'] = np.arange(n_channels) * offset + (offset / 2.) params['n_channels'] = n_channels params['ax'].set_yticks(params['offsets']) params['vsel_patch'].set_height(n_channels) _channels_changed(params, len(params['info']['ch_names'])) elif event.key == 'pagedown': n_channels = params['n_channels'] - 1 if n_channels == 0: return offset = params['ax'].get_ylim()[0] / n_channels params['offsets'] = np.arange(n_channels) * offset + (offset / 2.) params['n_channels'] = n_channels params['ax'].set_yticks(params['offsets']) params['vsel_patch'].set_height(n_channels) if len(params['lines']) > n_channels: # remove line from view params['lines'][n_channels].set_xdata([]) params['lines'][n_channels].set_ydata([]) _channels_changed(params, len(params['info']['ch_names'])) elif event.key == 'home': duration = params['duration'] - 1.0 if duration <= 0: return params['duration'] = duration params['hsel_patch'].set_width(params['duration']) params['update_fun']() params['plot_fun']() elif event.key == 'end': duration = params['duration'] + 1.0 if duration > params['raw'].times[-1]: duration = params['raw'].times[-1] params['duration'] = duration params['hsel_patch'].set_width(params['duration']) params['update_fun']() params['plot_fun']() elif event.key == '?': _onclick_help(event, params) elif event.key == 'f11': mng = plt.get_current_fig_manager() mng.full_screen_toggle() def _mouse_click(event, params): """Vertical select callback""" if event.button != 1: return if event.inaxes is None: if params['n_channels'] > 100: return ax = params['ax'] ylim = ax.get_ylim() pos = ax.transData.inverted().transform((event.x, event.y)) if pos[0] > params['t_start'] or pos[1] < 0 or pos[1] > ylim[0]: return params['label_click_fun'](pos) # vertical scrollbar changed if event.inaxes == params['ax_vscroll']: ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0) if params['ch_start'] != ch_start: params['ch_start'] = ch_start params['plot_fun']() # horizontal scrollbar changed elif event.inaxes == params['ax_hscroll']: _plot_raw_time(event.xdata - params['duration'] / 2, params) params['update_fun']() params['plot_fun']() elif event.inaxes == params['ax']: params['pick_bads_fun'](event) def _select_bads(event, params, bads): """Helper for selecting bad channels onpick. 
Returns updated bads list.""" # trade-off, avoid selecting more than one channel when drifts are present # however for clean data don't click on peaks but on flat segments def f(x, y): return y(np.mean(x), x.std() * 2) lines = event.inaxes.lines for line in lines: ydata = line.get_ydata() if not isinstance(ydata, list) and not np.isnan(ydata).any(): ymin, ymax = f(ydata, np.subtract), f(ydata, np.add) if ymin <= event.ydata <= ymax: this_chan = vars(line)['ch_name'] if this_chan in params['info']['ch_names']: ch_idx = params['ch_start'] + lines.index(line) if this_chan not in bads: bads.append(this_chan) color = params['bad_color'] line.set_zorder(-1) else: while this_chan in bads: bads.remove(this_chan) color = vars(line)['def_color'] line.set_zorder(0) line.set_color(color) params['ax_vscroll'].patches[ch_idx].set_color(color) break else: x = np.array([event.xdata] * 2) params['ax_vertline'].set_data(x, np.array(params['ax'].get_ylim())) params['ax_hscroll_vertline'].set_data(x, np.array([0., 1.])) params['vertline_t'].set_text('%0.3f' % x[0]) return bads def _onclick_help(event, params): """Function for drawing help window""" import matplotlib.pyplot as plt text, text2 = _get_help_text(params) width = 6 height = 5 fig_help = figure_nobar(figsize=(width, height), dpi=80) fig_help.canvas.set_window_title('Help') ax = plt.subplot2grid((8, 5), (0, 0), colspan=5) ax.set_title('Keyboard shortcuts') plt.axis('off') ax1 = plt.subplot2grid((8, 5), (1, 0), rowspan=7, colspan=2) ax1.set_yticklabels(list()) plt.text(0.99, 1, text, fontname='STIXGeneral', va='top', weight='bold', ha='right') plt.axis('off') ax2 = plt.subplot2grid((8, 5), (1, 2), rowspan=7, colspan=3) ax2.set_yticklabels(list()) plt.text(0, 1, text2, fontname='STIXGeneral', va='top') plt.axis('off') tight_layout(fig=fig_help) # this should work for non-test cases try: fig_help.canvas.draw() fig_help.show(warn=False) except Exception: pass class ClickableImage(object): """ Display an image so you can click on it and store x/y positions. Takes as input an image array (can be any array that works with imshow, but will work best with images. Displays the image and lets you click on it. Stores the xy coordinates of each click, so now you can superimpose something on top of it. Upon clicking, the x/y coordinate of the cursor will be stored in self.coords, which is a list of (x, y) tuples. Parameters ---------- imdata: ndarray The image that you wish to click on for 2-d points. **kwargs : dict Keyword arguments. Passed to ax.imshow. Notes ----- .. versionadded:: 0.9.0 """ def __init__(self, imdata, **kwargs): """Display the image for clicking.""" from matplotlib.pyplot import figure self.coords = [] self.imdata = imdata self.fig = figure() self.ax = self.fig.add_subplot(111) self.ymax = self.imdata.shape[0] self.xmax = self.imdata.shape[1] self.im = self.ax.imshow(imdata, aspect='auto', extent=(0, self.xmax, 0, self.ymax), picker=True, **kwargs) self.ax.axis('off') self.fig.canvas.mpl_connect('pick_event', self.onclick) plt_show() def onclick(self, event): """Mouse click handler. Parameters ---------- event: matplotlib event object The matplotlib object that we use to get x/y position. """ mouseevent = event.mouseevent self.coords.append((mouseevent.xdata, mouseevent.ydata)) def plot_clicks(self, **kwargs): """Plot the x/y positions stored in self.coords. Parameters ---------- **kwargs : dict Arguments are passed to imshow in displaying the bg image. 
""" from matplotlib.pyplot import subplots f, ax = subplots() ax.imshow(self.imdata, extent=(0, self.xmax, 0, self.ymax), **kwargs) xlim, ylim = [ax.get_xlim(), ax.get_ylim()] xcoords, ycoords = zip(*self.coords) ax.scatter(xcoords, ycoords, c='r') ann_text = np.arange(len(self.coords)).astype(str) for txt, coord in zip(ann_text, self.coords): ax.annotate(txt, coord, fontsize=20, color='r') ax.set_xlim(xlim) ax.set_ylim(ylim) plt_show() def to_layout(self, **kwargs): """Turn coordinates into an MNE Layout object. Normalizes by the image you used to generate clicks Parameters ---------- **kwargs : dict Arguments are passed to generate_2d_layout """ from mne.channels.layout import generate_2d_layout coords = np.array(self.coords) lt = generate_2d_layout(coords, bg_image=self.imdata, **kwargs) return lt def _fake_click(fig, ax, point, xform='ax', button=1): """Helper to fake a click at a relative point within axes.""" if xform == 'ax': x, y = ax.transAxes.transform_point(point) elif xform == 'data': x, y = ax.transData.transform_point(point) else: raise ValueError('unknown transform') try: fig.canvas.button_press_event(x, y, button, False, None) except Exception: # for old MPL fig.canvas.button_press_event(x, y, button, False) def add_background_image(fig, im, set_ratios=None): """Add a background image to a plot. Adds the image specified in `im` to the figure `fig`. This is generally meant to be done with topo plots, though it could work for any plot. Note: This modifies the figure and/or axes in place. Parameters ---------- fig: plt.figure The figure you wish to add a bg image to. im: ndarray A numpy array that works with a call to plt.imshow(im). This will be plotted as the background of the figure. set_ratios: None | str Set the aspect ratio of any axes in fig to the value in set_ratios. Defaults to None, which does nothing to axes. Returns ------- ax_im: instance of the create matplotlib axis object corresponding to the image you added. Notes ----- .. versionadded:: 0.9.0 """ if set_ratios is not None: for ax in fig.axes: ax.set_aspect(set_ratios) ax_im = fig.add_axes([0, 0, 1, 1]) ax_im.imshow(im, aspect='auto') ax_im.set_zorder(-1) return ax_im
yousrabk/mne-python
mne/viz/utils.py
Python
bsd-3-clause
30,405
[ "Mayavi" ]
258d41dca10ea690a0715db8b25c09045d60ead8b991da71ca7b41ba5b613034
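ClickableImage in the utils module above is meant for click-to-layout workflows. A short sketch of that workflow, assuming the import path matches the file's location and using a random array in place of a real sensor-cap photo:

# Sketch of the ClickableImage -> Layout workflow documented above; the
# import path is assumed from the file's location, and the random array
# stands in for a real image.
import numpy as np
from mne.viz.utils import ClickableImage

im = np.random.RandomState(0).rand(64, 64)
click = ClickableImage(im)   # figure opens; click points, then close it
click.plot_clicks()          # overlay the stored (x, y) coordinates
layout = click.to_layout()   # normalized MNE Layout built from the clicks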
#!/usr/bin/env python # -*- coding: utf-8 -*- import re import subprocess import os #import traceback import sys import simplejson as json from ga_parser_class import * def ConstructMyGaParser(jsondata): """ JsonData class called to construct my object < Take jsondata > Return a List of object JsonData """ ListeJsonData = [] annotation = jsondata['annotation'] workflow_name = jsondata['name'] nb_steps = len(jsondata['steps']) for i in range(nb_steps): step_id = jsondata['steps'][str(i)]['id'] tname = jsondata['steps'][str(i)]['name'] ttool_id = jsondata['steps'][str(i)]['tool_id'] ttype = jsondata['steps'][str(i)]['type'] ttool_version = jsondata['steps'][str(i)]['tool_version'] tinput_name = jsondata['steps'][str(i)]['inputs'] toutput_name = jsondata['steps'][str(i)]['outputs'] tuser_output_name = jsondata['steps'][str(i)]['user_outputs'] ttool_state = jsondata['steps'][str(i)]['tool_state'] ttool_errors = jsondata['steps'][str(i)]['tool_errors'] ListeJsonData.append(JsonData(workflow_name, annotation, step_id, \ tname, ttool_id, ttype, ttool_version, tinput_name, toutput_name, \ tuser_output_name, ttool_state, ttool_errors)) return ListeJsonData def getjson(infile): try: data = infile.read() infile.close() #if not limited to 4500 characters ? try: jsondata = json.loads(data) return jsondata except: sys.exit("Not a json file !! Bye bye...") #except Exception as e: #top = traceback.extract_stack()[-1] #print(', '.join([type(e).__name__, os.path.basename(top[0]), \ #str(top[1])])) except: sys.exit("Your file is not a text file readable !! Bye "+\ "bye...") def getworkflowparams(ListeJsonData): """ < Get workflow parameters if it is possible... (advanced edit option enable on workflow creation) """ aff = True params_tools = [] for JsonData in ListeJsonData: step_id = JsonData.step_id tname = JsonData.tname ttool_id = JsonData.ttool_id ttype = JsonData.ttype ttool_version = JsonData.ttool_version tinput_name = JsonData.tinput_name toutput_name = JsonData.toutput_name tuser_output_name = JsonData.tuser_output_name ttool_state = JsonData.ttool_state ttool_errors = JsonData.ttool_errors if ttool_errors is None: print("id : "+str(step_id)) print("tool name : "+tname) if ttool_id is not None: print("tool_id : " +ttool_id) print("type : " +ttype) if ttool_version is not None: print("version : " +ttool_version) if tinput_name: print("Input : ") JsonData.ParseInOutVal(tinput_name, "input", aff) if toutput_name: print("Output : ") JsonData.ParseInOutVal(toutput_name, "output", aff) if ttype == "tool": print("#"*40) #params_tools = ttool_state.replace("\\","") #params_tools = eval(ttool_state) params_tools = json.loads(ttool_state) JsonData.ParseParams(params_tools, aff) print("\n\n") else: print("This tool is in error state" +tname) def getinterpreter(filetype): """ Try to get the interpreter from filetype < Get the content of the "file" linux command on a specific executable > Return the interpreter """ if re.search("Bourne-Again shell", filetype, flags=re.I): interpreter = 'bash' elif re.search("python", filetype, flags=re.I): interpreter = 'python' elif re.search("perl", filetype, flags=re.I): interpreter = 'perl' elif re.search("Tenex C shell", filetype, flags=re.I): interpreter = 'tcsh' elif re.search("C shell", filetype, flags=re.I): interpreter = 'csh' elif re.search("Korn shell", filetype, flags=re.I): interpreter = 'ksh' elif re.search("zsh", filetype, flags=re.I): interpreter = 'zsh' elif re.search("PHP", filetype, flags=re.I): interpreter = 'php' else: interpreter = 'sh' return interpreter 
def GetProgHelpParam(execline, record_separator, field_separator, fromfile): """ Try to parse the Help section of a program stdout < Get a command line to execute [Help section of this program] < Get one or two delimiter (eventually a regexp) for RS and FS > Return the list of short/(eventually)long arguments > Return eventually a description of each argument and a general description of the program > Return the interpreter used > Return the program basename """ short_args = [] long_args = [] descrips = [] general_descrip = [] interpreter = "" # initialize dialup with undefined option DIALS = "U" first_message = "\n Try [A]utomatic search (ok for well "+\ "formatted help). If there is short (e.g. -f) and long options "+\ "available (e.g. --file), you will have to choose [M]anual, "+\ "to check each output.\n If you have only [L]ong options, e.g. "+\ "--myoption, choose [L]ong options.\n" +\ " If you have only [S]hort options, choose 'S'.\n"+\ " Please note that [A]utomatic search could overwrite your field "+\ "separator.\n" +\ " [M]anual choices also allow you to add general description.\n"+\ " [A]|L|S|M:" each_dial = "#"*12+"\n Try [A]utomatic search (could overwrite"+\ " your separator) ? Or, is it a [L]ong or a [S]hort Option (the "+\ "important part is the beginning of the line) ? Or add this "+\ "content to the general [D]escription ? Or [I]gnore ? \n"+\ " [A]|L|S|D|I:" sep_r = re.compile(record_separator, flags=re.U) sep_f = re.compile(field_separator, flags=re.U) fs = field_separator std_help_pattern = re.compile(r"^(?P<short>\w+,?" + fs + ")?"+\ "(--?(?P<long>\w+([-=]?(\w+)?)*)" + fs + ")?(?P<descrip>((.+"+\ "(\n|\r\n?)?)*))", re.M|re.U|re.I) if fromfile is False: print("Try to parse default help output from program '%s'" \ % execline) # some problems with "<" or ">" because we need to generate a xml # content results = sanitize(sendcommand(execline)) else: results = execline print("\n\n"+"#"*25+"\n") print("Output that should be parse : \n\n"+"#"*25+"\n %s" % results) print("\n"+"#"*25+"\n\n") generic_opt = raw_input(first_message) argsarray = re.split(sep_r, results) print("\n\n"+"#"*25+"\n\n") for arg in argsarray: #the_option = arg.split() the_option = re.split(sep_f, arg) """ generic_opt is automatic -> Same as bellow without asking anything """ if generic_opt == "A" or generic_opt == "": # for debug: some search are bad because of a wrong # field separator #print arg content_in_arg = std_help_pattern.search(arg) results = content_in_arg.groupdict() if results: if results['short']: short_o = "-" + results['short'].strip(' ,;') short_args.append(short_o) else: short_args.append("") if results['long']: long_o = "--" + results['long'].strip(' ,;') long_args.append(long_o) else: long_args.append("") if results['descrip']: descrips.append(results['descrip']) else: descrips.append("") continue if generic_opt == "M": print("\n\n"+"#"*12+"\n") print(arg) print("\n") DIALS = raw_input(each_dial) if DIALS == "A" or DIALS == "" or DIALS == "U": content_in_arg = std_help_pattern.search(arg) results = content_in_arg.groupdict() if results: if results['short']: short_o = "-" + results['short'].strip(' ,;') short_args.append(short_o) else: short_args.append("") if results['long']: long_o = "--" + results['long'].strip(' ,;') long_args.append(long_o) else: long_args.append("") if results['descrip']: descrips.append(results['descrip']) else: descrips.append("") continue if DIALS != "D" and DIALS != "I": try: first_opt = the_option[0] if len(the_option) >= 3: short_args.append("-"+first_opt) 
long_args.append(the_option[1]) descrips.append(" ".join(the_option[2:])) elif len(the_option) == 2: if(generic_opt == "L") or (DIALS == "L"): short_args.append("") long_args.append(first_opt) descrips.append(" ".join(the_option[1:])) elif(generic_opt == "S") or (DIALS == "S"): short_args.append("-"+first_opt) long_args.append("") descrips.append(" ".join(the_option[1:])) else: descrips.append(" ".join(the_option[1:])) long_args.append("") short_args.append("") except: print("Warning: Bad field separator ?!!") elif DIALS == "D": general_descrip.append(arg) else: DIALS = "U" #undefined interpreter_list = ["bash", "csh", "ksh", "sh", "python", "php", \ "perl", "tcsh", "zsh", "java"] progname = execline.split()[0] # check if progname is an iterpreter or the program itself (executable) for arg0 in interpreter_list: if progname == arg0: # check for dash in next args to see if it is option or # the program name itself for next_arg in execline.split()[1:]: if next_arg[0] != "-": progname = next_arg break interpreter = arg0 if interpreter == "": filetype = sendcommand("file "+progname) interpreter = getinterpreter(filetype) progshortname = os.path.basename(progname) print(progshortname) print(interpreter) print("\n"+"#"*25+"\n") for value in short_args: print("\t"+value) print("\n"+"#"*25+"\n") for long_value in long_args: print("\t"+long_value) print("\n"+"#"*25+"\n") for descrip in descrips: print("\t"+descrip) print("\n"+"#"*25+"\n") for g_descrip in general_descrip: print("\t"+g_descrip) mybigdict = {} mybigdict["short_args"] = [] mybigdict["short_args"] = short_args mybigdict["long_args"] = [] mybigdict["long_args"] = long_args mybigdict["descrips_args"] = [] mybigdict["descrips_args"] = descrips mybigdict["general_descrip"] = [] mybigdict["general_descrip"] = general_descrip mybigdict["progfullname"] = progname mybigdict["progshortname"] = progshortname mybigdict["interpreter"] = interpreter ##for debug #print(mybigdict) return mybigdict def GetVersion(progfullname): """ Try to get version for the program and ask version for the tool to create """ vprogoutput = sendcommand(progfullname+ \ " --version") vprog = re.search(r"\d+(\.\d+)*\s", vprogoutput, \ re.M|re.U) if vprog: print("Find version:"+vprog.group().encode('utf8')) vprog = vprog.group().encode('utf8') else: print("Could not retrieve version information for "+\ "program "+progfullname) getvprog = raw_input("Please tell me your program "+\ "version :") vprog = getvprog vtool = raw_input("Please tell me your OnlineTool "+\ "version :") return (vprog, vtool) def pisewrapper(ListeJsonData, fname): """ Galaxy file transformer to pise wrapper (xml) < Take the content of the ga file (json/galaxy format) > Return an xml file (pise format) in the same directory of ga file file. 
""" writewn = False if type(fname) is str: xmlfile = open(fname, "w") else: xmlfile = fname In = [] Out = [] aff = False param_tools = [] headers = "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n"+\ "<!DOCTYPE pise SYSTEM \"PARSER/pise.dtd\">\n" for JsonData in ListeJsonData: #print(str(JsonData)) wn = JsonData.wn ann = JsonData.ann step_id = JsonData.step_id tname = JsonData.tname ttool_id = JsonData.ttool_id ttype = JsonData.ttype ttool_version = JsonData.ttool_version tinput_name = JsonData.tinput_name toutput_name = JsonData.toutput_name tuser_output_name = JsonData.tuser_output_name ttool_state = JsonData.ttool_state ttool_errors = JsonData.ttool_errors if ttool_errors is None: if tinput_name: In = JsonData.ParseInOutVal(tinput_name, "input", aff) if toutput_name: Out = JsonData.ParseInOutVal(toutput_name, "output", aff) if ttype == "tool": params_tools = json.loads(ttool_state) opts = JsonData.ParseParams(params_tools, aff) #headers + #parameter type infile if writewn == False: headers += '<pise>\n'+\ '<head>\n'+\ ' '*2 +'<title>'+wn+'</title>\n'+\ ' '*2 +'<description>'+ann+'</description>\n'+\ ' '*2 +'<authors>ParserConverterXML</authors>\n'+\ '</head>\n'+\ ' '*2 +'<command>'+wn+'</command>\n'+\ ' '*2 +'<parameters>\n' xmlfile.write(headers) writewn = True xmlfile.write(' '*10 +'<parameter type="InFile" ismandatory="'+\ '1">\n') xmlfile.write(' '*12 +'<name>'+tname+'</name>\n') xmlfile.write(' '*10 +'</parameter>\n') xmlfile.write(' '*10 +'<parameter type="InFile" ismandatory="'+\ '1">\n') xmlfile.write(' '*12 +'<name>'+str(tinput_name)+'</name>\n') xmlfile.write(' '*10 +'</parameter>\n') footers = ' '*2 +'</parameters>\n'+\ '</pise>' xmlfile.write(footers) xmlfile.close() def sanitize(string): charsdict = { '<': '&lt;', '>': '&gt;', '&': '&amp;', '"': '&quot;' } for k, v in charsdict.iteritems(): string = string.replace(k, v) return string def sendcommand(cmd): """ Take a shell bash command Return the stdout value for a pipe subprocess """ proc = subprocess.Popen("bash", shell=True, stdin=subprocess.PIPE, \ stdout=subprocess.PIPE, stderr=subprocess.PIPE) return proc.communicate(cmd)[0]
remyd1/XMLparser-wrapper
gaclass/utils.py
Python
apache-2.0
15,409
[ "Galaxy" ]
c01d2a3e10fc35dea60811ec31f9d9f74053dfc3da8e709308ea59a79a0718e5
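The std_help_pattern regex built inside GetProgHelpParam above is the heart of the automatic parse. A sketch applying it to a single, made-up help line with the field separator fixed to whitespace:

# Sketch of the option-line regex from GetProgHelpParam above, with the
# field separator fixed to r'\s+'; the sample help line is invented.
import re

fs = r'\s+'
std_help_pattern = re.compile(
    r"^(?P<short>\w+,?" + fs + ")?"
    r"(--?(?P<long>\w+([-=]?(\w+)?)*)" + fs + ")?"
    r"(?P<descrip>((.+(\n|\r\n?)?)*))",
    re.M | re.U | re.I)

m = std_help_pattern.search("f, --file    read input from FILE")
print(m.groupdict())
# -> short ~ 'f, ', long ~ 'file', descrip ~ 'read input from FILE'
# (GetProgHelpParam then strips ' ,;' and re-prefixes '-'/'--')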
"""foldtrajectory - folds atoms into the periodic computational box. Usage: python -m ase.io.foldtrajectory infile.traj outfile.traj In molecular dynamics simulations with periodic boundary conditions, atoms sometimes move out of one side of the computational box and in through the other. Such atoms have coordinates outside the box. This facilitates analysis of e.g. diffusion, but can be problematic when plotting. This script reads through a trajectory file, and write a new one where all atoms are mapped into the computational box. If there are axes with free boundary conditions, the corresponding coordinate is left unchanged. SIDE EFFECT: All energies, forces and stresses are removed (yes, this can be considered as a bug!) """ from __future__ import print_function import sys from ase.io.trajectory import PickleTrajectory if len(sys.argv) != 3: print(__doc__) sys.exit(1) infile = PickleTrajectory(sys.argv[1]) outfile = None for atoms in infile: atoms.set_scaled_positions(atoms.get_scaled_positions()) atoms.set_calculator(None) # or the singlepointcalculator fails! if outfile is None: outfile = PickleTrajectory(sys.argv[2], 'w') outfile.write(atoms) outfile.close()
grhawk/ASE
tools/ase/io/foldtrajectory.py
Python
gpl-2.0
1,246
[ "ASE" ]
29eddb0f1b0d7b176b732047d719ca373e5343bc13042daad590a31854cb1112
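The whole script above hinges on one round-trip, set_scaled_positions(get_scaled_positions()), which relies on the scaled positions being wrapped along periodic axes. A sketch of that folding on a bare Atoms object:

# Sketch of the folding round-trip used above; relies on
# get_scaled_positions() wrapping coordinates along periodic directions.
from ase import Atoms

atoms = Atoms('H2', positions=[[0.0, 0.0, 0.0], [11.0, 0.0, 0.0]],
              cell=[10.0, 10.0, 10.0], pbc=True)
atoms.set_scaled_positions(atoms.get_scaled_positions())
print(atoms.get_positions())   # second H folded from x=11.0 to x=1.0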
"""Predicates providing information about the state of the current player.""" import datetime from apps.managers.challenge_mgr import challenge_mgr from apps.managers.player_mgr.models import Profile from apps.widgets.badges.models import BadgeAward from apps.widgets.resource_goal.models import EnergyGoal from apps.managers.score_mgr.models import ScoreboardEntry def allocated_raffle_ticket(user): """Returns True if the user has any allocated tickets.""" return user.raffleticket_set.count() > 0 def badge_awarded(user, badge_slug): """Returns True if the badge is awarded to the user.""" for awarded in BadgeAward.objects.filter(profile=user.get_profile()): if awarded.badge.slug == badge_slug: return True return False def changed_theme(user): """returns True if the user change their theme.""" theme = user.get_profile().theme if not theme: return False else: return theme != challenge_mgr.get_challenge().theme def daily_energy_goal_count(user, count): """Returns True if the number of consecutively meeting daily energy goal equals to count.""" team = user.get_profile().team if team: goals = EnergyGoal.objects.filter(team=team, goal_status='Below the goal').order_by("date") if goals: date = goals[0].date count = 0 for goal in goals: if (goal.date - date) == datetime.timedelta(days=1): count += 1 else: count = 0 if count == 2: return True date = goal.date return False def daily_visit_count(user, count): """Returns True if the number of the user daily visit equals to count.""" return user.get_profile().daily_visit_count >= count def has_points(user, points): """Returns True if the user has more than the specified points.""" return user.get_profile().points() >= points def is_admin(user): """Returns True if the user is an admin.""" return user.is_staff or user.is_superuser def posted_to_wall(user): """Returns True if the user posted to their wall and False otherwise.""" if user.post_set.filter(style_class="user_post").count() > 0: return True return False def referring_count(user, count): """Returns True if the user have referred at least [count] new players.""" return Profile.objects.filter(referring_user=user).count() >= count def set_profile_pic(user): """Returns True if the user posted to their wall and False otherwise.""" if user.avatar_set.filter(primary=True).count() > 0: return True return False def team_member_point_percent(user, points, percent): """Returns True if the user's team has at least [percent] members got at least [points].""" team = user.get_profile().team if team: current_round = challenge_mgr.get_round_name() point_count = ScoreboardEntry.objects.filter(profile__team=team, points__gte=points, round_name=current_round, ).count() return point_count * 100 / team.profile_set.count() >= percent return False
jtakayama/makahiki-draft
makahiki/apps/managers/predicate_mgr/player_predicates.py
Python
mit
3,326
[ "VisIt" ]
3d021459f6235d76e97eb4b83fa6812230ffddd37c309dbefb6ea880e6fc0359
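Each predicate above is a plain function of a user plus parameters, so they can be exercised with hand-rolled stubs instead of real Django models. A hedged sketch follows; the stub classes are invented here, and importing the predicates module still requires a configured Django environment:

# Hedged sketch: exercising has_points and daily_visit_count with stubs.
# StubProfile/StubUser are invented; Django settings must be configured
# before the predicates module can be imported.
from apps.managers.predicate_mgr.player_predicates import (
    has_points, daily_visit_count)

class StubProfile(object):
    def __init__(self, points, daily_visit_count):
        self._points = points
        self.daily_visit_count = daily_visit_count

    def points(self):
        return self._points

class StubUser(object):
    def __init__(self, profile):
        self._profile = profile

    def get_profile(self):
        return self._profile

user = StubUser(StubProfile(points=120, daily_visit_count=3))
assert has_points(user, 100)          # 120 >= 100
assert daily_visit_count(user, 3)     # 3 >= 3
assert not daily_visit_count(user, 5)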
#!/usr/bin/env python import sys sys.path.append('/opt/lib/python2.7/site-packages/') import math import numpy as np import pylab import nest import nest.raster_plot import nest.topology as tp import uuid def run(total, ratio_e, ratio_i, extent, wt_max, wt_min, delay_min, delay_max, ac_amp, ac_freq): pop_e = total*ratio_e pop_i = total*ratio_i ratio = ratio_e/ratio_i rad_e = int(math.sqrt(pop_e)) rad_i = int(math.sqrt(pop_i)) u = str(uuid.uuid1()) # ratio = ratio nest.ResetKernel() nest.SetKernelStatus({'local_num_threads': 8}) nest.SetDefaults('iaf_psc_alpha', { 'tau_m' : 20.0, 'V_th' : 20.0, 'E_L' : 10.0, 't_ref' : 2.0, 'V_reset' : 0.0, 'C_m' : 200.0, 'V_m' : 0.0 }) e = tp.CreateLayer({ 'rows': int(rad_e), 'columns': int(rad_e), 'elements': 'iaf_psc_alpha', 'extent': [extent, extent], 'edge_wrap': True }) i = tp.CreateLayer({ 'rows': int(rad_i), 'columns': int(rad_i), 'elements': 'iaf_psc_alpha', 'extent': [extent, extent], 'edge_wrap': True }) # nest.CopyModel('stdp_synapse', 'excitatory', {'Wmax': 10.0}) nest.CopyModel('static_synapse', 'excitatory') nest.CopyModel('static_synapse', 'inhibitory') e_i = { 'connection_type': 'divergent', 'synapse_model': 'excitatory', 'weights': { # 'uniform': { 'min': 0.0, 'max': wt_max } 'gaussian': {'p_center': 1., 'sigma': 1.} }, 'delays': { 'uniform': { 'min': delay_min, 'max': delay_max } } } i_e = { 'connection_type': 'divergent', 'synapse_model': 'inhibitory', 'weights': { # 'uniform': { 'min': wt_min*ratio, 'max': 0.0 } 'gaussian': {'p_center': -1.*ratio, 'sigma': 1.} }, 'delays': { 'uniform': { 'min': delay_min, 'max': delay_max } } } nest.CopyModel('ac_generator', 'ac', {'amplitude': ac_amp, 'frequency': ac_freq}) nest.CopyModel('dc_generator', 'dc', {'amplitude': ac_amp}) ac = tp.CreateLayer({ 'rows': 1, 'columns': 1, 'elements': 'ac', 'extent': [extent, extent] }) detector = tp.CreateLayer({ 'rows': 1, 'columns': 1, 'elements': 'spike_detector', 'extent': [extent, extent] }) tp.ConnectLayers(e, i, e_i) tp.ConnectLayers(i, e, i_e) tp.ConnectLayers(ac, e, {'connection_type': 'divergent'}) tp.ConnectLayers(ac, i, {'connection_type': 'divergent'}) tp.ConnectLayers(e, detector, {'connection_type': 'divergent'}) # tp.ConnectLayers(i, detector, {'connection_type': 'divergent'}) nest.Simulate(1000) n1 = e[0] + 1 n1id = tuple([x for x in xrange(n1, int(n1+(rad_e*rad_e)))]) n1c = nest.GetConnections(n1id) w1 = nest.GetStatus(n1c, 'weight') # pylab.hist(w1, bins=100) pylab.figure() n2 = i[0] + 1 n2id = tuple([x for x in xrange(n2, int(n2+(rad_i*rad_i)))]) n2c = nest.GetConnections(n2id) w2 = nest.GetStatus(n2c, 'weight') # pylab.hist(w2, bins=100) # pylab.figure() spike_rows = 1 spike_id = detector[0]+1 spike_ids = tuple([x for x in xrange(spike_id, spike_id+(spike_rows*spike_rows))]) # nest.raster_plot.from_device(spike_ids, hist=True) # find how many times it spiked sid = nest.GetStatus(spike_ids, 'events') # h = pylab.hist(sid[0]['times'], bins=100) h = np.histogram(sid[0]['times'], bins=100) h = h[0][h[0] > 0.] 
# pylab.show()
    print "Spiked " + str(len(h)) + " times"

    return ({
        'spikes': int(len(h)),
        'total': total,
        'ratio_e': ratio_e,
        'ratio_i': ratio_i,
        'extent': extent,
        'wt_max': wt_max,
        'wt_min': wt_min,
        'delay_min': delay_min,
        'delay_max': delay_max,
        'ac_amp': ac_amp,
        'ac_freq': ac_freq,
    })


total = 1000
ratio_e = 0.8
ratio_i = 0.2
extent = 1.0
wt_max = .1  # keep increasing this to see awesome bifurcation effect, around 0.8-0.9
wt_min = -.1
delay_min = 0.1
delay_max = 0.2
ac_amp = 300.0
ac_freq = 100.0

results = []
for ac_freq in xrange(2, 50, 2):
    for ac_amp in xrange(300, 500, 10):
        r = {}
        try:
            r = run(total, ratio_e, ratio_i, extent, wt_max, wt_min,
                    delay_min, delay_max, float(ac_amp), float(ac_freq))
        except Exception:
            # record failed runs with zero spikes so the sweep grid stays complete
            r = {
                'spikes': 0,
                'total': total,
                'ratio_e': ratio_e,
                'ratio_i': ratio_i,
                'extent': extent,
                'wt_max': wt_max,
                'wt_min': wt_min,
                'delay_min': delay_min,
                'delay_max': delay_max,
                'ac_amp': float(ac_amp),
                'ac_freq': float(ac_freq)
            }
        results.append(r)
        pylab.close('all')
        print "---------------------------------------------------------------"
        print r
        # print "total = " + str(total)
        # print "ratio_e = " + str(ratio_e)
        # print "ratio_i = " + str(ratio_i)
        # print "extent = " + str(extent)
        # print "wt_max = " + str(wt_max)
        # print "wt_min = " + str(wt_min)
        # print "delay_min = " + str(delay_min)
        # print "delay_max = " + str(delay_max)
        # print "ac_amp = " + str(ac_amp)
        # print "ac_freq = " + str(ac_freq)
        print "---------------------------------------------------------------"

# the result dicts are keyed by strings, not by the loop variables
amps = [a['ac_amp'] for a in results]
freqs = [a['ac_freq'] for a in results]
sps = [a['spikes'] for a in results]

from mpl_toolkits.mplot3d import Axes3D

ax = Axes3D(pylab.figure())
ax.scatter(freqs, amps, sps)
pylab.show()
synergetics/nest_expermiments
oscillation frequency preference/oscillation_inhibition.py
Python
mit
5,328
[ "Gaussian" ]
1b0c0b677266ba2189f24b64d8d2b7e55640268c9bf20dc09f4b23c2eb68f567
#!/bin/env python # -*- coding: utf-8 -*- # # Copyright (c) 2014 Stefan Seefeld # All rights reserved. # # This file is part of OpenVSIP. It is made available under the # license contained in the accompanying LICENSE.BSD file. # # This demo is based on the k-Ω Beamformer Example by Randall Judd # (http://portals.omg.org/hpec/files/vsipl/CD/JuddKO.pdf). # See also the implementations in # https://github.com/rrjudd/jvsip/blob/master/examples/komegaExamples from vsip import random from vsip.signal import * from vsip.signal import window from vsip.signal.fftm import * from vsip.signal.fir import fir from vsip.selgen import generation from vsip.math import elementwise as elm from vsip.math import reductions as reduce from vsip.math import matvec from vsip.signal.freqswap import freqswap from vsip.selgen.clip import clip import math from matplotlib import pyplot speed=1500 # propagation speed rate=1024 # sample rate nts=1024 # length of time series spacing=1.5 # sensor spacing sensors=128 # number of sensors averages=8 # data sets to average # tones in hertz for sinusoids present in simulated signal sim_freqs=[450, 300, 150, 50, 55, 95] # corresponding bearings in degrees for above sinusoids sim_bearings=[50, 130, 130, 90, 120, 70] # number of simulated noise directions nnoise=64 class time_series(object): """ Simulate acoustic data with narrow band point sources from multiple directions and isotropic, band-limited noise. """ def __init__(self): # length of narrow band simulated noise signal L = int(2 * rate/(sensors * spacing/speed) + nts + 1) # Kaiser window kernel to reject out of band noise kernel = window.kaiser(float, 6, 1) # Window-based FIR filter design using above kernel self.fir = fir(kernel, symmetry.none, 2*L, 2, obj_state.save, 0, alg_hint.time) # band-limited noise vectors self.noise = vsip.vector(float, 2*L) self.bl_noise = vsip.vector(float, L) # Generate white gaussian noise self.rand = vsip.random.rand(float, 7, True) # time series indices self.t = generation.ramp(float, 0.,1./rate, nts) self.t_dt = vsip.vector(float, nts) # simulated data matrix in which each row corresponds to a sensor self.data = vsip.matrix(float, sensors, nts) self.d_t = spacing/speed # simulate narrow band data composed purely of sinusoids specified earlier def nb_sim(self): for i in range(len(sim_freqs)): # pick a center frequence for sinusoid f=sim_freqs[i] # Calculate effective angle based on its bearing b=self.d_t * math.cos(sim_bearings[i] * math.pi/180.0) for j in range(sensors): # Introduce phase shifts in selected sinusoid in accordance with the sensor ID dt = float(j) * b self.t_dt = self.t + dt self.t_dt *= 2 * math.pi * f self.t_dt = elm.cos(self.t_dt) self.t_dt *= 3 # Multiplex phase shifted sinusoids corresponding to a specific sensor self.data[j,:] += self.t_dt def noise_sim(self): #sensor-to-sensor travel time d_t=self.d_t * rate # array travel time at end o_0 = d_t * sensors + 1 # angle step a_stp = math.pi/nnoise for j in range(nnoise): a_crct = math.cos(float(j) * a_stp) # Generate white noise self.noise = self.rand.randn(self.noise.length()) # Get it colored by rejecting out-of-band components self.fir(self.noise, self.bl_noise) # Adjust noise variance self.bl_noise *= 12./nnoise for i in range(sensors): offset = int(o_0 + i * d_t * a_crct) # Mix noise with data to model a noisy channel self.data[i,:] += self.bl_noise[offset:offset + nts] # Subtract average value from mixture to remove any biasing self.data -= reduce.meanval(self.data) def reset(self): self.data[:] = 0. 
def __call__(self): return self.data class k_omega(object): def __init__(self): # number of frequency bins frequencies=int(nts/2) + 1 # Space-frequency matrix self.cfreq=vsip.matrix(complex, sensors, frequencies) # Real-valued space-frequency power matrix self.rfreq=vsip.matrix(float, sensors, frequencies) # K-omega output matrix initialized self.gram=vsip.matrix(float, sensors, frequencies, 0) # FFT object along time domain self.rcfftm=fftm(float, fwd, sensors, nts, 1, vsip.row, 0, alg_hint.time) # FFT object along spatial domain self.ccfftm=fftm(complex, fwd, sensors, frequencies, 1, vsip.col, 0, alg_hint.time) # Window taper object around time axis self.ts_taper=window.hanning(float, nts) # Window taper object around spatial axis self.array_taper=window.hanning(float, sensors) def __call__(self, data): # data tapers # Reject high frequency components along time axis data = matvec.vmmul(vsip.row, self.ts_taper, data) # Reject high frequency components along spatial axis data = matvec.vmmul(vsip.col, self.array_taper, data) # 2D FFT: characteristic of k-omega beamforming, first around time axis self.rcfftm(data, self.cfreq) # and then around spatial axis self.ccfftm(self.cfreq) # Calculate averaged power spectrum self.rfreq = elm.magsq(self.cfreq) self.rfreq *= 1.0/averages # Accumulate result in k-omega output self.gram += self.rfreq def get(self): return self.gram def main(): # initialize input/output objects ts=time_series() kw=k_omega() for i in range(averages): # initialize time series ts.reset() # Simulate narrow band acoustic data ts.nb_sim() # Simulate colored Gaussian noise ts.noise_sim() # Perform k-omega beamforming kw(ts()) # Obtain the result of beamforming gram=kw.get() # rearrange to bring the zero-frequency component to the center of the spectrum for i in range(gram.size(1)): gram[:,i] = freqswap(gram[:,i]) # Post-process the beamformer output to make it suitable for charting max, idx = reduce.maxval(gram) avg = reduce.meanval(gram) gram = clip(gram,0.0,max,avg/100000.0,max) # plot log-magnitude of beamformed power spectrum gram = elm.log10(gram) min, idx = reduce.minval(gram) gram -= min # Normalize the log-magnitude plot max, idx = reduce.maxval(gram) gram *= 1.0/max fig = pyplot.figure(1,figsize=(10,4)) ax = fig.add_axes([0.10,0.10,0.85,0.80]) ax.set_yticklabels(['0','0','30','60','90','120','150','180']) ax.yaxis.set_ticks_position('right') pyplot.imshow(gram) # Label the plot axes appropriately pyplot.title(u'K-Ω Beamformer Output') pyplot.xlabel('Frequency') pyplot.ylabel(r'$\frac{cos(\theta)}{\lambda}$',fontsize=16,rotation='horizontal') pyplot.colorbar() # Display the plot pyplot.show() if __name__ == '__main__': main()
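# --- Illustrative sketch (not part of the original example) ---
# The k_omega class above implements the classic recipe: taper, FFT along
# time, FFT along the sensor axis, then accumulate |.|^2. The same idea in
# plain NumPy, on stand-in random data instead of the VSIP containers:
import numpy as np

nsensors, nsamples = 128, 1024
data = np.random.randn(nsensors, nsamples)   # placeholder for the simulated time series
cfreq = np.fft.rfft(data, axis=1)            # time axis -> frequency bins
kspec = np.fft.fft(cfreq, axis=0)            # sensor axis -> wavenumber bins
gram = np.abs(kspec) ** 2                    # one snapshot of the k-omega power
print(gram.shape)                            # (128, 513)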
openvsip/openvsip
share/examples/python/beamformer.py
Python
gpl-2.0
7,062
[ "Gaussian" ]
b020dba587e70910966579647a0386f0e694d7318162e64aec8964dd26eab29d
'''Various functions to read PDB files for Dragonfly''' from __future__ import print_function from builtins import range import sys import logging import os from collections import OrderedDict from six.moves.urllib.request import urlopen import numpy as np from scipy.interpolate import interp1d try: import pyfftw WITH_PYFFTW = True except ImportError: sys.stderr.write('No PyFFTW. FFTs will be slow\n') WITH_PYFFTW = False def fetch_pdb(pdb_code): '''Get PDB file from aux directory if available, else download from RCSB''' print(pdb_code) if os.path.isfile('aux/%s.pdb' % (pdb_code.upper())): pass else: pdb_data = urlopen('http://www.rcsb.org/pdb/files/%s.pdb' % pdb_code.upper()).read() with open('aux/%s.pdb' % pdb_code.upper(), 'wb') as fptr: fptr.write(pdb_data) def _find_atom_types(pdb_file): atoms = [] with open(pdb_file) as fin: for line in fin: line = line.strip() if line[0:4] == "ATOM" or line[0:6] == "HETATM": atom_label = line[76:78].lstrip() if atom_label not in atoms: atoms.append(atom_label) return atoms def _interp_scattering(aux_dir, elem): with open(os.path.join(aux_dir, elem.lower()+".nff")) as fptr: lines = [l.strip().split() for l in fptr.readlines()] arr = np.asarray(lines[1:]).astype('float') energy, scatt_f0, scatt_f1 = arr.T i_f0 = interp1d(energy, scatt_f0, kind='linear') i_f1 = interp1d(energy, scatt_f1, kind='linear') return i_f0, i_f1 def _find_mass(aux_dir, elem): with open(os.path.join(aux_dir, "atom_mass.txt")) as fptr: lines = [l.strip().split() for l in fptr.readlines()] for line, mass in lines: if line.lower() == elem.lower(): return float(mass) return None def _make_scatt_list(atom_types, aux_dir, energy): scatt_list = OrderedDict() for elem in atom_types: scatt_f0, _ = _interp_scattering(aux_dir, elem) mass = _find_mass(aux_dir, elem) scatt_list[elem.upper()] = [float(scatt_f0(energy)), mass] return scatt_list def _wavelength_in_A_to_eV(wavelength_in_A): # pylint: disable=C0103 return 12398.419 / wavelength_in_A def _append_atom(atomlist, atom, pdb_line): atomlist.append([atom[0], float(pdb_line[30:38].strip()), float(pdb_line[38:46].strip()), float(pdb_line[46:54].strip()), atom[1]]) def _get_atom_coords(pdb_file, scatt): tmp_atoms = [] with open(pdb_file) as fin: for line in fin: line = line.strip() if line[0:4] == "ATOM" or line[0:6] == "HETATM": # occupancy > 50 % || one of either if occupancy = 50 % (occ, tag) = (float(line[56:60]), line[16]) if (occ > 0.5) | ((occ == 0.5) & (tag != "B")): atom_label = line[76:78].lstrip().upper() if atom_label in scatt: _append_atom(tmp_atoms, scatt[atom_label], line) else: logstr = line[76:78] + " not in the current atom list" logging.info(logstr) return np.asarray(tmp_atoms) def _read_symmetry(pdb_file): '''First symmetry operation is identity, followed by non-trivial symmetries''' sym_list = [] trans_list = [] with open(pdb_file) as fin: for line in fin: line = line.strip() if line[13:18] == "BIOMT": sym_list.append([float(line[24:33]), float(line[34:43]), float(line[44:53])]) trans_list.append(float(line[58:68])) sym_arr = np.asarray(sym_list).reshape(-1, 3, 3) trans_arr = np.asarray(trans_list).reshape(-1, 3) return sym_arr, trans_arr def _apply_symmetry(atoms, sym_list, trans_list): if len(sym_list) == 0: return atoms org_atoms = atoms[:, 1:4].T.copy() f0s = np.asarray([atoms[:, 0]]).T.copy() mass = np.asarray([atoms[:, 4]]).T.copy() total_ms = len(sym_list)*np.sum(mass) / 1.0e6 logging.info("Mass of particle (MDa), %.3f", total_ms) out_atoms = np.zeros((len(sym_list),)+atoms.shape) for i, sym_op in enumerate(sym_list): trans
= trans_list[i] vecs = sym_op.dot(org_atoms).T + trans to_app = np.concatenate((f0s, vecs, mass), axis=1) out_atoms[i] = to_app.copy() return out_atoms.reshape(-1, 5) def atoms_to_density_map(atoms, voxel_size): '''Create electron density map from atom coordinate list''' (x, y, z) = atoms[:, 1:4].T.copy() (x_min, x_max) = (x.min(), x.max()) (y_min, y_max) = (y.min(), y.max()) (z_min, z_max) = (z.min(), z.max()) grid_len = max([x_max - x_min, y_max - y_min, z_max - z_min]) r_val = np.int(np.ceil(grid_len / voxel_size)) if r_val % 2 == 0: r_val += 1 logging.info("Length of particle (voxels), %d", r_val) elec_den = atoms[:, 0].copy() x = (x-0.5*(x_max+x_min-grid_len))/voxel_size y = (y-0.5*(y_max+y_min-grid_len))/voxel_size z = (z-0.5*(z_max+z_min-grid_len))/voxel_size bins = np.arange(r_val+1) all_bins = np.vstack((bins, bins, bins)) coords = np.asarray([x, y, z]) integ = np.floor(coords) frac = coords - integ ix, iy, iz = tuple(integ) # pylint: disable=C0103 fx, fy, fz = tuple(frac) # pylint: disable=C0103 cx, cy, cz = 1.-fx, 1.-fy, 1.-fz # pylint: disable=C0103 h_total = np.histogramdd(np.asarray([ix, iy, iz]).T, weights=elec_den*cx*cy*cz, bins=all_bins)[0] h_total += np.histogramdd(np.asarray([ix, iy, iz+1]).T, weights=elec_den*cx*cy*fz, bins=all_bins)[0] h_total += np.histogramdd(np.asarray([ix, iy+1, iz]).T, weights=elec_den*cx*fy*cz, bins=all_bins)[0] h_total += np.histogramdd(np.asarray([ix, iy+1, iz+1]).T, weights=elec_den*cx*fy*fz, bins=all_bins)[0] h_total += np.histogramdd(np.asarray([ix+1, iy, iz]).T, weights=elec_den*fx*cy*cz, bins=all_bins)[0] h_total += np.histogramdd(np.asarray([ix+1, iy, iz+1]).T, weights=elec_den*fx*cy*fz, bins=all_bins)[0] h_total += np.histogramdd(np.asarray([ix+1, iy+1, iz]).T, weights=elec_den*fx*fy*cz, bins=all_bins)[0] h_total += np.histogramdd(np.asarray([ix+1, iy+1, iz+1]).T, weights=elec_den*fx*fy*fz, bins=all_bins)[0] return h_total def low_pass_filter_density_map(in_arr, damping=-1., thr=1.E-3, num_cycles=2, threads=4): '''Convolve density map by Gaussian with given damping coefficient''' xl, yl, zl = in_arr.shape # pylint: disable=C0103 xx, yy, zz = np.mgrid[-1:1:xl*1j, -1:1:yl*1j, -1:1:zl*1j] # pylint: disable=C0103 fil = np.fft.ifftshift(np.exp(damping*(xx*xx + yy*yy + zz*zz))) out_arr = in_arr.copy() if WITH_PYFFTW: for _ in range(num_cycles): ft_arr = fil * pyfftw.interfaces.numpy_fft.fftn(out_arr, planner_effort='FFTW_ESTIMATE', threads=threads) out_arr = np.real(pyfftw.interfaces.numpy_fft.ifftn(ft_arr, planner_effort='FFTW_ESTIMATE', threads=threads)) out_arr *= (out_arr > thr) else: for _ in range(num_cycles): ft_arr = fil * np.fft.fftn(out_arr) out_arr = np.real(np.fft.ifftn(ft_arr)) out_arr *= (out_arr > thr) return out_arr.copy() def process(pdb_file, aux_dir, wavelength): '''Get atom scattering list from PDB file Generates list of coordinates, scattering f1 and mass for each atom ''' energy = _wavelength_in_A_to_eV(wavelength) atom_types = _find_atom_types(pdb_file) scatt_list = _make_scatt_list(atom_types, aux_dir, energy) atoms = _get_atom_coords(pdb_file, scatt_list) sym_l, trans_l = _read_symmetry(pdb_file) return _apply_symmetry(atoms, sym_l, trans_l)
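# --- Illustrative sketch (not part of the original module) ---
# atoms_to_density_map() above deposits each atom onto the grid with
# trilinear (cloud-in-cell) weights: the electron count is split over the
# 8 surrounding voxels via the 8 histogramdd calls. The same scheme in 1D,
# with made-up positions and weights, shows the idea compactly:
import numpy as np

pos = np.array([1.25, 2.9, 2.9])   # hypothetical coordinates in grid units
den = np.array([6.0, 8.0, 1.0])    # hypothetical electron counts
bins = np.arange(6)
ix = np.floor(pos)                 # left grid point
fx = pos - ix                      # fractional offset
cx = 1. - fx
grid = np.histogram(ix, weights=den*cx, bins=bins)[0]
grid += np.histogram(ix + 1, weights=den*fx, bins=bins)[0]
print(grid)  # the atom at 1.25 puts 75% of its weight on bin 1, 25% on bin 2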
duaneloh/Dragonfly
utils/py_src/process_pdb.py
Python
gpl-3.0
8,297
[ "Gaussian" ]
61e11305569fc1d1733968121eee872530cba029189524d58ace35c724b6c78b
import h2o_cmd, h2o_util import h2o2 as h2o import re, random, math from h2o_test import check_sandbox_for_errors, dump_json, verboseprint import h2o_nodes from tabulate import tabulate # recursively walk an object and check that it has valid numbers only (no "" or nan or inf) def check_obj_has_good_numbers(obj, hierarchy="", curr_depth=0, max_depth=4, allowNaN=False): """Represent instance of a class as JSON. Arguments: obj -- any object Return: String that represent JSON-encoded object. """ def serialize(obj, hierarchy="", curr_depth=0): """Recursively walk object's hierarchy. Limit to max_depth""" if curr_depth>max_depth: return if isinstance(obj, (bool, int, long, float, basestring)): try: number = float(obj) print "Yay!", hierarchy, number except: if obj is None: print "Not Yay! how come you're giving me None for a coefficient? %s %s" % (hierarchy, obj) elif str(obj)=="": print "Not Yay! how come you're giving me an empty string for a coefficient? %s %s" % (hierarchy, obj) else: raise Exception("%s %s %s is not a valid float" % (hierarchy, obj, type(obj))) # hack for now number = 0.0 if not allowNaN and math.isnan(number): raise Exception("%s %s is a NaN" % (hierarchy, obj)) if not allowNaN and math.isinf(number): raise Exception("%s %s is an Inf" % (hierarchy, obj)) return number elif isinstance(obj, dict): obj = obj.copy() for key in obj: obj[key] = serialize(obj[key], hierarchy + ".%s" % key, curr_depth+1) return obj elif isinstance(obj, (list, tuple)): return [serialize(item, hierarchy + "[%s]" % i, curr_depth+1) for (i, item) in enumerate(obj)] elif hasattr(obj, '__dict__'): return serialize(obj.__dict__, hierarchy, curr_depth+1) else: return repr(obj) # Don't know how to handle, convert to string return (serialize(obj, hierarchy, curr_depth+1)) #************************************************************88 # where do we get the CM? def simpleCheckGLM(self, model, parameters, labelList, labelListUsed, allowFailWarning=False, allowZeroCoeff=False, prettyPrint=False, noPrint=False, maxExpectedIterations=None, doNormalized=False, allowNaN=False): # FIX! the structure is all different return warnings = '' # binomial = model.binomial residual_deviance = model.training_metrics.residual_deviance threshold = model.training_metrics.threshold check_obj_has_good_numbers(threshold, 'threshold', allowNaN=allowNaN) auc = model.AUC # NaN if not logistic # check_obj_has_good_numbers(auc, 'model.AUC') best_lambda_idx = model.best_lambda_idx model_category = model.model_category name = model.name residual_degrees_of_freedom = model.residual_degrees_of_freedom # is this no longer used? coefficients_magnitude = model.coefficients_magnitude null_deviance = model.null_deviance check_obj_has_good_numbers(null_deviance, 'model.null_deviance', allowNaN=allowNaN) null_degrees_of_freedom = model.null_degrees_of_freedom check_obj_has_good_numbers(null_degrees_of_freedom, 'model.null_degrees_of_freedom', allowNaN=allowNaN) domains = model.domains # when is this okay to be NaN? AIC = model.AIC check_obj_has_good_numbers(AIC, 'model.AIC', allowNaN=allowNaN) names = model.names coeffs_names = model.coefficients_table.data[0] # these are returned as quoted strings.
Turn them into numbers temp = model.coefficients_table.data[1] assert len(coeffs_names)==len(temp), "%s %s" % (len(coeffs_names), len(temp)) # we need coefficients to be floats or empty check_obj_has_good_numbers(temp, 'model.coeffs', allowNaN=False) # print "temp", temp[0:10] # print "temp[5489:5500]", temp[5489:5500] # UPDATE: None (null json) is legal for coeffs coeffs = map(lambda x : float(x) if (x is not None and str(x) != "") else 0, temp) intercept = coeffs[-1] interceptName = coeffs_names[-1] assert interceptName == 'Intercept' assert len(coeffs) == len(coeffs_names), "%s %s" % (len(coeffs), len(coeffs_names)) # FIX! if a coeff is zeroed/ignored, it doesn't show up? # get rid of intercept in glm response # assert (len(coeffs)-1) == len(labelListUsed, \ # "%s %s %s %s" % (len(coeffs), len(labelListUsed), coeffs, labelListUsed) # labelList still has the response column? # ignored columns aren't in model.names, but output response is. # labelListUsed has the response col removed so add 1 # Hmm..dropped coefficients again? can't do this check? # assert len(model.names) == len(labelListUsed), \ # "%s %s %s %s" % (len(model.names), len(labelListUsed), model.names, labelList) # this is no longer true! # assert model.threshold!=0 print "len(coeffs)", len(coeffs) print "coeffs:", coeffs # last one is intercept if interceptName != "Intercept" or abs(intercept)<1e-26: raise Exception("'Intercept' should be last in coeffs_names %s %s" % (interceptName, intercept)) y = parameters['response_column'] cString = "\n" for i,c in enumerate(coeffs_names): cString += "%s: %.5e " % (coeffs_names[i], coeffs[i]) print cString print "\nH2O intercept:\t\t%.5e" % intercept print "\nTotal # of coeffs:", len(coeffs_names) # intercept is buried in there too absIntercept = abs(float(intercept)) self.assertGreater(absIntercept, 1e-26, ( "abs. value of GLM coeffs['Intercept'] is " + str(absIntercept) + ", not >= 1e-26 for Intercept" + "\n" + "parameters:" + dump_json(parameters) )) if (not allowZeroCoeff) and (len(coeffs)>1): s = 0.0 for c in coeffs: s += abs(float(c)) self.assertGreater(s, 1e-26, ( "sum of abs. value of GLM coeffs/intercept is " + str(s) + ", not >= 1e-26\n" + "parameters:" + dump_json(parameters) )) # shouldn't have any errors check_sandbox_for_errors() return (warnings, coeffs, intercept) #************************************************************88 def pickRandGlmParams(paramDict, params): colX = 0 randomGroupSize = random.randint(1,len(paramDict)) for i in range(randomGroupSize): randomKey = random.choice(paramDict.keys()) randomV = paramDict[randomKey] randomValue = random.choice(randomV) params[randomKey] = randomValue if (randomKey=='x'): colX = randomValue # Only identity, log and inverse links are allowed for family=gaussian. # force legal family/link combos if 'family' not in params: # defaults to gaussian if 'link' in params and params['link'] not in ('identity', 'log', 'inverse', 'familyDefault'): params['link'] = None elif params['family'] is not None and 'link' in params and params['link'] is not None: # only log/identity is legal? if params['family'] == 'poisson': if params['link'] not in ('identity', 'log', 'familyDefault'): params['link'] = None # only tweedie/tweedie is legal?
elif params['family'] == 'tweedie': if params['link'] not in ('tweedie',): params['link'] = None elif params['family'] == 'binomial': # only logit and log if params['link'] not in ('logit', 'log', 'familyDefault'): params['link'] = None elif params['family'] == 'gaussian': if params['link'] not in ('identity', 'log', 'inverse', 'familyDefault'): params['link'] = None elif params['family'] is None: # defaults to gaussian if 'link' in params and params['link'] not in ('identity', 'log', 'inverse', 'familyDefault'): params['link'] = None if 'lambda_search' in params and params['lambda_search']==1: if 'nlambdas' in params and params['nlambdas']<=1: params['nlambdas'] = 2 return colX def simpleCheckGLMScore(self, glmScore, family='gaussian', allowFailWarning=False, allowNaN=False, **kwargs): warnings = None if 'warnings' in glmScore: warnings = glmScore['warnings'] # stop on failed x = re.compile("failed", re.IGNORECASE) # don't stop if fail to converge c = re.compile("converge", re.IGNORECASE) for w in warnings: print "\nwarning:", w if re.search(x,w) and not allowFailWarning: if re.search(c,w): # ignore the fail to converge warning now pass else: # stop on other 'fail' warnings (are there any? fail to solve? raise Exception(w) validation = glmScore['validation'] validation['err'] = h2o_util.cleanseInfNan(validation['err']) validation['nullDev'] = h2o_util.cleanseInfNan(validation['nullDev']) validation['resDev'] = h2o_util.cleanseInfNan(validation['resDev']) print "%15s %s" % ("err:\t", validation['err']) print "%15s %s" % ("nullDev:\t", validation['nullDev']) print "%15s %s" % ("resDev:\t", validation['resDev']) # threshold only there if binomial? # auc only for binomial if family=="binomial": print "%15s %s" % ("AUC:\t", validation['AUC']) print "%15s %s" % ("threshold:\t", validation['threshold']) err = False if family=="poisson" or family=="gaussian": if 'AIC' not in validation: print "AIC is missing from the glm json response" err = True if not allowNaN and math.isnan(validation['err']): print "Why is this err = 'nan'?? %6s %s" % ("err:\t", validation['err']) err = True if not allowNaN and math.isnan(validation['resDev']): print "Why is this resDev = 'nan'?? %6s %s" % ("resDev:\t", validation['resDev']) err = True if err: raise Exception ("How am I supposed to tell that any of these errors should be ignored?") # legal? if not allowNaN and math.isnan(validation['nullDev']): ## emsg = "Why is this nullDev = 'nan'?? %6s %s" % ("nullDev:\t", validation['nullDev']) ## raise Exception(emsg) pass def oldSimpleCheckGLM(self, glm, colX, allowFailWarning=False, allowZeroCoeff=False, prettyPrint=False, noPrint=False, maxExpectedIterations=None, doNormalized=False, **kwargs): # if we hit the max_iter, that means it probably didn't converge. should be 1-maxExpectedIter # h2o GLM will verboseprint the result and print errors. # so don't have to do that # different when cross validation is used? No trainingErrorDetails? GLMModel = glm['glm_model'] if not GLMModel: raise Exception("GLMModel didn't exist in the glm response? %s" % dump_json(glm)) warnings = None if 'warnings' in GLMModel and GLMModel['warnings']: warnings = GLMModel['warnings'] # stop on failed x = re.compile("failed", re.IGNORECASE) # don't stop if fail to converge c = re.compile("converge", re.IGNORECASE) for w in warnings: print "\nwarning:", w if re.search(x,w) and not allowFailWarning: if re.search(c,w): # ignore the fail to converge warning now pass else: # stop on other 'fail' warnings (are there any? fail to solve?
raise Exception(w) # for key, value in glm.iteritems(): print key # not in GLMGrid? # FIX! don't get GLMParams if it can't solve? GLMParams = GLMModel['glm'] family = GLMParams["family"] # number of submodels = number of lambda # min of 2. lambda_max is first submodels = GLMModel['submodels'] # since all our tests?? only use one lambda, the best_lamda_idx should = 1 best_lambda_idx = GLMModel['best_lambda_idx'] print "best_lambda_idx:", best_lambda_idx lambda_max = GLMModel['lambda_max'] print "lambda_max:", lambda_max # currently lambda_max is not set by tomas. ..i.e.not valid if 1==0 and (lambda_max <= submodels[best_lambda_idx].lambda_value): raise Exception("lambda_max %s should always be > the lambda result %s we're checking" % (lambda_max, submodels[best_lambda_idx].lambda_value)) # submodels0 = submodels[0] # submodels1 = submodels[-1] # hackery to make it work when there's just one if (best_lambda_idx >= len(submodels)) or (best_lambda_idx < 0): raise Exception("best_lambda_idx: %s should point to one of lambdas (which has len %s)" % (best_lambda_idx, len(submodels))) if (best_lambda_idx >= len(submodels)) or (best_lambda_idx < 0): raise Exception("best_lambda_idx: %s should point to one of submodels (which has len %s)" % (best_lambda_idx, len(submodels))) submodels1 = submodels[best_lambda_idx] # hackery to make it work when there's just one iterations = submodels1['iteration'] print "GLMModel/iterations:", iterations # if we hit the max_iter, that means it probably didn't converge. should be 1-maxExpectedIter if maxExpectedIterations is not None and iterations > maxExpectedIterations: raise Exception("Convergence issue? GLM did iterations: %d which is greater than expected: %d" % (iterations, maxExpectedIterations) ) if 'validation' not in submodels1: raise Exception("Should be a 'validation' key in submodels1: %s" % dump_json(submodels1)) validationsList = submodels1['validation'] validations = validationsList # xval. compare what we asked for and what we got. n_folds = kwargs.setdefault('n_folds', None) print "GLMModel/validations" validations['null_deviance'] = h2o_util.cleanseInfNan(validations['null_deviance']) validations['residual_deviance'] = h2o_util.cleanseInfNan(validations['residual_deviance']) print "%15s %s" % ("null_deviance:\t", validations['null_deviance']) print "%15s %s" % ("residual_deviance:\t", validations['residual_deviance']) # threshold only there if binomial? # auc only for binomial if family=="binomial": print "%15s %s" % ("auc:\t", validations['auc']) best_threshold = validations['best_threshold'] thresholds = validations['thresholds'] print "%15s %s" % ("best_threshold:\t", best_threshold) # have to look up the index for the cm, from the thresholds list best_index = None for i,t in enumerate(thresholds): if t >= best_threshold: # ends up using next one if not present best_index = i break assert best_index!=None, "%s %s" % (best_threshold, thresholds) print "Now printing the right 'best_threshold' %s from '_cms" % best_threshold # cm = glm['glm_model']['submodels'][0]['validation']['_cms'][-1] submodels = glm['glm_model']['submodels'] # FIX! this isn't right if we have multiple lambdas? different submodels? cms = submodels[0]['validation']['_cms'] self.assertEqual(len(thresholds), len(cms), msg="thresholds %s and cm %s should be lists of the same size. %s" % (len(thresholds), len(cms), thresholds)) # FIX! best_threshold isn't necessarily in the list. 
jump out if >= assert best_index<len(cms), "%s %s" % (best_index, len(cms)) # if we want 0.5..rounds to int # mid = len(cms)/2 # cm = cms[mid] cm = cms[best_index] print "cm:", dump_json(cm['_arr']) predErr = cm['_predErr'] classErr = cm['_classErr'] # compare to predErr # pctWrong = h2o_gbm.pp_cm_summary(cm['_arr']); # FIX! pctWrong = 0 print "predErr:", predErr print "calculated pctWrong from cm:", pctWrong print "classErr:", classErr # self.assertLess(pctWrong, 9,"Should see less than 9% error (class = 4)") print "\nTrain\n==========\n" # print h2o_gbm.pp_cm(cm['_arr']) if family=="poisson" or family=="gaussian": print "%15s %s" % ("AIC:\t", validations['AIC']) coefficients_names = GLMModel['coefficients_names'] # print "coefficients_names:", coefficients_names idxs = submodels1['idxs'] print "idxs:", idxs coefficients_names = coefficients_names # always check both normalized and normal coefficients norm_beta = submodels1['norm_beta'] # if norm_beta and len(coefficients_names)!=len(norm_beta): # print len(coefficients_names), len(norm_beta) # raise Exception("coefficients_names and normalized_norm_beta from h2o json not same length. coefficients_names: %s normalized_norm_beta: %s" % (coefficients_names, norm_beta)) beta = submodels1['beta'] # print "beta:", beta # if len(coefficients_names)!=len(beta): # print len(coefficients_names), len(beta) # raise Exception("coefficients_names and beta from h2o json not same length. coefficients_names: %s beta: %s" % (coefficients_names, beta)) # test wants to use normalized? if doNormalized: beta_used = norm_beta else: beta_used = beta coefficients = {} # create a dictionary with name, beta (including intercept) just like v1 for i,b in zip(idxs, beta_used[:-1]): name = coefficients_names[i] coefficients[name] = b print "len(idxs)", len(idxs), "len(beta_used)", len(beta_used) print "coefficients:", coefficients print "beta:", beta print "norm_beta:", norm_beta coefficients['Intercept'] = beta_used[-1] print "len(coefficients_names)", len(coefficients_names) print "len(idxs)", len(idxs) print "idxs[-1]", idxs[-1] print "intercept demapping info:", \ "coefficients_names[-i]:", coefficients_names[-1], \ "idxs[-1]:", idxs[-1], \ "coefficients_names[idxs[-1]]:", coefficients_names[idxs[-1]], \ "beta_used[-1]:", beta_used[-1], \ "coefficients['Intercept']", coefficients['Intercept'] # last one is intercept interceptName = coefficients_names[idxs[-1]] if interceptName != "Intercept" or abs(beta_used[-1])<1e-26: raise Exception("'Intercept' should be last in coefficients_names and beta %s %s %s" %\ (idxs[-1], beta_used[-1], "-"+interceptName+"-")) # idxs has the order for non-zero coefficients, it's shorter than beta_used and coefficients_names # new 5/28/14. glm can point to zero coefficients # for i in idxs: # if beta_used[i]==0.0: ## raise Exception("idxs shouldn't point to any 0 coefficients i: %s %s:" % (i, beta_used[i])) if len(idxs) > len(beta_used): raise Exception("idxs shouldn't be longer than beta_used %s %s" % (len(idxs), len(beta_used))) intercept = coefficients.pop('Intercept', None) # intercept demapping info: idxs[-1]: 54 coefficients_names[[idxs[-1]]: Intercept beta_used[-1]: -6.6866753099 # the last one should be 'Intercept' ? coefficients_names.pop() # have to skip the output col! get it from kwargs # better always be there! y = kwargs['response'] # the dict keys are column headers if they exist...how to order those?
new: use the 'coefficients_names' # from the response # Tomas created 'coefficients_names which is the coefficient list in order. # Just use it to index coefficients! works for header or no-header cases # I guess now we won't print the "None" cases for dropped columns (constant columns!) # Because Tomas doesn't get everything in 'coefficients_names' if dropped by GLMQuery before # he gets it? def add_to_coefficient_list_and_string(c, cList, cString): if c in coefficients: cValue = coefficients[c] cValueString = "%s: %.5e " % (c, cValue) else: print "Warning: didn't see '" + c + "' in json coefficient response.",\ "Inserting 'None' with assumption it was dropped due to constant column" cValue = None cValueString = "%s: %s " % (c, cValue) cList.append(cValue) # we put each on newline for easy comparison to R..otherwise keep condensed if prettyPrint: cValueString = "H2O coefficient " + cValueString + "\n" # not mutable? return cString + cValueString # creating both a string for printing and a list of values cString = "" cList = [] # print in order using col_names # coefficients_names is input only now..same for header or no header, or expanded enums for c in coefficients_names: cString = add_to_coefficient_list_and_string(c, cList, cString) if prettyPrint: print "\nH2O intercept:\t\t%.5e" % intercept print cString else: if not noPrint: print "\nintercept:", intercept, cString print "\nTotal # of coefficients:", len(coefficients_names) # pick out the coefficient for the column we enabled for enhanced checking. Can be None. # FIX! temporary hack to deal with disappearing/renaming columns in GLM if (not allowZeroCoeff) and (colX is not None): absXCoeff = abs(float(coefficients[str(colX)])) # add kwargs to help debug without looking at console log self.assertGreater(absXCoeff, 1e-26, ( "abs. value of GLM coefficients['" + str(colX) + "'] is " + str(absXCoeff) + ", not >= 1e-26 for X=" + str(colX) + "\n" + "kwargs:" + dump_json(kwargs) )) # intercept is buried in there too absIntercept = abs(float(intercept)) self.assertGreater(absIntercept, 1e-26, ( "abs. value of GLM coefficients['Intercept'] is " + str(absIntercept) + ", not >= 1e-26 for Intercept" + "\n" + "kwargs:" + dump_json(kwargs) )) # this is good if we just want min or max # maxCoeff = max(coefficients, key=coefficients.get) # for more, just invert the dictionary and ... if (len(coefficients)>0): maxKey = max([(abs(coefficients[x]),x) for x in coefficients])[1] print "H2O Largest abs. coefficient value:", maxKey, coefficients[maxKey] minKey = min([(abs(coefficients[x]),x) for x in coefficients])[1] print "H2O Smallest abs. coefficient value:", minKey, coefficients[minKey] else: print "Warning, no coefficients returned. Must be intercept only?" # many of the GLM tests aren't single column though. # quick and dirty check: if all the coefficients are zero, # something is broken # intercept is in there too, but this will get it okay # just sum the abs value up..look for greater than 0 # skip this test if there is just one coefficient. Maybe pointing to a non-important coeff? if (not allowZeroCoeff) and (len(coefficients)>1): s = 0.0 for c in coefficients: v = coefficients[c] s += abs(float(v)) self.assertGreater(s, 1e-26, ( "sum of abs. value of GLM coefficients/intercept is " + str(s) + ", not >= 1e-26\n" + "kwargs:" + dump_json(kwargs) )) print "submodels1, run_time (milliseconds):", submodels1['run_time'] # shouldn't have any errors check_sandbox_for_errors() return (warnings, cList, intercept) # compare this glm to last one.
since the files are concatenations, # the results should be similar? 10% of first is allowed delta def compareToFirstGlm(self, key, glm, firstglm): # if isinstance(firstglm[key], list): # in case it's not a list already (err is a list) verboseprint("compareToFirstGlm key:", key) verboseprint("compareToFirstGlm glm[key]:", glm[key]) # key could be a list or not. if a list, don't want to create list of that list # so use extend on an empty list. covers all cases? if type(glm[key]) is list: kList = glm[key] firstkList = firstglm[key] elif type(glm[key]) is dict: raise Exception("compareToFirstGlm: Not expecting dict for " + key) else: kList = [glm[key]] firstkList = [firstglm[key]] print "kbn:", kList, firstkList for k, firstk in zip(kList, firstkList): # delta must be a positive number ? delta = .1 * abs(float(firstk)) msg = "Too large a delta (" + str(delta) + ") comparing current and first for: " + key self.assertAlmostEqual(float(k), float(firstk), delta=delta, msg=msg) self.assertGreaterEqual(abs(float(k)), 0.0, str(k) + " abs not >= 0.0 in current") def simpleCheckGLMGrid(self, glmGridResult, colX=None, allowFailWarning=False, **kwargs): # "grid": { # "destination_keys": [ # "GLMGridResults__8222a49156af52532a34fb3ce4304308_0", # "GLMGridResults__8222a49156af52532a34fb3ce4304308_1", # "GLMGridResults__8222a49156af52532a34fb3ce4304308_2" # ] # }, destination_key = glmGridResult['grid']['destination_keys'][0] inspectGG = h2o_nodes.nodes[0].glm_view(destination_key) models = inspectGG['glm_model']['submodels'] verboseprint("GLMGrid inspect GLMGrid model 0(best):", dump_json(models[0])) g = simpleCheckGLM(self, inspectGG, colX, allowFailWarning=allowFailWarning, **kwargs) # just to get some save_model testing for i,m in enumerate(glmGridResult['grid']['destination_keys']): print "Saving model", m, "to model"+str(i) h2o_nodes.nodes[0].save_model(model=m, path='model'+str(i), force=1) return g # This gives me a comma separated x string, for all the columns, with cols with # missing values, enums, and optionally matching a pattern, removed. useful for GLM # since it removes rows with any col with NA # get input from this.
# (missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \ # h2o_cmd.columnInfoFromInspect(parseResult['destination_key', # exceptionOnMissingValues=False, timeoutSecs=300) def goodXFromColumnInfo(y, num_cols=None, missingValuesDict=None, constantValuesDict=None, enumSizeDict=None, colTypeDict=None, colNameDict=None, keepPattern=None, key=None, timeoutSecs=120, returnIgnoreX=False, noPrint=False, returnStringX=True): y = str(y) # if we pass a key, means we want to get the info ourselves here if key is not None: (missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \ h2o_cmd.columnInfoFromInspect(key, exceptionOnMissingValues=False, max_column_display=99999999, timeoutSecs=timeoutSecs) num_cols = len(colNameDict) # now remove any whose names don't match the required keepPattern if keepPattern is not None: keepX = re.compile(keepPattern) else: keepX = None x = range(num_cols) # need to walk over a copy, cause we change x xOrig = x[:] ignore_x = [] # for use by RF for k in xOrig: name = colNameDict[k] # remove it if it has the same name as the y output if str(k)== y: # if they pass the col index as y if not noPrint: print "Removing %d because name: %s matches output %s" % (k, str(k), y) x.remove(k) # rf doesn't want it in ignore list # ignore_x.append(k) elif name == y: # if they pass the name as y if not noPrint: print "Removing %d because name: %s matches output %s" % (k, name, y) x.remove(k) # rf doesn't want it in ignore list # ignore_x.append(k) elif keepX is not None and not keepX.match(name): if not noPrint: print "Removing %d because name: %s doesn't match desired keepPattern %s" % (k, name, keepPattern) x.remove(k) ignore_x.append(k) # missing values reports as constant also. so do missing first. # remove all cols with missing values # could change it against num_rows for a ratio elif k in missingValuesDict: value = missingValuesDict[k] if not noPrint: print "Removing %d with name: %s because it has %d missing values" % (k, name, value) x.remove(k) ignore_x.append(k) elif k in constantValuesDict: value = constantValuesDict[k] if not noPrint: print "Removing %d with name: %s because it has constant value: %s " % (k, name, str(value)) x.remove(k) ignore_x.append(k) # this is extra pruning.. # remove all cols with enums, if not already removed elif k in enumSizeDict: value = enumSizeDict[k] if not noPrint: print "Removing %d %s because it has enums of size: %d" % (k, name, value) x.remove(k) ignore_x.append(k) if not noPrint: print "x has", len(x), "cols" print "ignore_x has", len(ignore_x), "cols" # this is probably used in 'cols" in v2, which can take numbers if returnStringX: x = ",".join(map(str, x)) ignore_x = ",".join(map(lambda x: "C" + str(x+1), ignore_x)) if not noPrint: print "\nx:", x print "\nignore_x:", ignore_x if returnIgnoreX: return ignore_x else: return x
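# --- Illustrative sketch (not part of the original helpers) ---
# check_obj_has_good_numbers() above walks nested dicts/lists and raises on
# any NaN/Inf leaf (unless allowNaN=True) or otherwise non-numeric value.
# A hypothetical, self-contained use, mirroring how simpleCheckGLM applies
# it to the coefficient table:
coeffs = {'Intercept': -6.68, 'AGE': 0.021, 'nested': [1.5, 2.5]}
check_obj_has_good_numbers(coeffs, 'model.coeffs', allowNaN=False)  # prints a "Yay!" per numeric leaf

try:
    check_obj_has_good_numbers({'bad': float('nan')}, 'model.coeffs', allowNaN=False)
except Exception as e:
    print "caught as expected:", e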
PawarPawan/h2o-v3
py2/h2o_glm.py
Python
apache-2.0
29,862
[ "Gaussian" ]
d1e049e96eb32d9070a767a54d43b61aad6874333b983021177b434514030a5c
# -*- coding: utf-8 -*- # # This file is part of Invenio-Query-Parser. # Copyright (C) 2014, 2015, 2016 CERN. # # Invenio-Query-Parser is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio-Query-Parser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. # # In applying this licence, CERN does not waive the privileges and immunities # granted to it by virtue of its status as an Intergovernmental Organization # or submit itself to any jurisdiction. """Implement query printer.""" from invenio_query_parser import ast from invenio_query_parser.visitor import make_visitor from ..ast import SpiresOp from ..config import SPIRES_KEYWORDS class SpiresToInvenio(object): visitor = make_visitor() # pylint: disable=W0613,E0102 @visitor(ast.AndOp) def visit(self, node, left, right): return type(node)(left, right) @visitor(ast.OrOp) def visit(self, node, left, right): return type(node)(left, right) @visitor(ast.KeywordOp) def visit(self, node, left, right): return type(node)(left, right) @visitor(ast.RangeOp) def visit(self, node, left, right): return type(node)(left, right) @visitor(ast.NotOp) def visit(self, node, op): return type(node)(op) @visitor(ast.GreaterOp) def visit(self, node, op): return type(node)(op) @visitor(ast.LowerOp) def visit(self, node, op): return type(node)(op) @visitor(ast.GreaterEqualOp) def visit(self, node, op): return type(node)(op) @visitor(ast.LowerEqualOp) def visit(self, node, op): return type(node)(op) @visitor(ast.Keyword) def visit(self, node): return type(node)(node.value) @visitor(ast.Value) def visit(self, node): return type(node)(node.value) @visitor(ast.WildcardQuery) def visit(self, node): return type(node)(node.value) @visitor(ast.ValueQuery) def visit(self, node, op): return type(node)(op) @visitor(ast.SingleQuotedValue) def visit(self, node): return type(node)(node.value) @visitor(ast.DoubleQuotedValue) def visit(self, node): return type(node)(node.value) @visitor(ast.RegexValue) def visit(self, node): return type(node)(node.value) @visitor(ast.EmptyQuery) def visit(self, node): return type(node)(node.value) @visitor(SpiresOp) def visit(self, node, left, right): left.value = SPIRES_KEYWORDS[left.value.lower()] if (left.value == 'author') and (type(right) is not ast.WildcardQuery): return ast.KeywordOp(left, ast.DoubleQuotedValue(right.value)) return ast.KeywordOp(left, right) # pylint: enable=W0613,E0102
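# --- Illustrative sketch (not part of the original walker) ---
# The @visitor(NodeType) decorators above register one visit() per AST node
# type; make_visitor() (from invenio_query_parser.visitor) supplies the
# type-based dispatch. A rough, hypothetical rendition of that dispatch idea,
# which may differ from the library's actual implementation:
def make_visitor_sketch():
    registry = {}
    def visitor(node_type):
        def register(fn):
            registry[node_type] = fn
            def dispatch(self, node, *args):
                return registry[type(node)](self, node, *args)
            return dispatch
        return register
    return visitor

visitor_sketch = make_visitor_sketch()

class TypePrinter(object):
    @visitor_sketch(int)
    def visit(self, node):
        return "int: %d" % node
    @visitor_sketch(str)
    def visit(self, node):
        return "str: %s" % node

print(TypePrinter().visit(42))    # -> int: 42
print(TypePrinter().visit("ab"))  # -> str: ab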
Panos512/inspire-next
inspirehep/modules/search/walkers/spires_to_invenio.py
Python
gpl-2.0
3,296
[ "VisIt" ]
08991256890895aec43695ad75d085210574729fba3d872e513e7897284fe24e
#!/usr/bin/env python # -*- encoding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals import h2o from h2o.estimators import H2OGeneralizedLinearEstimator from h2o.exceptions import H2OTypeError from tests import pyunit_utils def test_glm_params(): H2OGeneralizedLinearEstimator() H2OGeneralizedLinearEstimator(nfolds=5, seed=1000, alpha=0.5) df = h2o.H2OFrame.from_python({"response": [1, 2, 3, 4, 5], "a": [0, 1, 0, 1, 0], "b": [-1, 3, 7, 11, 20], "n": [0] * 5, "w": [1] * 5}) model = H2OGeneralizedLinearEstimator() model.training_frame = df model.validation_frame = df model.nfolds = 3 model.keep_cross_validation_predictions = True model.keep_cross_validation_fold_assignment = True model.fold_assignment = "random" model.fold_column = "b" model.response_column = "response" model.ignored_columns = ["x", "y"] model.ignore_const_cols = True model.score_each_iteration = True model.offset_column = "n" model.weights_column = "w" model.family = "MultiNomial" model.family = "GAUSSIAN" model.family = "Twee-die" model.family = "'poIssoN'" model.tweedie_variance_power = 1 model.tweedie_link_power = 2 model.solver = "CoordinateDescentNaive" try: model.fold_assignment = "pseudo-random" assert False except H2OTypeError: pass try: model.ignored_columns = "c" assert False except H2OTypeError: pass if __name__ == "__main__": pyunit_utils.standalone_test(test_glm_params) else: test_glm_params()
h2oai/h2o-3
h2o-py/tests/testdir_algos/glm/pyunit_glm_parameters.py
Python
apache-2.0
1,653
[ "Gaussian" ]
1180ed32310996a596d601ea230e36c0d5f438b881abcfbb2ae76f72d83744dd
# Copyright (C) 2012,2013, 2017 # Max Planck Institute for Polymer Research # Copyright (C) 2008,2009,2010,2011 # Max-Planck-Institute for Polymer Research & Fraunhofer SCAI # # This file is part of ESPResSo++. # # ESPResSo++ is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo++ is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. r""" ********************************** espressopp.interaction.VSphereSelf ********************************** This class provides methods to compute forces and energies of the VSphereSelf potential. .. math:: U = e_1\left(\frac{4}{3}\pi \sigma^2\right)^{\frac{3}{2}} + \frac{a_1 {N_b}^3}{\sigma^6} + \frac{a_2}{N_b} \sigma^2 Reference: Fluctuating soft-sphere approach to coarse-graining of polymer melts, Soft Matter, 2010, 6, 2282 .. function:: espressopp.interaction.VSphereSelf(e1, a1, a2, Nb, cutoff, shift) :param e1: (default: 0.0) :param a1: (default: 1.0) :param a2: (default: 0.0) :param Nb: (default: 1) :param cutoff: (default: infinity) :param shift: (default: 0.0) :type e1: real :type a1: real :type a2: real :type Nb: int :type cutoff: :type shift: real .. function:: espressopp.interaction.SelfVSphere(system, potential) :param system: :param potential: :type system: :type potential: .. function:: espressopp.interaction.SelfVSphere.getPotential() :rtype: .. function:: espressopp.interaction.SelfVSphere.setPotential(potential) :param potential: :type potential: """ from espressopp import pmi, infinity from espressopp.esutil import * from espressopp.interaction.Potential import * from espressopp.interaction.Interaction import * from _espressopp import interaction_VSphereSelf, interaction_SelfVSphere class VSphereSelfLocal(PotentialLocal, interaction_VSphereSelf): def __init__(self, e1=0.0, a1=1.0, a2=0.0, Nb=1, cutoff=infinity, shift=0.0): """Initialize the local VSphere object.""" if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): if shift == "auto": cxxinit(self, interaction_VSphereSelf, e1, a1, a2, Nb, cutoff) else: cxxinit(self, interaction_VSphereSelf, e1, a1, a2, Nb, cutoff, shift) class SelfVSphereLocal(InteractionLocal, interaction_SelfVSphere): def __init__(self, system, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_SelfVSphere, system, potential) def setPotential(self, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotential(self, potential) def getPotential(self): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): return self.cxxclass.getPotential(self) if pmi.isController: class VSphereSelf(Potential): 'The VSphereSelf potential.'
pmiproxydefs = dict( cls = 'espressopp.interaction.VSphereSelfLocal', pmiproperty = ['e1', 'a1', 'a2', 'Nb'] ) class SelfVSphere(Interaction): __metaclass__ = pmi.Proxy pmiproxydefs = dict( cls = 'espressopp.interaction.SelfVSphereLocal', pmicall = ['setPotential','getPotential'] )
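# --- Illustrative sketch (not part of the original wrapper) ---
# Plain-Python evaluation of the self-energy formula quoted in the module
# docstring, U = e1*(4/3*pi*sigma^2)^(3/2) + a1*Nb^3/sigma^6 + (a2/Nb)*sigma^2.
# This is a hedged reading of the documented form for sanity-checking
# parameters, not the engine's own C++ code path.
import math

def vsphere_self_energy(sigma, e1=0.0, a1=1.0, a2=0.0, Nb=1):
    return (e1 * (4.0 / 3.0 * math.pi * sigma ** 2) ** 1.5
            + a1 * Nb ** 3 / sigma ** 6
            + a2 / float(Nb) * sigma ** 2)

print(vsphere_self_energy(1.2, e1=0.5, a1=1.0, a2=0.1, Nb=10))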
govarguz/espressopp
src/interaction/VSphereSelf.py
Python
gpl-3.0
4,028
[ "ESPResSo" ]
fa40f0bfaceba03c99e669c4dbe3c48428034a82f913c16723e35209d296448b
# # Copyright 2016 The BigDL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import pytest from bigdl.dllib.feature.image import * from bigdl.orca.test_zoo_utils import ZooTestCase import tensorflow as tf import numpy as np import os from bigdl.orca.tfpark import KerasModel resource_path = os.path.join(os.path.split(__file__)[0], "../resources") class TestTFParkModel(ZooTestCase): def setup_method(self, method): tf.keras.backend.clear_session() super(TestTFParkModel, self).setup_method(method) def create_multi_input_output_model(self): data1 = tf.keras.layers.Input(shape=[10]) data2 = tf.keras.layers.Input(shape=[10]) x1 = tf.keras.layers.Flatten()(data1) x1 = tf.keras.layers.Dense(10, activation='relu')(x1) pred1 = tf.keras.layers.Dense(2, activation='softmax')(x1) x2 = tf.keras.layers.Flatten()(data2) x2 = tf.keras.layers.Dense(10, activation='relu')(x2) pred2 = tf.keras.layers.Dense(2)(x2) model = tf.keras.models.Model(inputs=[data1, data2], outputs=[pred1, pred2]) model.compile(optimizer='rmsprop', loss=['sparse_categorical_crossentropy', 'mse']) return model def create_training_data(self): np.random.seed(20) x = np.random.rand(20, 10) y = np.random.randint(0, 2, (20)) return x, y def test_training_with_validation_data_distributed_multi_heads(self): keras_model = self.create_multi_input_output_model() model = KerasModel(keras_model) x, y = self.create_training_data() val_x, val_y = self.create_training_data() model.fit([x, x], [y, y], validation_data=([val_x, val_x], [val_y, val_y]), batch_size=4, distributed=True) def test_invalid_data_handling(self): keras_model = self.create_multi_input_output_model() model = KerasModel(keras_model) x, y = self.create_training_data() val_x, val_y = self.create_training_data() # Number doesn't match with pytest.raises(AssertionError) as excinfo: model.fit([x, x], [y, y, y], batch_size=4, distributed=True) assert "model_target number does not match data number" in str(excinfo.value) # Dict as input with pytest.raises(AssertionError) as excinfo: model.fit({"input_1": x}, [y, y], batch_size=4, distributed=True) assert "all model_input names should exist in data" in str(excinfo.value) if __name__ == "__main__": pytest.main([__file__])
intel-analytics/BigDL
python/orca/test/bigdl/orca/tfpark/test_keras_model.py
Python
apache-2.0
3,083
[ "ORCA" ]
995cfdb45020c878c5c56881b37f0e88c2348a31733444c14b24f7bd6ec668ae
# -*- coding: utf-8 -*- # This file is part of Shuup. # # Copyright (c) 2012-2016, Shoop Ltd. All rights reserved. # # This source code is licensed under the AGPLv3 license found in the # LICENSE file in the root directory of this source tree. import argparse import ast import linecache import logging import os import re import sys from collections import Counter import jinja2 from django.utils.text import slugify from sanity_utils import find_files _paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}') @jinja2.evalcontextfilter def nl2br(eval_ctx, value): result = u'\n\n'.join( u'<p>%s</p>' % p.replace('\n', '<br>\n') for p in _paragraph_re.split(jinja2.escape(value))) if eval_ctx.autoescape: result = jinja2.Markup(result) return result REPORT_TEMPLATE = """ <!DOCTYPE html> <html> <body> <div class="container"> <h1>DocCov Report</h1> <div class="progress"> <div class="progress-bar" role="progressbar" style="width: {{ percentage }}%;"> {{ percentage }}% ({{ grand_totals.n_documented }}/{{ grand_totals.n_total }}) </div> </div> <hr> <table class="table table-striped table-condensed"> <tr> <th>Filename</th> <th>Documented</th> <th>Undocumented</th> <th>Total</th> <th colspan="2">Percentage documented</th> </tr> {% for file in files_by_percentage %} <tr> <td><a href="#{{ file.id }}">{{ file.path }}</a></td> <td class="text-right">{{ file.totals.n_documented }}</td> <td class="text-right">{{ file.totals.n_undocumented }}</td> <td class="text-right">{{ file.totals.n_total }}</td> <td class="text-right">{{ file.percentage }}%</td> <td width=50%> <div class="progress-bar" role="progressbar" style="width: {{ file.percentage }}%;" ><span>{{ file.percentage }}%</span></div> </td> </tr> {% endfor %} </table> {% for file in files %} <h2 id="{{ file.id }}">{{ file.path }}</h2> <div class="progress"> <div class="progress-bar" role="progressbar" style="width: {{ file.percentage }}%;"> {{ file.percentage }}% ({{ file.totals.n_documented }}/{{ file.totals.n_total }}) </div> </div> <table class="table table-striped"> <tr> <th>Line</th> <th>Object</th> <th>Docstring</th> <th>Errors</th> </tr> {% for m in file.object_stats %} <tr class="bg-{{ m.klass }}"> <td class="text-right">{{ m.line }}</td> <td>{{ m.obj }}</td> {% if m.docinfo %} <td>{{ m.docinfo.docstring|nl2br }}</td> <td> {% set ve = m.docinfo.validation_errors|sort %} {% if ve %} <ul>{% for e in ve %}<li>{{ e }}</li>{% endfor %}</ul> {% endif %} </td> {% else %} <td colspan="2"><i>Not documented</i></td> {% endif %} </tr> {% endfor %} </table> {% endfor %} </div> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootswatch/3.3.0/journal/bootstrap.min.css"> <style>.bg-success{background: #dff0d8 !important}</style> </body> </html> """.strip() IGNORED_FUNCTIONS = set([ '__abs__', '__add__', '__all__', '__and__', '__builtins__', '__cached__', '__concat__', '__contains__', '__delitem__', '__doc__', '__eq__', '__file__', '__floordiv__', '__ge__', '__getitem__', '__gt__', '__iadd__', '__iand__', '__iconcat__', '__ifloordiv__', '__ilshift__', '__imod__', '__imul__', '__index__', '__inv__', '__invert__', '__ior__', '__ipow__', '__irshift__', '__isub__', '__itruediv__', '__ixor__', '__le__', '__loader__', '__lshift__', '__lt__', '__mod__', '__mul__', '__name__', '__ne__', '__neg__', '__not__', '__or__', '__package__', '__pos__', '__pow__', '__rshift__', '__setitem__', '__spec__', '__sub__', '__truediv__', '__xor__', # The usual suspects '__str__', '__repr__', # CBV generics: "dispatch", "form_invalid", "form_valid", "get", "get_context_data", 
"get_form", "get_form_class", "get_form_kwargs", "get_object", "get_success_url", "post", ]) IGNORED_CLASSES = set([ "Labels", "Meta", ]) IGNORED_FIRST_ARGS = set([ "self", "cls" # classmethods ]) IGNORED_ARGS = set([]) ARG_RE = re.compile(r":param\s+([a-z_0-9]+)", re.I) INTERNAL_NAME_RE = re.compile("^_[^_].+$") DOCCOV_DIRECTIVE_COMMENT_RE = re.compile("doccov:\s*(.+)", re.I) class ReturnValueVisitor(ast.NodeVisitor): def __init__(self): self.returned_values = set() @property def has_valueful_return(self): return bool(self.returned_values) def visit_Return(self, node): # noqa (N802) retval = node.value if retval: self.returned_values.add(retval) class Validator(object): disabling_directives = [] def validate(self, docinfo): pass class GenericDocstringValidator(Validator): def validate(self, docinfo): docstring = docinfo.docstring if docstring: if len(docstring) < 15: yield "Docstring too short" sep = (".\n" if "\n" in docstring else ".") if sep not in docstring: yield "Docstring doesn't seem to have an opening sentence" else: if not self.can_elide_docstring(docinfo): yield "Docstring missing" def can_elide_docstring(self, docinfo): if docinfo.name == "__init__" and not docinfo.required_args: return True return False class ArgValidator(Validator): disabling_directives = ("noargs",) def validate(self, docinfo): for arg in sorted(docinfo.missing_args): yield u"Missing mention of arg `%s`" % arg for arg in sorted(docinfo.extraneous_args): yield u"Extraneous mention of arg `%s`" % arg class ReturnValidator(Validator): disabling_directives = ("noreturn",) def validate(self, docinfo): node = docinfo.node docstring = docinfo.docstring if not isinstance(node, ast.FunctionDef): return rvv = ReturnValueVisitor() rvv.visit(node) if rvv.has_valueful_return: if not (":return" in docstring or ":rtype" in docstring): yield u"Undocumented return value(s)" class DocInfo(object): validator_classes = [ GenericDocstringValidator, ArgValidator, ReturnValidator ] def __init__(self, node, filename): self.node = node self.filename = filename self.directives = "" directive_match = DOCCOV_DIRECTIVE_COMMENT_RE.search(linecache.getline(filename, node.lineno)) if directive_match: self.directives = directive_match.group(1).lower() self.name = getattr(node, "name", None) or "" self.docstring = (self.parse_docstring(node) or u"").strip() self.named_args = ([a.arg for a in node.args.args] if hasattr(node, "args") else []) if self.named_args and self.named_args[0] in IGNORED_FIRST_ARGS: self.named_args.pop(0) self.required_args = set(arg for arg in self.named_args if arg not in IGNORED_ARGS) self.mentioned_args = set(self.parse_arg_mentions(self.docstring)) self.missing_args = self.required_args - self.mentioned_args self.extraneous_args = self.mentioned_args - self.required_args - set(["args", "kwargs"]) self.validation_errors = list(self.validate()) self.valid = not self.validation_errors def _prevalidate(self): if "migrations" in self.filename: # Nothing in migration files is documentation-worthwhile return False if "ignore" in self.directives: return False if INTERNAL_NAME_RE.match(self.name): return False return True def validate(self): if not self._prevalidate(): return for validator_class in self.validator_classes: validator = validator_class() if any((directive in self.directives) for directive in validator_class.disabling_directives): continue for error in validator.validate(self): yield error @staticmethod def parse_docstring(node): if node.body and isinstance(node.body[0], ast.Expr): if 
isinstance(node.body[0].value, ast.Str): value = node.body[0].value.s return value @staticmethod def parse_arg_mentions(docstring): return set(m.group(1) for m in ARG_RE.finditer(docstring)) class DocStringVisitor(ast.NodeVisitor): def __init__(self, filename): self.filename = filename self.objects = {} self._class_stack = [] def _get_name(self, node): name = node.name if self._class_stack: name = "::".join([c.name for c in self._class_stack] + [name]) line = node.lineno if isinstance(node, ast.FunctionDef): return (line, "func", name + "()") elif isinstance(node, ast.ClassDef): return (line, "class", name) else: raise NotImplementedError("Not implemented: name for %s" % node) def visit_FunctionDef(self, node): # noqa (N802) if node.name in IGNORED_FUNCTIONS: return self.objects[self._get_name(node)] = DocInfo(node, self.filename) def visit_ClassDef(self, node): # noqa (N802) if node.name in IGNORED_CLASSES: return self.objects[self._get_name(node)] = DocInfo(node, self.filename) self._class_stack.append(node) self.generic_visit(node) self._class_stack.pop(-1) class DocCov(object): def __init__(self): self.filenames = set() self.objects_by_file = {} self.log = logging.getLogger("DocCov") def check_files(self): for filename in sorted(self.filenames): self.check_file(filename) def check_file(self, filename): with open(filename, "rb") as inf: data = inf.read() try: tree = ast.parse(data, filename) except SyntaxError: self.log.exception("Can't parse %s" % filename) return visitor = DocStringVisitor(filename=filename) visitor.visit(tree) self.objects_by_file[filename] = visitor.objects def add_root(self, path): path = os.path.realpath(path) if os.path.isdir(path): for filepath in find_files(path, allowed_extensions=(".py",)): if filepath.startswith("test_"): self.log.info("Skipping: %s" % filepath) continue self.filenames.add(filepath) elif path.endswith('.py'): self.filenames.add(path) def write_report(self, output_file): template_file_list = [] common_prefix = os.path.commonprefix(self.objects_by_file.keys()) grand_totals = Counter() for path, objects in sorted(self.objects_by_file.items()): clean_path = path[len(common_prefix):].replace(os.sep, "/") n_documented = sum([1 for m in objects.values() if m and m.valid]) n_total = float(len(objects)) n_undocumented = n_total - n_documented file_totals = Counter({ "n_documented": n_documented, "n_total": n_total, "n_undocumented": n_undocumented }) grand_totals += file_totals if n_total: object_stats = [{ "type": type, "line": line, "obj": obj, "klass": "success" if (docinfo and docinfo.valid) else "error", "docinfo": docinfo } for ((line, type, obj), docinfo) in sorted(objects.items())] template_file_list.append({ "id": slugify(clean_path), "path": clean_path, "totals": file_totals, "percentage": round(n_documented / float(n_total) * 100, 1), "object_stats": object_stats }) env = jinja2.Environment() env.filters["nl2br"] = nl2br data = env.from_string(REPORT_TEMPLATE).render({ "percentage": round(grand_totals["n_documented"] / float(grand_totals["n_total"]) * 100, 2), "grand_totals": grand_totals, "files": template_file_list, "files_by_percentage": sorted(template_file_list, key=lambda f: (f["percentage"], f["id"])) }) output_file.write(data) def main(): ap = argparse.ArgumentParser() ap.add_argument("-o", "--output", dest="output", type=argparse.FileType("w", encoding="utf-8"), default=sys.stdout) ap.add_argument("-v", "--verbose", dest="verbose", action="store_true") ap.add_argument("-q", "--quiet", dest="quiet", action="store_true") 
ap.add_argument("roots", metavar="root", nargs="+") args = ap.parse_args() if args.verbose: logging.basicConfig(level=logging.DEBUG) elif args.quiet: logging.basicConfig(level=9001) else: logging.basicConfig(level=logging.INFO) dc = DocCov() for root in args.roots: dc.add_root(root) dc.check_files() dc.write_report(args.output) if __name__ == '__main__': main()
hrayr-artunyan/shuup
_misc/doccov.py
Python
agpl-3.0
13,318
[ "VisIt" ]
af09c128105c1b396eabebaad64d3dc2a5accc26b9cb3b98b018c6bfabffc390
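# --- Added usage sketch (not part of the record above): a minimal example of
# driving the DocCov class programmatically instead of through its argparse
# entry point. The import path "doccov" and the example root are assumptions
# for illustration only.
import io

from doccov import DocCov  # hypothetical module name for the file above


def write_coverage_report(roots, report_path="doccov_report.html"):
    """Scan every .py file under each root and render the HTML report."""
    dc = DocCov()
    for root in roots:
        dc.add_root(root)   # accepts a directory or a single .py file
    dc.check_files()        # ast-parses each file and validates docstrings
    with io.open(report_path, "w", encoding="utf-8") as outf:
        dc.write_report(outf)   # renders REPORT_TEMPLATE with jinja2

# write_coverage_report(["./shuup"])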
""" Main script which executes the run cycle. It will start and remain running until it is shut-down externally, and will execute the deposit.run method repeatedly. """ from octopus.core import app, initialise, add_configuration import logging from logging import Formatter from logging.handlers import RotatingFileHandler file_handler = RotatingFileHandler('jperswordoutlog', maxBytes=1000000000, backupCount=5) file_handler.setLevel(logging.INFO) file_handler.setFormatter(Formatter( '%(asctime)s %(levelname)s: %(message)s ' '[in %(pathname)s:%(lineno)d %(module)s %(funcName)s]' )) app.logger.addHandler(file_handler) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("-d", "--debug", action="store_true", help="pycharm debug support enable") parser.add_argument("-c", "--config", help="additional configuration to load (e.g. for testing)") args = parser.parse_args() if args.config: add_configuration(app, args.config) pycharm_debug = app.config.get('DEBUG_PYCHARM', False) if args.debug: pycharm_debug = True if pycharm_debug: app.config['DEBUG'] = False import pydevd pydevd.settrace(app.config.get('DEBUG_SERVER_HOST', 'localhost'), port=app.config.get('DEBUG_SERVER_PORT', 51234), stdoutToServer=True, stderrToServer=True) print "STARTED IN REMOTE DEBUG MODE" initialise() from service import deposit import time, sys col_counter = 0 while True: app.logger.info(u"Starting SWORDv2 Runner") deposit.run(fail_on_error=True) print ".", sys.stdout.flush() col_counter += 1 if col_counter >= 36: print "" col_counter = 0 time.sleep(app.config.get("RUN_THROTTLE"))
JiscPER/jper-sword-out
service/runner.py
Python
apache-2.0
1,819
[ "Octopus" ]
bc78d960ee6de45b304cfc57508b87b2eaee5b7ea2cf3d1fabadaa18bcc167fb
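# --- Added sketch (not part of the record above): the runner's core pattern is
# a throttled poll loop. A generic, self-contained version of that pattern,
# with the task and throttle as plain arguments instead of octopus config
# lookups (names here are illustrative, not from the original project).
import logging
import time


def poll_forever(task, throttle_seconds, logger=None):
    """Run task() repeatedly, pausing throttle_seconds between passes."""
    logger = logger or logging.getLogger("runner")
    while True:
        try:
            task()
        except Exception:
            # unlike the original (which lets errors escape), log and go on
            logger.exception("run cycle failed; retrying after throttle")
        time.sleep(throttle_seconds)

# poll_forever(lambda: None, throttle_seconds=2.0)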
# -----------------------------------------------------------------------------
# Copyright (c) 2015--, biocore development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
""" Application controller for vsearch v1.1.1 """

from os.path import abspath, join, dirname, splitext

from burrito.parameters import ValuedParameter, FlagParameter
from burrito.util import (CommandLineApplication, ResultPath,
                          ApplicationError)


class Vsearch(CommandLineApplication):
    """ Vsearch ApplicationController """

    _command = 'vsearch'
    _input_handler = '_input_as_parameters'
    _parameters = {
        # Output to specified FASTA file
        '--output': ValuedParameter('--', Name='output', Delimiter=' ',
                                    IsPath=True),

        # Filename for UCLUST-like output
        '--uc': ValuedParameter('--', Name='uc', Delimiter=' ',
                                IsPath=True),

        # Filename for BLAST-like tab-separated output
        '--blast6out': ValuedParameter('--', Name='blast6out', Delimiter=' ',
                                       IsPath=True),

        # ID percent for OTU (97% by default)
        '--id': ValuedParameter('--', Name='id', Delimiter=' ',
                                IsPath=False, Value=None),

        # ID definition, 0-4=CD-HIT,all,int,MBL,BLAST (default vsearch: 2)
        '--iddef': ValuedParameter('--', Name='iddef', Delimiter=' ',
                                   IsPath=False, Value=None),

        # Number of hits to accept and show per strand (default vsearch: 1)
        '--maxaccepts': ValuedParameter('--', Name='maxaccepts',
                                        Delimiter=' ', Value=None),

        # Number of non-matching hits to consider (default vsearch: 32)
        '--maxrejects': ValuedParameter('--', Name='maxrejects',
                                        Delimiter=' ', Value=None),

        # Indicate that input sequences are presorted
        '--usersort': FlagParameter('--', Name='usersort'),

        # Take into account the abundance annotations present
        # in the input fasta file
        '--sizein': FlagParameter('--', Name='sizein'),

        # Add abundance annotations to the output fasta files
        '--sizeout': FlagParameter('--', Name='sizeout'),

        # Dereplicate exact sequences in the given FASTA file
        '--derep_fulllength': ValuedParameter('--', Name='derep_fulllength',
                                              Delimiter=' ', IsPath=True),

        # Dereplicate plus or both strands (default vsearch: plus)
        '--strand': ValuedParameter('--', Name='strand', Delimiter=' ',
                                    IsPath=False),

        # Discard sequences with an abundance value greater than integer
        '--maxuniquesize': ValuedParameter('--', Name='maxuniquesize',
                                           Delimiter=' ', IsPath=False),

        # Discard sequences with an abundance value smaller than integer
        '--minuniquesize': ValuedParameter('--', Name='minuniquesize',
                                           Delimiter=' ', IsPath=False),

        # Abundance sort sequences in given FASTA file
        '--sortbysize': ValuedParameter('--', Name='sortbysize',
                                        Delimiter=' ', IsPath=True),

        # When using --sortbysize, discard sequences
        # with an abundance value greater than maxsize
        '--maxsize': ValuedParameter('--', Name='maxsize', Delimiter=' ',
                                     IsPath=False),

        # When using --sortbysize, discard sequences
        # with an abundance value smaller than minsize
        '--minsize': ValuedParameter('--', Name='minsize', Delimiter=' ',
                                     IsPath=False),

        # Output cluster consensus sequences to FASTA file
        '--consout': ValuedParameter('--', Name='consout', Delimiter=' ',
                                     IsPath=True),

        # Chimera detection: min abundance ratio of parent vs chimera
        # (default vsearch: 2.0)
        '--abskew': ValuedParameter('--', Name='abskew', Delimiter=' ',
                                    IsPath=False, Value=None),

        # Detect chimeras de novo
        '--uchime_denovo': ValuedParameter('--', Name='uchime_denovo',
                                           Delimiter=' ', IsPath=True),

        # Detect chimeras using a reference database
        '--uchime_ref': ValuedParameter('--', Name='uchime_ref',
                                        Delimiter=' ', IsPath=True),

        # Output chimera alignments to 3-way alignment file (filepath)
        '--uchimealns': ValuedParameter('--', Name='uchimealns',
                                        Delimiter=' ', IsPath=True),

        # Output chimeric sequences to file (filepath)
        '--chimeras': ValuedParameter('--', Name='chimeras', Delimiter=' ',
                                      IsPath=True),

        # Output non-chimera filepath
        '--nonchimeras': ValuedParameter('--', Name='nonchimeras',
                                         Delimiter=' ', IsPath=True),

        # Reference database for --uchime_ref
        '--db': ValuedParameter('--', Name='db', Delimiter=' ', IsPath=True),

        # Output chimera info to tab-separated file
        '--uchimeout': ValuedParameter('--', Name='uchimeout', Delimiter=' ',
                                       IsPath=True),

        # Number of computation threads to use (1 to 256)
        # note: by default, keep the value set to 1 for all commands
        # since otherwise (if no other value is given) VSEARCH will use
        # all available cores
        '--threads': ValuedParameter('--', Name='threads', Delimiter=' ',
                                     IsPath=False, Value="1"),

        # Write messages, timing and memory info to file
        '--log': ValuedParameter('--', Name='log', Delimiter=' ',
                                 IsPath=True)
    }

    _suppress_stdout = False
    _suppress_stderr = False

    def _input_as_parameters(self, data):
        """ Set the input path (a fasta filepath)
        """
        # The list of values which can be passed on a per-run basis
        allowed_values = ['--uc', '--output', '--sortbysize',
                          '--consout', '--uchime_denovo',
                          '--derep_fulllength', '--maxuniquesize',
                          '--minuniquesize', '--sizein', '--sizeout',
                          '--strand', '--threads', '--uchime_ref',
                          '--chimeras', '--nonchimeras', '--db',
                          '--uchimeout', '--blast6out', '--abskew',
                          '--maxsize', '--minsize']

        unsupported_parameters = set(data.keys()) - set(allowed_values)
        if unsupported_parameters:
            raise ApplicationError(
                "Unsupported parameter(s) passed when calling vsearch: %s" %
                ' '.join(unsupported_parameters))

        for v in allowed_values:
            # turn the parameter off so subsequent runs are not
            # affected by parameter settings from previous runs
            self.Parameters[v].off()
            if v in data:
                # turn the parameter on if specified by the user
                self.Parameters[v].on(data[v])

        return ''

    def _get_result_paths(self, data):
        """ Set the result paths """

        result = {}

        result['Output'] = ResultPath(
            Path=self.Parameters['--output'].Value,
            IsWritten=self.Parameters['--output'].isOn())

        result['ClusterFile'] = ResultPath(
            Path=self.Parameters['--uc'].Value,
            IsWritten=self.Parameters['--uc'].isOn())

        # uchime 3-way global alignments
        result['Output_aln'] = ResultPath(
            Path=self.Parameters['--uchimealns'].Value,
            IsWritten=self.Parameters['--uchimealns'].isOn())

        # uchime tab-separated format
        result['Output_tabular'] = ResultPath(
            Path=self.Parameters['--uchimeout'].Value,
            IsWritten=self.Parameters['--uchimeout'].isOn())

        # chimeras fasta file output
        result['Output_chimeras'] = ResultPath(
            Path=self.Parameters['--chimeras'].Value,
            IsWritten=self.Parameters['--chimeras'].isOn())

        # nonchimeras fasta file output
        result['Output_nonchimeras'] = ResultPath(
            Path=self.Parameters['--nonchimeras'].Value,
            IsWritten=self.Parameters['--nonchimeras'].isOn())

        # log file
        result['LogFile'] = ResultPath(
            Path=self.Parameters['--log'].Value,
            IsWritten=self.Parameters['--log'].isOn())

        return result

    def getHelp(self):
        """Method that points to documentation"""
        help_str = """
        VSEARCH is hosted at:
        https://github.com/torognes/vsearch

        Please cite the above URL if this wrapper is used in published work.
        """
        return help_str


def vsearch_dereplicate_exact_seqs(
        fasta_filepath,
        output_filepath,
        output_uc=False,
        working_dir=None,
        strand="both",
        maxuniquesize=None,
        minuniquesize=None,
        sizein=False,
        sizeout=True,
        log_name="derep.log",
        HALT_EXEC=False):
    """ Generates clusters and fasta file of dereplicated subsequences

    Parameters
    ----------
    fasta_filepath : string
        input filepath of fasta file to be dereplicated
    output_filepath : string
        write the dereplicated sequences to output_filepath
    working_dir : string, optional
        directory path for storing intermediate output
    output_uc : boolean, optional
        output dereplication results in a file using a uclust-like format
    strand : string, optional
        when searching for strictly identical sequences, check both
        strands (the default) or check the plus strand only
    maxuniquesize : integer, optional
        discard sequences with an abundance value greater than
        maxuniquesize
    minuniquesize : integer, optional
        discard sequences with an abundance value smaller than
        minuniquesize
    sizein : boolean, optional
        take into account the abundance annotations present in the input
        fasta file (search for the pattern "[>;]size=integer[;]" in
        sequence headers)
    sizeout : boolean, optional
        add abundance annotations to the output fasta file (add the
        pattern ";size=integer;" to sequence headers)
    log_name : string, optional
        specifies log filename
    HALT_EXEC : boolean, optional
        used for debugging app controller

    Return
    ------
    output_filepath : string
        filepath to dereplicated fasta file
    uc_filepath : string
        filepath to dereplication results in uclust-like format
    log_filepath : string
        filepath to log file
    """
    # write all vsearch output files to same directory
    # as output_filepath if working_dir is not specified
    if not working_dir:
        working_dir = dirname(abspath(output_filepath))

    app = Vsearch(WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)

    log_filepath = join(working_dir, log_name)

    uc_filepath = None
    if output_uc:
        root_name = splitext(abspath(output_filepath))[0]
        uc_filepath = join(working_dir, '%s.uc' % root_name)
        app.Parameters['--uc'].on(uc_filepath)
    if maxuniquesize:
        app.Parameters['--maxuniquesize'].on(maxuniquesize)
    if minuniquesize:
        app.Parameters['--minuniquesize'].on(minuniquesize)
    if sizein:
        app.Parameters['--sizein'].on()
    if sizeout:
        app.Parameters['--sizeout'].on()
    if (strand == "both" or strand == "plus"):
        app.Parameters['--strand'].on(strand)
    else:
        raise ValueError("Option --strand accepts only 'both' "
                         "or 'plus' values")

    app.Parameters['--derep_fulllength'].on(fasta_filepath)
    app.Parameters['--output'].on(output_filepath)
    app.Parameters['--log'].on(log_filepath)

    app_result = app()

    return output_filepath, uc_filepath, log_filepath


def vsearch_sort_by_abundance(
        fasta_filepath,
        output_filepath,
        working_dir=None,
        minsize=None,
        maxsize=None,
        log_name="abundance_sort.log",
        HALT_EXEC=False):
    """ Sort fasta entries by decreasing abundance

    (Fasta entries are assumed to be dereplicated, with the pattern
    "[>;]size=integer[;]" present in the read label; e.g., use the function
    vsearch_dereplicate_exact_seqs prior to calling this function.)

    Parameters
    ----------
    fasta_filepath : string
        input fasta file (dereplicated fasta)
    output_filepath : string
        output filepath for the sorted sequences in fasta format
    working_dir : string, optional
        working directory to store intermediate files
    minsize : integer, optional
        discard sequences with an abundance value smaller than minsize
    maxsize : integer, optional
        discard sequences with an abundance value greater than maxsize
    log_name : string, optional
        log filename
    HALT_EXEC : boolean, optional
        used for debugging app controller

    Return
    ------
    output_filepath : string
        filepath to sorted fasta file
    log_filepath : string
        filepath to log file
    """
    # set working dir to same directory as the output
    # file (if not provided)
    if not working_dir:
        working_dir = dirname(output_filepath)

    app = Vsearch(WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)

    log_filepath = join(working_dir, log_name)

    if minsize:
        app.Parameters['--minsize'].on(minsize)

    if maxsize:
        app.Parameters['--maxsize'].on(maxsize)

    app.Parameters['--sortbysize'].on(fasta_filepath)
    app.Parameters['--output'].on(output_filepath)
    app.Parameters['--log'].on(log_filepath)

    app_result = app()

    return output_filepath, log_filepath


def vsearch_chimera_filter_de_novo(
        fasta_filepath,
        working_dir,
        output_chimeras=True,
        output_nonchimeras=True,
        output_alns=False,
        output_tabular=False,
        log_name="vsearch_uchime_de_novo_chimera_filtering.log",
        HALT_EXEC=False):
    """ Detect chimeras present in the fasta-formatted filename,
        without external references (i.e. de novo)

    Automatically sort the sequences in filename by decreasing abundance
    beforehand. Output chimeras and non-chimeras to FASTA files and/or
    3-way global alignments and/or tabular output.

    Parameters
    ----------
    fasta_filepath : string
        input fasta file (dereplicated fasta with pattern
        [>;]size=integer[;] in the fasta header)
    working_dir : string
        directory path for all output files
    output_chimeras : boolean, optional
        output chimeric sequences to file, in fasta format
    output_nonchimeras : boolean, optional
        output nonchimeric sequences to file, in fasta format
    output_alns : boolean, optional
        output 3-way global alignments (parentA, parentB, chimera) in
        human readable format to file
    output_tabular : boolean, optional
        output results using the uchime tab-separated format of 18 fields
        (see Vsearch user manual)
    HALT_EXEC : boolean, optional
        used for debugging app controller

    Return
    ------
    output_chimera_filepath : string
        filepath to chimeric fasta sequences
    output_non_chimera_filepath : string
        filepath to nonchimeric fasta sequences
    output_alns_filepath : string
        filepath to chimeric sequences alignment file
    output_tabular_filepath : string
        filepath to chimeric sequences tabular output file
    log_filepath : string
        filepath to log file
    """
    app = Vsearch(WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)

    if not (output_chimeras or
            output_nonchimeras or
            output_alns or
            output_tabular):
        raise ValueError("At least one output format (output_chimeras, "
                         "output_nonchimeras, output_alns, output_tabular) "
                         "must be selected")

    output_chimera_filepath = None
    output_non_chimera_filepath = None
    output_alns_filepath = None
    output_tabular_filepath = None

    # set output filepaths
    if output_chimeras:
        output_chimera_filepath = join(working_dir, 'uchime_chimeras.fasta')
        app.Parameters['--chimeras'].on(output_chimera_filepath)
    if output_nonchimeras:
        output_non_chimera_filepath = join(working_dir,
                                           'uchime_non_chimeras.fasta')
        app.Parameters['--nonchimeras'].on(output_non_chimera_filepath)
    if output_alns:
        output_alns_filepath = join(working_dir, 'uchime_alignments.txt')
        app.Parameters['--uchimealns'].on(output_alns_filepath)
    if output_tabular:
        output_tabular_filepath = join(working_dir, 'uchime_tabular.txt')
        app.Parameters['--uchimeout'].on(output_tabular_filepath)
    log_filepath = join(working_dir, log_name)

    app.Parameters['--uchime_denovo'].on(fasta_filepath)
    app.Parameters['--log'].on(log_filepath)

    app_result = app()

    return output_chimera_filepath, output_non_chimera_filepath,\
        output_alns_filepath, output_tabular_filepath, log_filepath


def vsearch_chimera_filter_ref(
        fasta_filepath,
        working_dir,
        db_filepath,
        output_chimeras=True,
        output_nonchimeras=True,
        output_alns=False,
        output_tabular=False,
        log_name="vsearch_uchime_ref_chimera_filtering.log",
        threads=1,
        HALT_EXEC=False):
    """ Detect chimeras present in the fasta-formatted filename,
        with an external reference (i.e. database)

    Output chimeras and non-chimeras to FASTA files and/or 3-way
    global alignments and/or tabular output.

    Parameters
    ----------
    fasta_filepath : string
        input fasta file (dereplicated fasta)
    working_dir : string
        directory path for all output files
    db_filepath : string
        filepath to reference database
    output_chimeras : boolean, optional
        output chimeric sequences to file, in fasta format
    output_nonchimeras : boolean, optional
        output nonchimeric sequences to file, in fasta format
    output_alns : boolean, optional
        output 3-way global alignments (parentA, parentB, chimera) in
        human readable format to file
    output_tabular : boolean, optional
        output results using the uchime tab-separated format of 18 fields
        (see Vsearch user manual)
    threads : integer, optional
        number of computation threads to use (1 to 256)
    HALT_EXEC : boolean, optional
        used for debugging app controller

    Return
    ------
    output_chimera_filepath : string
        filepath to chimeric fasta sequences
    output_non_chimera_filepath : string
        filepath to nonchimeric fasta sequences
    output_alns_filepath : string
        filepath to chimeric sequences alignment file
    output_tabular_filepath : string
        filepath to chimeric sequences tabular output file
    log_filepath : string
        filepath to log file
    """
    app = Vsearch(WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)

    if not (output_chimeras or
            output_nonchimeras or
            output_alns or
            output_tabular):
        raise ValueError("At least one output format (output_chimeras, "
                         "output_nonchimeras, output_alns, output_tabular) "
                         "must be selected")

    output_chimera_filepath = None
    output_non_chimera_filepath = None
    output_alns_filepath = None
    output_tabular_filepath = None

    # set output filepaths
    if output_chimeras:
        output_chimera_filepath = join(working_dir, 'uchime_chimeras.fasta')
        app.Parameters['--chimeras'].on(output_chimera_filepath)
    if output_nonchimeras:
        output_non_chimera_filepath = join(working_dir,
                                           'uchime_non_chimeras.fasta')
        app.Parameters['--nonchimeras'].on(output_non_chimera_filepath)
    if output_alns:
        output_alns_filepath = join(working_dir, 'uchime_alignments.txt')
        app.Parameters['--uchimealns'].on(output_alns_filepath)
    if output_tabular:
        output_tabular_filepath = join(working_dir, 'uchime_tabular.txt')
        app.Parameters['--uchimeout'].on(output_tabular_filepath)
    log_filepath = join(working_dir, log_name)

    # wire up the documented 'threads' argument (the class default is 1);
    # the original body accepted but never applied this parameter
    app.Parameters['--threads'].on(threads)
    app.Parameters['--db'].on(db_filepath)
    app.Parameters['--uchime_ref'].on(fasta_filepath)
    app.Parameters['--log'].on(log_filepath)

    app_result = app()

    return output_chimera_filepath, output_non_chimera_filepath,\
        output_alns_filepath, output_tabular_filepath, log_filepath
biocore/burrito-fillings
bfillings/vsearch.py
Python
bsd-3-clause
21,714
[ "BLAST" ]
f8c6b51b1600010c341d7cb00a5d20b058ee7c0c334c64c12d7c26b0b09b07fb
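# --- Added pipeline sketch (not part of the record above): the helpers in
# this module are meant to be chained -- dereplicate reads, abundance-sort
# them (uchime expects ";size=N;" annotations in decreasing order), then flag
# chimeras de novo. Paths and the wrapper function are illustrative only.
from os.path import join

from bfillings.vsearch import (vsearch_chimera_filter_de_novo,
                               vsearch_dereplicate_exact_seqs,
                               vsearch_sort_by_abundance)


def denovo_chimera_pipeline(reads_fp, work_dir):
    """Dereplicate, abundance-sort and chimera-check a reads file."""
    derep_fp, uc_fp, derep_log = vsearch_dereplicate_exact_seqs(
        fasta_filepath=reads_fp,
        output_filepath=join(work_dir, "derep.fasta"),
        output_uc=True,
        working_dir=work_dir,
        sizeout=True)  # emit the ;size=N; abundance annotations
    sorted_fp, sort_log = vsearch_sort_by_abundance(
        fasta_filepath=derep_fp,
        output_filepath=join(work_dir, "derep_sorted.fasta"),
        working_dir=work_dir,
        minsize=2)  # drop singletons before chimera checking
    # returns (chimeras, nonchimeras, alns, tabular, log) filepaths
    return vsearch_chimera_filter_de_novo(
        fasta_filepath=sorted_fp,
        working_dir=work_dir)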
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing `LinearOperator` and sub-classes."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import abc

import numpy as np
import six

from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.contrib.linalg.python.ops import linear_operator_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test


@six.add_metaclass(abc.ABCMeta)  # pylint: disable=no-init
class LinearOperatorDerivedClassTest(test.TestCase):
  """Tests for derived classes.

  Subclasses should implement every abstractmethod, and this will enable all
  test methods to work.
  """

  # Absolute/relative tolerance for tests.
  _atol = {
      dtypes.float16: 1e-3,
      dtypes.float32: 1e-6,
      dtypes.float64: 1e-12,
      dtypes.complex64: 1e-6,
      dtypes.complex128: 1e-12
  }
  _rtol = {
      dtypes.float16: 1e-3,
      dtypes.float32: 1e-6,
      dtypes.float64: 1e-12,
      dtypes.complex64: 1e-6,
      dtypes.complex128: 1e-12
  }

  def assertAC(self, x, y):
    """Derived classes can set _atol, _rtol to get different tolerance."""
    dtype = dtypes.as_dtype(x.dtype)
    atol = self._atol[dtype]
    rtol = self._rtol[dtype]
    self.assertAllClose(x, y, atol=atol, rtol=rtol)

  @property
  def _dtypes_to_test(self):
    # TODO(langmore) Test tf.float16 once tf.matrix_solve works in 16bit.
    return [dtypes.float32, dtypes.float64,
            dtypes.complex64, dtypes.complex128]

  @abc.abstractproperty
  def _shapes_to_test(self):
    """Returns list of tuples, each is one shape that will be tested."""
    raise NotImplementedError("shapes_to_test has not been implemented.")

  @abc.abstractmethod
  def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
    """Build a batch matrix and an Operator that should have similar behavior.

    Every operator acts like a (batch) matrix.  This method returns both
    together, and is used by tests.

    Args:
      shape:  List-like of Python integers giving full shape of operator.
      dtype:  Numpy dtype.  Data type of returned array/operator.
      use_placeholder:  Python bool.  If True, initialize the operator with a
        placeholder of undefined shape and correct dtype.

    Returns:
      operator:  `LinearOperator` subclass instance.
      mat:  `Tensor` representing operator.
      feed_dict:  Dictionary.  If use_placeholder is True, this must contain
        everything needed to be fed to sess.run calls at runtime to make the
        operator work.
    """
    # Create a matrix as a numpy array with desired shape/dtype.
    # Create a LinearOperator that should have the same behavior as the matrix.
    raise NotImplementedError("Not implemented yet.")

  @abc.abstractmethod
  def _make_rhs(self, operator, adjoint):
    """Make a rhs appropriate for calling operator.solve(rhs).

    Args:
      operator:  A `LinearOperator`
      adjoint:  Python `bool`.  If `True`, we are making a 'rhs' value for the
        adjoint operator.

    Returns:
      A `Tensor`
    """
    raise NotImplementedError("_make_rhs is not defined.")

  @abc.abstractmethod
  def _make_x(self, operator, adjoint):
    """Make an 'x' appropriate for calling operator.matmul(x).

    Args:
      operator:  A `LinearOperator`
      adjoint:  Python `bool`.  If `True`, we are making an 'x' value for the
        adjoint operator.

    Returns:
      A `Tensor`
    """
    raise NotImplementedError("_make_x is not defined.")

  @property
  def _tests_to_skip(self):
    """List of test names to skip."""
    # Subclasses should over-ride if they want to skip some tests.
    # To skip "test_foo", add "foo" to this list.
    return []

  def _skip_if_tests_to_skip_contains(self, test_name):
    """If self._tests_to_skip contains test_name, raise SkipTest exception.

    See tests below for usage.

    Args:
      test_name:  String name corresponding to a test.

    Raises:
      SkipTest Exception, if test_name is in self._tests_to_skip.
    """
    if test_name in self._tests_to_skip:
      self.skipTest(
          "%s skipped because it was added to self._tests_to_skip."
          % test_name)

  def test_to_dense(self):
    self._skip_if_tests_to_skip_contains("to_dense")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_dense = operator.to_dense()
            if not use_placeholder:
              self.assertAllEqual(shape, op_dense.get_shape())
            op_dense_v, mat_v = sess.run([op_dense, mat], feed_dict=feed_dict)
            self.assertAC(op_dense_v, mat_v)

  def test_det(self):
    self._skip_if_tests_to_skip_contains("det")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_det = operator.determinant()
            if not use_placeholder:
              self.assertAllEqual(shape[:-2], op_det.get_shape())
            op_det_v, mat_det_v = sess.run(
                [op_det, linalg_ops.matrix_determinant(mat)],
                feed_dict=feed_dict)
            self.assertAC(op_det_v, mat_det_v)

  def test_log_abs_det(self):
    self._skip_if_tests_to_skip_contains("log_abs_det")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_log_abs_det = operator.log_abs_determinant()
            mat_log_abs_det = math_ops.log(
                math_ops.abs(linalg_ops.matrix_determinant(mat)))
            if not use_placeholder:
              self.assertAllEqual(shape[:-2], op_log_abs_det.get_shape())
            op_log_abs_det_v, mat_log_abs_det_v = sess.run(
                [op_log_abs_det, mat_log_abs_det], feed_dict=feed_dict)
            self.assertAC(op_log_abs_det_v, mat_log_abs_det_v)

  def test_matmul(self):
    self._skip_if_tests_to_skip_contains("matmul")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          for adjoint in False, True:
            for adjoint_arg in False, True:
              with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat, feed_dict = self._operator_and_mat_and_feed_dict( shape, dtype, use_placeholder=use_placeholder) x = self._make_x(operator, adjoint=adjoint) # If adjoint_arg, compute A X^H^H = A X. if adjoint_arg: op_matmul = operator.matmul( linear_operator_util.matrix_adjoint(x), adjoint=adjoint, adjoint_arg=adjoint_arg) else: op_matmul = operator.matmul(x, adjoint=adjoint) mat_matmul = math_ops.matmul(mat, x, adjoint_a=adjoint) if not use_placeholder: self.assertAllEqual( op_matmul.get_shape(), mat_matmul.get_shape()) op_matmul_v, mat_matmul_v = sess.run( [op_matmul, mat_matmul], feed_dict=feed_dict) self.assertAC(op_matmul_v, mat_matmul_v) def test_solve(self): self._skip_if_tests_to_skip_contains("solve") for use_placeholder in False, True: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: for adjoint in False, True: for adjoint_arg in False, True: with self.test_session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat, feed_dict = self._operator_and_mat_and_feed_dict( shape, dtype, use_placeholder=use_placeholder) rhs = self._make_rhs(operator, adjoint=adjoint) # If adjoint_arg, solve A X = (rhs^H)^H = rhs. if adjoint_arg: op_solve = operator.solve( linear_operator_util.matrix_adjoint(rhs), adjoint=adjoint, adjoint_arg=adjoint_arg) else: op_solve = operator.solve( rhs, adjoint=adjoint, adjoint_arg=adjoint_arg) mat_solve = linalg_ops.matrix_solve(mat, rhs, adjoint=adjoint) if not use_placeholder: self.assertAllEqual( op_solve.get_shape(), mat_solve.get_shape()) op_solve_v, mat_solve_v = sess.run([op_solve, mat_solve], feed_dict=feed_dict) self.assertAC(op_solve_v, mat_solve_v) def test_trace(self): self._skip_if_tests_to_skip_contains("trace") for use_placeholder in False, True: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: with self.test_session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat, feed_dict = self._operator_and_mat_and_feed_dict( shape, dtype, use_placeholder=use_placeholder) op_trace = operator.trace() mat_trace = math_ops.trace(mat) if not use_placeholder: self.assertAllEqual(op_trace.get_shape(), mat_trace.get_shape()) op_trace_v, mat_trace_v = sess.run([op_trace, mat_trace], feed_dict=feed_dict) self.assertAC(op_trace_v, mat_trace_v) def test_add_to_tensor(self): self._skip_if_tests_to_skip_contains("add_to_tensor") for use_placeholder in False, True: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: with self.test_session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat, feed_dict = self._operator_and_mat_and_feed_dict( shape, dtype, use_placeholder=use_placeholder) op_plus_2mat = operator.add_to_tensor(2 * mat) if not use_placeholder: self.assertAllEqual(shape, op_plus_2mat.get_shape()) op_plus_2mat_v, mat_v = sess.run([op_plus_2mat, mat], feed_dict=feed_dict) self.assertAC(op_plus_2mat_v, 3 * mat_v) def test_diag_part(self): self._skip_if_tests_to_skip_contains("diag_part") for use_placeholder in False, True: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: with self.test_session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat, feed_dict = self._operator_and_mat_and_feed_dict( shape, dtype, use_placeholder=use_placeholder) op_diag_part = operator.diag_part() mat_diag_part = array_ops.matrix_diag_part(mat) if not use_placeholder: self.assertAllEqual( 
mat_diag_part.get_shape(), op_diag_part.get_shape()) op_diag_part_, mat_diag_part_ = sess.run( [op_diag_part, mat_diag_part], feed_dict=feed_dict) self.assertAC(op_diag_part_, mat_diag_part_) @six.add_metaclass(abc.ABCMeta) class SquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest): """Base test class appropriate for square operators. Sub-classes must still define all abstractmethods from LinearOperatorDerivedClassTest that are not defined here. """ @property def _shapes_to_test(self): # non-batch operators (n, n) and batch operators. return [(0, 0), (1, 1), (1, 3, 3), (3, 4, 4), (2, 1, 4, 4)] def _make_rhs(self, operator, adjoint): # This operator is square, so rhs and x will have same shape. # adjoint value makes no difference because the operator shape doesn't # change since it is square, but be pedantic. return self._make_x(operator, adjoint=not adjoint) def _make_x(self, operator, adjoint): # Value of adjoint makes no difference because the operator is square. # Return the number of systems to solve, R, equal to 1 or 2. r = self._get_num_systems(operator) # If operator.shape = [B1,...,Bb, N, N] this returns a random matrix of # shape [B1,...,Bb, N, R], R = 1 or 2. if operator.shape.is_fully_defined(): batch_shape = operator.batch_shape.as_list() n = operator.domain_dimension.value x_shape = batch_shape + [n, r] else: batch_shape = operator.batch_shape_tensor() n = operator.domain_dimension_tensor() x_shape = array_ops.concat((batch_shape, [n, r]), 0) return random_normal(x_shape, dtype=operator.dtype) def _get_num_systems(self, operator): """Get some number, either 1 or 2, depending on operator.""" if operator.tensor_rank is None or operator.tensor_rank % 2: return 1 else: return 2 @six.add_metaclass(abc.ABCMeta) class NonSquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest): """Base test class appropriate for generic rectangular operators. Square shapes are never tested by this class, so if you want to test your operator with a square shape, create two test classes, the other subclassing SquareLinearOperatorFullMatrixTest. Sub-classes must still define all abstractmethods from LinearOperatorDerivedClassTest that are not defined here. """ @property def _tests_to_skip(self): """List of test names to skip.""" return ["solve", "det", "log_abs_det"] @property def _shapes_to_test(self): # non-batch operators (n, n) and batch operators. return [(2, 1), (1, 2), (1, 3, 2), (3, 3, 4), (2, 1, 2, 4)] def _make_rhs(self, operator, adjoint): # TODO(langmore) Add once we're testing solve_ls. raise NotImplementedError( "_make_rhs not implemented because we don't test solve") def _make_x(self, operator, adjoint): # Return the number of systems for the argument 'x' for .matmul(x) r = self._get_num_systems(operator) # If operator.shape = [B1,...,Bb, M, N] this returns a random matrix of # shape [B1,...,Bb, N, R], R = 1 or 2. 
if operator.shape.is_fully_defined(): batch_shape = operator.batch_shape.as_list() if adjoint: n = operator.range_dimension.value else: n = operator.domain_dimension.value x_shape = batch_shape + [n, r] else: batch_shape = operator.batch_shape_tensor() if adjoint: n = operator.range_dimension_tensor() else: n = operator.domain_dimension_tensor() x_shape = array_ops.concat((batch_shape, [n, r]), 0) return random_normal(x_shape, dtype=operator.dtype) def _get_num_systems(self, operator): """Get some number, either 1 or 2, depending on operator.""" if operator.tensor_rank is None or operator.tensor_rank % 2: return 1 else: return 2 def random_positive_definite_matrix(shape, dtype, force_well_conditioned=False): """[batch] positive definite matrix. Args: shape: `TensorShape` or Python list. Shape of the returned matrix. dtype: `TensorFlow` `dtype` or Python dtype. force_well_conditioned: Python bool. If `True`, returned matrix has eigenvalues with modulus in `(1, 4)`. Otherwise, eigenvalues are chi-squared random variables. Returns: `Tensor` with desired shape and dtype. """ dtype = dtypes.as_dtype(dtype) if not contrib_tensor_util.is_tensor(shape): shape = tensor_shape.TensorShape(shape) # Matrix must be square. shape[-1].assert_is_compatible_with(shape[-2]) with ops.name_scope("random_positive_definite_matrix"): tril = random_tril_matrix( shape, dtype, force_well_conditioned=force_well_conditioned) return math_ops.matmul(tril, tril, adjoint_b=True) def random_tril_matrix(shape, dtype, force_well_conditioned=False, remove_upper=True): """[batch] lower triangular matrix. Args: shape: `TensorShape` or Python `list`. Shape of the returned matrix. dtype: `TensorFlow` `dtype` or Python dtype force_well_conditioned: Python `bool`. If `True`, returned matrix will have eigenvalues with modulus in `(1, 2)`. Otherwise, eigenvalues are unit normal random variables. remove_upper: Python `bool`. If `True`, zero out the strictly upper triangle. If `False`, the lower triangle of returned matrix will have desired properties, but will not have the strictly upper triangle zero'd out. Returns: `Tensor` with desired shape and dtype. """ with ops.name_scope("random_tril_matrix"): # Totally random matrix. Has no nice properties. tril = random_normal(shape, dtype=dtype) if remove_upper: tril = array_ops.matrix_band_part(tril, -1, 0) # Create a diagonal with entries having modulus in [1, 2]. if force_well_conditioned: maxval = ops.convert_to_tensor(np.sqrt(2.), dtype=dtype.real_dtype) diag = random_sign_uniform( shape[:-1], dtype=dtype, minval=1., maxval=maxval) tril = array_ops.matrix_set_diag(tril, diag) return tril def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None): """Tensor with (possibly complex) Gaussian entries. Samples are distributed like ``` N(mean, stddev^2), if dtype is real, X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex. ``` Args: shape: `TensorShape` or Python list. Shape of the returned tensor. mean: `Tensor` giving mean of normal to sample from. stddev: `Tensor` giving stdev of normal to sample from. dtype: `TensorFlow` `dtype` or numpy dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. 
""" dtype = dtypes.as_dtype(dtype) with ops.name_scope("random_normal"): samples = random_ops.random_normal( shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed) if dtype.is_complex: if seed is not None: seed += 1234 more_samples = random_ops.random_normal( shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed) samples = math_ops.complex(samples, more_samples) return samples def random_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32, seed=None): """Tensor with (possibly complex) Uniform entries. Samples are distributed like ``` Uniform[minval, maxval], if dtype is real, X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex. ``` Args: shape: `TensorShape` or Python list. Shape of the returned tensor. minval: `0-D` `Tensor` giving the minimum values. maxval: `0-D` `Tensor` giving the maximum values. dtype: `TensorFlow` `dtype` or Python dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. """ dtype = dtypes.as_dtype(dtype) with ops.name_scope("random_uniform"): samples = random_ops.random_uniform( shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed) if dtype.is_complex: if seed is not None: seed += 12345 more_samples = random_ops.random_uniform( shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed) samples = math_ops.complex(samples, more_samples) return samples def random_sign_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32, seed=None): """Tensor with (possibly complex) random entries from a "sign Uniform". Letting `Z` be a random variable equal to `-1` and `1` with equal probability, Samples from this `Op` are distributed like ``` Z * X, where X ~ Uniform[minval, maxval], if dtype is real, Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex. ``` Args: shape: `TensorShape` or Python list. Shape of the returned tensor. minval: `0-D` `Tensor` giving the minimum values. maxval: `0-D` `Tensor` giving the maximum values. dtype: `TensorFlow` `dtype` or Python dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. """ dtype = dtypes.as_dtype(dtype) with ops.name_scope("random_sign_uniform"): unsigned_samples = random_uniform( shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed) if seed is not None: seed += 12 signs = math_ops.sign( random_ops.random_uniform( shape, minval=-1., maxval=1., seed=seed)) return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype) def random_normal_correlated_columns( shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, eps=1e-4, seed=None): """Batch matrix with (possibly complex) Gaussian entries and correlated cols. Returns random batch matrix `A` with specified element-wise `mean`, `stddev`, living close to an embedded hyperplane. Suppose `shape[-2:] = (M, N)`. If `M < N`, `A` is a random `M x N` [batch] matrix with iid Gaussian entries. If `M >= N`, then the colums of `A` will be made almost dependent as follows: ``` L = random normal N x N-1 matrix, mean = 0, stddev = 1 / sqrt(N - 1) B = random normal M x N-1 matrix, mean = 0, stddev = stddev. G = (L B^H)^H, a random normal M x N matrix, living on N-1 dim hyperplane E = a random normal M x N matrix, mean = 0, stddev = eps mu = a constant M x N matrix, equal to the argument "mean" A = G + E + mu ``` Args: shape: Python list of integers. Shape of the returned tensor. Must be at least length two. mean: `Tensor` giving mean of normal to sample from. 
stddev: `Tensor` giving stdev of normal to sample from. dtype: `TensorFlow` `dtype` or numpy dtype eps: Distance each column is perturbed from the low-dimensional subspace. seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. Raises: ValueError: If `shape` is not at least length 2. """ dtype = dtypes.as_dtype(dtype) if len(shape) < 2: raise ValueError( "Argument shape must be at least length 2. Found: %s" % shape) # Shape is the final shape, e.g. [..., M, N] shape = list(shape) batch_shape = shape[:-2] m, n = shape[-2:] # If there is only one column, "they" are by definition correlated. if n < 2 or n < m: return random_normal( shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed) # Shape of the matrix with only n - 1 columns that we will embed in higher # dimensional space. smaller_shape = batch_shape + [m, n - 1] # Shape of the embedding matrix, mapping batch matrices # from [..., N-1, M] to [..., N, M] embedding_mat_shape = batch_shape + [n, n - 1] # This stddev for the embedding_mat ensures final result has correct stddev. stddev_mat = 1 / np.sqrt(n - 1) with ops.name_scope("random_normal_correlated_columns"): smaller_mat = random_normal( smaller_shape, mean=0.0, stddev=stddev_mat, dtype=dtype, seed=seed) if seed is not None: seed += 1287 embedding_mat = random_normal(embedding_mat_shape, dtype=dtype, seed=seed) embedded_t = math_ops.matmul(embedding_mat, smaller_mat, transpose_b=True) embedded = array_ops.matrix_transpose(embedded_t) mean_mat = array_ops.ones_like(embedded) * mean return embedded + random_normal(shape, stddev=eps, dtype=dtype) + mean_mat
npuichigo/ttsflow
third_party/tensorflow/tensorflow/contrib/linalg/python/ops/linear_operator_test_util.py
Python
apache-2.0
25,544
[ "Gaussian" ]
7f45dfcb9a678372843d8e0ee42ee16822ae0442970d7d0f1735ab329fe0ff01
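# --- Added usage sketch (not part of the record above): the random_* helpers
# are usable on their own when building operators in tests. A minimal example,
# assuming the TF 1.x contrib import path shown in the record above and
# graph-mode Session execution (both assumptions of this sketch):
import tensorflow as tf
from tensorflow.contrib.linalg.python.ops import linear_operator_test_util as lin_test_util

# batch of two well-conditioned 4x4 symmetric positive definite matrices
matrix = lin_test_util.random_positive_definite_matrix(
    shape=[2, 4, 4], dtype=tf.float64, force_well_conditioned=True)

with tf.Session() as sess:
    m = sess.run(matrix)
    # each matrix should equal its own transpose (up to float rounding)
    assert ((m - m.transpose(0, 2, 1)) ** 2).sum() < 1e-10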
''' This module contains all pythran backends. * Cxx dumps the AST into C++ code * Python dumps the AST into Python code ''' from pythran.analyses import ArgumentEffects, BoundedExpressions, Dependencies from pythran.analyses import LocalDeclarations, GlobalDeclarations, Scope from pythran.analyses import YieldPoints, IsAssigned, ASTMatcher, AST_any from pythran.analyses import RangeValues, PureExpressions from pythran.cxxgen import Template, Include, Namespace, CompilationUnit from pythran.cxxgen import Statement, Block, AnnotatedStatement, Typedef from pythran.cxxgen import Value, FunctionDeclaration, EmptyStatement from pythran.cxxgen import FunctionBody, Line, ReturnStatement, Struct, Assign from pythran.cxxgen import For, While, TryExcept, ExceptHandler, If, AutoFor from pythran.cxxtypes import Assignable, DeclType, NamedType from pythran.openmp import OMPDirective from pythran.passmanager import Backend from pythran.syntax import PythranSyntaxError from pythran.tables import operator_to_lambda, MODULES, pythran_ward from pythran.types.conversion import PYTYPE_TO_CTYPE_TABLE, TYPE_TO_SUFFIX from pythran.types.types import Types from pythran import metadata, unparse from math import isnan, isinf import ast import cStringIO import os class Python(Backend): ''' Produces a Python representation of the AST. >>> import ast, passmanager >>> node = ast.parse("print 'hello world'") >>> pm = passmanager.PassManager('test') >>> print pm.dump(Python, node) print 'hello world' ''' def __init__(self): self.result = '' super(Python, self).__init__() def visit(self, node): output = cStringIO.StringIO() unparse.Unparser(node, output) self.result = output.getvalue() def templatize(node, types, default_types=None): if not default_types: default_types = [None] * len(types) if types: return Template( ["typename {0} {1}".format(t, "= {0}".format(d) if d else "") for t, d in zip(types, default_types)], node) else: return node def strip_exp(s): if s.startswith('(') and s.endswith(')'): return s[1:-1] else: return s def cxx_loop(fun): """ Decorator for loop node (For and While) to handle "else" branching. Decorated node will save flags for a goto statement used instead of usual break and add this flag at the end of the else statements. Examples -------- >> for i in xrange(12): >> if i == 5: >> break >> else: >> ... some code ... Becomes >> for(type i : xrange(12)) >> if(i==5) >> goto __no_breaking0; >> ... some code ... >> __no_breaking0; """ def loop_visitor(self, node): """ New decorate function. It push the breaking flag, run the visitor and add "else" statements. """ if node.orelse: break_handler = "__no_breaking{0}".format(len(self.break_handlers)) else: break_handler = None self.break_handlers.append(break_handler) res = fun(self, node) self.break_handlers.pop() # handle the body of the for loop if break_handler: orelse = map(self.visit, node.orelse) orelse_label = Statement("{0}:".format(break_handler)) return Block([res] + orelse + [orelse_label]) else: return res return loop_visitor class Cxx(Backend): """ Produces a C++ representation of the AST. Attributes ---------- ldecls : {ast.Name} set of local declarations. break_handler : [str] It contains flags for goto statements to jump on break in case of orelse statement in loop. None means there are no orelse statement so no jump are requiered. 
(else in loop means : don't execute if loop is terminated with a break) >>> import ast, passmanager >>> node = ast.parse("def foo(): print 'hello world'") >>> pm = passmanager.PassManager('test') >>> r = pm.dump(Cxx, node) >>> print r #include <pythonic/include/__builtin__/print.hpp> #include <pythonic/include/__builtin__/str.hpp> #include <pythonic/__builtin__/print.hpp> #include <pythonic/__builtin__/str.hpp> namespace __pythran_test { ; struct foo { typedef void callable; ; struct type { typedef typename pythonic::returnable<void>::type result_type; } ; typename type::result_type operator()() const; ; } ; typename foo::type::result_type foo::operator()() const { pythonic::__builtin__::print("hello world"); } } """ # recover previous generator state generator_state_holder = "__generator_state" generator_state_value = "__generator_value" # flags the last statement of a generator final_statement = "that_is_all_folks" def __init__(self): """ Basic initialiser gathering analysis informations. """ self.declarations = list() self.definitions = list() self.break_handlers = list() self.result = None self.ldecls = set() super(Cxx, self).__init__(Dependencies, GlobalDeclarations, BoundedExpressions, Types, ArgumentEffects, Scope, RangeValues, PureExpressions) # mod def visit_Module(self, node): """ Build a compilation unit. """ # build all types headers = [Include(os.path.join("pythonic", "include", *t) + ".hpp") for t in self.dependencies] headers += [Include(os.path.join("pythonic", *t) + ".hpp") for t in self.dependencies] body = map(self.visit, node.body) nsbody = body + self.declarations + self.definitions ns = Namespace(pythran_ward + self.passmanager.module_name, nsbody) self.result = CompilationUnit(headers + [ns]) # local declaration processing def process_locals(self, node, node_visited, *skipped): """ Declare variable local to node and insert declaration before. Not possible for function yielding values. """ local_vars = self.scope[node].difference(skipped) if not local_vars or self.yields: return node_visited # no processing locals_visited = [] for varname in local_vars: vartype = self.local_types[varname] decl = Statement("{} {}".format(vartype, varname)) locals_visited.append(decl) self.ldecls = {ld for ld in self.ldecls if ld.id not in local_vars} return Block(locals_visited + [node_visited]) def process_omp_attachements(self, node, stmt, index=None): """ Add OpenMP pragma on the correct stmt in the correct order. stmt may be a list. On this case, index have to be specify to add OpenMP on the correct statement. 
""" omp_directives = metadata.get(node, OMPDirective) if omp_directives: directives = list() for directive in reversed(omp_directives): directive.deps = map(self.visit, directive.deps) directives.append(directive) if index is None: stmt = AnnotatedStatement(stmt, directives) else: stmt[index] = AnnotatedStatement(stmt[index], directives) return stmt # stmt def visit_FunctionDef(self, node): class CachedTypeVisitor: class CachedType: def __init__(self, s): self.s = s def generate(self, ctx): return self.s def __init__(self, other=None): if other: self.cache = other.cache.copy() self.rcache = other.rcache.copy() self.mapping = other.mapping.copy() else: self.cache = dict() self.rcache = dict() self.mapping = dict() def __call__(self, node): if node not in self.mapping: t = node.generate(self) if t in self.rcache: self.mapping[node] = self.mapping[self.rcache[t]] self.cache[node] = self.cache[self.rcache[t]] else: self.rcache[t] = node self.mapping[node] = len(self.mapping) self.cache[node] = t return CachedTypeVisitor.CachedType( "__type{0}".format(self.mapping[node])) def typedefs(self): l = sorted(self.mapping.items(), key=lambda x: x[1]) L = list() visited = set() # the same value must not be typedefed twice for k, v in l: if v not in visited: typename = "__type" + str(v) L.append(Typedef(Value(self.cache[k], typename))) visited.add(v) return L # prepare context and visit function body fargs = node.args.args formal_args = [arg.id for arg in fargs] formal_types = ["argument_type" + str(i) for i in xrange(len(fargs))] self.ldecls = set(self.passmanager.gather(LocalDeclarations, node)) self.local_names = {sym.id for sym in self.ldecls}.union(formal_args) self.extra_declarations = [] lctx = CachedTypeVisitor() self.local_types = {n: self.types[n].generate(lctx) for n in self.ldecls} self.local_types.update((n.id, t) for n, t in self.local_types.items()) # choose one node among all the ones with the same name for each name self.ldecls = set({n.id: n for n in self.ldecls}.itervalues()) # 0 is used as initial_state, thus the +1 self.yields = {k: (1 + v, "yield_point{0}".format(1 + v)) for (v, k) in enumerate(self.passmanager.gather(YieldPoints, node))} # gather body dump operator_body = map(self.visit, node.body) # compute arg dump default_arg_values = ( [None] * (len(node.args.args) - len(node.args.defaults)) + [self.visit(n) for n in node.args.defaults]) default_arg_types = ( [None] * (len(node.args.args) - len(node.args.defaults)) + [self.types[n] for n in node.args.defaults]) # compute type dump result_type = self.types[node][0] callable_type = Typedef(Value("void", "callable")) pure_type = (Typedef(Value("void", "pure")) if node in self.pure_expressions else EmptyStatement()) def make_function_declaration(rtype, name, ftypes, fargs, defaults=None, attributes=[]): if defaults is None: defaults = [None] * len(ftypes) arguments = list() for i, (t, a, d) in enumerate(zip(ftypes, fargs, defaults)): if self.yields: rvalue_ref = "" elif self.argument_effects[node][i]: rvalue_ref = "&&" else: rvalue_ref = " const &" argument = Value( t + rvalue_ref, "{0}{1}".format(a, "= {0}".format(d) if d else "")) arguments.append(argument) return FunctionDeclaration(Value(rtype, name), arguments, *attributes) def make_const_function_declaration(rtype, name, ftypes, fargs, defaults=None): return make_function_declaration(rtype, name, ftypes, fargs, defaults, ["const"]) if self.yields: # generator case # a generator has a call operator that returns the iterator next_name = "__generator__{0}".format(node.name) 
instanciated_next_name = "{0}{1}".format( next_name, "<{0}>".format( ", ".join(formal_types)) if formal_types else "") operator_body.append( Statement("{0}: return result_type();".format( Cxx.final_statement))) next_declaration = [ FunctionDeclaration(Value("result_type", "next"), []), EmptyStatement()] # empty statement to force a comma ... # the constructors next_constructors = [ FunctionBody( FunctionDeclaration(Value("", next_name), []), Line(': pythonic::yielder() {}') )] if formal_types: # If all parameters have a default value, we don't need default # constructor if default_arg_values and all(default_arg_values): next_constructors = list() next_constructors.append(FunctionBody( make_function_declaration("", next_name, formal_types, formal_args, default_arg_values), Line(": {0} {{ }}".format( ", ".join(["pythonic::yielder()"] + map("{0}({0})".format, formal_args)))) )) next_iterator = [ FunctionBody( FunctionDeclaration(Value("void", "operator++"), []), Block([Statement("next()")])), FunctionBody( FunctionDeclaration( Value("typename {0}::result_type".format( instanciated_next_name), "operator*"), [], "const"), Block([ ReturnStatement( Cxx.generator_state_value)])), FunctionBody( FunctionDeclaration( Value("pythonic::types::generator_iterator<{0}>" .format(next_name), "begin"), []), Block([Statement("next()"), ReturnStatement( "pythonic::types::generator_iterator<{0}>" "(*this)".format(next_name))])), FunctionBody( FunctionDeclaration( Value("pythonic::types::generator_iterator<{0}>" .format(next_name), "end"), []), Block([ReturnStatement( "pythonic::types::generator_iterator<{0}>()" .format(next_name))])) ] next_signature = templatize( FunctionDeclaration( Value( "typename {0}::result_type".format( instanciated_next_name), "{0}::next".format(instanciated_next_name)), []), formal_types) next_body = operator_body # the dispatch table at the entry point next_body.insert(0, Statement("switch({0}) {{ {1} }}".format( Cxx.generator_state_holder, " ".join("case {0}: goto {1};".format(num, where) for (num, where) in sorted( self.yields.itervalues(), key=lambda x: x[0]))))) ctx = CachedTypeVisitor(lctx) next_members = ([Statement("{0} {1}".format(ft, fa)) for (ft, fa) in zip(formal_types, formal_args)] + [Statement( "{0} {1}".format(self.types[k].generate(ctx), k.id)) for k in self.ldecls] + [Statement("{0} {1}".format(v, k)) for k, v in self.extra_declarations] + [Statement( "typename {0}::result_type {1}".format( instanciated_next_name, Cxx.generator_state_value))]) extern_typedefs = [Typedef(Value(t.generate(ctx), t.name)) for t in self.types[node][1] if not t.isweak()] iterator_typedef = [ Typedef( Value("pythonic::types::generator_iterator<{0}>".format( "{0}<{1}>".format(next_name, ", ".join(formal_types)) if formal_types else next_name), "iterator")), Typedef(Value(result_type.generate(ctx), "value_type"))] result_typedef = [ Typedef(Value(result_type.generate(ctx), "result_type"))] extra_typedefs = (ctx.typedefs() + extern_typedefs + iterator_typedef + result_typedef) next_struct = templatize( Struct(next_name, extra_typedefs + next_members + next_constructors + next_iterator + next_declaration, "pythonic::yielder"), formal_types) next_definition = FunctionBody(next_signature, Block(next_body)) operator_declaration = [ templatize( make_const_function_declaration( instanciated_next_name, "operator()", formal_types, formal_args, default_arg_values), formal_types, default_arg_types), EmptyStatement()] operator_signature = make_const_function_declaration( instanciated_next_name, 
"{0}::operator()".format(node.name), formal_types, formal_args) operator_definition = FunctionBody( templatize(operator_signature, formal_types), Block([ReturnStatement("{0}({1})".format( instanciated_next_name, ", ".join(formal_args)))]) ) topstruct_type = templatize( Struct("type", extra_typedefs), formal_types) topstruct = Struct( node.name, [topstruct_type, callable_type, pure_type] + operator_declaration) self.declarations.append(next_struct) self.definitions.append(next_definition) else: # regular function case # a function has a call operator to be called # and a default constructor to create instances fscope = "type{0}::".format("<{0}>".format(", ".join(formal_types)) if formal_types else "") ffscope = "{0}::{1}".format(node.name, fscope) operator_declaration = [ templatize( make_const_function_declaration( "typename {0}result_type".format(fscope), "operator()", formal_types, formal_args, default_arg_values), formal_types, default_arg_types), EmptyStatement() ] operator_signature = make_const_function_declaration( "typename {0}result_type".format(ffscope), "{0}::operator()".format(node.name), formal_types, formal_args) ctx = CachedTypeVisitor(lctx) operator_local_declarations = ( [Statement("{0} {1}".format( self.types[k].generate(ctx), k.id)) for k in self.ldecls] + [Statement("{0} {1}".format(v, k)) for k, v in self.extra_declarations] ) dependent_typedefs = ctx.typedefs() operator_definition = FunctionBody( templatize(operator_signature, formal_types), Block(dependent_typedefs + operator_local_declarations + operator_body) ) ctx = CachedTypeVisitor() extra_typedefs = ( [Typedef(Value(t.generate(ctx), t.name)) for t in self.types[node][1] if not t.isweak()] + [Typedef(Value( result_type.generate(ctx), "result_type"))] ) extra_typedefs = ctx.typedefs() + extra_typedefs return_declaration = [ templatize( Struct("type", extra_typedefs), formal_types, default_arg_types ) ] topstruct = Struct(node.name, [callable_type, pure_type] + return_declaration + operator_declaration) self.declarations.append(topstruct) self.definitions.append(operator_definition) return EmptyStatement() def visit_Return(self, node): if self.yields: return Block([ Statement("{0} = -1".format( Cxx.generator_state_holder)), Statement("goto {0}".format( Cxx.final_statement)) ]) else: stmt = ReturnStatement(self.visit(node.value)) return self.process_omp_attachements(node, stmt) def visit_Delete(self, node): return EmptyStatement() def visit_Yield(self, node): num, label = self.yields[node] return "".join(n for n in Block([ Assign(Cxx.generator_state_holder, num), ReturnStatement("{0} = {1}".format( Cxx.generator_state_value, self.visit(node.value))), Statement("{0}:".format(label)) ]).generate()) def visit_Assign(self, node): """ Create Assign node for final Cxx representation. It tries to handle multi assignment like: >> a = b = c = 2 If only one local variable is assigned, typing is added: >> int a = 2; TODO: Handle case of multi-assignement for some local variables. 
Finally, process OpenMP clause like #pragma omp atomic """ if not all(isinstance(n, (ast.Name, ast.Subscript)) for n in node.targets): raise PythranSyntaxError( "Must assign to an identifier or a subscript", node) value = self.visit(node.value) targets = [self.visit(t) for t in node.targets] alltargets = "= ".join(targets) islocal = (len(targets) == 1 and isinstance(node.targets[0], ast.Name) and node.targets[0].id in self.scope[node]) if islocal and not self.yields: # remove this decl from local decls tdecls = {t.id for t in node.targets} self.ldecls = {d for d in self.ldecls if d.id not in tdecls} # add a local declaration alltargets = '{} {}'.format(self.local_types[node.targets[0]], alltargets) stmt = Assign(alltargets, value) return self.process_omp_attachements(node, stmt) def visit_AugAssign(self, node): value = self.visit(node.value) target = self.visit(node.target) l = operator_to_lambda[type(node.op)] if type(node.op) in (ast.FloorDiv, ast.Mod, ast.Pow): stmt = Assign(target, l(target, value)) else: stmt = Statement(l(target, '')[1:-2] + '= {0}'.format(value)) return self.process_omp_attachements(node, stmt) def visit_Print(self, node): values = [self.visit(n) for n in node.values] stmt = Statement("pythonic::__builtin__::print{0}({1})".format( "" if node.nl else "_nonl", ", ".join(values)) ) return self.process_omp_attachements(node, stmt) def gen_for(self, node, target, local_iter, local_iter_decl, loop_body): """ Create For representation on iterator for Cxx generation. Examples -------- >> "omp parallel for" >> for i in xrange(10): >> ... do things ... Becomes >> "omp parallel for shared(__iterX)" >> for(decltype(__iterX)::iterator __targetX = __iterX.begin(); __targetX < __iterX.end(); ++__targetX) >> typename decltype(__targetX)::reference i = *__targetX; >> ... do things ... It the case of not local variable, typing for `i` disappear and typing is removed for iterator in case of yields statement in function. """ # Choose target variable for iterator (which is iterator type) local_target = "__target{0}".format(len(self.break_handlers)) local_target_decl = NamedType("typename decltype({0})::iterator". format(local_iter)) # For yield function, all variables are globals. if self.yields: self.extra_declarations.append((local_target, local_target_decl,)) local_target_decl = "" # If variable is local to the for body it's a ref to the iterator value # type if node.target.id in self.scope[node] and not self.yields: self.ldecls = {d for d in self.ldecls if d.id != node.target.id} local_type = "typename decltype({})::reference ".format( local_target) else: local_type = "" # Assign iterable value loop_body_prelude = Statement("{} {}= *{}".format(local_type, target, local_target)) # Create the loop loop = For("{0} {1} = {2}.begin()".format(local_target_decl, local_target, local_iter), "{0} < {1}.end()".format(local_target, local_iter), "++{0}".format(local_target), Block([loop_body_prelude, loop_body])) return [self.process_omp_attachements(node, loop)] def handle_real_loop_comparison(self, args, stmts, target, upper_bound, step): """ Handle comparison for real loops. Add the correct comparison operator if possible or set a runtime __cmp comparison. 
""" # order is 1 for increasing loop, -1 for decreasing loop and 0 if it is # not known at compile time if len(args) <= 2: order = 1 elif isinstance(args[2], ast.Num): order = -1 + 2 * (int(args[2].n) > 0) elif isinstance(args[1], ast.Num) and isinstance(args[0], ast.Num): order = -1 + 2 * (int(args[1].n) > int(args[0].n)) else: order = 0 if order: comparison = "{} < {}" if order == 1 else "{} > {}" comparison = comparison.format(target, upper_bound) for_pos = 0 else: cmp_type = "std::function<bool(long, long)> " cmp_op = "__cmp{}".format(len(self.break_handlers)) # For yield function, all variables are globals. if self.yields: self.extra_declarations.append((cmp_op, cmp_type)) cmp_type = "" stmts.insert(0, Statement("{} {} = std::less<long>()".format( cmp_type, cmp_op))) stmts.insert(1, If("{} < 0L".format(step), Statement("{} = std::greater<long>()".format( cmp_op)))) for_pos = 2 comparison = "{0}({1}, {2})".format(cmp_op, target, upper_bound) return comparison, for_pos def gen_c_for(self, node, local_iter, loop_body): """ Create C For representation for Cxx generation. Examples -------- >> for i in xrange(10): >> ... do things ... Becomes >> for(long i = 0, __targetX = 10; i < __targetX; i += 1) >> ... do things ... Or >> for i in xrange(10, 0, -1): >> ... do things ... Becomes >> for(long i = 10, __targetX = 0; i > __targetX; i += -1) >> ... do things ... Or >> for i in xrange(a, b, c): >> ... do things ... Becomes >> std::function<bool(int, int)> __cmpX = std::less<long>(); >> if(c < 0) >> __cmpX = std::greater<long>(); >> for(long i = a, __targetX = b; __cmpX(i, __targetX); i += c) >> ... do things ... It the case of not local variable, typing for `i` disappear """ args = node.iter.args step = "1L" if len(args) <= 2 else self.visit(args[2]) if len(args) == 1: lower_bound = "0L" upper_value = self.visit(args[0]) else: lower_bound = self.visit(args[0]) upper_value = self.visit(args[1]) upper_bound = "__target{0}".format(len(self.break_handlers)) upper_type = iter_type = "long " # If variable is local to the for body keep it local... if node.target.id in self.scope[node] and not self.yields: self.ldecls = {d for d in self.ldecls if d.id != node.target.id} loop = list() else: # For yield function, upper_bound is globals. if self.yields: self.extra_declarations.append((upper_bound, upper_type)) upper_type = "" iter_type = "" # Back one step to keep Python behavior (except for break) loop = [If("{} == {}".format(local_iter, upper_bound), Statement("{} -= {}".format(local_iter, step)))] comparison, for_pos = self.handle_real_loop_comparison(args, loop, local_iter, upper_bound, step) forloop = For("{0} {1} = {2}".format(iter_type, local_iter, lower_bound), comparison, "{0} += {1}".format(local_iter, step), loop_body) loop.insert(for_pos, self.process_omp_attachements(node, forloop)) # Store upper bound value header = [Statement("{0} {1} = {2}".format(upper_type, upper_bound, upper_value))] return header, loop def handle_omp_for(self, node, local_iter): """ Fix OpenMP directives on For loops. Add the target as private variable as a new variable may have been introduce to handle cxx iterator. Also, add the iterator as shared variable as all 'parallel for chunck' have to use the same iterator. 
""" for directive in metadata.get(node, OMPDirective): if any(key in directive.s for key in (' parallel ', ' task ')): # Eventually add local_iter in a shared clause as iterable is # shared in the for loop (for every clause with datasharing) directive.s += ' shared({})' directive.deps.append(ast.Name(local_iter, ast.Load())) target = node.target assert isinstance(target, ast.Name) hasfor = 'for' in directive.s nodefault = 'default' not in directive.s noindexref = all(isinstance(x, ast.Name) and x.id != target.id for x in directive.deps) if (hasfor and nodefault and noindexref and target.id not in self.scope[node]): # Target is private by default in omp but iterator use may # introduce an extra variable directive.s += ' private({})' directive.deps.append(ast.Name(target.id, ast.Load())) def can_use_autofor(self, node): """ Check if given for Node can use autoFor syntax. To use auto_for: - iterator should have local scope - yield should not be use - OpenMP pragma should not be use TODO : Yield should block only if it is use in the for loop, not in the whole function. """ auto_for = (type(node.target) is ast.Name and node.target.id in self.scope[node]) auto_for &= not self.yields auto_for &= not metadata.get(node, OMPDirective) return auto_for def can_use_c_for(self, node): """ Check if a for loop can use classic C syntax. To use C syntax: - target should not be assign in the loop - xrange should be use as iterator - order have to be known at compile time or OpenMP should not be use """ assert isinstance(node.target, ast.Name) pattern = ast.Call(func=ast.Attribute(value=ast.Name(id='__builtin__', ctx=ast.Load()), attr='xrange', ctx=ast.Load()), args=AST_any(), keywords=[], starargs=None, kwargs=None) is_assigned = {node.target.id: False} [is_assigned.update(self.passmanager.gather(IsAssigned, stmt)) for stmt in node.body] if (node.iter not in ASTMatcher(pattern).search(node.iter) or is_assigned[node.target.id]): return False args = node.iter.args if (len(args) > 2 and (not isinstance(args[2], ast.Num) and not (isinstance(args[1], ast.Num) and isinstance(args[0], ast.Num))) and metadata.get(node, OMPDirective)): return False return True @cxx_loop def visit_For(self, node): """ Create For representation for Cxx generation. Examples -------- >> for i in xrange(10): >> ... work ... Becomes >> typename returnable<decltype(__builtin__.xrange(10))>::type __iterX = __builtin__.xrange(10); >> ... possible container size reservation ... >> for (typename decltype(__iterX)::iterator::reference i: __iterX) >> ... the work ... This function also handle assignment for local variables. We can notice that three kind of loop are possible: - Normal for loop on iterator - Autofor loop. - Normal for loop using integer variable iteration Kind of loop used depend on OpenMP, yield use and variable scope. """ if not isinstance(node.target, ast.Name): raise PythranSyntaxError( "Using something other than an identifier as loop target", node.target) target = self.visit(node.target) # Handle the body of the for loop loop_body = Block(map(self.visit, node.body)) # Declare local variables at the top of the loop body loop_body = self.process_locals(node, loop_body, node.target.id) iterable = self.visit(node.iter) if self.can_use_c_for(node): header, loop = self.gen_c_for(node, target, loop_body) else: # Iterator declaration local_iter = "__iter{0}".format(len(self.break_handlers)) local_iter_decl = Assignable(DeclType(iterable)) self.handle_omp_for(node, local_iter) # For yield function, iterable is globals. 
            if self.yields:
                self.extra_declarations.append((local_iter, local_iter_decl,))
                local_iter_decl = ""

            # Assign iterable
            # For the C loop, this avoids issues if the upper bound is
            # reassigned in the loop
            header = [Statement("{0} {1} = {2}".format(local_iter_decl,
                                                       local_iter,
                                                       iterable))]
            if self.can_use_autofor(node):
                self.ldecls = {d for d in self.ldecls
                               if d.id != node.target.id}
                autofor = AutoFor(target, local_iter, loop_body)
                loop = [self.process_omp_attachements(node, autofor)]
            else:
                loop = self.gen_for(node, target, local_iter, local_iter_decl,
                                    loop_body)

        # For xxxComprehension, it is replaced by a for loop. In this case,
        # pre-allocate the size of the container.
        for comp in metadata.get(node, metadata.Comprehension):
            header.append(Statement("pythonic::utils::reserve({0},{1})".format(
                comp.target, iterable)))

        return Block(header + loop)

    @cxx_loop
    def visit_While(self, node):
        """
        Create a While node for Cxx generation.

        It is a cxx_loop to handle the else clause.
        """
        test = self.visit(node.test)
        body = [self.visit(n) for n in node.body]
        stmt = While(test, Block(body))
        return self.process_omp_attachements(node, stmt)

    def visit_TryExcept(self, node):
        body = [self.visit(n) for n in node.body]
        except_ = list()
        [except_.extend(self.visit(n)) for n in node.handlers]
        return TryExcept(Block(body), except_, None)

    def visit_ExceptHandler(self, node):
        name = self.visit(node.name) if node.name else None
        body = [self.visit(m) for m in node.body]
        if not isinstance(node.type, ast.Tuple):
            return [ExceptHandler(
                node.type and node.type.attr,
                Block(body),
                name)]
        else:
            elts = [p.attr for p in node.type.elts]
            return [ExceptHandler(o, Block(body), name) for o in elts]

    def visit_If(self, node):
        test = self.visit(node.test)
        body = [self.visit(n) for n in node.body]
        orelse = [self.visit(n) for n in node.orelse]
        if isinstance(node.test, ast.Num) and node.test.n == 1:
            stmt = Block(body)
        else:
            stmt = If(test, Block(body), Block(orelse) if orelse else None)
        return self.process_locals(node,
                                   self.process_omp_attachements(node, stmt))

    def visit_Raise(self, node):
        type = node.type and self.visit(node.type)
        if node.inst:
            if isinstance(node.inst, ast.Tuple):
                inst = ['"{0}"'.format(e.s) for e in node.inst.elts]
            else:
                inst = [node.inst.s]
        else:
            inst = None
        if inst:
            return Statement("throw {0}({1})".format(type, ", ".join(inst)))
        else:
            return Statement("throw {0}".format(type or ""))

    def visit_Assert(self, node):
        params = [self.visit(node.test), node.msg and self.visit(node.msg)]
        sparams = ", ".join(map(strip_exp, filter(None, params)))
        return Statement("pythonic::pythran_assert({0})".format(sparams))

    def visit_Import(self, node):
        return EmptyStatement()  # everything is already #included

    def visit_ImportFrom(self, node):
        assert False, "should be filtered out by the expand_import pass"

    def visit_Expr(self, node):
        # turn docstring into comments
        if type(node.value) is ast.Str:
            stmt = Line("//" + node.value.s.replace('\n', '\n//'))
        # other expressions are processed normally
        else:
            stmt = Statement(self.visit(node.value))
        return self.process_locals(node,
                                   self.process_omp_attachements(node, stmt))

    def visit_Pass(self, node):
        stmt = EmptyStatement()
        return self.process_omp_attachements(node, stmt)

    def visit_Break(self, node):
        """
        Generate a break statement in most cases, and a goto for the orelse
        clause.
See Also : cxx_loop """ if self.break_handlers[-1]: return Statement("goto {0}".format(self.break_handlers[-1])) else: return Statement("break") def visit_Continue(self, node): return Statement("continue") # expr def visit_BoolOp(self, node): values = [self.visit(value) for value in node.values] if node in self.bounded_expressions: op = operator_to_lambda[type(node.op)] elif isinstance(node.op, ast.And): def op(l, r): return '({0} and {1})'.format(l, r) elif isinstance(node.op, ast.Or): def op(l, r): return '({0} or {1})'.format(l, r) return reduce(op, values) def visit_BinOp(self, node): left = self.visit(node.left) right = self.visit(node.right) if isinstance(node.left, ast.Str): left = "pythonic::types::str({})".format(left) elif isinstance(node.right, ast.Str): right = "pythonic::types::str({})".format(right) return operator_to_lambda[type(node.op)](left, right) def visit_UnaryOp(self, node): operand = self.visit(node.operand) return operator_to_lambda[type(node.op)](operand) def visit_IfExp(self, node): test = self.visit(node.test) body = self.visit(node.body) orelse = self.visit(node.orelse) return "(pythonic::__builtin__::bool_({0}) ? {1} : {2})".format(test, body, orelse) def visit_List(self, node): if not node.elts: # empty list return "pythonic::__builtin__::proxy::list{}()" else: elts = [self.visit(n) for n in node.elts] # constructor disambiguation, clang++ workaround if len(elts) == 1: elts.append('pythonic::types::single_value()') return "{0}({{ {1} }})".format( Assignable(self.types[node]), ", ".join(elts)) else: return "{0}({{ {1} }})".format( Assignable(self.types[node]), ", ".join(elts)) def visit_Set(self, node): if not node.elts: # empty set return "pythonic::__builtin__::proxy::set{}()" else: elts = [self.visit(n) for n in node.elts] return "{0}({{ {1} }})".format( Assignable(self.types[node]), ", ".join(elts)) def visit_Dict(self, node): if not node.keys: # empty dict return "pythonic::__builtin__::proxy::dict{}()" else: keys = [self.visit(n) for n in node.keys] values = [self.visit(n) for n in node.values] return "{0}({{ {1} }})".format( Assignable(self.types[node]), ", ".join("{{ {0}, {1} }}".format(k, v) for k, v in zip(keys, values))) def visit_Tuple(self, node): elts = map(self.visit, node.elts or ()) return "pythonic::types::make_tuple({0})".format(", ".join(elts)) def visit_Compare(self, node): left = self.visit(node.left) ops = [operator_to_lambda[type(n)] for n in node.ops] comparators = [self.visit(n) for n in node.comparators] all_compare = zip(ops, comparators) op, right = all_compare[0] output = [op(left, right)] left = right for op, right in all_compare[1:]: output.append(op(left, right)) left = right return " and ".join(output) def visit_Call(self, node): args = [self.visit(n) for n in node.args] func = self.visit(node.func) # special hook for getattr, as we cannot represent it in C++ if func == 'pythonic::__builtin__::proxy::getattr{}': return ('pythonic::__builtin__::getattr<{}>({})' .format('pythonic::types::attr::' + node.args[1].s.upper(), args[0])) else: return "{}({})".format(func, ", ".join(args)) def visit_Num(self, node): if type(node.n) == complex: return "{0}({1}, {2})".format( PYTYPE_TO_CTYPE_TABLE[complex], repr(node.n.real), repr(node.n.imag)) elif type(node.n) == long: return 'pythran_long({0})'.format(node.n) elif isnan(node.n): return 'pythonic::numpy::nan' elif isinf(node.n): return ('+' if node.n > 0 else '-') + 'pythonic::numpy::inf' else: return repr(node.n) + TYPE_TO_SUFFIX.get(type(node.n), "") def visit_Str(self, node): quoted = 
node.s.replace('"', '\\"').replace('\n', '\\n"\n"') return '"' + quoted + '"' def visit_Attribute(self, node): def rec(w, n): if isinstance(n, ast.Name): return w[n.id], (n.id,) elif isinstance(n, ast.Attribute): r = rec(w, n.value) return r[0][n.attr], r[1] + (n.attr,) obj, path = rec(MODULES, node) path = ('pythonic',) + path return ('::'.join(path) if obj.isliteral() else ('::'.join(path[:-1]) + '::proxy::' + path[-1] + '{}')) def visit_Subscript(self, node): value = self.visit(node.value) # we cannot overload the [] operator in that case if isinstance(node.value, ast.Str): value = 'pythonic::types::str({})'.format(value) # positive static index case if (isinstance(node.slice, ast.Index) and isinstance(node.slice.value, ast.Num) and (node.slice.value.n >= 0) and any(isinstance(node.slice.value.n, t) for t in (int, long))): return "std::get<{0}>({1})".format(node.slice.value.n, value) # slice optimization case elif (isinstance(node.slice, ast.Slice) and (isinstance(node.ctx, ast.Store) or node not in self.bounded_expressions)): slice = self.visit(node.slice) return "{1}({0})".format(slice, value) # extended slice case elif isinstance(node.slice, ast.ExtSlice): slice = self.visit(node.slice) return "{1}({0})".format(','.join(slice), value) # positive indexing case elif (isinstance(node.slice, ast.Index) and isinstance(node.slice.value, ast.Name) and self.range_values[node.slice.value.id].low >= 0): slice = self.visit(node.slice) return "{1}.fast({0})".format(slice, value) # standard case else: slice = self.visit(node.slice) return "{1}[{0}]".format(slice, value) def visit_Name(self, node): if node.id in self.local_names: return node.id elif node.id in self.global_declarations: return "{0}()".format(node.id) else: return node.id # other def visit_ExtSlice(self, node): return map(self.visit, node.dims) def visit_Slice(self, node): args = [] for field in ('lower', 'upper', 'step'): nfield = getattr(node, field) arg = (self.visit(nfield) if nfield else 'pythonic::__builtin__::None') args.append(arg) if node.step is None or (type(node.step) is ast.Num and node.step.n == 1): return "pythonic::types::contiguous_slice({},{})".format(args[0], args[1]) else: return "pythonic::types::slice({},{},{})".format(*args) def visit_Index(self, node): return self.visit(node.value)
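
# Illustrative examples of the subscript lowering implemented above (a
# sketch, not verbatim backend output; the exact form depends on type
# inference and on whether the expression is bound):
#
# >> t[0]      ->  std::get<0>(t)                               (static index)
# >> a[1:5]    ->  a(pythonic::types::contiguous_slice(1L,5L))  (unit step)
# >> a[1:5:2]  ->  a(pythonic::types::slice(1L,5L,2L))          (general slice)
# >> a[i]      ->  a.fast(i) when i is provably non-negative, else a[i]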
artas360/pythran
pythran/backend.py
Python
bsd-3-clause
49,314
[ "VisIt" ]
53b9f52416eea5b2e8d32366f698c17b2163acb3ab24579713b3888acb549086
#!/usr/bin/env python """ Extracts data from reference files or calculates FF data. Takes a sequence of keywords corresponding to various datatypes (ex. mb = MacroModel bond lengths) followed by filenames, and extracts that particular data type from the file. Note that the order of filenames IS IMPORTANT! Used to manage calls to MacroModel but that is now done in the Mae class inside filetypes. I'm still debating if that should be there or here. Will see how this framework translates into Amber and then decide. """ from __future__ import absolute_import from __future__ import division import argparse import logging import logging.config import numpy as np import os import sys # I don't really want to import all of chain if possible. I only want # chain.from_iterable. # chain.from_iterable flattens a list of lists similar to: # [child for parent in grandparent for child in parent] # However, I think chain.from_iterable works on any number of nested lists. from itertools import chain from textwrap import TextWrapper import constants as co import compare import datatypes import filetypes import parameters logger = logging.getLogger(__name__) # Commands where we need to load the force field. COM_LOAD_FF = ['ma', 'mb', 'mt', 'ja', 'jb', 'jt'] # Commands related to Gaussian. COM_GAUSSIAN = ['ge', 'gea', 'geo', 'geao', 'gh', 'geigz'] # Commands related to Jaguar (Schrodinger). COM_JAGUAR = ['jq', 'jqh', 'jqa', 'je', 'jeo', 'jea', 'jeao', 'jh', 'jeigz'] # Commands related to MacroModel (Schrodinger). # Seems odd that the Jaguar geometry datatypes are in here, but we # do a MacroModel calculation to get the data in an easy form to # extract. COM_MACROMODEL = ['ja', 'jb', 'jt', 'mq', 'mqh', 'mqa', 'ma', 'mb', 'mt', 'me', 'meo', 'mea', 'meao', 'mh', 'mjeig', 'mgeig', 'mp', 'mgESP', 'mjESP'] # Commands related to Tinker. COM_TINKER = ['ta','tao', 'tb', 'tbo', 'tt','tto', 'te', 'teo', 'tea','teao', 'th', 'tjeigz', 'tgeig'] # Commands related to Amber. COM_AMBER = ['ae'] # All other commands. COM_OTHER = ['r'] # All possible commands. COM_ALL = COM_GAUSSIAN + COM_JAGUAR + COM_MACROMODEL + COM_TINKER + \ COM_AMBER + COM_OTHER def main(args): """ Arguments --------- args : string or list of strings Evaluated using parser returned by return_calculate_parser(). If it's a string, it will be converted into a list of strings. """ # Should be a list of strings for use by argparse. Ensure that's the case. # basestring is deprecated in python3, str is probably safe to use in both # but should be tested, for now sys.version_info switch can handle it if sys.version_info > (3, 0): if isinstance(args, str): args = args.split() else: if isinstance(args, basestring): args = args.split() parser = return_calculate_parser() opts = parser.parse_args(args) # This makes a dictionary that only contains the arguments related to # extracting data from everything in the argparse dictionary, opts. # Given that the user supplies: # python calculate.py -me a1.01.mae a2.01.mae a3.01.mae -me b1.01.mae # b2.01.mae -mb a1.01.mae b1.01.mae -jeig a1.01.in,a1.out # b1.01.in,b1.out # commands looks like: # {'me': [['a1.01.mae', 'a2.01.mae', 'a3.01.mae'], # ['b1.01.mae', 'b2.01.mae']], # 'mb': [['a1.01.mae'], ['b1.01.mae']], # 'jeig': [['a1.01.in,a1.out', 'b1.01.in,b1.out']] # } commands = {key: value for key, value in opts.__dict__.items() if key in COM_ALL and value} # Add in the empty commands. I'd rather not do this, but it makes later # coding when collecting data easier. 
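    # For example (hypothetical flags), if only -me was supplied, the loop
    # below pads the dictionary so later collection code can index every
    # command without a KeyError:
    #   {'me': [['a1.01.mae']], 'mb': [], 'ge': [], ...}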
for command in COM_ALL: if command not in commands: commands.update({command: []}) pretty_all_commands(commands) # This groups all of the data type commands associated with one file. # commands_for_filenames looks like: # {'a1.01.mae': ['me', 'mb'], # 'a1.01.in': ['jeig'], # 'a1.out': ['jeig'], # 'a2.01.mae': ['me'], # 'a3.01.mae': ['me'], # 'b1.01.mae': ['me', 'mb'], # 'b1.01.in': ['jeig'], # 'b1.out': ['jeig'], # 'b2.01.mae': ['me'] # } commands_for_filenames = sort_commands_by_filename(commands) pretty_commands_for_files(commands_for_filenames) # This dictionary associates the filename that the user supplied with # the command file that has to be used to execute some backend software # calculate in order to retrieve the data that the user requested. # inps looks like: # {'a1.01.mae': <__main__.Mae object at 0x1110e10>, # 'a1.01.in': None, # 'a1.out': None, # 'a2.01.mae': <__main__.Mae object at 0x1733b23>, # 'a3.01.mae': <__main__.Mae object at 0x1853e12>, # 'b1.01.mae': <__main__.Mae object at 0x2540e10>, # 'b1.01.in': None, # 'b1.out': None, # 'b2.01.mae': <__main__.Mae object at 0x1353e11>, # } inps = {} # This generates any of the necessary command files. It uses # commands_for_filenames, which contains all of the data types associated # with the given file. # Stuff below doesn't need both comma separated filenames simultaneously. for filename, commands_for_filename in commands_for_filenames.items(): logger.log(1, '>>> filename: {}'.format(filename)) logger.log(1, '>>> commands_for_filename: {}'.format( commands_for_filename)) # These next two if statements will break down what command files # have to be written by the backend software package. if any(x in COM_MACROMODEL for x in commands_for_filename): if os.path.splitext(filename)[1] == '.mae': inps[filename] = filetypes.Mae( os.path.join(opts.directory, filename)) inps[filename].commands = commands_for_filename inps[filename].write_com(sometext=opts.append) #Has to be here even though this is a Gaussian Job. if os.path.splitext(filename)[1] == '.chk': # The generated com file will be used as the input filename. It # also seems best to do the gaussian calculation in the # collect_data function since we need to collect the force # fields partial charges. com_filename = os.path.splitext(filename)[0] + '.ESP.q2mm.com' inps[com_filename] = filetypes.GaussCom( os.path.join(opts.directory, com_filename)) inps[com_filename].commands = commands_for_filename inps[com_filename].read_newzmat(filename) elif any(x in COM_TINKER for x in commands_for_filename): if os.path.splitext(filename)[1] == '.xyz': inps[filename] = filetypes.TinkerXYZ( os.path.join(opts.directory, filename)) inps[filename].commands = commands_for_filename elif any(x in COM_AMBER for x in commands_for_filename): # This doesn't work. # We need to know both filenames simultaneously for this Amber crap. # Have to add these to `inps` in some other way. pass # In this case, no command files have to be written. else: inps[filename] = None # Stuff below needs both comma separated filenames simultaneously. # Do the Amber inputs. # Leaving the filenames together because Taylor said this would work well. for comma_sep_filenames in flatten(commands['ae']): # Maybe make more specific later. 
inps[comma_sep_filenames] = filetypes.AmberInput( 'DOES_PATH_EVEN_MATTER') split_it = comma_sep_filenames.split(',') inps[comma_sep_filenames].directory = opts.directory inps[comma_sep_filenames].inpcrd = split_it[0] inps[comma_sep_filenames].prmtop = split_it[1] logger.log(1, '>>> commands: {}'.format(commands)) # Check whether or not to skip calculations. if opts.norun or opts.fake: logger.log(15, " -- Skipping backend calculations.") else: for filename, some_class in inps.items(): logger.log(1, '>>> filename: {}'.format(filename)) logger.log(1, '>>> some_class: {}'.format(some_class)) # Works if some class is None too. if hasattr(some_class, 'run'): # Ideally this can be the same for each software backend, # but that means we're going to have to make some changes # so that this token argument is handled properly. some_class.run(check_tokens=opts.check) # `data` is a list comprised of datatypes.Datum objects. # If we remove/with sorting removed, the Datum class is less # useful. We may want to reduce this to a N x 3 matrix or # 3 vectors (labels, weights, values). sub_names = ['OPT'] if opts.subnames: sub_names = opts.subnames if opts.fake: data = collect_data_fake( commands, inps, direc=opts.directory, invert=opts.invert, sub_names=sub_names) else: data = collect_data( commands, inps, direc=opts.directory, invert=opts.invert, sub_names=sub_names) # Adds weights to the data points in the data list. if opts.weight: compare.import_weights(data) # Optional printing or logging of data. if opts.doprint: pretty_data(data, log_level=None) return data def return_calculate_parser(add_help=True, parents=None): ''' Command line argument parser for calculate. Arguments --------- add_help : bool Whether or not to add help to the parser. Default is True. parents : argparse.ArgumentParser Parent parser incorporated into this parser. Default is None. ''' # Whether or not to add parents parsers. Not sure if/where this may be used # anymore. if parents is None: parents = [] # Whether or not to add help. You may not want to add help if these # arguments are being used in another, higher level parser. if add_help: parser = argparse.ArgumentParser( description=__doc__, parents=parents) else: parser = argparse.ArgumentParser( add_help=False, parents=parents) # GENERAL OPTIONS opts = parser.add_argument_group("calculate options") opts.add_argument( '--append', '-a', type=str, metavar='sometext', help='Append this text to command files generated by Q2MM.') opts.add_argument( '--directory', '-d', type=str, metavar='somepath', default=os.getcwd(), help=('Directory searched for files ' '(ex. *.mae, *.log, mm3.fld, etc.). ' 'Subshell commands (ex. MacroModel) are executed from here. ' 'Default is the current directory.')) opts.add_argument( '--doprint', '-p', action='store_true', help=("Logs data. Can generate extensive log files.")) opts.add_argument( '--fake', action='store_true', help=("Generate fake data sets. Used to expedite testing.")) opts.add_argument( '--ffpath', '-f', type=str, metavar='somepath', help=("Path to force field. 
Only necessary for certain data types "
              "if you don't provide the substructure name."))
    opts.add_argument(
        '--invert', '-i', type=float, metavar='somefloat',
        help=("This option will invert the smallest eigenvalue to be whatever "
              "value is specified by this argument whenever a Hessian is "
              "read."))
    opts.add_argument(
        '--nocheck', '-nc', action='store_false', dest='check', default=True,
        help=("By default, Q2MM checks whether MacroModel tokens are "
              "available before attempting a MacroModel calculation. If this "
              "option is supplied, MacroModel will not check for tokens "
              "first."))
    opts.add_argument(
        '--norun', '-n', action='store_true',
        help="Don't run 3rd party software.")
    opts.add_argument(
        '--subnames', '-s', type=str, nargs='+',
        metavar='"Substructure Name OPT"',
        help=("Names of the substructures containing parameters to "
              "optimize in a mm3.fld file."))
    opts.add_argument(
        '--weight', '-w', action='store_true',
        help='Add weights to data points.')
    # GAUSSIAN OPTIONS
    gau_args = parser.add_argument_group("gaussian reference data types")
    gau_args.add_argument(
        '-ge', type=str, nargs='+', action='append',
        default=[], metavar='somename.log',
        help=('Gaussian energies.'))
    gau_args.add_argument(
        '-gea', type=str, nargs='+', action='append',
        default=[], metavar='somename.log',
        help=('Gaussian energies. Energies will be relative to the average '
              'energy within this data type.'))
    gau_args.add_argument(
        '-geo', type=str, nargs='+', action='append',
        default=[], metavar='somename.log',
        help=('Gaussian energies. Same as -ge, except the files selected '
              'by this command will have their energies compared to those '
              'selected by -meo.'))
    gau_args.add_argument(
        '-geao', type=str, nargs='+', action='append',
        default=[], metavar='somename.log',
        help=('Gaussian energies. Same as -ge, except the files selected '
              'by this command will have their energies compared to those '
              'selected by -meo. Energies will be relative to the average '
              'energy within this data type.'))
    gau_args.add_argument(
        '-gh', type=str, nargs='+', action='append',
        default=[], metavar='somename.log',
        help='Gaussian Hessian extracted from a .log archive.')
    gau_args.add_argument(
        '-geigz', type=str, nargs='+', action='append',
        default=[], metavar='somename.log',
        help=('Gaussian eigenmatrix. Includes all elements, but zeroes '
              'all off-diagonal elements. Uses only the .log for '
              'the eigenvalues and eigenvectors.'))
    # JAGUAR OPTIONS
    jag_args = parser.add_argument_group("jaguar reference data types")
    jag_args.add_argument(
        '-jq', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help='Jaguar partial charges.')
    jag_args.add_argument(
        '-jqh', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help=('Jaguar partial charges (excludes aliphatic hydrogens). '
              'Sums aliphatic hydrogen charges into their bonded sp3 '
              'carbon.'))
    jag_args.add_argument(
        '-jqa', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help=('Jaguar partial charges. Sums the partial charge of all singly '
              'bonded hydrogens into its connected atom.'))
    jag_args.add_argument(
        '-je', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help='Jaguar energies.')
    jag_args.add_argument(
        '-jea', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help=('Jaguar energies. Everything will be relative to the average '
              'energy.'))
    jag_args.add_argument(
        '-jeo', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help=('Jaguar energies. Same as -je, except the files selected '
              'by this command will have their energies compared to those '
              'selected by -meo.'))
    jag_args.add_argument(
        '-jeao', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help=('Jaguar energies. Same as -jea, except the files selected '
              'by this command will have their energies compared to those '
              'selected by -meao.'))
    jag_args.add_argument(
        '-ja', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help='Jaguar angles.')
    jag_args.add_argument(
        '-jb', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help='Jaguar bond lengths.')
    jag_args.add_argument(
        '-jt', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help='Jaguar torsions.')
    jag_args.add_argument(
        '-jh', type=str, nargs='+', action='append',
        default=[], metavar='somename.in',
        help='Jaguar Hessian.')
    jag_args.add_argument(
        '-jeigz', type=str, nargs='+', action='append',
        default=[], metavar='somename.in,somename.out',
        help=('Jaguar eigenmatrix. Includes all elements, but zeroes '
              'all off-diagonal elements.'))
    # ADDITIONAL REFERENCE OPTIONS
    ref_args = parser.add_argument_group("other reference data types")
    ref_args.add_argument(
        '-r', type=str, nargs='+', action='append',
        default=[], metavar='somename.txt',
        help=('Read reference data from file. The reference file should have '
              '3 space- or tab-separated columns. Column 1 is the labels, '
              'column 2 is the weights and column 3 is the values.'))
    # MACROMODEL OPTIONS
    mm_args = parser.add_argument_group("macromodel data types")
    mm_args.add_argument(
        '-mq', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help='MacroModel charges.')
    mm_args.add_argument(
        '-mqh', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help='MacroModel charges (excludes aliphatic hydrogens).')
    mm_args.add_argument(
        '-mqa', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help=('MacroModel partial charges. Sums the partial charge of all '
              'singly bonded hydrogens into its connected atom.'))
    mm_args.add_argument(
        '-me', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help='MacroModel energies (pre-FF optimization).')
    mm_args.add_argument(
        '-mea', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help='MacroModel energies (pre-FF optimization). Energies will be '
        'relative to the average energy.')
    mm_args.add_argument(
        '-meo', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help='MacroModel energies (post-FF optimization).')
    mm_args.add_argument(
        '-meao', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help='MacroModel energies (post-FF optimization). Energies will be '
        'relative to the average energy.')
    mm_args.add_argument(
        '-mb', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help='MacroModel bond lengths (post-FF optimization).')
    mm_args.add_argument(
        '-ma', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help='MacroModel angles (post-FF optimization).')
    mm_args.add_argument(
        '-mt', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help='MacroModel torsions (post-FF optimization).')
    mm_args.add_argument(
        '-mh', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae',
        help='MacroModel Hessian.')
    mm_args.add_argument(
        '-mjeig', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae,somename.out',
        help='MacroModel eigenmatrix (all elements). Uses Jaguar '
        'eigenvectors.')
    mm_args.add_argument(
        '-mgeig', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae,somename.log',
        help='MacroModel eigenmatrix (all elements). Uses Gaussian '
        'eigenvectors.')
    mm_args.add_argument(
        '-mp', type=str, nargs='+', action='append',
        default=[], metavar='somename.fld,somename.txt',
        help='Uses a MM3* FF file (somename.fld) and a parameter file '
        '(somename.txt) to use the current FF parameter values as data. This '
        'is used for harmonic parameter tethering.')
    mm_args.add_argument(
        '-mgESP', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae,somename.chk',
        help='Uses the partial charges obtained from the FF and *mae file to '
        'determine the RMS of electrostatic fitting from a Gaussian *chk file.')
    mm_args.add_argument(
        '-mjESP', type=str, nargs='+', action='append',
        default=[], metavar='somename.mae,somename.in',
        help='Uses the partial charges obtained from the FF and *mae file to '
        'determine the RMS of electrostatic fitting from a Schrodinger *in '
        'file.')
    # TINKER OPTIONS
    tin_args = parser.add_argument_group("tinker data types")
    tin_args.add_argument(
        '-te', type=str, nargs='+', action='append',
        default=[], metavar='somename.xyz',
        help='Tinker energies (pre-FF optimization).')
    tin_args.add_argument(
        '-tea', type=str, nargs='+', action='append',
        default=[], metavar='somename.xyz',
        help='Tinker energies (pre-FF optimization). Energies will be '
        'relative to the average energy.')
    tin_args.add_argument(
        '-teo', type=str, nargs='+', action='append',
        default=[], metavar='somename.xyz',
        help='Tinker energies (post-FF optimization).')
    tin_args.add_argument(
        '-teao', type=str, nargs='+', action='append',
        default=[], metavar='somename.xyz',
        help='Tinker energies (post-FF optimization).
Energies will be ' 'relative to the average energy.') tin_args.add_argument( '-tb', type=str, nargs='+', action='append', default=[], metavar='somename.xyz', help='Tinker bond lengths (pre-FF optimization).') tin_args.add_argument( '-tbo', type=str, nargs='+', action='append', default=[], metavar='somename.xyz', help='Tinker bond lengths (post-FF optimization).') tin_args.add_argument( '-ta', type=str, nargs='+', action='append', default=[], metavar='somename.xyz', help='Tinker angles (pre-FF optimization).') tin_args.add_argument( '-tao', type=str, nargs='+', action='append', default=[], metavar='somename.xyz', help='Tinker angles (post-FF optimization).') tin_args.add_argument( '-tt', type=str, nargs='+', action='append', default=[], metavar='somename.xyz', help='Tinker torsions (pre-FF optimization).') tin_args.add_argument( '-tto', type=str, nargs='+', action='append', default=[], metavar='somename.xyz', help='Tinker torsions (post-FF optimization).') tin_args.add_argument( '-th', type=str, nargs='+', action='append', default=[], metavar='somename.xyz', help='Tinker Hessian.') tin_args.add_argument( '-tjeig', type=str, nargs='+', action='append', default=[], metavar='somename.xyz,somename.out', help='Tinker eigenmatrix (all elements). Uses Jaguar ' 'eigenvectors.') tin_args.add_argument( '-tgeig', type=str, nargs='+', action='append', default=[], metavar='somename.xyz,somename.log', help='Tinker eigenmatrix (all elements). Uses Gaussian ' 'eigenvectors.') # AMBER OPTIONS amb_args = parser.add_argument_group("amber data types") amb_args.add_argument( '-ae', type=str, nargs='+', action='append', default=[], metavar='somename.inpcrd,somename.prmtop', help='Amber energy.') return parser def check_outs(filename, outs, classtype, direc): """ Reads a file if necessary. Checks the output dictionary first in case the file has already been loaded. Could work on easing the use of this by somehow reducing number of arguments required. """ logger.log(1, '>>> filename: {}'.format(filename)) logger.log(1, '>>> outs: {}'.format(outs)) logger.log(1, '>>> classtype: {}'.format(classtype)) logger.log(1, '>>> direc: {}'.format(direc)) if filename not in outs: outs[filename] = \ classtype(os.path.join(direc, filename)) return outs[filename] def collect_reference(path): """ Reads the data inside a reference data text file. This must have 3 columns: 1. Labels 2. Weights 3. Values """ data = [] with open(path, 'r') as f: for i, line in enumerate(f): # Skip certain lines. if line[0] in ['-', '#']: continue # if line.startswith('-'): # continue # Remove everything following a # in a line. line = line.partition('#')[0] cols = line.split() # There should always be 3 columns. assert len(cols) == 3, \ 'Error reading line {} from {}: {}'.format( i, path, line) lbl, wht, val = cols datum = datatypes.Datum(lbl=lbl, wht=float(wht), val=float(val)) # Added this from the function below, read_reference() lbl_to_data_attrs(datum, lbl) data.append(datum) return np.array(data) # Must be rewritten to go in a particular order of data types every time. def collect_data(coms, inps, direc='.', sub_names=['OPT'], invert=None): """ Arguments --------- invert : None or float If given, will modify the smallest value of the Hessian to this value. """ # outs looks like: # {'filename1': <some class for filename1>, # 'filename2': <some class for filename2>, # 'filename3': <some class for filename3> # } outs = {} # List of Datum objects. 
data = [] # REFERENCE DATA TEXT FILES # No grouping is necessary for this data type, so flatten the list of # lists. filenames = chain.from_iterable(coms['r']) for filename in filenames: # Unlike most datatypes, these Datum only get the attributes _lbl, # val and wht. This is to ensure that making and working with these # reference text files isn't too cumbersome. data.extend(collect_reference(os.path.join(direc, filename))) # MACROMODEL MM3* CURRENT PARAMETER VALUES filenames = chain.from_iterable(coms['mp']) for comma_filenames in filenames: # FF file and parameter file. name_fld, name_txt = comma_filenames.split(',') ff = datatypes.MM3(os.path.join(direc, name_fld)) ff.import_ff() ff.params = parameters.trim_params_by_file( ff.params, os.path.join(direc, name_txt)) for param in ff.params: data.extend([datatypes.Datum( val=param.value, com='mp', typ='p', src_1=name_fld, src_2=name_txt, idx_1=param.mm3_row, idx_2=param.mm3_col)]) # JAGUAR ENERGIES filenames_s = coms['je'] # idx_1 is the number used to group sets of relative energies. for idx_1, filenames in enumerate(filenames_s): temp = [] for filename in filenames: mae = check_outs(filename, outs, filetypes.Mae, direc) # idx_2 corresponds to the structure inside the file in case the # .mae files contains multiple structures. for idx_2, structure in enumerate(mae.structures): try: energy = structure.props['r_j_Gas_Phase_Energy'] except KeyError: energy = structure.props['r_j_QM_Energy'] energy *= co.HARTREE_TO_KJMOL temp.append(datatypes.Datum( val=energy, com='je', typ='e', src_1=filename, idx_1=idx_1 + 1, idx_2=idx_2 + 1)) # For this data type, we set everything relative. zero = min([x.val for x in temp]) for datum in temp: datum.val -= zero data.extend(temp) # GAUSSIAN ENERGIES filename_s = coms['ge'] for idx_1, filenames in enumerate(filename_s): temp = [] for filename in filenames: log = check_outs(filename, outs, filetypes.GaussLog, direc) # This will be a list of lists. For example, let's say that # co.GAUSSIAN_ENERGIES is ['HF', 'ZeroPoint'], then # the 1st list in things_to_add would be the HF energies # and the 2nd list would be the ZP energies. # # Consider if you had ['HF', 'ZeroPoint'] as co.GAUSSIAN_ENERGIES # and your archive had this: # HF=0.634,0.2352\ZeroPoint=0.01234,0.0164 # The resulting things_to_add would be: # things_to_add = [[0.634, 0.2352], # [0.01234, 0.0164]] things_to_add = [] # Remember, thing_label is whatever you specified in # co.GAUSSIAN_ENERGIES. for thing_label in co.GAUSSIAN_ENERGIES: # Consider if your Gaussian log archive has the following: # HF=0.234,0.1234,0.5732 # Then, if co.GAUSSIAN_ENERGIES includes 'HF', then that # particular thing, or sublist that goes into things_to_add, # would look like: # thing = ['0.234', '0.1234', '0.5732'] # Here's another example. Consider if your archive has the # property "stupidproperty": # stupidproperty=can,i,be,more,clear # Then this particular sublist, named thing, would be # thing = ['can', 'i', 'be', 'more', 'clear'] # Lastly, consider if you have this: # ZeroPoint=0.12341 # Then thing would be this: # thing = ['0.12341'] thing = log.structures[0].props[thing_label] # Deal with multiple structures by checking for this # split here. if ',' in thing: # Note that the "stupidproperty" example would fail here # because its elements can not be converted to floats. thing = [float(x) for x in thing.split(',')] # Here, thing might look like: # thing = [0.1235235, 0.2352, 0.352345] else: # Here it would be a list with only one element. 
thing = [float(thing)] things_to_add.append(thing) # Initialize list of zeros. Python syntax looks funny sometimes. # The length of the things_to_add sublists should always be the # same if you're doing it right. I suppose you could add some # sort of assert here. energies = [0.] * len(things_to_add[0]) # In this case, consider the earlier example where: # things_to_add = [[0.634, 0.2352], # [0.01234, 0.0164]] # Here, the first thing_group would be [0.634, 0.2352] and the # second thing_group would be [0.01234, 0.0164]. for thing_group in things_to_add: # After the loop through the 1st thing_group, we would have # energies = [0.634, 0.2352]. After the 2nd thing_group, we # would have energies = [0.634 + 0.01234, 0.2352 + 0.0164]. for i, thing in enumerate(thing_group): energies[i] += thing energies = [x * co.HARTREE_TO_KJMOL for x in energies] for i, e in enumerate(energies): temp.append(datatypes.Datum( val=e, com='ge', typ='e', src_1=filename, idx_1=idx_1 + 1, idx_2=i + 1)) # This works when HF and ZeroPoint are used. Had to make it more # general. # Revisit how structures are stored in GaussLog when you have time. # hf = log.structures[0].props['HF'] # zp = log.structures[0].props['ZeroPoint'] # if ',' in hf: # hfs = map(float, hf.split(',')) # zps = map(float, zp.split(',')) # else: # hfs = [float(hf)] # zps = [float(zp)] # es = [] # for hf, zp in izip(hfs, zps): # es = (hf + zp) * co.HARTREE_TO_KJMOL # for i, e in enumerate(es): # temp.append(datatypes.Datum( # val=e, # com='ge', # typ='e', # src_1=filename, # idx_1=idx_1 + 1, # idx_2=i + 1)) # Here's the old code from before we supported multiple energies. # I think it's helpful history for new coders trying to understand # how to write in new datatypes. Notice how the new code utilizes # idx_2. # hf = float(log.structures[0].props['HF']) # zp = float(log.structures[0].props['ZeroPoint']) # energy = (hf + zp) * co.HARTREE_TO_KJMOL # # We don't use idx_2 since we assume there is only one structure # # in a Gaussian .log. I think that's always the case. # temp.append(datatypes.Datum( # val=energy, # com='ge', # typ='e', # src_1=filename, # idx_1=idx_1 + 1)) zero = min([x.val for x in temp]) for datum in temp: datum.val -= zero data.extend(temp) # MACROMODEL ENERGIES filenames_s = coms['me'] ind = 'pre' for idx_1, filenames in enumerate(filenames_s): for filename in filenames: name_mae = inps[filename].name_mae mae = check_outs(name_mae, outs, filetypes.Mae, direc) indices = inps[filename]._index_output_mae # This is list of sets. The 1st value in the set corresponds to the # number of the structure. The 2nd value is the structure class. selected_structures = filetypes.select_structures( mae.structures, indices, ind) for idx_2, structure in selected_structures: data.append(datatypes.Datum( val=structure.props['r_mmod_Potential_Energy-MM3*'], com='me', typ='e', src_1=inps[filename].name_mae, idx_1=idx_1 + 1, idx_2=idx_2 + 1)) # AMBER ENERGIES filenames_s = coms['ae'] for idx_1, filenames in enumerate(filenames_s): logger.log(1, '>>> idx_1: {}'.format(idx_1)) logger.log(1, '>>> filenames: {}'.format(filenames)) for idx_2, comma_sep_filenames in enumerate(filenames): name_1, name_2 = comma_sep_filenames.split(',') out = check_outs( comma_sep_filenames, outs, filetypes.AmberOut, direc) # Right now, path is a comma separated string. 
            out.path = inps[comma_sep_filenames].out
            logger.log(1, '>>> out: {}'.format(out))
            energy = out.read_energy()
            data.append(datatypes.Datum(
                val=energy,
                com='ae',
                typ='e',
                src_1=name_1,
                src_2=name_2,
                idx_1=idx_1 + 1,
                idx_2=idx_2 + 1))
    # JAGUAR AVERAGE ENERGIES
    filenames_s = coms['jea']
    # idx_1 is the number used to group sets of relative energies.
    for idx_1, filenames in enumerate(filenames_s):
        temp = []
        for filename in filenames:
            mae = check_outs(filename, outs, filetypes.Mae, direc)
            # idx_2 corresponds to the structure inside the file in case the
            # .mae file contains multiple structures.
            for idx_2, structure in enumerate(mae.structures):
                try:
                    energy = structure.props['r_j_Gas_Phase_Energy']
                except KeyError:
                    energy = structure.props['r_j_QM_Energy']
                energy *= co.HARTREE_TO_KJMOL
                temp.append(datatypes.Datum(
                    val=energy,
                    com='jea',
                    typ='ea',
                    src_1=filename,
                    idx_1=idx_1 + 1,
                    idx_2=idx_2 + 1))
        # For this data type, we set everything relative.
        avg = sum([x.val for x in temp]) / len(temp)
        for datum in temp:
            datum.val -= avg
        data.extend(temp)
    # GAUSSIAN AVERAGE ENERGIES
    filename_s = coms['gea']
    for idx_1, filenames in enumerate(filename_s):
        temp = []
        for filename in filenames:
            log = check_outs(filename, outs, filetypes.GaussLog, direc)
            things_to_add = []
            for thing_label in co.GAUSSIAN_ENERGIES:
                thing = log.structures[0].props[thing_label]
                if ',' in thing:
                    thing = [float(x) for x in thing.split(',')]
                else:
                    thing = [float(thing)]
                things_to_add.append(thing)
            energies = [0.] * len(things_to_add[0])
            for thing_group in things_to_add:
                for i, thing in enumerate(thing_group):
                    energies[i] += thing
            energies = [x * co.HARTREE_TO_KJMOL for x in energies]
            for i, e in enumerate(energies):
                temp.append(datatypes.Datum(
                    val=e,
                    com='gea',
                    typ='ea',
                    src_1=filename,
                    idx_1=idx_1 + 1,
                    idx_2=i + 1))
        avg = sum([x.val for x in temp]) / len(temp)
        for datum in temp:
            datum.val -= avg
        data.extend(temp)
    # MACROMODEL AVERAGE ENERGIES
    filenames_s = coms['mea']
    ind = 'pre'
    # idx_1 is the number used to group sets of relative energies.
    for idx_1, filenames in enumerate(filenames_s):
        temp = []
        for filename in filenames:
            name_mae = inps[filename].name_mae
            mae = check_outs(name_mae, outs, filetypes.Mae, direc)
            indices = inps[filename]._index_output_mae
            # This is a list of sets. The 1st value in the set corresponds to
            # the number of the structure. The 2nd value is the structure
            # class.
            selected_structures = filetypes.select_structures(
                mae.structures, indices, ind)
            for idx_2, structure in selected_structures:
                temp.append(datatypes.Datum(
                    val=structure.props['r_mmod_Potential_Energy-MM3*'],
                    com='mea',
                    typ='ea',
                    src_1=inps[filename].name_mae,
                    idx_1=idx_1 + 1,
                    idx_2=idx_2 + 1))
        avg = sum([x.val for x in temp]) / len(temp)
        for datum in temp:
            datum.val -= avg
        data.extend(temp)
    # JAGUAR ENERGIES COMPARED TO OPTIMIZED MM
    filenames_s = coms['jeo']
    # idx_1 is the number used to group sets of relative energies.
    for idx_1, filenames in enumerate(filenames_s):
        temp = []
        for filename in filenames:
            mae = check_outs(filename, outs, filetypes.Mae, direc)
            # idx_2 corresponds to the structure inside the file in case the
            # .mae file contains multiple structures.
            for idx_2, structure in enumerate(mae.structures):
                try:
                    energy = structure.props['r_j_Gas_Phase_Energy']
                except KeyError:
                    energy = structure.props['r_j_QM_Energy']
                energy *= co.HARTREE_TO_KJMOL
                temp.append(datatypes.Datum(
                    val=energy,
                    com='jeo',
                    typ='eo',
                    src_1=filename,
                    idx_1=idx_1 + 1,
                    idx_2=idx_2 + 1))
        # For this data type, we set everything relative.
        zero = min([x.val for x in temp])
        for datum in temp:
            datum.val -= zero
        data.extend(temp)
    # GAUSSIAN ENERGIES RELATIVE TO OPTIMIZED MM
    filename_s = coms['geo']
    for idx_1, filenames in enumerate(filename_s):
        temp = []
        for filename in filenames:
            log = check_outs(filename, outs, filetypes.GaussLog, direc)
            things_to_add = []
            for thing_label in co.GAUSSIAN_ENERGIES:
                thing = log.structures[0].props[thing_label]
                if ',' in thing:
                    thing = [float(x) for x in thing.split(',')]
                else:
                    thing = [float(thing)]
                things_to_add.append(thing)
            energies = [0.] * len(things_to_add[0])
            for thing_group in things_to_add:
                for i, thing in enumerate(thing_group):
                    energies[i] += thing
            energies = [x * co.HARTREE_TO_KJMOL for x in energies]
            for i, e in enumerate(energies):
                temp.append(datatypes.Datum(
                    val=e,
                    com='geo',
                    typ='eo',
                    src_1=filename,
                    idx_1=idx_1 + 1,
                    idx_2=i + 1))
        zero = min([x.val for x in temp])
        for datum in temp:
            datum.val -= zero
        data.extend(temp)
    # MACROMODEL OPTIMIZED ENERGIES
    filenames_s = coms['meo']
    ind = 'opt'
    for idx_1, filenames in enumerate(filenames_s):
        for filename in filenames:
            name_mae = inps[filename].name_mae
            mae = check_outs(name_mae, outs, filetypes.Mae, direc)
            indices = inps[filename]._index_output_mae
            selected_structures = filetypes.select_structures(
                mae.structures, indices, ind)
            for idx_2, structure in selected_structures:
                data.append(datatypes.Datum(
                    val=structure.props['r_mmod_Potential_Energy-MM3*'],
                    com='meo',
                    typ='eo',
                    src_1=inps[filename].name_mae,
                    idx_1=idx_1 + 1,
                    idx_2=idx_2 + 1))
    # JAGUAR ENERGIES RELATIVE TO AVERAGE COMPARED TO OPTIMIZED MM
    filenames_s = coms['jeao']
    # idx_1 is the number used to group sets of relative energies.
    for idx_1, filenames in enumerate(filenames_s):
        temp = []
        for filename in filenames:
            mae = check_outs(filename, outs, filetypes.Mae, direc)
            # idx_2 corresponds to the structure inside the file in case the
            # .mae file contains multiple structures.
            for idx_2, structure in enumerate(mae.structures):
                try:
                    energy = structure.props['r_j_Gas_Phase_Energy']
                except KeyError:
                    energy = structure.props['r_j_QM_Energy']
                energy *= co.HARTREE_TO_KJMOL
                temp.append(datatypes.Datum(
                    val=energy,
                    com='jeao',
                    typ='eao',
                    src_1=filename,
                    idx_1=idx_1 + 1,
                    idx_2=idx_2 + 1))
        avg = sum([x.val for x in temp]) / len(temp)
        for datum in temp:
            datum.val -= avg
        data.extend(temp)
    # GAUSSIAN AVERAGE ENERGIES RELATIVE TO OPTIMIZED MM
    filename_s = coms['geao']
    for idx_1, filenames in enumerate(filename_s):
        temp = []
        for filename in filenames:
            log = check_outs(filename, outs, filetypes.GaussLog, direc)
            things_to_add = []
            for thing_label in co.GAUSSIAN_ENERGIES:
                thing = log.structures[0].props[thing_label]
                if ',' in thing:
                    thing = [float(x) for x in thing.split(',')]
                else:
                    thing = [float(thing)]
                things_to_add.append(thing)
            energies = [0.] * len(things_to_add[0])
            for thing_group in things_to_add:
                for i, thing in enumerate(thing_group):
                    energies[i] += thing
            energies = [x * co.HARTREE_TO_KJMOL for x in energies]
            for i, e in enumerate(energies):
                temp.append(datatypes.Datum(
                    val=e,
                    com='geao',
                    typ='eao',
                    src_1=filename,
                    idx_1=idx_1 + 1,
                    idx_2=i + 1))
        avg = sum([x.val for x in temp]) / len(temp)
        for datum in temp:
            datum.val -= avg
        data.extend(temp)
    # MACROMODEL OPTIMIZED ENERGIES RELATIVE TO AVERAGE
    filenames_s = coms['meao']
    ind = 'opt'
    for idx_1, filenames in enumerate(filenames_s):
        temp = []
        for filename in filenames:
            name_mae = inps[filename].name_mae
            mae = check_outs(name_mae, outs, filetypes.Mae, direc)
            indices = inps[filename]._index_output_mae
            selected_structures = filetypes.select_structures(
                mae.structures, indices, ind)
            for idx_2, structure in selected_structures:
                temp.append(datatypes.Datum(
                    val=structure.props['r_mmod_Potential_Energy-MM3*'],
                    com='meao',
                    typ='eao',
                    src_1=inps[filename].name_mae,
                    idx_1=idx_1 + 1,
                    idx_2=idx_2 + 1))
        avg = sum([x.val for x in temp]) / len(temp)
        for datum in temp:
            datum.val -= avg
        data.extend(temp)
    # JAGUAR BONDS
    filenames = chain.from_iterable(coms['jb'])
    for filename in filenames:
        data.extend(collect_structural_data_from_mae(
            filename, inps, outs, direc, sub_names, 'jb', 'pre', 'bonds'))
    # TINKER SP BONDS
    filenames = chain.from_iterable(coms['tb'])
    for filename in filenames:
        data.extend(collect_structural_data_from_tinker_log(
            filename, inps, outs, direc, 'tb', 'pre', 'bonds'))
    # TINKER SP ANGLES
    filenames = chain.from_iterable(coms['ta'])
    for filename in filenames:
        data.extend(collect_structural_data_from_tinker_log(
            filename, inps, outs, direc, 'ta', 'pre', 'angles'))
    # TINKER SP TORSIONS
    filenames = chain.from_iterable(coms['tt'])
    for filename in filenames:
        data.extend(collect_structural_data_from_tinker_log(
            filename, inps, outs, direc, 'tt', 'pre', 'torsions'))
    # TINKER OPTIMIZED BONDS
    filenames = chain.from_iterable(coms['tbo'])
    for filename in filenames:
        data.extend(collect_structural_data_from_tinker_log(
            filename, inps, outs, direc, 'tbo', 'opt', 'bonds'))
    # TINKER OPTIMIZED ANGLES
    filenames = chain.from_iterable(coms['tao'])
    for filename in filenames:
        data.extend(collect_structural_data_from_tinker_log(
            filename, inps, outs, direc, 'tao', 'opt', 'angles'))
    # TINKER OPTIMIZED TORSIONS
    filenames = chain.from_iterable(coms['tto'])
    for filename in filenames:
        data.extend(collect_structural_data_from_tinker_log(
            filename, inps, outs, direc, 'tto', 'opt', 'torsions'))
    # TINKER ENERGIES RELATIVE TO LOWEST
    filenames_s = coms['te']
    for idx_1, filenames in enumerate(filenames_s):
        temp = []
        for filename in filenames:
            temp.append(collect_structural_data_from_tinker_log(
                filename, inps, outs, direc, 'te', 'pre', 'e', idx_1=idx_1))
        zero = min([x.val for x in temp])
        for datum in temp:
            datum.val -= zero
        data.extend(temp)
    # TINKER ENERGIES RELATIVE TO AVERAGE
    filenames_s = coms['tea']
    for idx_1, filenames in enumerate(filenames_s):
        temp = []
        for filename in filenames:
            temp.append(collect_structural_data_from_tinker_log(
                filename, inps, outs, direc, 'tea', 'pre', 'ea', idx_1=idx_1))
        avg = sum([x.val for x in temp]) / len(temp)
        for datum in temp:
            datum.val -= avg
        data.extend(temp)
    # TINKER OPTIMIZED ENERGIES RELATIVE TO LOWEST
    filenames_s = coms['teo']
    for idx_1, filenames in enumerate(filenames_s):
        temp = []
        for filename in filenames:
            temp.append(collect_structural_data_from_tinker_log(
                filename, inps, outs, direc, 'teo', 'opt', 'eo', idx_1=idx_1))
        zero = min([x.val for x in temp])
        for datum in temp:
            datum.val -= zero
        data.extend(temp)
    # TINKER OPTIMIZED ENERGIES RELATIVE TO AVERAGE
    filenames_s = coms['teao']
    for idx_1, filenames in enumerate(filenames_s):
        temp = []
        for filename in filenames:
            temp.append(collect_structural_data_from_tinker_log(
                filename, inps, outs, direc, 'teao', 'opt', 'eao',
                idx_1=idx_1))
        avg = sum([x.val for x in temp]) / len(temp)
        for datum in temp:
            datum.val -= avg
        data.extend(temp)
    # TINKER HESSIAN
    filenames = chain.from_iterable(coms['th'])
    for filename in filenames:
        xyz_struct = inps[filename].structures[0]
        num_atoms = xyz_struct.props['total atoms']
        name_hes = inps[filename].name_hes
        hes = check_outs(name_hes, outs, filetypes.TinkerHess, direc)
        hes.natoms = num_atoms
        hess = hes.hessian
        datatypes.mass_weight_hessian(hess, xyz_struct.atoms)
        # Need to figure out dummy atoms at some point?
        # I'm not even sure if we can use dummy atoms in TINKER.
        low_tri_idx = np.tril_indices_from(hess)
        low_tri = hess[low_tri_idx]
        data.extend([datatypes.Datum(
            val=e,
            com='th',
            typ='h',
            src_1=hes.filename,
            idx_1=x + 1,
            idx_2=y + 1)
            for e, x, y in zip(
                low_tri, low_tri_idx[0], low_tri_idx[1])])
    # TINKER EIGENMATRIX USING GAUSSIAN EIGENVECTORS
    filenames = chain.from_iterable(coms['tgeig'])
    for comma_filenames in filenames:
        name_xyz, name_gau_log = comma_filenames.split(',')
        name_xyz_hes = inps[name_xyz].name_hes
        xyz = check_outs(name_xyz, outs, filetypes.Tinker_xyz, direc)
        xyz_hes = check_outs(name_xyz_hes, outs, filetypes.TinkerHess, direc)
        gau_log = check_outs(name_gau_log, outs, filetypes.GaussLog, direc)
        xyz_struct = xyz.structures[0]
        num_atoms = xyz_struct.props['total atoms']
        xyz_hes.natoms = num_atoms
        hess = xyz_hes.hessian
        datatypes.mass_weight_hessian(hess, xyz_struct.atoms)
        evec = gau_log.evecs
        try:
            eigenmatrix = np.dot(np.dot(evec, hess), evec.T)
        except ValueError:
            logger.warning('Matrices not aligned!')
            logger.warning('Hessian retrieved from {}: {}'.format(
                name_xyz_hes, hess.shape))
            logger.warning('Eigenvectors retrieved from {}: {}'.format(
                name_gau_log, evec.shape))
            raise
        low_tri_idx = np.tril_indices_from(eigenmatrix)
        low_tri = eigenmatrix[low_tri_idx]
        data.extend([datatypes.Datum(
            val=e,
            com='tgeig',
            typ='eig',
            src_1=name_xyz,
            src_2=name_gau_log,
            idx_1=x + 1,
            idx_2=y + 1)
            for e, x, y in zip(
                low_tri, low_tri_idx[0], low_tri_idx[1])])
    # MACROMODEL BONDS
    filenames = chain.from_iterable(coms['mb'])
    for filename in filenames:
        data.extend(collect_structural_data_from_mae(
            filename, inps, outs, direc, sub_names, 'mb', 'opt', 'bonds'))
    # JAGUAR ANGLES
    filenames = chain.from_iterable(coms['ja'])
    for filename in filenames:
        data.extend(collect_structural_data_from_mae(
            filename, inps, outs, direc, sub_names, 'ja', 'pre', 'angles'))
    # MACROMODEL ANGLES
    filenames = chain.from_iterable(coms['ma'])
    for filename in filenames:
        data.extend(collect_structural_data_from_mae(
            filename, inps, outs, direc, sub_names, 'ma', 'opt', 'angles'))
    # JAGUAR TORSIONS
    filenames = chain.from_iterable(coms['jt'])
    for filename in filenames:
        data.extend(collect_structural_data_from_mae(
            filename, inps, outs, direc, sub_names, 'jt', 'pre', 'torsions'))
    # MACROMODEL TORSIONS
    filenames = chain.from_iterable(coms['mt'])
    for filename in filenames:
        data.extend(collect_structural_data_from_mae(
            filename, inps, outs, direc, sub_names, 'mt', 'opt', 'torsions'))
    # JAGUAR CHARGES
    filenames = chain.from_iterable(coms['jq'])
    for filename in filenames:
        mae = check_outs(filename, outs, filetypes.Mae, direc)
        for idx_1, structure in enumerate(mae.structures):
            for atom in structure.atoms:
                # If it doesn't have the property b_q_use_charge, use it.
                # If b_q_use_charge is 1, use it. If it's 0, don't use it.
                if not 'b_q_use_charge' in atom.props or \
                        atom.props['b_q_use_charge']:
                    data.append(datatypes.Datum(
                        val=atom.partial_charge,
                        com='jq',
                        typ='q',
                        src_1=filename,
                        idx_1=idx_1 + 1,
                        atm_1=atom.index))
    # MACROMODEL CHARGES
    filenames = chain.from_iterable(coms['mq'])
    for filename in filenames:
        name_mae = inps[filename].name_mae
        mae = check_outs(name_mae, outs, filetypes.Mae, direc)
        # Pick out the right structures. Sometimes our .com files
        # generate many structures in a .mae, not all of which
        # apply to this command.
        structures = filetypes.select_structures(
            mae.structures, inps[filename]._index_output_mae, 'pre')
        for idx_1, structure in structures:
            for atom in structure.atoms:
                if not 'b_q_use_charge' in atom.props or \
                        atom.props['b_q_use_charge']:
                    data.append(datatypes.Datum(
                        val=atom.partial_charge,
                        com='mq',
                        typ='q',
                        src_1=filename,
                        idx_1=idx_1 + 1,
                        atm_1=atom.index))
    # MACROMODEL+GAUSSIAN ESP
    filenames = chain.from_iterable(coms['mgESP'])
    for comma_filenames in filenames:
        charges_list = []
        filename_mae, name_gau_chk = comma_filenames.split(',')
        # Filename of the output *mae file (i.e. filename.q2mm.mae)
        name_mae = inps[filename_mae].name_mae
        mae = check_outs(name_mae, outs, filetypes.Mae, direc)
        structures = filetypes.select_structures(
            mae.structures, inps[filename_mae]._index_output_mae, 'pre')
        for idx_1, structure in structures:
            for atom in structure.atoms:
                ### I think we want all the charges, right?
                #if not 'b_q_use_charge' in atom.props or \
                #        atom.props['b_q_use_charge']:
                if atom.atomic_num > 0:
                    charges_list.append(atom.partial_charge)
        com_filename = os.path.splitext(name_gau_chk)[0] + '.ESP.q2mm.com'
        inps[com_filename].charge_list = charges_list
        inps[com_filename].write_com()
        inps[com_filename].run_gaussian()
        name_gauss_log = inps[com_filename].name_log
        gauss = check_outs(name_gauss_log, outs, filetypes.GaussLog, direc)
        esp_rms = gauss.esp_rms
        if esp_rms < 0.0:
            raise Exception('A negative RMS was obtained for the ESP fitting '
                            'which indicates an error occurred. Look at the '
                            'following file: {}'.format(name_gauss_log))
        data.append(datatypes.Datum(
            val=esp_rms,
            com='mgESP',
            typ='esp',
            src_1=name_mae,
            src_2='gaussian',
            idx_1=1))
    # MACROMODEL+JAGUAR ESP
    ## This does not work, I still need to write code to support Jaguar. -TR
    filenames = chain.from_iterable(coms['mjESP'])
    for comma_filenames in filenames:
        charges_list = []
        name_mae, name_jag_chk = comma_filenames.split(',')
        mae = check_outs(name_mae, outs, filetypes.Mae, direc)
        structures = filetypes.select_structures(
            mae.structures, inps[name_mae]._index_output_mae, 'pre')
        for idx_1, structure in structures:
            for atom in structure.atoms:
                if not 'b_q_use_charge' in atom.props or \
                        atom.props['b_q_use_charge']:
                    charges_list.append(atom.partial_charge)
        ### Filler for ESP calculations ###
        ### This is what is used in anna's code
        current_RMS = run_ChelpG_inp.run_JCHelpG(charges_list, name_jag_chk)
        ### End of filler
        if current_RMS < 0:
            sys.exit("Error while computing RMS. Exiting")
        data.append(datatypes.Datum(
            val=current_RMS,
            com='mjESP',
            typ='esp',
            src_1=name_mae,
            idx_1=1))
    # JAGUAR CHARGES EXCLUDING ALIPHATIC HYDROGENS
    filenames = chain.from_iterable(coms['jqh'])
    for filename in filenames:
        mae = check_outs(filename, outs, filetypes.Mae, direc)
        for idx_1, structure in enumerate(mae.structures):
            aliph_hyds = structure.get_aliph_hyds()
            for atom in structure.atoms:
                # If it doesn't have the property b_q_use_charge, use it.
                # If b_q_use_charge is 1, use it. If it's 0, don't use it.
                if (not 'b_q_use_charge' in atom.props or \
                        atom.props['b_q_use_charge']) and \
                        not atom in aliph_hyds:
                    charge = atom.partial_charge
                    if atom.atom_type == 3:
                        for bonded_atom_index in atom.bonded_atom_indices:
                            bonded_atom = \
                                structure.atoms[bonded_atom_index - 1]
                            if bonded_atom in aliph_hyds:
                                charge += bonded_atom.partial_charge
                    data.append(datatypes.Datum(
                        val=charge,
                        com='jqh',
                        typ='qh',
                        src_1=filename,
                        idx_1=idx_1 + 1,
                        atm_1=atom.index))
    # MACROMODEL CHARGES EXCLUDING ALIPHATIC HYDROGENS
    filenames = chain.from_iterable(coms['mqh'])
    for filename in filenames:
        name_mae = inps[filename].name_mae
        mae = check_outs(name_mae, outs, filetypes.Mae, direc)
        # Pick out the right structures. Sometimes our .com files
        # generate many structures in a .mae, not all of which
        # apply to this command.
        structures = filetypes.select_structures(
            mae.structures, inps[filename]._index_output_mae, 'pre')
        for idx_1, structure in structures:
            aliph_hyds = structure.get_aliph_hyds()
            for atom in structure.atoms:
                if (not 'b_q_use_charge' in atom.props or \
                        atom.props['b_q_use_charge']) and \
                        atom not in aliph_hyds:
                    # Since the charge is always zero AS FAR AS I KNOW, this
                    # whole recalculation of the charge is totally
                    # unnecessary. However, I want users to be aware that if
                    # a situation arises that goes beyond something I
                    # experienced, uncommenting this section, thereby making
                    # it more like the code for -jqh, should solve the
                    # problem.
                    # charge = atom.partial_charge
                    # if atom.atom_type == 3:
                    #     for bonded_atom_index in atom.bonded_atom_indices:
                    #         bonded_atom = \
                    #             structure.atoms[bonded_atom_index - 1]
                    #         if bonded_atom in aliph_hyds:
                    #             charge += bonded_atom.partial_charge
                    data.append(datatypes.Datum(
                        # val=charge,
                        val=atom.partial_charge,
                        com='mqh',
                        typ='qh',
                        src_1=filename,
                        idx_1=idx_1 + 1,
                        atm_1=atom.index))
    # JAGUAR CHARGES EXCLUDING ALL SINGLE BONDED HYDROGENS
    filenames = chain.from_iterable(coms['jqa'])
    for filename in filenames:
        mae = check_outs(filename, outs, filetypes.Mae, direc)
        for idx_1, structure in enumerate(mae.structures):
            hyds = structure.get_hyds()
            for atom in structure.atoms:
                # Check if we want to use this charge and ensure it's not a
                # hydrogen.
                if (not 'b_q_use_charge' in atom.props or \
                        atom.props['b_q_use_charge']) and \
                        atom not in hyds:
                    charge = atom.partial_charge
                    # Check if it's bonded to a hydrogen.
                    for bonded_atom_index in atom.bonded_atom_indices:
                        bonded_atom = structure.atoms[bonded_atom_index - 1]
                        if bonded_atom in hyds:
                            if len(bonded_atom.bonded_atom_indices) < 2:
                                charge += bonded_atom.partial_charge
                    data.append(datatypes.Datum(
                        val=charge,
                        com='jqa',
                        typ='qa',
                        src_1=filename,
                        idx_1=idx_1 + 1,
                        atm_1=atom.index))
    # MACROMODEL CHARGES EXCLUDING ALL SINGLE BONDED HYDROGENS
    filenames = chain.from_iterable(coms['mqa'])
    for filename in filenames:
        name_mae = inps[filename].name_mae
        mae = check_outs(name_mae, outs, filetypes.Mae, direc)
        # Pick out the right structures. Sometimes our .com files
        # generate many structures in a .mae, not all of which
        # apply to this command.
        structures = filetypes.select_structures(
            mae.structures, inps[filename]._index_output_mae, 'pre')
        for idx_1, structure in structures:
            hyds = structure.get_hyds()
            for atom in structure.atoms:
                if (not 'b_q_use_charge' in atom.props or \
                        atom.props['b_q_use_charge']) and \
                        atom not in hyds:
                    charge = atom.partial_charge
                    for bonded_atom_index in atom.bonded_atom_indices:
                        bonded_atom = structure.atoms[bonded_atom_index - 1]
                        if bonded_atom in hyds:
                            if len(bonded_atom.bonded_atom_indices) < 2:
                                charge += bonded_atom.partial_charge
                    data.append(datatypes.Datum(
                        val=charge,
                        com='mqa',
                        typ='qa',
                        src_1=filename,
                        idx_1=idx_1 + 1,
                        atm_1=atom.index))
    # JAGUAR HESSIAN
    filenames = chain.from_iterable(coms['jh'])
    for filename in filenames:
        jin = check_outs(filename, outs, filetypes.JaguarIn, direc)
        hess = jin.hessian
        datatypes.mass_weight_hessian(hess, jin.structures[0].atoms)
        if invert:
            evals, evecs = np.linalg.eigh(hess)
            datatypes.replace_minimum(evals, value=invert)
            hess = evecs.dot(np.diag(evals).dot(evecs.T))
            datatypes.replace_minimum(hess, value=invert)
        low_tri_idx = np.tril_indices_from(hess)
        low_tri = hess[low_tri_idx]
        data.extend([datatypes.Datum(
            val=e,
            com='jh',
            typ='h',
            src_1=jin.filename,
            idx_1=x + 1,
            idx_2=y + 1)
            for e, x, y in zip(
                low_tri, low_tri_idx[0], low_tri_idx[1])])
    # GAUSSIAN HESSIAN
    filenames = chain.from_iterable(coms['gh'])
    for filename in filenames:
        log = check_outs(filename, outs, filetypes.GaussLog, direc)
        log.read_archive()
        # For now, the Hessian is stored on the structures inside the
        # filetype.
        hess = log.structures[0].hess
        datatypes.mass_weight_hessian(hess, log.structures[0].atoms)
        if invert:
            # Faster to use scipy.linalg.eig or scipy.linalg.eigsh (even
            # faster).
            evals, evecs = np.linalg.eigh(hess)
            # Returns True.
            # print(np.allclose(evecs.dot(np.diag(evals).dot(evecs.T)), hess))
            datatypes.replace_minimum(evals, value=invert)
            hess = evecs.dot(np.diag(evals).dot(evecs.T))
            # WARNING: This option may need to be mass weighted!
        low_tri_idx = np.tril_indices_from(hess)
        low_tri = hess[low_tri_idx]
        data.extend([datatypes.Datum(
            val=e,
            com='gh',
            typ='h',
            src_1=log.filename,
            idx_1=x + 1,
            idx_2=y + 1)
            for e, x, y in zip(
                low_tri, low_tri_idx[0], low_tri_idx[1])])
    # MACROMODEL HESSIAN
    filenames = chain.from_iterable(coms['mh'])
    for filename in filenames:
        # Get the .log for the .mae.
        name_log = inps[filename].name_log
        # Used to get dummy atoms.
        mae = check_outs(filename, outs, filetypes.Mae, direc)
        # Used to get the Hessian.
        log = check_outs(name_log, outs, filetypes.MacroModelLog, direc)
        hess = log.hessian
        dummies = mae.structures[0].get_dummy_atom_indices()
        hess_dummies = datatypes.get_dummy_hessian_indices(dummies)
        hess = datatypes.check_mm_dummy(hess, hess_dummies)
        low_tri_idx = np.tril_indices_from(hess)
        low_tri = hess[low_tri_idx]
        data.extend([datatypes.Datum(
            val=e,
            com='mh',
            typ='h',
            src_1=mae.filename,
            idx_1=x + 1,
            idx_2=y + 1)
            for e, x, y in zip(
                low_tri, low_tri_idx[0], low_tri_idx[1])])
    # JAGUAR EIGENMATRIX
    filenames = chain.from_iterable(coms['jeigz'])
    for comma_sep_filenames in filenames:
        name_in, name_out = comma_sep_filenames.split(',')
        jin = check_outs(name_in, outs, filetypes.JaguarIn, direc)
        out = check_outs(name_out, outs, filetypes.JaguarOut, direc)
        hess = jin.hessian
        evec = out.eigenvectors
        datatypes.mass_weight_hessian(hess, jin.structures[0].atoms)
        datatypes.mass_weight_eigenvectors(evec, out.structures[0].atoms)
        try:
            eigenmatrix = np.dot(np.dot(evec, hess), evec.T)
        except ValueError:
            logger.warning('Matrices not aligned!')
            logger.warning('Hessian retrieved from {}: {}'.format(
                name_in, hess.shape))
            logger.warning('Eigenvectors retrieved from {}: {}'.format(
                name_out, evec.shape))
            raise
        # Funny way to make off-diagonal elements zero.
        # eigenmatrix = np.diag(np.diag(eigenmatrix))
        # Take diagonal into one dimensional array.
        eigenmatrix = np.diag(eigenmatrix)
        if invert:
            datatypes.replace_minimum(eigenmatrix, value=invert)
        # Turn back into a full matrix.
        eigenmatrix = np.diag(eigenmatrix)
        low_tri_idx = np.tril_indices_from(eigenmatrix)
        low_tri = eigenmatrix[low_tri_idx]
        data.extend([datatypes.Datum(
            val=e,
            com='jeigz',
            typ='eig',
            src_1=jin.filename,
            src_2=out.filename,
            idx_1=x + 1,
            idx_2=y + 1)
            for e, x, y in zip(
                low_tri, low_tri_idx[0], low_tri_idx[1])])
    # GAUSSIAN EIGENMATRIX
    filenames = chain.from_iterable(coms['geigz'])
    for filename in filenames:
        log = check_outs(filename, outs, filetypes.GaussLog, direc)
        evals = log.evals * co.HESSIAN_CONVERSION
        if invert:
            datatypes.replace_minimum(evals, value=invert)
        eigenmatrix = np.diag(evals)
        low_tri_idx = np.tril_indices_from(eigenmatrix)
        low_tri = eigenmatrix[low_tri_idx]
        data.extend([datatypes.Datum(
            val=e,
            com='geigz',
            typ='eig',
            src_1=log.filename,
            idx_1=x + 1,
            idx_2=y + 1)
            for e, x, y in zip(
                low_tri, low_tri_idx[0], low_tri_idx[1])])
    # MACROMODEL EIGENMATRIX USING JAGUAR EIGENVECTORS
    filenames = chain.from_iterable(coms['mjeig'])
    for comma_sep_filenames in filenames:
        name_mae, name_out = comma_sep_filenames.split(',')
        name_log = inps[name_mae].name_log
        mae = check_outs(name_mae, outs, filetypes.Mae, direc)
        log = check_outs(name_log, outs, filetypes.MacroModelLog, direc)
        out = check_outs(name_out, outs, filetypes.JaguarOut, direc)
        hess = log.hessian
        dummies = mae.structures[0].get_dummy_atom_indices()
        hess_dummies = datatypes.get_dummy_hessian_indices(dummies)
        hess = datatypes.check_mm_dummy(hess, hess_dummies)
        evec = out.eigenvectors
        datatypes.mass_weight_eigenvectors(evec, out.structures[0].atoms)
        try:
            eigenmatrix = np.dot(np.dot(evec, hess), evec.T)
        except ValueError:
            logger.warning('Matrices not aligned!')
            logger.warning('Hessian retrieved from {}: {}'.format(
                log.filename, hess.shape))
            logger.warning('Eigenvectors retrieved from {}: {}'.format(
                name_out, evec.shape))
            raise
        low_tri_idx = np.tril_indices_from(eigenmatrix)
        low_tri = eigenmatrix[low_tri_idx]
        data.extend([datatypes.Datum(
            val=e,
            com='mjeig',
            typ='eig',
            src_1=mae.filename,
            src_2=out.filename,
            idx_1=x + 1,
            idx_2=y + 1)
            for e, x, y in zip(
                low_tri, low_tri_idx[0], low_tri_idx[1])])
    # MACROMODEL EIGENMATRIX USING GAUSSIAN EIGENVECTORS
    filenames = chain.from_iterable(coms['mgeig'])
    for comma_filenames in filenames:
        name_mae, name_gau_log = comma_filenames.split(',')
        name_mae_log = inps[name_mae].name_log
        mae = check_outs(name_mae, outs, filetypes.Mae, direc)
        mae_log = check_outs(name_mae_log, outs, filetypes.MacroModelLog,
                             direc)
        gau_log = check_outs(name_gau_log, outs, filetypes.GaussLog, direc)
        hess = mae_log.hessian
        dummies = mae.structures[0].get_dummy_atom_indices()
        hess_dummies = datatypes.get_dummy_hessian_indices(dummies)
        hess = datatypes.check_mm_dummy(hess, hess_dummies)
        evec = gau_log.evecs
        try:
            eigenmatrix = np.dot(np.dot(evec, hess), evec.T)
        except ValueError:
            logger.warning('Matrices not aligned!')
            logger.warning('Hessian retrieved from {}: {}'.format(
                name_mae_log, hess.shape))
            logger.warning('Eigenvectors retrieved from {}: {}'.format(
                name_gau_log, evec.shape))
            raise
        low_tri_idx = np.tril_indices_from(eigenmatrix)
        low_tri = eigenmatrix[low_tri_idx]
        data.extend([datatypes.Datum(
            val=e,
            com='mgeig',
            typ='eig',
            src_1=name_mae,
            src_2=name_gau_log,
            idx_1=x + 1,
            idx_2=y + 1)
            for e, x, y in zip(
                low_tri, low_tri_idx[0], low_tri_idx[1])])
    logger.log(15, 'TOTAL DATA POINTS: {}'.format(len(data)))
    return np.array(data, dtype=datatypes.Datum)


def collect_data_fake(coms, inps, direc='.', sub_names=['OPT']):
    """
    Generates a random data set quickly.
    """
    import random
    data = []
    filenames = flatten(coms.values())
    for idx_1, filename in enumerate(filenames):
        for idx_2 in range(5):
            data.append(datatypes.Datum(
                val=random.uniform(0, 10),
                com='rand',
                typ='a',
                src_1=filename,
                idx_1=idx_1 + 1,
                idx_2=idx_2 + 1))
    return np.array(data, dtype=datatypes.Datum)


def flatten(l):
    """
    Simple means to flatten an irregular list of lists.

    http://stackoverflow.com/questions/2158395/
    flatten-an-irregular-list-of-lists-in-python

    This goes a bit further than chain.from_iterable in that it can deal
    with an arbitrary number of nested lists.
    """
    # Move this?
    import collections
    for el in l:
        if isinstance(el, collections.Iterable) and \
                not isinstance(el, str):
            for sub in flatten(el):
                yield sub
        else:
            yield el


def collect_structural_data_from_mae(
        name_mae, inps, outs, direc, sub_names, com, ind, typ):
    """
    Repeated code used to extract structural data from .mae files (through
    the generation of .mmo files).

    Would be nice to reduce the number of arguments. The problem here is in
    carrying through data for the generation of the Datum object.

    Not going to write a pretty __doc__ for this since I want to make so
    many changes. These changes will likely go along with modifications to
    the classes inside filetypes.
    """
    data = []
    name_mmo = inps[name_mae].name_mmo
    # The indices is just a list for the calculation done, 'pre' or 'opt'.
    indices = inps[name_mae]._index_output_mmo
    mmo = check_outs(name_mmo, outs, filetypes.MacroModel, direc)
    selected_structures = filetypes.select_structures(
        mmo.structures, indices, ind)
    for idx_1, structure in selected_structures:
        data.extend(structure.select_data(
            typ,
            com=com,
            com_match=sub_names,
            src_1=mmo.filename,
            idx_1=idx_1 + 1))
    return data


# Added by Tony.
# Probably want to use the check_outs function at some point.
def collect_structural_data_from_tinker_log(
        name_xyz, inps, outs, direc, com, ind, typ, idx_1=None):
    select_struct = {'pre': 0, 'opt': 1}
    data = []
    name_log = inps[name_xyz].name_log
    log = check_outs(name_log, outs, filetypes.Tinker_log, direc)
    log_structure = log.structures
    struct = log_structure[select_struct[ind]]
    # Stuff to try out hessian.
    # xyz_struct = xyz_structure[0]
    # num_atoms = xyz_struct.props['total atoms']
    # hes_structure = inps[name_xyz].hess
    # hes_structure.natoms = num_atoms
    # hessian = hes_structure.hessian()
    # Stuff to try out hessian.
    if com in ['te', 'teo', 'tea', 'teao']:
        energy = struct.props['energy']
        new_datum = datatypes.Datum(
            val=energy,
            typ=typ,
            src_1=name_log,
            idx_1=idx_1 + 1)
        return new_datum
    else:
        data.extend(struct.select_data(
            typ,
            com=com,
            src_1=name_log))
    return data


def sort_commands_by_filename(commands):
    '''
    Takes a dictionary of commands like...

     {'me': [['a1.01.mae', 'a2.01.mae', 'a3.01.mae'],
             ['b1.01.mae', 'b2.01.mae']],
      'mb': [['a1.01.mae'], ['b1.01.mae']],
      'jeig': [['a1.01.in,a1.out', 'b1.01.in,b1.out']]
     }

    ... and turns it into a dictionary that looks like...

    {'a1.01.mae': ['me', 'mb'],
     'a1.01.in': ['jeig'],
     'a1.out': ['jeig'],
     'a2.01.mae': ['me'],
     'a3.01.mae': ['me'],
     'b1.01.mae': ['me', 'mb'],
     'b1.01.in': ['jeig'],
     'b1.out': ['jeig'],
     'b2.01.mae': ['me']
    }

    Arguments
    ---------
    commands : dic

    Returns
    -------
    dictionary of the sorted commands
    '''
    sorted_commands = {}
    for command, groups_filenames in commands.items():
        for comma_separated in chain.from_iterable(groups_filenames):
            for filename in comma_separated.split(','):
                if filename in sorted_commands:
                    sorted_commands[filename].append(command)
                else:
                    sorted_commands[filename] = [command]
    return sorted_commands


# Will also have to be updated. Maybe the Datum class too and how it responds
# to assigning labels.
## Why is this here? Is this deprecated? -Tony
def read_reference(filename):
    data = []
    with open(filename, 'r') as f:
        for line in f:
            # Skip certain lines.
            if line.startswith('-'):
                continue
            # Remove everything following a # in a line.
            line = line.partition('#')[0]
            cols = line.split()
            # There should always be 3 columns.
            if len(cols) == 3:
                lbl, wht, val = cols
                datum = datatypes.Datum(
                    lbl=lbl, wht=float(wht), val=float(val))
                lbl_to_data_attrs(datum, lbl)
                data.append(datum)
    # Note: list.sort() sorts in place and returns None, so don't assign
    # its return value back to data.
    data.sort(key=datatypes.datum_sort_key)
    return np.array(data)


## This is also part of the read_reference function above, but I think these
## labels and attributes are important for handling data.
# Shouldn't be necessary anymore.
# This should be based on the datum type and not the length of the parts
# list.
def lbl_to_data_attrs(datum, lbl):
    parts = lbl.split('_')
    datum.typ = parts[0]
    # if len(parts) == 3:
    if datum.typ in ['e', 'eo', 'ea', 'eao', 'eig', 'h', 'q', 'qh', 'qa']:
        idxs = parts[-1]
    # if len(parts) == 4:
    if datum.typ in ['b', 't', 'a']:
        idxs = parts[-2]
        atm_nums = parts[-1]
        atm_nums = atm_nums.split('-')
        for i, atm_num in enumerate(atm_nums):
            setattr(datum, 'atm_{}'.format(i+1), int(atm_num))
    if datum.typ in ['p']:
        datum.src_1 = parts[1]
        idxs = parts[-1]
    if datum.typ in ['esp']:
        datum.src_1 = parts[1]
        idxs = parts[-1]
    idxs = idxs.split('-')
    datum.idx_1 = int(idxs[0])
    if len(idxs) == 2:
        datum.idx_2 = int(idxs[1])


# Right now, this only looks good if the logger doesn't append each log
# message with something (module, date/time, etc.).
# It would be great if this output looked good regardless of the settings
# used for the logger.
# That goes for all of these pretty output functions that use TextWrapper.
def pretty_commands_for_files(commands_for_files, log_level=5):
    """
    Logs the .mae commands dictionary, or all of the commands used on a
    particular file.

    Arguments
    ---------
    commands_for_files : dic
    log_level : int
    """
    if logger.getEffectiveLevel() <= log_level:
        foobar = TextWrapper(
            width=48, subsequent_indent=' '*26)
        logger.log(
            log_level,
            '--' + ' FILENAME '.center(22, '-') +
            '--' + ' COMMANDS '.center(22, '-') +
            '--')
        for filename, commands in commands_for_files.items():
            foobar.initial_indent = ' {:22s} '.format(filename)
            logger.log(log_level, foobar.fill(' '.join(commands)))
        logger.log(log_level, '-'*50)


def pretty_all_commands(commands, log_level=5):
    """
    Logs the arguments/commands given to calculate that are used to request
    particular datatypes from particular files.

    Arguments
    ---------
    commands : dic
    log_level : int
    """
    if logger.getEffectiveLevel() <= log_level:
        foobar = TextWrapper(width=48, subsequent_indent=' '*24)
        logger.log(log_level, '')
        logger.log(
            log_level,
            '--' + ' COMMAND '.center(9, '-') +
            '--' + ' GROUP # '.center(9, '-') +
            '--' + ' FILENAMES '.center(24, '-') +
            '--')
        for command, groups_filenames in commands.items():
            for i, filenames in enumerate(groups_filenames):
                if i == 0:
                    foobar.initial_indent = \
                        ' {:9s} {:^9d} '.format(command, i+1)
                else:
                    foobar.initial_indent = \
                        ' ' + ' '*9 + ' ' + '{:^9d} '.format(i+1)
                logger.log(log_level, foobar.fill(' '.join(filenames)))
        logger.log(log_level, '-'*50)


def pretty_data(data, log_level=20):
    """
    Logs data as a table.

    Arguments
    ---------
    data : list of Datum
    log_level : int
    """
    # Really, this should check every data point instead of only the 1st.
    if not data[0].wht:
        compare.import_weights(data)
    if log_level:
        string = ('--' + ' LABEL '.center(22, '-') +
                  '--' + ' WEIGHT '.center(22, '-') +
                  '--' + ' VALUE '.center(22, '-') +
                  '--')
        logger.log(log_level, string)
    for d in data:
        if d.wht or d.wht == 0:
            string = (' ' + '{:22s}'.format(d.lbl) +
                      ' ' + '{:22.4f}'.format(d.wht) +
                      ' ' + '{:22.4f}'.format(d.val))
        else:
            string = (' ' + '{:22s}'.format(d.lbl) +
                      ' ' + '{:22.4f}'.format(d.val))
        if log_level:
            logger.log(log_level, string)
        else:
            print(string)
    if log_level:
        logger.log(log_level, '-' * 50)


if __name__ == '__main__':
    logging.config.dictConfig(co.LOG_SETTINGS)
    main(sys.argv[1:])
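# The dozen relative-energy blocks in collect_data above all repeat the same
# two-step pattern: gather one group of Datum objects per idx_1, then shift
# the whole group in place, by its minimum for the 'e'/'eo' data types or by
# its mean for the 'ea'/'eao' types. Below is a minimal, self-contained
# sketch of just that convention; SimpleDatum and make_relative are
# hypothetical stand-ins written for illustration, not part of Q2MM's
# datatypes module.
class SimpleDatum(object):
    def __init__(self, val):
        self.val = val

def make_relative(temp, mode='lowest'):
    # mode='lowest' mimics the 'e'/'eo' blocks (zero = min of the group);
    # mode='average' mimics the 'ea'/'eao' blocks (avg of the group).
    if mode == 'lowest':
        shift = min(x.val for x in temp)
    else:
        shift = sum(x.val for x in temp) / len(temp)
    for datum in temp:
        datum.val -= shift
    return temp

group = [SimpleDatum(10.0), SimpleDatum(12.5), SimpleDatum(11.0)]
print([d.val for d in make_relative(group, 'lowest')])  # [0.0, 2.5, 1.0]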
Q2MM/q2mm
q2mm/calculate.py
Python
mit
84,150
[ "Amber", "Gaussian", "Jaguar", "MacroModel", "TINKER" ]
848e3a0bdb11b9916a2ffe54071fa3ff2bad85951cf6120010fc593620734037
#!/usr/bin/env python

"""
blast2gff.py [options] <blast file>
"""

import sys
from optparse import OptionParser
from blast import BlastFile
import gff

usage = "%prog [options] <blast file>"
parser = OptionParser(usage=usage)
parser.add_option(
    "-s", "--source", dest="source",
    help="GFF source (Default: match)", default='match')
parser.add_option(
    "-n", "--note", dest="note",
    help="Note", default=None)
options, args = parser.parse_args()

output = []
extrema = []
scores = []
for hsp in BlastFile(args[0]):
    tokens = hsp.queryId.split('|')
    if len(tokens) > 4:
        name = tokens[3]
    else:
        name = hsp.queryId

    g = gff.Feature(
        reference=hsp.subjectId.split(':')[0],
        source=options.source,
        type='HSP',
        start=hsp.sStart,
        end=hsp.sEnd,
        score=hsp.bitScore,
        strand=hsp.strand(),
        group='Match %s ; Evalue %g' % (name, hsp.eValue)
    )
    if options.note:
        g.group += ' ; Note "%s"' % options.note
    output.append(g)
    extrema.append(g.start)
    extrema.append(g.end)
    scores.append(hsp.bitScore)

output.sort(key=lambda x: x.start)
if output:
    g = output[0]
    match = gff.Feature(
        reference=g.reference,
        source=options.source,
        type='match',
        start=min(extrema),
        end=max(extrema),
        score=sum(scores),
        strand=g.strand,
        group='Match %s' % name
    )
    if options.note:
        match.group += ' ; Note "%s"' % options.note
    output.insert(0, match)

for g in output:
    print g
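A typical invocation of this script, assuming a BLAST report readable by the repository's BlastFile parser (the file names below are hypothetical), looks like:

    python blast2gff.py --source blastn --note "assembly hits" query_vs_genome.blast > query_vs_genome.gff

The script writes GFF to stdout: one summary 'match' feature spanning the extrema of all HSPs (scored with the summed bit scores), followed by one 'HSP' feature per alignment.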
PapenfussLab/Mungo
bin/blast2gff.py
Python
artistic-2.0
1,596
[ "BLAST" ]
b9633896d58a6686bcbdd3b8200e7d6a15d3ba353b9d75ffb43a7ecb9f3b76c0
""" dabam: (dataBase for metrology) python module for processing remote files containing the results of metrology measurements on X-ray mirrors classes: dabam main functions: cdf (calculate antiderivative function) psd (calculate power spectral density) write_shadowSurface (writes file with a mesh for SHADOW) func_ellipse_slopes evaluates the ellipse slopes profile equation MODIFICATION HISTORY: 20130902 srio@esrf.eu, written 20131109 srio@esrf.eu, added command line arguments, access metadata 20151103 srio@esrf.eu, restructured to OO 20151118 srio@esrf.eu, cleaned and tested 20190731 srio@lbl.gov, updated version, allows reading external files, change server, etc. """ __author__ = "Manuel Sanchez del Rio" __contact__ = "srio@esrf.eu" __copyright = "ESRF, 2013-2015; LBNL, 2019" import numpy import copy # to manage input parameters from command-line argument import sys import argparse import json import os from io import StringIO try: # For Python 3.0 and later from urllib.request import urlopen except ImportError: # Fall back to Python 2's urllib2 from urllib2 import urlopen default_server = "http://ftp.esrf.eu/pub/scisoft/dabam/data/" class dabam(object): def __init__(self): self.description="dabam.py: python program to access and evaluate DAta BAse for Metrology (DABAM) files. See http://ftp.esrf.eu/pub/scisoft/dabam/README.md" self.is_remote_access = True self.server = default_server self.server_local = "" self.inputs = { 'entryNumber':1, # 'an integer indicating the DABAM entry number' 'silent':False, # 'Silent mode. Default is No' 'localFileRoot':None, # 'Define the name of local DABAM file root (<name>.dat for data, <name>.txt for metadata).' 'outputFileRoot':"", # 'Define the root for output files. Default is "", so no output files' 'setDetrending':-2, # 'Detrending: if >0 is the polynomial degree, -1=skip, -2=read from metadata DETRENDING, -3=ellipse(optimized) -4=ellipse(design)' 'nbinS':101, # 'number of bins of the slopes histogram in rads. ' 'nbinH':101, # 'number of bins heights histogram in m. ' 'shadowCalc':False, # 'Write file with mesh for SHADOW.' 'shadowNy':-1, # 'For SHADOW file, the number of points along Y (length). If negative, use the profile points. ' 'shadowNx':11, # 'For SHADOW file, the number of points along X (width). ' 'shadowWidth':6.0, # 'For SHADOW file, the surface dimension along X (width) in cm. ' 'multiply':1.0, # 'Multiply input profile (slope or height) by this number (to play with StDev values). ' 'oversample':0.0, # 'Oversample factor for abscissas. Interpolate profile foor a new one with this factor times npoints' 'useHeightsOrSlopes':-1, # 'Force calculations using profile heights (0) or slopes (1). Overwrites FILE_FORMAT keyword. Default=-1 (like FILE_FORMAT)' 'useAbscissasColumn':-1, # 'Use abscissas column index. Defaut=-1 use the metadata COLUMN_INDEX_ABSCISSAS or 0 if undefined'' 'useOrdinatesColumn':-1, # 'Use ordinates column index. 
Defaut=-1 use the metadata COLUMN_INDEX_ORDINATES or 1 if undefined' 'plot':None, # plot data # 'runTests':False, # run tests cases 'summary':False, # get summary of DABAM profiles } #to load profiles: TODO: rename some variables to more meaningful names self.metadata = None # metadata self.rawdata = None # raw datafile self.y = None # abscissa along the mirror self.zSlopesUndetrended = None # undetrended slope profile self.zSlopes = None # detrended slope profile self.zHeightsUndetrended = None # undetrended heights profile self.zHeights = None # detrended heights profile self.coeffs = None # information on detrending (polynomial coeffs) self.f = None # frequency of Power Spectral Density self.psdHeights = None # Power Spectral Density of Heights profile self.psdSlopes = None # Power Spectral Density of slopes profile self.csdHeights = None # Antiderivative of PDF of Heights profile self.csdSlopes = None # Antiderivative of PDF of Slopes profile self.histoSlopes = None # to store slopes histogram self.histoHeights = None # to store heights histogram self.momentsSlopes = None # to store moments of the slopes profile self.momentsHeights = None # to store moments of the heights profile self.powerlaw = {"hgt_pendent":None, "hgt_shift":None, "slp_pendent":None, "slp_shift":None, "index_from":None,"index_to":None} # to store a dictionary with the results of fitting the PSDs @classmethod def initialize_from_entry_number(cls, entry_number): dm = dabam() dm.load(entry_number) return dm @classmethod def initialize_from_local_server(cls,entry,server=None): dm0 = dabam() dm0.is_remote_access = False if server is not None: dm0.set_server(server) dm0.set_input_entryNumber(entry) dm0.load() return dm0 @classmethod def initialize_from_external_data(cls, input, column_index_abscissas=0, column_index_ordinates=1, skiprows=1, useHeightsOrSlopes=0, to_SI_abscissas=1.0, to_SI_ordinates=1.0, detrending_flag=-1, ): dm = dabam() dm.is_remote_access = False dm.rawdata = numpy.loadtxt(input, skiprows=skiprows) dm.set_input_useAbscissasColumn(column_index_abscissas) dm.set_input_useOrdinatesColumn(column_index_ordinates) dm.set_input_localFileRoot("<none>") # filename.rsplit( ".", 1 )[ 0 ]) dm.set_input_entryNumber(-1) dm.set_input_multiply(1.0) dm.set_input_oversample(0.0) # dm.set_input_setDetrending(-1) dm.set_input_useHeightsOrSlopes(useHeightsOrSlopes) # minimalist metadata dm.metadata = {} if useHeightsOrSlopes == 0: dm.metadata["FILE_FORMAT"] = 2 elif useHeightsOrSlopes: dm.metadata["FILE_FORMAT"] = 1 dm.metadata["FILE_HEADER_LINES"] = skiprows dm.metadata["X1_FACTOR"] = to_SI_abscissas for i in range(1, dm.rawdata.shape[1]): dm.metadata["Y%d_FACTOR" % i] = to_SI_ordinates dm.metadata["COLUMN_INDEX_ABSCISSAS"] = column_index_abscissas dm.metadata["COLUMN_INDEX_ORDINATES"] = column_index_ordinates dm.metadata["DETRENDING"] = detrending_flag dm.set_input_setDetrending(detrending_flag) dm.make_calculations() return dm # #setters (recommended to use setters for changing input and not setting directly the value in self.inputs, # because python does not give errors if the key does not exist but create a new one!) 
# @classmethod def get_default_server(cls): return default_server def set_default_server(self): self.set_server(default_server) def set_server(self,server): if server.find("//") >=0: self.is_remote_access = True self.server = server else: self.is_remote_access = False self.server_local = server def get_server(self,directory): if self.is_remote_access: return self.server_local else: return self.server_local def reset(self): self.__init__() #variables def set_input_entryNumber(self,value): self.inputs["entryNumber"] = value def set_input_silent(self,value): self.inputs["silent"] = value def set_input_localFileRoot(self,value): self.inputs["localFileRoot"] = value if value is not None: self.is_remote_access = False def set_input_outputFileRoot(self,value): self.inputs["outputFileRoot"] = value def set_input_setDetrending(self,value): self.inputs["setDetrending"] = value def set_input_nbinS(self,value): self.inputs["nbinS"] = value def set_input_nbinH(self,value): self.inputs["nbinH"] = value def set_input_shadowCalc(self,value): self.inputs["shadowCalc"] = value def set_input_shadowNy(self,value): self.inputs["shadowNy"] = value def set_input_shadowNx(self,value): self.inputs["shadowNx"] = value def set_input_shadowWidth(self,value): self.inputs["shadowWidth"] = value def set_input_multiply(self,value): self.inputs["multiply"] = value def set_input_oversample(self,value): self.inputs["oversample"] = value def set_input_useHeightsOrSlopes(self,value): self.inputs["useHeightsOrSlopes"] = value def set_input_useAbscissasColumn(self,value): self.inputs["useAbscissasColumn"] = value def set_input_useOrdinatesColumn(self,value): self.inputs["useOrdinatesColumn"] = value def set_input_plot(self,value): self.inputs["plot"] = value # def set_input_runTests(self,value): # self.inputs["runTests"] = value def set_input_summary(self,value): self.inputs["summary"] = value # a shortcut (frequent usage) def set_entry(self,value): self.inputs["entryNumber"] = value #others def set_inputs_from_dictionary(self,dict): try: self.set_input_entryNumber ( dict["entryNumber"] ) self.set_input_silent ( dict["silent"] ) self.set_input_localFileRoot ( dict["localFileRoot"] ) self.set_input_outputFileRoot ( dict["outputFileRoot"] ) self.set_input_setDetrending ( dict["setDetrending"] ) self.set_input_nbinS ( dict["nbinS"] ) self.set_input_nbinH ( dict["nbinH"] ) self.set_input_shadowCalc ( dict["shadowCalc"] ) self.set_input_shadowNy ( dict["shadowNy"] ) self.set_input_shadowNx ( dict["shadowNx"] ) self.set_input_shadowWidth ( dict["shadowWidth"] ) self.set_input_multiply ( dict["multiply"] ) self.set_input_oversample ( dict["oversample"] ) self.set_input_useHeightsOrSlopes ( dict["useHeightsOrSlopes"] ) self.set_input_useAbscissasColumn ( dict["useAbscissasColumn"] ) self.set_input_useOrdinatesColumn ( dict["useOrdinatesColumn"] ) self.set_input_plot ( dict["plot"] ) # self.set_input_runTests ( dict["runTests"] ) self.set_input_summary ( dict["summary"] ) except: raise Exception("Failed setting dabam input parameters from dictionary") # # tools # def is_remote_access(self): return self.is_remote_access def set_remote_access(self): self.is_remote_access = True # #getters # def get_input_value(self,key): try: return self.inputs[key] except: print("****get_input_value: Error returning value for key=%s"%(key)) return None def get_inputs_as_dictionary(self): return copy.copy(self.inputs) def get_input_value_help(self,key): if key == 'entryNumber': return 'An integer indicating the DABAM entry number or the remote profile 
files' if key == 'silent': return 'Avoid printing information messages.' if key == 'localFileRoot': return 'Define the name of local DABAM file root (<name>.dat for data, <name>.txt for metadata). If unset, use remote access' if key == 'outputFileRoot': return 'Define the root for output files. Set to "" for no output. Default is "'+self.get_input_value("outputFileRoot")+'"' if key == 'setDetrending': return 'Detrending: if >0 is the polynomial degree, -1=skip, -2=read from metadata DETRENDING, -3=ellipse(optimized), -4=ellipse(design). Default=%d'%self.get_input_value("setDetrending") if key == 'nbinS': return 'Number of bins for the slopes histogram in rads. Default is %d'%self.get_input_value("nbinS") if key == 'nbinH': return 'Number of bins for the heights histogram in m. Default is %d'%self.get_input_value("nbinH") if key == 'shadowCalc': return 'Write file with mesh for SHADOW. Default=No' if key == 'shadowNy': return 'For SHADOW file, the number of points along Y (length). If negative, use the profile points. Default=%d'%self.get_input_value("shadowNy") if key == 'shadowNx': return 'For SHADOW file, the number of points along X (width). Default=%d'%self.get_input_value("shadowNx") if key == 'shadowWidth': return 'For SHADOW file, the surface dimension along X (width) in cm. Default=%4.2f'%self.get_input_value("shadowWidth") if key == 'multiply': return 'Multiply input profile (slope or height) by this number (to play with StDev values). Default=%4.2f'%self.get_input_value("multiply") if key == 'oversample': return 'Oversample factor for the number of abscissas points. 0=No oversample. (Default=%2.1f)'%self.get_input_value("oversample") if key == 'useHeightsOrSlopes': return 'Force calculations using profile heights (0) or slopes (1). If -1, used metadata keyword FILE_FORMAT. Default=%d'%self.get_input_value("useHeightsOrSlopes") if key == 'useAbscissasColumn': return 'Use abscissas column index. Default=%d use the metadata COLUMN_INDEX_ABSCISSAS or 0 if undefined'%self.get_input_value("useAbscissasColumn") if key == 'useOrdinatesColumn': return 'Use ordinates column index. Default=%d use the metadata COLUMN_INDEX_ORDINATES or 1 if undefined'%self.get_input_value("useOrdinatesColumn") if key == 'plot': return 'Plot: all heights slopes psd_h psd_s csd_h csd_s. histo_s histo_h acf_h acf_s. Default=%s'%repr(self.get_input_value("plot")) if key == 'summary': return 'gets a summary of all DABAM profiles' return '' def get_input_value_short_name(self,key): if key == 'entryNumber': return 'N' if key == 'silent': return 's' if key == 'localFileRoot': return 'l' if key == 'outputFileRoot': return 'r' if key == 'setDetrending': return 'D' if key == 'nbinS': return 'b' if key == 'nbinH': return 'e' if key == 'shadowCalc': return 'S' if key == 'shadowNy': return 'y' if key == 'shadowNx': return 'x' if key == 'shadowWidth': return 'w' if key == 'multiply': return 'm' if key == 'oversample': return 'I' if key == 'useHeightsOrSlopes': return 'Z' if key == 'useAbscissasColumn': return 'A' if key == 'useOrdinatesColumn': return 'O' if key == 'plot': return 'P' if key == 'summary': return 'Y' return '?' # # file names # def file_metadata(self): return self._file_root()+'.txt' def file_data(self): return self._file_root()+'.dat' # # load profile and store data. This is the main action!! 
# def load(self,entry=None): if entry is None: pass else: self.set_input_entryNumber(entry) # load data and metadata self._load_file_metadata() self._load_file_data() # test consistency if self.is_remote_access: if self.get_input_value("entryNumber") <= 0: raise Exception("Error: entry number must be non-zero positive for remote access.") self.make_calculations() def metadata_set_info(self, YEAR_FABRICATION=None, SURFACE_SHAPE=None, FUNCTION=None, LENGTH=None, WIDTH=None, THICK=None, LENGTH_OPTICAL=None, SUBSTRATE=None, COATING=None, FACILITY=None, INSTRUMENT=None, POLISHING=None, ENVIRONMENT=None, SCAN_DATE=None, CALC_HEIGHT_RMS=None, CALC_HEIGHT_RMS_FACTOR=None, CALC_SLOPE_RMS=None, CALC_SLOPE_RMS_FACTOR=None, USER_EXAMPLE=None, USER_REFERENCE=None, USER_ADDED_BY=None, ): # # do not change these tags # # dm.metadata["FILE_FORMAT"] = None # dm.metadata["FILE_HEADER_LINES"] = None # dm.metadata["X1_FACTOR"] = None # dm.metadata["COLUMN_INDEX_ORDINATES"] = None # for i in range(4): # dm.metadata["Y1_FACTOR"%(i+1)] = None # # for i in range(4): # dm.metadata["PLOT_TITLE_X%d"%(i+1)] = None # dm.metadata["PLOT_TITLE_Y%d"%(i+1)] = None self.metadata["YEAR_FABRICATION"] = YEAR_FABRICATION self.metadata["SURFACE_SHAPE"] = SURFACE_SHAPE self.metadata["FUNCTION"] = FUNCTION self.metadata["LENGTH"] = LENGTH self.metadata["WIDTH"] = WIDTH self.metadata["THICK"] = THICK self.metadata["LENGTH_OPTICAL"] = LENGTH_OPTICAL self.metadata["SUBSTRATE"] = SUBSTRATE self.metadata["COATING"] = COATING self.metadata["FACILITY"] = FACILITY self.metadata["INSTRUMENT"] = INSTRUMENT self.metadata["POLISHING"] = POLISHING self.metadata["ENVIRONMENT"] = ENVIRONMENT self.metadata["SCAN_DATE"] = SCAN_DATE self.metadata["CALC_HEIGHT_RMS"] = CALC_HEIGHT_RMS self.metadata["CALC_HEIGHT_RMS_FACTOR"] = CALC_HEIGHT_RMS_FACTOR self.metadata["CALC_SLOPE_RMS"] = CALC_SLOPE_RMS self.metadata["CALC_SLOPE_RMS_FACTOR"] = CALC_SLOPE_RMS_FACTOR self.metadata["USER_EXAMPLE"] = USER_EXAMPLE self.metadata["USER_REFERENCE"] = USER_REFERENCE self.metadata["USER_ADDED_BY"] = USER_ADDED_BY # #calculations # def make_calculations(self): #calculate detrended profiles self._calc_detrended_profiles() #calculate psd self._calc_psd() #calculate histograms self._calc_histograms() #calculate moments self.momentsHeights = moment(self.zHeights) self.momentsSlopes = moment(self.zSlopes) # write files if self.get_input_value("outputFileRoot") != "": self._write_output_files() #write shadow file if self.get_input_value("shadowCalc"): self._write_file_for_shadow() if not(self.get_input_value("silent")): outFile = self.get_input_value("outputFileRoot")+'Shadow.dat' print ("File "+outFile+" for SHADOW written to disk.") #info if not(self.get_input_value("silent")): print(self.info_profiles()) def stdev_profile_heights(self): return self.zHeights.std(ddof=1) def stdev_profile_slopes(self): return self.zSlopes.std(ddof=1) def stdev_psd_heights(self): return numpy.sqrt(self.csdHeights[-1]) def stdev_psd_slopes(self): return numpy.sqrt(self.csdSlopes[-1]) def stdev_user_heights(self): try: if self.metadata['CALC_HEIGHT_RMS'] != None: if self.metadata['CALC_HEIGHT_RMS_FACTOR'] != None: return float(self.metadata['CALC_HEIGHT_RMS']) * float(self.metadata['CALC_HEIGHT_RMS_FACTOR']) else: return float(self.metadata['CALC_HEIGHT_RMS']) except: return None def stdev_user_slopes(self): try: if self.metadata['CALC_SLOPE_RMS'] != None: if self.metadata['CALC_SLOPE_RMS_FACTOR'] != None: return float(self.metadata['CALC_SLOPE_RMS']) * 
float(self.metadata['CALC_SLOPE_RMS_FACTOR']) else: return float(self.metadata['CALC_SLOPE_RMS']) except: return None def csd_heights(self): return numpy.sqrt(self.csdHeights)/self.stdev_psd_heights() def csd_slopes(self): return numpy.sqrt(self.csdSlopes)/self.stdev_psd_slopes() def autocorrelation_heights(self): c1,c2,c3 = autocorrelationfunction(self.y,self.zHeights) return c3 def autocorrelation_slopes(self): c1,c2,c3 = autocorrelationfunction(self.y,self.zSlopes) return c3 # # info # def info_profiles(self): if self.zHeights is None: return "Error: no loaded profile." txt = "" polDegree = self._get_polDegree() #; #; info #; # txt += '\n---------- profile results -------------------------\n' if self.is_remote_access: txt += 'Remote directory:\n %s\n'%self.server txt += 'Data File: %s\n'%self.file_data() txt += 'Metadata File: %s\n'%self.file_metadata() try: txt += "\nUser reference: %s\n"%self.metadata["USER_REFERENCE"] except: pass try: txt += "Added by (user): %s\n"%self.metadata["USER_ADDED_BY"] except: pass try: txt += '\nSurface shape: %s\n'%(self.metadata['SURFACE_SHAPE']) except: pass try: txt += 'Facility: %s\n'%(self.metadata['FACILITY']) except: pass try: txt += 'Scan length: %.3f mm\n'%(1e3*(self.y[-1]-self.y[0])) except: pass txt += 'Number of points: %d\n'%(len(self.y)) txt += '\n' if polDegree >= 0: if polDegree == 1: txt += "Linear detrending: z'=%g x%+g"%(self.coeffs[0],self.coeffs[1])+"\n" txt += 'Radius of curvature: %.3F m'%(1.0/self.coeffs[-2])+"\n" else: txt += 'Polynomial detrending coefficients: '+repr(self.coeffs)+"\n" elif polDegree == -1: txt += 'No detrending applied.\n' elif polDegree == -3: txt += 'Ellipse detrending applied. Using Optimized parameters:\n' txt += ' p = %f m \n'%self.coeffs[0] txt += ' q = %f m \n'%self.coeffs[1] txt += ' theta = %f rad \n'%self.coeffs[2] txt += ' vertical shift = %f nm \n'%self.coeffs[3] elif polDegree == -4: txt += 'Ellipse detrending applied. 
Usinng Design parameters:\n' txt += ' p = %f m \n'%self.coeffs[0] txt += ' q = %f m \n'%self.coeffs[1] txt += ' theta = %f rad \n'%self.coeffs[2] txt += ' vertical shift = %f nm \n'%self.coeffs[3] txt += self.statistics_summary() txt += '----------------------------------------------------\n' return txt def statistics_summary(self): txt = "" txt += 'Slopes profile:\n' txt += ' StDev of slopes profile: %.3f urad\n' %( 1e6*self.stdev_profile_slopes() ) txt += ' from PSD: %.3f urad\n' %( 1e6*self.stdev_psd_slopes()) if self.stdev_user_slopes() != None: txt += ' from USER (metadata): %.3f urad\n' %(1e6*self.stdev_user_slopes()) txt += ' Peak-to-valley: no detrend: %.3f urad\n' %(1e6*(self.zSlopesUndetrended.max() - self.zSlopesUndetrended.min())) txt += ' with detrend: %.3f urad\n' %(1e6*(self.zSlopes.max() - self.zSlopes.min() )) txt += ' Skewness: %.3f, Kurtosis: %.3f\n' %(self.momentsSlopes[2],self.momentsSlopes[3]) beta = -self.powerlaw["slp_pendent"] txt += ' PSD power law fit: beta:%.3f, Df: %.3f\n' %(beta,(5-beta)/2) txt += ' Autocorrelation length:%.3f\n' %(self.autocorrelation_slopes()) txt += 'Heights profile: \n' txt += ' StDev of heights profile: %.3f nm\n' %(1e9*self.stdev_profile_heights() ) txt += ' from PSD: %.3f nm\n' %(1e9*self.stdev_psd_heights() ) if self.stdev_user_heights() != None: txt += ' from USER (metadata): %.3f nm\n' %(1e9*self.stdev_user_heights()) txt += ' Peak-to-valley: no detrend: %.3f nm\n' %(1e9*(self.zHeightsUndetrended.max() - self.zHeightsUndetrended.min())) txt += ' with detrend: %.3f nm\n' %(1e9*(self.zHeights.max() - self.zHeights.min() )) txt += ' Skewness: %.3f, Kurtosis: %.3f\n' %(self.momentsHeights[2],self.momentsHeights[3]) beta = -self.powerlaw["hgt_pendent"] txt += ' PSD power law fit: beta:%.3f, Df: %.3f\n' %(beta,(5-beta)/2) txt += ' Autocorrelation length:%.3f\n' %(self.autocorrelation_heights()) return txt def plot(self,what=None): try: from matplotlib import pylab as plt except: print("Cannot make plots. 
Please install matplotlib.") return None if what is None: what = self.get_input_value("plot") if what == "all": what = ["heights","slopes","psd_h","psd_s","csd_h","cds_s","histo_s","histo_h"] else: what = what.split(" ") for i,iwhat in enumerate(what): print("plotting: ",iwhat) if (iwhat == "heights" ): f1 = plt.figure(1) plt.plot(1e3*self.y,1e6*self.zHeights) plt.title("heights profile") plt.xlabel("Y [mm]") plt.ylabel("Z [um]") elif (iwhat == "slopes"): f2 = plt.figure(2) plt.plot(1e3*self.y,1e6*self.zSlopes) plt.title("slopes profile") plt.xlabel("Y [mm]") plt.ylabel("Zp [urad]") elif (iwhat == "psd_h"): f3 = plt.figure(3) plt.loglog(self.f,self.psdHeights) y = self.f**(self.powerlaw["hgt_pendent"])*10**self.powerlaw["hgt_shift"] i0 = self.powerlaw["index_from"] i1 = self.powerlaw["index_to"] plt.loglog(self.f,y) plt.loglog(self.f[i0:i1],y[i0:i1]) beta = -self.powerlaw["hgt_pendent"] plt.title("PSD of heights profile (beta=%.2f,Df=%.2f)"%(beta,(5-beta)/2)) plt.xlabel("f [m^-1]") plt.ylabel("PSD [m^3]") elif (iwhat == "psd_s"): f4 = plt.figure(4) plt.loglog(self.f,self.psdSlopes) y = self.f**(self.powerlaw["slp_pendent"])*10**self.powerlaw["slp_shift"] i0 = self.powerlaw["index_from"] i1 = self.powerlaw["index_to"] plt.loglog(self.f,y) plt.loglog(self.f[i0:i1],y[i0:i1]) beta = -self.powerlaw["slp_pendent"] plt.title("PSD of slopes profile (beta=%.2f,Df=%.2f)"%(beta,(5-beta)/2)) plt.xlabel("f [m^-1]") plt.ylabel("PSD [rad^3]") elif (iwhat == "csd_h"): f5 = plt.figure(5) plt.semilogx(self.f,self.csd_heights()) plt.title("Cumulative Spectral Density of heights profile") plt.xlabel("f [m^-1]") plt.ylabel("csd_h") elif (iwhat == "csd_s"): f6 = plt.figure(6) plt.semilogx(self.f,self.csd_slopes()) plt.title("Cumulative Spectral Density of slopes profile") plt.xlabel("f [m^-1]") plt.ylabel("csd_s") elif (iwhat == "histo_s" ): f7 = plt.figure(7) plt.plot(1e6*self.histoSlopes["x_path"],self.histoSlopes["y1_path"]) plt.plot(1e6*self.histoSlopes["x_path"],self.histoSlopes["y2_path"]) plt.title("slopes histogram and Gaussian with StDev: %10.3f urad"%(1e6*self.stdev_profile_slopes())) plt.xlabel("Z' [urad]") plt.ylabel("counts") elif (iwhat == "histo_h" ): f8 = plt.figure(8) plt.plot(1e9*self.histoHeights["x_path"],self.histoHeights["y1_path"]) plt.plot(1e9*self.histoHeights["x_path"],self.histoHeights["y2_path"]) plt.title("heights histogram and Gaussian with StDev: %10.3f nm"%(1e9*self.stdev_profile_heights())) plt.xlabel("Z [nm]") plt.ylabel("counts") elif (iwhat == "acf_h" ): f9 = plt.figure(9) c1,c2,c3 = autocorrelationfunction(self.y,self.zHeights) plt.plot(c1[0:-1],c2) plt.title("Heights autocovariance. Autocorrelation length (acf_h=0.5)=%.3f m"%(c3)) plt.xlabel("Length [m]") plt.ylabel("acf") elif (iwhat == "acf_s" ): f10 = plt.figure(10) c1,c2,c3 = autocorrelationfunction(self.y,self.zSlopes) plt.plot(c1[0:-1],c2) plt.title("Slopes autocovariance. 
Autocorrelation length (acf_s=0.5)=%.3f m"%(c3)) plt.xlabel("Length [m]") plt.ylabel("acf_s") else: print("Plotting options are: heights slopes psd_h psd_s csd_h csd_s acf_h acf_s") return None plt.show() def write_template(self,number_string="000",FILE_FORMAT=1): """ FILE_FORMAT: 1 slopes in Col2 2 = heights in Col2 3 = slopes in Col2, file X1 Y1 X2 Y2 4 = heights in Col2, file X1 Y1 X2 Y2 :param number_string: :param FILE_FORMAT: :return: """ metadata = self.metadata.copy() metadata["FILE_FORMAT"] = FILE_FORMAT metadata["X1_FACTOR"] = 1.0 metadata["Y1_FACTOR"] = 1.0 j = json.dumps(metadata, ensure_ascii=True, indent=" ") f = open("dabam-%s.txt"%number_string, 'w') f.write(j) f.close() f = open("dabam-%s.dat"%number_string, 'w') for i in range(self.y.size): if metadata["FILE_FORMAT"] == 1: f.write("%g %g\n" % (self.y[i], self.zSlopes[i])) elif metadata["FILE_FORMAT"] == 2: f.write("%g %g\n" % (self.y[i], self.zHeights[i])) else: raise Exception("Cannot write data with FILE_FORMAT != 1,2") f.close() print("Files %s and %s written to disk. "%("dabam-%s.txt"%number_string,"dabam-%s.txt"%number_string)) # # auxiliar methods for internal use # def _get_polDegree(self): try: polDegreeDefault = self.metadata['DETRENDING'] except: polDegreeDefault = 1 try: if (self.metadata['SURFACE_SHAPE']).lower() == "elliptical": polDegreeDefault = -3 # elliptical detrending except: pass if int(self.get_input_value("setDetrending")) == -2: # this is the default polDegree = polDegreeDefault else: polDegree = int(self.get_input_value("setDetrending")) return polDegree def _set_from_command_line(self): # # define default aparameters taken from command arguments # parser = argparse.ArgumentParser(description=self.description) # main argument parser.add_argument('entryNumber', nargs='?', metavar='N', type=int, default=self.get_input_value('entryNumber'), help=self.get_input_value_help('entryNumber')) # parser.add_argument('-'+self.get_input_value_short_name('runTests'), '--runTests', action='store_true', # help=self.get_input_value_help('runTests')) parser.add_argument('-'+self.get_input_value_short_name('summary'), '--summary', action='store_true', help=self.get_input_value_help('summary')) # options (flags) parser.add_argument('-'+self.get_input_value_short_name('silent'),'--silent', action='store_true', help=self.get_input_value_help('silent')) #options (parameters) parser.add_argument('-'+self.get_input_value_short_name('localFileRoot'), '--localFileRoot', help=self.get_input_value_help('localFileRoot')) parser.add_argument('-'+self.get_input_value('outputFileRoot'), '--outputFileRoot', default=self.get_input_value('outputFileRoot'), help=self.get_input_value_help('outputFileRoot')) parser.add_argument('-'+self.get_input_value_short_name('setDetrending'), '--setDetrending', default=self.get_input_value('setDetrending'), help=self.get_input_value_help('setDetrending')) parser.add_argument('-'+self.get_input_value_short_name('nbinS'), '--nbinS', default=self.get_input_value('nbinS'), help=self.get_input_value_help('nbinS')) parser.add_argument('-'+self.get_input_value_short_name('nbinH'), '--nbinH', default=self.get_input_value('nbinH'), help=self.get_input_value_help('nbinH')) parser.add_argument('-'+self.get_input_value_short_name('shadowCalc'), '--shadowCalc', action='store_true', help=self.get_input_value_help('shadowCalc')) parser.add_argument('-'+self.get_input_value_short_name('shadowNy'), '--shadowNy', default=self.get_input_value('shadowNy'), help=self.get_input_value_help('shadowNy')) 
parser.add_argument('-'+self.get_input_value_short_name('shadowNx'), '--shadowNx', default=self.get_input_value('shadowNx'), help=self.get_input_value_help('shadowNx')) parser.add_argument('-'+self.get_input_value_short_name('shadowWidth'), '--shadowWidth', default=self.get_input_value('shadowWidth'), help=self.get_input_value_help('shadowWidth')) parser.add_argument('-'+self.get_input_value_short_name('multiply'), '--multiply', default=self.get_input_value('multiply'), help=self.get_input_value_help('multiply')) parser.add_argument('-'+self.get_input_value_short_name('oversample'), '--oversample', default=self.get_input_value('oversample'), help=self.get_input_value_help('oversample')) parser.add_argument('-'+self.get_input_value_short_name('useHeightsOrSlopes'), '--useHeightsOrSlopes', default=self.get_input_value('useHeightsOrSlopes'), help=self.get_input_value_help('useHeightsOrSlopes')) parser.add_argument('-'+self.get_input_value_short_name('useAbscissasColumn'), '--useAbscissasColumn', default=self.get_input_value('useAbscissasColumn'), help=self.get_input_value_help('useAbscissasColumn')) parser.add_argument('-'+self.get_input_value_short_name('useOrdinatesColumn'), '--useOrdinatesColumn', default=self.get_input_value('useOrdinatesColumn'), help=self.get_input_value_help('useOrdinatesColumn')) parser.add_argument('-'+self.get_input_value_short_name('plot'), '--plot', default=self.get_input_value('plot'), help=self.get_input_value_help('plot')) args = parser.parse_args() self.set_input_entryNumber(args.entryNumber) self.set_input_silent(args.silent) self.set_input_localFileRoot(args.localFileRoot) self.set_input_outputFileRoot(args.outputFileRoot) self.set_input_setDetrending(args.setDetrending) self.set_input_nbinS(args.nbinS) self.set_input_nbinH(args.nbinH) self.set_input_shadowCalc(args.shadowCalc) self.set_input_shadowNy(args.shadowNy) self.set_input_shadowNx(args.shadowNx) self.set_input_shadowWidth(args.shadowWidth) self.set_input_multiply(args.multiply) self.set_input_oversample(args.oversample) self.set_input_useHeightsOrSlopes(args.useHeightsOrSlopes) self.set_input_useAbscissasColumn(args.useAbscissasColumn) self.set_input_useOrdinatesColumn(args.useOrdinatesColumn) self.set_input_plot(args.plot) # self.set_input_runTests(args.runTests) self.set_input_summary(args.summary) def _file_root(self): if self.is_remote_access: input_option = self.get_input_value("entryNumber") inFileRoot = "dabam-%03d"%(input_option) else: if self.get_input_value("localFileRoot") is None: input_option = self.get_input_value("entryNumber") inFileRoot = os.path.join(self.server_local,"dabam-%03d"%input_option) else: inFileRoot = self.get_input_value("localFileRoot") return inFileRoot def _load_file_metadata(self): if self.is_remote_access: # metadata file myfileurl = self.server+self.file_metadata() u = urlopen(myfileurl) ur = u.read() ur1 = ur.decode(encoding='UTF-8') h = json.loads(ur1) # dictionnary with metadata self.metadata = h else: try: with open(self.file_metadata(), mode='r') as f1: h = json.load(f1) self.metadata = h except: print ("_load_file_metadata: Error accessing local file: "+self.file_metadata()) def _load_file_data(self,file_data=None): try: skipLines = self.metadata['FILE_HEADER_LINES'] except: skipLines = 0 if self.is_remote_access: # data self.rawdata = numpy.loadtxt(self.server+self.file_data(), skiprows=skipLines ) else: file_data = self.file_data() self.rawdata = numpy.loadtxt(file_data, skiprows=skipLines) #, dtype="float64" ) def _calc_detrended_profiles(self): """ 
Retrieve detrended profiles (slope and height): abscissa slope slope_detrended heights heights_detrended :return: """ #; #; convert to SI units (m,rad) #; a = self.rawdata.copy() # # select columns with abscissas and ordinates # col_abscissas = int( self.get_input_value("useAbscissasColumn") ) if col_abscissas == -1: try: col_abscissas = self.metadata["COLUMN_INDEX_ABSCISSAS"] except: col_abscissas = 0 col_ordinates = int( self.get_input_value("useOrdinatesColumn") ) if col_ordinates == -1: try: col_ordinates = self.metadata["COLUMN_INDEX_ORDINATES"] except: col_ordinates = 1 # a[:,col_ordinates] *= self.metadata['Y%d_FACTOR'%col_ordinates] # TODO: not valid for file type 3 ncols = a.shape[1] if int(self.metadata["FILE_FORMAT"]) <= 2: a[:, col_abscissas] *= self.metadata['X1_FACTOR'] for i in range(1,ncols): # X1 Y1 Y2 Y3... a[:,i] = a[:,i]*self.metadata['Y%d_FACTOR'%i] else: #X1 Y1 X2 Y2 etc ngroups = int(ncols / 2) icol = -1 for i in range(0,ngroups): # X1 Y1 Y2 Y3... icol += 1 a[:,icol] = a[:,icol]*self.metadata['X%d_FACTOR'%(i+1)] icol += 1 a[:,icol] = a[:,icol]*self.metadata['Y%d_FACTOR'%(i+1)] # #; apply multiplicative factor # if (self.get_input_value("multiply") != 1.0): factor = float(self.get_input_value("multiply")) a[:,col_ordinates] = a[:,col_ordinates] * factor if not(self.get_input_value("silent")): print("Multiplicative factor %.3f applied."%(factor)) col_ordinates_title = 'unknown' if self.metadata['FILE_FORMAT'] == 1: # slopes in Col2 col_ordinates_title = 'slopes' if self.metadata['FILE_FORMAT'] == 2: # heights in Col2 col_ordinates_title = 'heights' if self.metadata['FILE_FORMAT'] == 3: # slopes in Col2, file X1 Y1 X2 Y2 col_ordinates_title = 'slopes' if self.metadata['FILE_FORMAT'] == 4: # heights in Col2, file X1 Y1 X2 Y2 col_ordinates_title = 'heights' if int(self.get_input_value("useHeightsOrSlopes")) == -1: #default, keep current pass else: # overwrite if int(self.get_input_value("useHeightsOrSlopes")) == 0: col_ordinates_title = 'heights' if int(self.get_input_value("useHeightsOrSlopes")) == 1: col_ordinates_title = 'slopes' if not(self.get_input_value("silent")): print("Using: abscissas column index %d (mirror coordinates)"%(col_abscissas)) print(" ordinates column index %d (profile %s)"%(col_ordinates,col_ordinates_title)) #; #; Extract right columns and interpolate (if wanted) #; substract linear fit to the slopes (remove best circle from profile) #; a_h = a[:,col_abscissas] a_v = a[:,col_ordinates] factor = float(self.get_input_value("oversample")) if (factor > 1e-6): npoints = a_h.size npoints1 = int(npoints * factor) a_hi = numpy.linspace(a_h.min(),a_h.max(),npoints1) a_vi = numpy.interp(a_hi,a_h,a_v) a_h = a_hi a_v = a_vi if not(self.get_input_value("silent")): print("Oversampling/interpolating from %d to %d points."%(npoints,npoints1)) if col_ordinates_title == 'slopes': sy = a_h sz1 = a_v elif col_ordinates_title == 'heights': sy = a_h #TODO we suppose that data are equally spaced. 
Think how to generalise sz1 = numpy.gradient(a_v,(sy[1]-sy[0])) else: raise NotImplementedError #; #; Detrending: #; substract linear fit to the slopes (remove best circle from profile) #; sz = numpy.copy(sz1) # define detrending to apply: >0 polynomial prder, -1=None, -2=Default, -3=elliptical polDegree = self._get_polDegree() if polDegree >= 0: # polinomial fit coeffs = numpy.polyfit(sy, sz1, polDegree) pol = numpy.poly1d(coeffs) zfit = pol(sy) sz = sz1 - zfit else: coeffs = None if polDegree == -3: # ellipse (optimized) coeffs = None try: from scipy.optimize import curve_fit, leastsq except: raise ImportError("Cannot perform ellipse detrending: please install scipy") if not(self.get_input_value("silent")): print("Detrending an ellipse...") if ("ELLIPSE_DESIGN_P" in self.metadata) and ("ELLIPSE_DESIGN_Q" in self.metadata) and ("ELLIPSE_DESIGN_THETA" in self.metadata): ell_p = self.metadata["ELLIPSE_DESIGN_P"] ell_q = self.metadata["ELLIPSE_DESIGN_Q"] ell_theta = self.metadata["ELLIPSE_DESIGN_THETA"] fitfunc_ell_slopes = lambda p, x: func_ellipse_slopes(x, p[0], p[1], p[2], p[3]) errfunc_ell_slopes = lambda p, x, y: fitfunc_ell_slopes(p, x) - y p_guess = [ell_p,ell_q,ell_theta,0.0] szGuess = fitfunc_ell_slopes(p_guess, sy) coeffs, cov_x, infodic, mesg, ier = leastsq(errfunc_ell_slopes, p_guess, args=(sy, sz1), full_output=True) #zpopt= func_ellipse_slopes(sy, popt[0], popt[1], popt[2], popt[3]) szOptimized = fitfunc_ell_slopes(coeffs, sy) sz = sz1 - szOptimized if not(self.get_input_value("silent")): print("Ellipse design parameters found in metadata: p=%f m,q=%f m,theta=%f rad, shift=%f nm, Slopes_Std=%f urad"% (ell_p,ell_q,ell_theta,0.0,1e6*(sz1-szGuess).std(ddof=1) )) print("Optimized ellipse : p=%f m,q=%f m,theta=%f rad, shift=%f nm, Slopes_Std=%f urad\n"% (coeffs[0],coeffs[1],coeffs[2],coeffs[3],1e6*sz.std(ddof=1) )) else: if not(self.get_input_value("silent")): print("Ellipse design parameters NOT FOUND in metadata. 
Guessing parameters (may be unrealistic!)") coeffs, cov_x = curve_fit(func_ellipse_slopes, sy, sz1, maxfev=10000) szOptimized= func_ellipse_slopes(sy, coeffs[0], coeffs[1], coeffs[2], coeffs[3]) sz = sz1 - szOptimized if polDegree == -4: # ellipse (design) if not(self.get_input_value("silent")): print("Detrending an ellipse...") if ("ELLIPSE_DESIGN_P" in self.metadata) and ("ELLIPSE_DESIGN_Q" in self.metadata) and ("ELLIPSE_DESIGN_THETA" in self.metadata): coeffs = numpy.zeros(4) coeffs[0] = self.metadata["ELLIPSE_DESIGN_P"] coeffs[1] = self.metadata["ELLIPSE_DESIGN_Q"] coeffs[2] = self.metadata["ELLIPSE_DESIGN_THETA"] coeffs[3] = 0.0 fitfunc_ell_slopes = lambda p, x: func_ellipse_slopes(x, p[0], p[1], p[2], p[3]) szGuess = fitfunc_ell_slopes(coeffs, sy) sz = sz1 - szGuess else: print("Error: Ellipse detrend parameters not found in metadata") raise RuntimeError #; #; calculate heights by integrating the slope #; zprof = cdf(sy,sz) zprof1 = cdf(sy,sz1) self.y = sy self.zSlopesUndetrended = sz1 self.zSlopes = sz self.zHeightsUndetrended = zprof1 self.zHeights = zprof self.coeffs = coeffs def _calc_psd(self): sy = self.y #sz1 = self.sz1 sz = self.zSlopes #zprof1 = self.zprof1 zprof = self.zHeights #; #; calculate PSD on both profile and slope, and also then their antiderivative #; psdHeights,f = psd(sy,zprof,onlyrange=None) psdSlopes,f = psd(sy,sz,onlyrange=None) adpsdHeights = cdf(f,psdHeights) adpsdSlopes = cdf(f,psdSlopes) self.f = f self.psdHeights = psdHeights self.psdSlopes = psdSlopes self.csdHeights = adpsdHeights self.csdSlopes = adpsdSlopes #fit PSD to a power law x = numpy.log10(self.f) y_h = numpy.log10(self.psdHeights) y_s = numpy.log10(self.psdSlopes) #select the fitting area (80% of the full interval, centered) x_left = (x.min()+0.1*(x.max()-x.min())) x_right = (x.max()-0.1*(x.max()-x.min())) # redefine left limit for the fit to the frequency value corresponding to the correlation length # acf_h = autocovariance_1D(self.sy,self.zprof) # f_h = numpy.log10( 1.0 / acf_h[2] ) # x_left = f_h c1 = (x < x_right ) c2 = (x > x_left ) igood = numpy.where(c1 & c2) igood = numpy.array(igood) igood.shape = -1 coeffs_h = numpy.polyfit(x[igood], y_h[igood], 1) coeffs_s = numpy.polyfit(x[igood], y_s[igood], 1) self.powerlaw = {"hgt_pendent":coeffs_h[0], "hgt_shift":coeffs_h[1], \ "slp_pendent":coeffs_s[0], "slp_shift":coeffs_s[1],\ "index_from":igood[0],"index_to":igood[-1]} def _calc_histograms(self): """ Calculates slopes and heights histograms and also the Gaussians with their StDev results are stored in: self.histoSlopes = {"x":hy_center, "y1":hz, "y2":g, "x_path":hy_path, "y1_path":hz_path, "y2_path":g_path} where: x is the abscissas (at bin center), y1 is the histogram, y2 is the Gaussian x_path is the abscissas with points at left and riggh edges of each bin, y1_path is the :return: """ # # slopes histogram # # binsize = float(self.get_input_value("binS")) # default is 1e-7 rads # bins = numpy.ceil( (self.sz.max()-self.sz.min())/binsize ) bins = int(self.get_input_value("nbinS")) hz,hy_left = numpy.histogram(self.zSlopes, bins = bins) hy_center = hy_left[0:-1]+0.5*(hy_left[1]-hy_left[0]) #calculate positions of the center of the bins hy_right = hy_left[0:-1]+1.0*(hy_left[1]-hy_left[0]) #calculate positions of the right edge of the bins hy_path = [] hz_path = [] for s,t,v in zip(hy_left,hy_right,hz): hy_path.append(s) hz_path.append(v) hy_path.append(t) hz_path.append(v) hy_path = numpy.array(hy_path) hz_path = numpy.array(hz_path) #Gaussian with StDev of data g = numpy.exp( 
-numpy.power(hy_center-self.zSlopes.mean(),2)/2/numpy.power(self.stdev_profile_slopes(),2) ) g = g/g.sum()*hz.sum() g_path = numpy.exp( -numpy.power(hy_path-self.zSlopes.mean(),2)/2/numpy.power(self.stdev_profile_slopes(),2) ) g_path = g_path/g_path.sum()*hz_path.sum() self.histoSlopes = {"x":hy_center, "y1":hz, "y2":g, "x_path":hy_path, "y1_path":hz_path, "y2_path":g_path} # # heights histogram # # binsize = float(self.get_input_value("binH")) # bins = numpy.ceil( (self.zprof.max()-self.zprof.min())/binsize ) bins = int(self.get_input_value("nbinH")) hz,hy_left = numpy.histogram(self.zHeights, bins = bins) hy_center = hy_left[0:-1]+0.5*(hy_left[1]-hy_left[0]) #calculate positions of the center of the bins hy_right = hy_left[0:-1]+1.0*(hy_left[1]-hy_left[0]) #calculate positions of the right edge of the bins hy_path = [] hz_path = [] for s,t,v in zip(hy_left,hy_right,hz): hy_path.append(s) hz_path.append(v) hy_path.append(t) hz_path.append(v) hy_path = numpy.array(hy_path) hz_path = numpy.array(hz_path) #Gaussian with StDev of data g = numpy.exp( -numpy.power(hy_center-self.zHeights.mean(),2)/2/numpy.power(self.stdev_profile_heights(),2) ) g = g/g.sum()*hz.sum() g_path = numpy.exp( -numpy.power(hy_path-self.zHeights.mean(),2)/2/numpy.power(self.stdev_profile_heights(),2) ) g_path = g_path/g_path.sum()*hz_path.sum() self.histoHeights = {"x":hy_center, "y1":hz, "y2":g, "x_path":hy_path, "y1_path":hz_path, "y2_path":g_path} def _write_output_files(self): # write header file outFile = self.get_input_value("outputFileRoot") + "Header.txt" with open(outFile, mode='w') as f1: json.dump(self.metadata, f1, indent=2) if not(self.get_input_value("silent")): print ("File "+outFile+" containing metadata written to disk.") # # Dump heights and slopes profiles to files # outFile = self.get_input_value("outputFileRoot")+'Heights.dat' dd=numpy.concatenate( (self.y.reshape(-1,1), self.zHeights.reshape(-1,1)),axis=1) numpy.savetxt(outFile,dd,comments="#",header="F %s\nS 1 heights profile\nN 2\nL coordinate[m] height[m]"%(outFile)) if not(self.get_input_value("silent")): print ("File "+outFile+" containing heights profile written to disk.") outFile = self.get_input_value("outputFileRoot")+'Slopes.dat' dd=numpy.concatenate( (self.y.reshape(-1,1), self.zSlopes.reshape(-1,1)),axis=1) numpy.savetxt(outFile,dd,comments="#",header="F %s\nS 1 slopes profile\nN 2\nL coordinate[m] slopes[rad]"%(outFile)) if not(self.get_input_value("silent")): print ("File "+outFile+" written to disk.") #write psd file dd = numpy.concatenate( (self.f, self.psdHeights, self.psdSlopes, \ numpy.sqrt(self.csdHeights)/self.stdev_psd_heights(), \ numpy.sqrt(self.csdSlopes)/self.stdev_psd_slopes() \ ) ,axis=0).reshape(5,-1).transpose() outFile = self.get_input_value("outputFileRoot")+'PSD.dat' header = "F %s\nS 1 power spectral density\nN 5\nL freq[m^-1] psd_heights[m^3] psd_slopes[rad^3] csd_h csd_s"%(outFile) numpy.savetxt(outFile,dd,comments="#",header=header) if not(self.get_input_value("silent")): print ("File "+outFile+" written to disk.") # write slopes histogram dd=numpy.concatenate( (self.histoSlopes["x"],self.histoSlopes["y1"],self.histoSlopes["y2"] ) ,axis=0).reshape(3,-1).transpose() outFile = self.get_input_value("outputFileRoot")+'HistoSlopes.dat' numpy.savetxt(outFile,dd,header="F %s\nS 1 histograms of slopes\nN 3\nL slope[rad] at bin center counts Gaussian with StDev = %g"% (outFile,self.stdev_profile_slopes()),comments='#') if not(self.get_input_value("silent")): print ("File "+outFile+" written to disk.") # heights 
histogram dd=numpy.concatenate( (self.histoHeights["x"],self.histoHeights["y1"],self.histoHeights["y2"] ) ,axis=0).reshape(3,-1).transpose() outFile = self.get_input_value("outputFileRoot")+'HistoHeights.dat' numpy.savetxt(outFile,dd,header="F %s\nS 1 histograms of heights\nN 3\nL heights[m] at bin center counts Gaussian with StDev = %g"% (outFile,self.stdev_profile_heights()),comments='#') # profiles info outFile = self.get_input_value("outputFileRoot")+'Info.txt' f = open(outFile,'w') f.write(self.info_profiles()) f.close() def write_output_dabam_files(self, filename_root="dabam-XXX", loaded_from_file=None): # dump metadata outFile = filename_root + ".txt" with open(outFile, mode='w') as f1: json.dump(self.metadata, f1, indent=2) if not(self.get_input_value("silent")): print ("File "+outFile+" containing metadata written to disk.") # dump data outFile = filename_root + ".dat" if self.is_remote_access: # data myfileurl = self.server+self.file_data() try: u = urlopen(myfileurl) except: print ("_load_file_data: Error accessing remote file: "+myfileurl+" does not exist.") return None ur = u.read() f = open(outFile,'wb') f.write(ur) f.close() else: # try first to copy the file try: if loaded_from_file is None: loaded_from_file = self.file_data() if isinstance(loaded_from_file,list): # ascii text (list of lines) f = open(outFile, 'w') for i in range(len(loaded_from_file)): f.write("%s\n"%loaded_from_file[i]) f.close() elif isinstance(loaded_from_file,str): # file name with open(loaded_from_file, "r") as f: txt = f.read() f = open(outFile,'w') f.write(txt) f.close() except: # if not working, just dump the data numpy.savetxt(outFile,self.rawdata) def _write_file_for_shadow(self): # # write file for SHADOW (optional) # replicate the (x,z) profile in a "s" mesh of npointsx * npointsy # #inputs npointsy = int(self.get_input_value("shadowNy")) npointsx = int(self.get_input_value("shadowNx")) mirror_width = float(self.get_input_value("shadowWidth")) # in cm # units to cm y = (self.y).copy() * 100.0 # from m to cm z = (self.zHeights).copy() * 100.0 # from m to cm # set origin at the center of the mirror. 
TODO: allow any point for origin z = z - z.min() y = y - y[int(y.size/2)] # interpolate the profile (y,z) to have npointsy points (new profile yy,zz) if npointsy > 0: mirror_length = y.max() - y.min() yy = numpy.linspace(-mirror_length/2.0,mirror_length/2.0,npointsy) zz = numpy.interp(yy,y,z) # dump to file interpolated profile (for fun) if self.get_input_value("outputFileRoot") != "": dd = numpy.concatenate( (yy.reshape(-1,1), zz.reshape(-1,1)),axis=1) outFile = self.get_input_value("outputFileRoot") + "ProfileInterpolatedForShadow.dat" numpy.savetxt(outFile,dd) if not(self.get_input_value("silent")): print("File %s with interpolated heights profile for SHADOW written to disk."%outFile) else: yy = y zz = z npointsy = yy.size # fill the mesh arrays xx,yy,s with interpolated profile yy,zz xx=numpy.linspace(-mirror_width/2.0,mirror_width/2.0,npointsx) s = numpy.zeros( (npointsy,npointsx) ) for i in range(npointsx): s[:,i]=zz # write Shadow file outFile = self.get_input_value("outputFileRoot") + "Shadow.dat" tmp = write_shadowSurface(s,xx,yy,outFile=outFile) def _latex_line(self,table_number=1): """ to create a line with profile data latex-formatted for automatic compilation of tables in the paper :return: """ if table_number == 1: return ('%d & %s & %d & %.2f (%.2f %s) & %.2f (%.2f %s) \\\\'%( \ self.get_input_value("entryNumber"), \ self.metadata['SURFACE_SHAPE'], int(1e3*(self.y[-1]-self.y[0])), \ 1e6*self.zSlopes.std(ddof=1), \ 1e6*self.stdev_psd_slopes(), \ ("" if self.metadata['CALC_SLOPE_RMS'] is None else ",%.2f"%(self.metadata['CALC_SLOPE_RMS'])), \ 1e9*self.stdev_psd_heights(), \ 1e9*self.zHeights.std(ddof=1), \ ("" if self.metadata['CALC_HEIGHT_RMS'] is None else ",%.2f"%(self.metadata['CALC_HEIGHT_RMS'])), )) else: return ('%d & %d & %.2f & %.2f & %d & %.2f & %.2f & %.2f\\\\'%( \ self.get_input_value("entryNumber"), \ self.y.size, \ self.momentsHeights[2], \ self.momentsHeights[3],\ ((autocorrelationfunction(self.y,self.zHeights))[2])*1e3, \ -self.powerlaw["hgt_pendent"], \ self.momentsSlopes[2], \ self.momentsSlopes[3],\ )) def _text_line(self): """ to create a line with profile data ascii-formatted for automatic compilation of profile summary :return: """ return ('%3d %12s %8.2f %.2f %s %.2f %s'%( \ self.get_input_value("entryNumber"), \ self.metadata['SURFACE_SHAPE'], int(1e3*(self.y[-1]-self.y[0])), \ 1e6*self.zSlopes.std(ddof=1), \ (" " if self.metadata['CALC_SLOPE_RMS'] is None else "(%5.2f)"%(self.metadata['CALC_SLOPE_RMS'])), \ 1e9*self.zHeights.std(ddof=1), \ (" " if self.metadata['CALC_HEIGHT_RMS'] is None else "(%5.2f)"%(self.metadata['CALC_HEIGHT_RMS'])), )) def _dictionary_line(self): """ to create a dictionary with profile data for automatic compilation of profile summary :return: """ return { \ "entry":self.get_input_value("entryNumber"), \ "surface":self.metadata['SURFACE_SHAPE'], "length":(self.y[-1]-self.y[0]), \ "slp_err":self.zSlopes.std(ddof=1), \ "slp_err_user":self.metadata['CALC_SLOPE_RMS'], \ "hgt_err":self.zHeights.std(ddof=1), \ "hgt_err_user": self.metadata['CALC_HEIGHT_RMS'] } # # main functions (these function are sitting here for autoconsistency of dabam.py, otherwise can be in a dependency) # def cdf(sy, sz, method = 1 ): """ cdf: Calculates the profile from the slope by simple integration (antiderivative) INPUTS: sy - 1D array of (equally-spaced) lengths. sz - 1D array of slopes. 
KEYWORDS method : 0 use simple sum as integration method 1 use trapezoidal rule (default) RESTRICTIONS: the abscissas step must be sorted, but may not be constant 1D array with cdf """ zprof = sz*0.0 if method == 0: steps = sy[0:sz.size-1] steps = numpy.concatenate(([0],steps)) steps[0] = steps[1] steps.shape = -1 steps = sy - steps zprof = numpy.cumsum(sz*steps) else: for i in range(sz.size): zprof[i]= numpy.trapz(sz[0:i+1], x = sy[0:i+1]) return zprof def psd(xx, yy, onlyrange = None): """ psd: Calculates the PSD (power spectral density) from a profile INPUTS: x - 1D array of (equally-spaced) lengths. y - 1D array of heights. OUTPUTS: f - 1D array of spatial frequencies, in units of 1/[x]. s - 1D array of PSD values, in units of [y]^3. KEYWORD PARAMETERS: onlyrange - 2-element array specifying the min and max spatial frequencies to be considered. Default is from 1/(length) to 1/(2*interval) (i.e., the Nyquist frequency), where length is the length of the scan, and interval is the spacing between points. PROCEDURE Use FFT """ n_pts = xx.size if (n_pts <= 1): print ("psd: Error, must have at least 2 points.") return 0 window=yy*0+1. length=xx.max()-xx.min() # total scan length. delta = xx[1] - xx[0] # psd from windt code # s=length*numpy.absolute(numpy.fft.ifft(yy*window)**2) # s=s[0:(n_pts/2+1*numpy.mod(n_pts,2))] # take an odd number of points. #xianbo + luca: s0 = numpy.absolute(numpy.fft.fft(yy*window)) s = 2 * delta * s0[0:int(len(s0)/2)]**2/s0.size # uniformed with IGOR, FFT is not symmetric around 0 s[0] /= 2 s[-1] /= 2 n_ps=s.size # number of psd points. interval=length/(n_pts-1) # sampling interval. f_min=1./length # minimum spatial frequency. f_max=1./(2.*interval) # maximum (Nyquist) spatial frequency. # spatial frequencies. f=numpy.arange(float(n_ps))/(n_ps-1)*(f_max-f_min)+f_min if onlyrange != None: roi = (f <= onlyrange[1]) * (f >= onlyrange[0]) if roi.sum() > 0: roi = roi.nonzero() f = f[roi] s = s[roi] return s,f def autocorrelationfunction(x,f): """ calculates the autocovariance function and correlation length of a 1-d profile f(x). Adapted from matlab code acf1D (David Bergstrom) see http://www.mysimlabs.com/surface_generation.html :param x: the abscissas array (profile points) :param f: array with the funcion value (profile heights) :return: lags,acf,cl lags - lag length vector (abscissas of acf) acf - autocovariance function cl - correlation length """ # function [acf,cl,lags] = acf1D(f,x,opt) # % # % [acf,cl,lags] = acf1D(f,x) # % # % calculates the autocovariance function and correlation length of a # % 1-d profile f(x). 
# % # % Input: x - profile points # % f - profile heights # % # % Output: lags - lag length vector (useful for plotting the acf) # % acf - autocovariance function # % cl - correlation length # % # % Last updated: 2010-07-26 (David Bergstrom) # % N = len(x) lags = numpy.linspace(0,x[-1]-x[0],N) # c=xcov(f,'coeff'); % the autocovariance function f -= f.mean() c = numpy.convolve(f,f[::-1]) c = c / c.max() acf=c[(N-1):2*N-2] k = 0 while acf[k] > 1/numpy.exp(1): k = k + 1 cl = 1/2*(x[k-1]+x[k]-2*x[0]) return lags,acf,cl # def func_ellipse_slopes(x, p, q, theta, shift): """ calculates the derivative (y'(x) i.e., slopes) of a ellipse y(x) defined by its distance to focii (p,q) and grazing angle theta at coordinate x=0 :param x: the length coordinate for the ellipse (x=0 is the center) :param p: the distance from source to mirror center :param q: the distance from mirror center to image :param theta: the grazing incidence angle in rad :param shift: a vertical shift to be added to the ellipse y' coordinate. :return: """ a = (p + q) / 2 b = numpy.sqrt( numpy.abs(p * q)) * numpy.sin(theta) c = numpy.sqrt(numpy.abs(a*a - b*b)) epsilon = c / a # (x0,y0) are the coordinates of the center of the mirror # x0 = (p*p - q*q) / 4 / c x0 = (p - q) / 2 / epsilon y0 = -b * numpy.sqrt(numpy.abs(1.0 - ((x0/a)**2))) # the versor normal to the surface at the mirror center is -grad(ellipse) xnor = -2 * x0 / a**2 ynor = -2 * y0 / b**2 modnor = numpy.sqrt(xnor**2 + ynor**2) xnor /= modnor ynor /= modnor # tangent versor is perpendicular to normal versor xtan = ynor ytan = -xnor A = 1/b**2 B = 1/a**2 C = A CCC = numpy.zeros(11) # this is for the general 3D case (we need 10 coeffs, index=0 not used here) # The 2D implicit ellipse equation is c2 x^2 + c3 y^2 + c5 x y + c8 x + c9 y + c10 = 0 #CCC[1] = A CCC[2] = B*xtan**2 + C*ytan**2 CCC[3] = B*xnor**2 + C*ynor**2 #CCC[4] = .0 CCC[5] = 2*(B*xnor*xtan+C*ynor*ytan) #CCC[6] = .0 #CCC[7] = .0 CCC[8] = .0 CCC[9] = 2*(B*x0*xnor+C*y0*ynor) CCC[10]= .0 #reorder in y and get the second degree equation for heights # AA y^2 + BB y + CC = 0 AA = CCC[3] BB = CCC[5]*x + CCC[9] CC = CCC[2]*x*x + CCC[8]*x + CCC[10] DD = BB*BB-4*AA*CC #calculate derivatives and solve fir y' (P=primes) BBP = CCC[5] CCP = 2*CCC[2]*x+CCC[8] DDP = 2*BB*BBP -4*AA*CCP ells = (-1/2/AA) * (BBP + DDP/2/numpy.sqrt(DD)) return ells+shift def write_shadowSurface(s,xx,yy,outFile='presurface.dat'): """ write_shadowSurface: writes a mesh in the SHADOW/presurface format SYNTAX: out = write_shadowSurface(z,x,y,outFile=outFile) INPUTS: z - 2D array of heights x - 1D array of spatial coordinates along mirror width. y - 1D array of spatial coordinates along mirror length. OUTPUTS: out - 1=Success, 0=Failure outFile - output file in SHADOW format. 
If undefined, the file is names "presurface.dat" """ out = 1 try: fs = open(outFile, 'w') except IOError: out = 0 print ("Error: can\'t open file: "+outFile) return else: # dimensions fs.write( "%d %d \n"%(xx.size,yy.size)) # y array for i in range(yy.size): fs.write(' ' + repr(yy[i]) ) fs.write("\n") # for each x element, the x value and the corresponding z(y) profile for i in range(xx.size): tmps = "" for j in range(yy.size): tmps = tmps + " " + repr(s[j,i]) fs.write(' ' + repr(xx[i]) + " " + tmps ) fs.write("\n") fs.close() #print ("File "+outFile+" written to disk (for SHADOW).") def moment(array,substract_one_in_variance_n=True): """ Calculate the first four statistical moments of a 1D array :param array: :param substract_one_in_variance_n: :return: array with: m0 (mean) m1 (variance) m2 (skewness) m3 (kurtosis) """ a1 = numpy.array(array) m0 = a1.mean() tmp = (a1-m0)**2 if substract_one_in_variance_n: m1 = tmp.sum()/(a1.size-1) else: m1 = tmp.sum()/(a1.size) sd = numpy.sqrt(m1) tmp = (a1-m0)**3 m2 = tmp.sum()/sd**3/a1.size tmp = (a1-m0)**4 m3 = (tmp.sum()/sd**4/a1.size) - 3 #Fisher definition: substract 3 to return 0 for Normal distribution return m0,m1,m2,m3 def dabam_summary(nmax=None,latex=0): """ create a text with the summary of all dabam entries :param nmax: :param latex: :return: """ if nmax is None: nmax = 1000000 # this is like infinity if latex ==0: txt = "Entry shape Length[mm] slp_err [urad] hgt_err [um]\n" else: txt = "" for i in range(nmax): dm = dabam() dm.set_input_outputFileRoot("") # avoid output files dm.set_input_silent(1) dm.set_entry(i+1) try: dm.load() except: break if latex == 1: txt += dm._latex_line(table_number=1)+"\n" elif latex == 2: txt += dm._latex_line(table_number=2)+"\n" else: txt += dm._text_line()+"\n" return(txt) # ''' # def dabam_summary_dictionary(): # nmax = 1000000 # this is like infinity # out = [] # for i in range(nmax): # dm = dabam() # dm.set_input_outputFileRoot("") # avoid output files # dm.set_input_silent(1) # dm.set_entry(i+1) # try: # dm.load() # except: # break # tmp = dm._dictionary_line() # # out.append(tmp) # return(out) # ''' def dabam_summary_dictionary(surface=None, slp_err_from=None, slp_err_to=None, length_from=None, length_to=None, server=None, nmax = 1000000): out = [] for i in range(nmax): print(">>>>>>>>>>>>>>>>>>>>>>",i) dm = dabam() if server is not None: dm.set_server(server) dm.set_input_outputFileRoot("") # avoid output files dm.set_input_silent(1) dm.set_entry(i+1) try: dm.load() except: break tmp = dm._dictionary_line() print(">>>>>>>>>>>>>>>>>>>>>>", i, "loaded") add_element = True if not surface is None and not tmp["surface"] is None: add_element = tmp["surface"].capitalize() == surface.capitalize() if add_element and not slp_err_from is None and not slp_err_to is None: add_element = tmp["slp_err"] >= slp_err_from and tmp["slp_err"] <= slp_err_to if add_element and not length_from is None and not length_to is None: add_element = tmp["length"] >= length_from and tmp["length"] <= length_to if add_element: out.append(tmp) print(">>>>>>>>>>>>>>>>>>>>>>", i, "appended") return(out) def dabam_summary_dictionary_from_json_indexation(surface=None, slp_err_from=None, slp_err_to=None, length_from=None, length_to=None): h = load_json_summary(filename="dabam-summary.json") out = [] for key in h.keys(): tmp = h[key] add_element = True if not surface is None and not tmp["surface"] is None: add_element = tmp["surface"].capitalize() == surface.capitalize() if add_element and not slp_err_from is None and not slp_err_to is None: 
add_element = tmp["slp_err"] >= slp_err_from and tmp["slp_err"] <= slp_err_to if add_element and not length_from is None and not length_to is None: add_element = tmp["length"] >= length_from and tmp["length"] <= length_to if add_element: out.append(tmp) return(out) def make_json_summary(nmax=100000): # dump summary out_list = dabam_summary_dictionary(nmax=100000) out_dict = {} for i,ilist in enumerate(out_list): # print("analyzing entry: ",i+1) out_dict["entry_%03d"%ilist["entry"]] = ilist # print(out_dict) j = json.dumps(out_dict, ensure_ascii=True, indent=" ") print(j) f = open("dabam-summary.json", 'w') f.write(j) f.close() print("File dabam-summary.json written to disk") def load_json_summary(filename=None): self = dabam() if filename is None: if self.is_remote_access: # json summary file myfileurl = self.server+"dabam-summary.json" u = urlopen(myfileurl) ur = u.read() ur1 = ur.decode(encoding='UTF-8') h = json.loads(ur1) # dictionnary with summary else: # TODO local server try: with open(filename, mode='r') as f1: h = json.load(f1) except: print ("Error accessing local file: "+filename) else: try: with open(filename, mode='r') as f1: h = json.load(f1) except: print("Error accessing local file: " + filename) return h # # main program # def main(): # initialize dm = dabam() dm.set_input_outputFileRoot("tmp") # write files by default dm._set_from_command_line() # get arguments of dabam command line if dm.get_input_value("summary"): print(dabam_summary()) else: dm.load() # access data if dm.get_input_value("plot") != None: dm.plot() # # main call # if __name__ == '__main__': main() # # dump summary # # out_list = dabam_summary_dictionary() # # # print(out,type(out)) # # out_dict = {} # # for i,ilist in enumerate(out_list): # print("analyzing entry: ",i+1) # out_dict["entry_%03d"%ilist["entry"]] = ilist # # print(out_dict) # # j = json.dumps(out_dict, ensure_ascii=True, indent=" ") # # print(j) # f = open("dabam-summary.json", 'w') # f.write(j) # f.close() # print("File dabam-summary.json written to disk") # # # dm = dabam() # # dm.load(12) # h = load_json_summary("dabam-summary.json") # for key in h: # print(key) # # out = dabam_summary_dictionary_from_json_indexation(surface="elliptical(detrended)", slp_err_from=None, slp_err_to=None, length_from=None, length_to=None) # # for ilist in out: # print(ilist)
srio/dabam
code/dabam.py
Python
gpl-2.0
75,998
[ "Gaussian" ]
ba0903dc52e0ccc229b400439a9e64acfcbd987770b8cfa77c706674fe4ee2a4
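The dabam class in the record above is driven entirely through its set_input_* accessors, and the computed profiles become plain attributes once load() has run. A minimal usage sketch, assuming the file is importable as a module named dabam, that the remote DABAM server is reachable, and that entry 12 exists (the entry number here is an arbitrary example, and plot() is assumed to accept the same space-separated option string that the method splits internally):

import dabam

dm = dabam.dabam()
dm.set_input_outputFileRoot("")   # suppress the default "tmp*" output files
dm.set_input_silent(1)            # quiet mode
dm.set_entry(12)                  # arbitrary example entry number
dm.load()                         # fetch metadata + data, detrend, compute PSD and histograms

# slope/height errors as computed in _text_line(), in urad and nm
print("slope error RMS [urad]: %.3f" % (1e6 * dm.zSlopes.std(ddof=1)))
print("height error RMS [nm] : %.3f" % (1e9 * dm.zHeights.std(ddof=1)))
dm.plot("heights psd_h")          # option names as listed in the plot() method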
""" Functions to create initializers for parameter variables. Usage ------- >>> from lasagne.layers import DenseLayer >>> from lasagne.init import Constant, Glorot >>> l1 = DenseLayer((100,20), num_units=50, W=GlorotUniform(), b=Constant(0.0)) """ import numpy as np from .utils import floatX class Initializer(object): """Initializer class The :class:`Initializer` class represents a weight initializer used to initialize weight parameters in a neural network layer. It should be subclassed when implementing new types of weight initializers. """ def __call__(self, shape): """ Makes :class:`Initializer` instances callable like a function, invoking their :meth:`sample()` method. """ return self.sample(shape) def sample(self, shape): """ Sample should return a theano.tensor of size shape and data type theano.config.floatX. Parameters ----------- shape : tuple or int Integer or tuple specifying the size of the returned matrix. returns : theano.tensor Matrix of size shape and dtype theano.config.floatX. """ raise NotImplementedError() class Normal(Initializer): """Sample initial weights from the Gaussian distribution Initial weight parameters are sampled from N(mean, std). Parameters ---------- std : float Std of initial parameters. mean : float Mean of initial parameters. """ def __init__(self, std=0.01, mean=0.0): self.std = std self.mean = mean def sample(self, shape): return floatX(np.random.normal(self.mean, self.std, size=shape)) class Uniform(Initializer): """Sample initial weights from the uniform distribution Parameters are sampled from U(a, b). Parameters ---------- range : float or tuple When std is None then range determines a, b. If range is a float the weights are sampled from U(-range, range). If range is a tuple the weights are sampled from U(range[0], range[1]). std : float or None If std is a float then the weights are sampled from U(mean - np.sqrt(3) * std, mean + np.sqrt(3) * std). mean : float see std for description. """ def __init__(self, range=0.01, std=None, mean=0.0): if std is not None: a = mean - np.sqrt(3) * std b = mean + np.sqrt(3) * std else: try: a, b = range # range is a tuple except TypeError: a, b = -range, range # range is a number self.range = (a, b) def sample(self, shape): return floatX(np.random.uniform( low=self.range[0], high=self.range[1], size=shape)) class Glorot(Initializer): """Glorot weight initialization [1]_ This is also known as Xavier initialization. Parameters ---------- initializer : lasagne.init.Initializer Initializer used to sample the weights, must accept std in its constructor to sample from a distribution with a given standard deviation. gain : float or 'relu' When 'relu' the gain is set to sqrt(2), see notes. c01b : bool If lasagne.layers.cuda_convnet.Conv2DCCLayer is initialized with dimshuffle=False, then c01b should be set to True. References ---------- [1] Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of training deep feedforward neural networks." International conference on artificial intelligence and statistics. 2010. Notes ---------- For DenseLayers if gain='relu' and initializer is Uniform then the weights are initialized as :math:`a = \frac{sqrt{6}}{\sqrt{fan_{in}+n_{hid}}}` :nath: `W \sim U[-a, a]`. If gain=1 and initializer is Normal then the weights are initialized as :math: `std = \sqrt{\frac{2}{fan_{in}+n_{hid}}}` :math: `W \sim N(0, std)`. See Also -------- GlorotNormal : Shortcut with Gaussian initializer. GlorotUniform : Shortcut with uniform initializer. 
""" def __init__(self, initializer, gain=1.0, c01b=False): if gain == 'relu': gain = np.sqrt(2) self.initializer = initializer self.gain = gain self.c01b = c01b def sample(self, shape): if self.c01b: if len(shape) != 4: raise RuntimeError( "If c01b is True, only shapes of length 4 are accepted") n1, n2 = shape[0], shape[3] receptive_field_size = shape[1] * shape[2] else: if len(shape) < 2: raise RuntimeError( "This initializer only works with shapes of length >= 2") n1, n2 = shape[:2] receptive_field_size = np.prod(shape[2:]) std = self.gain * np.sqrt(2.0 / ((n1 + n2) * receptive_field_size)) return self.initializer(std=std).sample(shape) class GlorotNormal(Glorot): """Glorot with weights sampled from Normal distribution See Glorot for description of parameters. """ def __init__(self, gain=1.0, c01b=False): super(GlorotNormal, self).__init__(Normal, gain, c01b) class GlorotUniform(Glorot): """Glorot with weights sampled from Uniform distribution See Glorot for description of parameters. """ def __init__(self, gain=1.0, c01b=False): super(GlorotUniform, self).__init__(Uniform, gain, c01b) class He(Initializer): """He weight initialization [1]_ Weights are initialized with std :math:`\sigma = gain \sqrt{\frac{1}{fan_{in}}}`. Parameters ---------- initializer : lasagne.init.Initializer Initializer used to sample the weights, must accept std in its constructor to sample from a distribution with a given standard deviation. gain : float or 'relu' When 'relu' gain is set to sqrt(2). c01b : bool If lasagne.layers.cuda_convnet.Conv2DCCLayer is initialized with dimshuffle=False, then c01b should be set to True. References ---------- [1] He, Kaiming, et al. Delving deep into rectifiers: Surpassing human-level performance on imagenet classification. arXiv preprint arXiv:1502.01852 (2015). See Also ---------- HeNormal : Shortcut with Gaussian initializer. HeUniform : Shortcut with uniform initializer. """ def __init__(self, initializer, gain=1.0, c01b=False): if gain == 'relu': gain = np.sqrt(2) self.initializer = initializer self.gain = gain self.c01b = c01b def sample(self, shape): if self.c01b: if len(shape) != 4: raise RuntimeError( "If c01b is True, only shapes of length 4 are accepted") fan_in = np.prod(shape[:3]) else: if len(shape) == 2: fan_in = shape[0] elif len(shape) > 2: fan_in = np.prod(shape[1:]) else: raise RuntimeError( "This initializer only works with shapes of length >= 2") std = self.gain * np.sqrt(1.0 / fan_in) return self.initializer(std=std).sample(shape) class HeNormal(He): """He initializer with weights sampled from Gaussian See He for description of parameters. """ def __init__(self, gain=1.0, c01b=False): super(HeNormal, self).__init__(Normal, gain, c01b) class HeUniform(He): """He initializer with weights sampled from Gaussian See He for description of parameters. """ def __init__(self, gain=1.0, c01b=False): super(HeUniform, self).__init__(Uniform, gain, c01b) class Constant(Initializer): """Initialize weights with constant value. Parameters ---------- val : float Constant value for weights. """ def __init__(self, val=0.0): self.val = val def sample(self, shape): return floatX(np.ones(shape) * self.val) class Sparse(Initializer): """Initialize weights as sparse matrix. Parameters ---------- sparsity : float Exact fraction of non-zero values per column. Larger values give less sparsity. std : float Non-zero weights are sampled from N(0, std). 
""" def __init__(self, sparsity=0.1, std=0.01): self.sparsity = sparsity self.std = std def sample(self, shape): if len(shape) != 2: raise RuntimeError( "sparse initializer only works with shapes of length 2") w = floatX(np.zeros(shape)) n_inputs, n_outputs = shape size = int(self.sparsity * n_inputs) # fraction of number of inputs for k in range(n_outputs): indices = np.arange(n_inputs) np.random.shuffle(indices) indices = indices[:size] values = floatX(np.random.normal(0.0, self.std, size=size)) w[indices, k] = values return w class Orthogonal(Initializer): """Intialize weights as Orthogonal matrix. Orthogonal matrix initialization. For n-dimensional shapes where n > 2, the n-1 trailing axes are flattened. For convolutional layers, this corresponds to the fan-in, so this makes the initialization usable for both dense and convolutional layers. Parameters ---------- gain : float or 'relu' 'relu' gives gain of sqrt(2). """ def __init__(self, gain=1.0): if gain == 'relu': gain = np.sqrt(2) self.gain = gain def sample(self, shape): if len(shape) < 2: raise RuntimeError("Only shapes of length 2 or more are " "supported.") flat_shape = (shape[0], np.prod(shape[1:])) a = np.random.normal(0.0, 1.0, flat_shape) u, _, v = np.linalg.svd(a, full_matrices=False) # pick the one with the correct shape q = u if u.shape == flat_shape else v q = q.reshape(shape) return floatX(self.gain * q)
dnuffer/Lasagne
lasagne/init.py
Python
mit
10,166
[ "Gaussian" ]
bf56386ad9fdee96dba94fc02250249fbb764ed8688c8f53b17ad3ba67a2ec1a
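Because Initializer.__call__ in the record above forwards to sample(), every initializer class can be used directly as a factory for weight arrays, independently of any layer. A short sketch (the shapes are arbitrary examples):

import numpy as np
from lasagne.init import GlorotUniform, HeNormal, Orthogonal

W_dense = GlorotUniform()((100, 50))             # dense layer: fan_in=100, fan_out=50
W_conv = HeNormal(gain='relu')((64, 3, 5, 5))    # conv layer: (n_out, n_in, rows, cols), fan_in=3*5*5
W_rec = Orthogonal()((128, 128))                 # square matrix, e.g. recurrent weights

# for Glorot, the sample std should come out close to sqrt(2/(fan_in+fan_out))
print(W_dense.shape, float(W_dense.std()))

The same callable convention is what makes `W=GlorotUniform()` work as a layer argument in the module docstring's DenseLayer example.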
# # @BEGIN LICENSE # # Psi4: an open-source quantum chemistry software package # # Copyright (c) 2007-2019 The Psi4 Developers. # # The copyrights for code used from other parties are included in # the corresponding files. # # This file is part of Psi4. # # Psi4 is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, version 3. # # Psi4 is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License along # with Psi4; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # @END LICENSE # import sys import numpy as np from psi4 import core from .exceptions import ValidationError ### Matrix and Vector properties def _get_raw_views(self, copy=False): """ Gets simple raw view of the passed in object. """ if copy: return tuple([np.array(x) for x in self.array_interface()]) else: return tuple(self.array_interface()) def _find_dim(arr, ndim): """ Helper function to help deal with zero or sized arrays """ # Zero arrays if (arr is None) or (arr is False): return [0] * ndim # Make sure this is a numpy array like thing if not hasattr(arr, 'shape'): raise ValidationError("Expected numpy array, found object of type '%s'" % type(arr)) if len(arr.shape) == ndim: return [arr.shape[x] for x in range(ndim)] else: raise ValidationError("Input array does not have a valid shape.") def array_to_matrix(self, arr, name="New Matrix", dim1=None, dim2=None): """ Converts a numpy array or list of numpy arrays into a Psi4 Matrix (irreped if list). Parameters ---------- arr : array or list of arrays Numpy array or list of arrays to use as the data for a new core.Matrix name : str Name to give the new core.Matrix dim1 : list, tuple, or core.Dimension (optional) If a single dense numpy array is given, a dimension can be supplied to apply irreps to this array. Note that this discards all extra information given in the matrix besides the diagonal blocks determined by the passed dimension. dim2 : Same as dim1 only if using a psi4.core.Dimension object. Returns ------- matrix : :py:class:`~psi4.core.Matrix` or :py:class:`~psi4.core.Vector` Returns the given Psi4 object Notes ----- This is a generalized function to convert a NumPy array to a Psi4 object Examples -------- >>> data = np.random.rand(20) >>> vector = array_to_matrix(data) >>> irrep_data = [np.random.rand(2, 2), np.empty(shape=(0,3)), np.random.rand(4, 4)] >>> matrix = array_to_matrix(irrep_data) >>> print matrix.rowspi().to_tuple() (2, 0, 4) """ # What type is it? MRO can help. arr_type = self.__mro__[0] # Irreped case if isinstance(arr, (list, tuple)): if (dim1 is not None) or (dim2 is not None): raise ValidationError("Array_to_Matrix: If passed input is list of arrays dimension cannot be specified.") irreps = len(arr) if arr_type == core.Matrix: sdim1 = core.Dimension(irreps) sdim2 = core.Dimension(irreps) for i in range(irreps): d1, d2 = _find_dim(arr[i], 2) sdim1[i] = d1 sdim2[i] = d2 ret = self(name, sdim1, sdim2) elif arr_type == core.Vector: sdim1 = core.Dimension(irreps) for i in range(irreps): d1 = _find_dim(arr[i], 1) sdim1[i] = d1[0] ret = self(name, sdim1) else: raise ValidationError("Array_to_Matrix: type '%s' is not recognized." 
% str(arr_type)) for view, vals in zip(ret.nph, arr): if 0 in view.shape: continue view[:] = vals return ret # No irreps implied by list else: if arr_type == core.Matrix: # Build an irreped array back out if dim1 is not None: if dim2 is None: raise ValidationError("Array_to_Matrix: If dim1 is supplied must supply dim2 also") dim1 = core.Dimension.from_list(dim1) dim2 = core.Dimension.from_list(dim2) if dim1.n() != dim2.n(): raise ValidationError("Array_to_Matrix: Length of passed dim1 must equal length of dim2.") ret = self(name, dim1, dim2) start1 = 0 start2 = 0 for num, interface in enumerate(ret.nph): d1 = dim1[num] d2 = dim2[num] if (d1 == 0) or (d2 == 0): continue view = np.asarray(interface) view[:] = arr[start1:start1 + d1, start2:start2 + d2] start1 += d1 start2 += d2 return ret # Simple case without irreps else: ret = self(name, arr.shape[0], arr.shape[1]) ret.np[:] = arr return ret elif arr_type == core.Vector: # Build an irreped array back out if dim1 is not None: if dim2 is not None: raise ValidationError("Array_to_Matrix: If dim2 should not be supplied for 1D vectors.") dim1 = core.Dimension.from_list(dim1) ret = self(name, dim1) start1 = 0 for num, interface in enumerate(ret.nph): d1 = dim1[num] if (d1 == 0): continue view = np.asarray(interface) view[:] = arr[start1:start1 + d1] start1 += d1 return ret # Simple case without irreps else: ret = self(name, arr.shape[0]) ret.np[:] = arr return ret else: raise ValidationError("Array_to_Matrix: type '%s' is not recognized." % str(arr_type)) def _to_array(matrix, copy=True, dense=False): """ Converts a Psi4 Matrix or Vector to a numpy array. Either copies the data or simply constructs a view. Parameters ---------- matrix : :py:class:`~psi4.core.Matrix` or :py:class:`~psi4.core.Vector` Pointers to which Psi4 core class should be used in the construction. copy : bool, optional Copy the data if `True`, return a view otherwise dense : bool, optional Converts irreped Psi4 objects to diagonally blocked dense arrays if `True`. Returns a list of arrays otherwise. Returns ------- array : ndarray or list of ndarray Returns either a list of np.array's or the base array depending on options. Notes ----- This is a generalized function to convert a Psi4 object to a NumPy array Examples -------- >>> data = psi4.Matrix(3, 3) >>> data.to_array() [[ 0. 0. 0.] [ 0. 0. 0.] [ 0. 0. 0.]] """ if matrix.nirrep() > 1: # We will copy when we make a large matrix if dense: copy = False matrix_views = _get_raw_views(matrix, copy=copy) # Return the list of arrays if dense is False: return matrix_views # Build the dense matrix if isinstance(matrix, core.Vector): ret_type = '1D' elif isinstance(matrix, core.Matrix): ret_type = '2D' else: raise ValidationError("Array_to_Matrix: type '%s' is not recognized." 
% type(matrix)) dim1 = [] dim2 = [] for h in matrix_views: # Ignore zero dim irreps if 0 in h.shape: dim1.append(0) dim2.append(0) else: dim1.append(h.shape[0]) if ret_type == '2D': dim2.append(h.shape[1]) ndim1 = np.sum(dim1) ndim2 = np.sum(dim2) if ret_type == '1D': dense_ret = np.zeros(shape=(ndim1)) start = 0 for d1, arr in zip(dim1, matrix_views): if d1 == 0: continue dense_ret[start:start + d1] = arr start += d1 else: dense_ret = np.zeros(shape=(ndim1, ndim2)) start1 = 0 start2 = 0 for d1, d2, arr in zip(dim1, dim2, matrix_views): if (d1 == 0) or (d2 == 0): continue dense_ret[start1:start1 + d1, start2:start2 + d2] = arr start1 += d1 start2 += d2 return dense_ret else: return _get_raw_views(matrix, copy=copy)[0] @property def _np_shape(self): """ Shape of the Psi4 data object """ view_data = _get_raw_views(self) if self.nirrep() > 1: return tuple(view_data[x].shape for x in range(self.nirrep())) else: return view_data[0].shape @property def _np_view(self): """ View without only one irrep """ if self.nirrep() > 1: raise ValidationError("Attempted to call .np on a Psi4 data object with multiple irreps." "Please use .nph for objects with irreps.") return _get_raw_views(self)[0] @property def _nph_view(self): """ View with irreps. """ return _get_raw_views(self) @property def _array_conversion(self): """ Provides the array interface to simply classes so that np.array(core.Matrix(5, 5)) works flawlessly. """ if self.nirrep() > 1: raise ValidationError("__array__interface__ can only be called on Psi4 data object with only one irrep!") else: return self.np.__array_interface__ def _np_write(self, filename=None, prefix=""): """ Writes the irreped matrix to a NumPy zipped file. Can return the packed data for saving many matrices into the same file. """ ret = {} ret[prefix + "Irreps"] = self.nirrep() ret[prefix + "Name"] = self.name for h, v in enumerate(self.nph): # If returning arrays to user, we want to return copies (snapshot), not # views of the core.Matrix's memory. if filename is None and not v.flags['OWNDATA']: v = np.copy(v) ret[prefix + "IrrepData" + str(h)] = v if isinstance(self, core.Matrix): ret[prefix + "Dim1"] = self.rowdim().to_tuple() ret[prefix + "Dim2"] = self.coldim().to_tuple() if isinstance(self, core.Vector): ret[prefix + "Dim"] = [self.dim(x) for x in range(self.nirrep())] if filename is None: return ret np.savez(filename, **ret) def _np_read(self, filename, prefix=""): """ Reads the data from a NumPy compress file. 
""" if isinstance(filename, np.lib.npyio.NpzFile): data = filename elif isinstance(filename, str): if not filename.endswith('.npz'): filename = filename + '.npz' data = np.load(filename) else: raise Exception("Filename not understood: %s" % filename) ret_data = [] if ((prefix + "Irreps") not in data.keys()) or ((prefix + "Name") not in data.keys()): raise ValidationError("File %s does not appear to be a numpyz save" % filename) for h in range(data[prefix + "Irreps"]): ret_data.append(data[prefix + "IrrepData" + str(h)]) arr_type = self.__mro__[0] if arr_type == core.Matrix: dim1 = core.Dimension.from_list(data[prefix + "Dim1"]) dim2 = core.Dimension.from_list(data[prefix + "Dim2"]) ret = self(str(data[prefix + "Name"]), dim1, dim2) elif arr_type == core.Vector: dim1 = core.Dimension.from_list(data[prefix + "Dim"]) ret = self(str(data[prefix + "Name"]), dim1) for h in range(data[prefix + "Irreps"]): ret.nph[h][:] = ret_data[h] return ret def _to_serial(data): """ Converts an object with a .nph accessor to a serialized dictionary """ json_data = {} json_data["shape"] = [] json_data["data"] = [] for view in data.nph: json_data["shape"].append(view.shape) json_data["data"].append(view.tostring()) if len(json_data["shape"][0]) == 1: json_data["type"] = "vector" elif len(json_data["shape"][0]) == 2: json_data["type"] = "matrix" else: raise ValidationError("_to_json is only used for vector and matrix objects.") return json_data def _from_serial(self, json_data): """ Converts serialized data to the correct Psi4 data type """ if json_data["type"] == "vector": dim1 = core.Dimension.from_list([x[0] for x in json_data["shape"]]) ret = self("Vector from JSON", dim1) elif json_data["type"] == "matrix": dim1 = core.Dimension.from_list([x[0] for x in json_data["shape"]]) dim2 = core.Dimension.from_list([x[1] for x in json_data["shape"]]) ret = self("Matrix from JSON", dim1, dim2) else: raise ValidationError("_from_json did not recognize type option of %s." % str(json_data["type"])) for n in range(len(ret.nph)): ret.nph[n].flat[:] = np.frombuffer(json_data["data"][n], dtype=np.double) return ret def _chain_dot(*args, **kwargs): """ Chains dot products together from a series of Psi4 Matrix classes. By default there is no transposes, an optional vector of booleans can be passed in. """ trans = kwargs.pop("trans", None) if trans is None: trans = [False for x in range(len(args))] else: if len(trans) != len(args): raise ValidationError( "Chain dot: The length of the transpose arguements is not equal to the length of args.") # Setup chain ret = args[0] if trans[0]: ret = ret.transpose() # Run through for n, mat in enumerate(args[1:]): ret = core.doublet(ret, mat, False, trans[n + 1]) return ret def _irrep_access(self, *args, **kwargs): """ Warns user when iterating/accessing an irreped object. """ raise ValidationError("Attempted to access by index/iteration a Psi4 data object that supports multiple" " irreps. 
Please use .np or .nph explicitly.") # Matrix attributes core.Matrix.from_array = classmethod(array_to_matrix) core.Matrix.from_list = classmethod(lambda self, x: array_to_matrix(self, np.array(x))) core.Matrix.to_array = _to_array core.Matrix.shape = _np_shape core.Matrix.np = _np_view core.Matrix.nph = _nph_view core.Matrix.__array_interface__ = _array_conversion core.Matrix.np_write = _np_write core.Matrix.np_read = classmethod(_np_read) core.Matrix.to_serial = _to_serial core.Matrix.from_serial = classmethod(_from_serial) core.Matrix.chain_dot = _chain_dot core.Matrix.__iter__ = _irrep_access core.Matrix.__getitem__ = _irrep_access # Vector attributes core.Vector.from_array = classmethod(array_to_matrix) core.Vector.from_list = classmethod(lambda self, x: array_to_matrix(self, np.array(x))) core.Vector.to_array = _to_array core.Vector.shape = _np_shape core.Vector.np = _np_view core.Vector.nph = _nph_view core.Vector.__array_interface__ = _array_conversion core.Vector.np_write = _np_write core.Vector.np_read = classmethod(_np_read) core.Vector.to_serial = _to_serial core.Vector.from_serial = classmethod(_from_serial) core.Vector.__iter__ = _irrep_access core.Vector.__getitem__ = _irrep_access ### CIVector properties @property def _civec_view(self): """ Returns a view of the CIVector's buffer """ return np.asarray(self) core.CIVector.np = _civec_view ### Dimension properties @classmethod def _dimension_from_list(self, dims, name="New Dimension"): """ Builds a core.Dimension object from a python list or tuple. If a dimension object is passed a copy will be returned. """ if isinstance(dims, (tuple, list, np.ndarray)): irreps = len(dims) elif isinstance(dims, core.Dimension): irreps = dims.n() else: raise ValidationError("Dimension from list: Type '%s' not understood" % type(dims)) ret = core.Dimension(irreps, name) for i in range(irreps): ret[i] = dims[i] return ret def _dimension_to_tuple(dim): """ Converts a core.Dimension object to a tuple. """ if isinstance(dim, (tuple, list)): return tuple(dim) irreps = dim.n() ret = [] for i in range(irreps): ret.append(dim[i]) return tuple(ret) def _dimension_iter(dim): """ Provides an iterator class for the Dimension object. Allows: dim = psi4.core.Dimension(...) list(dim) """ for i in range(dim.n()): yield dim[i] # Dimension attributes core.Dimension.from_list = _dimension_from_list core.Dimension.to_tuple = _dimension_to_tuple core.Dimension.__iter__ = _dimension_iter # General functions for NumPy array manipulation def block_diagonal_array(*args): """ Convert square NumPy array to a single block diagonal array. Mimic of SciPy's block_diag. """ # Validate the input matrices. dim = 0 for matrix in args: try: shape = matrix.shape dim += shape[0] except (AttributeError, TypeError): raise ValidationError("Cannot construct block diagonal from non-arrays.") if len(shape) != 2: raise ValidationError("Cannot construct block diagonal from non-2D arrays.") if shape[0] != shape[1]: raise ValidationError("Cannot construct block diagonal from non-square arrays.") # If this is too slow, try a sparse matrix? block_diag = np.zeros((dim, dim)) start = 0 for matrix in args: next_block = slice(start, start + matrix.shape[0]) block_diag[next_block, next_block] = matrix start += matrix.shape[0] return block_diag
jgonthier/psi4
psi4/driver/p4util/numpy_helper.py
Python
lgpl-3.0
18,310
[ "Psi4" ]
6dda8d73a7b6213c8bc8dce53736a3ac92dd6e35b987cb93b5c284935a6a23e0
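All of the helpers in the record above are monkey-patched onto the core classes at the end of the module, so they are available on any Matrix or Vector instance once psi4 is imported. A sketch of the NumPy round trip, assuming a working Psi4 build (single-irrep objects, so .np is a plain writable 2D view):

import numpy as np
import psi4
from psi4 import core

m = core.Matrix(3, 3)
m.np[:] = np.random.rand(3, 3)     # .np is a view of the Matrix memory, not a copy
arr = m.to_array()                 # snapshot as an ndarray (copy=True by default)

m2 = core.Matrix.from_array(arr, name="From NumPy")
prod = core.Matrix.chain_dot(m, m2, trans=[False, True])   # m . m2^T via core.doublet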
import unittest import numpy as np import pandas as pd from scimap.peakfitting import Peak, discrete_fwhm # flake8: noqa """Module for testing generic peak fitting. Some techniques have more specific tests (e.g. X-ray diffraction, X-ray microscopy).""" class GuessParameters(unittest.TestCase): def test_single_peak(self): peak = Peak(num_peaks=1, method="Gaussian") y = np.array([0, 3, 9, 3, 0]) x = np.arange(0, len(y)) guess = peak.guess_parameters(x=x, y=y) self.assertEqual(len(guess), 1) self.assertEqual(guess[0].height, 9) # Height self.assertEqual(guess[0].center, 2) # Center self.assertAlmostEqual(guess[0].width, 0.85, places=2) # Width def test_uneven_peak(self): peak = Peak(num_peaks=1, method="Gaussian") y = np.array([0, 3, 9, 8, 6]) x = np.arange(0, len(y)) guess = peak.guess_parameters(x=x, y=y) self.assertAlmostEqual(guess[0].width, 0.85, places=2) def test_two_peaks(self): pass class FullWidthHalfMax(unittest.TestCase): def test_calculate_fwhm(self): y = np.array([0, 5.9, 6.1, 12, 6.1, 5.9, 0]) x = np.arange(0, len(y)) self.assertEqual(discrete_fwhm(x, y), 4) if __name__ == "__main__": unittest.main()
m3wolf/scimap
tests/test_peaks.py
Python
gpl-3.0
1,305
[ "Gaussian" ]
9463fb8d20c7b4ebebaa5ad2144aea0669c192e626a48cc000d35b81f10e46d9
import glob
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import sys

GRIDORDER = sys.argv[1]


def summarize():
    res = []
    for fn in glob.glob("dfs/*.csv"):
        df = pd.read_csv(fn)
        sdf = df.groupby("flowpath").sum() / 10.0
        adf = df[["flowpath", "length"]].groupby("flowpath").mean()
        for fp, row in sdf.iterrows():
            length = adf.at[fp, "length"]
            res.append(
                dict(
                    flowpath=fp,
                    length=length,
                    avg_det=(row["av_det"] * 4.463),
                    runoff=row["runoff"],
                    delivery=(row["delivery"] * 4.463),
                )
            )

    df = pd.DataFrame(res)
    df.to_csv("flowpaths%s.csv" % (GRIDORDER,))


def plot1(df):
    (fig, ax) = plt.subplots(1, 1)
    plt.hist2d(df["length"], df["avg_det"], bins=[160, 320], norm=LogNorm())
    plt.colorbar(label="Flowpaths")
    ax.set_ylabel("Soil Detachment [T/a per year]")
    ax.set_xlabel("Flowpath Length [m]")
    ax.set_xlim(0, 400)
    ax.set_ylim(0, 50)
    df["ilength"] = (df["length"] / 5.0).astype("i")
    gdf = df.groupby("ilength").mean()
    ax.plot(
        gdf.index.values * 5.0,
        gdf["avg_det"].values,
        lw=2,
        color="k",
        label="Avg",
        zorder=5,
    )
    ax.plot(
        gdf.index.values * 5.0,
        gdf["avg_det"].values,
        lw=4,
        color="w",
        zorder=4,
    )
    ax.grid(True)
    ax.set_title("Iowa DEP:: Yearly Avg Detachment by Flowpath Length")
    ax.legend()
    fig.savefig("test.png")


def main():
    df = pd.read_csv("flowpaths%s.csv" % (GRIDORDER,))
    x = []
    y = []
    y2 = []
    for i in np.arange(0, 50, 0.5):
        x.append(i)
        y.append(df[df["length"] >= i]["delivery"].mean())
        y2.append(df[df["length"] >= i]["avg_det"].mean())

    (fig, ax) = plt.subplots(1, 1)
    ax.plot(x, y, label="Delivery")
    ax.plot(x, y2, label="Detachment")
    ax.set_xlabel(
        "Flowpath Length Floor [m], (average computed for len >= floor)"
    )
    ax.set_title("Iowa DEP: Yearly Averages by Truncated Flowpath Length")
    ax.legend(loc="best")
    ax.grid(True)
    ax.set_ylabel("Soil Delivery or Detachment [T/a per year]")
    fig.savefig("test.png")


if __name__ == "__main__":
    summarize()
    # main()
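# --- Hedged usage note (editor's addition, not part of the upstream script) ---
# The script expects a grid-order identifier as its only argument plus a
# local dfs/ directory of per-flowpath CSV files (an assumption read off
# the glob above):
#
#   $ python flowpath_yearly.py 2        # writes flowpaths2.csv
#
# Uncommenting main() at the bottom then renders test.png from that summary.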
akrherz/idep
scripts/gridorder/flowpath_yearly.py
Python
mit
2,401
[ "ADF" ]
2d572af412a07f872536d708cf1b7fefbca9e6d5709d95f1032f30faf6e0803b
#!/usr/bin/python
#==========================================================================
#
#   Copyright Insight Software Consortium
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#          http://www.apache.org/licenses/LICENSE-2.0.txt
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
#==========================================================================*/
#
# \author Hans J. Johnson
#
# This script is designed to help change the copyright notices in all ITK files to a common format.
# For files that are .h, .cxx, .hxx, .c, if there is no other copyright information, add the itkCopyright.

from __future__ import print_function
import re
import sys
import os

## New license as specified on: http://itk.org/Wiki/ITK_Release_4/Licensing
NewITKCopyrightNotice="""/*=========================================================================
 *
 *  Copyright Insight Software Consortium
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *         http://www.apache.org/licenses/LICENSE-2.0.txt
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 *=========================================================================*/
"""

NewVTKDependantCopyrightNotice="""/*=========================================================================
 *
 *  Portions of this file are subject to the VTK Toolkit Version 3 copyright.
 *
 *  Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
 *
 *  For complete copyright, license and disclaimer of warranty information
 *  please refer to the NOTICE file at the top of the ITK source tree.
 *
 *=========================================================================*/
"""

## Patterns that match the old copyright notice sections
## ITK only copyright
ITKOnlyOldHeader=""" */\* *==.*Program:.*Insight Segmentation & Registration Toolkit.*Copyright .* Insight.*Consortium. All rights reserved.*See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.[\n\r ]*This software is distributed WITHOUT ANY WARRANTY; without even.*the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR.*PURPOSE. See the above copyright notices for more information.*=== *\*/[\n\r ]*"""
ITKOnlyOldRE=re.compile(ITKOnlyOldHeader,re.MULTILINE|re.DOTALL|re.IGNORECASE)

## Files that originated in VTK, and now have ITK also
ITKVTKOldHeader=""" */\* *==.*Program:.*Insight Segmentation & Registration Toolkit.*Copyright .* Insight Software Consortium. All rights reserved.*See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.[\n\r ]*.*VTKCopyright.txt.*This software is distributed WITHOUT ANY WARRANTY; without even.*the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR.*PURPOSE. See the above copyright notices for more information.*=== *\*/[\n\r ]*"""
ITKVTKOldRE=re.compile(ITKVTKOldHeader,re.MULTILINE|re.DOTALL|re.IGNORECASE)

## Looking for new files.
NewITKHeader=""" */\* *==.*http://www.apache.org/licenses/LICENSE-2.0.txt.*=== *\*/"""
NewITKHeaderRE=re.compile(NewITKHeader,re.MULTILINE|re.DOTALL|re.IGNORECASE)

eolSpaceRemove=re.compile(r' *$',re.MULTILINE)

## The exception list contains files that should not have the ITK copyright notices added.
ExclusionList=['Utilities','.git']
ExtensionsThatNeedCopyright=['.cxx','.c','.h','.hxx']

############ ############ ############ ############ ############ ############ ############
if len(sys.argv) != 2:
    print("USAGE: {0} <Top of ITK tree to process>".format(sys.argv[0]))
    sys.exit(-1)

HeadOfITKTree=sys.argv[1]

for top,directory,files in os.walk(HeadOfITKTree):
    ## First remove Excluded directories
    ## NOTE: iterate over a copy; removing from the list while iterating
    ## over it skips entries (this fixes a bug in the original loops).
    for dd in directory[:]:
        if dd[0] == '.': #Skip all directories that begin with '.'
            directory.remove(dd)
            continue
        if dd in ExclusionList:
            directory.remove(dd)
            continue
    ## Now process each file
    for ff in files[:]:
        if ff in ExclusionList:
            files.remove(ff)
            continue
        if ff[0] == '.': #Skip all files that begin with '.'
            files.remove(ff)
            #print("@@@@@@@",ff)
            continue
        currFile=os.path.join(top,ff)
        print(currFile)
        infile=open(currFile,'r')
        file_text=infile.read()
        newstring=file_text # default output to input, just in case all search patterns fail
        infile.close()
        substitutionMade=0
        testITKOnlySearch=ITKOnlyOldRE.search(file_text)
        if testITKOnlySearch:
            print("{0} is ITKOnlyHeader".format(currFile))
            newstring=ITKOnlyOldRE.sub(NewITKCopyrightNotice,file_text)
            newstring=eolSpaceRemove.sub("",newstring) ## a few files still have eol spaces
            substitutionMade=1
        testITKVTKSearch=ITKVTKOldRE.search(file_text)
        if testITKVTKSearch:
            print("{0} is VTKITKHeader".format(currFile))
            newstring=ITKVTKOldRE.sub(NewITKCopyrightNotice+NewVTKDependantCopyrightNotice,file_text)
            newstring=eolSpaceRemove.sub("",newstring) ## a few files still have eol spaces
            substitutionMade=1
        ##Add new copyright if it had not already existed.
        root,ext=os.path.splitext(currFile)
        if ext in ExtensionsThatNeedCopyright:
            testNewITKHeaderRE=NewITKHeaderRE.search(file_text) # see if new CopyRight notice already exists.
            if testNewITKHeaderRE:
                print("Already Processed {0}".format(currFile))
            elif (substitutionMade == 0):
                print("{0} needed copyright header.".format(currFile))
                newstring=NewITKCopyrightNotice+file_text
                newstring=eolSpaceRemove.sub("",newstring) ## a few files still have eol spaces
        outfile=open(currFile,'w')
        outfile.write(newstring)
        outfile.close()
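# --- Hedged usage note (editor's addition, not part of the upstream script) ---
# Point the script at the top of an ITK checkout; the path is illustrative:
#
#   $ python UpdateCopyrightStatementsInITK.py /path/to/ITK
#
# Directories in ExclusionList (and anything dot-prefixed) are skipped;
# .h/.cxx/.hxx/.c files without a recognized notice gain the Apache header.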
BlueBrain/ITK
Utilities/Maintenance/UpdateCopyrightStatementsInITK.py
Python
apache-2.0
6,488
[ "VTK" ]
a0597d8afb2fa64e59f596fe49701718aba765bd2684e7c1ac8eb33e5442a0f7
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class NetcdfFortran(AutotoolsPackage):
    """Fortran interface for NetCDF4"""

    homepage = "http://www.unidata.ucar.edu/software/netcdf"
    url = "http://www.unidata.ucar.edu/downloads/netcdf/ftp/netcdf-fortran-4.4.3.tar.gz"

    version('4.4.4', 'e855c789cd72e1b8bc1354366bf6ac72')
    version('4.4.3', 'bfd4ae23a34635b273d3eb0d91cbde9e')

    depends_on('netcdf')

    # The default libtool.m4 is too old to handle NAG compiler properly:
    # https://github.com/Unidata/netcdf-fortran/issues/94
    patch('nag.patch', when='@:4.4.4%nag')

    def configure_args(self):
        return ['CPPFLAGS=-I' + self.spec['netcdf'].prefix.include]

    @property
    def libs(self):
        libraries = ['libnetcdff']

        # This package installs both shared and static libraries. Permit
        # clients to query which one they want.
        query_parameters = self.spec.last_query.extra_parameters
        shared = 'shared' in query_parameters

        return find_libraries(
            libraries, root=self.prefix, shared=shared, recursive=True
        )
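# --- Hedged usage sketch (editor's addition, not part of the upstream package) ---
# How a dependent recipe might consume the `libs` property above. The class
# and flags are hypothetical; only the spec-query syntax follows standard
# Spack conventions:
#
#   class Consumer(AutotoolsPackage):
#       depends_on('netcdf-fortran')
#
#       def configure_args(self):
#           # ':shared' asks the query above for the shared libnetcdff
#           libs = self.spec['netcdf-fortran:shared'].libs
#           return ['LIBS=%s' % libs.link_flags]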
krafczyk/spack
var/spack/repos/builtin/packages/netcdf-fortran/package.py
Python
lgpl-2.1
2,323
[ "NetCDF" ]
e93858d3d94547fd7dc734843663bcbdf51812f73b90f56742f3d3aa80e2a392
# Copyright (c) 2010-2015 Bo Lin # Copyright (c) 2010-2015 Yanhong Annie Liu # Copyright (c) 2010-2015 Stony Brook University # Copyright (c) 2010-2015 The Research Foundation of SUNY # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import sys from ast import * from itertools import chain from . import dast from .utils import printd, printw, printe OperatorMap = { dast.AddOp : Add, dast.SubOp : Sub, dast.MultOp : Mult, dast.DivOp : Div, dast.ModOp : Mod, dast.PowOp : Pow, dast.LShiftOp : LShift, dast.RShiftOp : RShift, dast.BitOrOp : BitOr, dast.BitXorOp : BitXor, dast.BitAndOp : BitAnd, dast.FloorDivOp : FloorDiv, dast.EqOp : Eq, dast.NotEqOp : NotEq, dast.LtOp : Lt, dast.LtEOp : LtE, dast.GtOp : Gt, dast.GtEOp : GtE, dast.IsOp : Is, dast.IsNotOp : IsNot, dast.InOp : In, dast.NotInOp : NotIn, dast.USubOp : USub, dast.UAddOp : UAdd, dast.InvertOp : Invert, dast.AndOp : And, dast.OrOp : Or } AggregateMap = { dast.MaxExpr : "max", dast.MinExpr : "min", dast.SizeExpr : "len", dast.SumExpr : "sum" } # New matrix multiplication operator since 3.4: if sys.version_info > (3, 5): OperatorMap[dast.MatMultOp] = MatMult PATTERN_EXPR_NAME = "_PatternExpr_%d" QUATIFIED_EXPR_NAME = "_QuantifiedExpr_%d" ########## Convenience methods for creating AST nodes: ########## def call_noarg_ast(name): return Call(Name(name, Load()), [], [], None, None) def pyCall(func, args=[], keywords=[], starargs=None, kwargs=None): if isinstance(func, str): func = pyName(func) return Call(func, list(args), [keyword(arg, val) for arg, val in keywords], starargs, kwargs) def pyName(name, ctx=None): return Name(name, Load() if ctx is None else ctx) def pyNone(): if sys.version_info > (3, 4): return NameConstant(None) else: return pyName("None") def pyTrue(): if sys.version_info > (3, 4): return NameConstant(True) else: return pyName("True") def pyFalse(): if sys.version_info > (3, 4): return NameConstant(False) else: return pyName("False") def pyNot(expr): return UnaryOp(Not(), expr) def pyList(elts, ctx=None): return List(elts, Load() if ctx is None else ctx) def pySet(elts, ctx=None): return Set(elts) def pyTuple(elts, ctx=None): return Tuple(elts, Load() if ctx is None else ctx) def pySetC(elts): return pyCall("set", args=elts) def pySubscr(value, index, ctx=None): return Subscript(value, Index(index), Load() if ctx is None else ctx) def pySize(value): return pyCall("len", [value]) def pyMin(value): return pyCall("min", [value]) def pyMax(value): return pyCall("max", [value]) def pyAttr(name, attr, ctx=None): if isinstance(name, str): 
return Attribute(Name(name, Load()), attr, Load() if ctx is None else ctx) else: return Attribute(name, attr, Load() if ctx is None else ctx) def pyCompare(left, op, right): return Compare(left, [op()], [right]) def pyLabel(name, block=False, timeout=None): kws = [("block", pyTrue() if block else pyFalse())] if timeout is not None: kws.append(("timeout", timeout)) return Expr(pyCall(func=pyAttr(pyCall("super"), "_label"), args=[Str(name)], keywords=kws)) def pyClassDef(name, bases=[], keywords=[], starargs=None, kwargs=None, body=[], decorator_list=[]): return ClassDef(name, list(bases), [keyword(arg, val) for arg, val in keywords], starargs, kwargs, list(body), list(decorator_list)) def pyFunctionDef(name, args=[], body=[], decorator_list=[], returns=None): arglist = arguments(args=[arg(n, None) for n in args], vararg=None, varargannotation=None, kwonlyargs=[], kwarg=None, kwargannotation=None, defaults=[], kw_defaults=None) return FunctionDef(name, arglist, list(body), list(decorator_list), returns) def propagate_attributes(from_nodes, to_node): if isinstance(to_node, AST): if not (isinstance(from_nodes, list) or isinstance(from_nodes, set)): from_nodes = [from_nodes] for fro in from_nodes: if (hasattr(fro, "prebody") and isinstance(fro.prebody, list)): if not hasattr(to_node, "prebody"): to_node.prebody = [] to_node.prebody.extend(fro.prebody) if (hasattr(fro, "postbody") and isinstance(fro.postbody, list)): if not hasattr(to_node, "postbody"): to_node.postbody = [] to_node.postbody.extend(fro.postbody) return to_node def propagate_fields(node): if hasattr(node, '_fields'): for f in node._fields: propagate_attributes(getattr(node, f), node) return node def concat_bodies(subexprs, body): prebody = [] postbody = [] for e in subexprs: if hasattr(e, "prebody"): prebody.extend(e.prebody) if hasattr(e, "postbody"): postbody.extend(e.postbody) return prebody + body + postbody def is_all_wildcards(targets): """True if 'targets' contain only wildcards.""" for elt in targets: if not (isinstance(elt, Name) and elt.id == '_'): return False return True class PythonGeneratorException(Exception): pass def translate(distalgo_ast, filename="", options=None): pg = PythonGenerator(filename, options) try: return pg.visit(distalgo_ast) except Exception as ex: raise PythonGeneratorException(str(pg.current_node)) from ex # List of arguments needed to initialize a process: PROC_INITARGS = ["parent", "initq", "channel", "props"] PREAMBLE = parse( """ import da """).body POSTAMBLE = parse(""" if __name__ == "__main__": da.init(config) """).body class PythonGenerator(NodeVisitor): """Transforms DistPy AST into Python AST. """ def __init__(self, filename="", options=None): self.filename = filename self.processed_patterns = set() self.preambles = list(PREAMBLE) self.postambles = list() # One instance of PatternComprehensionGenerator for each query. # This is needed so free vars with the same name in a query can be # properly unified: self.pattern_generator = None self.cmdline_args = options self.module_args = None # Used by incgen to avoid expanding 'pre/postbody' in the inc module: self.disable_body_expansion = False self.current_node = None def get_option(self, option, default=None): if hasattr(self.cmdline_args, option): return getattr(self.cmdline_args, option) elif hasattr(self.module_args, option): return getattr(self.module_args, option) else: return default def reset(self): """Resets internal states. Call this before compiling a new file or code segment, if you don't want to create a new instance. 
""" self.processed_patterns = set() self.preambles = list(PREAMBLE) self.postambles = list() self.pattern_generator = None def visit(self, node): """Generic visit method. If the Incrementalization interface generated code for this node, as indicated by the 'ast_override' attribute, then return the generated code. Otherwise, call the normal visit method. """ assert node is None or isinstance(node, dast.DistNode) self.current_node = node if hasattr(node, "ast_override"): res = node.ast_override else: res = super().visit(node) if isinstance(res, list) and not self.disable_body_expansion: # This is a statement, expand pre and post bodies: return concat_bodies([node], res) else: # This is an expression, pass on pre and post bodies: return propagate_attributes([node], res) def body(self, body): """Process a block of statements.""" res = [] for stmt in body: if stmt.label is not None: res.append(pyLabel(stmt.label)) ast = self.visit(stmt) if ast is not None: res.extend(ast) else: printe("None result from %s" % str(stmt)) return res def bases(self, bases): """Process base classes of a class definition.""" res = [] for expr in bases: res.append(self.visit(expr)) return res def visit_Program(self, node): self.module_args = node._compiler_options body = [] body.extend(self.body(node.body)) return Module(self.preambles + body + self.postambles) def generate_event_def(self, node): evtype = pyAttr(pyAttr("da", "pat"), node.type.__name__) name = Str(node.name) history = self.history_stub(node) pattern = self.visit(node.pattern) sources = pyNone() destinations = pyNone() timestamps = pyNone() if len(node.sources) > 0: sources = pyList([self.visit(s) for s in node.sources]) if len(node.destinations) > 0: destinations = pyList([self.visit(s) for s in node.destinations]) if len(node.timestamps) > 0: timestamps = pyList([self.visit(s) for s in node.timestamps]) handlers = pyList([pyAttr("self", h.name) for h in node.handlers]) return pyCall(func=pyAttr(pyAttr("da", "pat"), "EventPattern"), args=[evtype, name, pattern], keywords=[("sources", sources), ("destinations", destinations), ("timestamps", timestamps), ("record_history", history), ("handlers", handlers)]) def history_initializers(self, node): return [Assign(targets=[pyAttr("self", evt.name)], value=pyList([])) for evt in node.events if evt.record_history] def generate_init(self, node): supercall = [Expr(pyCall(func=pyAttr(pyCall(pyName("super")), "__init__"), args=[pyName(n) for n in PROC_INITARGS]))] histories = self.history_initializers(node) events = [Expr(pyCall(func=pyAttr(pyAttr("self", "_events"), "extend"), args=[pyList([self.generate_event_def(evt) for evt in node.events])]))] return pyFunctionDef(name="__init__", args=(["self"] + PROC_INITARGS), body=(supercall + histories + events)) def generate_handlers(self, node): """Generate the message handlers of a process.""" body = [] for evt in node.events: for handler in evt.handlers: body.extend((self.visit(handler))) return body def visit_Arguments(self, node): """Generates the argument lists for functions and lambdas.""" args = [arg(ident.name, None) for ident in node.args] kwonlyargs = [arg(ident.name, None) for ident in node.kwonlyargs] kw_defaults = [self.visit(expr) for expr in node.kw_defaults] defaults = [self.visit(expr) for expr in node.defaults] if sys.version_info > (3, 4): vararg = arg(node.vararg.name, None) \ if node.vararg is not None else None kwarg = arg(node.kwarg.name, None) \ if node.kwarg is not None else None return arguments( args=args, vararg=vararg, kwonlyargs=kwonlyargs, 
kwarg=kwarg, defaults=defaults, kw_defaults=kw_defaults) else: vararg = node.vararg.name if node.vararg is not None else None kwarg = node.kwarg.name if node.kwarg is not None else None return arguments( args=args, vararg=vararg, varargannotation=None, kwonlyargs=kwonlyargs, kwarg=kwarg, kwargannotation=None, defaults=defaults, kw_defaults=kw_defaults) def visit_Process(self, node): printd("Compiling process %s" % node.name) printd("has methods:%r" % node.methods) cd = ClassDef() cd.name = node.name cd.bases = self.bases(node.bases) cd.bases.append(pyAttr("da", "DistProcess")) # ######################################## # TODO: just pass these through until we figure out a use for them: cd.keywords = node.ast.keywords cd.starargs = node.ast.starargs cd.kwargs = node.ast.kwargs # ######################################## cd.body = [self.generate_init(node)] if node.setup is not None: cd.body.extend(self.visit(node.setup)) if node.entry_point is not None: cd.body.extend(self._entry_point(node.entry_point)) cd.decorator_list = [self.visit(d) for d in node.decorators] cd.body.extend(self.body(node.methods)) cd.body.extend(self.generate_handlers(node)) return [cd] def _entry_point(self, node): stmts = self.visit(node) stmts[0].name = "_da_run_internal" return stmts def visit_Function(self, node): fd = FunctionDef() fd.name = node.name fd.args = self.visit(node.args) fd.body = self.body(node.body) if isinstance(node.parent, dast.Process): if node.name == "setup": fd.args = self.visit(node.parent.args) fd.body = ([Assign(targets=[pyAttr("self", name, Store())], value=pyName(name)) for name in node.parent.ordered_names] + fd.body) fd.args.args.insert(0, arg("self", None)) fd.decorator_list = [self.visit(d) for d in node.decorators] fd.returns = None return [fd] def visit_ClassStmt(self, node): cd = pyClassDef(name=node.name, bases=self.bases(node.bases), body=self.body(node.body)) # ######################################## # TODO: just pass these through until we figure out a use for them: cd.keywords = node.ast.keywords cd.starargs = node.ast.starargs cd.kwargs = node.ast.kwargs # ######################################## cd.decorator_list = [self.visit(d) for d in node.decorators] return [cd] def visit_PythonExpr(self, node): return node.ast def visit_SimpleExpr(self, node): return self.visit(node.value) def visit_AttributeExpr(self, node): sub = self.visit(node.value) ast = Attribute(sub, node.attr, None) return propagate_attributes([sub], ast) def visit_SubscriptExpr(self, node): val = self.visit(node.value) if isinstance(node.index, dast.SliceExpr): idx = self.visit(node.index) else: idx = Index(self.visit(node.index)) propagate_attributes([idx.value], idx) ast = Subscript(val, idx, Load()) return propagate_attributes((val, idx), ast) def visit_SliceExpr(self, node): l = self.visit(node.lower) if node.lower is not None else None u = self.visit(node.upper) if node.upper is not None else None s = self.visit(node.step) if node.step is not None else None ast = Slice(l, u, s) return propagate_attributes((l, u, s), ast) def visit_StarredExpr(self, node): val = self.visit(node.value) ast = Starred(val, None) return propagate_attributes([val], ast) def visit_EllipsisExpr(self, node): return Ellipsis() def visit_ConstantExpr(self, node): if isinstance(node.value, str): return Str(node.value) elif isinstance(node.value, bytes): return Bytes(node.value) else: return Num(node.value) def visit_SelfExpr(self, node): return pyName("self") def visit_TrueExpr(self, node): return pyTrue() def 
visit_FalseExpr(self, node): return pyFalse() def visit_NoneExpr(self, node): return pyNone() def visit_TupleExpr(self, node): ast = pyTuple([self.visit(e) for e in node.subexprs]) return propagate_attributes(ast.elts, ast) def visit_ListExpr(self, node): ast = pyList([self.visit(e) for e in node.subexprs]) return propagate_attributes(ast.elts, ast) def visit_SetExpr(self, node): ast = Set([self.visit(e) for e in node.subexprs]) return propagate_attributes(ast.elts, ast) def visit_DictExpr(self, node): ast = Dict([self.visit(e) for e in node.keys], [self.visit(e) for e in node.values]) return propagate_attributes(ast.keys + ast.values, ast) def visit_IfExpr(self, node): ast = IfExp(self.visit(node.condition), self.visit(node.body), self.visit(node.orbody)) return propagate_attributes((ast.test, ast.body, ast.orelse), ast) def visit_CallExpr(self, node): ast = pyCall(self.visit(node.func), [self.visit(a) for a in node.args], [(key, self.visit(value)) for key, value in node.keywords], self.visit(node.starargs) if node.starargs is not None else None, self.visit(node.kwargs) if node.kwargs is not None else None) return propagate_attributes([ast.func] + ast.args, ast) def visit_ApiCallExpr(self, node): ast = pyCall(pyAttr("da", node.func), [self.visit(a) for a in node.args], [(key, self.visit(value)) for key, value in node.keywords], self.visit(node.starargs) if node.starargs is not None else None, self.visit(node.kwargs) if node.kwargs is not None else None) return propagate_attributes(ast.args, ast) def visit_BuiltinCallExpr(self, node): ast = pyCall(pyAttr("self", node.func), [self.visit(a) for a in node.args]) return propagate_attributes(ast.args, ast) def visit_AggregateExpr(self, node): ast = pyCall(AggregateMap[type(node)], [self.visit(a) for a in node.args]) return propagate_attributes(ast.args, ast) visit_MaxExpr = visit_AggregateExpr visit_MinExpr = visit_AggregateExpr visit_SumExpr = visit_AggregateExpr visit_SizeExpr = visit_AggregateExpr def visit_LogicalExpr(self, node): if node.operator is dast.NotOp: ast = UnaryOp(Not(), self.visit(node.left)) return propagate_attributes([ast.operand], ast) else: ast = BoolOp(OperatorMap[node.operator](), [self.visit(e) for e in node.subexprs]) return propagate_attributes(ast.values, ast) def visit_DomainSpec(self, node): domain = self.visit(node.domain) if not isinstance(node.pattern, dast.PatternExpr): result = comprehension(self.visit(node.pattern), domain, []) else: if self.pattern_generator is None: # Legacy pattern target, condlist = PatternComprehensionGenerator().visit( node.pattern) else: target, condlist = self.pattern_generator.visit(node.pattern) result = comprehension(target, domain, condlist) return propagate_fields(result) def visit_QuantifiedExpr(self, node): if self.pattern_generator is None: self.pattern_generator = PatternComprehensionGenerator() is_top_level_query = True else: is_top_level_query = False if not self.get_option('use_top_semantic', default=False): self.pattern_generator.push_state() self.pattern_generator.reset_state() body = funcbody = [] for domspec in node.domains: comp = self.visit(domspec) ast = For(comp.target, comp.iter, [], []) body.append(propagate_attributes([ast.iter], ast)) body = body[0].body for cond in comp.ifs: ast = If(cond, [], []) body.append(propagate_attributes([ast.test], ast)) body = body[0].body postbody = [] ifcond = self.visit(node.predicate) if hasattr(ifcond, "prebody"): body.extend(ifcond.prebody) if hasattr(ifcond, "postbody"): postbody.extend(ifcond.postbody) if node.operator is
dast.UniversalOp: ifcond = UnaryOp(Not(), ifcond) ifbody = [Return(pyFalse())] else: # ExistentialExpr ifbody = [Return(pyTrue())] body.append(If(ifcond, ifbody, [])) body.extend(postbody) if node.operator is dast.UniversalOp: funcbody.append(Return(pyTrue())) else: funcbody.append(Return(pyFalse())) # names that should be unified with a containing query need to be # explicitly passed in: curnode = node params = set() while curnode is not node.top_level_query: curnode = curnode.parent if isinstance(curnode, dast.QueryExpr): params |= set(curnode.ordered_local_freevars) params &= node.nameobjs ast = pyCall(func=pyName(node.name), keywords=[(v.name, self.visit(v)) for v in params]) funast = pyFunctionDef(name=node.name, args=[v.name for v in params], body=funcbody) ast.prebody = [funast] nameset = node.freevars - params if len(nameset) > 0: # Back patch nonlocal statement if not isinstance(node.scope, dast.ComprehensionExpr): if not isinstance(node.statement.parent, dast.Program): decl = Nonlocal([nv.name for nv in nameset]) else: decl = Global([nv.name for nv in nameset]) funast.body.insert(0, decl) # Assignment needed to ensure all vars are bound at this point if is_top_level_query: ast.prebody.insert( 0, Assign(targets=[pyName(nv.name) for nv in nameset], value=pyNone())) if is_top_level_query: self.pattern_generator = None elif not self.get_option('use_top_semantic', default=False): self.pattern_generator.pop_state() return ast def visit_ComprehensionExpr(self, node): printd("Entering comprehension " + str(node)) if self.pattern_generator is None: self.pattern_generator = PatternComprehensionGenerator() is_top_level_query = True else: self.pattern_generator.push_state() is_top_level_query = False if not self.get_option('use_top_semantic', default=False): self.pattern_generator.reset_state() generators = [] dangling = [] for dom in node.conditions: comp = self.visit(dom) if isinstance(comp, comprehension): # Tuck any dangling conditions here: comp.ifs.extend(dangling) dangling = [] generators.append(comp) else: if len(generators) > 0: generators[-1].ifs.append(comp) propagate_attributes(generators[-1].ifs, generators[-1]) else: dangling.append(comp) if len(dangling) == 0: test = pyTrue() elif len(dangling) == 1: test = dangling[0] else: test = propagate_fields(BoolOp(And(), dangling)) try: if type(node) is dast.DictCompExpr: key = self.visit(node.elem.key) value = self.visit(node.elem.value) if len(generators) > 0: ast = DictComp(key, value, generators) else: # No generators, degenerate to IfExp: ast = IfExp(test, propagate_fields(Dict([key], [value])), Dict([], [])) return propagate_fields(ast) else: elem = self.visit(node.elem) if len(generators) > 0: if isinstance(node, dast.SetCompExpr): ast = SetComp(elem, generators) elif isinstance(node, dast.ListCompExpr): ast = ListComp(elem, generators) elif isinstance(node, dast.TupleCompExpr): ast = pyCall("tuple", args=[GeneratorExp(elem, generators)]) elif isinstance(node, dast.GeneratorExpr): ast = GeneratorExp(elem, generators) else: self.error("Unknown expression", node) return None else: # No generators, degenerate to IfExp: if isinstance(node, dast.SetCompExpr): ast = IfExp(test, propagate_fields(pySet([elem])), pySetC([])) elif isinstance(node, dast.ListCompExpr): ast = IfExp(test, propagate_fields(pyList([elem])), pyList([])) elif isinstance(node, dast.TupleCompExpr): ast = IfExp(test, propagate_fields(pyTuple([elem])), pyTuple([])) elif isinstance(node, dast.GeneratorExpr): # Impossible: self.error("Illegal generator expression.", 
node) return None else: self.error("Illegal unknown expression.", node) return None return propagate_fields(ast) finally: if is_top_level_query: printd("Leaving toplevel " + str(node)) self.pattern_generator = None else: # We need to restore the pattern state because comprehensions # does not bind witness values outside its scope: self.pattern_generator.pop_state() printd("Leaving comprehension " + str(node)) visit_GeneratorExpr = visit_ComprehensionExpr visit_SetCompExpr = visit_ComprehensionExpr visit_ListCompExpr = visit_ComprehensionExpr visit_DictCompExpr = visit_ComprehensionExpr visit_TupleCompExpr = visit_ComprehensionExpr def visit_ComparisonExpr(self, node): left = self.visit(node.left) right = self.visit(node.right) if isinstance(node.left, dast.PatternExpr): # 'PATTERN in DOMAIN' context = [(v.unique_name, self.visit(v.value)) for v in node.left.ordered_boundpatterns] ast = pyCall(func=pyAttr(left, "match_iter"), args=[right], keywords=context) else: op = OperatorMap[node.comparator]() ast = Compare(left, [op], [right]) return propagate_attributes((left, right), ast) def visit_ArithmeticExpr(self, node): op = OperatorMap[node.operator]() if issubclass(node.operator, dast.UnaryOperator): ast = UnaryOp(op, self.visit(node.right)) return propagate_fields(ast) else: ast = BinOp(self.visit(node.left), op, self.visit(node.right)) return propagate_fields(ast) visit_BinaryExpr = visit_ArithmeticExpr visit_UnaryExpr = visit_ArithmeticExpr def visit_PatternElement(self, node): if type(node) is dast.FreePattern: val = Str(node.value.name) if node.value is not None else pyNone() elif type(node) is dast.BoundPattern: val = Str(node.unique_name) elif type(node) is dast.ConstantPattern: if isinstance(node.value, dast.SelfExpr): # We have to special case the 'self' expr here: return pyCall(func=pyAttr(pyAttr("da", "pat"), "SelfPattern")) else: val = self.visit(node.value) else: val = pyList([self.visit(v) for v in node.value]) return pyCall(func=pyAttr(pyAttr("da", "pat"), type(node).__name__), args=[val]) visit_FreePattern = visit_PatternElement visit_BoundPattern = visit_PatternElement visit_ConstantPattern = visit_PatternElement visit_TuplePattern = visit_PatternElement visit_ListPattern = visit_PatternElement def visit_PatternExpr(self, node): if node.name not in self.processed_patterns: patast = self.visit(node.pattern) ast = Assign([pyName(node.name)], patast) self.preambles.append(ast) self.processed_patterns.add(node.name) return pyName(node.name) visit_LiteralPatternExpr = visit_PatternExpr def visit_HistoryExpr(self, node): assert node.event is not None return pyAttr("self", node.event.name) visit_ReceivedExpr = visit_HistoryExpr visit_SentExpr = visit_HistoryExpr def visit_LambdaExpr(self, node): args = self.visit(node.args) return Lambda(args, self.visit(node.body)) def visit_NamedVar(self, node): if isinstance(node.scope, dast.Process): return pyAttr("self", node.name) else: return pyName(node.name) ########## Statements ########## def visit_NoopStmt(self, node): return [Pass()] def visit_PassStmt(self, node): return [Pass()] def visit_AssignmentStmt(self, node): targets = [self.visit(tgt) for tgt in node.targets] val = self.visit(node.value) ast = Assign(targets, val) return concat_bodies(targets + [val], [ast]) def visit_OpAssignmentStmt(self, node): target = self.visit(node.target) val = self.visit(node.value) ast = AugAssign(target, OperatorMap[node.operator](), val) return concat_bodies([target, val], [ast]) def visit_IfStmt(self, node): test = self.visit(node.condition) body 
= self.body(node.body) orelse = self.body(node.elsebody) ast = If(test, body, orelse) return concat_bodies([test], [ast]) def visit_WhileStmt(self, node): test = self.visit(node.condition) body = self.body(node.body) orelse = self.body(node.elsebody) ast = While(test, body, orelse) return concat_bodies([test], [ast]) def visit_ForStmt(self, node): comp = self.visit(node.domain) body = self.body(node.body) orelse = self.body(node.elsebody) ast = For(comp.target, comp.iter, body, orelse) return concat_bodies((comp.target, comp.iter), [ast]) def visit_TryStmt(self, node): body = self.body(node.body) handlers = [self.visit(eh) for eh in node.excepthandlers] orelse = self.body(node.elsebody) finalbody = self.body(node.finalbody) return [Try(body, handlers, orelse, finalbody)] def visit_ExceptHandler(self, node): type = self.visit(node.type) body = self.body(node.body) return ExceptHandler(type, node.name, body) def visit_TryFinallyStmt(self, node): body = self.body(node.body) finalbody = self.body(node.finalbody) return [TryFinally(body, finalbody)] def visit_AwaitStmt(self, node): INCGRD = AugAssign(pyName(node.unique_label), Add(), Num(1)) DEDGRD = AugAssign(pyName(node.unique_label), Sub(), Num(1)) conds = [] body = [INCGRD] labelname = node.unique_label if node.label is None else node.label label = pyLabel(labelname, block=True, timeout=(self.visit(node.timeout) if node.timeout is not None else None)) last = body for br in node.branches: cond = self.visit(br.condition) conds.append(cond) ifbody = self.body(br.body) ifbody.append(INCGRD) brnode = If(cond, ifbody, []) last.append(brnode) last = brnode.orelse if len(node.orelse) > 0: cond = pyAttr("self", "_timer_expired") ifbody = self.body(node.orelse) ifbody.append(INCGRD) brnode = If(cond, ifbody, []) last.append(brnode) last = brnode.orelse # Label call must come after the If tests: last.append(label) last.append(DEDGRD) whilenode = While(pyCompare(pyName(node.unique_label), Eq, Num(0)), body, []) main = [Assign([pyName(node.unique_label)], Num(0))] if node.timeout is not None: main.append(Expr(pyCall(pyAttr("self", "_timer_start")))) main.append(whilenode) if node.is_in_loop: whilenode.orelse = [If(pyCompare(pyName(node.unique_label), NotEq, Num(2)), [Continue()], [])] main.append(If(pyCompare(pyName(node.unique_label), NotEq, Num(2)), [Break()], [])) return concat_bodies(conds, main) def visit_BranchingAwaitStmt(self, node): orlist = [] branches = ifbody = [] for branch in node.branches: if branch.condition is None: ifbody.extend(self.body(branch.body)) else: test = self.visit(branch.condition) orlist.append(test) body = self.body(branch.body) orelse = [] ifbody.append(If(test, body, orelse)) ifbody = orelse cond = BoolOp(Or(), orlist) test = UnaryOp(Not(), cond) condfunc = pyFunctionDef(name="await_cond_%d" % hash(node), body=[Return(test)]) awaitcall = pyCall(func=pyAttr(pyCall("super"), "_await_"), args=[pyName(condfunc.name)]) return concat_bodies(orlist, [condfunc, awaitcall, branches[0]]) def visit_LoopingAwaitStmt(self, node): cond = self.visit(node.condition) test = UnaryOp(Not(), cond) condfunc = pyFunctionDef(name="await_cond_%d" % hash(node), body=[Return(test)]) awaitcall = pyCall(func=pyAttr(pyCall("super"), "_await"), args=[pyName(condfunc.name)]) mainbody = self.body(node.body) orelse = [Break()] ifcheck = If(cond, mainbody, orelse) body = concat_bodies([cond], [condfunc, awaitcall, ifcheck]) ast = While(pyTrue(), body, []) return [ast] def visit_ReturnStmt(self, node): if node.value is not None: value = 
self.visit(node.value) else: value = None ast = Return(value) return concat_bodies([value], [ast]) def visit_DeleteStmt(self, node): targets = [self.visit(tgt) for tgt in node.targets] ast = Delete(targets) return concat_bodies(targets, [ast]) def visit_YieldStmt(self, node): if node.value is not None: value = self.visit(node.value) ast = Expr(Yield(value)) return concat_bodies([value], [ast]) else: return [Expr(Yield(None))] def visit_YieldFromStmt(self, node): if node.value is not None: value = self.visit(node.value) ast = Expr(YieldFrom(value)) return concat_bodies([value], [ast]) else: return [Expr(YieldFrom(None))] def visit_WithStmt(self, node): items = [] for item in node.items: context_expr = self.visit(item[0]) if item[1] is not None: optional_vars = self.visit(item[1]) else: optional_vars = None items.append(withitem(context_expr, optional_vars)) body = self.body(node.body) ast = With(items, body) return concat_bodies([e.context_expr for e in items], [ast]) def visit_SimpleStmt(self, node): value = self.visit(node.expr) ast = Expr(value) return concat_bodies([value], [ast]) def visit_BreakStmt(self, node): return [Break()] def visit_PassStmt(self, node): return [Pass()] def visit_ContinueStmt(self, node): return [Continue()] def visit_PythonStmt(self, node): return [node.ast] def visit_AssertStmt(self, node): expr = self.visit(node.expr) msg = self.visit(node.msg) if node.msg is not None else None ast = Assert(expr, msg) return concat_bodies([expr, msg], [ast]) def visit_GlobalStmt(self, node): return [Global(node.names)] def visit_NonlocalStmt(self, node): return [Nonlocal(node.names)] def visit_SendStmt(self, node): mesg = self.visit(node.message) tgt = self.visit(node.target) ast = Expr(pyCall(func=pyAttr("self", "_send"), args=[mesg, tgt])) return concat_bodies([mesg, tgt], [ast]) def visit_OutputStmt(self, node): args = [self.visit(msg) for msg in node.message] keywords = [] if node.level is not None: keywords.append(("level", self.visit(node.level))) if node.separator is not None: keywords.append(("sep", self.visit(node.separator))) ast = Expr(pyCall(func=pyAttr("self", "output"), args=args, keywords=keywords)) return concat_bodies(args, [ast]) def visit_ResetStmt(self, node): blueprint = """ for attr in dir(self): if attr.find("{0}Event_") != -1: getattr(self, attr).clear() """ if node.expr is None: typestr = "" else: typestr = str(node.expr.value) src = blueprint.format(typestr) return parse(src).body def history_stub(self, node): if node.record_history: return pyTrue() else: return pyNone() def visit_Event(self, node): return pyAttr(pyAttr("da", "pat"), node.type.__name__) def visit_EventHandler(self, node): stmts = self.visit_Function(node) stmts.append(Assign([pyAttr(node.name, "_labels")], (pyNone() if node.labels is None else pyCall(pyName("frozenset"), [Set([Str(l) for l in node.labels])])))) stmts.append(Assign([pyAttr(node.name, "_notlabels")], (pyNone() if node.notlabels is None else pyCall(pyName("frozenset"), [Set([Str(l) for l in node.notlabels])])))) return stmts class PatternComprehensionGenerator(PythonGenerator): def __init__(self): super().__init__() # Set of freevars seen so far. 
Freevars after the first occurrence # needs to be unified: self.freevars = set() self.state_stack = [] def push_state(self): self.state_stack.append(frozenset(self.freevars)) def pop_state(self): s = self.state_stack.pop() self.freevars = set(s) def reset_state(self): self.freevars = set() def visit_FreePattern(self, node): conds = [] if node.value is None: target = pyName("_") elif node.value in self.freevars: target = pyName(node.unique_name) conds = [pyCompare(target, Eq, self.visit(node.value))] else: target = self.visit(node.value) self.freevars.add(node.value) return target, conds def visit_BoundPattern(self, node): boundname = pyName(node.unique_name) targetname = self.visit(node.value) conast = pyCompare(boundname, Eq, targetname) return boundname, [conast] def visit_ConstantPattern(self, node): target = pyName(node.unique_name) compval = self.visit(node.value) return target, [pyCompare(target, Eq, compval)] def visit_TuplePattern(self, node): condition_list = [] targets = [] for elt in node.value: tgt, conds = self.visit(elt) targets.append(tgt) condition_list.extend(conds) if is_all_wildcards(targets): # Optimization: combine into one '_' return pyName('_'), [] target = pyTuple(targets) return target, condition_list def visit_ListPattern(self, node): raise NotImplementedError( "Can not compile list pattern to comprehension.") def visit_PatternExpr(self, node): return self.visit(node.pattern) visit_LiteralPatternExpr = visit_PatternExpr
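# --- Hedged usage sketch (editor's addition, not part of the upstream module) ---
# translate() maps a DistAlgo AST (da.compiler.dast) onto a standard Python
# ast.Module. A minimal driver might look like the following; the parser
# entry point name is an assumption and may differ between versions:
#
#   import ast
#   from da.compiler import pygen
#   from da.compiler import ui            # assumed to expose daast_from_file
#
#   dist_ast = ui.daast_from_file("example.da")
#   py_module = pygen.translate(dist_ast, filename="example.da")
#   code = compile(ast.fix_missing_locations(py_module), "example.da", "exec")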
sghosh1991/distalgo
da/compiler/pygen.py
Python
mit
43,508
[ "VisIt" ]
36b399263901465f6d0ff3d19c9c81305362da4bbd2f4be8009ad18649f24554
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html

import vtk
import mooseutils
from .. import base

class Line(base.ChiggerObject):
    """
    Wrapper for vtk line/point object.
    """

    @staticmethod
    def getOptions():
        opt = base.ChiggerObject.getOptions()
        opt.add('x', [], "The x-axis data.")
        opt.add('y', [], "The y-axis data.")
        opt.add('label', "The plot label (name appearing in legend).", vtype=str)
        opt.add('style', '-', "The line style.", allow=['none', '-'])
        opt.add('color', "The color of the line to plot.", vtype=list)
        opt.add('width', "The width of the line in Points.", vtype=int)
        opt.add('corner', 'left-bottom', "The axis corner to place the line.",
                allow=['left-bottom', 'right-bottom', 'right-top', 'left-top'])
        opt.add('marker', 'none', "Set the marker type.",
                allow=['none', 'cross', 'plus', 'square', 'circle', 'diamond'])
        opt.add('append', True, "Append new data to the existing data.")
        opt.add('tracer', False, "Places both x and y tracing lines, (see 'xtracer' and "
                                 "'ytracer').")
        opt.add('xtracer', None, "Place a tracing line that follows the leading x-value (overrides "
                                 "'tracer' option).", vtype=bool)
        opt.add('ytracer', None, "Place a tracing line that follows the leading y-value (overrides "
                                 "'tracer' option).", vtype=bool)
        return opt

    def __init__(self, x_data=None, y_data=None, **kwargs):
        super(Line, self).__init__(**kwargs)

        # Storage for vtk line/point object
        self._vtkplot = None

        # Build the vtkTable that stores the data
        x = vtk.vtkFloatArray()
        x.SetName('x-data')
        y = vtk.vtkFloatArray()
        y.SetName('y-data')
        self._vtktable = vtk.vtkTable()
        self._vtktable.AddColumn(x)
        self._vtktable.AddColumn(y)

        # Storage for tracing lines
        self._xtracer = None
        self._ytracer = None

        # Set x,y data
        if x_data:
            self.setOption('x', x_data)
        if y_data:
            self.setOption('y', y_data)

    def setOptions(self, *args, **kwargs):
        """
        Update the line object's settings.
        """
        super(Line, self).setOptions(*args, **kwargs)

        tracer = self.getOption('tracer')
        if tracer and not self.isOptionValid('xtracer'):
            self.setOption('xtracer', True)
        if tracer and not self.isOptionValid('ytracer'):
            self.setOption('ytracer', True)

    def initialize(self):
        """
        Called prior to inserting the vtkPlotLine/Points object into the chart. see Graph::Update
        """
        super(Line, self).initialize()

        # Create the vtk line or points object
        style = self.getOption('style')
        if style == '-' and not isinstance(self._vtkplot, vtk.vtkPlotLine):
            self._vtkplot = vtk.vtkPlotLine()
            self._vtkplot.SetInputData(self._vtktable, 0, 1)
        elif style == 'none' and not isinstance(self._vtkplot, vtk.vtkPlotPoints):
            self._vtkplot = vtk.vtkPlotPoints()
            self._vtkplot.SetInputData(self._vtktable, 0, 1)

        # Create tracer line(s)
        if self.getOption('xtracer'):
            if self._xtracer is None:
                self._xtracer = Line(append=False, width=0.1, color=self.getOption('color'))
                self._xtracer.update()
        if self.getOption('ytracer'):
            if self._ytracer is None:
                self._ytracer = Line(append=False, width=0.1, color=self.getOption('color'))
                self._ytracer.update()

    def getVTKPlot(self):
        """
        Return the vtkPlot object for this line.
        """
        return self._vtkplot

    def update(self, **kwargs):
        """
        Update the line object because data or settings changed.
        """
        super(Line, self).update(**kwargs)

        # Extract x,y data
        if not self.getOption('append'):
            self._vtktable.SetNumberOfRows(0)

        # Get the x,y data and reset to None so that data doesn't append over and over
        x = self.getOption('x')
        y = self.getOption('y')
        if (x and y) and (len(x) == len(y)):
            for i in range(len(x)): #pylint: disable=consider-using-enumerate
                array = vtk.vtkVariantArray()
                array.SetNumberOfTuples(2)
                array.SetValue(0, x[i])
                array.SetValue(1, y[i])
                self._vtktable.InsertNextRow(array)
            self._vtktable.Modified()
        elif (x and y) and (len(x) != len(y)):
            # NOTE: the exception was constructed but never raised in the
            # original; raising it is the evident intent.
            raise mooseutils.MooseException("Supplied x and y data must be same length.")

        # Apply the line/point settings
        if self.isOptionValid('color'):
            self._vtkplot.SetColor(*self.getOption('color'))
        if self.isOptionValid('width'):
            self._vtkplot.SetWidth(self.getOption('width'))
        if self.isOptionValid('label'):
            self._vtkplot.SetLabel(self.getOption('label'))

        vtk_marker = getattr(vtk.vtkPlotLine, self.getOption('marker').upper())
        self._vtkplot.SetMarkerStyle(vtk_marker)

        # Label
        if not self.isOptionValid('label'):
            self._vtkplot.LegendVisibilityOff()
        else:
            self._vtkplot.LegendVisibilityOn()

        # Handle single point data
        if self._vtktable.GetNumberOfRows() == 1:
            self._vtktable.InsertNextRow(self._vtktable.GetRow(0))

        # Tracers
        if self._xtracer:
            ax = self._vtkplot.GetYAxis()
            rmin = ax.GetMinimum()
            rmax = ax.GetMaximum()
            value = self._vtktable.GetValue(self._vtktable.GetNumberOfRows()-1, 0)
            self._xtracer.update(x=[value, value], y=[rmin, rmax])

        if self._ytracer:
            ax = self._vtkplot.GetXAxis()
            rmin = ax.GetMinimum()
            rmax = ax.GetMaximum()
            value = self._vtktable.GetValue(self._vtktable.GetNumberOfRows()-1, 1)
            self._ytracer.update(x=[rmin, rmax], y=[value, value])

    def getVTKPlotObjects(self):
        """
        Return the vtkPlotLine/vtkPlotPoints object. see Graph.py
        """
        objects = [self._vtkplot]
        if self._xtracer:
            objects.append(self._xtracer.getVTKPlot())
        if self._ytracer:
            objects.append(self._ytracer.getVTKPlot())
        return objects
nuclear-wizard/moose
python/chigger/graphs/Line.py
Python
lgpl-2.1
6,857
[ "MOOSE", "VTK" ]
0afc2548af1d1c3913300848dbfee0e15add91e7e2ec3a5ab8bfe5928b8fafb5
from vtk import *

reader1 = vtkXMLTreeReader()
reader1.SetFileName("vtkclasses.xml")
reader1.SetEdgePedigreeIdArrayName("tree edge")
reader1.GenerateVertexPedigreeIdsOff()
reader1.SetVertexPedigreeIdArrayName("id")

reader2 = vtkXMLTreeReader()
reader2.SetFileName("vtklibrary.xml")
reader2.SetEdgePedigreeIdArrayName("graph edge")
reader2.GenerateVertexPedigreeIdsOff()
reader2.SetVertexPedigreeIdArrayName("id")

view = vtkTreeRingView()
view.SetTreeFromInputConnection(reader2.GetOutputPort())
view.SetGraphFromInputConnection(reader1.GetOutputPort())
view.SetAreaColorArrayName("VertexDegree")
view.SetAreaHoverArrayName("id")
view.SetAreaLabelArrayName("id")
view.SetAreaLabelVisibility(True)
view.SetShrinkPercentage(0.02)
view.SetBundlingStrength(.5)
view.Update()
view.SetEdgeColorArrayName("tree edge")
view.SetColorEdges(True)

view2 = vtkTreeRingView()
view2.SetTreeFromInputConnection(reader1.GetOutputPort())
view2.SetGraphFromInputConnection(reader2.GetOutputPort())
view2.SetRootAngles(180., 360.)
view2.SetAreaColorArrayName("VertexDegree")
view2.SetAreaHoverArrayName("id")
view2.SetAreaLabelArrayName("id")
view2.SetAreaLabelVisibility(True)
view2.SetShrinkPercentage(0.01)
view2.Update()
view2.SetEdgeColorArrayName("graph edge")
view2.SetColorEdges(True)

# Apply a theme to the views
theme = vtkViewTheme.CreateMellowTheme()
view.ApplyViewTheme(theme)
view2.ApplyViewTheme(theme)
theme.FastDelete()

view.ResetCamera()
view.Render()

view2.ResetCamera()
view2.Render()

view.GetInteractor().Start()
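# --- Hedged usage note (editor's addition, not part of the upstream example) ---
# The script expects vtkclasses.xml and vtklibrary.xml in the working
# directory (their exact shipping location in the VTK tree is an
# assumption). Run with a VTK-enabled Python, for example:
#
#   $ vtkpython treering_hierarchical_view.py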
collects/VTK
Examples/Infovis/Python/treering_hierarchical_view.py
Python
bsd-3-clause
1,526
[ "VTK" ]
87a88b439b11e48348b7747fecbd5032e852f10da93eebddad9ea34255bad79e
##############################################################################
#
# Copyright (c) 2009-2013 by University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Open Software License version 3.0
# http://www.opensource.org/licenses/osl-3.0.php
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development since 2012 by School of Earth Sciences
#
##############################################################################

"""3D magnetic inversion example using netCDF data"""

# Filename for input data
DATASET='${inversion-file}'
# background magnetic flux density (B_north, B_east, B_vertical) in nano Tesla.
B_b = [${bb-north}, ${bb-east}, ${bb-vertical}]
# maximum depth (in meters)
DEPTH = ${max-depth}
# buffer zone above data (in meters; 6-10km recommended)
AIR = ${air-buffer}
# number of mesh elements in vertical direction (~1 element per 2km recommended)
NE_Z = ${vertical-mesh-elements}
# amount of horizontal padding (this affects end result, about 20% recommended)
PAD_X = ${x-padding}
PAD_Y = ${y-padding}
N_THREADS = ${n-threads}

####### Do not change anything below this line #######

import os
import subprocess
import sys

try:
    from esys.downunder import *
    from esys.escript import unitsSI as U
    from esys.weipa import saveSilo
except ImportError:
    line=["/opt/escript/bin/run-escript","-t" + str(N_THREADS)]+sys.argv
    ret=subprocess.call(line)
    sys.exit(ret)

def saveAndUpload(fn, **args):
    saveSilo(fn, **args)
    subprocess.call(["cloud", "upload", fn, fn, "--set-acl=public-read"])

#Convert entered nano Tesla to Tesla
B_b=[b*U.Nano*U.Tesla for b in B_b]

DATA_UNITS = U.Nano * U.Tesla

source=NetCdfData(DataSource.MAGNETIC, DATASET, scale_factor=DATA_UNITS)
db=DomainBuilder()
db.addSource(source)
db.setVerticalExtents(depth=DEPTH, air_layer=AIR, num_cells=NE_Z)
db.setFractionalPadding(PAD_X, PAD_Y)
db.setBackgroundMagneticFluxDensity(B_b)
db.fixSusceptibilityBelow(depth=DEPTH)

inv=MagneticInversion()
inv.setup(db)

B, w = db.getMagneticSurveys()[0]
susceptibility=inv.run()
saveAndUpload('result.silo', magnetic_anomaly=B, magnetic_weight=w, susceptibility=susceptibility)
print("Results saved in result.silo")

# Visualise result.silo using VisIt
import visit
visit.LaunchNowin()

saveatts = visit.SaveWindowAttributes()
saveatts.fileName = 'result-visit.png'
saveatts.family = 0
saveatts.width = 1024
saveatts.height = 768
saveatts.resConstraint = saveatts.NoConstraint
saveatts.outputToCurrentDirectory = 1
visit.SetSaveWindowAttributes(saveatts)

visit.OpenDatabase('result.silo')
visit.AddPlot('Contour', 'susceptibility')

c=visit.ContourAttributes()
c.colorType=c.ColorByColorTable
c.colorTableName = "hot"
visit.SetPlotOptions(c)

visit.DrawPlots()

v=visit.GetView3D()
v.viewNormal=(-0.554924, 0.703901, 0.443377)
v.viewUp=(0.272066, -0.3501, 0.896331)
visit.SetView3D(v)

visit.SaveWindow()
subprocess.call(["cloud", "upload", "result-visit.png", "result-visit.png", "--set-acl=public-read"])

visit.DeleteAllPlots()
visit.CloseDatabase('result.silo')
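# --- Hedged usage sketch (editor's addition, not part of the upstream template) ---
# The dollar-brace tokens above (inversion-file, bb-north, ...) are filled
# in server-side by the portal's script builder. One way to approximate
# that locally is a string.Template subclass whose idpattern admits the
# hyphenated keys; all values below are illustrative only:
#
#   from string import Template
#
#   class PortalTemplate(Template):
#       idpattern = r'[a-z][-a-z0-9]*'   # allow keys such as inversion-file
#
#   params = {"inversion-file": "magnetic.nc", "bb-north": 0.0,
#             "bb-east": 0.0, "bb-vertical": -45000.0, "max-depth": 40000.0,
#             "air-buffer": 6000.0, "vertical-mesh-elements": 25,
#             "x-padding": 0.2, "y-padding": 0.2, "n-threads": 4}
#   with open("escript-magnetic.py") as f:
#       rendered = PortalTemplate(f.read()).substitute(params)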
bencaradocdavies/vgml
src/main/resources/org/auscope/portal/server/scriptbuilder/templates/escript-magnetic.py
Python
gpl-3.0
3,212
[ "NetCDF", "VisIt" ]
a60e9ddcbfea21903b4f412c800b41741e2686b275c1eb39d6a04e1a3d5d9d68
#!/usr/bin/python #Copyright (C) 2011 by Forrest Sheng Bao http://fsbao.net # This software is licensed under MIT license. # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # version 0.1 Last update 2011-09-07 # two external librabies needed import nibabel import numpy # three system libraries needed import sys import struct import os # two nested functions to read FreeSurfer surface and curvature file def readSurf(filename): f = open(filename, "rb") f.seek(3) # skip the first 3 Bytes "Magic" number s = f.read(50) # the second field is string of creation information of variable length End2 = s.find('\n\n',0) # end of the second field is a '\n\n' f.seek(3+End2+2) # jump to immediate Byte after the creating information s = f.read(8) VertexCount, FaceCount = struct.unpack(">ii", s) Vertex, Face = [], [] for i in xrange(0, VertexCount): s = f.read(8) R, A = struct.unpack(">ff", s) f.seek(-4, os.SEEK_CUR) s = f.read(8) A, S = struct.unpack(">ff", s) Vertex.append([R,A,S]) # R, A, S are the coordinates of vertexes for i in xrange(0, FaceCount): s = f.read(8) V0, V1 = struct.unpack(">ii", s) f.seek(-4, os.SEEK_CUR) s = f.read(8) V1, V2 = struct.unpack(">ii", s) Face.append([V0, V1, V2]) return Vertex, Face def readCurv(filename): '''Read FreeSurfer Curvature and Convexity files ''' f = open(filename, "rb") f.seek(3) # skip the first 3 Bytes "Magic" number s = f.read(8) # get the VertexCount and FaceCount VertexCount, FaceCount = struct.unpack(">ii", s) Curvature = [0.0] s = f.read(8) ValsPerVertex, Curvature[0] = struct.unpack(">if", s) VertexCount -= 1 # because the first curvature value has been loaded while VertexCount > 1: s = f.read(8) VertexVal1, VertexVal2 = struct.unpack(">ff", s) Curvature += [VertexVal1, VertexVal2] VertexCount -= 2 if VertexCount != 0: # number of vertexes is even (NOT ODD!!!) 
        f.seek(-4, os.SEEK_CUR)  # backward 4 Bytes from current position
        s = f.read(8)
        VertexVal1, VertexVal2 = struct.unpack(">ff", s)
        Curvature.append(VertexVal2)
    f.close()
    return Curvature
# end of two helper functions

# now begins the "real" code
if len(sys.argv) < 5:  # the script itself plus 4 arguments are required
    print "Usage: python project.py volume_file surface_file curvature_file vtk_output"
    print "Example: python project.py LBottom.nii.gz lh.orig lh.curv LBottom.orig.vtk"
    print "For more usage information, check the wiki: http://code.google.com/p/mindboggle-utils/wiki/annot2vtk"
    exit(-1)

# get voxel coordinates
Img = nibabel.load(sys.argv[1])
Voxel = Img.get_data()
Affine = Img.get_affine()
Idx = numpy.nonzero(Voxel>0)
X, Y, Z = Idx
if len(X) != len(Y) or len(X) != len(Z) or len(Z) != len(Y):
    print "error in nonzero voxel dimension"
    exit(-1)
else:
    Vec = numpy.concatenate((numpy.array(Idx), numpy.ones((1,len(X)))))
    Loc = numpy.dot(Affine, Vec)[0:3]
# end of get voxel coordinates

# begin vertex coordinates and curvature
Vertexes, Faces = readSurf(sys.argv[2])
Curvature = readCurv(sys.argv[3])
# end of vertex coordinates and curvature

# project voxels onto surfaces
Projected = []  # to store the IDs of projected vertexes
for i in xrange(0, len(X)):  # for each voxel
    MiniDist, MiniVrtx = 10000, 0
    for j in xrange(0, len(Vertexes)):  # compare the voxel with all vertexes on the surface
        if Curvature[j] > 0:  # only compare with vertexes of positive curvatures
            Vec1 = numpy.array([Loc[0, i], Loc[1,i], Loc[2, i]])
            Vec2 = numpy.array(Vertexes[j])
            Dist = numpy.dot( (Vec1 - Vec2), (Vec1 - Vec2) )
            Dist = numpy.sqrt(Dist)
            if Dist < MiniDist:
                MiniDist = Dist
                MiniVrtx = j
    Projected.append(MiniVrtx)
# end of project voxels onto surfaces

# output projected voxels
Fp = open(sys.argv[4], 'w')
Fp.write('# vtk DataFile Version 2.0\n')
Fp.write('created by vol2surf_label_transfer.py of Mindboggle-utils http://code.google.com/p/mindboggle-utils/ \n')
Fp.write('ASCII\n')
Fp.write('DATASET POLYDATA\n')
Fp.write("POINTS " + str(len(Projected)) + " float \n")
for Vrtx in Projected:
    Fp.write( str(Vertexes[Vrtx][0]) + " " + str(Vertexes[Vrtx][1]) + " " + str(Vertexes[Vrtx][2]) +" \n" )
Fp.write("VERTICES " + str(len(Projected)) + " " + str(len(Projected) + 1) + " \n")
Fp.write(str(len(Projected)) + " ")
for i in xrange(0,len(Projected)):
    Fp.write(str(i) + " ")  # vertex indices start from 0
Fp.write("\n")
Fp.write('POINT_DATA ' + str(len(X)) + ' \n')
Fp.write('SCALARS label integer\n')
Fp.write('LOOKUP_TABLE label \n')
for i in xrange(0,len(X)):
    Fp.write(str(Voxel[X[i], Y[i], Z[i]]) + '\n')
# end of output projected voxels
binarybottle/mindboggle_sidelined
label_vol2surf.py
Python
apache-2.0
5,910
[ "VTK" ]
5e1f16bf25fd87a09b61ac2c678e1609e2681ecce680f08c2b712fc9f685d84d
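The readers above recover three values per record through overlapping 8-byte reads followed by `seek(-4)`. A minimal equivalent that reads all values in one call, shown only as a sketch of the same big-endian parsing; `read_floats_be` is a name introduced here:

import struct

def read_floats_be(fh, n):
    # Read n big-endian 4-byte floats in a single call; equivalent to the
    # read-8/seek(-4) pattern used in readSurf and readCurv above.
    data = fh.read(4 * n)
    if len(data) != 4 * n:
        raise EOFError("short read while parsing FreeSurfer file")
    return struct.unpack(">%df" % n, data)

# e.g. inside readSurf's vertex loop: R, A, S = read_floats_be(f, 3)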
"""Handle extraction of final files from processing pipelines into storage. """ import datetime import os import toolz as tz from bcbio import log, utils from bcbio.upload import shared, filesystem, galaxy, s3 from bcbio.pipeline import run_info import bcbio.pipeline.datadict as dd _approaches = {"filesystem": filesystem, "galaxy": galaxy, "s3": s3} def project_from_sample(sample): upload_config = sample.get("upload") if upload_config: approach = _approaches[upload_config.get("method", "filesystem")] for finfo in _get_files_project(sample, upload_config): approach.update_file(finfo, None, upload_config) return [[sample]] def from_sample(sample): """Upload results of processing from an analysis pipeline sample. """ upload_config = sample.get("upload") if upload_config: approach = _approaches[upload_config.get("method", "filesystem")] for finfo in _get_files(sample): approach.update_file(finfo, sample, upload_config) return [[sample]] # ## File information from sample def _get_files(sample): """Retrieve files for the sample, dispatching by analysis type. Each file is a dictionary containing the path plus associated metadata about the file and pipeline versions. """ analysis = sample.get("analysis") if analysis.lower() in ["variant", "snp calling", "variant2", "standard"]: return _get_files_variantcall(sample) elif analysis in ["RNA-seq"]: return _get_files_rnaseq(sample) elif analysis.lower() in ["smallrna-seq"]: return _get_files_srnaseq(sample) elif analysis.lower() in ["chip-seq"]: return _get_files_chipseq(sample) else: return [] def _get_files_rnaseq(sample): out = [] algorithm = sample["config"]["algorithm"] out = _maybe_add_summary(algorithm, sample, out) out = _maybe_add_alignment(algorithm, sample, out) out = _maybe_add_transcriptome_alignment(sample, out) out = _maybe_add_disambiguate(algorithm, sample, out) out = _maybe_add_counts(algorithm, sample, out) out = _maybe_add_cufflinks(algorithm, sample, out) out = _maybe_add_oncofuse(algorithm, sample, out) out = _maybe_add_rnaseq_variant_file(algorithm, sample, out) out = _maybe_add_sailfish_files(algorithm, sample, out) return _add_meta(out, sample) def _get_files_srnaseq(sample): out = [] algorithm = sample["config"]["algorithm"] out = _maybe_add_summary(algorithm, sample, out) out = _maybe_add_trimming(algorithm, sample, out) out = _maybe_add_seqbuster(algorithm, sample, out) return _add_meta(out, sample) def _get_files_chipseq(sample): out = [] algorithm = sample["config"]["algorithm"] out = _maybe_add_summary(algorithm, sample, out) out = _maybe_add_alignment(algorithm, sample, out) out = _maybe_add_peaks(algorithm, sample, out) return _add_meta(out, sample) def _add_meta(xs, sample=None, config=None): out = [] for x in xs: x["mtime"] = shared.get_file_timestamp(x["path"]) if sample and "sample" not in x: if isinstance(sample["name"], (tuple, list)): name = sample["name"][-1] else: name = "%s-%s" % (sample["name"], run_info.clean_name(sample["description"])) x["sample"] = name if config: if "fc_name" in config and "fc_date" in config: x["run"] = "%s_%s" % (config["fc_date"], config["fc_name"]) else: x["run"] = "project_%s" % datetime.datetime.now().strftime("%Y-%m-%d") out.append(x) return out def _get_files_variantcall(sample): """Return output files for the variant calling pipeline. 
""" out = [] algorithm = sample["config"]["algorithm"] out = _maybe_add_summary(algorithm, sample, out) out = _maybe_add_alignment(algorithm, sample, out) out = _maybe_add_disambiguate(algorithm, sample, out) out = _maybe_add_variant_file(algorithm, sample, out) out = _maybe_add_sv(algorithm, sample, out) out = _maybe_add_hla(algorithm, sample, out) out = _maybe_add_heterogeneity(algorithm, sample, out) out = _maybe_add_validate(algorithm, sample, out) return _add_meta(out, sample) def _maybe_add_validate(algorith, sample, out): for i, plot in enumerate(tz.get_in(("validate", "grading_plots"), sample, [])): ptype = os.path.splitext(plot)[-1].replace(".", "") out.append({"path": plot, "type": ptype, "ext": "validate%s" % ("" if i == 0 else "-%s" % (i + 1))}) return out def _maybe_add_rnaseq_variant_file(algorithm, sample, out): if sample.get("vrn_file"): out.append({"path": sample.get("vrn_file"), "type": "vcf", "ext": "vcf"}) return out def _maybe_add_variant_file(algorithm, sample, out): if sample.get("align_bam") is not None and sample.get("vrn_file"): for x in sample["variants"]: if not _sample_variant_file_in_population(x): out.extend(_get_variant_file(x, ("vrn_file",))) if x.get("bed_file"): out.append({"path": x["bed_file"], "type": "bed", "ext": "%s-callregions" % x["variantcaller"], "variantcaller": x["variantcaller"]}) if x.get("vrn_stats"): for extra, fname in x["vrn_stats"].items(): ext = utils.splitext_plus(fname)[-1].replace(".", "") out.append({"path": fname, "type": ext, "ext": "%s-%s" % (x["variantcaller"], extra), "variantcaller": x["variantcaller"]}) if x.get("germline") and os.path.exists(x["germline"]): out.extend(_get_variant_file(x, ("germline",), "-germline")) return out def _maybe_add_hla(algorithm, sample, out): if sample.get("align_bam") is not None and sample.get("hla") and "call_file" in sample["hla"]: out.append({"path": sample["hla"]["call_file"], "type": "csv", "ext": "hla-%s" % (sample["hla"]["hlacaller"])}) return out def _maybe_add_heterogeneity(algorithm, sample, out): for hetinfo in sample.get("heterogeneity", []): report = hetinfo.get("report") if report and os.path.exists(report): out.append({"path": report, "type": utils.splitext_plus(report)[-1].replace(".", "").replace("-", ""), "ext": "%s-report" % (hetinfo["caller"])}) for plot_type, plot_file in hetinfo.get("plots", {}).items(): if plot_file and os.path.exists(plot_file): out.append({"path": plot_file, "type": utils.splitext_plus(plot_file)[-1].replace(".", ""), "ext": "%s-%s-plot" % (hetinfo["caller"], plot_type)}) return out def _maybe_add_sv(algorithm, sample, out): if sample.get("align_bam") is not None and sample.get("sv"): for svcall in sample["sv"]: for key in ["vrn_file", "cnr", "cns", "cnr_bed", "cnr_bedgraph", "seg", "gainloss", "segmetrics", "vrn_bed", "vrn_bedpe"]: out.extend(_get_variant_file(svcall, (key,))) if "plot" in svcall: for plot_name, fname in svcall["plot"].items(): ext = os.path.splitext(fname)[-1].replace(".", "") out.append({"path": fname, "type": ext, "ext": "%s-%s" % (svcall["variantcaller"], plot_name), "variantcaller": svcall["variantcaller"]}) if "sv-validate" in sample: for vkey in ["csv", "plot", "df"]: vfile = tz.get_in(["sv-validate", vkey], sample) if vfile: to_u = [] if isinstance(vfile, dict): for svtype, fname in vfile.items(): to_u.append((fname, "-%s" % svtype)) else: to_u.append((vfile, "-%s" % vkey if vkey in ["df"] else "")) for vfile, ext in to_u: vext = os.path.splitext(vfile)[-1].replace(".", "") out.append({"path": vfile, "type": vext, "ext": 
"sv-validate%s" % ext}) return out def _sample_variant_file_in_population(x): """Check if a sample file is the same as the population file. This is true for batches where we don't extract into samples. '""" if "population" in x: a = _get_variant_file(x, ("population", "vcf")) b = _get_variant_file(x, ("vrn_file",)) if os.path.getsize(a[0]["path"]) == os.path.getsize(b[0]["path"]): return True return False def _get_variant_file(x, key, suffix=""): """Retrieve VCF file with the given key if it exists, handling bgzipped. """ out = [] fname = utils.get_in(x, key) upload_key = list(key) upload_key[-1] = "do_upload" do_upload = tz.get_in(tuple(upload_key), x, True) if fname and do_upload: if fname.endswith(".vcf.gz"): out.append({"path": fname, "type": "vcf.gz", "ext": "%s%s" % (x["variantcaller"], suffix), "variantcaller": x["variantcaller"]}) if utils.file_exists(fname + ".tbi"): out.append({"path": fname + ".tbi", "type": "vcf.gz.tbi", "index": True, "ext": "%s%s" % (x["variantcaller"], suffix), "variantcaller": x["variantcaller"]}) elif fname.endswith((".vcf", ".bed", ".bedpe", ".bedgraph", ".cnr", ".cns", ".cnn", ".txt", ".tsv")): ftype = utils.splitext_plus(fname)[-1][1:] if ftype == "txt": ftype = fname.split("-")[-1] out.append({"path": fname, "type": ftype, "ext": "%s%s" % (x["variantcaller"], suffix), "variantcaller": x["variantcaller"]}) return out def _maybe_add_sailfish_files(algorithm, sample, out): if dd.get_sailfish_dir(sample): out.append({"path": dd.get_sailfish_dir(sample), "type": "directory", "ext": os.path.join("sailfish", dd.get_sample_name(sample))}) return out def _maybe_add_summary(algorithm, sample, out): out = [] if "summary" in sample: if sample["summary"].get("pdf"): out.append({"path": sample["summary"]["pdf"], "type": "pdf", "ext": "summary"}) if sample["summary"].get("qc"): out.append({"path": sample["summary"]["qc"], "type": "directory", "ext": "qc"}) if utils.get_in(sample, ("summary", "researcher")): out.append({"path": sample["summary"]["researcher"], "type": "tsv", "sample": run_info.clean_name(utils.get_in(sample, ("upload", "researcher"))), "ext": "summary"}) return out def _maybe_add_alignment(algorithm, sample, out): if _has_alignment_file(algorithm, sample): for (fname, ext, isplus) in [(sample.get("work_bam"), "ready", False), (utils.get_in(sample, ("work_bam-plus", "disc")), "disc", True), (utils.get_in(sample, ("work_bam-plus", "sr")), "sr", True)]: if fname and os.path.exists(fname): if fname.endswith("bam"): ftype, fext = "bam", ".bai" elif fname.endswith("cram"): ftype, fext = "cram", ".crai" else: raise ValueError("Unexpected alignment file type %s" % fname) out.append({"path": fname, "type": ftype, "plus": isplus, "ext": ext}) if utils.file_exists(fname + fext): out.append({"path": fname + fext, "type": ftype + fext, "plus": isplus, "index": True, "ext": ext}) return out def _maybe_add_disambiguate(algorithm, sample, out): if "disambiguate" in sample: for extra_name, fname in sample["disambiguate"].items(): ftype = os.path.splitext(fname)[-1].replace(".", "") fext = ".bai" if ftype == "bam" else "" if fname and os.path.exists(fname): out.append({"path": fname, "type": ftype, "plus": True, "ext": "disambiguate-%s" % extra_name}) if fext and utils.file_exists(fname + fext): out.append({"path": fname + fext, "type": ftype + fext, "plus": True, "index": True, "ext": "disambiguate-%s" % extra_name}) return out def _maybe_add_transcriptome_alignment(sample, out): transcriptome_bam = dd.get_transcriptome_bam(sample) if transcriptome_bam and 
utils.file_exists(transcriptome_bam): out.append({"path": transcriptome_bam, "type": "bam", "ext": "transcriptome"}) return out def _maybe_add_counts(algorithm, sample, out): out.append({"path": sample["count_file"], "type": "counts", "ext": "ready"}) stats_file = os.path.splitext(sample["count_file"])[0] + ".stats" if utils.file_exists(stats_file): out.append({"path": stats_file, "type": "count_stats", "ext": "ready"}) return out def _maybe_add_oncofuse(algorithm, sample, out): if sample.get("oncofuse_file", None) is not None: out.append({"path": sample["oncofuse_file"], "type": "oncofuse_outfile", "ext": "ready"}) return out def _maybe_add_cufflinks(algorithm, sample, out): if "cufflinks_dir" in sample: out.append({"path": sample["cufflinks_dir"], "type": "directory", "ext": "cufflinks"}) return out def _maybe_add_trimming(algorithm, sample, out): fn = sample["collapse"] + "_size_stats" if utils.file_exists(fn): out.append({"path": fn, "type": "trimming_stats", "ext": "ready"}) return out def _maybe_add_seqbuster(algorithm, sample, out): fn = sample["seqbuster"] if utils.file_exists(fn): out.append({"path": fn, "type": "counts", "ext": "ready"}) fn = sample.get("seqbuster_novel") if fn and utils.file_exists(fn): out.append({"path": fn, "type": "counts", "ext": "ready"}) return out def _maybe_add_peaks(algorithm, sample, out): fns = sample.get("peaks_file", []) for fn in fns: if utils.file_exists(fn): name, ext = utils.splitext_plus(fn) caller = _get_peak_file(sample, name) out.append({"path": fn, "type": ext, "ext": caller}) return out def _get_peak_file(x, fn_name): """Get peak caller for this file name.""" for caller in dd.get_peakcaller(x): if fn_name.find(caller) > -1: return caller return os.path.basename(fn_name) def _has_alignment_file(algorithm, sample): return (((algorithm.get("aligner") or algorithm.get("realign") or algorithm.get("recalibrate") or algorithm.get("bam_clean") or algorithm.get("mark_duplicates")) and algorithm.get("merge_bamprep", True)) and sample.get("work_bam") is not None) # ## File information from full project def _get_files_project(sample, upload_config): """Retrieve output files associated with an entire analysis project. 
""" out = [{"path": sample["provenance"]["programs"]}] for fname in ["bcbio-nextgen.log", "bcbio-nextgen-commands.log"]: if os.path.exists(os.path.join(log.get_log_dir(sample["config"]), fname)): out.append({"path": os.path.join(log.get_log_dir(sample["config"]), fname), "type": "external_command_log", "ext": ""}) if "summary" in sample and sample["summary"].get("project"): out.append({"path": sample["summary"]["project"]}) mixup_check = tz.get_in(["summary", "mixup_check"], sample) if mixup_check: out.append({"path": sample["summary"]["mixup_check"], "type": "directory", "ext": "mixup_check"}) report = os.path.join(dd.get_work_dir(sample), "report") if utils.file_exists(report): out.append({"path": report, "type": "directory", "ext": "report"}) if sample.get("seqcluster", None): out.append({"path": sample["seqcluster"], "type": "directory", "ext": "seqcluster"}) for x in sample.get("variants", []): if "pop_db" in x: out.append({"path": x["pop_db"], "type": "sqlite", "variantcaller": x["variantcaller"]}) for x in sample.get("variants", []): if "population" in x: pop_db = tz.get_in(["population", "db"], x) if pop_db: out.append({"path": pop_db, "type": "sqlite", "variantcaller": x["variantcaller"]}) out.extend(_get_variant_file(x, ("population", "vcf"))) for x in sample.get("variants", []): if x.get("validate") and x["validate"].get("grading_summary"): out.append({"path": x["validate"]["grading_summary"]}) break if "coverage" in sample: cov_db = tz.get_in(["coverage", "summary"], sample) if cov_db: out.append({"path": cov_db, "type": "sqlite", "ext": "coverage"}) all_coverage = tz.get_in(["coverage", "all"], sample) if all_coverage: out.append({"path": all_coverage, "type": "bed", "ext": "coverage"}) if dd.get_mirna_counts(sample): out.append({"path": dd.get_mirna_counts(sample)}) if dd.get_isomir_counts(sample): out.append({"path": dd.get_isomir_counts(sample)}) if dd.get_novel_mirna_counts(sample): out.append({"path": dd.get_novel_mirna_counts(sample)}) if dd.get_novel_isomir_counts(sample): out.append({"path": dd.get_novel_isomir_counts(sample)}) if dd.get_combined_counts(sample): out.append({"path": dd.get_combined_counts(sample)}) if dd.get_annotated_combined_counts(sample): out.append({"path": dd.get_annotated_combined_counts(sample)}) if dd.get_combined_fpkm(sample): out.append({"path": dd.get_combined_fpkm(sample)}) if dd.get_combined_fpkm_isoform(sample): out.append({"path": dd.get_combined_fpkm_isoform(sample)}) if dd.get_transcript_assembler(sample): out.append({"path": dd.get_merged_gtf(sample)}) if dd.get_dexseq_counts(sample): out.append({"path": dd.get_dexseq_counts(sample)}) if dd.get_express_counts(sample): out.append({"path": dd.get_express_counts(sample)}) if dd.get_express_fpkm(sample): out.append({"path": dd.get_express_fpkm(sample)}) if dd.get_express_tpm(sample): out.append({"path": dd.get_express_tpm(sample)}) if dd.get_isoform_to_gene(sample): out.append({"path": dd.get_isoform_to_gene(sample)}) if dd.get_square_vcf(sample): out.append({"path": dd.get_square_vcf(sample)}) if dd.get_sailfish_tidy(sample): out.append({"path": dd.get_sailfish_tidy(sample)}) if dd.get_sailfish_transcript_tpm(sample): out.append({"path": dd.get_sailfish_transcript_tpm(sample)}) if dd.get_sailfish_gene_tpm(sample): out.append({"path": dd.get_sailfish_gene_tpm(sample)}) return _add_meta(out, config=upload_config)
gifford-lab/bcbio-nextgen
bcbio/upload/__init__.py
Python
mit
20,702
[ "Galaxy" ]
849b26251e8bfa0dede486f08210787941e407d03b1ff08c737b4abdc05007a4
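Each `_maybe_add_*` helper above appends one small dict per output file, which `_add_meta` then stamps with `mtime`, `sample` and `run` before an upload approach consumes it. The shape of such a record, with illustrative values, and a trivial filter of the kind an upload backend might apply:

# Shape of one record emitted by _get_variant_file above (values invented):
finfo = {"path": "/work/Sample1-gatk.vcf.gz",
         "type": "vcf.gz",
         "ext": "gatk",
         "variantcaller": "gatk"}

def files_of_type(file_infos, ftype):
    # Select records by their declared type, e.g. files_of_type(out, "bam").
    return [f for f in file_infos if f.get("type") == ftype]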
#!/usr/bin/env python

from fbxclass import *

all_classes = get_all_classes_from_file()

root_classes = [cls for cls in all_classes if cls.name == 'FbxObject']
fbxobject = root_classes[0]

def traverse(cls, s=None):
    if s is None:
        s = set()
    s.add(cls)
    for cc in cls.children:
        traverse(cc, s)
    return s

all_classes = traverse(fbxobject)

# generate stubs for all classes
classes_by_name = {}
for cls in all_classes:
    name = cls.name
    name = name.split('<', 1)[0]
    if name not in classes_by_name:
        classes_by_name[name] = set()
    classes_by_name[name].add(cls)

print('using System;')
print('using System.Collections.Generic;')
print('')
print('namespace FbxSharp')
print('{')
print(' public partial class Visitor')  # plain literal: the .format(cls.name) here had no placeholder
print(' {')
for cls in all_classes:
    varname = 'obj'  # cls.name.replace('Fbx', '').lower()
    print(' public virtual void Visit({} {}) {{ }}'.format(cls.name, varname))
print('')
print(' /*********************************/')
print('')
print(' public void Accept(object obj, '
      'ISet<object> visitedObjects=null)')
print(' {')
print(' if (obj == null) return;')
print(' if (visitedObjects == null) visitedObjects = new HashSet<object>();')
print(' if (visitedObjects.Contains(obj)) return;')
print(' visitedObjects.Add(obj);')
print('')
for cls in root_classes:
    print(' if (obj is {})'.format(cls.name))
    print(' Accept{}(({})obj,'
          ' visitedObjects);'.format(cls.name, cls.name))
print(' }')

def print_accept_method(cls):
    varname = 'obj'  # cls.name.replace('Fbx', '').lower()
    print('')
    print(' protected void Accept{}({} {}, '
          'ISet<object> visitedObjects=null)'.format(cls.name, cls.name, varname))
    print(' {')
    print(' Visit({});'.format(varname))
    print('')
    print(' _Accept{}({}, visitedObjects);'.format(cls.name, varname))
    print('')
    for cc in cls.children:
        print(' if (obj is {})'.format(cc.name))
        print(' Accept{}(({})obj,'
              ' visitedObjects);'.format(cc.name, cc.name))
    print(' }')

    for cc in cls.children:
        print_accept_method(cc)

for cls in root_classes:
    print_accept_method(cls)

print(' }')
print('}')
print('')
izrik/FbxSharp
tools/generate_visitor.py
Python
lgpl-2.1
2,585
[ "VisIt" ]
06bf29d0b484ff935fb20d1df45f966e472a99f77f6ff80392be8c97af29f4a0
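The generator above emits one `Visit` overload per class plus `Accept*` methods that call `Visit` at every level of the hierarchy while descending to the most derived type. The same double-dispatch idea in a few lines of Python, with a made-up two-class hierarchy standing in for the real FBX classes:

class Visitor(object):
    def accept(self, obj):
        # Invoke the handler for each class in the hierarchy, base first,
        # mirroring the chained Accept*/Visit calls in the generated C#.
        for cls in reversed(type(obj).__mro__):
            handler = getattr(self, 'visit_' + cls.__name__, None)
            if handler is not None:
                handler(obj)

class FbxObject(object): pass
class FbxNode(FbxObject): pass  # illustrative subset of the hierarchy

class NamePrinter(Visitor):
    def visit_FbxObject(self, obj): print('FbxObject level')
    def visit_FbxNode(self, obj): print('FbxNode level')

NamePrinter().accept(FbxNode())  # prints both levels, base first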
#!/usr/bin/env python ''' xml_comparer.py Created by Anne Pajon on 20 Mar 2012 Copyright (c) 2012 Cancer Research UK - Cambridge Research Institute. This source file is licensed under the Academic Free License version 3.0 available at http://www.opensource.org/licenses/AFL-3.0. Permission is hereby granted to reproduce, translate, adapt, alter, transform, modify, or arrange this source file (the "Original Work"); to distribute or communicate copies of it under any license of your choice that does not contradict the terms and conditions; to perform or display the Original Work publicly. THE ORIGINAL WORK IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS AND WITHOUT WARRANTY, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY OF THE ORIGINAL WORK IS WITH YOU. Script to compare main tool_conf.xml with existing one, remove unwanted sections and add specific ones: - remove unwanted sections from main file (optional) - add specific sections to main file (optional) - check for sections that are not in main file from local file - check for new sections and tools within each section http://www.boddie.org.uk/python/XML_intro.html http://www.w3.org/TR/REC-DOM-Level-1/level-one-core.html#ID-1590626202 ''' import optparse import sys import logging import xml.dom.minidom as minidom def removeComment(node): for child in node.childNodes: if child.nodeType == minidom.Node.COMMENT_NODE: child.parentNode.removeChild(child) def main(): # logging configuration logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) # get the options parser = optparse.OptionParser() parser.add_option("-m", "--main", dest="main", action="store", help="path to Galaxy tool_conf.xml.main") parser.add_option("-c", "--config", dest="config", action="store", help="path to tool_conf.xml") parser.add_option("-e", "--extra", dest="extra", action="store", help="path to xml file containing the extra sections to add") parser.add_option("-r", "--remove", dest="remove", action="store", help="comma separated list of section ids to remove. 
CRI current list: regVar,hyphy,motifs,clustal,tax_manipulation,hgv,EMBOSSLite,cshl_library_information,ngs_mapping,samtools,indel_analysis,peak_calling,ngs-rna-tools,picard_beta,rgdat,rgqcplot,rgmodel,ngs,rgenetics") (options, args) = parser.parse_args() for option in ['main', 'config']: if getattr(options, option) == None: print "Please supply a --%s parameter.\n" % (option) parser.print_help() sys.exit() # set section to be removed in final file # ncbi_blast_plus_tools,NGS_QC,solexa_tools,ngs-simulation,gatk,variant_detection if options.remove: unwanted_sections = options.remove.split(',') else: unwanted_sections = None # parse tool_conf.xml.main xml = minidom.parse(options.main) toolbox = xml.getElementsByTagName('toolbox')[0] removeComment(toolbox) sections = toolbox.getElementsByTagName('section') labels = toolbox.getElementsByTagName('label') # parse tool_conf.xml local_xml = minidom.parse(options.config) local_toolbox = local_xml.getElementsByTagName('toolbox')[0] removeComment(local_toolbox) local_sections = local_toolbox.getElementsByTagName('section') # parse tool_conf_extra.xml if options.extra: cri_xml = minidom.parse(options.extra) cri_toolbox = cri_xml.getElementsByTagName('toolbox')[0] removeComment(cri_toolbox) else: cri_toolbox = None if unwanted_sections: # check that unwanted_sections are in main file for id in unwanted_sections: matching_sections = [section for section in sections if section.getAttribute('id') == id] matching_labels = [label for label in labels if label.getAttribute('id') == id] if (not matching_sections) and (not matching_labels): logging.warning('>>> %s: Unwanted section/label not found in %s. Update list of unwanted items.' % (id, options.main)) # remove unwanted sections from main file for section in sections: removeComment(section) section_id = section.getAttribute('id') if section_id in unwanted_sections: section.parentNode.removeChild(section) # remove unwanted labels from main file for label in labels: label_id = label.getAttribute('id') if label_id in unwanted_sections: label.parentNode.removeChild(label) # add specific sections to main file if cri_toolbox: for child in cri_toolbox.childNodes: if child.nodeType != minidom.Element.TEXT_NODE: removeComment(child) toolbox.appendChild(child) # update sections list after removing/adding some child elements sections = toolbox.getElementsByTagName('section') # check for sections that are not in main file for local_section in local_sections: removeComment(local_section) local_section_id = local_section.getAttribute('id') matching_sections = [section for section in sections if section.getAttribute('id') == local_section_id] if not matching_sections: logging.warning('--- %s: Section from %s is not in %s. It will be ignored.' % (local_section_id, options.config, options.main)) # check for new sections and tools within each section for section in sections: section_id = section.getAttribute('id') matching_sections = [local_section for local_section in local_sections if local_section.getAttribute('id') == section_id] if not matching_sections: logging.warning('+++ %s: New Section. Please update list of unwanted sections, otherwise it will be added.' % section_id) else: tools = section.getElementsByTagName('tool') local_tools = matching_sections[0].getElementsByTagName('tool') for tool in tools: tool_file = tool.getAttribute('file') matching_tools = [local_tool for local_tool in local_tools if local_tool.getAttribute('file') == tool_file] if not matching_tools: logging.warning('+++ %s: New Tool in section %s. 
It will be added.' % (tool_file, section_id)) for local_tool in local_tools: local_tool_file = local_tool.getAttribute('file') matching_tools = [tool for tool in tools if tool.getAttribute('file') == local_tool_file] if not matching_tools: logging.warning('--- %s: Tool from section %s is not in %s. It will be ignored.' % (local_tool_file, section_id, options.main)) # print xml file pretty_print = '\n'.join([line for line in xml.toprettyxml(indent=' '*3).split('\n') if line.strip()]) print pretty_print if __name__ == '__main__': main()
crukci-bioinformatics/galaxy
admin/scripts/xml_comparer.py
Python
mit
7,138
[ "Galaxy" ]
e1ac845dcd603d69c27bc1a628c4a5c06d149c16bf50fe3a68b82e6c3bf3f4c1
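The comparisons above repeatedly match `<section>` elements by their `id` attribute across the two files. The core of that matching, reduced to a set difference (file names are placeholders):

import xml.dom.minidom as minidom

def section_ids(path):
    # Collect the id attribute of every <section> in a tool_conf file.
    doc = minidom.parse(path)
    return set(s.getAttribute('id') for s in doc.getElementsByTagName('section'))

# e.g. sections present only in the main file:
# section_ids('tool_conf.xml.main') - section_ids('tool_conf.xml')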
# ============================================================================ # # Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved. # www.conceptive.be / project-camelot@conceptive.be # # This file is part of the Camelot Library. # # This file may be used under the terms of the GNU General Public # License version 2.0 as published by the Free Software Foundation # and appearing in the file license.txt included in the packaging of # this file. Please review this information to ensure GNU # General Public Licensing requirements will be met. # # If you are unsure which license is appropriate for your use, please # visit www.python-camelot.com or contact project-camelot@conceptive.be # # This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE # WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. # # For use of this library in commercial applications, please contact # project-camelot@conceptive.be # # ============================================================================ from camelot.view.controls import editors from one2manydelegate import One2ManyDelegate class ManyToManyDelegate(One2ManyDelegate): """ .. image:: ../_static/manytomany.png """ def createEditor(self, parent, option, index): editor = editors.ManyToManyEditor(parent=parent, **self.kwargs) self.setEditorData(editor, index) editor.editingFinished.connect( self.commitAndCloseEditor ) return editor #@QtCore.pyqtSlot() # not yet converted to new style sig slot because sender doesn't work # in certain versions of pyqt def commitAndCloseEditor(self): editor = self.sender() self.commitData.emit(editor) def setModelData(self, editor, model, index): if editor.getModel(): model.setData(index, editor.getModel().collection_getter)
kurtraschke/camelot
camelot/view/controls/delegates/manytomanydelegate.py
Python
gpl-2.0
1,899
[ "VisIt" ]
cce5607bd0b783d4eb3bcbde7d3d514eab39789122edba241f3742f195653175
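The `self.sender()` call above exists only because some PyQt versions mishandled the sender in new-style slots. One common alternative, sketched here rather than taken from the library, binds the editor into the slot at connect time so `sender()` is never needed:

from functools import partial

def createEditor(self, parent, option, index):
    editor = editors.ManyToManyEditor(parent=parent, **self.kwargs)
    self.setEditorData(editor, index)
    # Capture the editor explicitly instead of recovering it via sender().
    editor.editingFinished.connect(partial(self.commitData.emit, editor))
    return editor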
#!/usr/bin/python
#
# Author : Pierre-Jean Coudert
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

"""
py2app/py2exe build script for Pwytter.

Will automatically ensure that all build prerequisites are available
via ez_setup

Usage (Mac OS X):
    python setup.py py2app

Usage (Windows):
    python setup.py py2exe
"""
import ez_setup
ez_setup.use_setuptools()

from setuptools import setup
import os, sys
import glob, fnmatch
from pwytter import __version__ as VERSION

mainscript = 'pwytter.py'

def rec_glob(path,*masks):
    d={}
    for root, dirs, files in os.walk(path):
        # collect matches per directory; previously l accumulated across
        # iterations, duplicating earlier files on every later pass
        l=[]
        for m in masks:
            l=l+ glob.glob(os.path.join(root,m))
        for f in l:
            p,n= os.path.split(f)
            if p not in d.keys():
                d[p]=[]
            d[p]=d[p]+[f]
    return d

if sys.platform == 'darwin':
    extra_options = dict(
        setup_requires = ['py2app'],
        app = [mainscript],
        # Cross-platform applications generally expect sys.argv to
        # be used for opening files.
        options = dict(
            py2app = dict(
                argv_emulation=True,
                resources = ["theme","locale","media"],
                #distdir = 'MacBinaries'
            ),
            plist = dict(
                CFBundleName='Pwytter',
                CFBundleIconFile='media/pwytter.icns',
                #CFBundleDocumentTypes=[
                #    dict(
                #        CFBundleTypeName=DB_FILE_TYPE,
                #        CFBundleTypeRole='Editor',
                #        NSDocumentClass='PackageDatabase',
                #        # CFBundleTypeIconFile='Package Database.icns',
                #        CFBundleTypeExtensions = ['packman', 'plist' ],
                #        CFBundleTypeOSTypes=[],
                #    ),
                #],
                CFBundleGetInfoString=VERSION+', Pierre-Jean Coudert 2007, GNU GPL Licence',
                CFBundleIdentifier='com.pwytter',
                CFBundleShortVersionString=VERSION,
                CFBundleVersion=VERSION,
                # We need at least Panther, it may work on Jaguar but I've not yet
                # verified if it should work.
                LSMinimumSystemVersion='10.3.0',
                # We're not apple-scriptable
                NSAppleScriptEnabled='No',
            ),
        ),
    )
elif sys.platform == 'win32':
    import py2exe
    extra_options = dict(
        setup_requires = ['py2exe'],
        windows = [ {"script": mainscript,
                     "icon_resources": [(1, "media\\pwytter.ico")] }],
    )
else:
    extra_options = dict(
        # Normally unix-like platforms will use "setup.py install"
        # and install the main script as such
        scripts=[mainscript],
    )

setup(
    name = "pwytter",
    version = VERSION,
    #install_requires = ["simplejson", "PIL"],
    #packages
    packages=['twclient'],
    package_dir={'twclient': 'twclient', 'simplejson': 'twclient/simplejson'},
    #package_data={'twclient': glob.glob('twclient/doc/*.*')},
    py_modules = ['pwytter','tkBalloon','pwParam','pwTools','pwSplashScreen', 'pwTheme'],
    data_files=[("text", glob.glob("*.txt")),
                ("theme", glob.glob("theme\\*.pwt")),
                ("media", glob.glob("media\\*.png") + glob.glob("media\\*.gif")
                        + glob.glob("media\\*.ico") + glob.glob("media\\*.icns"))
               ] +rec_glob("locale","*.po","*.mo").items(),
    #
    #This next part is for the Cheese Shop.
    author='Pierre-Jean Coudert',
    author_email='coudert@free.fr',
    description='A python client for Twitter',
    long_description='A python client for Twitter. 
Portable User Interface in Tk.', license='GPL 2', platforms=['any'], url='http://www.pwytter.com', download_url='http://www.pwytter.com/download/', keywords='twitter client python tkinter', classifiers = [ 'Development Status :: 4 - Beta', 'Environment :: MacOS X', 'Environment :: Win32 (MS Windows)', 'Environment :: X11 Applications', 'Intended Audience :: End Users/Desktop', 'Intended Audience :: Developers', 'License :: OSI Approved :: GNU General Public License (GPL)', 'Operating System :: Microsoft :: Windows', 'Operating System :: MacOS :: MacOS X', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python', 'Topic :: Communications :: Chat', 'Topic :: Internet' ], **extra_options )
frac/twitter-autoblock
setup.py
Python
gpl-2.0
4,983
[ "Jaguar" ]
affec81caf9f747b9661267c735706207c33c6d7495c984878e19055a7997b04
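`rec_glob(...).items()` above feeds `data_files`, which expects `(directory, [files])` pairs. Their shape, with invented paths for illustration:

# Illustrative only: what setup()'s data_files receives, matching the
# pairs produced by rec_glob('locale', '*.po', '*.mo').items().
example_data_files = [
    ('text', ['README.txt']),
    ('locale/fr', ['locale/fr/pwytter.po', 'locale/fr/pwytter.mo']),
]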
""" This migration script renames the sequencer table to 'external_service' table and creates a association table, 'request_type_external_service_association' and populates it. The 'sequencer_id' foreign_key from the 'request_type' table is removed. The 'sequencer_type_id' column is renamed to 'external_service_type_id' in the renamed table 'external_service'. Finally, adds a foreign key to the external_service table in the sample_dataset table and populates it. """ from sqlalchemy import * from sqlalchemy.orm import * from migrate import * from migrate.changeset import * from sqlalchemy.exc import * from galaxy.model.custom_types import * from galaxy.util.json import loads, dumps import datetime now = datetime.datetime.utcnow import logging log = logging.getLogger( __name__ ) metadata = MetaData() #migrate_engine = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) ) def nextval( table, col='id' ): if migrate_engine.name == 'postgres': return "nextval('%s_%s_seq')" % ( table, col ) elif migrate_engine.name == 'mysql' or migrate_engine.name == 'sqlite': return "null" else: raise Exception( 'Unable to convert data for unknown database type: %s' % migrate_engine.name ) def upgrade(migrate_engine): metadata.bind = migrate_engine print __doc__ # Load existing tables metadata.reflect() # add a foreign key to the external_service table in the sample_dataset table try: SampleDataset_table = Table( "sample_dataset", metadata, autoload=True ) except NoSuchTableError, e: SampleDataset_table = None log.debug( "Failed loading table 'sample_dataset'" ) if SampleDataset_table is None: return try: Sequencer_table = Table( "sequencer", metadata, autoload=True ) except NoSuchTableError, e: Sequencer_table = None log.debug( "Failed loading table 'sequencer'" ) if Sequencer_table is None: return # create the column. 
Call it external_services_id as the table 'sequencer' is # going to be renamed to 'external_service' try: col = Column( "external_service_id", Integer, index=True ) col.create( SampleDataset_table, index_name="ix_sample_dataset_external_service_id" ) assert col is SampleDataset_table.c.external_service_id except Exception, e: log.debug( "Creating column 'external_service_id' in the 'sample_dataset' table failed: %s" % ( str( e ) ) ) if migrate_engine.name != 'sqlite': # Add the foreign key constraint try: cons = ForeignKeyConstraint( [SampleDataset_table.c.external_service_id], [Sequencer_table.c.id], name='sample_dataset_external_services_id_fk' ) # Create the constraint cons.create() except Exception, e: log.debug( "Adding foreign key constraint 'sample_dataset_external_services_id_fk' to table 'sample_dataset' failed: %s" % ( str( e ) ) ) # populate the column cmd = "SELECT sample_dataset.id, request_type.sequencer_id " \ + " FROM sample_dataset, sample, request, request_type " \ + " WHERE sample.id=sample_dataset.sample_id and request.id=sample.request_id and request.request_type_id=request_type.id " \ + " ORDER BY sample_dataset.id" result = migrate_engine.execute( cmd ) for r in result: sample_dataset_id = int(r[0]) sequencer_id = int(r[1]) cmd = "UPDATE sample_dataset SET external_service_id='%i' where id=%i" % ( sequencer_id, sample_dataset_id ) migrate_engine.execute( cmd ) # load request_type table try: RequestType_table = Table( "request_type", metadata, autoload=True ) except NoSuchTableError: RequestType_table = None log.debug( "Failed loading table request_type" ) if RequestType_table is None: return # rename 'sequencer' table to 'external_service' cmd = "ALTER TABLE sequencer RENAME TO external_service" migrate_engine.execute( cmd ) try: ExternalServices_table = Table( "external_service", metadata, autoload=True ) except NoSuchTableError, e: ExternalServices_table = None log.debug( "Failed loading table 'external_service'" ) if ExternalServices_table is None: return # if running postgres then rename the primary key sequence too if migrate_engine.name in ['postgres', 'postgresql']: cmd = "ALTER TABLE sequencer_id_seq RENAME TO external_service_id_seq" migrate_engine.execute( cmd ) # rename 'sequencer_type_id' column to 'external_service_type_id' in the table 'external_service' # create the column as 'external_service_type_id' try: col = Column( "external_service_type_id", TrimmedString( 255 ) ) col.create( ExternalServices_table ) assert col is ExternalServices_table.c.external_service_type_id except Exception, e: log.debug( "Creating column 'external_service_type_id' in the 'external_service' table failed: %s" % ( str( e ) ) ) # populate this new column cmd = "UPDATE external_service SET external_service_type_id=sequencer_type_id" migrate_engine.execute( cmd ) # remove the 'sequencer_type_id' column try: ExternalServices_table.c.sequencer_type_id.drop() except Exception, e: log.debug( "Deleting column 'sequencer_type_id' from the 'external_service' table failed: %s" % ( str( e ) ) ) # create 'request_type_external_service_association' table RequestTypeExternalServiceAssociation_table = Table( "request_type_external_service_association", metadata, Column( "id", Integer, primary_key=True ), Column( "request_type_id", Integer, ForeignKey( "request_type.id" ), index=True ), Column( "external_service_id", Integer, ForeignKey( "external_service.id" ), index=True ) ) try: RequestTypeExternalServiceAssociation_table.create() except Exception, e: log.debug( "Creating 
request_type_external_service_association table failed: %s" % str( e ) ) try: RequestTypeExternalServiceAssociation_table = Table( "request_type_external_service_association", metadata, autoload=True ) except NoSuchTableError: RequestTypeExternalServiceAssociation_table = None log.debug( "Failed loading table request_type_external_service_association" ) if RequestTypeExternalServiceAssociation_table is None: return # populate 'request_type_external_service_association' table cmd = "SELECT id, sequencer_id FROM request_type ORDER BY id ASC" result = migrate_engine.execute( cmd ) results_list = result.fetchall() # Proceed only if request_types exists if len( results_list ): for row in results_list: request_type_id = row[0] sequencer_id = row[1] if not sequencer_id: sequencer_id = 'null' cmd = "INSERT INTO request_type_external_service_association VALUES ( %s, %s, %s )" cmd = cmd % ( nextval( 'request_type_external_service_association' ), request_type_id, sequencer_id ) migrate_engine.execute( cmd ) # drop the 'sequencer_id' column in the 'request_type' table # sqlite does not support dropping columns if migrate_engine.name == 'sqlite': # In sqlite, create a temp table without the column that needs to be removed. # then copy all the rows from the original table and finally rename the temp table RequestTypeTemp_table = Table( 'request_type_temp', metadata, Column( "id", Integer, primary_key=True), Column( "create_time", DateTime, default=now ), Column( "update_time", DateTime, default=now, onupdate=now ), Column( "name", TrimmedString( 255 ), nullable=False ), Column( "desc", TEXT ), Column( "request_form_id", Integer, ForeignKey( "form_definition.id" ), index=True ), Column( "sample_form_id", Integer, ForeignKey( "form_definition.id" ), index=True ), Column( "deleted", Boolean, index=True, default=False ) ) try: RequestTypeTemp_table.create() except Exception, e: log.debug( "Creating request_type_temp table failed: %s" % str( e ) ) # insert all the rows from the request table to the request_temp table cmd = \ "INSERT INTO request_type_temp " + \ "SELECT id," + \ "create_time," + \ "update_time," + \ "name," + \ "desc," + \ "request_form_id," + \ "sample_form_id," + \ "deleted " + \ "FROM request_type;" migrate_engine.execute( cmd ) # delete the 'request_type' table try: RequestType_table.drop() except Exception, e: log.debug( "Dropping request_type table failed: %s" % str( e ) ) # rename table request_temp to request cmd = "ALTER TABLE request_type_temp RENAME TO request_type" migrate_engine.execute( cmd ) else: try: RequestType_table.c.sequencer_id.drop() except Exception, e: log.debug( "Deleting column 'sequencer_id' from the 'request_type' table failed: %s" % ( str( e ) ) ) def downgrade(migrate_engine): metadata.bind = migrate_engine # Load existing tables metadata.reflect() # load sequencer & request_type table try: RequestType_table = Table( "request_type", metadata, autoload=True ) except NoSuchTableError: RequestType_table = None log.debug( "Failed loading table request_type" ) if RequestType_table is None: return try: ExternalServices_table = Table( "external_service", metadata, autoload=True ) except NoSuchTableError, e: ExternalServices_table = None log.debug( "Failed loading table 'external_service'" ) if ExternalServices_table is None: return try: RequestTypeExternalServiceAssociation_table = Table( "request_type_external_service_association", metadata, autoload=True ) except NoSuchTableError: RequestTypeExternalServiceAssociation_table = None log.debug( "Failed loading table 
request_type_external_service_association" ) # create the 'sequencer_id' column in the 'request_type' table try: col = Column( "sequencer_id", Integer, ForeignKey( "external_service.id" ), nullable=True, index=True ) col.create( RequestType_table ) assert col is RequestType_table.c.sequencer_id except Exception, e: log.debug( "Creating column 'sequencer_id' in the 'request_type' table failed: %s" % ( str( e ) ) ) # populate 'sequencer_id' column in the 'request_type' table from the # 'request_type_external_service_association' table cmd = "SELECT request_type_id, external_service_id FROM request_type_external_service_association ORDER BY id ASC" result = migrate_engine.execute( cmd ) results_list = result.fetchall() # Proceed only if request_types exists if len( results_list ): for row in results_list: request_type_id = row[0] external_service_id = row[1] cmd = "UPDATE request_type SET sequencer_id=%i WHERE id=%i" % ( external_service_id, request_type_id ) migrate_engine.execute( cmd ) # remove the 'request_type_external_service_association' table if RequestTypeExternalServiceAssociation_table is not None: try: RequestTypeExternalServiceAssociation_table.drop() except Exception, e: log.debug( "Deleting 'request_type_external_service_association' table failed: %s" % str( e ) ) # rename 'external_service_type_id' column to 'sequencer_type_id' in the table 'external_service' # create the column 'sequencer_type_id' try: col = Column( "sequencer_type_id", TrimmedString( 255 ) ) col.create( ExternalServices_table ) assert col is ExternalServices_table.c.sequencer_type_id except Exception, e: log.debug( "Creating column 'sequencer_type_id' in the 'external_service' table failed: %s" % ( str( e ) ) ) # populate this new column cmd = "UPDATE external_service SET sequencer_type_id=external_service_type_id" migrate_engine.execute( cmd ) # remove the 'external_service_type_id' column try: ExternalServices_table.c.external_service_type_id.drop() except Exception, e: log.debug( "Deleting column 'external_service_type_id' from the 'external_service' table failed: %s" % ( str( e ) ) ) # rename the 'external_service' table to 'sequencer' cmd = "ALTER TABLE external_service RENAME TO sequencer" migrate_engine.execute( cmd ) # if running postgres then rename the primary key sequence too if migrate_engine.name == 'postgres': cmd = "ALTER SEQUENCE external_service_id_seq RENAME TO sequencer_id_seq" migrate_engine.execute( cmd ) # drop the 'external_service_id' column in the 'sample_dataset' table try: SampleDataset_table = Table( "sample_dataset", metadata, autoload=True ) except NoSuchTableError, e: SampleDataset_table = None log.debug( "Failed loading table 'sample_dataset'" ) if SampleDataset_table is None: return try: SampleDataset_table.c.external_service_id.drop() except Exception, e: log.debug( "Deleting column 'external_service_id' from the 'sample_dataset' table failed: %s" % ( str( e ) ) )
mikel-egana-aranguren/SADI-Galaxy-Docker
galaxy-dist/lib/galaxy/model/migrate/versions/0068_rename_sequencer_to_external_services.py
Python
gpl-3.0
14,154
[ "Galaxy" ]
b8c78ca674457561276cf2ae0bf49c49aad17882a6b51f8ae8a183f12c6754f2
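Because SQLite cannot drop a column, the upgrade above copies the surviving columns into a temp table and renames it back. The general shape of that idiom, with table and column names invented for illustration:

# Generic SQLite drop-a-column workaround, as used in the migration above:
statements = [
    'CREATE TABLE t_temp (id INTEGER PRIMARY KEY, name TEXT);',
    'INSERT INTO t_temp SELECT id, name FROM t;',  # omit the dropped column
    'DROP TABLE t;',
    'ALTER TABLE t_temp RENAME TO t;',
]
# for stmt in statements:
#     migrate_engine.execute(stmt)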
from random import shuffle from typing import Optional import aiohttp from redbot.core.i18n import Translator, cog_i18n from redbot.core import checks, Config, commands from redbot.core.commands import UserInputOptional _ = Translator("Image", __file__) @cog_i18n(_) class Image(commands.Cog): """Image related commands.""" default_global = {"imgur_client_id": None} def __init__(self, bot): super().__init__() self.bot = bot self.config = Config.get_conf(self, identifier=2652104208, force_registration=True) self.config.register_global(**self.default_global) self.session = aiohttp.ClientSession() self.imgur_base_url = "https://api.imgur.com/3/" def cog_unload(self): self.session.detach() async def red_delete_data_for_user(self, **kwargs): """ Nothing to delete """ return async def initialize(self) -> None: """Move the API keys from cog stored config to core bot config if they exist.""" imgur_token = await self.config.imgur_client_id() if imgur_token is not None: if not await self.bot.get_shared_api_tokens("imgur"): await self.bot.set_shared_api_tokens("imgur", client_id=imgur_token) await self.config.imgur_client_id.clear() @commands.group(name="imgur") async def _imgur(self, ctx): """Retrieve pictures from Imgur. Make sure to set the Client ID using `[p]imgurcreds`. """ pass @_imgur.command(name="search", usage="[count] <terms...>") async def imgur_search(self, ctx, count: UserInputOptional[int] = 1, *, term: str): """Search Imgur for the specified term. - `[count]`: How many images should be returned (maximum 5). Defaults to 1. - `<terms...>`: The terms used to search Imgur. """ if count < 1 or count > 5: await ctx.send(_("Image count has to be between 1 and 5.")) return url = self.imgur_base_url + "gallery/search/time/all/0" params = {"q": term} imgur_client_id = (await ctx.bot.get_shared_api_tokens("imgur")).get("client_id") if not imgur_client_id: await ctx.send( _( "A Client ID has not been set! Please set one with `{prefix}imgurcreds`." ).format(prefix=ctx.clean_prefix) ) return headers = {"Authorization": "Client-ID {}".format(imgur_client_id)} async with self.session.get(url, headers=headers, params=params) as search_get: data = await search_get.json() if data["success"]: results = data["data"] if not results: await ctx.send(_("Your search returned no results.")) return shuffle(results) msg = _("Search results...\n") for r in results[:count]: msg += r["gifv"] if "gifv" in r else r["link"] msg += "\n" await ctx.send(msg) else: await ctx.send( _("Something went wrong. Error code is {code}.").format(code=data["status"]) ) @_imgur.command(name="subreddit") async def imgur_subreddit( self, ctx, subreddit: str, count: Optional[int] = 1, sort_type: str = "top", window: str = "day", ): """Get images from a subreddit. - `<subreddit>`: The subreddit to get images from. - `[count]`: The number of images to return (maximum 5). Defaults to 1. - `[sort_type]`: New, or top results. Defaults to top. - `[window]`: The timeframe, can be the past day, week, month, year or all. Defaults to day. 
""" if count < 1 or count > 5: await ctx.send(_("Image count has to be between 1 and 5.")) return sort_type = sort_type.lower() window = window.lower() if sort_type == "new": sort = "time" elif sort_type == "top": sort = "top" else: await ctx.send(_("Only 'new' and 'top' are a valid sort type.")) return if window not in ("day", "week", "month", "year", "all"): await ctx.send_help() return imgur_client_id = (await ctx.bot.get_shared_api_tokens("imgur")).get("client_id") if not imgur_client_id: await ctx.send( _( "A Client ID has not been set! Please set one with `{prefix}imgurcreds`." ).format(prefix=ctx.clean_prefix) ) return links = [] headers = {"Authorization": "Client-ID {}".format(imgur_client_id)} url = self.imgur_base_url + "gallery/r/{}/{}/{}/0".format(subreddit, sort, window) async with self.session.get(url, headers=headers) as sub_get: data = await sub_get.json() if data["success"]: items = data["data"] if items: for item in items[:count]: link = item["gifv"] if "gifv" in item else item["link"] links.append("{}\n{}".format(item["title"], link)) if links: await ctx.send("\n".join(links)) else: await ctx.send(_("No results found.")) else: await ctx.send( _("Something went wrong. Error code is {code}.").format(code=data["status"]) ) @checks.is_owner() @commands.command() async def imgurcreds(self, ctx): """Explain how to set imgur API tokens.""" message = _( "To get an Imgur Client ID:\n" "1. Login to an Imgur account.\n" "2. Visit this page https://api.imgur.com/oauth2/addclient.\n" "3. Enter a name for your application.\n" "4. Select *Anonymous usage without user authorization* for the auth type.\n" "5. Set the authorization callback URL to `https://localhost`.\n" "6. Leave the app website blank.\n" "7. Enter a valid email address and a description.\n" "8. Check the captcha box and click next.\n" "9. Your Client ID will be on the next page.\n" "10. Run the command `{prefix}set api imgur client_id <your_client_id_here>`.\n" ).format(prefix=ctx.clean_prefix) await ctx.maybe_send_embed(message) @commands.guild_only() @commands.command(usage="<keywords...>") async def gif(self, ctx, *, keywords): """Retrieve the first search result from Giphy. - `<keywords...>`: The keywords used to search Giphy. """ giphy_api_key = (await ctx.bot.get_shared_api_tokens("GIPHY")).get("api_key") if not giphy_api_key: await ctx.send( _("An API key has not been set! Please set one with `{prefix}giphycreds`.").format( prefix=ctx.clean_prefix ) ) return url = "http://api.giphy.com/v1/gifs/search" async with self.session.get(url, params={"api_key": giphy_api_key, "q": keywords}) as r: result = await r.json() if r.status == 200: if result["data"]: await ctx.send(result["data"][0]["url"]) else: await ctx.send(_("No results found.")) else: await ctx.send(_("Error contacting the Giphy API.")) @commands.guild_only() @commands.command(usage="<keywords...>") async def gifr(self, ctx, *, keywords): """Retrieve a random GIF from a Giphy search. - `<keywords...>`: The keywords used to generate a random GIF. """ giphy_api_key = (await ctx.bot.get_shared_api_tokens("GIPHY")).get("api_key") if not giphy_api_key: await ctx.send( _("An API key has not been set! 
Please set one with `{prefix}giphycreds`.").format( prefix=ctx.clean_prefix ) ) return url = "http://api.giphy.com/v1/gifs/random" async with self.session.get(url, params={"api_key": giphy_api_key, "tag": keywords}) as r: result = await r.json() if r.status == 200: if result["data"]: await ctx.send(result["data"]["url"]) else: await ctx.send(_("No results found.")) else: await ctx.send(_("Error contacting the API.")) @checks.is_owner() @commands.command() async def giphycreds(self, ctx): """Explains how to set GIPHY API tokens.""" message = _( "To get a GIPHY API Key:\n" "1. Login to (or create) a GIPHY account.\n" "2. Visit this page: https://developers.giphy.com/dashboard.\n" "3. Press *Create an App*.\n" "4. Click *Select API*, then *Next Step*.\n" "5. Add an app name, for example *Red*.\n" "6. Add an app description, for example *Used for Red's image cog*.\n" "7. Click *Create App*. You'll need to agree to the GIPHY API Terms.\n" "8. Copy the API Key.\n" "9. In Discord, run the command {command}.\n" ).format( command="`{prefix}set api GIPHY api_key {placeholder}`".format( prefix=ctx.clean_prefix, placeholder=_("<your_api_key_here>") ) ) await ctx.maybe_send_embed(message)
palmtree5/Red-DiscordBot
redbot/cogs/image/image.py
Python
gpl-3.0
9,620
[ "VisIt" ]
4c481afc8983e0f848afaef228b3a3aeb2d212f6500cefdbdf231b69040d456a
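The cog's `imgur search` command reduces to one authenticated GET against the gallery endpoint. A standalone sketch of that request; the endpoint and header shape are taken from the code above, and the client id is a placeholder:

import asyncio
import aiohttp

async def imgur_search(client_id, term):
    # Same endpoint and Client-ID header as the imgur_search command above.
    url = 'https://api.imgur.com/3/gallery/search/time/all/0'
    headers = {'Authorization': 'Client-ID {}'.format(client_id)}
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=headers, params={'q': term}) as r:
            return await r.json()

# asyncio.run(imgur_search('<your_client_id_here>', 'cats'))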
# (C) British Crown Copyright 2010 - 2013, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """ Provides an interface to manage URI scheme support in iris. """ import glob import os.path import types import re import warnings import collections import iris.fileformats import iris.fileformats.dot import iris.cube import iris.exceptions #: Used by callbacks to specify that the given cube should not be loaded. NO_CUBE = 'NOCUBE' CALLBACK_DEPRECATION_MSG = "Callback functions with a return value are deprecated." # Saving routines, indexed by file extension. class _SaversDict(dict): """A dictionary that can only have string keys with no overlap.""" def __setitem__(self, key, value): if not isinstance(key, basestring): raise ValueError("key is not a string") if key in self.keys(): raise ValueError("A saver already exists for", key) for k in self.keys(): if k.endswith(key) or key.endswith(k): raise ValueError("key %s conflicts with existing key %s" % (key, k)) dict.__setitem__(self, key, value) _savers = _SaversDict() def run_callback(callback, cube, field, filename): """ Runs the callback mechanism given the appropriate arguments. Args: * callback: A function to add metadata from the originating field and/or URI which obeys the following rules: 1. Function signature must be: ``(cube, field, filename)`` 2. Must not return any value - any alterations to the cube must be made by reference 3. If the cube is to be rejected the callback must raise an :class:`iris.exceptions.IgnoreCubeException` .. note:: It is possible that this function returns None for certain callbacks, the caller of this function should handle this case. """ #call the custom uri cm func, if provided, for every loaded cube if callback is None: return cube try: result = callback(cube, field, filename) # Callback can make changes to cube by reference except iris.exceptions.IgnoreCubeException: return None else: if result is not None: #raise TypeError("Callback functions must have no return value.") # no deprecation support method if isinstance(result, iris.cube.Cube): # no-op result = result elif result == NO_CUBE: result = None else: # Invalid return type, raise exception raise TypeError("Callback function returned an unhandled data type.") # Warn the user that callbacks that return something are deprecated warnings.warn(CALLBACK_DEPRECATION_MSG) return result else: return cube def decode_uri(uri, default='file'): r''' Decodes a single URI into scheme and scheme-specific parts. In addition to well-formed URIs, it also supports bare file paths. Both Windows and UNIX style paths are accepted. .. 
testsetup::

        from iris.io import *

    Examples:
        >>> from iris.io import decode_uri
        >>> print decode_uri('http://www.thing.com:8080/resource?id=a:b')
        ('http', '//www.thing.com:8080/resource?id=a:b')

        >>> print decode_uri('file:///data/local/dataZoo/...')
        ('file', '///data/local/dataZoo/...')

        >>> print decode_uri('/data/local/dataZoo/...')
        ('file', '/data/local/dataZoo/...')

        >>> print decode_uri('file:///C:\data\local\dataZoo\...')
        ('file', '///C:\\data\\local\\dataZoo\\...')

        >>> print decode_uri('C:\data\local\dataZoo\...')
        ('file', 'C:\\data\\local\\dataZoo\\...')

        >>> print decode_uri('dataZoo/...')
        ('file', 'dataZoo/...')

    '''
    # Catch bare UNIX and Windows paths
    i = uri.find(':')
    if i == -1 or re.match('[a-zA-Z]:', uri):
        scheme = default
        part = uri
    else:
        scheme = uri[:i]
        part = uri[i + 1:]

    return scheme, part


def expand_filespecs(file_specs):
    """
    Find all matching file paths from a list of file-specs.

    Args:

    * file_specs (iterable of string):
        File paths which may contain '~' elements or wildcards.

    Returns:
        A list of matching file paths. If any of the file-specs matches no
        existing files, an exception is raised.

    """
    # Remove any hostname component - currently unused
    filenames = [os.path.expanduser(fn[2:] if fn.startswith('//') else fn)
                 for fn in file_specs]

    # Try to expand all filenames as globs
    glob_expanded = {fn : sorted(glob.glob(fn)) for fn in filenames}

    # If any of the specs expanded to an empty list then raise an error
    value_lists = glob_expanded.viewvalues()
    if not all(value_lists):
        raise IOError("One or more of the files specified did not exist %s." %
                      ["%s expanded to %s" % (pattern, expanded if expanded else "empty")
                       for pattern, expanded in glob_expanded.iteritems()])

    return sum(value_lists, [])


def load_files(filenames, callback):
    """
    Takes a list of filenames which may also be globs, and optionally a
    callback function, and returns a generator of Cubes from the given files.

    .. note::

        Typically, this function should not be called directly; instead, the
        intended interface for loading is :func:`iris.load`.

    """
    all_file_paths = expand_filespecs(filenames)

    # Create default dict mapping iris format handler to its associated filenames
    handler_map = collections.defaultdict(list)
    for fn in all_file_paths:
        with open(fn) as fh:
            handling_format_spec = iris.fileformats.FORMAT_AGENT.get_spec(os.path.basename(fn), fh)
            handler_map[handling_format_spec].append(fn)

    # Call each iris format handler with the appropriate filenames
    for handling_format_spec, fnames in handler_map.iteritems():
        for cube in handling_format_spec.handler(fnames, callback):
            yield cube


def load_http(urls, callback):
    """
    Takes a list of urls and a callback function, and returns a generator
    of Cubes from the given URLs.

    .. note::

        Typically, this function should not be called directly; instead, the
        intended interface for loading is :func:`iris.load`.

    """
    # Create default dict mapping iris format handler to its associated filenames
    handler_map = collections.defaultdict(list)
    for url in urls:
        handling_format_spec = iris.fileformats.FORMAT_AGENT.get_spec(url, None)
        handler_map[handling_format_spec].append(url)

    # Call each iris format handler with the appropriate filenames
    for handling_format_spec, fnames in handler_map.iteritems():
        for cube in handling_format_spec.handler(fnames, callback):
            yield cube


def _check_init_savers():
    # TODO: Raise a ticket to resolve the cyclic import error that requires
    # us to initialise this on first use. Probably merge io and fileformats. 
if "pp" not in _savers: _savers.update({"pp": iris.fileformats.pp.save, "nc": iris.fileformats.netcdf.save, "dot": iris.fileformats.dot.save, "dotpng": iris.fileformats.dot.save_png, "grib2": iris.fileformats.grib.save_grib2}) def add_saver(file_extension, new_saver): """ Add a custom saver to the Iris session. Args: * file_extension - A string such as "pp" or "my_format". * new_saver - A function of the form ``my_saver(cube, target)``. See also :func:`iris.io.save` """ # Make sure it's a func with 2+ args if not hasattr(new_saver, "__call__") or new_saver.__code__.co_argcount < 2: raise ValueError("Saver routines must be callable with 2+ arguments.") # Try to add this saver. Invalid keys will be rejected. _savers[file_extension] = new_saver def find_saver(filespec): """ Find the saver function appropriate to the given filename or extension. Args: * filespec - A string such as "my_file.pp" or "PP". Returns: A save function or None. Save functions can be passed to :func:`iris.io.save`. """ _check_init_savers() matches = [ext for ext in _savers if filespec.lower().endswith('.' + ext) or filespec.lower() == ext] # Multiple matches could occur if one of the savers included a '.': # e.g. _savers = {'.dot.png': dot_png_saver, '.png': png_saver} if len(matches) > 1: fmt = "Multiple savers found for %r: %s" matches = ', '.join(map(repr, matches)) raise ValueError(fmt % (filespec, matches)) return _savers[matches[0]] if matches else None def save(source, target, saver=None, **kwargs): """ Save one or more Cubes to file (or other writable). Iris currently supports three file formats for saving, which it can recognise by filename extension: * netCDF - the Unidata network Common Data Format: * see :func:`iris.fileformats.netcdf.save` * GRIB2 - the WMO GRIdded Binary data format; * see :func:`iris.fileformats.grib.save_grib2` * PP - the Met Office UM Post Processing Format. * see :func:`iris.fileformats.pp.save` A custom saver can be provided to the function to write to a different file format. Args: * source - A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or sequence of cubes. * target - A filename (or writable, depending on file format). When given a filename or file, Iris can determine the file format. Kwargs: * saver - Optional. Specifies the save function to use. If omitted, Iris will attempt to determine the format. This keyword can be used to implement a custom save format. Function form must be: ``my_saver(cube, target)`` plus any custom keywords. It is assumed that a saver will accept an ``append`` keyword if it's file format can handle multiple cubes. See also :func:`iris.io.add_saver`. All other keywords are passed through to the saver function; see the relevant saver documentation for more information on keyword arguments. 
Examples:: # Save a cube to PP iris.save(my_cube, "myfile.pp") # Save a cube list to a PP file, appending to the contents of the file # if it already exists iris.save(my_cube_list, "myfile.pp", append=True) # Save a cube to netCDF, defaults to NETCDF4 file format iris.save(my_cube, "myfile.nc") # Save a cube list to netCDF, using the NETCDF4_CLASSIC storage option iris.save(my_cube_list, "myfile.nc", netcdf_format="NETCDF3_CLASSIC") """ # Determine format from filename if isinstance(target, basestring) and saver is None: saver = find_saver(target) elif isinstance(target, types.FileType) and saver is None: saver = find_saver(target.name) elif isinstance(saver, basestring): saver = find_saver(saver) if saver is None: raise ValueError("Cannot save; no saver") # Single cube? if isinstance(source, iris.cube.Cube): saver(source, target, **kwargs) # CubeList or sequence of cubes? elif (isinstance(source, iris.cube.CubeList) or (isinstance(source, (list, tuple)) and all([type(i) == iris.cube.Cube for i in source]))): # Only allow cubelist saving for those fileformats that are capable. if not 'iris.fileformats.netcdf' in saver.__module__: # Make sure the saver accepts an append keyword if not "append" in saver.__code__.co_varnames: raise ValueError("Cannot append cubes using saver function " "'%s' in '%s'" % (saver.__code__.co_name, saver.__code__.co_filename)) # Force append=True for the tail cubes. Don't modify the incoming # kwargs. kwargs = kwargs.copy() for i, cube in enumerate(source): if i != 0: kwargs['append'] = True saver(cube, target, **kwargs) # Netcdf saver. else: saver(source, target, **kwargs) else: raise ValueError("Cannot save; non Cube found in source")
kwilliams-mo/iris
lib/iris/io/__init__.py
Python
gpl-3.0
13,332
[ "NetCDF" ]
f268b589a2c20e7131a0289a10a2948977640002dbf8c93137a0994ce7bb6196
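# The run_callback contract documented in iris/io/__init__.py above is:
# mutate the cube by reference, return nothing, and raise
# iris.exceptions.IgnoreCubeException to reject a cube. A minimal sketch of a
# conforming callback; the 'my_data.pp' path and the 'air_temperature' filter
# are illustrative assumptions, not values from the original file.

import iris
import iris.exceptions


def my_callback(cube, field, filename):
    # Record provenance on the cube (mutation by reference, no return value).
    cube.attributes['source_file'] = filename
    # Reject anything that is not the phenomenon we want.
    if cube.name() != 'air_temperature':
        raise iris.exceptions.IgnoreCubeException()

# cubes = iris.load('my_data.pp', callback=my_callback)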
#!/usr/bin/env python # Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import glob import logging import os import pyauto_functional # Must be imported before pyauto import pyauto class ThemesTest(pyauto.PyUITest): """TestCase for Themes.""" def Debug(self): """Test method for experimentation. This method will not run automatically. """ while True: raw_input('Hit <enter> to dump info.. ') self.pprint(self.GetThemeInfo()) def testSetTheme(self): """Verify theme install.""" self.assertFalse(self.GetThemeInfo()) # Verify there's no theme at startup crx_file = os.path.abspath( os.path.join(self.DataDir(), 'extensions', 'theme.crx')) self.SetTheme(crx_file) # Verify "theme installed" infobar shows up self.assertTrue(self.WaitForInfobarCount(1)) theme = self.GetThemeInfo() self.assertEqual('camo theme', theme['name']) self.assertTrue(self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars']) def testThemeInFullScreen(self): """Verify theme can be installed in FullScreen mode.""" self.ApplyAccelerator(pyauto.IDC_FULLSCREEN ) self.assertFalse(self.GetThemeInfo()) # Verify there's no theme at startup crx_file = os.path.abspath( os.path.join(self.DataDir(), 'extensions', 'theme.crx')) self.SetTheme(crx_file) # Verify "theme installed" infobar shows up self.assertTrue(self.WaitForInfobarCount(1)) theme = self.GetThemeInfo() self.assertEqual('camo theme', theme['name']) def testThemeReset(self): """Verify theme reset.""" crx_file = os.path.abspath( os.path.join(self.DataDir(), 'extensions', 'theme.crx')) self.SetTheme(crx_file) self.assertTrue(self.ResetToDefaultTheme()) self.assertFalse(self.GetThemeInfo()) def _ReturnCrashingThemes(self, themes, group_size, urls): """Install the given themes in groups of group_size and return the group of themes that crashes (if any). Note: restarts the browser at the beginning of the function. Args: themes: A list of themes to install. group_size: The number of themes to install at one time. urls: The list of urls to visit. """ self.RestartBrowser() curr_theme = 0 num_themes = len(themes) while curr_theme < num_themes: logging.debug('New group of %d themes.' % group_size) group_end = curr_theme + group_size this_group = themes[curr_theme:group_end] # Apply each theme in this group. for theme in this_group: logging.debug('Applying theme: %s' % theme) self.SetTheme(theme) for url in urls: self.NavigateToURL(url) def _LogAndReturnCrashing(): logging.debug('Crashing themes: %s' % this_group) return this_group # Assert that there is at least 1 browser window. try: num_browser_windows = self.GetBrowserWindowCount() except: return _LogAndReturnCrashing() else: if not num_browser_windows: return _LogAndReturnCrashing() curr_theme = group_end # None of the themes crashed. return None def Runner(self): """Apply themes; verify that theme has been applied and browser doesn't crash. This does not get run automatically. To run: python themes.py themes.ThemesTest.Runner Note: this test requires that a directory of crx files called 'themes' exists in the data directory. 
""" themes_dir = os.path.join(self.DataDir(), 'themes') urls_file = os.path.join(self.DataDir(), 'urls.txt') assert os.path.exists(themes_dir), \ 'The dir "%s" must exist' % os.path.abspath(themes_dir) group_size = 20 num_urls_to_visit = 100 urls = [l.rstrip() for l in open(urls_file).readlines()[:num_urls_to_visit]] failed_themes = glob.glob(os.path.join(themes_dir, '*.crx')) while failed_themes and group_size: failed_themes = self._ReturnCrashingThemes(failed_themes, group_size, urls) group_size = group_size // 2 self.assertFalse(failed_themes, 'Theme(s) in failing group: %s' % failed_themes) if __name__ == '__main__': pyauto_functional.Main()
aYukiSekiguchi/ACCESS-Chromium
chrome/test/functional/themes.py
Python
bsd-3-clause
4,381
[ "VisIt" ]
4da3d737b44f8e72e1288f32d0baf886bd1d81dd0e2b1b8843c2dd73ddaeafeb
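# The Runner test above isolates a crashing theme by installing themes in
# groups and halving the group size whenever a group brings the browser down.
# A pyauto-free sketch of that same narrowing strategy; `crashes` is a
# hypothetical stand-in for the install-and-navigate check, not part of the
# original test.

def first_crashing_group(items, group_size, crashes):
    # Mirrors _ReturnCrashingThemes: return the first failing group, else None.
    for i in range(0, len(items), group_size):
        group = items[i:i + group_size]
        if crashes(group):
            return group
    return None


def isolate_crashers(items, crashes, group_size=20):
    # Mirrors Runner: keep re-testing the failing group with halved sizes.
    failing = list(items)
    while failing and group_size:
        failing = first_crashing_group(failing, group_size, crashes) or []
        group_size //= 2
    return failing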
import json import requests import unittest2 as unittest class TestLauncher(unittest.TestCase): def test_basic(self): data = json.dumps({'application': 'basic'}) req = requests.post( 'http://localhost:8080/vtk', data=data ) self.assertTrue(req.ok) resp = req.json() self.assertIn('id', resp) self.assertIn('sessionURL', resp) self.assertNotIn('error', resp) req = requests.get( 'http://localhost:8080/vtk/' + resp['id'] ) self.assertTrue(req.ok) stat = req.json() self.assertNotIn('error', stat) self.assertEqual(stat['id'], resp['id']) req = requests.delete( 'http://localhost:8080/vtk/' + resp['id'] ) self.assertNotEqual(req.status_code, 404) req = requests.get( 'http://localhost:8080/vtk/' + resp['id'] ) self.assertFalse(req.ok) def test_bad_application(self): req = requests.post( 'http://localhost:8080/vtk', data='{"application": "unknown"}' ) self.assertFalse(req.ok) def test_start_fail(self): req = requests.post( 'http://localhost:8080/vtk', data='{"application": "fail"}' ) self.assertIn('error', req.json()) def test_start_timeout(self): req = requests.post( 'http://localhost:8080/vtk', data='{"application": "timeout"}' ) self.assertIn('error', req.json()) if __name__ == '__main__': unittest.main()
jbeezley/vtkweb-launcher
test/test_launcher.py
Python
bsd-3-clause
1,612
[ "VTK" ]
6bcfc846a6047d776de7c7b3a994938ddb2916582bc809cba04f928df76c97e3
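# The tests above pin down the launcher's REST contract: POST /vtk with an
# application name returns an 'id' and a 'sessionURL', GET /vtk/<id> reports
# status, and DELETE /vtk/<id> tears the session down. A minimal client
# sketch against that contract; the localhost:8080 endpoint is the test
# suite's own assumption.

import json
import requests

BASE = 'http://localhost:8080/vtk'


def start_session(application):
    # POST the application name; return (id, sessionURL) or raise on error.
    resp = requests.post(BASE, data=json.dumps({'application': application}))
    body = resp.json()
    if not resp.ok or 'error' in body:
        raise RuntimeError(body.get('error', 'launch failed'))
    return body['id'], body['sessionURL']


def stop_session(session_id):
    # DELETE removes the session; a later GET on the same id should then fail.
    requests.delete(BASE + '/' + session_id)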
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

from __future__ import division, unicode_literals

import os

import six
from six.moves import filter, map

from collections import defaultdict

from monty.design_patterns import cached_class
from monty.serialization import loadfn

from pymatgen.io.vasp.sets import MITRelaxSet, MPRelaxSet
from pymatgen.core.periodic_table import Element
from pymatgen.analysis.structure_analyzer import oxide_type, sulfide_type

import abc

"""
This module implements Compatibility corrections for mixing runs of different
functionals.
"""

MODULE_DIR = os.path.dirname(os.path.abspath(__file__))

__author__ = "Shyue Ping Ong, Anubhav Jain, Stephen Dacek, Sai Jayaraman"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 19, 2012"


class CompatibilityError(Exception):
    """
    Exception class for Compatibility. Raised by attempting correction
    on incompatible calculation.
    """

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg


class Correction(six.with_metaclass(abc.ABCMeta, object)):
    """
    A Correction class is a pre-defined scheme for correcting a computed
    entry based on the type and chemistry of the structure and the
    calculation parameters. All Correction classes must implement a
    correct_entry method.
    """

    @abc.abstractmethod
    def get_correction(self, entry):
        """
        Returns correction for a single entry.

        Args:
            entry: A ComputedEntry object.

        Returns:
            The energy correction to be applied.

        Raises:
            CompatibilityError if entry is not compatible.
        """
        return

    def correct_entry(self, entry):
        """
        Corrects a single entry.

        Args:
            entry: A ComputedEntry object.

        Returns:
            A processed entry.

        Raises:
            CompatibilityError if entry is not compatible.
        """
        entry.correction += self.get_correction(entry)
        return entry


class PotcarCorrection(Correction):
    """
    Checks that POTCARs are valid within a pre-defined input set. This
    ensures that calculations performed using different InputSets are not
    compared against each other.

    Entry.parameters must contain a "potcar_symbols" key that is a list of
    all POTCARs used in the run. For example, for an Fe2O3 run using
    Materials Project parameters, this would look like
    entry.parameters["potcar_symbols"] = ['PAW_PBE Fe_pv 06Sep2000',
    'PAW_PBE O 08Apr2002'].

    Args:
        input_set: InputSet object used to generate the runs (used to check
            for correct potcar symbols)

        check_hash (bool): If true, uses the potcar hash to check for valid
            potcars. If false, uses the potcar symbol (less reliable).
            Defaults to False.

    Raises:
        ValueError if entry does not contain "potcar_symbols" key.
        CompatibilityError if wrong potcar symbols
    """

    def __init__(self, input_set, check_hash=False):
        potcar_settings = input_set.CONFIG["POTCAR"]
        if isinstance(list(potcar_settings.values())[-1], dict):
            if check_hash:
                self.valid_potcars = {k: d["hash"]
                                      for k, d in potcar_settings.items()}
            else:
                self.valid_potcars = {k: d["symbol"]
                                      for k, d in potcar_settings.items()}
        else:
            if check_hash:
                raise ValueError('Cannot check hashes of potcars,'
                                 ' hashes are not set')
            else:
                self.valid_potcars = {k: d
                                      for k, d in potcar_settings.items()}

        self.input_set = input_set
        self.check_hash = check_hash

    def get_correction(self, entry):
        if self.check_hash:
            if entry.parameters.get("potcar_spec"):
                psp_settings = set([d.get("hash")
                                    for d in entry.parameters["potcar_spec"]
                                    if d])
            else:
                raise ValueError('Cannot check hash '
                                 'without potcar_spec field')
        else:
            if entry.parameters.get("potcar_spec"):
                psp_settings = set([d.get("titel").split()[1]
                                    for d in entry.parameters["potcar_spec"]
                                    if d])
            else:
                psp_settings = set([sym.split()[1]
                                    for sym in entry.parameters["potcar_symbols"]
                                    if sym])

        if {self.valid_potcars[str(el)]
                for el in entry.composition.elements} != psp_settings:
            raise CompatibilityError('Incompatible potcar')
        return 0

    def __str__(self):
        return "{} Potcar Correction".format(self.input_set.__name__)


@cached_class
class GasCorrection(Correction):
    """
    Correct gas energies to obtain the right formation energies. Note that
    this depends on calculations being run within the same input set.

    Args:
        config_file: Path to the selected compatibility.yaml config file.
    """
    def __init__(self, config_file):
        c = loadfn(config_file)
        self.name = c['Name']
        self.cpd_energies = c['Advanced']['CompoundEnergies']

    def get_correction(self, entry):
        comp = entry.composition

        rform = entry.composition.reduced_formula
        if rform in self.cpd_energies:
            return self.cpd_energies[rform] * comp.num_atoms \
                - entry.uncorrected_energy

        return 0

    def __str__(self):
        return "{} Gas Correction".format(self.name)


@cached_class
class AnionCorrection(Correction):
    """
    Correct anion energies to obtain the right formation energies. Note that
    this depends on calculations being run within the same input set.

    Args:
        config_file: Path to the selected compatibility.yaml config file.
        correct_peroxide: Specify whether peroxide/superoxide/ozonide
            corrections are to be applied or not.
    """
    def __init__(self, config_file, correct_peroxide=True):
        c = loadfn(config_file)
        self.oxide_correction = c['OxideCorrections']
        self.sulfide_correction = c.get('SulfideCorrections', defaultdict(
            float))
        self.name = c['Name']
        self.correct_peroxide = correct_peroxide

    def get_correction(self, entry):
        comp = entry.composition
        if len(comp) == 1:  # Skip element entry
            return 0

        correction = 0

        # Check for sulfide corrections
        if Element("S") in comp:
            sf_type = "sulfide"
            if entry.data.get("sulfide_type"):
                sf_type = entry.data["sulfide_type"]
            elif hasattr(entry, "structure"):
                sf_type = sulfide_type(entry.structure)
            if sf_type in self.sulfide_correction:
                correction += self.sulfide_correction[sf_type] * comp["S"]

        # Check for oxide, peroxide, superoxide, and ozonide corrections.
if Element("O") in comp: if self.correct_peroxide: if entry.data.get("oxide_type"): if entry.data["oxide_type"] in self.oxide_correction: ox_corr = self.oxide_correction[ entry.data["oxide_type"]] correction += ox_corr * comp["O"] if entry.data["oxide_type"] == "hydroxide": ox_corr = self.oxide_correction["oxide"] correction += ox_corr * comp["O"] elif hasattr(entry, "structure"): ox_type, nbonds = oxide_type(entry.structure, 1.05, return_nbonds=True) if ox_type in self.oxide_correction: correction += self.oxide_correction[ox_type] * \ nbonds elif ox_type == "hydroxide": correction += self.oxide_correction["oxide"] * \ comp["O"] else: rform = entry.composition.reduced_formula if rform in UCorrection.common_peroxides: correction += self.oxide_correction["peroxide"] * \ comp["O"] elif rform in UCorrection.common_superoxides: correction += self.oxide_correction["superoxide"] * \ comp["O"] elif rform in UCorrection.ozonides: correction += self.oxide_correction["ozonide"] * \ comp["O"] elif Element("O") in comp.elements and len(comp.elements)\ > 1: correction += self.oxide_correction['oxide'] * \ comp["O"] else: correction += self.oxide_correction['oxide'] * comp["O"] return correction def __str__(self): return "{} Anion Correction".format(self.name) @cached_class class AqueousCorrection(Correction): """ This class implements aqueous phase compound corrections for elements and H2O. Args: config_file: Path to the selected compatibility.yaml config file. """ def __init__(self, config_file): c = loadfn(config_file) self.cpd_energies = c['AqueousCompoundEnergies'] self.name = c["Name"] def get_correction(self, entry): comp = entry.composition rform = comp.reduced_formula cpdenergies = self.cpd_energies correction = 0 if rform in cpdenergies: if rform in ["H2", "H2O"]: correction = cpdenergies[rform] * comp.num_atoms \ - entry.uncorrected_energy - entry.correction else: correction += cpdenergies[rform] * comp.num_atoms if not rform == "H2O": correction += 0.5 * 2.46 * min(comp["H"]/2.0, comp["O"]) return correction def __str__(self): return "{} Aqueous Correction".format(self.name) @cached_class class UCorrection(Correction): """ This class implements the GGA/GGA+U mixing scheme, which allows mixing of entries. Entry.parameters must contain a "hubbards" key which is a dict of all non-zero Hubbard U values used in the calculation. For example, if you ran a Fe2O3 calculation with Materials Project parameters, this would look like entry.parameters["hubbards"] = {"Fe": 5.3} If the "hubbards" key is missing, a GGA run is assumed. It should be noted that ComputedEntries assimilated using the pymatgen.apps.borg package and obtained via the MaterialsProject REST interface using the pymatgen.matproj.rest package will automatically have these fields populated. Args: config_file: Path to the selected compatibility.yaml config file. input_set: InputSet object (to check for the +U settings) compat_type: Two options, GGA or Advanced. GGA means all GGA+U entries are excluded. Advanced means mixing scheme is implemented to make entries compatible with each other, but entries which are supposed to be done in GGA+U will have the equivalent GGA entries excluded. For example, Fe oxides should have a U value under the Advanced scheme. A GGA Fe oxide run will therefore be excluded under the scheme. 
""" common_peroxides = ["Li2O2", "Na2O2", "K2O2", "Cs2O2", "Rb2O2", "BeO2", "MgO2", "CaO2", "SrO2", "BaO2"] common_superoxides = ["LiO2", "NaO2", "KO2", "RbO2", "CsO2"] ozonides = ["LiO3", "NaO3", "KO3", "NaO5"] def __init__(self, config_file, input_set, compat_type): if compat_type not in ['GGA', 'Advanced']: raise CompatibilityError("Invalid compat_type {}" .format(compat_type)) c = loadfn(config_file) self.input_set = input_set if compat_type == 'Advanced': self.u_settings = self.input_set.CONFIG["INCAR"]["LDAUU"] self.u_corrections = c["Advanced"]["UCorrections"] else: self.u_settings = {} self.u_corrections = {} self.name = c["Name"] self.compat_type = compat_type def get_correction(self, entry): if entry.parameters.get("run_type", "GGA") == "HF": raise CompatibilityError('Invalid run type') calc_u = entry.parameters.get("hubbards", None) calc_u = defaultdict(int) if calc_u is None else calc_u comp = entry.composition elements = sorted([el for el in comp.elements if comp[el] > 0], key=lambda el: el.X) most_electroneg = elements[-1].symbol correction = 0 ucorr = self.u_corrections.get(most_electroneg, {}) usettings = self.u_settings.get(most_electroneg, {}) for el in comp.elements: sym = el.symbol #Check for bad U values if calc_u.get(sym, 0) != usettings.get(sym, 0): raise CompatibilityError('Invalid U value on {}'.format(sym)) if sym in ucorr: correction += float(ucorr[sym]) * comp[el] return correction def __str__(self): return "{} {} Correction".format(self.name, self.compat_type) class Compatibility(object): """ The Compatibility class combines a list of corrections to be applied to an entry or a set of entries. Note that some of the Corrections have interdependencies. For example, PotcarCorrection must always be used before any other compatibility. Also, GasCorrection("MP") must be used with PotcarCorrection("MP") (similarly with "MIT"). Typically, you should use the specific MaterialsProjectCompatibility and MITCompatibility subclasses instead. Args: corrections: List of corrections to apply. """ def __init__(self, corrections): self.corrections = corrections def process_entry(self, entry): """ Process a single entry with the chosen Corrections. Args: entry: A ComputedEntry object. Returns: An adjusted entry if entry is compatible, otherwise None is returned. """ try: corrections = self.get_corrections_dict(entry) except CompatibilityError: return None entry.correction = sum(corrections.values()) return entry def get_corrections_dict(self, entry): """ Returns the corrections applied to a particular entry. Args: entry: A ComputedEntry object. Returns: ({correction_name: value}) """ corrections = {} for c in self.corrections: val = c.get_correction(entry) if val != 0: corrections[str(c)] = val return corrections def process_entries(self, entries): """ Process a sequence of entries with the chosen Compatibility scheme. Args: entries: A sequence of entries. Returns: An list of adjusted entries. Entries in the original list which are not compatible are excluded. """ return list(filter(None, map(self.process_entry, entries))) def get_explanation_dict(self, entry): """ Provides an explanation dict of the corrections that are being applied for a given compatibility scheme. Inspired by the "explain" methods in many database methodologies. Args: entry: A ComputedEntry. 
Returns: (dict) of the form {"Compatibility": "string", "Uncorrected_energy": float, "Corrected_energy": float, "Corrections": [{"Name of Correction": { "Value": float, "Explanation": "string"}]} """ centry = self.process_entry(entry) if centry is None: uncorrected_energy = entry.uncorrected_energy corrected_energy = None else: uncorrected_energy = centry.uncorrected_energy corrected_energy = centry.energy d = {"compatibility": self.__class__.__name__, "uncorrected_energy": uncorrected_energy, "corrected_energy": corrected_energy} corrections = [] corr_dict = self.get_corrections_dict(entry) for c in self.corrections: cd = {"name": str(c)} cd["description"] = c.__doc__.split("Args")[0].strip() cd["value"] = corr_dict.get(str(c), 0) corrections.append(cd) d["corrections"] = corrections return d def explain(self, entry): """ Prints an explanation of the corrections that are being applied for a given compatibility scheme. Inspired by the "explain" methods in many database methodologies. Args: entry: A ComputedEntry. """ d = self.get_explanation_dict(entry) print("The uncorrected value of the energy of %s is %f eV" % ( entry.composition, d["uncorrected_energy"])) print("The following corrections / screening are applied for %s:\n" %\ d["compatibility"]) for c in d["corrections"]: print("%s correction: %s\n" % (c["name"], c["description"])) print("For the entry, this correction has the value %f eV." % c[ "value"]) print("-" * 30) print("The final energy after corrections is %f" % d[ "corrected_energy"]) class MaterialsProjectCompatibility(Compatibility): """ This class implements the GGA/GGA+U mixing scheme, which allows mixing of entries. Note that this should only be used for VASP calculations using the MaterialsProject parameters (see pymatgen.io.vaspio_set.MPVaspInputSet). Using this compatibility scheme on runs with different parameters is not valid. Args: compat_type: Two options, GGA or Advanced. GGA means all GGA+U entries are excluded. Advanced means mixing scheme is implemented to make entries compatible with each other, but entries which are supposed to be done in GGA+U will have the equivalent GGA entries excluded. For example, Fe oxides should have a U value under the Advanced scheme. A GGA Fe oxide run will therefore be excluded under the scheme. correct_peroxide: Specify whether peroxide/superoxide/ozonide corrections are to be applied or not. check_potcar_hash (bool): Use potcar hash to verify potcars are correct. """ def __init__(self, compat_type="Advanced", correct_peroxide=True, check_potcar_hash=False): fp = os.path.join(MODULE_DIR, "MPCompatibility.yaml") super(MaterialsProjectCompatibility, self).__init__( [PotcarCorrection(MPRelaxSet, check_hash=check_potcar_hash), GasCorrection(fp), AnionCorrection(fp, correct_peroxide=correct_peroxide), UCorrection(fp, MPRelaxSet, compat_type)]) class MITCompatibility(Compatibility): """ This class implements the GGA/GGA+U mixing scheme, which allows mixing of entries. Note that this should only be used for VASP calculations using the MIT parameters (see pymatgen.io.vaspio_set MITVaspInputSet). Using this compatibility scheme on runs with different parameters is not valid. Args: compat_type: Two options, GGA or Advanced. GGA means all GGA+U entries are excluded. Advanced means mixing scheme is implemented to make entries compatible with each other, but entries which are supposed to be done in GGA+U will have the equivalent GGA entries excluded. For example, Fe oxides should have a U value under the Advanced scheme. 
A GGA Fe oxide run will therefore be excluded under the scheme. correct_peroxide: Specify whether peroxide/superoxide/ozonide corrections are to be applied or not. check_potcar_hash (bool): Use potcar hash to verify potcars are correct. """ def __init__(self, compat_type="Advanced", correct_peroxide=True, check_potcar_hash=False): fp = os.path.join(MODULE_DIR, "MITCompatibility.yaml") super(MITCompatibility, self).__init__( [PotcarCorrection(MITRelaxSet, check_hash=check_potcar_hash), GasCorrection(fp), AnionCorrection(fp, correct_peroxide=correct_peroxide), UCorrection(fp, MITRelaxSet, compat_type)]) class MITAqueousCompatibility(Compatibility): """ This class implements the GGA/GGA+U mixing scheme, which allows mixing of entries. Note that this should only be used for VASP calculations using the MIT parameters (see pymatgen.io.vaspio_set MITVaspInputSet). Using this compatibility scheme on runs with different parameters is not valid. Args: compat_type: Two options, GGA or Advanced. GGA means all GGA+U entries are excluded. Advanced means mixing scheme is implemented to make entries compatible with each other, but entries which are supposed to be done in GGA+U will have the equivalent GGA entries excluded. For example, Fe oxides should have a U value under the Advanced scheme. A GGA Fe oxide run will therefore be excluded under the scheme. correct_peroxide: Specify whether peroxide/superoxide/ozonide corrections are to be applied or not. check_potcar_hash (bool): Use potcar hash to verify potcars are correct. """ def __init__(self, compat_type="Advanced", correct_peroxide=True, check_potcar_hash=False): fp = os.path.join(MODULE_DIR, "MITCompatibility.yaml") super(MITAqueousCompatibility, self).__init__( [PotcarCorrection(MITRelaxSet, check_hash=check_potcar_hash), GasCorrection(fp), AnionCorrection(fp, correct_peroxide=correct_peroxide), UCorrection(fp, MITRelaxSet, compat_type), AqueousCorrection(fp)]) class MaterialsProjectAqueousCompatibility(Compatibility): """ This class implements the GGA/GGA+U mixing scheme, which allows mixing of entries. Note that this should only be used for VASP calculations using the MaterialsProject parameters (see pymatgen.io.vaspio_set.MPVaspInputSet). Using this compatibility scheme on runs with different parameters is not valid. Args: compat_type: Two options, GGA or Advanced. GGA means all GGA+U entries are excluded. Advanced means mixing scheme is implemented to make entries compatible with each other, but entries which are supposed to be done in GGA+U will have the equivalent GGA entries excluded. For example, Fe oxides should have a U value under the Advanced scheme. A GGA Fe oxide run will therefore be excluded under the scheme. correct_peroxide: Specify whether peroxide/superoxide/ozonide corrections are to be applied or not. check_potcar_hash (bool): Use potcar hash to verify potcars are correct. """ def __init__(self, compat_type="Advanced", correct_peroxide=True, check_potcar_hash=False): fp = os.path.join(MODULE_DIR, "MPCompatibility.yaml") super(MaterialsProjectAqueousCompatibility, self).__init__( [PotcarCorrection(MPRelaxSet, check_hash=check_potcar_hash), GasCorrection(fp), AnionCorrection(fp, correct_peroxide=correct_peroxide), UCorrection(fp, MPRelaxSet, compat_type), AqueousCorrection(fp)])
xhqu1981/pymatgen
pymatgen/entries/compatibility.py
Python
mit
24,628
[ "VASP", "pymatgen" ]
0182d04e062fa2f462aa568d2902eeff2cdc8dea3fcdacdc2c2b6ed0aaa0c0b7
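# The docstrings in compatibility.py above spell out what entry.parameters
# must carry for the GGA/GGA+U mixing scheme: "hubbards" with the non-zero U
# values used in the run, and "potcar_symbols" listing the POTCARs. A minimal
# sketch of processing one hand-built entry; the -67.5 eV energy is a made-up
# illustrative value, not data from the original file.

from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.entries.compatibility import MaterialsProjectCompatibility

entry = ComputedEntry(
    'Fe2O3', -67.5,
    parameters={
        'run_type': 'GGA+U',
        'hubbards': {'Fe': 5.3},  # non-zero U values used in the run
        'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000', 'PAW_PBE O 08Apr2002'],
    },
    data={'oxide_type': 'oxide'})  # consulted by AnionCorrection

compat = MaterialsProjectCompatibility(compat_type='Advanced')
processed = compat.process_entries([entry])  # incompatible entries are dropped
if processed:
    compat.explain(processed[0])  # prints each correction that was applied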
########################################################################### # # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ########################################################################### # # This code generated (see starthinker/scripts for possible source): # - Command: "python starthinker_ui/manage.py airflow" # ########################################################################### ''' -------------------------------------------------------------- Before running this Airflow module... Install StarThinker in cloud composer ( recommended ): From Release: pip install starthinker From Open Source: pip install git+https://github.com/google/starthinker Or push local code to the cloud composer plugins directory ( if pushing local code changes ): source install/deploy.sh 4) Composer Menu l) Install All -------------------------------------------------------------- If any recipe task has "auth" set to "user" add user credentials: 1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON] OR 1. Visit Airflow UI > Admin > Connections. 2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication. - Conn Type: Google Cloud Platform - Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md - Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials -------------------------------------------------------------- If any recipe task has "auth" set to "service" add service credentials: 1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON] OR 1. Visit Airflow UI > Admin > Connections. 2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication. - Conn Type: Google Cloud Platform - Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md - Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md -------------------------------------------------------------- DV360 Bulk Targeting Editor Allows bulk targeting DV360 through Sheets and BigQuery. - Select Load, click Save + Run, a sheet called DV Targeter will be created. - In the Partners sheet tab, fill in Filter column then select Load, click Save + Run. - In the Advertisers sheet tab, fill in Filter column. then select Load, click Save + Run. - Check the First And Third Party option to load audiences, which may be slow. If not loaded, user will enter audience ids into the sheet manually. - On the Line Items sheet tab, the Filter is used only to limit drop down choices in the rest of the tool. - Optionally edit or filter the Targeting Options or Inventory Sources sheets to limit choices. - Make targeting updates, fill in changes on all tabs with colored fields (RED FIELDS ARE NOT IMPLEMENTED, IGNORE). - Select Preview, click Save + Run then check the Preview tabs. 
- Select Update, click Save + Run then check the Success and Error tabs. - Load and Update can be run multiple times. - If an update fails, all parts of the update failed, break it up into multiple updates. - To refresh the Partner, Advertiser, or Line Item list, remove the filters and run load. -------------------------------------------------------------- This StarThinker DAG can be extended with any additional tasks from the following sources: - https://google.github.io/starthinker/ - https://github.com/google/starthinker/tree/master/dags ''' from starthinker.airflow.factory import DAG_Factory INPUTS = { 'auth_dv':'user', # Credentials used for dv. 'auth_sheet':'user', # Credentials used for sheet. 'auth_bigquery':'service', # Credentials used for bigquery. 'recipe_name':'', # Name of Google Sheet to create. 'recipe_slug':'', # Name of Google BigQuery dataset to create. 'command':'Load', # Action to take. 'first_and_third':False, # Load first and third party data (may be slow). If not selected, enter audience identifiers into sheet manually. } RECIPE = { 'setup':{ 'day':[ ], 'hour':[ ] }, 'tasks':[ { 'dataset':{ '__comment__':'Ensure dataset exists.', 'auth':{'field':{'name':'auth_bigquery','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}}, 'dataset':{'field':{'name':'recipe_slug','kind':'string','order':2,'default':'','description':'Name of Google BigQuery dataset to create.'}} } }, { 'drive':{ '__comment__':'Copy the default template to sheet with the recipe name', 'auth':{'field':{'name':'auth_sheet','kind':'authentication','order':1,'default':'user','description':'Credentials used for reading data.'}}, 'copy':{ 'source':'https://docs.google.com/spreadsheets/d/1ARkIvh0D-gltZeiwniUonMNrm0Mi1s2meZ9FUjutXOE/', 'destination':{'field':{'name':'recipe_name','suffix':' DV Targeter','kind':'string','order':3,'default':'','description':'Name of Google Sheet to create.'}} } } }, { 'dv_targeter':{ '__comment':'Depending on users choice, execute a different part of the solution.', 'auth_dv':{'field':{'name':'auth_dv','kind':'authentication','order':1,'default':'user','description':'Credentials used for dv.'}}, 'auth_sheets':{'field':{'name':'auth_sheet','kind':'authentication','order':2,'default':'user','description':'Credentials used for sheet.'}}, 'auth_bigquery':{'field':{'name':'auth_bigquery','kind':'authentication','order':3,'default':'service','description':'Credentials used for bigquery.'}}, 'sheet':{'field':{'name':'recipe_name','suffix':' DV Targeter','kind':'string','order':4,'default':'','description':'Name of Google Sheet to create.'}}, 'dataset':{'field':{'name':'recipe_slug','kind':'string','order':5,'default':'','description':'Name of Google BigQuery dataset to create.'}}, 'command':{'field':{'name':'command','kind':'choice','choices':['Clear','Load','Preview','Update'],'order':6,'default':'Load','description':'Action to take.'}}, 'first_and_third':{'field':{'name':'first_and_third','kind':'boolean','order':6,'default':False,'description':'Load first and third party data (may be slow). If not selected, enter audience identifiers into sheet manually.'}} } } ] } dag_maker = DAG_Factory('dv360_targeter', RECIPE, INPUTS) dag = dag_maker.generate() if __name__ == "__main__": dag_maker.print_commandline()
google/starthinker
dags/dv360_targeter_dag.py
Python
apache-2.0
7,275
[ "VisIt" ]
ea57b7b647efcde32f49fd92ec7d6ec2f73fa7902a08c6f209d7387c5d7efb13
#!/usr/bin/env python from vtk import * import os.path data_dir = "../../../../VTKData/Data/Infovis/SQLite/" if not os.path.exists(data_dir): data_dir = "../../../../../VTKData/Data/Infovis/SQLite/" sqlite_file = data_dir + "SmallEmailTest.db" database = vtkSQLDatabase.CreateFromURL("sqlite://" + sqlite_file) database.Open("") query = database.GetQueryInstance() query.SetQuery("select Name, Job, Age from employee") queryToTable = vtkRowQueryToTable() queryToTable.SetQuery(query) queryToTable.Update() T = queryToTable.GetOutput() print "Query Results:" T.Dump(12) database.FastDelete()
timkrentz/SunTracker
IMU/VTK-6.2.0/Examples/Infovis/Python/database_query1.py
Python
mit
627
[ "VTK" ]
e814e346a19a789ab3068db084f4b92ba30aec238b71bc33466eab57997627cb
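# The database_query1.py example above assumes the SQLite file opens and the
# query succeeds. A slightly defensive sketch of the same flow using
# vtkSQLDatabase's error-reporting methods; the data path mirrors the
# example's first fallback and is otherwise an assumption.

from vtk import vtkSQLDatabase, vtkRowQueryToTable

data_dir = "../../../../VTKData/Data/Infovis/SQLite/"
sqlite_file = data_dir + "SmallEmailTest.db"

database = vtkSQLDatabase.CreateFromURL("sqlite://" + sqlite_file)
if not database.Open(""):
    raise RuntimeError("Could not open database: " + database.GetLastErrorText())

query = database.GetQueryInstance()
query.SetQuery("select Name, Job, Age from employee")

queryToTable = vtkRowQueryToTable()
queryToTable.SetQuery(query)
queryToTable.Update()

if database.HasError():
    print "Query failed:", database.GetLastErrorText()
else:
    queryToTable.GetOutput().Dump(12)

database.FastDelete()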
#!/usr/bin/env python # # Appcelerator Titanium Module Packager # # import os, subprocess, sys, glob, string import zipfile from datetime import date cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename)) os.chdir(cwd) required_module_keys = ['name','version','moduleid','description','copyright','license','copyright','platform','minsdk'] module_defaults = { 'description':'My module', 'author': 'Your Name', 'license' : 'Specify your license', 'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year), } module_license_default = "TODO: place your license here and we'll include it in the module distribution" def find_sdk(config): sdk = config['TITANIUM_SDK'] return os.path.expandvars(os.path.expanduser(sdk)) def replace_vars(config,token): idx = token.find('$(') while idx != -1: idx2 = token.find(')',idx+2) if idx2 == -1: break key = token[idx+2:idx2] if not config.has_key(key): break token = token.replace('$(%s)' % key, config[key]) idx = token.find('$(') return token def read_ti_xcconfig(): contents = open(os.path.join(cwd,'titanium.xcconfig')).read() config = {} for line in contents.splitlines(False): line = line.strip() if line[0:2]=='//': continue idx = line.find('=') if idx > 0: key = line[0:idx].strip() value = line[idx+1:].strip() config[key] = replace_vars(config,value) return config def generate_doc(config): docdir = os.path.join(cwd,'documentation') if not os.path.exists(docdir): print "Couldn't find documentation file at: %s" % docdir return None try: import markdown2 as markdown except ImportError: import markdown documentation = [] for file in os.listdir(docdir): if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)): continue md = open(os.path.join(docdir,file)).read() html = markdown.markdown(md) documentation.append({file:html}); return documentation def compile_js(manifest,config): js_file = os.path.join(cwd,'assets','net.imthinker.modules.tiextendtab.js') if not os.path.exists(js_file): return from compiler import Compiler try: import json except: import simplejson as json compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs') root_asset, module_assets = compiler.compile_module() root_asset_content = """ %s return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]); """ % root_asset module_asset_content = """ %s NSNumber *index = [map objectForKey:path]; if (index == nil) { return nil; } return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]); """ % module_assets from tools import splice_code assets_router = os.path.join(cwd,'Classes','NetImthinkerModulesTiextendtabModuleAssets.m') splice_code(assets_router, 'asset', root_asset_content) splice_code(assets_router, 'resolve_asset', module_asset_content) # Generate the exports after crawling all of the available JS source exports = open('metadata.json','w') json.dump({'exports':compiler.exports }, exports) exports.close() def die(msg): print msg sys.exit(1) def warn(msg): print "[WARN] %s" % msg def validate_license(): c = open(os.path.join(cwd,'LICENSE')).read() if c.find(module_license_default)!=-1: warn('please update the LICENSE file with your license text before distributing') def validate_manifest(): path = os.path.join(cwd,'manifest') f = open(path) if not os.path.exists(path): die("missing %s" % path) manifest = {} for line in f.readlines(): line = line.strip() if line[0:1]=='#': continue if line.find(':') < 0: continue key,value = 
line.split(':') manifest[key.strip()]=value.strip() for key in required_module_keys: if not manifest.has_key(key): die("missing required manifest key '%s'" % key) if module_defaults.has_key(key): defvalue = module_defaults[key] curvalue = manifest[key] if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key) return manifest,path ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README'] ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT'] def zip_dir(zf,dir,basepath,ignore=[]): for root, dirs, files in os.walk(dir): for name in ignoreDirs: if name in dirs: dirs.remove(name) # don't visit ignored directories for file in files: if file in ignoreFiles: continue e = os.path.splitext(file) if len(e) == 2 and e[1] == '.pyc': continue if len(e) == 2 and e[1] == '.js': continue from_ = os.path.join(root, file) to_ = from_.replace(dir, basepath, 1) zf.write(from_, to_) def glob_libfiles(): files = [] for libfile in glob.glob('build/**/*.a'): if libfile.find('Release-')!=-1: files.append(libfile) return files def build_module(manifest,config): from tools import ensure_dev_path ensure_dev_path() rc = os.system("xcodebuild -sdk iphoneos -configuration Release") if rc != 0: die("xcodebuild failed") rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release") if rc != 0: die("xcodebuild failed") # build the merged library using lipo moduleid = manifest['moduleid'] libpaths = '' for libfile in glob_libfiles(): libpaths+='%s ' % libfile os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid)) def package_module(manifest,mf,config): name = manifest['name'].lower() moduleid = manifest['moduleid'].lower() version = manifest['version'] modulezip = '%s-iphone-%s.zip' % (moduleid,version) if os.path.exists(modulezip): os.remove(modulezip) zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED) modulepath = 'modules/iphone/%s/%s' % (moduleid,version) zf.write(mf,'%s/manifest' % modulepath) libname = 'lib%s.a' % moduleid zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname)) docs = generate_doc(config) if docs!=None: for doc in docs: for file, html in doc.iteritems(): filename = string.replace(file,'.md','.html') zf.writestr('%s/documentation/%s'%(modulepath,filename),html) for dn in ('assets','example','platform'): if os.path.exists(dn): zip_dir(zf,dn,'%s/%s' % (modulepath,dn),['README']) zf.write('LICENSE','%s/LICENSE' % modulepath) zf.write('module.xcconfig','%s/module.xcconfig' % modulepath) exports_file = 'metadata.json' if os.path.exists(exports_file): zf.write(exports_file, '%s/%s' % (modulepath, exports_file)) zf.close() if __name__ == '__main__': manifest,mf = validate_manifest() validate_license() config = read_ti_xcconfig() sdk = find_sdk(config) sys.path.insert(0,os.path.join(sdk,'iphone')) sys.path.append(os.path.join(sdk, "common")) compile_js(manifest,config) build_module(manifest,config) package_module(manifest,mf,config) sys.exit(0)
ryugoo/TiExtendTab
build.py
Python
mit
6,804
[ "VisIt" ]
f9d0820ee5a749060727dd6ed862f0954688edc6076b0a136adbf6cbcf32a6ee
# This script is executed in the main console namespace so # that all the variables defined here become console variables. import ddapp import os import sys import PythonQt import json from PythonQt import QtCore, QtGui from time import time import imp import ddapp.applogic as app from ddapp import drcargs from ddapp import botpy from ddapp import vtkAll as vtk from ddapp import matlab from ddapp import jointcontrol from ddapp import callbacks from ddapp import camerabookmarks from ddapp import cameracontrol from ddapp import bihandeddemo from ddapp import debrisdemo from ddapp import doordemo from ddapp import drilldemo from ddapp import tabledemo from ddapp import mappingdemo from ddapp import valvedemo from ddapp import drivingplanner from ddapp import egressplanner from ddapp import polarisplatformplanner from ddapp import surprisetask from ddapp import continuouswalkingdemo from ddapp import sitstandplanner from ddapp import walkingtestdemo from ddapp import terraintask from ddapp import ik from ddapp import ikplanner from ddapp import objectmodel as om from ddapp import spreadsheet from ddapp import transformUtils from ddapp import tdx from ddapp import skybox from ddapp import perception from ddapp import segmentation from ddapp import cameraview from ddapp import colorize from ddapp import drakevisualizer from ddapp.fieldcontainer import FieldContainer from ddapp import robotstate from ddapp import roboturdf from ddapp import robotsystem from ddapp import affordancepanel from ddapp import filterUtils from ddapp import footstepsdriver from ddapp import footstepsdriverpanel from ddapp import framevisualization from ddapp import lcmloggerwidget from ddapp import lcmgl from ddapp import atlasdriver from ddapp import atlasdriverpanel from ddapp import multisensepanel from ddapp import navigationpanel from ddapp import mappingpanel from ddapp import handcontrolpanel from ddapp import sensordatarequestpanel from ddapp import tasklaunchpanel from ddapp import pfgrasp from ddapp import pfgrasppanel from ddapp.jointpropagator import JointPropagator from ddapp import coursemodel from ddapp import copmonitor from ddapp import robotplanlistener from ddapp import handdriver from ddapp import planplayback from ddapp import playbackpanel from ddapp import screengrabberpanel from ddapp import splinewidget from ddapp import teleoppanel from ddapp import vtkNumpy as vnp from ddapp import viewbehaviors from ddapp import visualization as vis from ddapp import actionhandlers from ddapp.timercallback import TimerCallback from ddapp.pointpicker import PointPicker, ImagePointPicker from ddapp import segmentationpanel from ddapp import lcmUtils from ddapp.utime import getUtime from ddapp.shallowCopy import shallowCopy from ddapp import segmentationroutines from ddapp import trackers from ddapp import gamepad from ddapp import blackoutmonitor from ddapp.tasks import robottasks as rt from ddapp.tasks import taskmanagerwidget from ddapp.tasks.descriptions import loadTaskDescriptions import drc as lcmdrc from collections import OrderedDict import functools import math import numpy as np from ddapp.debugVis import DebugData from ddapp import ioUtils as io drcargs.requireStrict() drcargs.args() app.startup(globals()) om.init(app.getMainWindow().objectTree(), app.getMainWindow().propertiesPanel()) actionhandlers.init() quit = app.quit exit = quit view = app.getDRCView() camera = view.camera() tree = app.getMainWindow().objectTree() orbit = cameracontrol.OrbitController(view) showPolyData = 
segmentation.showPolyData updatePolyData = segmentation.updatePolyData ############################################################################### robotSystem = robotsystem.create(view) globals().update(dict(robotSystem)) useIk = True useAtlasConvexHull = False useRobotState = True usePerception = True useGrid = True useSpreadsheet = True useFootsteps = True useHands = True usePlanning = True useAtlasDriver = True useLCMGL = True useLightColorScheme = True useLoggingWidget = True useDrakeVisualizer = True useNavigationPanel = True useFootContactVis = True useFallDetectorVis = True useImageWidget = False useCameraFrustumVisualizer = True useControllerRate = True useForceDisplay = False useSkybox = False useDataFiles = True usePFGrasp = False useGamepad = True useBlackoutText = True useRandomWalk = True useCOPMonitor = True useCourseModel = False useMappingPanel = True poseCollection = PythonQt.dd.ddSignalMap() costCollection = PythonQt.dd.ddSignalMap() if useSpreadsheet: spreadsheet.init(poseCollection, costCollection) if useIk: def onIkStartup(ikServer, startSuccess): if startSuccess: app.getMainWindow().statusBar().showMessage('Planning server started.', 2000) else: app.showErrorMessage('Error detected while starting the matlab planning server. ' 'Please check the output console for more information.', title='Error starting matlab') ikServer.outputConsole = app.getOutputConsole() ikServer.infoFunc = app.displaySnoptInfo ikServer.connectStartupCompleted(onIkStartup) startIkServer() if useAtlasDriver: atlasdriver.systemStatus.outputConsole = app.getOutputConsole() atlasdriverpanel.init(atlasDriver) if usePerception: segmentationpanel.init() cameraview.init() colorize.init() cameraview.cameraView.rayCallback = segmentation.extractPointsAlongClickRay multisensepanel.init(perception.multisenseDriver, neckDriver) sensordatarequestpanel.init() # for kintinuous, use 'CAMERA_FUSED', 'CAMERA_TSDF' disparityPointCloud = segmentation.DisparityPointCloudItem('stereo point cloud', 'CAMERA', 'CAMERA_LEFT', cameraview.imageManager) disparityPointCloud.addToView(view) om.addToObjectModel(disparityPointCloud, parentObj=om.findObjectByName('sensors')) def createPointerTracker(): return trackers.PointerTracker(robotStateModel, disparityPointCloud) if useGrid: grid = vis.showGrid(view, color=[0,0,0], alpha=0.1) grid.setProperty('Surface Mode', 'Surface with edges') app.setBackgroundColor([0.3, 0.3, 0.35], [0.95,0.95,1]) viewOptions = vis.ViewOptionsItem(view) om.addToObjectModel(viewOptions, parentObj=om.findObjectByName('sensors')) class ViewBackgroundLightHandler(object): def __init__(self, viewOptions, grid): self.viewOptions = viewOptions self.action = app.getToolsMenuActions()['ActionToggleBackgroundLight'] self.action.connect('triggered()', self.toggle) self.properties = { viewOptions : {'Gradient background':True, 'Background color':[0.0, 0.0, 0.0], 'Background color 2':[0.3, 0.3, 0.3]}, grid : {'Surface Mode':'Wireframe', 'Alpha':0.05, 'Color':[1.0, 1.0, 1.0], 'Color By':0} } self.cachedProperties = {} self.storeProperties() def storeProperties(self): def grab(obj, props): for key in props.keys(): self.cachedProperties.setdefault(obj, dict())[key] = obj.getProperty(key) for obj, props in self.properties.iteritems(): grab(obj, props) def applyProperties(self, properties): def send(obj, props): for key, value in props.iteritems(): obj.setProperty(key, value) for obj, props in properties.iteritems(): send(obj, props) def toggle(self): if self.action.checked: self.storeProperties() 
            self.applyProperties(self.properties)
        else:
            self.applyProperties(self.cachedProperties)


viewBackgroundLightHandler = ViewBackgroundLightHandler(viewOptions, grid)
if not useLightColorScheme:
    viewBackgroundLightHandler.action.trigger()

if useHands:
    handcontrolpanel.init(lHandDriver, rHandDriver, robotStateModel, robotStateJointController, view)

if useFootsteps:
    footstepsPanel = footstepsdriverpanel.init(footstepsDriver, robotStateModel, robotStateJointController, irisDriver)

if useLCMGL:
    lcmglManager = lcmgl.init(view)
    app.MenuActionToggleHelper('Tools', 'Renderer - LCM GL', lcmglManager.isEnabled, lcmglManager.setEnabled)

if useDrakeVisualizer:
    drakeVisualizer = drakevisualizer.DrakeVisualizer(view)
    app.MenuActionToggleHelper('Tools', 'Renderer - Drake', drakeVisualizer.isEnabled, drakeVisualizer.setEnabled)

if usePlanning:

    def showPose(pose):
        playbackRobotModel.setProperty('Visible', True)
        playbackJointController.setPose('show_pose', pose)

    def playPlan(plan):
        playPlans([plan])

    def playPlans(plans):
        planPlayback.stopAnimation()
        playbackRobotModel.setProperty('Visible', True)
        planPlayback.playPlans(plans, playbackJointController)

    def playManipPlan():
        playPlan(manipPlanner.lastManipPlan)

    def playWalkingPlan():
        playPlan(footstepsDriver.lastWalkingPlan)

    def plotManipPlan():
        planPlayback.plotPlan(manipPlanner.lastManipPlan)

    def planStand():
        ikPlanner.computeStandPlan(robotStateJointController.q)

    def planNominal():
        ikPlanner.computeNominalPlan(robotStateJointController.q)

    def fitDrillMultisense():
        pd = om.findObjectByName('Multisense').model.revPolyData
        om.removeFromObjectModel(om.findObjectByName('debug'))
        segmentation.findAndFitDrillBarrel(pd)

    def refitBlocks(autoApprove=True):
        polyData = om.findObjectByName('Multisense').model.revPolyData
        segmentation.updateBlockAffordances(polyData)
        if autoApprove:
            approveRefit()

    def approveRefit():
        for obj in om.getObjects():
            if isinstance(obj, segmentation.BlockAffordanceItem):
                if 'refit' in obj.getProperty('Name'):
                    originalObj = om.findObjectByName(obj.getProperty('Name').replace(' refit', ''))
                    if originalObj:
                        originalObj.params = obj.params
                        originalObj.polyData.DeepCopy(obj.polyData)
                        originalObj.actor.GetUserTransform().SetMatrix(obj.actor.GetUserTransform().GetMatrix())
                        originalObj.actor.GetUserTransform().Modified()
                        obj.setProperty('Visible', False)

    def sendDataRequest(requestType, repeatTime=0.0):
        msg = lcmdrc.data_request_t()
        msg.type = requestType
        msg.period = int(repeatTime*10)  # period is specified in tenths of a second

        msgList = lcmdrc.data_request_list_t()
        msgList.utime = getUtime()
        msgList.requests = [msg]
        msgList.num_requests = len(msgList.requests)

        lcmUtils.publish('DATA_REQUEST', msgList)

    def sendSceneHeightRequest(repeatTime=0.0):
        sendDataRequest(lcmdrc.data_request_t.HEIGHT_MAP_SCENE, repeatTime)

    def sendWorkspaceDepthRequest(repeatTime=0.0):
        sendDataRequest(lcmdrc.data_request_t.DEPTH_MAP_WORKSPACE_C, repeatTime)

    def sendSceneDepthRequest(repeatTime=0.0):
        sendDataRequest(lcmdrc.data_request_t.DEPTH_MAP_SCENE, repeatTime)

    def sendFusedDepthRequest(repeatTime=0.0):
        sendDataRequest(lcmdrc.data_request_t.FUSED_DEPTH, repeatTime)

    def sendFusedHeightRequest(repeatTime=0.0):
        sendDataRequest(lcmdrc.data_request_t.FUSED_HEIGHT, repeatTime)

    handJoints = []
    if drcargs.args().directorConfigFile.find('atlas') != -1:
        handJoints = roboturdf.getRobotiqJoints() + ['neck_ay']
    else:
        for handModel in ikPlanner.handModels:
            handJoints += handModel.handModel.model.getJointNames()
        # filter base joints out
        handJoints = [joint for joint in handJoints if joint.find('base') == -1]

    teleopJointPropagator = JointPropagator(robotStateModel, teleopRobotModel, handJoints)
    playbackJointPropagator = JointPropagator(robotStateModel, playbackRobotModel, handJoints)

    def doPropagation(model=None):
        if teleopRobotModel.getProperty('Visible'):
            teleopJointPropagator.doPropagation()
        if playbackRobotModel.getProperty('Visible'):
            playbackJointPropagator.doPropagation()

    robotStateModel.connectModelChanged(doPropagation)

    #app.addToolbarMacro('scene height', sendSceneHeightRequest)
    #app.addToolbarMacro('scene depth', sendSceneDepthRequest)
    #app.addToolbarMacro('stereo height', sendFusedHeightRequest)
    #app.addToolbarMacro('stereo depth', sendFusedDepthRequest)

    jointLimitChecker = teleoppanel.JointLimitChecker(robotStateModel, robotStateJointController)
    jointLimitChecker.setupMenuAction()
    jointLimitChecker.start()

    spindleSpinChecker = multisensepanel.SpindleSpinChecker(spindleMonitor)
    spindleSpinChecker.setupMenuAction()

    postureShortcuts = teleoppanel.PosturePlanShortcuts(robotStateJointController, ikPlanner)

    def drillTrackerOn():
        om.findObjectByName('Multisense').model.showRevolutionCallback = fitDrillMultisense

    def drillTrackerOff():
        om.findObjectByName('Multisense').model.showRevolutionCallback = None

    def fitPosts():
        segmentation.fitVerticalPosts(segmentation.getCurrentRevolutionData())
        affordancePanel.onGetRaycastTerrain()

    ikPlanner.addPostureGoalListener(robotStateJointController)

    if 'fixedBaseArm' in drcargs.getDirectorConfig()['userConfig']:
        ikPlanner.fixedBaseArm = True

    playbackPanel = playbackpanel.init(planPlayback, playbackRobotModel, playbackJointController, robotStateModel, robotStateJointController, manipPlanner)

    footstepsDriver.walkingPlanCallback = playbackPanel.setPlan
    manipPlanner.connectPlanReceived(playbackPanel.setPlan)

    teleopPanel = teleoppanel.init(robotStateModel, robotStateJointController, teleopRobotModel, teleopJointController, ikPlanner, manipPlanner, affordanceManager, playbackPanel.setPlan, playbackPanel.hidePlan)

    if useGamepad:
        gamePad = gamepad.Gamepad(teleopPanel, teleopJointController, ikPlanner, view)

    if useBlackoutText:
        blackoutMonitor = blackoutmonitor.BlackoutMonitor(robotStateJointController, view, cameraview, mapServerSource)

    debrisDemo = debrisdemo.DebrisPlannerDemo(robotStateModel, robotStateJointController, playbackRobotModel, ikPlanner, manipPlanner, atlasdriver.driver, lHandDriver, perception.multisenseDriver, refitBlocks)

    tableDemo = tabledemo.TableDemo(robotStateModel, playbackRobotModel, ikPlanner, manipPlanner, footstepsDriver, atlasdriver.driver, lHandDriver, rHandDriver, perception.multisenseDriver, view, robotStateJointController, playPlans, teleopPanel)
    tableTaskPanel = tabledemo.TableTaskPanel(tableDemo)

    drillDemo = drilldemo.DrillPlannerDemo(robotStateModel, playbackRobotModel, teleopRobotModel, footstepsDriver, manipPlanner, ikPlanner, lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver, fitDrillMultisense, robotStateJointController, playPlans, teleopPanel.showPose, cameraview, segmentationpanel)
    drillTaskPanel = drilldemo.DrillTaskPanel(drillDemo)

    valveDemo = valvedemo.ValvePlannerDemo(robotStateModel, footstepsDriver, footstepsPanel, manipPlanner, ikPlanner, lHandDriver, rHandDriver, robotStateJointController)
    valveTaskPanel = valvedemo.ValveTaskPanel(valveDemo)

    drivingPlannerPanel = drivingplanner.DrivingPlannerPanel(robotSystem)

    walkingDemo = walkingtestdemo.walkingTestDemo(robotStateModel, playbackRobotModel, teleopRobotModel, footstepsDriver, manipPlanner, ikPlanner, lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver, robotStateJointController, playPlans, showPose)

    bihandedDemo = bihandeddemo.BihandedPlannerDemo(robotStateModel, playbackRobotModel, teleopRobotModel, footstepsDriver, manipPlanner, ikPlanner, lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver, fitDrillMultisense, robotStateJointController, playPlans, showPose, cameraview, segmentationpanel)

    mappingDemo = mappingdemo.MappingDemo(robotStateModel, playbackRobotModel, ikPlanner, manipPlanner, footstepsDriver, atlasdriver.driver, lHandDriver, rHandDriver, perception.multisenseDriver, view, robotStateJointController, playPlans)

    if useMappingPanel:
        mappingPanel = mappingpanel.init(robotStateJointController, footstepsDriver)
        mappingTaskPanel = mappingpanel.MappingTaskPanel(mappingDemo, mappingPanel)

    doorDemo = doordemo.DoorDemo(robotStateModel, footstepsDriver, manipPlanner, ikPlanner, lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver, fitDrillMultisense, robotStateJointController, playPlans, showPose)
    doorTaskPanel = doordemo.DoorTaskPanel(doorDemo)

    terrainTaskPanel = terraintask.TerrainTaskPanel(robotSystem)
    terrainTask = terrainTaskPanel.terrainTask

    surpriseTaskPanel = surprisetask.SurpriseTaskPanel(robotSystem)
    surpriseTask = surpriseTaskPanel.planner

    egressPanel = egressplanner.EgressPanel(robotSystem)
    egressPlanner = egressPanel.egressPlanner

    taskPanels = OrderedDict()
    taskPanels['Driving'] = drivingPlannerPanel.widget
    taskPanels['Egress'] = egressPanel.widget
    taskPanels['Door'] = doorTaskPanel.widget
    taskPanels['Valve'] = valveTaskPanel.widget
    taskPanels['Drill'] = drillTaskPanel.widget
    taskPanels['Surprise'] = surpriseTaskPanel.widget
    taskPanels['Terrain'] = terrainTaskPanel.widget
    taskPanels['Table'] = tableTaskPanel.widget

    if useMappingPanel:
        taskPanels['Mapping'] = mappingTaskPanel.widget

    tasklaunchpanel.init(taskPanels)

    splinewidget.init(view, handFactory, robotStateModel)

    rt.robotSystem = robotSystem
    taskManagerPanel = taskmanagerwidget.init()

    for taskDescription in loadTaskDescriptions():
        taskManagerPanel.taskQueueWidget.loadTaskDescription(taskDescription[0], taskDescription[1])
    taskManagerPanel.taskQueueWidget.setCurrentQueue('Task library')

    for obj in om.getObjects():
        obj.setProperty('Deletable', False)

if useCOPMonitor:
    copMonitor = copmonitor.COPMonitor(robotSystem, view)

if useNavigationPanel:
    navigationPanel = navigationpanel.init(robotStateJointController, footstepsDriver)
    picker = PointPicker(view, callback=navigationPanel.pointPickerStoredFootsteps, numberOfPoints=2)
    #picker.start()

    continuouswalkingDemo = continuouswalkingdemo.ContinousWalkingDemo(robotStateModel, footstepsPanel, robotStateJointController, ikPlanner, teleopJointController, navigationPanel, cameraview)

if useLoggingWidget:
    w = lcmloggerwidget.LCMLoggerWidget(statusBar=app.getMainWindow().statusBar())
    app.getMainWindow().statusBar().addPermanentWidget(w.button)

if useControllerRate:

    class ControllerRateLabel(object):
        '''
        Displays a controller frequency in the status bar
        '''
        def __init__(self, atlasDriver, statusBar):
            self.atlasDriver = atlasDriver
            self.label = QtGui.QLabel('')
            statusBar.addPermanentWidget(self.label)

            self.timer = TimerCallback(targetFps=1)
            self.timer.callback = self.showRate
            self.timer.start()

        def showRate(self):
            rate = self.atlasDriver.getControllerRate()
            rate = 'unknown' if rate is None else '%d hz' % rate
            self.label.text = 'Controller rate: %s' % rate

    controllerRateLabel = ControllerRateLabel(atlasDriver, app.getMainWindow().statusBar())

if useForceDisplay:

    class LCMForceDisplay(object):
        '''
        Displays foot force sensor signals in a status bar widget or label widget
        '''
        def onAtlasState(self, msg):
            self.l_foot_force_z = msg.force_torque.l_foot_force_z
            self.r_foot_force_z = msg.force_torque.r_foot_force_z

        def __init__(self, channel, statusBar=None):
            self.sub = lcmUtils.addSubscriber(channel, lcmdrc.atlas_state_t, self.onAtlasState)

            self.label = QtGui.QLabel('')
            statusBar.addPermanentWidget(self.label)

            self.timer = TimerCallback(targetFps=10)
            self.timer.callback = self.showRate
            self.timer.start()

            self.l_foot_force_z = 0
            self.r_foot_force_z = 0

        def __del__(self):
            lcmUtils.removeSubscriber(self.sub)

        def showRate(self):
            global leftInContact, rightInContact
            self.label.text = '%.2f | %.2f' % (self.l_foot_force_z, self.r_foot_force_z)

    rateComputer = LCMForceDisplay('ATLAS_STATE', app.getMainWindow().statusBar())

if useSkybox:
    skyboxDataDir = os.path.expanduser('~/Downloads/skybox')
    imageMap = skybox.getSkyboxImages(skyboxDataDir)
    skyboxObjs = skybox.createSkybox(imageMap, view)
    skybox.connectSkyboxCamera(view)
    #skybox.createTextureGround(os.path.join(skyboxDataDir, 'Dirt_seamless.jpg'), view)
    #view.camera().SetViewAngle(60)

class RobotLinkHighlighter(object):

    def __init__(self, robotModel):
        self.robotModel = robotModel
        self.previousColors = {}

    def highlightLink(self, linkName, color):

        currentColor = self.robotModel.model.getLinkColor(linkName)
        if not currentColor.isValid():
            return

        if linkName not in self.previousColors:
            self.previousColors[linkName] = currentColor

        alpha = self.robotModel.getProperty('Alpha')
        newColor = QtGui.QColor(color[0]*255, color[1]*255, color[2]*255, alpha*255)

        self.robotModel.model.setLinkColor(linkName, newColor)

    def dehighlightLink(self, linkName):

        color = self.previousColors.pop(linkName, None)
        if color is None:
            return

        color.setAlpha(self.robotModel.getProperty('Alpha')*255)
        self.robotModel.model.setLinkColor(linkName, color)

robotHighlighter = RobotLinkHighlighter(robotStateModel)

if useFootContactVis:

    def onFootContact(msg):

        leftInContact = msg.left_contact > 0.0
        rightInContact = msg.right_contact > 0.0

        for linkName, inContact in [['l_foot', msg.left_contact > 0.0], ['r_foot', msg.right_contact > 0.0]]:
            if inContact:
                robotHighlighter.highlightLink(linkName, [0, 0, 1])
            else:
                robotHighlighter.dehighlightLink(linkName)

        #robotStateModel.model.setLinkColor(drcargs.getDirectorConfig()['leftFootLink'], contactColor if leftInContact else noContactColor)
        #robotStateModel.model.setLinkColor(drcargs.getDirectorConfig()['rightFootLink'], contactColor if rightInContact else noContactColor)

    footContactSub = lcmUtils.addSubscriber('FOOT_CONTACT_ESTIMATE', lcmdrc.foot_contact_estimate_t, onFootContact)
    footContactSub.setSpeedLimit(60)

if useFallDetectorVis:

    def onPlanStatus(msg):
        links = ['pelvis', 'utorso']
        if msg.plan_type == lcmdrc.plan_status_t.RECOVERING:
            for link in links:
                robotHighlighter.highlightLink(link, [1, 0.4, 0.0])
        elif msg.plan_type == lcmdrc.plan_status_t.BRACING:
            for link in links:
                robotHighlighter.highlightLink(link, [1, 0, 0])
        else:
            for link in links:
                robotHighlighter.dehighlightLink(link)

    fallDetectorSub = lcmUtils.addSubscriber("PLAN_EXECUTION_STATUS", lcmdrc.plan_status_t, onPlanStatus)
    fallDetectorSub.setSpeedLimit(10)

if useDataFiles:

    for filename in drcargs.args().data_files:
        polyData = io.readPolyData(filename)
        if polyData:
            vis.showPolyData(polyData, os.path.basename(filename))

if useImageWidget:
    imageWidget = cameraview.ImageWidget(cameraview.imageManager, 'CAMERA_LEFT', view)
    #imageWidget = cameraview.ImageWidget(cameraview.imageManager, 'KINECT_RGB', view)

if useCameraFrustumVisualizer:
    cameraFrustumVisualizer = cameraview.CameraFrustumVisualizer(robotStateModel, cameraview.imageManager, 'CAMERA_LEFT')

class ImageOverlayManager(object):

    def __init__(self):
        self.viewName = 'CAMERA_LEFT'
        #self.viewName = 'KINECT_RGB'
        self.size = 400
        self.position = [0, 0]
        self.usePicker = False
        self.imageView = None
        self.imagePicker = None
        self._prevParent = None

    def show(self):

        if self.imageView:
            return

        imageView = cameraview.views[self.viewName]
        self.imageView = imageView
        self._prevParent = imageView.view.parent()

        imageView.view.hide()
        imageView.view.setParent(view)
        imageView.view.resize(self.size, self.size)
        imageView.view.move(*self.position)
        imageView.view.show()

        if self.usePicker:
            self.imagePicker = ImagePointPicker(imageView)
            self.imagePicker.start()

    def hide(self):

        if self.imageView:
            self.imageView.view.hide()
            self.imageView.view.setParent(self._prevParent)
            self.imageView.view.show()
            self.imageView = None

        if self.imagePicker:
            self.imagePicker.stop()

class ToggleImageViewHandler(object):

    def __init__(self, manager):
        self.action = app.getToolsMenuActions()['ActionToggleImageView']
        self.action.connect('triggered()', self.toggle)
        self.manager = manager

    def toggle(self):
        if self.action.checked:
            self.manager.show()
        else:
            self.manager.hide()

imageOverlayManager = ImageOverlayManager()
imageViewHandler = ToggleImageViewHandler(imageOverlayManager)
showImageOverlay = imageOverlayManager.show
hideImageOverlay = imageOverlayManager.hide

screengrabberpanel.init(view)
framevisualization.init(view)
affordancePanel = affordancepanel.init(view, affordanceManager, ikServer, robotStateJointController, raycastDriver)
camerabookmarks.init(view)

def getLinkFrame(linkName, model=None):
    model = model or robotStateModel
    return model.getLinkFrame(linkName)

def showLinkFrame(linkName, model=None):
    frame = getLinkFrame(linkName, model)
    if not frame:
        raise Exception('Link not found: ' + linkName)
    return vis.updateFrame(frame, linkName, parent='link frames')

def sendEstRobotState(pose=None):
    if pose is None:
        pose = robotStateJointController.q
    msg = robotstate.drakePoseToRobotState(pose)
    lcmUtils.publish('EST_ROBOT_STATE', msg)

def enableArmEncoders():
    msg = lcmdrc.utime_t()
    msg.utime = 1
    lcmUtils.publish('ENABLE_ENCODERS', msg)

def disableArmEncoders():
    msg = lcmdrc.utime_t()
    msg.utime = -1
    lcmUtils.publish('ENABLE_ENCODERS', msg)

def sendDesiredPumpPsi(desiredPsi):
    atlasDriver.sendDesiredPumpPsi(desiredPsi)

app.setCameraTerrainModeEnabled(view, True)
app.resetCamera(viewDirection=[-1, 0, 0], view=view)
viewBehaviors = viewbehaviors.ViewBehaviors(view)

# Drill Demo Functions for in-image rendering:
useDrillDemo = False
if useDrillDemo:

    def spawnHandAtCurrentLocation(side='left'):
        if side == 'left':
            tf = transformUtils.copyFrame(getLinkFrame('l_hand_face'))
            handFactory.placeHandModelWithTransform(tf, app.getCurrentView(), 'left')
        else:
            tf = transformUtils.copyFrame(getLinkFrame('right_pointer_tip'))
            handFactory.placeHandModelWithTransform(tf, app.getCurrentView(), 'right')

    def drawFrameInCamera(t, frameName='new frame', visible=True):

        v = imageView.view
        q = cameraview.imageManager.queue
        localToCameraT = vtk.vtkTransform()
        q.getTransform('local', 'CAMERA_LEFT', localToCameraT)

        res = vis.showFrame(vtk.vtkTransform(), 'temp', view=v, visible=True, scale=0.2)
        om.removeFromObjectModel(res)
        pd = res.polyData
        pd = filterUtils.transformPolyData(pd, t)
        pd = filterUtils.transformPolyData(pd, localToCameraT)
        q.projectPoints('CAMERA_LEFT', pd)

        vis.showPolyData(pd, ('overlay ' + frameName), view=v, colorByName='Axes', parent='camera overlay', visible=visible)

    def drawObjectInCamera(objectName, visible=True):
        v = imageView.view
        q = cameraview.imageManager.queue
        localToCameraT = vtk.vtkTransform()
        q.getTransform('local', 'CAMERA_LEFT', localToCameraT)
        obj = om.findObjectByName(objectName)
        if obj is None:
            return
        objToLocalT = transformUtils.copyFrame(obj.actor.GetUserTransform() or vtk.vtkTransform())
        objPolyDataOriginal = obj.polyData
        pd = objPolyDataOriginal
        pd = filterUtils.transformPolyData(pd, objToLocalT)
        pd = filterUtils.transformPolyData(pd, localToCameraT)
        q.projectPoints('CAMERA_LEFT', pd)
        vis.showPolyData(pd, ('overlay ' + objectName), view=v, color=[0, 1, 0], parent='camera overlay', visible=visible)

    def projectDrillDemoInCamera():
        q = om.findObjectByName('camera overlay')
        om.removeFromObjectModel(q)

        imageView = cameraview.views['CAMERA_LEFT']
        imageView.imageActor.SetOpacity(.2)

        drawFrameInCamera(drillDemo.drill.frame.transform, 'drill frame', visible=False)

        tf = transformUtils.copyFrame(drillDemo.drill.frame.transform)
        tf.PreMultiply()
        tf.Concatenate(drillDemo.drill.drillToButtonTransform)
        drawFrameInCamera(tf, 'drill button')

        tf2 = transformUtils.copyFrame(tf)
        tf2.PreMultiply()
        tf2.Concatenate(transformUtils.frameFromPositionAndRPY([0, 0, 0], [180, 0, 0]))
        drawFrameInCamera(tf2, 'drill button flip')

        drawObjectInCamera('drill', visible=False)

        drawObjectInCamera('sensed pointer tip')
        obj = om.findObjectByName('sensed pointer tip frame')
        if (obj is not None):
            drawFrameInCamera(obj.transform, 'sensed pointer tip frame', visible=False)

        #drawObjectInCamera('left robotiq', visible=False)
        #drawObjectInCamera('right pointer', visible=False)

        v = imageView.view
        v.render()

    showImageOverlay()
    drillDemo.pointerTracker = createPointerTracker()
    drillDemo.projectCallback = projectDrillDemoInCamera
    drillYawPreTransform = vtk.vtkTransform()
    drillYawPreTransform.PostMultiply()

    def onDrillYawSliderChanged(value):
        yawOffset = value - 180.0
        drillDemo.drillYawSliderValue = yawOffset
        drillDemo.updateDrillToHand()

    app.getMainWindow().macrosToolBar().addWidget(QtGui.QLabel('drill yaw:'))
    slider = QtGui.QSlider(QtCore.Qt.Horizontal)
    slider.setMaximum(360)
    slider.setValue(180)
    slider.setMaximumWidth(200)
    slider.connect('valueChanged(int)', onDrillYawSliderChanged)
    app.getMainWindow().macrosToolBar().addWidget(slider)

    def sendPointerPrep():
        drillDemo.planPointerPressGaze(-0.05)

    def sendPointerPress():
        drillDemo.planPointerPressGaze(0.01)

    def sendPointerPressDeep():
        drillDemo.planPointerPressGaze(0.015)

    app.addToolbarMacro('drill posture', drillDemo.planBothRaisePowerOn)
    app.addToolbarMacro('pointer prep', sendPointerPrep)
    app.addToolbarMacro('pointer press', sendPointerPress)
    app.addToolbarMacro('pointer press deep', sendPointerPressDeep)

if usePFGrasp:
    pfgrasper = pfgrasp.PFGrasp(drillDemo, robotStateModel, playbackRobotModel, teleopRobotModel, footstepsDriver, manipPlanner, ikPlanner, lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver, fitDrillMultisense, robotStateJointController, playPlans, showPose, cameraview, segmentationpanel)
    showImageOverlay()
    hideImageOverlay()
    pfgrasppanel.init(pfgrasper, _prevParent, imageView, imagePicker, cameraview)

import signal
def sendMatlabSigint():
    ikServer.comm.client.proc.send_signal(signal.SIGINT)
#app.addToolbarMacro('Ctrl+C MATLAB', sendMatlabSigint)

class AffordanceTextureUpdater(object):

    def __init__(self, affordanceManager):
        self.affordanceManager = affordanceManager
        self.timer = TimerCallback(targetFps=10)
        self.timer.callback = self.updateTextures
        self.timer.start()

    def updateTexture(self, obj):
        if obj.getProperty('Camera Texture Enabled'):
            cameraview.applyCameraTexture(obj, cameraview.imageManager)
        else:
            cameraview.disableCameraTexture(obj)
        obj._renderAllViews()

    def updateTextures(self):
        for aff in affordanceManager.getAffordances():
            self.updateTexture(aff)

affordanceTextureUpdater = AffordanceTextureUpdater(affordanceManager)

def drawCenterOfMass(model):
    stanceFrame = footstepsDriver.getFeetMidPoint(model)
    com = list(model.model.getCenterOfMass())
    com[2] = stanceFrame.GetPosition()[2]
    d = DebugData()
    d.addSphere(com, radius=0.015)
    obj = vis.updatePolyData(d.getPolyData(), 'COM %s' % model.getProperty('Name'), color=[1, 0, 0], visible=False, parent=model)

def initCenterOfMassVisualization():
    for model in [robotStateModel, teleopRobotModel, playbackRobotModel]:
        model.connectModelChanged(drawCenterOfMass)
        drawCenterOfMass(model)

initCenterOfMassVisualization()

class RobotMoverWidget(object):
    def __init__(self, jointController):
        self.jointController = jointController
        pos, rpy = jointController.q[:3], jointController.q[3:6]
        t = transformUtils.frameFromPositionAndRPY(pos, np.degrees(rpy))
        self.frame = vis.showFrame(t, 'mover widget', scale=0.3)
        self.frame.setProperty('Edit', True)
        self.frame.connectFrameModified(self.onFrameModified)

    def onFrameModified(self, frame):
        pos, rpy = self.frame.transform.GetPosition(), transformUtils.rollPitchYawFromTransform(self.frame.transform)
        q = self.jointController.q.copy()
        q[:3] = pos
        q[3:6] = rpy
        self.jointController.setPose('moved_pose', q)

class RobotGridUpdater(object):

    def __init__(self, gridFrame, robotModel, jointController):
        self.gridFrame = gridFrame
        self.robotModel = robotModel
        self.jointController = jointController
        self.robotModel.connectModelChanged(self.updateGrid)

    def updateGrid(self, model):
        pos = self.jointController.q[:3]

        x = int(np.round(pos[0])) / 10
        y = int(np.round(pos[1])) / 10
        z = int(np.round(pos[2] - 0.85)) / 1

        t = vtk.vtkTransform()
        t.Translate((x*10, y*10, z))
        self.gridFrame.copyFrame(t)

gridUpdater = RobotGridUpdater(grid.getChildFrame(), robotStateModel, robotStateJointController)

class IgnoreOldStateMessagesSelector(object):

    def __init__(self, jointController):
        self.jointController = jointController
        self.action = app.addMenuAction('Tools', 'Ignore Old State Messages')
        self.action.setCheckable(True)
        self.action.setChecked(self.jointController.ignoreOldStateMessages)
        self.action.connect('triggered()', self.toggle)

    def toggle(self):
        self.jointController.ignoreOldStateMessages = bool(self.action.checked)

IgnoreOldStateMessagesSelector(robotStateJointController)

class RandomWalk(object):

    def __init__(self, max_distance_per_plan=2):
        self.subs = []
        self.max_distance_per_plan = max_distance_per_plan

    def handleStatus(self, msg):
        if msg.plan_type == msg.STANDING:
            goal = transformUtils.frameFromPositionAndRPY(
                np.array([robotStateJointController.q[0] + 2 * self.max_distance_per_plan * (np.random.random() - 0.5),
                          robotStateJointController.q[1] + 2 * self.max_distance_per_plan * (np.random.random() - 0.5),
                          robotStateJointController.q[2] - 0.84]),
                [0, 0, robotStateJointController.q[5] + 2 * np.degrees(np.pi) * (np.random.random() - 0.5)])
            request = footstepsDriver.constructFootstepPlanRequest(robotStateJointController.q, goal)
            request.params.max_num_steps = 18
            footstepsDriver.sendFootstepPlanRequest(request)

    def handleFootstepPlan(self, msg):
        footstepsDriver.commitFootstepPlan(msg)

    def start(self):
        sub = lcmUtils.addSubscriber('PLAN_EXECUTION_STATUS', lcmdrc.plan_status_t, self.handleStatus)
        sub.setSpeedLimit(0.2)
        self.subs.append(sub)
        self.subs.append(lcmUtils.addSubscriber('FOOTSTEP_PLAN_RESPONSE', lcmdrc.footstep_plan_t, self.handleFootstepPlan))

    def stop(self):
        for sub in self.subs:
            lcmUtils.removeSubscriber(sub)

if useRandomWalk:
    randomWalk = RandomWalk()

if useCourseModel:
    courseModel = coursemodel.CourseModel()

if 'useKuka' in drcargs.getDirectorConfig()['userConfig']:
    import kinectlcm
    #kinectlcm.init()
    imageOverlayManager.viewName = "KINECT_RGB"
    #ikPlanner.fixedBaseArm = True
    #showImageOverlay()

if 'exo' in drcargs.args():
    if (drcargs.args().exo):
        ikPlanner.pushToMatlab = False

def roomMap():
    mappingPanel.onStartMappingButton()
    t = mappingdemo.MappingDemo(robotStateModel, playbackRobotModel, ikPlanner, manipPlanner, footstepsDriver, atlasdriver.driver, lHandDriver, rHandDriver, perception.multisenseDriver, view, robotStateJointController, playPlans)
    t.visOnly = False
    t.optionalUserPromptEnabled = False
    q = t.autonomousExecuteRoomMap()
    q.connectTaskEnded(mappingSweepEnded)
    q.start()

def mappingSweepEnded(taskQ, task):
    if task.func_name == 'doneIndicator':
        import time as qq
        mappingPanel.onStopMappingButton()
        qq.sleep(3)
        mappingPanel.onShowMapButton()
        print "DONE WITH MAPPING ROOM"
rdeits/director
src/python/ddapp/startup.py
Python
bsd-3-clause
38,265
[ "VTK" ]
dfe295adc47fa75b5b119eba06b22035165e0da6306f56a4db4f6a293c403f66
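One small pattern recurs throughout the record above: a status-bar label refreshed by a periodic timer callback (ControllerRateLabel, LCMForceDisplay and AffordanceTextureUpdater all follow it). A minimal sketch of the same idea in plain PyQt5, outside the director framework; the QtWidgets/QTimer calls are the standard PyQt5 API, but get_rate() is a hypothetical stand-in for atlasDriver.getControllerRate():

import sys
import random
from PyQt5 import QtWidgets, QtCore

def get_rate():
    # hypothetical data source; the director code reads this from the robot driver
    return random.choice([None, 333, 500, 1000])

app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QMainWindow()
label = QtWidgets.QLabel('')
window.statusBar().addPermanentWidget(label)

def show_rate():
    # mirrors ControllerRateLabel.showRate: format the rate, fall back to 'unknown'
    rate = get_rate()
    rate = 'unknown' if rate is None else '%d hz' % rate
    label.setText('Controller rate: %s' % rate)

timer = QtCore.QTimer()        # plays the role of director's TimerCallback(targetFps=1)
timer.timeout.connect(show_rate)
timer.start(1000)              # period in milliseconds, i.e. 1 Hz

window.show()
sys.exit(app.exec_())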
import pandas as pd
import numpy as np
from statsmodels.tsa.stattools import adfuller as ADF

s = pd.Series([1, 2, 3], index = ['a', 'b', 'c'])
d = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns = ['a', 'b', 'c'])
d2 = pd.DataFrame(s)

d.head()
d.describe()

#pd.read_excel('data.xls')
#pd.read_csv('data.csv', encoding = 'utf-8')

ADF(np.random.rand(100))
XiangYz/webscraper
data2.py
Python
lgpl-2.1
358
[ "ADF" ]
4c02dd0ba8bac9482d61a37d3b91eb0f39efa216dcd1a4fbfd87a4e1ae7f3ddc
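For context on the snippet above: statsmodels' adfuller returns a tuple whose first two entries are the test statistic and the p-value of the augmented Dickey-Fuller unit-root test, with the critical values available as a dict at index 4. A short sketch unpacking the result, using the same public API as the record; the uniform white-noise input is only an illustration and should strongly reject the unit-root null:

import numpy as np
from statsmodels.tsa.stattools import adfuller as ADF

result = ADF(np.random.rand(100))
adf_stat, p_value = result[0], result[1]
crit_values = result[4]  # critical values at the 1%, 5% and 10% levels
print('ADF statistic: %.3f, p-value: %.3f' % (adf_stat, p_value))
if p_value < 0.05:
    print('reject the unit-root null: the series looks stationary')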
# basic block timing analysis

import utils
import msp_base as base
import msp_fr5969_model as model
import msp_elftools as elftools
from msp_isa import isa
import msp_instr as instr

class BasicBlock(object):
    def __init__(self, addr, verbosity = 0):
        self.addr = addr
        self.instrs = []
        self.callsites = set()
        self.preds = set()
        self.succs = set()
        self.call_return_target = None
        if verbosity >= 2:
            print(' NEW bblock {:05x}'.format(addr))

    def __str__(self):
        s = 'BB at {:05x}\n'.format(self.addr)
        s += ' under: '
        first = True
        for callsite in sorted(self.callsites):
            if first:
                first = False
            else:
                s += ' '
            s += '{:05x}\n'.format(callsite)
        s += ' preds:\n'
        for pred in sorted(self.preds):
            s += ' {:05x}\n'.format(pred)
        s += ' succs:\n'
        for succ in sorted(self.succs):
            s += ' {:05x}\n'.format(succ)
        if self.call_return_target is not None:
            s += ' call returns to {:05x}\n'.format(self.call_return_target)
        return s

class CallSite(object):
    def __init__(self, addr, verbosity = 0):
        self.addr = addr
        self.bblock = None
        self.children = set()
        self.return_targets = set()
        self.callees = set()
        if verbosity >= 2:
            print(' NEW site {:05x}'.format(addr))

    def __str__(self):
        s = 'CallSite at {:05x}\n'.format(self.addr)
        s += ' callees:\n'
        for callee in sorted(self.callees):
            s += ' {:05x}\n'.format(callee)
        s += ' return_targets:\n'
        for rt in sorted(self.return_targets):
            s += ' {:05x}\n'.format(rt)
        s += ' children:\n'
        for child in sorted(self.children):
            s += ' {:05x}\n'.format(child)
        if self.bblock is not None:
            # s += '-- entry block --\n{:s}'.format(str(self.bblock))
            s += '-- entry block --\n'
        else:
            s += '-- no entry block recorded --\n'
        return s

class CFG(object):
    def __init__(self, fname, verbosity = 0):
        self.verbosity = verbosity
        self.state = model.Model()
        self.read16 = model.mk_read16(self.state.read8)
        # TODO: alternative to readfields that doesn't always crash?
        for addr in range(0, model.ram_start, 2):
            self.state.mmio_handle_default(addr, initial_value = 0xff)
            self.state.mmio_handle_default(addr+1, initial_value = 0x3f)
        for addr in range(model.ram_start+model.ram_size, model.fram_start, 2):
            self.state.mmio_handle_default(addr, initial_value = 0xff)
            self.state.mmio_handle_default(addr+1, initial_value = 0x3f)
        for addr in range(model.fram_start+model.fram_size, model.fram_start+model.fram_size+16, 2):
            self.state.mmio_handle_default(addr, initial_value = 0xff)
            self.state.mmio_handle_default(addr+1, initial_value = 0x3f)
        if self.verbosity >= 1:
            print('loading {:s}'.format(fname))
        elftools.load(self.state, fname, restore_regs=False, verbosity=self.verbosity)

    def _describe(self, call_table, block_table, entrypoint):
        print('CFG starting from {:05x}, {:d} sites, {:d} basic blocks\n'
              .format(entrypoint, len(call_table), len(block_table)))
        ct_clone = call_table.copy()
        bt_clone = block_table.copy()
        self._describe_call(ct_clone, bt_clone, entrypoint)
        if ct_clone or bt_clone:
            while ct_clone:
                print('REMAINING: {:d} sites, {:d} basic blocks\n'
                      .format(len(ct_clone), len(bt_clone)))
                first = sorted(ct_clone.keys())[0]
                self._describe_call(ct_clone, bt_clone, first)
            while bt_clone:
                print('REMAINING: {:d} sites, {:d} basic blocks\n'
                      .format(len(ct_clone), len(bt_clone)))
                first = sorted(bt_clone.keys())[0]
                self._describe_block(bt_clone, first)
        else:
            print('No unreachable blocks, good')

    def _describe_call(self, call_table, block_table, call_addr):
        callsite_object = call_table.pop(call_addr, None)
        if callsite_object is None:
            print('Already visited site {:05x}\n'.format(call_addr))
        else:
            print(str(callsite_object))
            self._describe_block(block_table, call_addr)
            for addr in sorted(callsite_object.callees):
                self._describe_call(call_table, block_table, addr)

    def _describe_block(self, block_table, block_addr):
        block_object = block_table.pop(block_addr, None)
        if block_object is None:
            print('Already visited block {:05x}\n'.format(block_addr))
        else:
            print(str(block_object))
            addr = block_object.call_return_target
            if addr is not None:
                self._describe_block(block_table, addr)
            else:
                for addr in sorted(block_object.succs):
                    self._describe_block(block_table, addr)

    def _describe_workset(self, workset):
        print('Workset: {:d} blocks to process'.format(len(workset)))
        for pc, callsite, pc_pred in workset:
            print(' pc {:05x}, under {:05x}, from {:05x}'
                  .format(pc, callsite, pc_pred))
        print('')

    def get_seqs(self, length):
        btab = self.block_table.copy()
        ctab = self.call_table.copy()

        def _get_suffix(block, current, remaining):
            assert remaining >= 1
            if remaining == 1:
                output = [[block.instrs[current]]]
            elif current < len(block.instrs)-1:
                # print([x for x in _get_suffix(block, current+1, remaining-1)])
                # print([block.instrs[current]])
                output = [[block.instrs[current]] + x for x in _get_suffix(block, current+1, remaining-1)]
            else:
                # print([x for addr in block.succs for x in _get_suffix(btab[addr], 0, remaining-1)])
                assert current == len(block.instrs)-1
                if block.succs:
                    succs = block.succs
                else:
                    # assume return
                    succs = [addr for cs in block.callsites for addr in ctab[cs].return_targets]
                output = [[block.instrs[current]] + x for addr in succs for x in _get_suffix(btab[addr], 0, remaining-1)]
            return output

        return [_get_suffix(b, i, length) for b in btab.values() for i in range(len(b.instrs))]

    def build_graph(self, do_quadratic_checks = False):
        # find entry point
        entrypoint = self.read16(model.resetvec)
        entrysite = CallSite(entrypoint, self.verbosity)

        # tables of observed call sites and blocks
        # TODO: will need multiple initial entries to support more than the boring set of
        # interrupt vectors
        call_table = {entrypoint : entrysite}
        call_table = {
            self.read16(ivec) : CallSite(self.read16(ivec), self.verbosity)
            # hack for uninitialized vectors that don't make sense
            for ivec in range(model.ivec_start, model.ivec_start+model.ivec_count*2, 2)
            if self.read16(ivec) != 0xffff
        }
        block_table = {}

        # tables of covered instructions and memory sites
        ins_table = {}
        byte_table = {}

        # set of pc values to consider next
        # we could come in through any interrupt vector...
        workset = {
            (self.read16(ivec), self.read16(ivec), ivec)
            for ivec in range(model.ivec_start, model.ivec_start+model.ivec_count*2, 2)
            # hack for uninitialized vectors that don't make sense
            if self.read16(ivec) != 0xffff
        }

        def update_workset(pc, callsite, pc_pred):
            if pc not in block_table or callsite not in block_table[pc].callsites:
                if self.verbosity >= 3:
                    print(' pushed {:05x} under {:05x}, from {:05x}'.format(pc, callsite, pc_pred))
                workset.add( (pc, callsite, pc_pred) )
            # pc and callsite already matched, update preds only
            else:
                block_table[pc].preds.add(pc_pred)

        # could maintain a separate index if this is needed as a feature
        if do_quadratic_checks:
            def pc_in_workset(pc_check):
                return any(pc == pc_check for pc, callsite, pc_pred in workset)

        while workset:
            pc, callsite, pc_pred = workset.pop()
            if self.verbosity >= 3:
                print(' popped {:05x} under {:05x}, from {:05x}'.format(pc, callsite, pc_pred))

            # we might be seeing the same block again under a different callsite,
            # as callsites can overlap...
            if pc in block_table:
                if self.verbosity >= 2:
                    print(' DUPE bblock {:05x}'.format(pc))
                current_block = block_table[pc]
                #assert callsite not in current_block.callsites
                # This assertion doesn't make sense, in the case where we add multiple entries
                # to a new block under the same callsite but different PCs, for example in the case
                # where we just split.
                current_block.callsites.add(callsite)
                current_block.preds.add(pc_pred)
                # put all of the block's non-call successors back onto the worklist with the new callsite
                pc_target = current_block.call_return_target
                if pc_target is not None:
                    update_workset(pc_target, callsite, current_block.addr)
                else:
                    for pc_target in current_block.succs:
                        update_workset(pc_target, callsite, current_block.addr)
                continue # and done with this block

            current_block = BasicBlock(pc, verbosity=self.verbosity)
            current_block.callsites.add(callsite)
            current_block.preds.add(pc_pred)
            block_table[pc] = current_block

            # check if this is the entry point of a new callsite: should only happen once
            callsite_object = call_table[callsite]
            if callsite == pc:
                assert callsite_object.bblock == None, 'callsite {:05x} has already seen pc'.format(callsite)
                callsite_object.bblock = current_block
            callsite_object.children.add(pc)

            in_block = True
            split_addr = None
            while in_block:
                word = self.read16(pc)
                ins = isa.decode(word)

                # check the validity of the decoding
                if ins is None:
                    raise ValueError('unable to decode word {:04x} at {:05x}, skipping'
                                     .format(word, pc))

                # note that AI will change the registers...
                self.state.writereg(0, pc)
                fields = ins.readfields(self.state)
                pc_next = instr.pcadd(pc, ins.length)
                if self.verbosity >= 4:
                    print(' pc {:05x}, len({:d}), next {:05x}'.format(pc, ins.length, pc_next))

                # check if we've visited this memory before, split if we have
                if pc in ins_table:
                    prev_addr = ins_table[pc]
                    if split_addr is None:
                        split_addr = prev_addr
                        if prev_addr == current_block.addr:
                            if self.verbosity >= 2:
                                print('post split: revisiting {:05x} in block {:05x} again'
                                      .format(pc, prev_addr))
                        else:
                            if self.verbosity >= 2:
                                print('split: revisiting {:05x} in block {:05x}, first visit from block {:05x}'
                                      .format(pc, current_block.addr, ins_table[pc]))

                            # to split we just remove the existing block and put it back on the worklist.
                            # the fallthrough logic will connect back to the new block automatically.
                            if prev_addr in block_table:
                                prev_block = block_table.pop(prev_addr)
                                # remove from children of callsites --
                                # the update logic should re-add everything, not clear if this is necessary
                                for old_callsite in prev_block.callsites:
                                    old_callsite_object = call_table[old_callsite]
                                    if old_callsite_object.bblock is prev_block:
                                        old_callsite_object.bblock = None
                                    if prev_addr in old_callsite_object.children:
                                        old_callsite_object.children.remove(prev_addr)
                                    else:
                                        print('WARNING: old site {:05x} missing child {:05x}'
                                              .format(old_callsite, prev_addr))
                                # don't remove from predecessors,
                                for old_pred in prev_block.preds:
                                    # just put back on worklist for each pred, for each of that pred's callsites
                                    for old_callsite in prev_block.callsites:
                                        update_workset(prev_addr, old_callsite, old_pred)
                            else:
                                # if we couldn't find the block, we must have already removed it and it's
                                # somewhere in the worklist
                                if do_quadratic_checks:
                                    assert pc_in_workset(prev_addr), ('previously removed {:05x} missing from workset'
                                                                     .format(prev_addr))
                                    check_str = ' (found {:05x} in workset)'.format(prev_addr)
                                else:
                                    check_str = ''
                                if self.verbosity >= 2:
                                    print('already removed {:05x} for a previous split{:s}'.format(prev_addr, check_str))
                    else:
                        if split_addr != prev_addr:
                            if do_quadratic_checks:
                                assert pc_in_workset(prev_addr), ('was splitting {:05x}, saw {:05x} not in workset'
                                                                 .format(split_addr, prev_addr))
                                check_str = ' (found {:05x} in workset)'.format(prev_addr)
                            else:
                                check_str = ''
                            if self.verbosity >= 2:
                                print('splits overlap: was splitting {:05x}, saw {:05x}{:s}'
                                      .format(split_addr, prev_addr, check_str))
                            # update split_addr to track multiple overlaps instead of just creating spam
                            split_addr = prev_addr

                    # #self._describe(call_table, block_table, entrypoint)
                    # self._describe_workset(workset)
                    # for i in range(pc, pc + 32):
                    #     if i in ins_table:
                    #         print(' {:05x} : {:05x}'.format(i, ins_table[i]))
                    #     else:
                    #         print(' {:05x} : None'.format(i))
                    # assert False

                ins_table[pc] = current_block.addr
                for b in range(pc, pc_next):
                    if b in byte_table:
                        assert split_addr == prev_addr, 'byte splitting {:05x}, saw {:05x}'.format(split_addr, prev_addr)
                    byte_table[b] = current_block.addr

                current_block.instrs.append((ins, fields))

                if ins.fmt in {'jump'}:
                    offset = fields['jump_offset']
                    # jump offsets are actually relative to the fallthrough pc
                    pc_target = instr.pcadd(pc_next, offset)
                    current_block.succs.add(pc_target)
                    update_workset(pc_target, callsite, current_block.addr)
                    if ins.name not in {'JMP'}:
                        current_block.succs.add(pc_next)
                        update_workset(pc_next, callsite, current_block.addr)
                    in_block = False

                elif ins.name in {'CALL'}:
                    if ins.smode in {'#N', '#@N'}:
                        pc_call_target = fields['src']
                        # create a new callsite for this call target if we don't already have one
                        if pc_call_target not in call_table:
                            callsite_object_target = CallSite(pc_call_target, verbosity=self.verbosity)
                            call_table[pc_call_target] = callsite_object_target
                        else:
                            callsite_object_target = call_table[pc_call_target]
                        # we'll update that callsite's bblock and children when we pull this target
                        # pc out of the workset

                        # update return targets and callees
                        callsite_object_target.return_targets.add(pc_next)
                        current_block.call_return_target = pc_next
                        callsite_object.callees.add(pc_call_target)

                        # update the workset with both the call and return targets
                        current_block.succs.add(pc_call_target)
                        update_workset(pc_call_target, pc_call_target, current_block.addr)
                        current_block.succs.add(pc_next)
                        update_workset(pc_next, callsite, current_block.addr)
                    else:
                        if self.verbosity >= 0:
                            print('indirect call at {:05x}, unsupported'.format(pc))
                    in_block = False

                # check for indirect branch
                elif ins.name in {'MOV'} and ins.dmode in {'Rn'} and fields['rdst'] in {0}:
                    # emulated return
                    if ins.smode in {'@Rn+'} and fields['rsrc'] in {1}:
                        pass
                        # we've already captured the return targets when we entered the callsite,
                        # nothing else to do
                    elif ins.smode in {'#N', '#@N'}:
                        pc_target = fields['src']
                        current_block.succs.add(pc_target)
                        update_workset(pc_target, callsite, current_block.addr)
                    else:
                        if self.verbosity >= 0:
                            print('indirect branch at {:05x}, unsupported'.format(pc))
                    in_block = False

                # reti behaves the same as return
                elif ins.name in {'RETI'}:
                    in_block = False

                # look at next instruction in block
                else:
                    pc = pc_next
                    # check to make sure we aren't merging with an existing block
                    if pc in block_table:
                        if self.verbosity >= 2:
                            print('fallthrough edge from block {:05x} to {:05x}'.format(current_block.addr, pc))
                        current_block.succs.add(pc)
                        update_workset(pc, callsite, current_block.addr)
                        in_block = False

            if self.verbosity >= 4:
                print('created:')
                print(str(current_block))

        if self.verbosity >= 1:
            print('processed workset, {:d} callsites, {:d} blocks'.format(len(call_table), len(block_table)))
            print('covered {:d} instructions, {:d} bytes'.format(len(ins_table), len(byte_table)))

        range_start = None
        prev = None
        if self.verbosity >= 2:
            for k in sorted(byte_table):
                if range_start is None:
                    range_start = k
                    prev = k
                elif k == prev + 1:
                    prev = k
                else:
                    print(' {:05x} - {:05x}'.format(range_start, prev))
                    range_start = k
                    prev = k
            print(' {:05x} - {:05x}'.format(range_start, prev))

        if self.verbosity >= 3:
            self._describe(call_table, block_table, entrypoint)

        self.call_table = call_table
        self.block_table = block_table
        # TODO: support multiple vectors
        self.entrypoint = entrypoint

    def check_instrs(self):
        pc = self.read16(model.resetvec)
        word = self.read16(pc)
        while word != 0x3fff and pc < model.upper_start + model.upper_size:
            ins = isa.decode(word)
            if ins is None:
                print('{:05x} unable to decode instruction: {:04x}'
                      .format(pc, word))
                pc += 2
            else:
                print('{:05x}'.format(pc), ins.fmt, ins.name, ins.smode, ins.dmode)
                pc += ins.length
            word = self.read16(pc)

if __name__ == '__main__':
    import sys
    fname = sys.argv[1]

    cfg = CFG(fname, verbosity=1)
    cfg.build_graph(do_quadratic_checks=True)
    seqs = cfg.get_seqs(12)
    print(len(seqs))
    # for s in seqs:
    #     print(len(s))
    #     for ins, fields in s:
    #         print(' ', ins.name)
billzorn/msp-pymodel
lib/msp_cfg.py
Python
mit
22,399
[ "VisIt" ]
3e88282b328a855ac97e78d8103a6bf37cc1a2115c6d6a265696887a6e686bdb
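A usage sketch for the CFG class in the record above, built only from calls and attributes that appear in the record itself (its __main__ block, plus block_table, instrs and succs on BasicBlock); 'firmware.elf' is a placeholder input path:

from msp_cfg import CFG

cfg = CFG('firmware.elf', verbosity=0)
cfg.build_graph()

# block_table maps a block's start address to its BasicBlock;
# walk it to summarize the recovered control-flow graph
total_instrs = 0
for addr in sorted(cfg.block_table):
    block = cfg.block_table[addr]
    total_instrs += len(block.instrs)
    print('{:05x}: {:d} instrs, {:d} succs'.format(addr, len(block.instrs), len(block.succs)))
print('{:d} blocks, {:d} instructions total'.format(len(cfg.block_table), total_instrs))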
#!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/randomtext.py
__version__=''' $Id: randomtext.py 3342 2008-12-12 15:55:34Z andy $ '''

###############################################################################
# generates so-called 'Greek Text' for use in filling documents.
###############################################################################
__doc__="""Like Lorem Ipsum, but more fun and extensible.

This module exposes a function randomText() which generates paragraphs.
These can be used when testing out document templates and stylesheets.
A number of 'themes' are provided - please contribute more!
We need some real Greek text too.

There are currently eight themes provided:
    STARTUP (words suitable for a business plan - or not as the case may be),
    COMPUTERS (names of programming languages and operating systems etc),
    BLAH (variations on the word 'blah'),
    BUZZWORD (buzzword bingo),
    STARTREK (Star Trek),
    PRINTING (print-related terms)
    PYTHON (snippets and quotes from Monty Python)
    CHOMSKY (random linguistic nonsense)

EXAMPLE USAGE:
    from reportlab.lib import randomtext
    print randomtext.randomText(randomtext.PYTHON, 10)

This prints a random number of random sentences (up to a limit of ten) using
the theme 'PYTHON'.
"""

#theme one :-)
STARTUP = ['strategic', 'direction', 'proactive', 'venture capital',
    'reengineering', 'forecast', 'resources', 'SWOT analysis',
    'forward-thinking', 'profit', 'growth', 'doubletalk', 'B2B', 'B2C',
    'venture capital', 'IPO', "NASDAQ meltdown - we're all doomed!"]

#theme two - computery things.
COMPUTERS = ['Python', 'Perl', 'Pascal', 'Java', 'Javascript', 'VB', 'Basic',
    'LISP', 'Fortran', 'ADA', 'APL', 'C', 'C++', 'assembler', 'Larry Wall',
    'Guido van Rossum', 'XML', 'HTML', 'cgi', 'cgi-bin', 'Amiga', 'Macintosh',
    'Dell', 'Microsoft', 'firewall', 'server', 'Linux', 'Unix', 'MacOS',
    'BeOS', 'AS/400', 'sendmail', 'TCP/IP', 'SMTP', 'RFC822-compliant',
    'dynamic', 'Internet', 'A/UX', 'Amiga OS', 'BIOS', 'boot managers',
    'CP/M', 'DOS', 'file system', 'FreeBSD', 'Freeware', 'GEOS', 'GNU',
    'Hurd', 'Linux', 'Mach', 'Macintosh OS', 'mailing lists', 'Minix',
    'Multics', 'NetWare', 'NextStep', 'OS/2', 'Plan 9', 'Realtime', 'UNIX',
    'VMS', 'Windows', 'X Windows', 'Xinu', 'security', 'Intel', 'encryption',
    'PGP', 'software', 'ActiveX', 'AppleScript', 'awk', 'BETA', 'COBOL',
    'Delphi', 'Dylan', 'Eiffel', 'extreme programming', 'Forth', 'Fortran',
    'functional languages', 'Guile', 'format your hard drive', 'Icon', 'IDL',
    'Infer', 'Intercal', 'J', 'Java', 'JavaScript', 'CD-ROM', 'JCL', 'Lisp',
    '"literate programming"', 'Logo', 'MUMPS', 'C: drive', 'Modula-2',
    'Modula-3', 'Oberon', 'Occam', 'OpenGL', 'parallel languages', 'Pascal',
    'Perl', 'PL/I', 'PostScript', 'Prolog', 'hardware',
    'Blue Screen of Death', 'Rexx', 'RPG', 'Scheme', 'scripting languages',
    'Smalltalk', 'crash!', 'disc crash', 'Spanner', 'SQL', 'Tcl/Tk', 'TeX',
    'TOM', 'Visual', 'Visual Basic', '4GL', 'VRML',
    'Virtual Reality Modeling Language', 'difference engine',
    '...went into "yo-yo mode"', 'Sun', 'Sun Microsystems',
    'Hewlett Packard', 'output device', 'CPU', 'memory', 'registers',
    'monitor', 'TFT display', 'plasma screen', 'bug report', '"mis-feature"',
    '...millions of bugs!', 'pizza', '"illiterate programming"',
    '...lots of pizza!', 'pepperoni pizza', 'coffee', 'Jolt Cola[TM]',
    'beer', 'BEER!']

#theme three - 'blah' - for when you want to be subtle. :-)
BLAH = ['Blah', 'BLAH', 'blahblah', 'blahblahblah', 'blah-blah', 'blah!',
    '"Blah Blah Blah"', 'blah-de-blah', 'blah?', 'blah!!!', 'blah...',
    'Blah.', 'blah;', 'blah, Blah, BLAH!', 'Blah!!!']

#theme four - 'buzzword bingo' time!
BUZZWORD = ['intellectual capital', 'market segment', 'flattening',
    'regroup', 'platform', 'client-based', 'long-term', 'proactive',
    'quality vector', 'out of the loop', 'implement', 'streamline',
    'cost-centered', 'phase', 'synergy', 'synergize', 'interactive',
    'facilitate', 'appropriate', 'goal-setting', 'empowering',
    'low-risk high-yield', 'peel the onion', 'goal', 'downsize',
    'result-driven', 'conceptualize', 'multidisciplinary', 'gap analysis',
    'dysfunctional', 'networking', 'knowledge management', 'goal-setting',
    'mastery learning', 'communication', 'real-estate', 'quarterly',
    'scalable', 'Total Quality Management', 'best of breed', 'nimble',
    'monetize', 'benchmark', 'hardball', 'client-centered',
    'vision statement', 'empowerment', 'lean & mean', 'credibility',
    'synergistic', 'backward-compatible', 'hardball',
    'stretch the envelope', 'bleeding edge', 'networking', 'motivation',
    'best practice', 'best of breed', 'implementation',
    'Total Quality Management', 'undefined', 'disintermediate', 'mindset',
    'architect', 'gap analysis', 'morale', 'objective', 'projection',
    'contribution', 'proactive', 'go the extra mile', 'dynamic',
    'world class', 'real estate', 'quality vector', 'credibility',
    'appropriate', 'platform', 'projection', 'mastery learning',
    'recognition', 'quality', 'scenario', 'performance based',
    'solutioning', 'go the extra mile', 'downsize', 'phase', 'networking',
    'experiencing slippage', 'knowledge management', 'high priority',
    'process', 'ethical', 'value-added', 'implement', 're-factoring',
    're-branding', 'embracing change']

#theme five - Star Trek
STARTREK = ['Starfleet', 'Klingon', 'Romulan', 'Cardassian', 'Vulcan',
    'Benzite', 'IKV Pagh', 'emergency transponder',
    'United Federation of Planets', 'Bolian', "K'Vort Class Bird-of-Prey",
    'USS Enterprise', 'USS Intrepid', 'USS Reliant', 'USS Voyager',
    'Starfleet Academy', 'Captain Picard', 'Captain Janeway', 'Tom Paris',
    'Harry Kim', 'Counsellor Troi', 'Lieutenant Worf',
    'Lieutenant Commander Data', 'Dr. Beverly Crusher', 'Admiral Nakamura',
    'Irumodic Syndrome', 'Devron system', 'Admiral Pressman',
    'asteroid field', 'sensor readings', 'Binars', 'distress signal',
    'shuttlecraft', 'cloaking device', 'shuttle bay 2', 'Dr. Pulaski',
    'Lwaxana Troi', 'Pacifica', 'William Riker', "Chief O'Brian",
    'Soyuz class science vessel', 'Wolf-359', 'Galaxy class vessel',
    'Utopia Planitia yards', 'photon torpedo', 'Archer IV', 'quantum flux',
    'spacedock', 'Risa', 'Deep Space Nine', 'blood wine',
    'quantum torpedoes', 'holodeck', 'Romulan Warbird', 'Betazoid',
    'turbolift', 'battle bridge', 'Memory Alpha', '...with a phaser!',
    'Romulan ale', 'Ferrengi', 'Klingon opera', 'Quark', 'wormhole',
    'Bajoran', 'cruiser', 'warship', 'battlecruiser', '"Intruder alert!"',
    'scout ship', 'science vessel', '"Borg Invasion imminent!" ',
    '"Abandon ship!"', 'Red Alert!', 'warp-core breech',
    '"All hands abandon ship! This is not a drill!"']

#theme six - print-related terms
PRINTING = ['points', 'picas', 'leading', 'kerning', 'CMYK', 'offset litho',
    'type', 'font family', 'typography', 'type designer', 'baseline',
    'white-out type', 'WOB', 'bicameral', 'bitmap', 'blockletter', 'bleed',
    'margin', 'body', 'widow', 'orphan', 'cicero', 'cursive', 'letterform',
    'sidehead', 'dingbat', 'leader', 'DPI', 'drop-cap', 'paragraph', 'En',
    'Em', 'flush left', 'left justified', 'right justified', 'centered',
    'italic', 'Latin letterform', 'ligature', 'uppercase', 'lowercase',
    'serif', 'sans-serif', 'weight', 'type foundry', 'fleuron', 'folio',
    'gutter', 'whitespace', 'humanist letterform', 'caption', 'page',
    'frame', 'ragged setting', 'flush-right', 'rule', 'drop shadows',
    'prepress', 'spot-colour', 'duotones', 'colour separations',
    'four-colour printing', 'Pantone[TM]', 'service bureau', 'imagesetter']

#it had to be done!...
#theme seven - the "full Monty"!
PYTHON = ['Good evening ladies and Bruces', 'I want to buy some cheese',
    'You do have some cheese, do you?',
    "Of course sir, it's a cheese shop sir, we've got...",
    'discipline?... naked? ... With a melon!?', 'The Church Police!!',
    "There's a dead bishop on the landing",
    'Would you like a twist of lemming sir?',
    '"Conquistador Coffee brings a new meaning to the word vomit"',
    'Your lupins please',
    'Crelm Toothpaste, with the miracle ingredient Fraudulin',
    "Well there's the first result and the Silly Party has held Leicester.",
    'Hello, I would like to buy a fish license please',
    "Look, it's people like you what cause unrest!",
    "When we got home, our Dad would thrash us to sleep with his belt!",
    'Luxury', "Gumby Brain Specialist", "My brain hurts!!!",
    "My brain hurts too.", "How not to be seen",
    "In this picture there are 47 people. None of them can be seen",
    "Mrs Smegma, will you stand up please?",
    "Mr. Nesbitt has learned the first lesson of 'Not Being Seen', not to stand up.",
    "My hovercraft is full of eels", "Ah. You have beautiful thighs.",
    "My nipples explode with delight",
    "Drop your panties Sir William, I cannot wait 'til lunchtime",
    "I'm a completely self-taught idiot.",
    "I always wanted to be a lumberjack!!!",
    "Told you so!! Oh, coitus!!", "",
    "Nudge nudge?", "Know what I mean!", "Nudge nudge, nudge nudge?",
    "Say no more!!",
    "Hello, well it's just after 8 o'clock, and time for the penguin on top of your television set to explode",
    "Oh, intercourse the penguin!!",
    "Funny that penguin being there, isn't it?",
    "I wish to register a complaint.",
    "Now that's what I call a dead parrot", "Pining for the fjords???",
    "No, that's not dead, it's ,uhhhh, resting", "This is an ex-parrot!!",
    "That parrot is definitely deceased.",
    "No, no, no - it's spelt Raymond Luxury Yach-t, but it's pronounced 'Throatwobbler Mangrove'.",
    "You're a very silly man and I'm not going to interview you.",
    "No Mungo... never kill a customer."
    "And I'd like to conclude by putting my finger up my nose",
    "egg and Spam", "egg bacon and Spam", "egg bacon sausage and Spam",
    "Spam bacon sausage and Spam", "Spam egg Spam Spam bacon and Spam",
    "Spam sausage Spam Spam Spam bacon Spam tomato and Spam",
    "Spam Spam Spam egg and Spam",
    "Spam Spam Spam Spam Spam Spam baked beans Spam Spam Spam",
    "Spam!!", "I don't like Spam!!!",
    "You can't have egg, bacon, Spam and sausage without the Spam!",
    "I'll have your Spam. I Love it!",
    "I'm having Spam Spam Spam Spam Spam Spam Spam baked beans Spam Spam Spam and Spam",
    "Have you got anything without Spam?",
    "There's Spam egg sausage and Spam, that's not got much Spam in it.",
    "No one expects the Spanish Inquisition!!",
    "Our weapon is surprise, surprise and fear!",
    "Get the comfy chair!",
    "Amongst our weaponry are such diverse elements as: fear, surprise, ruthless efficiency, an almost fanatical devotion to the Pope, and nice red uniforms - Oh damn!",
    "Nobody expects the... Oh bugger!",
    "What swims in the sea and gets caught in nets? Henri Bergson?",
    "Goats. Underwater goats with snorkels and flippers?",
    "A buffalo with an aqualung?",
    "Dinsdale was a looney, but he was a happy looney.", "Dinsdale!!",
    "The 127th Upper-Class Twit of the Year Show", "What a great Twit!",
    "thought by many to be this year's outstanding twit",
    "...and there's a big crowd here today to see these prize idiots in action.",
    "And now for something completely different.",
    "Stop that, it's silly",
    "We interrupt this program to annoy you and make things generally irritating",
    "This depraved and degrading spectacle is going to stop right now, do you hear me?",
    "Stop right there!",
    "This is absolutely disgusting and I'm not going to stand for it",
    "I object to all this sex on the television. I mean, I keep falling off",
    "Right! Stop that, it's silly. Very silly indeed", "Very silly indeed",
    "Lemon curry?",
    "And now for something completely different, a man with 3 buttocks",
    "I've heard of unisex, but I've never had it",
    "That's the end, stop the program! Stop it!"]

leadins = [
    "To characterize a linguistic level L,",
    "On the other hand,",
    "This suggests that",
    "It appears that",
    "Furthermore,",
    "We will bring evidence in favor of the following thesis: ",
    "To provide a constituent structure for T(Z,K),",
    "From C1, it follows that",
    "For any transformation which is sufficiently diversified in application to be of any interest,",
    "Analogously,",
    "Clearly,",
    "Note that",
    "Of course,",
    "Suppose, for instance, that",
    "Thus",
    "With this clarification,",
    "Conversely,",
    "We have already seen that",
    "By combining adjunctions and certain deformations,",
    "I suggested that these results would follow from the assumption that",
    "If the position of the trace in (99c) were only relatively inaccessible to movement,",
    "However, this assumption is not correct, since",
    "Comparing these examples with their parasitic gap counterparts in (96) and (97), we see that",
    "In the discussion of resumptive pronouns following (81),",
    "So far,",
    "Nevertheless,",
    "For one thing,",
    "Summarizing, then, we assume that",
    "A consequence of the approach just outlined is that",
    "Presumably,",
    "On our assumptions,",
    "It may be, then, that",
    "It must be emphasized, once again, that",
    "Let us continue to suppose that",
    "Notice, incidentally, that",
    "A majority of informed linguistic specialists agree that",
    ]

subjects = [
    "the notion of level of grammaticalness",
    "a case of semigrammaticalness of a different sort",
    "most of the methodological work in modern linguistics",
    "a subset of English sentences interesting on quite independent grounds",
    "the natural general principle that will subsume this case",
    "an important property of these three types of EC",
    "any associated supporting element",
    "the appearance of parasitic gaps in domains relatively inaccessible to ordinary extraction",
    "the speaker-hearer's linguistic intuition",
    "the descriptive power of the base component",
    "the earlier discussion of deviance",
    "this analysis of a formative as a pair of sets of features",
    "this selectionally introduced contextual feature",
    "a descriptively adequate grammar",
    "the fundamental error of regarding functional notions as categorial",
    "relational information",
    "the systematic use of complex symbols",
    "the theory of syntactic features developed earlier",
    ]

verbs = [
    "can be defined in such a way as to impose",
    "delimits",
    "suffices to account for",
    "cannot be arbitrary in",
    "is not subject to",
    "does not readily tolerate",
    "raises serious doubts about",
    "is not quite equivalent to",
    "does not affect the structure of",
    "may remedy and, at the same time, eliminate",
    "is not to be considered in determining",
    "is to be regarded as",
    "is unspecified with respect to",
    "is, apparently, determined by",
    "is necessary to impose an interpretation on",
    "appears to correlate rather closely with",
    "is rather different from",
    ]

objects = [
    "problems of phonemic and morphological analysis.",
    "a corpus of utterance tokens upon which conformity has been defined by the paired utterance test.",
    "the traditional practice of grammarians.",
    "the levels of acceptability from fairly high (e.g. (99a)) to virtual gibberish (e.g. (98d)).",
    "a stipulation to place the constructions into these various categories.",
    "a descriptive fact.",
    "a parasitic gap construction.",
    "the extended c-command discussed in connection with (34).",
    "the ultimate standard that determines the accuracy of any proposed grammar.",
    "the system of base rules exclusive of the lexicon.",
    "irrelevant intervening contexts in selectional rules.",
    "nondistinctness in the sense of distinctive feature theory.",
    "a general convention regarding the forms of the grammar.",
    "an abstract underlying order.",
    "an important distinction in language use.",
    "the requirement that branching is not tolerated within the dominance scope of a complex symbol.",
    "the strong generative capacity of the theory.",
    ]

def format_wisdom(text, line_length=72):
    try:
        import textwrap
        return textwrap.fill(text, line_length)
    except:
        return text

def chomsky(times = 1):
    if not isinstance(times, int):
        return format_wisdom(__doc__)
    import random
    prevparts = []
    newparts = []
    output = []
    for i in xrange(times):
        for partlist in (leadins, subjects, verbs, objects):
            while 1:
                part = random.choice(partlist)
                if part not in prevparts:
                    break
            newparts.append(part)
        output.append(' '.join(newparts))
        prevparts = newparts
        newparts = []
    return format_wisdom(' '.join(output))

from reportlab import rl_config
if rl_config.invariant:
    if not getattr(rl_config, '_random', None):
        rl_config._random = 1
        import random
        random.seed(2342471922L)
        del random
del rl_config

def randomText(theme=STARTUP, sentences=5):
    #this may or may not be appropriate in your company
    if type(theme) == type(''):
        if theme.lower() == 'chomsky':
            return chomsky(sentences)
        elif theme.upper() in ('STARTUP', 'COMPUTERS', 'BLAH', 'BUZZWORD', 'STARTREK', 'PRINTING', 'PYTHON'):
            theme = globals()[theme.upper()]
        else:
            raise ValueError('Unknown theme "%s"' % theme)

    from random import randint, choice

    RANDOMWORDS = theme

    #sentences = 5
    output = ""
    for sentenceno in range(randint(1, sentences)):
        output = output + 'Blah'
        for wordno in range(randint(10, 25)):
            if randint(0, 4) == 0:
                word = choice(RANDOMWORDS)
            else:
                word = 'blah'
            output = output + ' ' + word
        output = output + '. '
    return output

if __name__ == '__main__':
    print chomsky(5)
reportlab/lib/randomtext.py
Python
bsd-3-clause
19,004
[ "Brian", "Galaxy" ]
5ce0b41f65806a1125f98cf3d6008dd8764d68cce408c66d29059fab4fb1b3b6
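A short usage sketch for the record above, following its own docstring and __main__ block (Python 2 print statements, to match the file; output varies because the generator is random):

from reportlab.lib import randomtext

print randomtext.randomText(randomtext.STARTREK, 5)   # up to 5 'Blah blah ...' sentences
print randomtext.randomText('chomsky', 3)             # a theme may also be named by string
print randomtext.chomsky(2)                           # wrapped Chomsky-style nonsense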
# Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia # This file is part of ngs_crumbs. # ngs_crumbs is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # ngs_crumbs is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with ngs_crumbs. If not, see <http://www.gnu.org/licenses/>. import unittest import os.path from tempfile import NamedTemporaryFile from subprocess import check_output from cStringIO import StringIO from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from crumbs.seq.trim import (TrimLowercasedLetters, TrimEdges, TrimOrMask, TrimByQuality, TrimWithBlastShort, seq_to_trim_packets, TrimMatePairChimeras) from crumbs.utils.bin_utils import SEQ_BIN_DIR from crumbs.utils.tags import (SEQRECORD, SEQITEM, TRIMMING_RECOMMENDATIONS, VECTOR, ORPHAN_SEQS, SEQS_PASSED, OTHER) from crumbs.seq.seq import (get_str_seq, get_annotations, get_int_qualities, get_name) from crumbs.seq.seqio import read_seq_packets, read_seqs from crumbs.seq.seq import SeqWrapper, SeqItem from crumbs.utils.test_utils import TEST_DATA_DIR FASTQ = '@seq1\naTCgt\n+\n?????\n@seq2\natcGT\n+\n?????\n' FASTQ2 = '@seq1\nATCGT\n+\nA???A\n@seq2\nATCGT\n+\n?????\n' FASTQ3 = '@seq1\nAAAAAATCGTTTTTTT\n+\n00000A???A000000\n' # pylint: disable=R0201 # pylint: disable=R0904 def _make_fhand(content=''): 'It makes temporary fhands' fhand = NamedTemporaryFile() fhand.write(content) fhand.flush() return fhand class TrimTest(unittest.TestCase): 'It tests the trim functions' @staticmethod def test_trim_seqs(): 'It tests the trim seq function' seqs = [] seqs.append([SeqWrapper(SEQRECORD, SeqRecord(Seq('aaCTTTC')), None)]) seqs.append([SeqWrapper(SEQRECORD, SeqRecord(Seq('CTTCaa')), None)]) seqs.append([SeqWrapper(SEQRECORD, SeqRecord(Seq('aaCTCaa')), None)]) seqs.append([SeqWrapper(SEQRECORD, SeqRecord(Seq('actg')), None)]) seqs.append([SeqWrapper(SEQRECORD, SeqRecord(Seq('AC')), None)]) trim_packet = {SEQS_PASSED: seqs, ORPHAN_SEQS: []} trim_lowercased_seqs = TrimLowercasedLetters() trim = TrimOrMask() # pylint: disable=W0141 trim_packet = trim(trim_lowercased_seqs(trim_packet)) res = [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l] assert res == ['CTTTC', 'CTTC', 'CTC', 'AC'] seqs = [] seq = SeqItem('s', ['>s\n', 'aaCTTTC\n']) seqs.append([SeqWrapper(SEQITEM, seq, 'fasta')]) trim_packet = {SEQS_PASSED: seqs, ORPHAN_SEQS: []} trim_packet = trim(trim_lowercased_seqs(trim_packet)) res = [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l] assert res == ['CTTTC'] # with pairs seq = SeqItem('s.f', ['>s.f\n', 'aaCTTTC\n']) seq1 = SeqItem('s.r', ['>s.r\n', 'aaCTTTC\n']) seq2 = SeqItem('s1.f', ['>s1.f\n', 'aa\n']) seq3 = SeqItem('s1.r', ['>s1.r\n', 'aaCTTTC\n']) seqs = [] seqs.append([SeqWrapper(SEQITEM, seq, 'fasta'), SeqWrapper(SEQITEM, seq1, 'fasta')]) seqs.append([SeqWrapper(SEQITEM, seq2, 'fasta'), SeqWrapper(SEQITEM, seq3, 'fasta')]) trim_packet = {SEQS_PASSED: seqs, ORPHAN_SEQS: []} trim_lowercased_seqs = TrimLowercasedLetters() trim = TrimOrMask() # pylint: disable=W0141 trim_packet = trim(trim_lowercased_seqs(trim_packet)) res = [get_str_seq(s) for l in 
trim_packet[SEQS_PASSED] for s in l] orphan_res = [get_str_seq(s) for s in trim_packet[ORPHAN_SEQS]] assert orphan_res == ['CTTTC'] assert ['CTTTC', 'CTTTC'] == res # no drag trim_packet = {SEQS_PASSED: seqs, ORPHAN_SEQS: []} trim_lowercased_seqs = TrimLowercasedLetters() trim = TrimOrMask() # pylint: disable=W0141 trim_packet = trim(trim_lowercased_seqs(trim_packet)) res = [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l] orphan_res = [get_name(s) for s in trim_packet[ORPHAN_SEQS]] assert orphan_res == ['s1.r'] assert ['CTTTC', 'CTTTC'] == res class TrimByCaseBinTest(unittest.TestCase): 'It tests the trim_by_case binary' def test_trim_case_bin(self): trim_bin = os.path.join(SEQ_BIN_DIR, 'trim_by_case') assert 'usage' in check_output([trim_bin, '-h']) fastq_fhand = _make_fhand(FASTQ) result = check_output([trim_bin, fastq_fhand.name]) assert '@seq1\nTC\n+' in result def test_trim_in_parallel(self): 'It trims sequences in parallel' trim_bin = os.path.join(SEQ_BIN_DIR, 'trim_by_case') fastq_fhand = _make_fhand(FASTQ) result = check_output([trim_bin, '-p', '2', fastq_fhand.name]) assert '@seq1\nTC\n+' in result def test_trim_in_pairs(self): trim_bin = os.path.join(SEQ_BIN_DIR, 'trim_by_case') content = '>s.f\naattACT\n>s.r\naattACT\n>s1.f\naattACT\n>s1.r\naatt\n' fastq_fhand = _make_fhand(content) orphan_fhand = NamedTemporaryFile() result = check_output([trim_bin, fastq_fhand.name, '--paired_reads', '--orphan_file', orphan_fhand.name]) assert ">s.f\nACT\n>s.r\nACT" in result assert open(orphan_fhand.name).read() == '>s1.f\nACT\n' # no orphan file fastq_fhand = _make_fhand(content) result = check_output([trim_bin, fastq_fhand.name, '--paired_reads']) assert ">s.f\nACT\n>s.r\nACT" in result class TrimEdgesTest(unittest.TestCase): 'It test the fixed number of bases trimming' def _some_seqs(self): 'It returns some seqrecords.' 
seqs = [] seq = SeqRecord(Seq('ACCG'), letter_annotations={'dummy': 'dddd'}) seq = SeqWrapper(SEQRECORD, seq, None) seqs.append([seq]) seq = SeqRecord(Seq('AAACCCGGG')) seq = SeqWrapper(SEQRECORD, seq, None) seqs.append([seq]) trim_packet = {SEQS_PASSED: seqs, ORPHAN_SEQS: []} return trim_packet def test_edge_trimming(self): 'It trims the edges' trim = TrimOrMask() trim_edges = TrimEdges(left=1) trim_packet = trim(trim_edges(self._some_seqs())) res = [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l] assert res == ['CCG', 'AACCCGGG'] trim_edges = TrimEdges(right=1) trim_packet = trim(trim_edges(self._some_seqs())) res = [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l] assert res == ['ACC', 'AAACCCGG'] trim_edges = TrimEdges(left=1, right=1) trim_packet = trim(trim_edges(self._some_seqs())) res = [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l] assert res == ['CC', 'AACCCGG'] trim_edges = TrimEdges(left=2, right=2) trim_packet = trim(trim_edges(self._some_seqs())) res = [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l] assert res == ['ACCCG'] trim_edges = TrimEdges(left=3, right=3) trim_packet = trim(trim_edges(self._some_seqs())) res = [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l] assert res == ['CCC'] trim = TrimOrMask(mask=True) trim_edges = TrimEdges(left=1) trim_packet = trim(trim_edges(self._some_seqs())) res = [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l] assert res == ['aCCG', 'aAACCCGGG'] trim_edges = TrimEdges(right=1) trim_packet = trim(trim_edges(self._some_seqs())) res = [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l] assert res == ['ACCg', 'AAACCCGGg'] trim_edges = TrimEdges(left=1, right=1) trim_packet = trim(trim_edges(self._some_seqs())) res = [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l] assert res == ['aCCg', 'aAACCCGGg'] trim_edges = TrimEdges(left=2, right=2) trim_packet = trim(trim_edges(self._some_seqs())) res = [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l] assert res == ['accg', 'aaACCCGgg'] trim_edges = TrimEdges(left=3, right=3) trim_packet = trim(trim_edges(self._some_seqs())) res = [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l] assert res == ['accg', 'aaaCCCggg'] # test overlapping mask trim1 = TrimEdges(left=3, right=3) trim2 = TrimEdges(left=4, right=4) trim_packet = trim(trim2(trim1(self._some_seqs()))) res = [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l] assert res == ['accg', 'aaacCcggg'] # With a SeqItem trim = TrimOrMask(mask=False) trim_edges = TrimEdges(left=1, right=1) seq = SeqItem('s', ['>s\n', 'ACTTTC\n']) seqs = [[SeqWrapper(SEQITEM, seq, 'fasta')]] trim_packet = {SEQS_PASSED: seqs, ORPHAN_SEQS: []} trim_packet = trim(trim_edges(trim_packet)) res = [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l] assert res == ['CTTT'] trim = TrimOrMask(mask=True) seq = SeqItem('s', ['>s\n', 'ACTTTC\n']) seqs = [[SeqWrapper(SEQITEM, seq, 'fasta')]] trim_packet = {SEQS_PASSED: seqs, ORPHAN_SEQS: []} trim_packet = trim(trim_edges(trim_packet)) res = [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l] assert res == ['aCTTTc'] def test_trim_edges_bin(self): 'It tests the trim_edges binary' trim_bin = os.path.join(SEQ_BIN_DIR, 'trim_edges') assert 'usage' in check_output([trim_bin, '-h']) fastq_fhand = _make_fhand(FASTQ2) result = check_output([trim_bin, fastq_fhand.name]) assert '@seq1\nATCGT\n+' in result result = check_output([trim_bin, '-l', '1', '-r', '1', fastq_fhand.name]) assert 
'@seq1\nTCG\n+\n???\n' in result
        result = check_output([trim_bin, '-l', '1', '-r', '1', '-m',
                               fastq_fhand.name])
        assert '@seq1\naTCGt\n+\nA???A\n' in result


class TrimAndMaskTest(unittest.TestCase):
    'It tests the trimming and masking according to the recommendations.'

    def test_trimming(self):
        'The sequences are trimmed according to the recommendations.'
        seq1 = 'gggtctcatcatcaggg'.upper()
        seq = SeqRecord(Seq(seq1), annotations={TRIMMING_RECOMMENDATIONS: {}})
        seq = SeqWrapper(SEQRECORD, seq, None)
        seqs = [seq]
        trim_packet = {SEQS_PASSED: [seqs], ORPHAN_SEQS: []}
        trim_rec = get_annotations(seq)[TRIMMING_RECOMMENDATIONS]
        seq_trimmer = TrimOrMask()

        trim_rec['vector'] = [(0, 3), (8, 13)]
        get_annotations(seq)[TRIMMING_RECOMMENDATIONS] = trim_rec
        trim_packet2 = seq_trimmer(trim_packet)
        res = [get_str_seq(s) for l in trim_packet2[SEQS_PASSED] for s in l]
        assert res == ['CTCA']

        trim_rec['vector'] = [(0, 0), (8, 13)]
        get_annotations(seq)[TRIMMING_RECOMMENDATIONS] = trim_rec
        trim_packet2 = seq_trimmer(trim_packet)
        res = [get_str_seq(s) for l in trim_packet2[SEQS_PASSED] for s in l]
        assert res == ['GGTCTCA']

        trim_rec['vector'] = [(0, 1), (8, 12)]
        trim_rec['quality'] = [(1, 8), (13, 17)]
        get_annotations(seq)[TRIMMING_RECOMMENDATIONS] = trim_rec
        trim_packet2 = seq_trimmer(trim_packet)
        assert not trim_packet2[SEQS_PASSED]

        trim_rec['vector'] = [(0, 0), (8, 13)]
        trim_rec['quality'] = []
        get_annotations(seq)[TRIMMING_RECOMMENDATIONS] = trim_rec
        trim_packet2 = seq_trimmer(trim_packet)
        res = [get_str_seq(s) for l in trim_packet2[SEQS_PASSED] for s in l]
        assert res == ['GGTCTCA']
        assert TRIMMING_RECOMMENDATIONS not in get_annotations(trim_packet2[SEQS_PASSED][0][0])


class TrimByQualityTest(unittest.TestCase):
    'It tests the quality trimming'

    def test_quality_trimming(self):
        'It trims by quality'
        trim = TrimOrMask()
        trim_quality = TrimByQuality(window=5, threshold=30)
        seq = SeqRecord(Seq('ACTGCTGCATAAAA'))
        quals = [10, 10, 20, 30, 30, 30, 40, 40, 30, 30, 20, 20, 10, 10]
        seq.letter_annotations['phred_quality'] = quals
        seq = SeqWrapper(SEQRECORD, seq, None)
        trim_packet = {SEQS_PASSED: [[seq]], ORPHAN_SEQS: []}
        trim_packet2 = trim(trim_quality(trim_packet))
        seq2 = trim_packet2[SEQS_PASSED][0][0]
        assert get_int_qualities(seq2) == [20, 30, 30, 30, 40, 40, 30, 30, 20]

        # all bad
        trim_quality = TrimByQuality(window=5, threshold=60)
        trim_packet = {SEQS_PASSED: [[seq]], ORPHAN_SEQS: []}
        trim_packet2 = trim(trim_quality(trim_packet))
        assert not trim_packet2[SEQS_PASSED]

        # all OK
        trim_quality = TrimByQuality(window=5, threshold=5)
        trim_packet = {SEQS_PASSED: [[seq]], ORPHAN_SEQS: []}
        trim_packet2 = trim(trim_quality(trim_packet))
        seq2 = trim_packet2[SEQS_PASSED][0][0]
        assert get_int_qualities(seq2) == quals

        seq = SeqRecord(Seq('ACTGCTGCATAA'))
        quals = [20, 20, 20, 60, 60, 60, 60, 60, 20, 20, 20, 20]
        trim_quality = TrimByQuality(window=5, threshold=50)
        seq.letter_annotations['phred_quality'] = quals
        seq = SeqWrapper(SEQRECORD, seq, None)
        trim_packet = {SEQS_PASSED: [[seq]], ORPHAN_SEQS: []}
        trim_packet2 = trim(trim_quality(trim_packet))
        seq2 = trim_packet2[SEQS_PASSED][0][0]
        assert get_int_qualities(seq2) == [20, 60, 60, 60, 60, 60, 20]

        quals = [40, 18, 10, 40, 40, 5, 8, 30, 14, 3, 40, 40, 40, 11, 6, 5,
                 3, 20, 10, 12, 8, 5, 4, 7, 1]
        seq = SeqRecord(Seq('atatatatagatagatagatagatg'))
        seq.letter_annotations['phred_quality'] = quals
        seq = SeqWrapper(SEQRECORD, seq, None)
        trim_packet = {SEQS_PASSED: [[seq]], ORPHAN_SEQS: []}
        trim_quality = TrimByQuality(window=5, threshold=25)
        trim_packet2 = trim(trim_quality(trim_packet))
        seq2 = trim_packet2[SEQS_PASSED][0][0]
        assert get_int_qualities(seq2) == [40, 18, 10, 40, 40]

        quals = [40, 40, 13, 11, 40, 9, 40, 4, 27, 38, 40, 4, 11, 40, 40,
                 10, 10, 21, 3, 40, 9, 9, 12, 10, 9]
        seq = SeqRecord(Seq('atatatatatatatatatatatata'))
        seq.letter_annotations['phred_quality'] = quals
        seq = SeqWrapper(SEQRECORD, seq, None)
        trim_quality = TrimByQuality(window=5, threshold=25)
        trim_packet = {SEQS_PASSED: [[seq]], ORPHAN_SEQS: []}
        trim_packet2 = trim(trim_quality(trim_packet))
        seq2 = trim_packet2[SEQS_PASSED][0][0]
        expected = [40, 4, 27, 38, 40]
        assert get_int_qualities(seq2) == expected

        quals = [40, 40, 13, 11, 40, 9, 40, 4, 27, 38, 40, 4, 11, 40, 40,
                 10, 10, 21, 3, 40, 9, 9, 12, 10, 9]
        seq = SeqRecord(Seq('atatatatatatatatatatatata'))
        seq.letter_annotations['phred_quality'] = quals
        seq = SeqWrapper(SEQRECORD, seq, None)
        trim_packet = {SEQS_PASSED: [[seq]], ORPHAN_SEQS: []}
        trim_quality = TrimByQuality(window=5, threshold=25, trim_left=False)
        trim_packet2 = trim(trim_quality(trim_packet))
        seq2 = trim_packet2[SEQS_PASSED][0][0]
        assert get_int_qualities(seq2) == [40, 40, 13, 11, 40, 9, 40, 4, 27,
                                           38, 40]

        quals = [40, 40, 13, 11, 40, 9, 40, 4, 27, 38, 40, 4, 11, 40, 40,
                 10, 10, 21, 3, 40, 9, 9, 12, 10, 9]
        seq = SeqRecord(Seq('atatatatatatatatatatatata'))
        seq.letter_annotations['phred_quality'] = quals
        seq = SeqWrapper(SEQRECORD, seq, None)
        trim_packet = {SEQS_PASSED: [[seq]], ORPHAN_SEQS: []}
        trim_quality = TrimByQuality(window=5, threshold=25, trim_right=False)
        trim_packet2 = trim(trim_quality(trim_packet))
        seq2 = trim_packet2[SEQS_PASSED][0][0]
        assert get_int_qualities(seq2) == [40, 4, 27, 38, 40, 4, 11, 40, 40,
                                           10, 10, 21, 3, 40, 9, 9, 12, 10,
                                           9]

        quals = [40, 40, 13, 11, 40, 9, 40, 4, 27, 38, 40, 4, 11, 40, 40,
                 10, 10, 21, 3, 40, 9, 9, 12, 10, 9]
        seq = SeqRecord(Seq('atatatatatatatatatatatata'))
        seq.letter_annotations['phred_quality'] = quals
        seq = SeqWrapper(SEQRECORD, seq, None)
        trim_quality = TrimByQuality(window=5, threshold=25, trim_right=False,
                                     trim_left=False)
        trim_packet = {SEQS_PASSED: [[seq]], ORPHAN_SEQS: []}
        trim_packet2 = trim(trim_quality(trim_packet))
        seq2 = trim_packet2[SEQS_PASSED][0][0]
        assert get_int_qualities(seq2) == quals

        # With SeqItems
        seq = SeqItem('s', ['@s\n', 'atatatatatatatatatatatata\n', '\n',
                            'II.,I*I%<GI%,II++6$I**-+*\n'])
        seq = SeqWrapper(SEQITEM, seq, 'fastq')
        trim_quality = TrimByQuality(window=5, threshold=25, trim_right=True,
                                     trim_left=False)
        trim_packet = {SEQS_PASSED: [[seq]], ORPHAN_SEQS: []}
        trim_packet2 = trim(trim_quality(trim_packet))
        seq2 = trim_packet2[SEQS_PASSED][0][0]
        assert seq2.object.lines[3] == 'II.,I*I%<GI\n'

    def test_trim_quality_bin(self):
        'It tests the trim_quality binary'
        trim_bin = os.path.join(SEQ_BIN_DIR, 'trim_quality')
        assert 'usage' in check_output([trim_bin, '-h'])

        fastq_fhand = _make_fhand(FASTQ2)
        result = check_output([trim_bin, fastq_fhand.name])
        assert '@seq1\nATCGT\n+' in result

        fastq_fhand = _make_fhand(FASTQ3)
        result = check_output([trim_bin, fastq_fhand.name])
        assert result == '@seq1\nAATCGTT\n+\n0A???A0\n'

        fastq_fhand = _make_fhand(FASTQ3)
        result = check_output([trim_bin, fastq_fhand.name, '-r'])
        assert result == '@seq1\nAATCGTTTTTTT\n+\n0A???A000000\n'

        fastq_fhand = _make_fhand(FASTQ3)
        result = check_output([trim_bin, fastq_fhand.name, '-l'])
        assert result == '@seq1\nAAAAAATCGTT\n+\n00000A???A0\n'

# pylint: disable=C0301
FASTQ4 = '''@HWI-ST1203:122:C130PACXX:4:1101:13499:4144 1:N:0:CAGATC
AAGCAGTGGTATCAAAGCAGAGTACTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTCCAACCCTTTGCTTTTTTTTTTTTTCGAGGAGGAGGGT + @@@DBDDDDHFBHGGHFEA@GG<?FHHIIIIIIIIIGCCCCCCCCCCCCCCCCBCCBC########################################### @HWI-ST1203:122:C130PACXX:4:1101:13623:4101 1:N:0:CAGATC AAGCAGTGGTATCAACGCAGAGTACATGGGCGAGAAGAAGGATCCAAGTGGTGCCAAGGTTACCAAATCTGCAGCCAAGAAGGCTGGAAAGTGAACCGTGC + CCCFFFFFHHHHHJJIJJJHIIHGIIIIIIIJGGHIIJGHJIJIGHFG@FHGGHIJHHHHHFFFFFDEEEEEEDDBDCBDDDDDDDBADCD>C@DCDD<<< @HWI-ST1203:122:C130PACXX:4:1101:13615:4163 1:N:0:CAGATC GGAAGAGGAACAAGTGAGCAGCAGGACTGTATGATATTCTCATCTGAAGACAGGGACCATCATATTCCCCGGGAAACTCCGATGCCAGAGTATTAGCATGC + @1?DFFFFGHHHHIBGGHGHGEICCAGHHCFGHHIGGHFIHIIIJJJIJIJJJIJIIIJJJJJJJICEEHHFDADBCCDDDDDBBDDCAB@CCDEEDDEDC ''' FASTQ5 = '''@HWI-ST1203:122:C130PACXX:4:1101:13499:4144 1:N:0:CAGATC TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTAGATGTGTATAAGAGACAGCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCAGATGTGTATA + @@@DBDDDDHFBHGGHFEA@GG<?FHHIIIIIIIIIGCCCCCCCCCCCCCCCCBCCBC########################################### @HWI-ST1203:122:C130PACXX:4:1101:13623:4101 1:N:0:CAGATC AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACTGTCTCTTATACACATCTAGATGTGTATAAGAGACAGTTTTTTTTTTTTTTTT + CCCFFFFFHHHHHJJIJJJHIIHGIIIIIIIJGGHIIJGHJIJIGHFG@FHGGHIJHHHHHFFFFFDEEEEEEDDBDCBDDDDDDDBADCD>C@DCDD<<< @HWI-ST1203:122:C130PACXX:4:1101:13615:4163 1:N:0:CAGATC GGAAGAGGAACAAGTGAGCAGCAGGACTGTATGATATTCTCATCTGAAGACAGGGACCATCATATTCCCCGGGAAACTCCGATGCCAGAGTATTAGCATGC + @1?DFFFFGHHHHIBGGHGHGEICCAGHHCFGHHIGGHFIHIIIJJJIJIJJJIJIIIJJJJJJJICEEHHFDADBCCDDDDDBBDDCAB@CCDEEDDEDC @HWI-ST1203:122:C130PACXX:4:1101:13615:4164 1:N:0:CAGATC AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACTGTCTCTTATACA + @1?DFFFFGHHHHIBGGHGHGEICCAGHHCFGHHIGGHFIHIIIJJJIJIJJJIJIIIJJJ ''' class TrimBlastShortTest(unittest.TestCase): 'It tests the blast short adaptor trimming' def test_blast_short_trimming(self): 'It trims oligos using blast-short' oligo1 = SeqRecord(Seq('AAGCAGTGGTATCAACGCAGAGTACATGGG')) oligo2 = SeqRecord(Seq('AAGCAGTGGTATCAACGCAGAGTACTTTTT')) oligo1 = SeqWrapper(SEQRECORD, oligo1, None) oligo2 = SeqWrapper(SEQRECORD, oligo2, None) adaptors = [oligo1, oligo2] blast_trim = TrimWithBlastShort(oligos=adaptors) fhand = StringIO(FASTQ4) seq_packets = read_seq_packets([fhand], prefered_seq_classes=[SEQRECORD]) trim_packets = list(seq_to_trim_packets(seq_packets)) trim_packets2 = blast_trim(trim_packets[0]) # It should trim the first and the second reads. res = [get_annotations(s).get(TRIMMING_RECOMMENDATIONS, {}).get(VECTOR, []) for l in trim_packets2[SEQS_PASSED] for s in l] assert res == [[(0, 29)], [(0, 29)], []] # With SeqItems oligo1 = SeqItem('oligo1', ['>oligo1\n', 'AAGCAGTGGTATCAACGCAGAGTACATGGG\n']) oligo2 = SeqItem('oligo2', ['>oligo2\n', 'AAGCAGTGGTATCAACGCAGAGTACTTTTT\n']) oligo1 = SeqWrapper(SEQITEM, oligo1, 'fasta') oligo2 = SeqWrapper(SEQITEM, oligo2, 'fasta') adaptors = [oligo1, oligo2] blast_trim = TrimWithBlastShort(oligos=adaptors) fhand = StringIO(FASTQ4) seq_packets = list(read_seq_packets([fhand], prefered_seq_classes=[SEQITEM])) trim_packets = list(seq_to_trim_packets(seq_packets)) trim_packets2 = blast_trim(trim_packets[0]) # It should trim the first and the second reads. 
res = [get_annotations(s).get(TRIMMING_RECOMMENDATIONS, {}).get(VECTOR, []) for l in trim_packets2[SEQS_PASSED] for s in l] assert res == [[(0, 29)], [(0, 29)], []] def test_trim_oligos_bin(self): 'It tests the trim_blast_short binary' trim_bin = os.path.join(SEQ_BIN_DIR, 'trim_blast_short') assert 'usage' in check_output([trim_bin, '-h']) fastq_fhand = _make_fhand(FASTQ4) result = check_output([trim_bin, '-l', 'AAGCAGTGGTATCAACGCAGAGTACATGGG', '-l', 'AAGCAGTGGTATCAACGCAGAGTACTTTTT', fastq_fhand.name]) assert '\nTTTTTTTTTTTTTTTTTTTT' in result assert '\nCGAGAAGAAGGATCCAAGT' in result class TrimChimericRegions(unittest.TestCase): def test_trim_chimeric_region(self): index_fpath = os.path.join(TEST_DATA_DIR, 'ref_example.fasta') query1 = '@seq2 f\nGGGATCGCAGACCCATCTCGTCAGCATGTACCCTTGCTACATTGAACTT' query1 += 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n' query1 += '+\n$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$' query1 += '$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n' query2 = '@seq2 r\nCATCATTGCATAAGTAACACTCAACCAACAGTGCTACAGGGTTGTAACG\n' query2 += '+\n$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n' query = query1 + query2 fhand = NamedTemporaryFile() fhand.write(query) fhand.flush() trim_chimeras = TrimMatePairChimeras(index_fpath) seq_packets = list(read_seq_packets([open(fhand.name)])) trim_packets = list(seq_to_trim_packets(seq_packets)) trim_packets2 = trim_chimeras(trim_packets[0]) # It should trim the first and the second reads. res = [get_annotations(s).get(TRIMMING_RECOMMENDATIONS, {}).get(OTHER, []) for l in trim_packets2[SEQS_PASSED] for s in l] assert res == [[(49, 105)], []] def test_trim_chimeras_bin(self): trim_chimeras_bin = os.path.join(SEQ_BIN_DIR, 'trim_mp_chimeras') assert 'usage' in check_output([trim_chimeras_bin, '-h']) index_fpath = os.path.join(TEST_DATA_DIR, 'ref_example.fasta') query1 = '@seq2 f\nGGGATCGCAGACCCATCTCGTCAGCATGTACCCTTGCTACATTGAACTT' query1 += 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n' query1 += '+\n$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$' query1 += '$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n' query2 = '@seq2 r\nCATCATTGCATAAGTAACACTCAACCAACAGTGCTACAGGGTTGTAACG\n' query2 += '+\n$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n' query = query1 + query2 in_fhand = NamedTemporaryFile() in_fhand.write(query) in_fhand.flush() out_fhand = NamedTemporaryFile() expected_seqs = ['GGGATCGCAGACCCATCTCGTCAGCATGTACCCTTGCTACATTGAACTT', 'CATCATTGCATAAGTAACACTCAACCAACAGTGCTACAGGGTTGTAACG'] cmd = [trim_chimeras_bin, in_fhand.name, '-r', index_fpath, '-o', out_fhand.name] # raw_input(" ".join(cmd)) check_output(cmd, stdin=in_fhand) counts = 0 for seq in read_seqs([open(out_fhand.name)]): assert get_str_seq(seq) in expected_seqs counts += 1 assert counts != 0 # With several threads cmd = [trim_chimeras_bin, in_fhand.name, '-r', index_fpath, '-o', out_fhand.name, '-p', '2'] check_output(cmd, stdin=in_fhand) counts = 0 for seq in read_seqs([open(out_fhand.name)]): assert get_str_seq(seq) in expected_seqs counts += 1 assert counts != 0 # class TrimNexteraAdaptersTest(unittest.TestCase): # def test_nextera_trimming(self): # TODO: it is not working OK # #check for seq_record. 
It does not work # 'It trims oligos using blast-short' # # oligo1 = SeqRecord(Seq('AGATGTGTATAAGAGACAG')) # oligo2 = SeqRecord(Seq('CTGTCTCTTATACACATCT')) # oligo1 = SeqWrapper(SEQRECORD, oligo1, None) # oligo2 = SeqWrapper(SEQRECORD, oligo2, None) # # adaptors = [oligo1, oligo2] # # blast_trim = TrimNexteraAdapters(oligos=adaptors) # fhand = StringIO(FASTQ5) # seq_packets = read_seq_packets([fhand], # prefered_seq_classes=[SEQRECORD]) # trim_packets = list(seq_to_trim_packets(seq_packets)) # trim_packets2 = blast_trim(trim_packets[0]) # # It should trim the first and the second reads. # res = [get_annotations(s).get(TRIMMING_RECOMMENDATIONS, {}).get(OTHER, # []) # for l in trim_packets2[SEQS_PASSED] for s in l] # assert res == [[(39, 100)], [(47, 100)], [], [(42, 60)]] # # # With SeqItems # oligo1 = SeqItem('oligo1', ['>oligo1\n', 'AGATGTGTATAAGAGACAG\n']) # oligo2 = SeqItem('oligo2', ['>oligo2\n', 'CTGTCTCTTATACACATCT\n']) # oligo1 = SeqWrapper(SEQITEM, oligo1, 'fasta') # oligo2 = SeqWrapper(SEQITEM, oligo2, 'fasta') # # adaptors = [oligo1, oligo2] # # blast_trim = TrimNexteraAdapters(oligos=adaptors) # fhand = StringIO(FASTQ5) # seq_packets = list(read_seq_packets([fhand], # prefered_seq_classes=[SEQITEM])) # trim_packets = list(seq_to_trim_packets(seq_packets)) # trim_packets2 = blast_trim(trim_packets[0]) # # It should trim the first and the second reads. # res = [get_annotations(s).get(TRIMMING_RECOMMENDATIONS, {}).get(OTHER, # []) # for l in trim_packets2[SEQS_PASSED] for s in l] # assert res == [[(39, 100)], [(47, 100)], [], [(42, 60)]] # def test_trim_with_cutadapt(self): # #TODO finish it by importing the cutadapt code or remove it # in_fhand = NamedTemporaryFile(suffix='.fastq') # in_fhand.write(FASTQ5) # in_fhand.flush() # out_fhand = NamedTemporaryFile() # summary_fhand = NamedTemporaryFile() # # oligos = {_3END: ['CTGTCTCTTATACACATCT', 'AGATGTGTATAAGAGACAG']} # trim_with_cutadapt(in_fhand.name, out_fhand.name, oligos, # summary_fpath=summary_fhand.name) # trimmed_reads = read_seqs([open(out_fhand.name)]) # read3 = 'GGAAGAGGAACAAGTGAGCAGCAGGACTGTATGATATTCTCATCTGAAGACAGGGACCATC' # read3 += 'ATATTCCCCGGGAAACTCCGATGCCAGAGTATTAGCATGC' # expected_seqs = ['TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT', read3, # 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] # for seq in trimmed_reads: # assert get_str_seq(seq) in expected_seqs # def xtest_trim_nextera_adapters_bin(self): # 'It tests the trim nextera adapters binary' # TODO finish it by importing the cutadapt code or remove it # trim_bin = os.path.join(BIN_DIR, 'trim_nextera_adapters') # assert 'usage' in check_output([trim_bin, '-h']) # # with blast algorithm # in_fhand = NamedTemporaryFile(suffix='.fastq') # in_fhand.write(FASTQ5) # in_fhand.flush() # out_fhand = NamedTemporaryFile() # # cmd = [trim_bin, in_fhand.name, '-o', out_fhand.name] # check_output(cmd) # trimmed_reads = read_seqs([open(out_fhand.name)]) # read3 = 'GGAAGAGGAACAAGTGAGCAGCAGGACTGTATGATATTCTCATCTGAAGACAGGGACCATC' # read3 += 'ATATTCCCCGGGAAACTCCGATGCCAGAGTATTAGCATGC' # last read should no be trimmed this way, but keep 5 As more # expected_seqs = ['TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT', read3, # 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', # 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] # for seq in trimmed_reads: # assert get_str_seq(seq) in expected_seqs # # with cutadapt # in_fhand = NamedTemporaryFile(suffix='.fastq') # in_fhand.write(FASTQ5) # in_fhand.flush() # out_fhand = NamedTemporaryFile() # summary_fhand = NamedTemporaryFile() # # cmd = 
[trim_bin, in_fhand.name, '-o', out_fhand.name, '-c', # '-s', summary_fhand.name] # check_output(cmd) # trimmed_reads = read_seqs([open(out_fhand.name)]) # read3 = 'GGAAGAGGAACAAGTGAGCAGCAGGACTGTATGATATTCTCATCTGAAGACAGGGACCATC' # read3 += 'ATATTCCCCGGGAAACTCCGATGCCAGAGTATTAGCATGC' # expected_seqs = ['TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT', read3, # 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] # for seq in trimmed_reads: # assert get_str_seq(seq) in expected_seqs if __name__ == '__main__': # import sys; sys.argv = ['', 'TrimChimericRegions'] unittest.main()
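# A minimal, hypothetical usage sketch of the trim-packet pipeline exercised
# above (it assumes ngs_crumbs and Biopython are importable, exactly as the
# tests do; this helper is illustrative only and is never called by the suite).
def _demo_trim_pipeline():
    'It trims the lowercased edges from a couple of sequences.'
    seq1 = SeqWrapper(SEQRECORD, SeqRecord(Seq('aaCTTTC')), None)
    seq2 = SeqWrapper(SEQRECORD, SeqRecord(Seq('CTTCaa')), None)
    trim_packet = {SEQS_PASSED: [[seq1], [seq2]], ORPHAN_SEQS: []}
    # the trimmer annotates the sequences; TrimOrMask applies the
    # recommendations, as TrimAndMaskTest above suggests
    trim_packet = TrimOrMask()(TrimLowercasedLetters()(trim_packet))
    return [get_str_seq(s) for l in trim_packet[SEQS_PASSED] for s in l]
    # expected result, as in test_trim_seqs: ['CTTTC', 'CTTC']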
JoseBlanca/ngs_crumbs
test/seq/test_trim.py
Python
gpl-3.0
31,869
[ "BLAST" ]
f33959644ed2e1bb2bfd65e2cbca0be10c776b95f2abcfb532c665bc10e410f2
import os
from ase import Atom, Atoms
from ase.parallel import size, rank

from gpaw import GPAW, FermiDirac
from gpaw.analyse.simple_stm import SimpleStm
from gpaw.test import equal

# development toggles: flip load to reuse the .gpw files written earlier,
# and txt to silence the calculator output
load=True
load=False
txt = '/dev/null'
txt='-'

me = ''
if size > 1:
    me += 'rank ' + str(rank) + ': '

BH = Atoms([Atom('B', [.0, .0, .41]),
            Atom('H', [.0, .0, -1.23]),
            ], cell=[5, 6, 6.5])
BH.center()

f3dname = 'stm3d.plt'
def testSTM(calc):
    stm = SimpleStm(calc)
    stm.write_3D([1,0,0], f3dname) # single wf
    wf = stm.gd.integrate(stm.ldos)
##    print "wf=", wf

    if size == 1: # XXXX we have a problem with reading plt in parallel
        stm2 = SimpleStm(f3dname)
        wf2 = stm2.gd.integrate(stm2.ldos)
        print 'Integrals: written, read=', wf, wf2
        equal(wf, wf2, 1.e-7)

##    print eigenvalue_string(calc)
    stm.write_3D(3.1, f3dname)
    wf2 = stm.gd.integrate(stm.ldos)
##    print "wf2=", wf2
    equal(wf2, 2, 0.12)

    return wf

# finite system without spin and width
fname='BH-nospin_wfs.gpw'
if not load:
    BH.set_pbc(False)
    cf = GPAW(nbands=3, h=.3, txt=txt)
    BH.set_calculator(cf)
    e1 = BH.get_potential_energy()
    niter1 = cf.get_number_of_iterations()
    cf.write(fname, 'all')
else:
    cf = GPAW(fname, txt=txt)
wf = testSTM(cf)

# finite system with spin
fname='BH-spin_Sz2_wfs.gpw'
BH.set_initial_magnetic_moments([1, 1])
if not load:
    BH.set_pbc(False)
    cf = GPAW(occupations=FermiDirac(0.1, fixmagmom=True),
              nbands=5, h=0.3, txt=txt)
    BH.set_calculator(cf)
    e2 = BH.get_potential_energy()
    niter2 = cf.get_number_of_iterations()
    cf.write(fname, 'all')
else:
    cf = GPAW(fname, txt=txt)
testSTM(cf)

# periodic system
if not load:
    BH.set_pbc(True)
    cp = GPAW(spinpol=True, nbands=3, h=.3, kpts=(2,1,1), txt=txt)
    BH.set_calculator(cp)
    e3 = BH.get_potential_energy()
    niter3 = cp.get_number_of_iterations()
    cp.write('BH-8kpts_wfs.gpw', 'all')
else:
    cp = GPAW('BH-8kpts_wfs.gpw', txt=txt)
stmp = SimpleStm(cp)

stmp.write_3D(-4., f3dname)
print me + 'Integrals(occ): 2 * wf, bias=', 2 * wf, stmp.gd.integrate(stmp.ldos)
equal(2 * wf, stmp.gd.integrate(stmp.ldos), 0.02)

stmp.write_3D(+4., f3dname)
print me + 'Integrals(unocc): 2 * wf, bias=',
print 2 * wf, stmp.gd.integrate(stmp.ldos)
equal(2 * wf, stmp.gd.integrate(stmp.ldos), 0.02)

energy_tolerance = 0.00007
niter_tolerance = 0
equal(e1, -2.54026, energy_tolerance)
equal(niter1, 26, niter_tolerance)
equal(e2, -1.51101, energy_tolerance)
equal(niter2, 21, niter_tolerance)
equal(e3, -2.83573, energy_tolerance)
assert 38 <= niter3 <= 40, niter3
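# A hypothetical follow-up sketch (not part of the original test): the same
# SimpleStm workflow applied to one of the .gpw files written above. Run it
# only after this script has produced 'BH-nospin_wfs.gpw'; the bias value is
# the one used in testSTM.
def _demo_simple_stm(fname='BH-nospin_wfs.gpw', bias=3.1):
    calc = GPAW(fname, txt=None)          # reload the stored calculation
    stm = SimpleStm(calc)
    stm.write_3D(bias, 'stm3d_demo.plt')  # LDOS for the given bias
    return stm.gd.integrate(stm.ldos)     # the integral checked via equal()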
qsnake/gpaw
gpaw/test/simple_stm.py
Python
gpl-3.0
2,676
[ "ASE", "GPAW" ]
7e9f351809a700684e98dc200f2c91507dcba7eba0f2c88c71541b653229fc3e
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

"""
This module provides utilities for basic math operations.
"""

import collections

import numpy as np


def abs_cap(val, max_abs_val=1):
    """
    Returns the value with its absolute value capped at max_abs_val.
    Particularly useful in passing values to trigonometric functions where
    numerical errors may result in an argument > 1 being passed in.

    Args:
        val (float): Input value.
        max_abs_val (float): The maximum absolute value for val. Defaults to 1.

    Returns:
        val if abs(val) < 1 else sign of val * max_abs_val.
    """
    return max(min(val, max_abs_val), -max_abs_val)


def sort_dict(d, key=None, reverse=False):
    """
    Sorts a dict by value.

    Args:
        d: Input dictionary
        key: Function which takes a tuple (key, object) and returns a value to
            compare and sort by. By default, the function compares the values
            of the dict i.e. key = lambda t : t[1]
        reverse: Allows reversing the sort order.

    Returns:
        OrderedDict object whose keys are ordered according to their value.
    """
    kv_items = list(d.items())

    # Sort kv_items according to key.
    if key is None:
        kv_items.sort(key=lambda t: t[1], reverse=reverse)
    else:
        kv_items.sort(key=key, reverse=reverse)

    # Build ordered dict.
    return collections.OrderedDict(kv_items)


def minloc(seq):
    """
    Return the index of the (first) minimum in seq

    >>> assert minloc(range(3)) == 0
    """
    return min(enumerate(seq), key=lambda s: s[1])[0]


def maxloc(seq):
    """
    Return the index of the (first) maximum in seq

    >>> assert maxloc([1,3,2,3]) == 1
    """
    return max(enumerate(seq), key=lambda s: s[1])[0]


def min_max_indexes(seq):
    """
    Returns the indices of the minimum and the maximum value in a sequence,
    in that order.
    """
    l = sorted(enumerate(seq), key=lambda s: s[1])
    return l[0][0], l[-1][0]


def strictly_increasing(values):
    """True if values are strictly increasing."""
    return all(x < y for x, y in zip(values, values[1:]))


def strictly_decreasing(values):
    """True if values are strictly decreasing."""
    return all(x > y for x, y in zip(values, values[1:]))


def non_increasing(values):
    """True if values are not increasing."""
    return all(x >= y for x, y in zip(values, values[1:]))


def non_decreasing(values):
    """True if values are not decreasing."""
    return all(x <= y for x, y in zip(values, values[1:]))


def monotonic(values, mode="<", atol=1.e-8):
    """
    Returns False if values are not monotonic (decreasing|increasing).
    mode is "<" for a decreasing sequence, ">" for an increasing sequence.
    Two numbers are considered equal if they differ by less than atol.

    .. warning:
        Not very efficient for large data sets.

    >>> values = [1.2, 1.3, 1.4]
    >>> monotonic(values, mode="<")
    False
    >>> monotonic(values, mode=">")
    True
    """
    if len(values) == 1:
        return True

    if mode == ">":
        for i in range(len(values) - 1):
            v, vp = values[i], values[i + 1]
            if abs(vp - v) > atol and vp <= v:
                return False
    elif mode == "<":
        for i in range(len(values) - 1):
            v, vp = values[i], values[i + 1]
            if abs(vp - v) > atol and vp >= v:
                return False
    else:
        raise ValueError("Wrong mode %s" % str(mode))

    return True


def round_to_sigfigs(num, sigfigs):
    """
    Rounds a number to a specific number of significant
    figures instead of to a specific precision.
    """
    if not isinstance(sigfigs, int):
        raise TypeError("Number of significant figures must be integer.")

    if sigfigs < 1:
        raise ValueError("Number of significant figures "
                         "must be larger than zero.")

    if num == 0:
        return num

    prec = int(sigfigs - np.ceil(np.log10(np.absolute(num))))
    return round(num, prec)


def make_symmetric_matrix_from_upper_tri(val):
    """
    Given a symmetric matrix in upper triangular matrix form as a flat array
    indexed as:
    [A_xx,A_yy,A_zz,A_xy,A_xz,A_yz]
    This will generate the full matrix:
    [[A_xx,A_xy,A_xz],[A_xy,A_yy,A_yz],[A_xz,A_yz,A_zz]]
    """
    idx = [0, 3, 4, 1, 5, 2]
    val = np.array(val)[idx]
    mask = ~np.tri(3, k=-1, dtype=bool)
    out = np.zeros((3, 3), dtype=val.dtype)
    out[mask] = val

    # pylint: disable=E1137
    out.T[mask] = val
    return out
gmatteo/pymatgen
pymatgen/util/num.py
Python
mit
4,590
[ "pymatgen" ]
2f3615ed3e5bbc6f7865d6012ee0e9386c748bfb5aebeda53f7fc70c8cefa075
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import torch
import torch.nn as nn
from .utils import PYTORCH_REGRESSION_LOSS_MAP
import numpy as np


class LSTMSeq2Seq(nn.Module):
    def __init__(self,
                 input_feature_num,
                 future_seq_len,
                 output_feature_num,
                 lstm_hidden_dim=128,
                 lstm_layer_num=2,
                 dropout=0.25,
                 teacher_forcing=False):
        super(LSTMSeq2Seq, self).__init__()
        self.lstm_encoder = nn.LSTM(input_size=input_feature_num,
                                    hidden_size=lstm_hidden_dim,
                                    num_layers=lstm_layer_num,
                                    dropout=dropout,
                                    batch_first=True)
        self.lstm_decoder = nn.LSTM(input_size=output_feature_num,
                                    hidden_size=lstm_hidden_dim,
                                    num_layers=lstm_layer_num,
                                    dropout=dropout,
                                    batch_first=True)
        self.fc = nn.Linear(in_features=lstm_hidden_dim, out_features=output_feature_num)
        self.future_seq_len = future_seq_len
        self.output_feature_num = output_feature_num
        self.teacher_forcing = teacher_forcing

    def forward(self, input_seq, target_seq=None):
        x, (hidden, cell) = self.lstm_encoder(input_seq)
        # the input feature order should have the target dimensions first
        decoder_input = input_seq[:, -1, :self.output_feature_num]
        decoder_input = decoder_input.unsqueeze(1)
        decoder_output = []
        for i in range(self.future_seq_len):
            decoder_output_step, (hidden, cell) = self.lstm_decoder(decoder_input,
                                                                    (hidden, cell))
            out_step = self.fc(decoder_output_step)
            decoder_output.append(out_step)
            if not self.teacher_forcing or target_seq is None:
                # without teacher forcing
                decoder_input = out_step
            else:
                # with teacher forcing
                decoder_input = target_seq[:, i:i+1, :]
        decoder_output = torch.cat(decoder_output, dim=1)
        return decoder_output


def model_creator(config):
    return LSTMSeq2Seq(input_feature_num=config["input_feature_num"],
                       output_feature_num=config["output_feature_num"],
                       future_seq_len=config["future_seq_len"],
                       lstm_hidden_dim=config.get("lstm_hidden_dim", 128),
                       lstm_layer_num=config.get("lstm_layer_num", 2),
                       dropout=config.get("dropout", 0.25),
                       teacher_forcing=config.get("teacher_forcing", False))


def optimizer_creator(model, config):
    return getattr(torch.optim, config.get("optim", "Adam"))(model.parameters(),
                                                             lr=config.get("lr", 0.001))


def loss_creator(config):
    loss_name = config.get("loss", "mse")
    if loss_name in PYTORCH_REGRESSION_LOSS_MAP:
        loss_name = PYTORCH_REGRESSION_LOSS_MAP[loss_name]
    else:
        raise RuntimeError(f"Got '{loss_name}' for loss name, "
                           "where 'mse', 'mae' or 'huber_loss' is expected")
    return getattr(torch.nn, loss_name)()


try:
    from bigdl.orca.automl.model.base_pytorch_model import PytorchBaseModel

    class Seq2SeqPytorch(PytorchBaseModel):
        def __init__(self, check_optional_config=False):
            super().__init__(model_creator=model_creator,
                             optimizer_creator=optimizer_creator,
                             loss_creator=loss_creator,
                             check_optional_config=check_optional_config)

        def _input_check(self, x, y):
            if len(x.shape) < 3:
                raise RuntimeError(f"Invalid data x with {len(x.shape)} "
                                   "dim where 3 dim is required.")
            if len(y.shape) < 3:
                raise RuntimeError(f"Invalid data y with {len(y.shape)} dim "
                                   "where 3 dim is required.")
            if y.shape[-1] > x.shape[-1]:
                raise RuntimeError("output dim should not be larger than input dim "
                                   f"while we get {y.shape[-1]} > {x.shape[-1]}.")

        def _forward(self, x, y):
            self._input_check(x, y)
            return self.model(x, y)

        def _get_required_parameters(self):
            return {
                "input_feature_num",
                "future_seq_len",
                "output_feature_num"
            }

        def _get_optional_parameters(self):
            return {
                "lstm_hidden_dim",
                "lstm_layer_num",
                "teacher_forcing"
            } | super()._get_optional_parameters()
except ImportError:
    pass
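# A hypothetical smoke test (not part of the original module): build the model
# through model_creator with the config keys consumed above and check the
# rolled-out output shape; batch size and sequence lengths are arbitrary.
def _smoke_test_seq2seq():
    config = {"input_feature_num": 4,
              "output_feature_num": 2,
              "future_seq_len": 5}
    model = model_creator(config)
    x = torch.randn(8, 24, 4)   # (batch, past_seq_len, input_feature_num)
    y_hat = model(x)            # the decoder rolls out future_seq_len steps
    assert y_hat.shape == (8, 5, 2)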
intel-analytics/BigDL
python/chronos/src/bigdl/chronos/model/Seq2Seq_pytorch.py
Python
apache-2.0
5,461
[ "ORCA" ]
0b94494466f2f569a6499a39fbeabc4a807b5514e129681d2b9ee0e4dfc4495e
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help about types of credentials and authentication."""

from __future__ import absolute_import

from gslib.help_provider import HelpProvider

_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
  gsutil currently supports several types of credentials/authentication, as
  well as the ability to access public data anonymously (see "gsutil help
  anon" for more on anonymous access). Each of these types of credentials is
  discussed in more detail below, along with information about configuring and
  using credentials via either the Cloud SDK or standalone installations of
  gsutil.

<B>Configuring/Using Credentials via Cloud SDK Distribution of gsutil</B>
  When gsutil is installed/used via the Cloud SDK ("gcloud"), credentials are
  stored by Cloud SDK in a non-user-editable file located under
  ~/.config/gcloud (any manipulation of credentials should be done via the
  gcloud auth command). If you need to set up multiple credentials (e.g., one
  for an individual user account and a second for a service account), the
  gcloud auth command manages the credentials for you, and you switch between
  credentials using the gcloud auth command as well (for more details see
  https://developers.google.com/cloud/sdk/gcloud/#gcloud.auth).

  Once credentials have been configured via gcloud auth, those credentials will
  be used regardless of whether the user has any boto configuration files
  (which are located at ~/.boto unless a different path is specified in the
  BOTO_CONFIG environment variable). However, gsutil will still look for
  credentials in the boto config file if a type of credential is needed that's
  not stored in the gcloud credential store (e.g., an HMAC credential for an
  S3 account).

<B>Configuring/Using Credentials via Standalone gsutil Distribution</B>
  If you installed a standalone distribution of gsutil (downloaded from
  https://pub.storage.googleapis.com/gsutil.tar.gz,
  https://pub.storage.googleapis.com/gsutil.zip, or PyPi), credentials are
  configured using the gsutil config command, and are stored in the
  user-editable boto config file (located at ~/.boto unless a different path is
  specified in the BOTO_CONFIG environment). In this case if you want to set up
  multiple credentials (e.g., one for an individual user account and a second
  for a service account), you run gsutil config once for each credential, and
  save each of the generated boto config files (e.g., renaming one to
  ~/.boto_user_account and the second to ~/.boto_service_account), and you
  switch between the credentials using the BOTO_CONFIG environment variable
  (e.g., by running BOTO_CONFIG=~/.boto_user_account gsutil ls).

  Note that when using the standalone version of gsutil with the JSON API you
  can configure at most one of the following types of GCS credentials in a
  single boto config file: OAuth2 User Account, OAuth2 Service Account.
In addition to these, you may also have S3 HMAC credentials (necessary for using s3:// URLs) and GCE Internal Service Account credentials. GCE Internal Service Account credentials are used only when OAuth2 credentials are not present. <B>SUPPORTED CREDENTIAL TYPES</B> gsutil supports several types of credentials (the specific subset depends on which distribution of gsutil you are using; see above discussion). OAuth2 User Account: This is the preferred type of credentials for authenticating requests on behalf of a specific user (which is probably the most common use of gsutil). This is the default type of credential that will be created when you run "gsutil config". For more details about OAuth2 authentication, see: https://developers.google.com/accounts/docs/OAuth2#scenarios HMAC: This type of credential can be used by programs that are implemented using HMAC authentication, which is an authentication mechanism supported by certain other cloud storage service providers. This type of credential can also be used for interactive use when moving data to/from service providers that support HMAC credentials. This is the type of credential that will be created when you run "gsutil config -a". Note that it's possible to set up HMAC credentials for both Google Cloud Storage and another service provider; or to set up OAuth2 user account credentials for Google Cloud Storage and HMAC credentials for another service provider. To do so, after you run the gsutil config command, you can edit the generated ~/.boto config file and look for comments for where other credentials can be added. For more details about HMAC authentication, see: https://developers.google.com/storage/docs/reference/v1/getting-startedv1#keys OAuth2 Service Account: This is the preferred type of credential to use when authenticating on behalf of a service or application (as opposed to a user). For example, if you will run gsutil out of a nightly cron job to upload/download data, using a service account allows the cron job not to depend on credentials of an individual employee at your company. This is the type of credential that will be configured when you run "gsutil config -e". It is important to note that a service account is considered an Editor by default for the purposes of API access, rather than an Owner. In particular, the fact that Editors have OWNER access in the default object and bucket ACLs, but the canned ACL options remove OWNER access from Editors, can lead to unexpected results. The solution to this problem is to ensure the service account is an Owner in the Permissions tab for your project. To find the email address of your service account, visit the `Google Developers Console <https://cloud.google.com/console#/project>`_, click on the project you're using, click "APIs & auth", and click "Credentials". To create a service account, visit the Google Developers Console and then: - Click "APIs & auth" in the left sidebar. - Click "Credentials". - Click "Create New Client ID". - Select "Service Account" as your application type. - Save the JSON private key or the .p12 private key and password provided. For further information about account roles, see: https://developers.google.com/console/help/#DifferentRoles For more details about OAuth2 service accounts, see: https://developers.google.com/accounts/docs/OAuth2ServiceAccount GCE Internal Service Account: This is the type of service account used for accounts hosted by App Engine or GCE. 
Such credentials are created automatically for you on GCE when you run the gcutil addinstance command with the --service_account flag. For more details about GCE service accounts, see: https://developers.google.com/compute/docs/authentication; For more details about App Engine service accounts, see: https://developers.google.com/appengine/docs/python/appidentity/overview """) class CommandOptions(HelpProvider): """Additional help about types of credentials and authentication.""" # Help specification. See help_provider.py for documentation. help_spec = HelpProvider.HelpSpec( help_name='creds', help_name_aliases=['credentials', 'authentication', 'auth', 'gcloud'], help_type='additional_help', help_one_line_summary='Credential Types Supporting Various Use Cases', help_text=_DETAILED_HELP_TEXT, subcommand_help_text={}, )
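# A hypothetical helper (not part of gsutil) showing the credential switching
# described in the help text above: point BOTO_CONFIG at one of the saved
# config files before invoking gsutil. The file name below is the example used
# in the text.
def _run_gsutil_with_config(boto_config_path, args):
  import os
  import subprocess
  # equivalent to: BOTO_CONFIG=~/.boto_user_account gsutil ls
  env = dict(os.environ, BOTO_CONFIG=os.path.expanduser(boto_config_path))
  return subprocess.call(['gsutil'] + list(args), env=env)

# e.g. _run_gsutil_with_config('~/.boto_user_account', ['ls'])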
mattdr/gsutil
gslib/addlhelp/creds.py
Python
apache-2.0
8,137
[ "VisIt" ]
133ac74f80a215b6caa80884cde87261c75f239cd371a57fceb4d0ab345f19d9
import numpy
import scipy.stats
from scipy.stats import multivariate_normal
from scipy.linalg import orth
import matplotlib.pyplot as plt
from math import pi, sin, cos
import h5py
from scipy.ndimage.filters import gaussian_filter


class rotation_3d(object):
    """
    the class allows one to rotate a 3D vector in different directions
    """
    def __init__(self):
        self.rot_mat_x = numpy.eye(3)
        self.rot_mat_y = numpy.eye(3)
        self.rot_mat_z = numpy.eye(3)

    def _calc_rotation_matrix_x(self, theta, units='deg'):
        assert units=='deg' or units=='rad'
        if units=='deg':
            theta_rad = theta * pi / 180.0
        else:
            theta_rad = theta  # already in radians
        self.rot_mat_x = numpy.array((1, 0, 0,
                                      0, cos(theta_rad), -sin(theta_rad),
                                      0, sin(theta_rad), cos(theta_rad))).reshape((3, 3))

    def _calc_rotation_matrix_y(self, theta, units='deg'):
        assert units=='deg' or units=='rad'
        if units=='deg':
            theta_rad = theta * pi / 180.0
        else:
            theta_rad = theta  # already in radians
        self.rot_mat_y = numpy.array((cos(theta_rad), 0, sin(theta_rad),
                                      0, 1, 0,
                                      -sin(theta_rad), 0, cos(theta_rad))).reshape((3, 3))

    def _calc_rotation_matrix_z(self, theta, units='deg'):
        assert units=='deg' or units=='rad'
        if units=='deg':
            theta_rad = theta * pi / 180.0
        else:
            theta_rad = theta  # already in radians
        self.rot_mat_z = numpy.array((cos(theta_rad), -sin(theta_rad), 0,
                                      sin(theta_rad), cos(theta_rad), 0,
                                      0, 0, 1)).reshape((3, 3))

    def _calc_rotation_matrix(self):
        self.rot_mat = numpy.dot(numpy.dot(self.rot_mat_x, self.rot_mat_y), self.rot_mat_z)

    def return_rotation_matrix(self, theta_x, theta_y, theta_z, units='deg'):
        """
        function builds and returns the combined rotation matrix for three given angles
        """
        assert units=='deg' or units=='rad'
        self._calc_rotation_matrix_x(theta_x, units)
        self._calc_rotation_matrix_y(theta_y, units)
        self._calc_rotation_matrix_z(theta_z, units)
        self._calc_rotation_matrix()
        return self.rot_mat

    def rotate_vector(self, theta_x, theta_y, theta_z, vector, units='deg'):
        """
        function rotates a vector in 3D with three given angles
        """
        assert units=='deg' or units=='rad'
        assert vector.shape == (3, )
        self._calc_rotation_matrix_x(theta_x, units)
        self._calc_rotation_matrix_y(theta_y, units)
        self._calc_rotation_matrix_z(theta_z, units)
        self._calc_rotation_matrix()
        return numpy.dot(vector, self.rot_mat)


class mixture_of_gaussians(object):
    """
    the class represents a D-dimensional density model (here, a galaxy model)
    which is constructed from Gaussians
    """
    def __init__(self, D):
        self.alphas = []
        self.mus = []
        self.fis = []
        self.K = 0
        self.D = D

    def copy(self):
        """
        This code is brittle because we are not using proper setters (or
        adders) to construct the mixture.
        """
        new = mixture_of_gaussians(self.D)
        for alpha, mu, fi in zip(self.alphas, self.mus, self.fis):
            new.add_gaussian(alpha, mu.copy(), fi.copy())
        return new

    def __mul__(self, factor):
        new = self.copy()
        for k, alpha in enumerate(self.alphas):
            new.alphas[k] = alpha * factor
        return new

    def rescale(self, scale):
        """
        Expand everything by isotropic scale.

        Hacky and brittle!

        Returns a copy!
        """
        new = self.copy()
        new.mus = [scale * mu for mu in self.mus]
        new.fis = [scale * scale * fi for fi in self.fis]
        return new

    def add_gaussian(self, alpha, mu, fi):
        assert mu.shape == (self.D,)
        assert fi.shape == (self.D, self.D)
        self.alphas.append(alpha)
        self.mus.append(mu)
        self.fis.append(fi)
        self.K += 1

    def convolve(self, other):
        """
        Convolve a mixture with another mixture.

        Might really be *correlate* rather than convolve!

        Returns a new object; doesn't work in place.
        """
        assert self.D == other.D
        new = mixture_of_gaussians(self.D)
        for ks in range(self.K):
            for ko in range(other.K):
                new.add_gaussian(self.alphas[ks] * other.alphas[ko],
                                 self.mus[ks] + other.mus[ko],
                                 self.fis[ks] + other.fis[ko])
        return new

    def render(self, positions):
        N, D = positions.shape
        assert D == self.D
        densities = numpy.zeros(N)
        for k in range(self.K):
            gaus_k = multivariate_normal(mean=self.mus[k], cov=self.fis[k])
            pdf_k = gaus_k.pdf(positions)
            densities += self.alphas[k] * pdf_k
        return densities

    def get_total_mass(self):
        return numpy.sum(self.alphas)


class galaxy_model_3d(mixture_of_gaussians):
    """
    the class represents a 3D galaxy model which is constructed from Gaussians
    """
    def __init__(self):
        super(galaxy_model_3d, self).__init__(3)

    def copy(self):
        """
        This code is brittle because we are not using proper setters (or
        adders) to construct the mixture.
        """
        new = galaxy_model_3d()
        for alpha, mu, fi in zip(self.alphas, self.mus, self.fis):
            new.add_gaussian(alpha, mu.copy(), fi.copy())
        return new

    def project_2d(self, xi_hat, eta_hat):
        assert xi_hat.shape == (self.D,)
        assert eta_hat.shape == (self.D,)
        assert numpy.isclose(numpy.dot(xi_hat, xi_hat), 1.0)
        assert numpy.isclose(numpy.dot(eta_hat, eta_hat), 1.0)
        assert numpy.isclose(numpy.dot(xi_hat, eta_hat), 0.0)
        projection_matrix = numpy.vstack((xi_hat, eta_hat))
        mixture_2d = mixture_of_gaussians(2)
        for k in range(self.K):
            m = numpy.dot(projection_matrix, self.mus[k])
            V = numpy.dot(numpy.dot(projection_matrix, self.fis[k]),
                          projection_matrix.T)
            mixture_2d.add_gaussian(self.alphas[k], m, V)
        return mixture_2d

    def render_2d_image(self, xi_hat, eta_hat, xs, ys, intensity=1., psf=None):
        Y, X = numpy.meshgrid(ys, xs)
        xs_flatten = X.flatten()
        ys_flatten = Y.flatten()
        positions_flatten = numpy.vstack((xs_flatten, ys_flatten)).T
        mixture_2d = self.project_2d(xi_hat, eta_hat) * intensity
        if psf is not None:
            mixture_2d = mixture_2d.convolve(psf)
        densities_flatten = mixture_2d.render(positions_flatten)
        densities = numpy.reshape(densities_flatten, X.shape)
        return densities

    def _construct_covariance_from_vector(self, vector):
        assert len(vector)==6
        assert numpy.isreal(vector).all()
        fi = numpy.zeros((3,3))
        fi[numpy.diag_indices(3)] = vector[:3]
        fi[numpy.tril_indices(3, -1)] += vector[3:]
        covariance = numpy.dot(fi, fi.T)
        return covariance

    def set_parameters_from_vector(self, vector):
        assert len(vector) % 10 == 0
        self.__init__()
        for i in xrange(0, len(vector), 10):
            parameters = vector[i:i+10]
            alpha = parameters[0]
            mu = parameters[1:4]
            fi = self._construct_covariance_from_vector(parameters[4:])
            ## old covariance construction ##
            #fi = numpy.zeros((3,3))
            #fi[numpy.diag_indices(3)] = parameters[4:7]
            #fi[numpy.triu_indices(3, 1)] += parameters[7:10]
            #fi[numpy.tril_indices(3, -1)] += parameters[7:10]
            self.add_gaussian(alpha, mu, fi)

    def get_parameters_vector(self):
        vector = numpy.zeros(10 * self.K)
        for k in range(self.K):
            i = 10 * k
            vector[i] = self.alphas[k]
            vector[i+1:i+4] = self.mus[k]
            vector[i+4:i+7] = (self.fis[k])[numpy.diag_indices(3)]
            vector[i+7:i+10] = (self.fis[k])[numpy.triu_indices(3, 1)]
        return vector

    def get_ln_prior(self):
        """
        Penalize bad (or impossible) condition numbers.
        """
        lnp = 0.
        for fi in self.fis:
            try:
                eigs = numpy.linalg.eigvalsh(fi)
            except:
                print "eigs did not converge"
                return -numpy.Inf
            if numpy.any(eigs <= 0.):
                return -numpy.Inf
            lnp -= numpy.log(numpy.max(eigs) / numpy.min(eigs)) # condition number!
        return lnp


def choose_random_projection():
    """
    Generate two orthogonal normal vectors, drawn isotropically from the
    sphere.
""" xhat = numpy.random.normal(size=3) xhat /= numpy.sqrt(numpy.dot(xhat, xhat)) yhat = numpy.random.normal(size=3) yhat -= numpy.dot(xhat, yhat) * xhat yhat /= numpy.sqrt(numpy.dot(yhat, yhat)) return xhat, yhat class image_and_model(object): """ This class represents a 2D image of a galaxy and holds all the parameters that convert the 3D model to this """ def __init__(self): self.data = None self.synthetic = 0. self.ivar = None self.shape = None self.psf = None self.parameters = {'alpha' : None, 'beta' : None, 'gamma' : None, 'intensity' : None, 'scale' : None, 'xshift' : None, 'yshift' : None, 'bg' : None} def set_data(self, data): if self.shape is None: self.shape = data.shape else: assert data.shape == self.shape self.data = data def set_ivar(self, ivar): """ Set the estimated inverse variance map for the image. """ if self.shape is None: self.shape = ivar.shape else: assert ivar.shape == self.shape self.ivar = ivar def set_shape(self, shape): assert len(shape) == 2 self.shape = shape self.synthetic = 0. return None def set_psf(self, psf): assert type(psf) == mixture_of_gaussians assert psf.D == 2 self.psf = psf self.synthetic = 0. def set_parameters(self, **kwargs): if kwargs is not None: for key, value in kwargs.iteritems(): assert key in self.parameters.keys() self.parameters[key] = value self.synthetic = 0. def set_parameters_from_vector(self, par_vector): self.parameters['alpha'] = par_vector[0] self.parameters['beta'] = par_vector[1] self.parameters['gamma'] = par_vector[2] self.parameters['intensity'] = par_vector[3] self.parameters['scale'] = par_vector[4] self.parameters['xshift'] = par_vector[5] self.parameters['yshift'] = par_vector[6] self.parameters['bg'] = par_vector[7] self.synthetic = 0. def set_galaxy(self, galaxy): assert type(galaxy) == galaxy_model_3d self.galaxy = galaxy self.synthetic = 0. 
    def get_data(self):
        return self.data

    def get_ivar(self):
        return self.ivar

    def get_synthetic(self):
        if numpy.isscalar(self.synthetic):
            if self.synthetic != 0.0:
                self.synthetic = 0
            self.construct_synthetic()
        return self.synthetic

    def get_shape(self):
        return self.shape

    def get_parameters(self):
        return self.parameters

    def get_parameters_vector(self):
        return numpy.array((self.parameters['alpha'],
                            self.parameters['beta'],
                            self.parameters['gamma'],
                            self.parameters['intensity'],
                            self.parameters['scale'],
                            self.parameters['xshift'],
                            self.parameters['yshift'],
                            self.parameters['bg']))

    def get_parameter(self, key):
        assert key in self.parameters.keys()
        return self.parameters[key]

    def _add_to_synthetic(self, contribution):
        self.synthetic += contribution

    def construct_synthetic(self, xi_hat=None, eta_hat=None):
        nx, ny = self.shape
        xs = (numpy.arange(nx) - self.parameters['xshift']) * self.parameters['scale'] # kpc
        ys = (numpy.arange(ny) - self.parameters['yshift']) * self.parameters['scale'] # kpc
        if xi_hat is None and eta_hat is None:
            r = rotation_3d()
            r_mat = r.return_rotation_matrix(self.parameters['alpha'],
                                             self.parameters['beta'],
                                             self.parameters['gamma'])
            xi_hat = r_mat[0]
            eta_hat = r_mat[1]
        self._add_to_synthetic(self.parameters['bg'])
        self._add_to_synthetic(self.galaxy.render_2d_image(xi_hat, eta_hat, xs, ys,
                                                           intensity=self.parameters['intensity'],
                                                           psf=self.psf.rescale(self.parameters['scale'])))

    def get_chi_squared(self):
        return numpy.sum(self.ivar * (self.data - self.get_synthetic()) ** 2)

    def get_chi_vector(self):
        return (numpy.sqrt(self.ivar) * (self.data - self.get_synthetic())).flatten()

    def get_ln_likelihood(self):
        return -0.5 * self.get_chi_squared()

    def get_ln_prior(self):
        return 0. # no beliefs

    def __call__(self, parameter_vector):
        self.set_parameters_from_vector(parameter_vector)
        return self.get_chi_squared()


class album_and_model(object):
    """
    This class represents a set of images that come from the same 3D model
    with different parameters
    """
    def __init__(self):
        self.images = []
        self.galaxy = None

    def __len__(self):
        return len(self.images)

    def __getitem__(self, i):
        return self.images[i]

    def __iter__(self):
        for image in self.images:
            yield image

    def add_image(self, image):
        assert type(image) == image_and_model
        self.images.append(image)

    def get_all_images(self):
        return self.images

    def set_galaxy(self, galaxy):
        self.galaxy = galaxy
        for image in self.images:
            image.set_galaxy(galaxy)

    def get_chi_squared(self):
        chisquared = 0.
        for image in self.images:
            chisquared += image.get_chi_squared()
        return chisquared

    def get_ln_likelihood(self):
        lnlike = 0.
        for image in self.images:
            lnlike += image.get_ln_likelihood()
        return lnlike

    def get_ln_prior(self):
        lnp = self.galaxy.get_ln_prior()
        for image in self.images:
            lnp += image.get_ln_prior()
        return lnp

    def get_ln_posterior(self):
        lnp = self.get_ln_prior()
        if numpy.isfinite(lnp):
            lnp += self.get_ln_likelihood()
        return lnp

    def __call__(self, galparvec):
        """
        Return -2 * ln_prob, which is something we can *minimize*.
        """
        galaxy = galaxy_model_3d()
        galaxy.set_parameters_from_vector(galparvec)
        self.set_galaxy(galaxy) # must use `set_galaxy()` to propagate to images
        return -2. * self.get_ln_posterior()


class illustris_model_and_image(object):

    def __init__(self, file_path):
        assert h5py.is_hdf5(file_path)
        self.file_path = file_path
        f = h5py.File(file_path, "r")
        stars_snap = f['PartType4']
        stars_coords = stars_snap['Coordinates']
        stars_mags = stars_snap['GFM_StellarPhotometrics']
        self.stars_coords = (stars_coords - numpy.mean(stars_coords, axis=0)) / numpy.std(stars_coords, axis=0)
        self.stars_mags = {'U': stars_mags[:,0], 'B': stars_mags[:,1],
                           'V': stars_mags[:,2], 'K': stars_mags[:,3],
                           'g': stars_mags[:,4], 'r': stars_mags[:,5],
                           'i': stars_mags[:,6], 'z': stars_mags[:,7]}
        self.image = 0.
        self.image_parameters = {'alpha' : None, 'beta' : None, 'gamma' : None,
                                 'intensity' : None, 'scale' : None,
                                 'xshift' : None, 'yshift' : None, 'bg' : None,
                                 'psf_size': None}

    def set_image_shape(self, shape):
        assert len(shape) == 2
        self.shape = shape

    def set_image_parameters(self, **kwargs):
        if kwargs is not None:
            for key, value in kwargs.iteritems():
                assert key in self.image_parameters.keys()
                self.image_parameters[key] = value
        self.image = 0.

    def get_image(self):
        return self.image

    def get_shape(self):
        return self.shape

    def _add_to_image(self, contribution):
        self.image += contribution

    def render_2d_image(self, xi_hat, eta_hat, xs, ys, band_mag='g'):
        projection_matrix = numpy.vstack((xi_hat, eta_hat))
        stars_coords_2d = numpy.dot(self.stars_coords, projection_matrix.T)
        H, xedges, yedges = numpy.histogram2d(stars_coords_2d[:,0], stars_coords_2d[:,1],
                                              [xs, ys], normed=True,
                                              weights=10 ** (self.stars_mags[band_mag]/-2.5))
        return H

    def construct_image(self, xi_hat=None, eta_hat=None):
        nx, ny = self.shape
        xs = (numpy.arange(nx + 1) - self.image_parameters['xshift']) * self.image_parameters['scale'] # kpc
        ys = (numpy.arange(ny + 1) - self.image_parameters['yshift']) * self.image_parameters['scale'] # kpc
        if xi_hat is None and eta_hat is None:
            r = rotation_3d()
            r_mat = r.return_rotation_matrix(self.image_parameters['alpha'],
                                             self.image_parameters['beta'],
                                             self.image_parameters['gamma'])
            xi_hat = r_mat[0]
            eta_hat = r_mat[1]
        H = self.render_2d_image(xi_hat, eta_hat, xs, ys)
        if self.image_parameters['psf_size'] is not None:
            H = gaussian_filter(H, self.image_parameters['psf_size'])
        if self.image_parameters['bg'] is not None:
            self._add_to_image(self.image_parameters['bg'])
        self._add_to_image(H * self.image_parameters['intensity'])
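# A hypothetical end-to-end sketch (not part of the original module): one
# Gaussian blob rendered along a random line of sight; the grid extent and
# the component parameters are arbitrary.
def _demo_render():
    galaxy = galaxy_model_3d()
    galaxy.add_gaussian(1.0, numpy.zeros(3), numpy.eye(3))
    xi_hat, eta_hat = choose_random_projection()  # orthonormal by construction
    xs = numpy.linspace(-3., 3., 32)
    ys = numpy.linspace(-3., 3., 32)
    return galaxy.render_2d_image(xi_hat, eta_hat, xs, ys)  # (32, 32) image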
davidwhogg/DeprojectAllGalaxies
scripts/astrohack_projections.py
Python
mit
15,510
[ "Galaxy" ]
55bcdcac3245578c3db09a71b0be572ff0bae94a9df854c6559cc6e8de5f6d04
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Crawler implementation.""" from builtins import str from builtins import range from queue import Empty from queue import Queue import threading import time from future import standard_library from google.cloud.forseti.common.util import logger from google.cloud.forseti.services.inventory import cai_temporary_storage from google.cloud.forseti.services.inventory.base import cai_gcp_client from google.cloud.forseti.services.inventory.base import cloudasset from google.cloud.forseti.services.inventory.base import crawler from google.cloud.forseti.services.inventory.base import gcp from google.cloud.forseti.services.inventory.base import resources standard_library.install_aliases() LOGGER = logger.get_logger(__name__) class CrawlerConfig(crawler.CrawlerConfig): """Crawler configuration to inject dependencies.""" def __init__(self, storage, progresser, api_client, variables=None): """Initialize Args: storage (Storage): The inventory storage progresser (QueueProgresser): The progresser implemented using a queue api_client (ApiClientImpl): GCP API client variables (dict): config variables """ super(CrawlerConfig, self).__init__() self.storage = storage self.progresser = progresser self.variables = {} if not variables else variables self.client = api_client class ParallelCrawlerConfig(crawler.CrawlerConfig): """Multithreaded crawler configuration, to inject dependencies.""" def __init__(self, storage, progresser, api_client, threads, variables=None): """Initialize Args: storage (Storage): The inventory storage progresser (QueueProgresser): The progresser implemented using a queue api_client (ApiClientImpl): GCP API client threads (int): how many threads to use variables (dict): config variables """ super(ParallelCrawlerConfig, self).__init__() self.storage = storage self.progresser = progresser self.variables = {} if not variables else variables self.threads = threads self.client = api_client class Crawler(crawler.Crawler): """Simple single-threaded Crawler implementation.""" def __init__(self, config): """Initialize Args: config (CrawlerConfig): The crawler configuration """ super(Crawler, self).__init__() self.config = config def run(self, resource): """Run the crawler, given a start resource. Args: resource (object): Resource to start with. Returns: QueueProgresser: The filled progresser described in inventory """ resource.accept(self) return self.config.progresser def visit(self, resource): """Handle a newly found resource. Args: resource (object): Resource to handle. Raises: Exception: Reraises any exception. 
""" progresser = self.config.progresser try: resource.get_iam_policy(self.get_client()) resource.get_org_policy(self.get_client()) resource.get_access_policy(self.get_client()) resource.get_gcs_policy(self.get_client()) resource.get_dataset_policy(self.get_client()) resource.get_cloudsql_policy(self.get_client()) resource.get_billing_info(self.get_client()) resource.get_enabled_apis(self.get_client()) resource.get_kubernetes_service_config(self.get_client()) self.write(resource) except Exception as e: LOGGER.exception(e) progresser.on_error(e) raise else: progresser.on_new_object(resource) def dispatch(self, callback): """Dispatch crawling of a subtree. Args: callback (function): Callback to dispatch. """ callback() def write(self, resource): """Save resource to storage. Args: resource (object): Resource to handle. """ self.config.storage.write(resource) def get_client(self): """Get the GCP API client. Returns: object: GCP API client """ return self.config.client def on_child_error(self, resource_full_name, error): """Process the error generated by child of a resource Inventory does not stop for children errors but raise a warning Args: resource_full_name (str): The full name of the resource that raised the error. error (str): error message to handle """ self.config.storage.warning(resource_full_name, error) self.config.progresser.on_warning(error) class ParallelCrawler(Crawler): """Multi-threaded Crawler implementation.""" def __init__(self, config): """Initialize Args: config (ParallelCrawlerConfig): The crawler configuration """ super(ParallelCrawler, self).__init__(config) self._write_lock = threading.Lock() self._dispatch_queue = Queue() self._shutdown_event = threading.Event() def _start_workers(self): """Start a pool of worker threads for processing the dispatch queue.""" self._shutdown_event.clear() for _ in range(self.config.threads): worker = threading.Thread(target=self._process_queue) worker.daemon = True worker.start() def _process_queue(self): """Process items in the queue until the shutdown event is set.""" while not self._shutdown_event.is_set(): try: callback = self._dispatch_queue.get(timeout=1) except Empty: continue callback() self._dispatch_queue.task_done() def run(self, resource): """Run the crawler, given a start resource. Args: resource (Resource): Resource to start with. Returns: QueueProgresser: The filled progresser described in inventory """ try: self._start_workers() resource.accept(self) self._dispatch_queue.join() finally: self._shutdown_event.set() # Wait for threads to exit. time.sleep(2) return self.config.progresser def dispatch(self, callback): """Dispatch crawling of a subtree. Args: callback (function): Callback to dispatch. """ self._dispatch_queue.put(callback) def _api_client_factory(config, threads, inventory_index_id): """Creates the proper initialized API client based on the configuration. Args: config (object): Inventory configuration on server. threads (int): how many threads to use. inventory_index_id (int): The inventory index ID for this export. Returns: Union[gcp.ApiClientImpl, cai_gcp_client.CaiApiClientImpl]: The initialized api client implementation class. """ client_config = config.get_api_quota_configs() client_config['domain_super_admin_email'] = config.get_gsuite_admin_email() client_config['excluded_resources'] = config.get_excluded_resources() if config.get_cai_enabled(): # TODO: When CAI supports resource exclusion, update the following # method to handle resource exclusion during export time. 
        engine, tmpfile = cai_temporary_storage.create_sqlite_db(threads)
        asset_count = cloudasset.load_cloudasset_data(
            engine, config, inventory_index_id)
        LOGGER.info('%s total assets loaded from Cloud Asset data.',
                    asset_count)
        if asset_count:
            return cai_gcp_client.CaiApiClientImpl(client_config, engine,
                                                   tmpfile)

    # Default to the non-CAI implementation
    return gcp.ApiClientImpl(client_config)


def _crawler_factory(storage, progresser, client, parallel, threads):
    """Creates the properly initialized crawler based on the configuration.

    Args:
        storage (object): Storage implementation to use.
        progresser (object): Progresser to notify status updates.
        client (object): The API client instance.
        parallel (bool): If true, use the parallel crawler implementation.
        threads (int): how many threads to use when running in parallel.

    Returns:
        Union[Crawler, ParallelCrawler]: The initialized crawler
            implementation class.
    """
    excluded_resources = set(client.config.get('excluded_resources', []))
    config_variables = {'excluded_resources': excluded_resources}
    if parallel:
        parallel_config = ParallelCrawlerConfig(storage, progresser, client,
                                                threads=threads,
                                                variables=config_variables)
        return ParallelCrawler(parallel_config)

    # Default to the non-parallel crawler
    crawler_config = CrawlerConfig(storage, progresser, client,
                                   variables=config_variables)
    return Crawler(crawler_config)


def _root_resource_factory(config, client):
    """Creates the properly initialized root resource based on the
    configuration.

    Args:
        config (object): Inventory configuration on server.
        client (object): The API client instance.

    Returns:
        Resource: The initialized root resource.
    """
    if config.use_composite_root():
        composite_root_resources = config.get_composite_root_resources()
        return resources.CompositeRootResource.create(
            composite_root_resources)

    # Default is a single resource as root.
    return resources.from_root_id(client, config.get_root_resource_id())


def run_crawler(storage, progresser, config, parallel=True, threads=10):
    """Run the crawler with a determined configuration.

    Args:
        storage (object): Storage implementation to use.
        progresser (object): Progresser to notify status updates.
        config (object): Inventory configuration on server.
        parallel (bool): If true, use the parallel crawler implementation.
        threads (int): how many threads to use when running in parallel.

    Returns:
        QueueProgresser: The filled progresser with the inventory run status.
    """
    if parallel and 'sqlite' in str(config.get_service_config().get_engine()):
        # SQLite does not handle concurrent writers, so fall back to a
        # single-threaded crawl.
        LOGGER.info('SQLite used, disabling parallel threads.')
        parallel = False
        threads = 1

    client = _api_client_factory(config, threads,
                                 progresser.inventory_index_id)
    crawler_impl = _crawler_factory(storage, progresser, client, parallel,
                                    threads)
    resource = _root_resource_factory(config, client)
    progresser = crawler_impl.run(resource)
    return progresser
forseti-security/forseti-security
google/cloud/forseti/services/inventory/crawler.py
Python
apache-2.0
11,979
[ "VisIt" ]
c8b9c058c49f5dc86ce4284193b334b8cd5c473051df76800b7b6f7a6a8b1361
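The ParallelCrawler above is built on a standard dispatch-queue pattern: a pool of daemon workers drains a Queue, run() blocks on join(), and a shutdown event releases the idle workers afterwards. The self-contained sketch below is not Forseti code and all names in it are invented; it reproduces just that mechanic, including the try/finally around task_done() that keeps join() from hanging when a callback raises.

import threading
from queue import Empty, Queue


def run_parallel(callbacks, threads=4):
    queue = Queue()
    shutdown = threading.Event()

    def worker():
        # Drain the queue until the producer signals shutdown.
        while not shutdown.is_set():
            try:
                callback = queue.get(timeout=1)
            except Empty:
                continue
            try:
                callback()  # process one unit of work ("crawl one subtree")
            except Exception as e:  # keep the worker alive on per-task errors
                print('task failed:', e)
            finally:
                queue.task_done()  # always ack, or join() below could hang

    for _ in range(threads):
        threading.Thread(target=worker, daemon=True).start()
    for callback in callbacks:
        queue.put(callback)
    queue.join()    # block until every dispatched callback has been acked
    shutdown.set()  # let the idle workers exit their loop


run_parallel([lambda i=i: print('visited', i) for i in range(10)])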