| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
"""
Trains a CNN on fake and real galaxy images using TensorFlow.
Adapted from https://github.com/ageron/handson-ml
"""
import numpy as np
import os
from datetime import datetime
from glob import glob
import tensorflow as tf
def get_indir(nersc=False):
'''Returns path to dr5_testtrain directory'''
if nersc:
return os.path.join('/global/cscratch1/sd/kaylanb','obiwan_out')
else:
return os.path.join(os.environ['HOME'],'Downloads')
def get_outdir(nersc=False,knl=False):
'''Where to write ckpt and log files'''
    if nersc and knl:
        return os.path.join('/global/cscratch1/sd/kaylanb','obiwan_out','cnn_knl')
    elif nersc:
        return os.path.join('/global/cscratch1/sd/kaylanb','obiwan_out','cnn')
else:
return os.path.join(os.environ['HOME'],'Downloads','cnn')
def get_xtrain_fns(brick,indir):
search= os.path.join(indir,'dr5_testtrain','testtrain',
brick[:3],brick,'xtrain_*.npy')
xtrain_fns= glob(search)
if len(xtrain_fns) == 0:
raise IOError('No training data found matching: %s' % search)
return xtrain_fns
def BatchGen(brick,indir,batch_size=32):
fns= get_xtrain_fns(brick,indir)
    for fn in fns:
        print('Loading %s' % fn)
        X= np.load(fn)
        y= np.load(fn.replace('xtrain_','ytrain_'))
N= X.shape[0]
ind= np.array_split(np.arange(N),N // batch_size + 1)
for i in ind:
yield X[i,...],y[i].astype(np.int32) #.reshape(-1,1).astype(np.int32)
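# A minimal usage sketch (added), assuming xtrain_*.npy/ytrain_*.npy files
# exist under <indir>/dr5_testtrain/testtrain/<brick[:3]>/<brick>/:
#   for X_batch, y_batch in BatchGen('1211p060', get_indir(), batch_size=32):
#       pass  # X_batch: (n,64,64,6) images, y_batch: (n,) int32 labels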
def get_bricks(fn='cnn_bricks.txt'):
fn= os.path.join(os.path.dirname(__file__),
'../../../etc',fn)
if not os.path.exists(fn):
raise IOError('Need to create brick list: %s' % fn)
bricks= np.loadtxt(fn,dtype=str)
if len(bricks.shape) == 0:
# single line
with open(fn,'r') as f:
bricks= np.array([f.read().strip()])
return bricks
def get_checkpoint(epoch,brick,outdir):
return os.path.join(outdir,'ckpts',
'epoch_%s_brick_%s.ckpt' % (epoch,brick))
def bookmark_fn(outdir):
"""Single line text file storing the epoch,brick,batch number of last ckpt"""
return os.path.join(outdir,'ckpts',
'last_epoch_brick_batch.txt')
def get_bookmark(outdir):
with open(bookmark_fn(outdir),'r') as f:
epoch,brick,ith_batch= f.read().strip().split(' ')
return epoch,brick,ith_batch
def get_logdir(outdir):
    now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
    logdir= os.path.join(outdir,'logs')
    return os.path.join(logdir,"run-{}".format(now))
height,width,channels = (64,64,6)
conv_kwargs= dict(strides=1,
padding='SAME',
activation=tf.nn.relu)
pool_kwargs= dict(ksize= [1,2,2,1],
strides=[1,2,2,1],
padding='VALID')
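# Note (added): ksize/strides for tf.nn.avg_pool are in NHWC order
# (batch, height, width, channels), so each pooling step below halves the
# spatial dimensions and leaves batch and channel counts untouched.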
with tf.name_scope("inputs"):
X = tf.placeholder(tf.float32, shape=[None,height,width,channels], name="X")
y = tf.placeholder(tf.int32, shape=[None], name="y")
# 64x64
with tf.name_scope("layer1"):
conv1 = tf.layers.conv2d(X, filters=3*channels, kernel_size=7,
**conv_kwargs)
pool1 = tf.nn.avg_pool(conv1, **pool_kwargs)
# 32x32
with tf.name_scope("layer2"):
conv2 = tf.layers.conv2d(pool1, filters=6*channels, kernel_size=7,
**conv_kwargs)
pool2 = tf.nn.avg_pool(conv2, **pool_kwargs)
# 16x16
with tf.name_scope("layer3"):
conv3 = tf.layers.conv2d(pool2, filters=9*channels, kernel_size=7,
**conv_kwargs)
pool3 = tf.nn.avg_pool(conv3, **pool_kwargs)
# next is fc
pool3_flat = tf.reshape(pool3,
shape=[-1, pool3.shape[1] * pool3.shape[2] * pool3.shape[3]])
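# After three SAME convolutions and 2x2 VALID poolings the 64x64 input is
# reduced to 8x8 spatially, so pool3_flat carries 8*8*9*channels features
# per example.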
with tf.name_scope("fc"):
fc = tf.layers.dense(pool3_flat, 64, activation=tf.nn.relu, name="fc")
with tf.name_scope("output"):
logits = tf.layers.dense(fc, 2, name="output") # 2 classes
Y_proba = tf.nn.softmax(logits, name="Y_proba")
with tf.name_scope("train"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer()
training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
with tf.name_scope("init_and_save"):
init = tf.global_variables_initializer()
saver = tf.train.Saver()
init = tf.global_variables_initializer()
saver = tf.train.Saver()
loss_summary= tf.summary.scalar('loss', loss)
accur_summary = tf.summary.scalar('accuracy', accuracy)
if __name__ == '__main__':
from argparse import ArgumentParser
parser= ArgumentParser()
parser.add_argument('--outdir', type=str, default=None, help='optional output directory for the checkpoint and log files')
args= parser.parse_args()
knl=False
config=None
if 'isKNL' in os.environ.keys():
# Set in slurm_job_knl.sh
knl=True
        config= tf.ConfigProto()
        assert(int(os.environ['OMP_NUM_THREADS']) == 68)
        config.intra_op_parallelism_threads=int(os.environ['OMP_NUM_THREADS'])
        config.inter_op_parallelism_threads=1
nersc=False
if 'CSCRATCH' in os.environ.keys():
nersc=True
indir= get_indir(nersc=nersc)
outdir= get_outdir(nersc=nersc,knl=knl)
    if args.outdir is not None:
outdir= args.outdir
# Train
n_epochs = 4
batch_size = 16
bricks= get_bricks()
file_writer = tf.summary.FileWriter(get_logdir(outdir),
tf.get_default_graph())
first_epoch,first_brick,first_batch= '0',bricks[0],'0'
fn= get_checkpoint(first_epoch,first_brick,outdir)+'.meta'
if os.path.exists(fn):
last_epoch,last_brick,last_batch= get_bookmark(outdir)
ckpt_fn= get_checkpoint(last_epoch,last_brick,outdir)
else:
last_epoch,last_brick,last_batch= first_epoch,first_brick,first_batch
ckpt_fn= None
last_ibrick= np.where(bricks == last_brick)[0][0] #+ 1 creates bug where last break skips all epochs
#bricks= ['1211p060']
with tf.Session(config=config) as sess:
if ckpt_fn is None:
sess.run(init)
print('Starting from scratch')
else:
saver.restore(sess, ckpt_fn)
print('Restored ckpt %s' % ckpt_fn)
batch_index= int(last_batch)
for epoch in range(int(last_epoch),n_epochs+1):
for ibrick,brick in enumerate(bricks):
# Don't repeat bricks when restart from ckpt
if ibrick < last_ibrick:
print('skipping: epoch,ibrick,last_ibrick', epoch,ibrick,last_ibrick)
continue
data_gen= BatchGen(brick,indir,batch_size)
for X_,y_ in data_gen:
sess.run(training_op, feed_dict={X: X_, y: y_})
batch_index+=1
if batch_index % 2 == 0:
step = batch_index
file_writer.add_summary(loss_summary.eval(feed_dict={X: X_, y: y_}),
step)
file_writer.add_summary(accur_summary.eval(feed_dict={X: X_, y: y_}),
step)
acc_train = accuracy.eval(feed_dict={X: X_, y: y_})
print(epoch, "Train accuracy:", acc_train)
# Save progress
fn= get_checkpoint(epoch,brick,outdir)
save_path = saver.save(sess, fn)
print('Wrote ckpt %s' % fn)
with open(bookmark_fn(outdir),'w') as f:
f.write('%d %s %d' % (epoch,brick,batch_index))
print('Updated %s' % bookmark_fn(outdir))
# Reset last_ibrick so use all bricks in next epoch
last_ibrick= 0
|
legacysurvey/obiwan
|
py/obiwan/dplearn/cnn.py
|
Python
|
bsd-3-clause
| 8,038
|
[
"Galaxy"
] |
458512b878c916ee38724d6c17065a90896471416446a828b3ebeb936d3e9151
|
#!/usr/bin/env python3
""" Computer-based immigration office for Kanadia """
__author__ = 'Susan Sim, Sasa Milic'
__email__ = "ses@drsusansim.org, milic@cs.toronto.edu"
__copyright__ = "2014 Susan Sim"
__license__ = "MIT License"
__status__ = "Prototype"
# imports one per line
import re
import datetime
import json
######################
## global constants ##
######################
REQUIRED_FIELDS = ["passport", "first_name", "last_name",
"birth_date", "home", "entry_reason", "from"]
######################
## global variables ##
######################
'''
countries:
dictionary mapping country codes (lowercase strings) to dictionaries
containing the following keys:
"code","name","visitor_visa_required",
"transit_visa_required","medical_advisory"
'''
COUNTRIES = None
'''
WATCH_PASSPORTS, WATCH_NAMES:
sets containing, respectively, passports (lowercase strings) and
names (lowercase strings in the format "{first_name} {last_name}")
of people on the "watchlist"
'''
WATCH_PASSPORTS = None
WATCH_NAMES = None
def decide(input_file, watchlist_file, countries_file):
"""
Decides whether a traveller's entry into Kanadia should be accepted
:param input_file: The name of a JSON formatted file that contains
cases to decide
:param watchlist_file: The name of a JSON formatted file that
contains names and passport numbers on a watchlist
:param countries_file: The name of a JSON formatted file that contains
country data, such as whether an entry or transit visa is required,
and whether there is currently a medical advisory
:return: List of strings. Possible values of strings are:
"Accept", "Reject", "Secondary", and "Quarantine"
"""
with open(input_file) as f:
records = json.load(f)
# convert record dicts to lowercase
records = [convert_to_lower(r) for r in records]
set_global_vars(watchlist_file, countries_file)
decisions = {
"Quarantine": is_quarantine,
"Reject": is_reject,
"Secondary": is_secondary,
}
results = []
for r in records:
# An exciting opportunity to use Python's for...else construct!
for d in ["Quarantine", "Reject", "Secondary"]:
if decisions[d](r):
results.append(d)
break
else:
results.append("Accept")
return results
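# A minimal usage sketch (added; the file names are hypothetical):
#   results = decide("entries.json", "watchlist.json", "countries.json")
#   # e.g. ["Accept", "Quarantine", "Reject"]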
def convert_to_lower(d):
"""
    Convert all string keys and values in dict d to lower case.
    :param d: a dictionary with string keys, where
        values are either strings or dicts
    :return: a new dictionary with all strings lowercased
"""
new_d = {}
for k, v in d.items():
if type(v) is str:
new_d[k.lower()] = v.lower()
elif type(v) is dict:
new_d[k.lower()] = convert_to_lower(v)
return new_d
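# For example (added):
#   convert_to_lower({"First_Name": "ANNA", "Home": {"Country": "KAN"}})
#   # -> {"first_name": "anna", "home": {"country": "kan"}}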
def set_global_vars(watchlist_file, countries_file):
"""
Populate global variables COUNTRIES, WATCH_PASSPORTS, and WATCH_NAMES
:param watchlist_file: JSON file
:param countries_file: JSON file
:return: None
"""
global COUNTRIES, WATCH_PASSPORTS, WATCH_NAMES
# read in all files into data structures
with open(watchlist_file) as f:
watchlist = json.load(f)
with open(countries_file) as f:
COUNTRIES = json.load(f)
# convert country codes to lowercase
COUNTRIES = convert_to_lower(COUNTRIES)
watchlist = [convert_to_lower(w) for w in watchlist]
# populate sets
WATCH_PASSPORTS = set([x["passport"] for x in watchlist])
WATCH_NAMES = set([" ".join([x["first_name"], x["last_name"]]) for x in watchlist])
def is_quarantine(record):
"""
Return True iff a traveller that has the given record should be
quarantined.
:param record: A dict that corresponds to a traveller record.
:return: Boolean; True if traveller should be quarantined,
False otherwise.
"""
# fields may not exist in record, thus
# default values of from_ and via are empty strings
from_ = record.get("from", {}).get("country", "")
via = record.get("via", {}).get("country", "")
# If the traveler is coming from or via a country that has a
# medical advisory, he or she must be sent to quarantine
if any([COUNTRIES.get(c, {}).get("medical_advisory", "") for c in [from_, via]]):
return True
return False
def is_reject(record):
"""
Return True iff a traveller that has the given record should be
rejected.
:param record: A dict that corresponds to a traveller record.
:return: Boolean; True if traveller should be rejected,
False otherwise.
"""
# Reject if required information is incomplete.
if not all([record.get(field, "") for field in REQUIRED_FIELDS]):
return True
# Reject if improper passport or date format
if not valid_passport_format(record["passport"]):
return True
if not valid_date_format(record["birth_date"]):
return True
# Reject if birth date on passport is more than 120 years ago
if is_more_than_x_years_ago(120, record["birth_date"]):
return True
# Reject traveller if they need a visa and it is not valid.
return requires_visa(record) and not is_valid_visa(record)
def is_secondary(record):
"""
Return True iff a traveller that has the given record should be
sent to secondary processing.
:param record: A dict that corresponds to a traveller record.
:return: Boolean; True if traveller should be sent to secondary,
False otherwise.
"""
# Check if name or passport on the watchlist
passport = record["passport"]
name = " ".join([record["first_name"], record["last_name"]])
return (passport in WATCH_PASSPORTS) or (name in WATCH_NAMES)
def requires_visa(record):
"""
Return whether a traveller requires a visa (transit or traveller)
:param record: A dict that corresponds to a traveller record.
:return: Boolean; True if the traveller requires a visa, False otherwise
"""
home = record["home"]["country"]
reason = record["entry_reason"]
if home == "kan":
return False
visitor_visa_required = int(COUNTRIES[home]["visitor_visa_required"])
transit_visa_required = int(COUNTRIES[home]["transit_visa_required"])
if reason == "visit" and visitor_visa_required:
return True
if reason == "transit" and transit_visa_required:
return True
# traveller is returning
return False
def is_valid_visa(record):
"""
    Checks whether a visa is valid (a valid visa is one issued less than two years ago)
:param record: A dict that corresponds to a traveller record.
:return: Boolean; True if the visa is valid, False otherwise
"""
# Check whether the visa information is available,
# and in the proper format
visa = record.get("visa", {})
visa_code = visa.get("code", "")
visa_date = visa.get("date", "")
if not valid_visa_format(visa_code):
return False
if not valid_date_format(visa_date):
return False
# Check if visa is more than 2 years old
if is_more_than_x_years_ago(2, visa_date):
return False
return True
def is_more_than_x_years_ago(x, date_string):
"""
    Check if date is more than x years ago.
    :param x: int representing years
    :param date_string: a date string in format "YYYY-mm-dd"
    :return: True if date is more than x years ago; False otherwise.
"""
now = datetime.datetime.now()
x_years_ago = now.replace(year=now.year - x)
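    # Note (added): replace() raises ValueError if this runs on February 29
    # and the target year is not a leap year.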
date = datetime.datetime.strptime(date_string, '%Y-%m-%d')
return (date - x_years_ago).total_seconds() < 0
def valid_visa_format(visa_code):
"""
Checks whether a visa code is two groups of five alphanumeric characters separated by a dash
:param visa_code: alpha-numeric string
:return: Boolean; True if the format is valid, False otherwise
"""
    visa_format = re.compile(r'^\w{5}-\w{5}$')
    return bool(visa_format.match(visa_code))
def valid_passport_format(passport_number):
"""
    Checks whether a passport number is five groups of five alphanumeric characters separated by dashes
:param passport_number: alpha-numeric string
:return: Boolean; True if the format is valid, False otherwise
"""
    passport_format = re.compile(r'^\w{5}-\w{5}-\w{5}-\w{5}-\w{5}$')
    return bool(passport_format.match(passport_number))
def valid_date_format(date_string):
"""
Checks whether a date has the format YYYY-mm-dd in numbers
:param date_string: date to be checked
:return: Boolean True if the format is valid, False otherwise
"""
try:
datetime.datetime.strptime(date_string, '%Y-%m-%d')
return True
except ValueError:
return False
|
zavidovici/inf1340_2014_asst2_sample_solution
|
papers.py
|
Python
|
mit
| 8,773
|
[
"VisIt",
"exciting"
] |
401870b6338911b3e7406e5495ad72ad72baa07b67d4029f227ff69eb32d481d
|
# -*- encoding: utf-8 -*-
"""
flask.ext.espresso
------------------
:copyright: (c) 2013 by Morgan Delahaye-Prat.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import flask_espresso.coffeescript
import slimit
import execjs
import flask
import zlib
class Espresso(object):
"""
Central controller class that can be used to configure how Flask-Espresso
behaves. Each application that wants to use Flask-Espresso has to create,
or run :meth:`init_app` on, an instance of this class after the
    configuration was initialized.
    There are two usage modes which work very similarly. One is binding the
    instance to a specific Flask application::
        app = Flask(__name__)
        e = Espresso(app)
    The other possibility is to create the object once and configure the
    application later to support it::
        e = Espresso()
def create_app():
app = Flask(__name__)
e.init_app(app)
return app
:param app: A Flask application.
:param compiler: An alternate Coffeescript compiler to use.
"""
cache = {} # A class level dict acting as a cache.
def __init__(self, app=None, compiler=None):
self.app = app
self._compiler = compiler
if app is not None:
self.init_app(app)
def init_app(self, app):
"""
Set up this instance for use with ``app``, if no app was passed to the
constructor.
:param app: A Flask application.
"""
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['espresso'] = self
app.config.setdefault('ESPRESSO_DEFAULT_COMPILER', self._compiler)
# hot patching the spidermonkey hard-coded encoding.
app.config.setdefault('ESPRESSO_SPIDERMONKEY_ENCODING', 'utf8')
execjs._runtimes['SpiderMonkey']._encoding = app.config['ESPRESSO_SPIDERMONKEY_ENCODING']
    def clear_cache(self):
        """
        Clears the class-level compilation cache shared by all instances.
        """
Espresso.cache.clear()
def compute_key(value):
"""
Computes a key for a ``value``.
"""
# The CRC32 checksum is used because of the low security risk. If you
# intend to compile CS from the outside world or a large number of
# files, you should consider patching this method to use a stronger
# hashing algorithm.
return zlib.crc32(bytes(value.encode('utf-8')))
def from_cache(key, minified):
    """
    Returns the cached compilation result for ``(key, minified)``, or ``None``
    if it has not been cached yet.
    """
return Espresso.cache.get((key, minified), None)
def to_cache(key, minified, value):
    """
    Stores a compilation result in the cache under ``(key, minified)``.
    """
Espresso.cache[key, minified] = value
def espresso(cs, force=False, cache=True, minify=False):
    """
    Compiles the Coffeescript template ``cs`` and returns the result as a
    :class:`flask.Response` with the ``application/javascript`` mimetype.
    """
cs = flask.render_template(cs)
key = compute_key(cs)
resp = None
if not force:
resp = from_cache(key, minify)
if resp is None:
resp = flask_espresso.coffeescript.compile_cs(cs)
if minify:
resp = slimit.minify(resp, mangle=True, mangle_toplevel=True)
if cache: # the caching only happen if the
to_cache(key, minify, resp) # file is compiled.
return flask.Response(resp, mimetype='application/javascript')
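# A minimal usage sketch (added; the route and template name are hypothetical):
#   @app.route('/app.js')
#   def app_js():
#       return espresso('app.coffee', minify=True)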
|
morgan-del/flask-espresso
|
flask_espresso/espresso.py
|
Python
|
bsd-2-clause
| 3,342
|
[
"ESPResSo"
] |
e49fb007572de3fc303c1f24983f8bfde52e1d8eec04e4bb8f2f1edc1ab61915
|
import glob
import logging
import os
import sys
import traceback
from urllib import pathname2url, unquote
import urlparse
import signal
import time
from PyQt5 import uic
from PyQt5.QtCore import QDir
from PyQt5.QtCore import Qt, pyqtSignal, QStringListModel, QSettings, QPoint, QCoreApplication, pyqtSlot, QUrl, \
QObject, QTimer
from PyQt5.QtGui import QIcon, QDesktopServices
from PyQt5.QtGui import QKeySequence
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QMainWindow, QLineEdit, QTreeWidget, QSystemTrayIcon, QAction, QFileDialog, \
QCompleter, QApplication, QStyledItemDelegate, QListWidget
from PyQt5.QtWidgets import QShortcut
from Tribler.Core.Modules.process_checker import ProcessChecker
from TriblerGUI.tribler_action_menu import TriblerActionMenu
from TriblerGUI.core_manager import CoreManager
from TriblerGUI.debug_window import DebugWindow
from TriblerGUI.defs import PAGE_SEARCH_RESULTS, \
PAGE_HOME, PAGE_EDIT_CHANNEL, PAGE_VIDEO_PLAYER, PAGE_DOWNLOADS, PAGE_SETTINGS, PAGE_SUBSCRIBED_CHANNELS, \
PAGE_CHANNEL_DETAILS, PAGE_PLAYLIST_DETAILS, BUTTON_TYPE_NORMAL, BUTTON_TYPE_CONFIRM, PAGE_LOADING, \
PAGE_DISCOVERING, PAGE_DISCOVERED, PAGE_TRUST, SHUTDOWN_WAITING_PERIOD, DEFAULT_API_PORT
from TriblerGUI.dialogs.confirmationdialog import ConfirmationDialog
from TriblerGUI.dialogs.feedbackdialog import FeedbackDialog
from TriblerGUI.dialogs.startdownloaddialog import StartDownloadDialog
from TriblerGUI.tribler_request_manager import request_queue, TriblerRequestManager, dispatcher
from TriblerGUI.utilities import get_ui_file_path, get_image_path, get_gui_setting, is_dir_writable, quote_plus_unicode
# Pre-load form UI classes
fc_channel_torrent_list_item, _ = uic.loadUiType(get_ui_file_path('channel_torrent_list_item.ui'))
fc_channel_list_item, _ = uic.loadUiType(get_ui_file_path('channel_list_item.ui'))
fc_playlist_list_item, _ = uic.loadUiType(get_ui_file_path('playlist_list_item.ui'))
fc_home_recommended_item, _ = uic.loadUiType(get_ui_file_path('home_recommended_item.ui'))
fc_loading_list_item, _ = uic.loadUiType(get_ui_file_path('loading_list_item.ui'))
class MagnetHandler(QObject):
def __init__(self, window):
QObject.__init__(self)
self.window = window
@pyqtSlot(QUrl)
def on_open_magnet_link(self, url):
self.window.start_download_from_uri(url)
class TriblerWindow(QMainWindow):
resize_event = pyqtSignal()
escape_pressed = pyqtSignal()
tribler_crashed = pyqtSignal(str)
received_search_completions = pyqtSignal(object)
def on_exception(self, *exc_info):
if self.exception_handler_called:
# We only show one feedback dialog, even when there are two consecutive exceptions.
return
self.exception_handler_called = True
exception_text = "".join(traceback.format_exception(*exc_info))
logging.error(exception_text)
self.tribler_crashed.emit(exception_text)
self.delete_tray_icon()
# Stop the download loop
self.downloads_page.stop_loading_downloads()
# Add info about whether we are stopping Tribler or not
os.environ['TRIBLER_SHUTTING_DOWN'] = str(self.core_manager.shutting_down)
if not self.core_manager.shutting_down:
self.core_manager.stop(stop_app_on_shutdown=False)
self.setHidden(True)
if self.debug_window:
self.debug_window.setHidden(True)
dialog = FeedbackDialog(self, exception_text, self.core_manager.events_manager.tribler_version,
self.start_time)
dialog.show()
def __init__(self, core_args=None, core_env=None, api_port=None):
QMainWindow.__init__(self)
QCoreApplication.setOrganizationDomain("nl")
QCoreApplication.setOrganizationName("TUDelft")
QCoreApplication.setApplicationName("Tribler")
QCoreApplication.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.gui_settings = QSettings()
api_port = api_port or int(get_gui_setting(self.gui_settings, "api_port", DEFAULT_API_PORT))
dispatcher.update_worker_settings(port=api_port)
self.navigation_stack = []
self.tribler_started = False
self.tribler_settings = None
self.debug_window = None
self.core_manager = CoreManager(api_port)
self.pending_requests = {}
self.pending_uri_requests = []
self.download_uri = None
self.dialog = None
self.new_version_dialog = None
self.start_download_dialog_active = False
self.request_mgr = None
self.search_request_mgr = None
self.search_suggestion_mgr = None
self.selected_torrent_files = []
self.vlc_available = True
self.has_search_results = False
self.last_search_query = None
self.last_search_time = None
self.start_time = time.time()
self.exception_handler_called = False
self.token_refresh_timer = None
sys.excepthook = self.on_exception
uic.loadUi(get_ui_file_path('mainwindow.ui'), self)
TriblerRequestManager.window = self
self.tribler_status_bar.hide()
# Load dynamic widgets
uic.loadUi(get_ui_file_path('torrent_channel_list_container.ui'), self.channel_page_container)
self.channel_torrents_list = self.channel_page_container.items_list
self.channel_torrents_detail_widget = self.channel_page_container.details_tab_widget
self.channel_torrents_detail_widget.initialize_details_widget()
self.channel_torrents_list.itemSelectionChanged.connect(self.channel_page.clicked_item)
uic.loadUi(get_ui_file_path('torrent_channel_list_container.ui'), self.search_page_container)
self.search_results_list = self.search_page_container.items_list
self.search_torrents_detail_widget = self.search_page_container.details_tab_widget
self.search_torrents_detail_widget.initialize_details_widget()
self.search_results_list.itemClicked.connect(self.on_channel_item_click)
self.search_results_list.itemSelectionChanged.connect(self.search_results_page.clicked_item)
self.token_balance_widget.mouseReleaseEvent = self.on_token_balance_click
def on_state_update(new_state):
self.loading_text_label.setText(new_state)
self.core_manager.core_state_update.connect(on_state_update)
self.magnet_handler = MagnetHandler(self.window)
QDesktopServices.setUrlHandler("magnet", self.magnet_handler, "on_open_magnet_link")
self.debug_pane_shortcut = QShortcut(QKeySequence("Ctrl+d"), self)
self.debug_pane_shortcut.activated.connect(self.clicked_menu_button_debug)
# Remove the focus rect on OS X
for widget in self.findChildren(QLineEdit) + self.findChildren(QListWidget) + self.findChildren(QTreeWidget):
widget.setAttribute(Qt.WA_MacShowFocusRect, 0)
self.menu_buttons = [self.left_menu_button_home, self.left_menu_button_search, self.left_menu_button_my_channel,
self.left_menu_button_subscriptions, self.left_menu_button_video_player,
self.left_menu_button_downloads, self.left_menu_button_discovered]
self.video_player_page.initialize_player()
self.search_results_page.initialize_search_results_page()
self.settings_page.initialize_settings_page()
self.subscribed_channels_page.initialize()
self.edit_channel_page.initialize_edit_channel_page()
self.downloads_page.initialize_downloads_page()
self.home_page.initialize_home_page()
self.loading_page.initialize_loading_page()
self.discovering_page.initialize_discovering_page()
self.discovered_page.initialize_discovered_page()
self.trust_page.initialize_trust_page()
self.stackedWidget.setCurrentIndex(PAGE_LOADING)
# Create the system tray icon
if QSystemTrayIcon.isSystemTrayAvailable():
self.tray_icon = QSystemTrayIcon()
use_monochrome_icon = get_gui_setting(self.gui_settings, "use_monochrome_icon", False, is_bool=True)
self.update_tray_icon(use_monochrome_icon)
# Create the tray icon menu
menu = self.create_add_torrent_menu()
show_downloads_action = QAction('Show downloads', self)
show_downloads_action.triggered.connect(self.clicked_menu_button_downloads)
token_balance_action = QAction('Show token balance', self)
token_balance_action.triggered.connect(lambda: self.on_token_balance_click(None))
quit_action = QAction('Quit Tribler', self)
quit_action.triggered.connect(self.close_tribler)
menu.addSeparator()
menu.addAction(show_downloads_action)
menu.addAction(token_balance_action)
menu.addSeparator()
menu.addAction(quit_action)
self.tray_icon.setContextMenu(menu)
else:
self.tray_icon = None
self.hide_left_menu_playlist()
self.left_menu_button_debug.setHidden(True)
self.top_menu_button.setHidden(True)
self.left_menu.setHidden(True)
self.token_balance_widget.setHidden(True)
self.settings_button.setHidden(True)
self.add_torrent_button.setHidden(True)
self.top_search_bar.setHidden(True)
# Set various icons
self.top_menu_button.setIcon(QIcon(get_image_path('menu.png')))
self.search_completion_model = QStringListModel()
completer = QCompleter()
completer.setModel(self.search_completion_model)
completer.setCompletionMode(QCompleter.UnfilteredPopupCompletion)
self.item_delegate = QStyledItemDelegate()
completer.popup().setItemDelegate(self.item_delegate)
completer.popup().setStyleSheet("""
QListView {
background-color: #404040;
}
QListView::item {
color: #D0D0D0;
padding-top: 5px;
padding-bottom: 5px;
}
QListView::item:hover {
background-color: #707070;
}
""")
self.top_search_bar.setCompleter(completer)
# Toggle debug if developer mode is enabled
self.window().left_menu_button_debug.setHidden(
not get_gui_setting(self.gui_settings, "debug", False, is_bool=True))
# Start Tribler
self.core_manager.start(core_args=core_args, core_env=core_env)
self.core_manager.events_manager.received_search_result_channel.connect(
self.search_results_page.received_search_result_channel)
self.core_manager.events_manager.received_search_result_torrent.connect(
self.search_results_page.received_search_result_torrent)
self.core_manager.events_manager.torrent_finished.connect(self.on_torrent_finished)
self.core_manager.events_manager.new_version_available.connect(self.on_new_version_available)
self.core_manager.events_manager.tribler_started.connect(self.on_tribler_started)
self.core_manager.events_manager.events_started.connect(self.on_events_started)
self.core_manager.events_manager.low_storage_signal.connect(self.on_low_storage)
self.core_manager.events_manager.credit_mining_signal.connect(self.on_credit_mining_error)
self.core_manager.events_manager.tribler_shutdown_signal.connect(self.on_tribler_shutdown_state_update)
# Install signal handler for ctrl+c events
def sigint_handler(*_):
self.close_tribler()
signal.signal(signal.SIGINT, sigint_handler)
self.installEventFilter(self.video_player_page)
# Resize the window according to the settings
center = QApplication.desktop().availableGeometry(self).center()
pos = self.gui_settings.value("pos", QPoint(center.x() - self.width() * 0.5, center.y() - self.height() * 0.5))
size = self.gui_settings.value("size", self.size())
self.move(pos)
self.resize(size)
self.show()
def update_tray_icon(self, use_monochrome_icon):
if not QSystemTrayIcon.isSystemTrayAvailable() or not self.tray_icon:
return
if use_monochrome_icon:
self.tray_icon.setIcon(QIcon(QPixmap(get_image_path('monochrome_tribler.png'))))
else:
self.tray_icon.setIcon(QIcon(QPixmap(get_image_path('tribler.png'))))
self.tray_icon.show()
def delete_tray_icon(self):
if self.tray_icon:
try:
self.tray_icon.deleteLater()
except RuntimeError:
# The tray icon might have already been removed when unloading Qt.
# This is due to the C code actually being asynchronous.
logging.debug("Tray icon already removed, no further deletion necessary.")
self.tray_icon = None
def on_low_storage(self):
"""
Dealing with low storage space available. First stop the downloads and the core manager and ask user to user to
make free space.
:return:
"""
self.downloads_page.stop_loading_downloads()
self.core_manager.stop(False)
close_dialog = ConfirmationDialog(self.window(), "<b>CRITICAL ERROR</b>",
"You are running low on disk space (<100MB). Please make sure to have "
"sufficient free space available and restart Tribler again.",
[("Close Tribler", BUTTON_TYPE_NORMAL)])
close_dialog.button_clicked.connect(lambda _: self.close_tribler())
close_dialog.show()
def on_torrent_finished(self, torrent_info):
self.tray_show_message("Download finished", "Download of %s has finished." % torrent_info["name"])
def show_loading_screen(self):
self.top_menu_button.setHidden(True)
self.left_menu.setHidden(True)
self.token_balance_widget.setHidden(True)
self.settings_button.setHidden(True)
self.add_torrent_button.setHidden(True)
self.top_search_bar.setHidden(True)
self.stackedWidget.setCurrentIndex(PAGE_LOADING)
def tray_set_tooltip(self, message):
"""
Set a tooltip message for the tray icon, if possible.
:param message: the message to display on hover
"""
if self.tray_icon:
try:
self.tray_icon.setToolTip(message)
except RuntimeError as e:
logging.error("Failed to set tray tooltip: %s", str(e))
def tray_show_message(self, title, message):
"""
Show a message at the tray icon, if possible.
:param title: the title of the message
:param message: the message to display
"""
if self.tray_icon:
try:
self.tray_icon.showMessage(title, message)
except RuntimeError as e:
logging.error("Failed to set tray message: %s", str(e))
def on_tribler_started(self):
self.tribler_started = True
self.top_menu_button.setHidden(False)
self.left_menu.setHidden(False)
self.token_balance_widget.setHidden(False)
self.settings_button.setHidden(False)
self.add_torrent_button.setHidden(False)
self.top_search_bar.setHidden(False)
# fetch the settings, needed for the video player port
self.request_mgr = TriblerRequestManager()
self.fetch_settings()
self.downloads_page.start_loading_downloads()
self.home_page.load_popular_torrents()
if not self.gui_settings.value("first_discover", False) and not self.core_manager.use_existing_core:
self.window().gui_settings.setValue("first_discover", True)
self.discovering_page.is_discovering = True
self.stackedWidget.setCurrentIndex(PAGE_DISCOVERING)
else:
self.clicked_menu_button_home()
self.setAcceptDrops(True)
def on_events_started(self, json_dict):
self.setWindowTitle("Tribler %s" % json_dict["version"])
def show_status_bar(self, message):
self.tribler_status_bar_label.setText(message)
self.tribler_status_bar.show()
def hide_status_bar(self):
self.tribler_status_bar.hide()
def process_uri_request(self):
"""
Process a URI request if we have one in the queue.
"""
if len(self.pending_uri_requests) == 0:
return
uri = self.pending_uri_requests.pop()
if uri.startswith('file') or uri.startswith('magnet'):
self.start_download_from_uri(uri)
def perform_start_download_request(self, uri, anon_download, safe_seeding, destination, selected_files,
total_files=0, callback=None):
# Check if destination directory is writable
is_writable, error = is_dir_writable(destination)
if not is_writable:
gui_error_message = "Insufficient write permissions to <i>%s</i> directory. Please add proper " \
"write permissions on the directory and add the torrent again. %s" \
% (destination, error)
ConfirmationDialog.show_message(self.window(), "Download error <i>%s</i>" % uri, gui_error_message, "OK")
return
selected_files_uri = ""
if len(selected_files) != total_files: # Not all files included
selected_files_uri = u'&' + u''.join(u"selected_files[]=%s&" %
quote_plus_unicode(filename) for filename in selected_files)[:-1]
anon_hops = int(self.tribler_settings['download_defaults']['number_hops']) if anon_download else 0
safe_seeding = 1 if safe_seeding else 0
post_data = "uri=%s&anon_hops=%d&safe_seeding=%d&destination=%s%s" % (quote_plus_unicode(uri), anon_hops,
safe_seeding, destination,
selected_files_uri)
post_data = post_data.encode('utf-8') # We need to send bytes in the request, not unicode
request_mgr = TriblerRequestManager()
request_mgr.perform_request("downloads", callback if callback else self.on_download_added,
method='PUT', data=post_data)
# Save the download location to the GUI settings
current_settings = get_gui_setting(self.gui_settings, "recent_download_locations", "")
recent_locations = current_settings.split(",") if len(current_settings) > 0 else []
if isinstance(destination, unicode):
destination = destination.encode('utf-8')
encoded_destination = destination.encode('hex')
if encoded_destination in recent_locations:
recent_locations.remove(encoded_destination)
recent_locations.insert(0, encoded_destination)
if len(recent_locations) > 5:
recent_locations = recent_locations[:5]
self.gui_settings.setValue("recent_download_locations", ','.join(recent_locations))
def on_new_version_available(self, version):
if version == str(self.gui_settings.value('last_reported_version')):
return
self.new_version_dialog = ConfirmationDialog(self, "New version available",
"Version %s of Tribler is available.Do you want to visit the "
"website to download the newest version?" % version,
[('IGNORE', BUTTON_TYPE_NORMAL), ('LATER', BUTTON_TYPE_NORMAL),
('OK', BUTTON_TYPE_NORMAL)])
self.new_version_dialog.button_clicked.connect(lambda action: self.on_new_version_dialog_done(version, action))
self.new_version_dialog.show()
def on_new_version_dialog_done(self, version, action):
if action == 0: # ignore
self.gui_settings.setValue("last_reported_version", version)
elif action == 2: # ok
import webbrowser
webbrowser.open("https://tribler.org")
if self.new_version_dialog:
self.new_version_dialog.close_dialog()
self.new_version_dialog = None
def on_search_text_change(self, text):
self.search_suggestion_mgr = TriblerRequestManager()
self.search_suggestion_mgr.perform_request(
"search/completions?q=%s" % text, self.on_received_search_completions)
def on_received_search_completions(self, completions):
if completions is None:
return
self.received_search_completions.emit(completions)
self.search_completion_model.setStringList(completions["completions"])
def fetch_settings(self):
self.request_mgr = TriblerRequestManager()
self.request_mgr.perform_request("settings", self.received_settings, capture_errors=False)
def received_settings(self, settings):
if not settings:
return
# If we cannot receive the settings, stop Tribler with an option to send the crash report.
if 'error' in settings:
raise RuntimeError(TriblerRequestManager.get_message_from_error(settings))
self.tribler_settings = settings['settings']
# Set the video server port
self.video_player_page.video_player_port = settings["ports"]["video_server~port"]
# Disable various components based on the settings
if not self.tribler_settings['search_community']['enabled']:
self.window().top_search_bar.setHidden(True)
if not self.tribler_settings['video_server']['enabled']:
self.left_menu_button_video_player.setHidden(True)
self.downloads_creditmining_button.setHidden(not self.tribler_settings["credit_mining"]["enabled"])
self.downloads_all_button.click()
# process pending file requests (i.e. someone clicked a torrent file when Tribler was closed)
# We do this after receiving the settings so we have the default download location.
self.process_uri_request()
# Set token balance refresh timer and load the token balance
self.token_refresh_timer = QTimer()
self.token_refresh_timer.timeout.connect(self.load_token_balance)
self.token_refresh_timer.start(60000)
self.load_token_balance()
def on_top_search_button_click(self):
current_ts = time.time()
current_search_query = self.top_search_bar.text()
if self.last_search_query and self.last_search_time \
and self.last_search_query == self.top_search_bar.text() \
and current_ts - self.last_search_time < 1:
logging.info("Same search query already sent within 500ms so dropping this one")
return
self.left_menu_button_search.setChecked(True)
self.has_search_results = True
self.clicked_menu_button_search()
self.search_results_page.perform_search(current_search_query)
self.search_request_mgr = TriblerRequestManager()
self.search_request_mgr.perform_request("search?q=%s" % current_search_query, None)
self.last_search_query = current_search_query
self.last_search_time = current_ts
def on_settings_button_click(self):
self.deselect_all_menu_buttons()
self.stackedWidget.setCurrentIndex(PAGE_SETTINGS)
self.settings_page.load_settings()
self.navigation_stack = []
self.hide_left_menu_playlist()
def on_token_balance_click(self, _):
self.raise_window()
self.deselect_all_menu_buttons()
self.stackedWidget.setCurrentIndex(PAGE_TRUST)
self.load_token_balance()
self.trust_page.load_blocks()
self.navigation_stack = []
self.hide_left_menu_playlist()
def load_token_balance(self):
self.request_mgr = TriblerRequestManager()
self.request_mgr.perform_request("trustchain/statistics", self.received_trustchain_statistics,
capture_errors=False)
def received_trustchain_statistics(self, statistics):
if not statistics or "statistics" not in statistics:
return
self.trust_page.received_trustchain_statistics(statistics)
statistics = statistics["statistics"]
if 'latest_block' in statistics:
balance = (statistics["latest_block"]["transaction"]["total_up"] -
statistics["latest_block"]["transaction"]["total_down"])
self.set_token_balance(balance)
else:
self.token_balance_label.setText("0 MB")
# If trust page is currently visible, then load the graph as well
if self.stackedWidget.currentIndex() == PAGE_TRUST:
self.trust_page.load_blocks()
def set_token_balance(self, balance):
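        # The trustchain balance is reported in bytes; scale it to MB/GB/TB
        # for display.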
if abs(balance) > 1024 ** 4: # Balance is over a TB
balance /= 1024.0 ** 4
self.token_balance_label.setText("%.1f TB" % balance)
elif abs(balance) > 1024 ** 3: # Balance is over a GB
balance /= 1024.0 ** 3
self.token_balance_label.setText("%.1f GB" % balance)
else:
balance /= 1024.0 ** 2
self.token_balance_label.setText("%d MB" % balance)
def raise_window(self):
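        # Clear the minimized flag and set the active flag so the window is
        # restored and brought to the front regardless of its previous state.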
self.setWindowState(self.windowState() & ~Qt.WindowMinimized | Qt.WindowActive)
self.raise_()
self.activateWindow()
def create_add_torrent_menu(self):
"""
Create a menu to add new torrents. Shows when users click on the tray icon or the big plus button.
"""
menu = TriblerActionMenu(self)
browse_files_action = QAction('Import torrent from file', self)
browse_directory_action = QAction('Import torrent(s) from directory', self)
add_url_action = QAction('Import torrent from magnet/URL', self)
add_mdblob_action = QAction('Import Tribler metadata from file', self)
browse_files_action.triggered.connect(self.on_add_torrent_browse_file)
browse_directory_action.triggered.connect(self.on_add_torrent_browse_dir)
add_url_action.triggered.connect(self.on_add_torrent_from_url)
add_mdblob_action.triggered.connect(self.on_add_mdblob_browse_file)
menu.addAction(browse_files_action)
menu.addAction(browse_directory_action)
menu.addAction(add_url_action)
menu.addAction(add_mdblob_action)
return menu
def on_add_torrent_button_click(self, pos):
self.create_add_torrent_menu().exec_(self.mapToGlobal(self.add_torrent_button.pos()))
def on_add_torrent_browse_file(self):
filenames = QFileDialog.getOpenFileNames(self,
"Please select the .torrent file",
QDir.homePath(),
"Torrent files (*.torrent)")
        if len(filenames[0]) > 0:
            for filename in filenames[0]:
                self.pending_uri_requests.append(u"file:%s" % filename)
            self.process_uri_request()
def on_add_mdblob_browse_file(self):
filenames = QFileDialog.getOpenFileNames(self,
"Please select the .mdblob file",
QDir.homePath(),
"Tribler metadata files (*.mdblob)")
if len(filenames[0]) > 0:
for filename in filenames[0]:
self.pending_uri_requests.append(u"file:%s" % filename)
self.process_uri_request()
def start_download_from_uri(self, uri):
self.download_uri = uri
if get_gui_setting(self.gui_settings, "ask_download_settings", True, is_bool=True):
# If tribler settings is not available, fetch the settings and inform the user to try again.
if not self.tribler_settings:
self.fetch_settings()
ConfirmationDialog.show_error(self, "Download Error", "Tribler settings is not available yet. "
"Fetching it now. Please try again later.")
return
# Clear any previous dialog if exists
if self.dialog:
self.dialog.close_dialog()
self.dialog = None
self.dialog = StartDownloadDialog(self, self.download_uri)
self.dialog.button_clicked.connect(self.on_start_download_action)
self.dialog.show()
self.start_download_dialog_active = True
else:
# In the unlikely scenario that tribler settings are not available yet, try to fetch settings again and
# add the download uri back to self.pending_uri_requests to process again.
if not self.tribler_settings:
self.fetch_settings()
if self.download_uri not in self.pending_uri_requests:
self.pending_uri_requests.append(self.download_uri)
return
self.window().perform_start_download_request(self.download_uri,
self.window().tribler_settings['download_defaults'][
'anonymity_enabled'],
self.window().tribler_settings['download_defaults'][
'safeseeding_enabled'],
self.tribler_settings['download_defaults']['saveas'], [], 0)
self.process_uri_request()
def on_start_download_action(self, action):
if action == 1:
if self.dialog and self.dialog.dialog_widget:
self.window().perform_start_download_request(
self.download_uri, self.dialog.dialog_widget.anon_download_checkbox.isChecked(),
self.dialog.dialog_widget.safe_seed_checkbox.isChecked(),
self.dialog.dialog_widget.destination_input.currentText(),
self.dialog.get_selected_files(),
self.dialog.dialog_widget.files_list_view.topLevelItemCount())
else:
ConfirmationDialog.show_error(self, "Tribler UI Error", "Something went wrong. Please try again.")
logging.exception("Error while trying to download. Either dialog or dialog.dialog_widget is None")
if self.dialog:
self.dialog.close_dialog()
self.dialog = None
self.start_download_dialog_active = False
if action == 0: # We do this after removing the dialog since process_uri_request is blocking
self.process_uri_request()
def on_add_torrent_browse_dir(self):
chosen_dir = QFileDialog.getExistingDirectory(self,
"Please select the directory containing the .torrent files",
QDir.homePath(),
QFileDialog.ShowDirsOnly)
if len(chosen_dir) != 0:
self.selected_torrent_files = [torrent_file for torrent_file in glob.glob(chosen_dir + "/*.torrent")]
self.dialog = ConfirmationDialog(self, "Add torrents from directory",
"Are you sure you want to add %d torrents to Tribler?" %
len(self.selected_torrent_files),
[('ADD', BUTTON_TYPE_NORMAL), ('CANCEL', BUTTON_TYPE_CONFIRM)])
self.dialog.button_clicked.connect(self.on_confirm_add_directory_dialog)
self.dialog.show()
def on_confirm_add_directory_dialog(self, action):
if action == 0:
for torrent_file in self.selected_torrent_files:
escaped_uri = u"file:%s" % pathname2url(torrent_file.encode('utf-8'))
self.perform_start_download_request(escaped_uri,
self.window().tribler_settings['download_defaults'][
'anonymity_enabled'],
self.window().tribler_settings['download_defaults'][
'safeseeding_enabled'],
self.tribler_settings['download_defaults']['saveas'], [], 0)
if self.dialog:
self.dialog.close_dialog()
self.dialog = None
def on_add_torrent_from_url(self):
# Make sure that the window is visible (this action might be triggered from the tray icon)
self.raise_window()
if self.video_player_page.isVisible():
# If we're adding a torrent from the video player page, go to the home page.
# This is necessary since VLC takes the screen and the popup becomes invisible.
self.clicked_menu_button_home()
self.dialog = ConfirmationDialog(self, "Add torrent from URL/magnet link",
"Please enter the URL/magnet link in the field below:",
[('ADD', BUTTON_TYPE_NORMAL), ('CANCEL', BUTTON_TYPE_CONFIRM)],
show_input=True)
self.dialog.dialog_widget.dialog_input.setPlaceholderText('URL/magnet link')
self.dialog.dialog_widget.dialog_input.setFocus()
self.dialog.button_clicked.connect(self.on_torrent_from_url_dialog_done)
self.dialog.show()
def on_torrent_from_url_dialog_done(self, action):
if self.dialog and self.dialog.dialog_widget:
uri = self.dialog.dialog_widget.dialog_input.text()
# Remove first dialog
self.dialog.close_dialog()
self.dialog = None
if action == 0:
self.start_download_from_uri(uri)
def on_download_added(self, result):
if not result:
return
if len(self.pending_uri_requests) == 0: # Otherwise, we first process the remaining requests.
self.window().left_menu_button_downloads.click()
else:
self.process_uri_request()
def on_top_menu_button_click(self):
if self.left_menu.isHidden():
self.left_menu.show()
else:
self.left_menu.hide()
def deselect_all_menu_buttons(self, except_select=None):
for button in self.menu_buttons:
if button == except_select:
button.setEnabled(False)
continue
button.setEnabled(True)
if button == self.left_menu_button_search and not self.has_search_results:
button.setEnabled(False)
button.setChecked(False)
def clicked_menu_button_home(self):
self.deselect_all_menu_buttons(self.left_menu_button_home)
self.stackedWidget.setCurrentIndex(PAGE_HOME)
self.navigation_stack = []
self.hide_left_menu_playlist()
def clicked_menu_button_search(self):
self.deselect_all_menu_buttons(self.left_menu_button_search)
self.stackedWidget.setCurrentIndex(PAGE_SEARCH_RESULTS)
self.navigation_stack = []
self.hide_left_menu_playlist()
def clicked_menu_button_discovered(self):
self.deselect_all_menu_buttons(self.left_menu_button_discovered)
self.stackedWidget.setCurrentIndex(PAGE_DISCOVERED)
self.discovered_page.load_discovered_channels()
self.navigation_stack = []
self.hide_left_menu_playlist()
def clicked_menu_button_my_channel(self):
self.deselect_all_menu_buttons(self.left_menu_button_my_channel)
self.stackedWidget.setCurrentIndex(PAGE_EDIT_CHANNEL)
self.edit_channel_page.load_my_channel_overview()
self.navigation_stack = []
self.hide_left_menu_playlist()
def clicked_menu_button_video_player(self):
self.deselect_all_menu_buttons(self.left_menu_button_video_player)
self.stackedWidget.setCurrentIndex(PAGE_VIDEO_PLAYER)
self.navigation_stack = []
self.show_left_menu_playlist()
def clicked_menu_button_downloads(self):
self.raise_window()
self.left_menu_button_downloads.setChecked(True)
self.deselect_all_menu_buttons(self.left_menu_button_downloads)
self.stackedWidget.setCurrentIndex(PAGE_DOWNLOADS)
self.navigation_stack = []
self.hide_left_menu_playlist()
def clicked_menu_button_debug(self):
if not self.debug_window:
self.debug_window = DebugWindow(self.tribler_settings, self.core_manager.events_manager.tribler_version)
self.debug_window.show()
def clicked_menu_button_subscriptions(self):
self.deselect_all_menu_buttons(self.left_menu_button_subscriptions)
self.subscribed_channels_page.load_subscribed_channels()
self.stackedWidget.setCurrentIndex(PAGE_SUBSCRIBED_CHANNELS)
self.navigation_stack = []
self.hide_left_menu_playlist()
def hide_left_menu_playlist(self):
self.left_menu_seperator.setHidden(True)
self.left_menu_playlist_label.setHidden(True)
self.left_menu_playlist.setHidden(True)
def show_left_menu_playlist(self):
self.left_menu_seperator.setHidden(False)
self.left_menu_playlist_label.setHidden(False)
self.left_menu_playlist.setHidden(False)
def on_channel_item_click(self, channel_list_item):
list_widget = channel_list_item.listWidget()
from TriblerGUI.widgets.channel_list_item import ChannelListItem
if isinstance(list_widget.itemWidget(channel_list_item), ChannelListItem):
channel_info = channel_list_item.data(Qt.UserRole)
self.channel_page.initialize_with_channel(channel_info)
self.navigation_stack.append(self.stackedWidget.currentIndex())
self.stackedWidget.setCurrentIndex(PAGE_CHANNEL_DETAILS)
def on_playlist_item_click(self, playlist_list_item):
list_widget = playlist_list_item.listWidget()
from TriblerGUI.widgets.playlist_list_item import PlaylistListItem
if isinstance(list_widget.itemWidget(playlist_list_item), PlaylistListItem):
playlist_info = playlist_list_item.data(Qt.UserRole)
self.playlist_page.initialize_with_playlist(playlist_info)
self.navigation_stack.append(self.stackedWidget.currentIndex())
self.stackedWidget.setCurrentIndex(PAGE_PLAYLIST_DETAILS)
def on_page_back_clicked(self):
try:
prev_page = self.navigation_stack.pop()
self.stackedWidget.setCurrentIndex(prev_page)
if prev_page == PAGE_SEARCH_RESULTS:
self.stackedWidget.widget(prev_page).load_search_results_in_list()
if prev_page == PAGE_SUBSCRIBED_CHANNELS:
self.stackedWidget.widget(prev_page).load_subscribed_channels()
if prev_page == PAGE_DISCOVERED:
self.stackedWidget.widget(prev_page).load_discovered_channels()
except IndexError:
logging.exception("Unknown page found in stack")
def on_credit_mining_error(self, error):
ConfirmationDialog.show_error(self, "Credit Mining Error", error[u'message'])
def on_edit_channel_clicked(self):
self.stackedWidget.setCurrentIndex(PAGE_EDIT_CHANNEL)
self.navigation_stack = []
self.channel_page.on_edit_channel_clicked()
def resizeEvent(self, _):
# Resize home page cells
cell_width = self.home_page_table_view.width() / 3 - 3 # We have some padding to the right
max_height = self.home_page_table_view.height() / 3 - 4
cell_height = min(cell_width / 2 + 60, max_height)
for i in range(0, 3):
self.home_page_table_view.setColumnWidth(i, cell_width)
self.home_page_table_view.setRowHeight(i, cell_height)
self.resize_event.emit()
def exit_full_screen(self):
self.top_bar.show()
self.left_menu.show()
self.video_player_page.is_full_screen = False
self.showNormal()
def close_tribler(self):
if not self.core_manager.shutting_down:
def show_force_shutdown():
self.window().force_shutdown_btn.show()
self.delete_tray_icon()
self.show_loading_screen()
self.hide_status_bar()
self.loading_text_label.setText("Shutting down...")
self.shutdown_timer = QTimer()
self.shutdown_timer.timeout.connect(show_force_shutdown)
self.shutdown_timer.start(SHUTDOWN_WAITING_PERIOD)
self.gui_settings.setValue("pos", self.pos())
self.gui_settings.setValue("size", self.size())
if self.core_manager.use_existing_core:
# Don't close the core that we are using
QApplication.quit()
self.core_manager.stop()
self.core_manager.shutting_down = True
self.downloads_page.stop_loading_downloads()
request_queue.clear()
# Stop the token balance timer
if self.token_refresh_timer:
self.token_refresh_timer.stop()
def closeEvent(self, close_event):
self.close_tribler()
close_event.ignore()
def keyReleaseEvent(self, event):
if event.key() == Qt.Key_Escape:
self.escape_pressed.emit()
if self.isFullScreen():
self.exit_full_screen()
def dragEnterEvent(self, e):
file_urls = [_qurl_to_path(url) for url in e.mimeData().urls()] if e.mimeData().hasUrls() else []
if any(os.path.isfile(filename) for filename in file_urls):
e.accept()
else:
e.ignore()
def dropEvent(self, e):
file_urls = ([(_qurl_to_path(url), url.toString()) for url in e.mimeData().urls()]
if e.mimeData().hasUrls() else [])
for filename, fileurl in file_urls:
if os.path.isfile(filename):
self.start_download_from_uri(fileurl)
e.accept()
def clicked_force_shutdown(self):
process_checker = ProcessChecker()
if process_checker.already_running:
core_pid = process_checker.get_pid_from_lock_file()
os.kill(int(core_pid), 9)
# Stop the Qt application
QApplication.quit()
def on_tribler_shutdown_state_update(self, state):
self.loading_text_label.setText(state)
def _qurl_to_path(qurl):
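    # Turn a QUrl such as file:///home/user/some%20file.torrent into an
    # absolute local filesystem path, undoing the percent-encoding.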
parsed = urlparse.urlparse(qurl.toString())
return os.path.abspath(os.path.join(parsed.netloc, unquote(parsed.path)))
|
Captain-Coder/tribler
|
TriblerGUI/tribler_window.py
|
Python
|
lgpl-3.0
| 43,205
|
[
"VisIt"
] |
83b7c13519cb6b211dde2ef79f6282f840504d1fdcc87d0ed1bbfd321cfdcba5
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-get-job-pilot-output
# Author : Stuart Paterson
########################################################################
"""
Retrieve the output of the pilot that executed a given job
"""
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... JobID ...' % Script.scriptName,
'Arguments:',
' JobID: DIRAC ID of the Job' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if len( args ) < 1:
Script.showHelp()
from DIRAC import exit as DIRACExit
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
for job in args:
try:
job = int( job )
  except ValueError:
errorList.append( ( 'Expected integer for JobID', job ) )
exitCode = 2
continue
result = diracAdmin.getJobPilotOutput( job )
if not result['OK']:
errorList.append( ( job, result['Message'] ) )
exitCode = 2
for error in errorList:
print("ERROR %s: %s" % error)
DIRACExit( exitCode )
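# Example invocation (added; the JobID is hypothetical):
#   $ dirac-admin-get-job-pilot-output 123456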
|
fstagni/DIRAC
|
Interfaces/scripts/dirac-admin-get-job-pilot-output.py
|
Python
|
gpl-3.0
| 1,425
|
[
"DIRAC"
] |
e5a795f69b43785280c48852d6efdd465e5895c3f54e1bb192c69347c66eb99f
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Queries the SMU sqlite database.
Command line interface to extract molecules from the SMU database.
"""
import contextlib
import csv
import enum
import itertools
import os.path
import random
import sys
from absl import app
from absl import flags
from absl import logging
import pandas as pd
from rdkit import Chem
from smu import dataset_pb2
from smu import smu_sqlite
from smu.geometry import bond_length_distribution
from smu.geometry import smu_molecule
from smu.geometry import topology_from_geom
from smu.geometry import utilities
from smu.parser import smu_utils_lib
from smu.parser import smu_writer_lib
class OutputFormat(enum.Enum):
pbtxt = 1
sdf_opt = 2
sdf_init = 3
sdf_init_opt = 4
atomic_input = 5
dat = 6
flags.DEFINE_string(
'input_sqlite', None,
'Path of sqlite file to read. Must be on the local filesystem.')
flags.DEFINE_string(
'output_path', None,
'Path to output file to write. If not specified, will write to stdout.')
flags.DEFINE_list('btids', [], 'List of bond topology ids to query')
flags.DEFINE_list('cids', [], 'List of conformer ids to query')
flags.DEFINE_list('smiles', [], 'List of smiles to query')
flags.DEFINE_list('stoichiometries', [], 'List of stoichiometries to query')
flags.DEFINE_list(
'topology_query_smiles', [],
'List of smiles to query, where the valid bond lengths are '
'given by --bond_lengths_csv and --bond_lengths. '
'Will return all conformers where the given smiles is a '
    'valid description of that geometry given the bond lengths. '
'If you are using the default bond lengths, you should just '
'use --smiles as this method is much slower.')
flags.DEFINE_float('random_fraction', 0.0,
'Randomly return this fraction of DB.')
flags.DEFINE_enum_class('output_format', OutputFormat.pbtxt, OutputFormat,
'Format for the found SMU entries')
flags.DEFINE_boolean(
'sdf_include_all_bond_topologies', True,
'For all sdf outputs, whether to output separate entries '
'for each bond topology or only one')
flags.DEFINE_boolean(
'redetect_topology', False,
'Whether to rerun the topology detection on the conformers')
flags.DEFINE_string(
'bond_lengths_csv', None,
'File usually name <data>_bond_lengths.csv that contains the '
'observed distribution of bond lengths.')
flags.DEFINE_string(
    'bond_lengths', None, 'Comma separated terms of the form XYX:N-N '
'where X is an atom type (CNOF*), Y is a bond type (-=#.~), '
'and N is a possibly empty floating point number. ')
flags.DEFINE_string(
'bond_topology_csv', None,
    'File which contains the description of all bond topologies '
'considered in SMU.')
FLAGS = flags.FLAGS
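# Example invocation (hypothetical paths/values, shown for illustration only):
#   python query_sqlite.py --input_sqlite=smu.sqlite --cids=12345 \
#     --output_format=pbtxt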
class BondLengthParseError(Exception):
def __init__(self, term):
super().__init__(term)
self.term = term
def __str__(self):
    return ('--bond_lengths must be comma separated terms of the form XYX:N-N '
            'where X is an atom type (CNOF*), Y is a bond type (-=#.~), '
            'and N is a possibly empty floating point number. '
            '"{}" did not parse.').format(self.term)
class GeometryData:
"""Class GeometryData."""
_singleton = None
  # These are copied from pipeline.py. Should they be shared somewhere?
_BOND_LENGTHS_SIG_DIGITS = 3
_BOND_LENGTHS_UNBONDED_RIGHT_TAIL_MASS = 0.9
_ATOM_SPECIFICATION_MAP = {
'C': [dataset_pb2.BondTopology.ATOM_C],
'N': [dataset_pb2.BondTopology.ATOM_N],
'O': [dataset_pb2.BondTopology.ATOM_O],
'F': [dataset_pb2.BondTopology.ATOM_F],
'*': [
dataset_pb2.BondTopology.ATOM_C, dataset_pb2.BondTopology.ATOM_N,
dataset_pb2.BondTopology.ATOM_O, dataset_pb2.BondTopology.ATOM_F
],
}
_BOND_SPECIFICATION_MAP = {
'-': [dataset_pb2.BondTopology.BOND_SINGLE],
'=': [dataset_pb2.BondTopology.BOND_DOUBLE],
'#': [dataset_pb2.BondTopology.BOND_TRIPLE],
'.': [dataset_pb2.BondTopology.BOND_UNDEFINED],
'~': [
dataset_pb2.BondTopology.BOND_SINGLE,
dataset_pb2.BondTopology.BOND_DOUBLE,
dataset_pb2.BondTopology.BOND_TRIPLE
],
}
def __init__(self, bond_lengths_csv, bond_lengths_arg, bond_topology_csv):
if bond_lengths_csv is None:
raise ValueError('--bond_lengths_csv required')
logging.info('Loading bond_lengths')
with open(bond_lengths_csv, 'r') as infile:
df = pd.read_csv(infile, dtype={'length_str': str})
self.bond_lengths = bond_length_distribution.AllAtomPairLengthDistributions(
)
self.bond_lengths.add_from_sparse_dataframe(
df, self._BOND_LENGTHS_UNBONDED_RIGHT_TAIL_MASS,
self._BOND_LENGTHS_SIG_DIGITS)
logging.info('Done loading bond_lengths_csv')
self._parse_bond_lengths_arg(bond_lengths_arg)
if bond_topology_csv is None:
raise ValueError('--bond_topology_csv required')
logging.info('Loading bond topologies')
self.smiles_id_dict = {}
with open(bond_topology_csv, 'r') as infile:
reader = csv.reader(iter(infile))
next(reader) # skip the header line
for row in reader:
bt_id, _, _, _, _, smiles = row
self.smiles_id_dict[smiles] = int(bt_id)
logging.info('Done loading bond topologies')
def _parse_bond_lengths_arg(self, bond_lengths_arg):
"""Parses bond length argument."""
if not bond_lengths_arg:
return
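    # Example term (assumed valid per the --bond_lengths help text):
    # 'C-C:1.2-1.8' restricts single C-C bond lengths to [1.2, 1.8].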
terms = [x.strip() for x in bond_lengths_arg.split(',')]
for term in terms:
try:
atoms_a = self._ATOM_SPECIFICATION_MAP[term[0]]
bonds = self._BOND_SPECIFICATION_MAP[term[1]]
atoms_b = self._ATOM_SPECIFICATION_MAP[term[2]]
if term[3] != ':':
raise BondLengthParseError(term)
min_str, max_str = term[4:].split('-')
if min_str:
min_val = float(min_str)
else:
min_val = 0
if max_str:
max_val = float(max_str)
right_tail_mass = None
else:
# These numbers are pretty arbitrary
max_val = min_val + 0.1
right_tail_mass = 0.9
for atom_a, atom_b, bond in itertools.product(atoms_a, atoms_b, bonds):
self.bond_lengths.add(
atom_a, atom_b, bond,
bond_length_distribution.FixedWindowLengthDistribution(
min_val, max_val, right_tail_mass))
except (KeyError, IndexError, ValueError) as an_exception:
raise BondLengthParseError(term) from an_exception
@classmethod
def get_singleton(cls):
if cls._singleton is None:
cls._singleton = cls(FLAGS.bond_lengths_csv, FLAGS.bond_lengths,
FLAGS.bond_topology_csv)
return cls._singleton
def _get_geometry_matching_parameters():
out = smu_molecule.MatchingParameters()
out.must_match_all_bonds = True
out.smiles_with_h = False
out.smiles_with_labels = False
out.neutral_forms_during_bond_matching = True
out.consider_not_bonded = True
out.ring_atom_count_cannot_decrease = False
return out
def topology_query(db, smiles):
"""Find all conformers which have a detected bond topology.
Note that this *redoes* the detection. If you want to use the default detected
versions, you can just query by SMILES string. This is only useful if you
adjust the distance thresholds for what a matching bond is.
Args:
db: smu_sqlite.SMUSQLite
smiles: smiles string for the target bond topology
Yields:
dataset_pb2.Conformer
"""
mol = Chem.MolFromSmiles(smiles, sanitize=False)
Chem.SanitizeMol(mol, Chem.rdmolops.SanitizeFlags.SANITIZE_ADJUSTHS)
mol = Chem.AddHs(mol)
query_bt = utilities.molecule_to_bond_topology(mol)
expanded_stoich = smu_utils_lib.expanded_stoichiometry_from_topology(query_bt)
matching_parameters = _get_geometry_matching_parameters()
geometry_data = GeometryData.get_singleton()
cnt_matched_conformer = 0
cnt_conformer = 0
logging.info('Starting query for %s with stoich %s', smiles, expanded_stoich)
for conformer in db.find_by_expanded_stoichiometry(expanded_stoich):
if not smu_utils_lib.conformer_eligible_for_topology_detection(conformer):
continue
cnt_conformer += 1
matches = topology_from_geom.bond_topologies_from_geom(
bond_lengths=geometry_data.bond_lengths,
conformer_id=conformer.conformer_id,
fate=conformer.fate,
bond_topology=conformer.bond_topologies[0],
geometry=conformer.optimized_geometry,
matching_parameters=matching_parameters)
if smiles in [bt.smiles for bt in matches.bond_topology]:
cnt_matched_conformer += 1
del conformer.bond_topologies[:]
conformer.bond_topologies.extend(matches.bond_topology)
for bt in conformer.bond_topologies:
try:
bt.bond_topology_id = geometry_data.smiles_id_dict[bt.smiles]
except KeyError:
logging.error('Did not find bond topology id for smiles %s',
bt.smiles)
yield conformer
logging.info('Topology query for %s matched %d / %d', smiles,
cnt_matched_conformer, cnt_conformer)
class PBTextOutputter:
"""Simple internal class to write entries to text protocol buffer."""
def __init__(self, output_path):
"""Creates PBTextOutputter.
Args:
output_path: file path to write to
"""
if output_path:
self.outfile = open(output_path, 'w')
else:
self.outfile = sys.stdout
def output(self, conformer):
"""Writes a conformer.
Args:
conformer: dataset_pb2.Conformer
"""
self.outfile.write(str(conformer))
def close(self):
self.outfile.close()
class SDFOutputter:
"""Simple internal class to write entries as multi molecule SDF files."""
def __init__(self, output_path, init_geometry, opt_geometry,
include_all_bond_topologies):
"""Creates SDFOutputter.
At least one of init_geometry and opt_geometry should be True
Args:
output_path: file path to write to
init_geometry: bool, whether to write with initial_geometries
opt_geometry: bool, whether to write with optimized_geometry
include_all_bond_topologies: bool, whether to include all bond topologies
"""
self.init_geometry = init_geometry
self.opt_geometry = opt_geometry
self.include_all_bond_topologies = include_all_bond_topologies
if output_path:
self.writer = Chem.SDWriter(output_path)
else:
self.writer = Chem.SDWriter(sys.stdout)
def output(self, conformer):
"""Writes a Conformer.
Args:
conformer: dataset_pb2.Conformer
"""
for mol in smu_utils_lib.conformer_to_molecules(
conformer,
include_initial_geometries=self.init_geometry,
include_optimized_geometry=self.opt_geometry,
include_all_bond_topologies=self.include_all_bond_topologies):
self.writer.write(mol)
def close(self):
self.writer.close()
class AtomicInputOutputter:
"""Internal class to write output as the inputs to atomic code."""
def __init__(self, output_path):
"""Creates AtomicInputOutputter.
Args:
output_path: directory to write output files to
"""
self.output_path = output_path
if output_path and not os.path.isdir(self.output_path):
raise ValueError(
'Atomic input requires directory as output path, got {}'.format(
self.output_path))
self.atomic_writer = smu_writer_lib.AtomicInputWriter()
def output(self, conformer):
if self.output_path is None:
sys.stdout.write(self.atomic_writer.process(conformer))
else:
with open(
os.path.join(
self.output_path,
self.atomic_writer.get_filename_for_atomic_input(conformer)),
'w') as f:
f.write(self.atomic_writer.process(conformer))
def close(self):
pass
class DatOutputter:
"""Internal class to write output as the original .dat format."""
def __init__(self, output_path):
"""Creates DatOutputter.
Args:
output_path: file to write to
"""
self.writer = smu_writer_lib.SmuWriter(annotate=False)
if output_path:
self.outfile = open(output_path, 'w')
else:
self.outfile = sys.stdout
def output(self, conformer):
"""Writes a conformer.
Args:
conformer: dataset_pb2.Conformer
"""
self.outfile.write(self.writer.process_stage2_proto(conformer))
def close(self):
self.outfile.close()
class ReDetectTopologiesOutputter:
"""Reruns topology detection before handing to another outputter."""
def __init__(self, outputter):
self._wrapped_outputter = outputter
self._geometry_data = GeometryData.get_singleton()
self._matching_parameters = _get_geometry_matching_parameters()
def output(self, conformer):
"""Writes a Conformer.
Args:
conformer: dataset_pb2.Conformer
"""
matches = topology_from_geom.bond_topologies_from_geom(
bond_lengths=self._geometry_data.bond_lengths,
conformer_id=conformer.conformer_id,
fate=conformer.fate,
bond_topology=conformer.bond_topologies[0],
geometry=conformer.optimized_geometry,
matching_parameters=self._matching_parameters)
if not matches.bond_topology:
logging.error('No bond topology matched for %s', conformer.conformer_id)
else:
del conformer.bond_topologies[:]
conformer.bond_topologies.extend(matches.bond_topology)
for bt in conformer.bond_topologies:
try:
bt.bond_topology_id = self._geometry_data.smiles_id_dict[bt.smiles]
except KeyError:
logging.error('Did not find bond topology id for smiles %s',
bt.smiles)
self._wrapped_outputter.output(conformer)
def close(self):
self._wrapped_outputter.close()
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
logging.get_absl_handler().use_absl_log_file()
logging.info('Opening %s', FLAGS.input_sqlite)
db = smu_sqlite.SMUSQLite(FLAGS.input_sqlite, 'r')
if FLAGS.output_format == OutputFormat.pbtxt:
outputter = PBTextOutputter(FLAGS.output_path)
elif FLAGS.output_format == OutputFormat.sdf_init:
outputter = SDFOutputter(
FLAGS.output_path,
init_geometry=True,
opt_geometry=False,
include_all_bond_topologies=FLAGS.sdf_include_all_bond_topologies)
elif FLAGS.output_format == OutputFormat.sdf_opt:
outputter = SDFOutputter(
FLAGS.output_path,
init_geometry=False,
opt_geometry=True,
include_all_bond_topologies=FLAGS.sdf_include_all_bond_topologies)
elif FLAGS.output_format == OutputFormat.sdf_init_opt:
outputter = SDFOutputter(
FLAGS.output_path,
init_geometry=True,
opt_geometry=True,
include_all_bond_topologies=FLAGS.sdf_include_all_bond_topologies)
elif FLAGS.output_format == OutputFormat.atomic_input:
outputter = AtomicInputOutputter(FLAGS.output_path)
elif FLAGS.output_format == OutputFormat.dat:
outputter = DatOutputter(FLAGS.output_path)
else:
raise ValueError(f'Bad output format {FLAGS.output_format}')
if FLAGS.redetect_topology:
outputter = ReDetectTopologiesOutputter(outputter)
with contextlib.closing(outputter):
for cid in (int(x) for x in FLAGS.cids):
conformer = db.find_by_conformer_id(cid)
outputter.output(conformer)
for btid in (int(x) for x in FLAGS.btids):
conformers = db.find_by_bond_topology_id(btid)
if not conformers:
raise KeyError(f'Bond topology {btid} not found')
for c in conformers:
outputter.output(c)
for smiles in FLAGS.smiles:
conformers = db.find_by_smiles(smiles)
if not conformers:
raise KeyError(f'SMILES {smiles} not found')
for c in conformers:
outputter.output(c)
for stoich in FLAGS.stoichiometries:
conformers = db.find_by_stoichiometry(stoich)
for c in conformers:
outputter.output(c)
for smiles in FLAGS.topology_query_smiles:
for c in topology_query(db, smiles):
outputter.output(c)
if FLAGS.random_fraction:
for conformer in db:
if conformer.fate == dataset_pb2.Conformer.FATE_SUCCESS and random.random(
) < FLAGS.random_fraction:
outputter.output(conformer)
if __name__ == '__main__':
app.run(main)
|
google-research/google-research
|
smu/query_sqlite.py
|
Python
|
apache-2.0
| 17,566
|
[
"RDKit"
] |
4cf1cd014aea796920df185e6a2c223cab90964cd8823812560dc6dedf72131c
|
# Copyright (c) 2014 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this list
# of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Fan, Yugang <yugang.fan@intel.com>
import time
import json
import re
import colorsys
import Image
import string
import os
import ConfigParser
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import (
NoSuchElementException,
StaleElementReferenceException,
NoAlertPresentException,
WebDriverException)
from atip.tizen import tizen
from atip.common import common
from atip.android import android
try:
from urlparse import urljoin, urlparse
except ImportError:
from urllib.parse import urljoin, urlparse
class WebAPP(common.APP):
def __init__(self, app_config=None, app_name=None,
apk_pkg_name=None, apk_activity_name=None):
self.driver = None
self.app_type = common.APP_TYPE_WEB
self.app_name = app_name
self.app_id = ""
self.cur_path = os.getcwd()
self.config_file = "data.conf"
self.device_platform = ""
self.test_type = ""
self.read_config()
self.test_url = app_config["test-url"]
self.baseline_path = self.test_url + "/../../data/" + self.device_platform
self.text_value = {}
self.picture_list = []
self.color_dict = {
"rgb(255, 0, 0)": "red",
"rgb(0, 255, 0)": "green",
"rgb(0, 0, 255)": "blue",
"rgb(255, 255, 0)": "yellow",
"rgb(0, 0, 0)": "black",
"rgb(0, 128, 0)": "green",
"rgb(255, 255, 255)": "white",
"rgba(0, 0, 0, 0)": "white"}
if "platform" in app_config and "name" in app_config["platform"]:
if app_config["platform"]["name"].upper().find('TIZEN') >= 0:
self.app_id = tizen.get_appid_by_name(
self.app_name, app_config["platform"], app_config["tizen_user"])
if app_config["platform"]["name"].upper().find('ANDROID') >= 0:
                if apk_activity_name is None and apk_pkg_name is None:
if "app_launcher" in app_config and app_config[
"app_launcher"] == "XWalkLauncher":
self.app_name = self.app_name.replace("-", "_")
apk_name_update = "".join(
[i.capitalize() for i in self.app_name.split("_") if i])
apk_activity_name = ".%sActivity" % apk_name_update
apk_pkg_name = "org.xwalk.%s" % self.app_name
if "app_launcher" in app_config and app_config[
"app_launcher"] == "CordovaLauncher":
self.app_name = self.app_name.replace("-", "_")
apk_activity_name = ".%s" % self.app_name
apk_pkg_name = "org.xwalk.%s" % self.app_name
app_config_str = json.dumps(app_config).replace(
"TEST_APP_NAME", self.app_name).replace(
"TEST_APP_ID", self.app_id).replace(
"TEST_PKG_NAME", apk_pkg_name).replace(
"TEST_ACTIVITY_NAME", apk_activity_name)
self.app_config = json.loads(app_config_str)
if "url-prefix" in app_config:
self.url_prefix = app_config["url-prefix"]
else:
self.url_prefix = ""
def read_config(self):
try:
config = ConfigParser.ConfigParser()
with open(self.config_file, "r") as cfgfile:
config.readfp(cfgfile)
self.device_platform = config.get('info', 'platform')
self.test_type = config.get('info', 'test_type')
except Exception as e:
print "Parser config data.config failed: %s" % e
def __get_element_by_xpath(self, xpath, display=True):
try:
element = self.driver.find_element_by_xpath(xpath)
if display:
try:
if element.is_displayed():
return element
except StaleElementReferenceException:
pass
else:
return element
print "Failed to get element"
except Exception as e:
print "Failed to get element: %s" % e
return None
def __get_element_by_key_attr(self, key, attr, display=True):
xpath = "//*[@%s='%s']" % (attr, key)
try:
element = self.driver.find_element_by_xpath(xpath)
if display:
try:
if element.is_displayed():
return element
except StaleElementReferenceException:
pass
else:
return element
print "Failed to get element"
except Exception as e:
print "Failed to get element: %s" % e
return None
def __get_element_by_tag(self, key, display=True):
try:
            element = self.driver.find_element_by_tag_name(key)
return element
except Exception as e:
print "Failed to get element: %s" % e
return None
def __get_element_by_key(self, key, display=True):
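        # Try several common locators (id, name, value, class, visible text)
        # in a single XPath union and return the first displayed match.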
try:
for i_element in self.driver.find_elements_by_xpath(str(
"//*[@id='%(key)s']|"
"//*[@name='%(key)s']|"
"//*[@value='%(key)s']|"
"//*[contains(@class, '%(key)s')]|"
"//div[contains(text(), '%(key)s')]|"
"//button[contains(text(), '%(key)s')]|"
"//input[contains(text(), '%(key)s')]|"
"//textarea[contains(text(), '%(key)s')]|"
"//a[contains(text(), '%(key)s')]") % {'key': key}):
if display:
try:
if i_element.is_displayed():
return i_element
except StaleElementReferenceException:
pass
else:
return i_element
print "Failed to get element"
except Exception as e:
print "Failed to get element: %s" % e
return None
def __get_element_by_keys(self, key_p, key_c, display=True):
try:
for i_element in self.driver.find_elements_by_xpath(str(
"//*[@id='%(key)s']|"
"//*[@name='%(key)s']|"
"//*[@value='%(key)s']|"
"//*[contains(@class, '%(key)s')]|"
"//div[contains(text(), '%(key)s')]|"
"//button[contains(text(), '%(key)s')]|"
"//input[contains(text(), '%(key)s')]|"
"//textarea[contains(text(), '%(key)s')]|"
"//a[contains(text(), '%(key)s')]") % {'key': key_p}):
get_element = False
if display:
try:
if i_element.is_displayed():
get_element = True
except StaleElementReferenceException:
pass
else:
get_element = True
if get_element:
print "%s ++ %s" % (i_element.get_attribute("id"), i_element.get_attribute("class"))
for ii_element in i_element.find_elements_by_xpath(str(
"./*[@id='%(key)s']|"
"./*[@name='%(key)s']|"
"./*[@value='%(key)s']|"
"./*[contains(@class, '%(key)s')]|"
"./div[contains(text(), '%(key)s')]|"
"./button[contains(text(), '%(key)s')]|"
"./input[contains(text(), '%(key)s')]|"
"./textarea[contains(text(), '%(key)s')]|"
"./a[contains(text(), '%(key)s')]") % {'key': key_c}):
if display:
try:
if ii_element.is_displayed():
return ii_element
except StaleElementReferenceException:
pass
else:
return ii_element
print "Failed to get element"
except Exception as e:
print "Failed to get element: %s" % e
return None
def __check_normal_text(self, text, display=True):
try:
for i_element in self.driver.find_elements_by_xpath(str(
'//*[@value="{text}"]|'
'//*[contains(normalize-space(.),"{text}") '
'and not(./*[contains(normalize-space(.),"{text}")])]'
.format(text=text))):
if display:
try:
if i_element.is_displayed():
return i_element
except StaleElementReferenceException:
pass
else:
return i_element
except Exception as e:
print "Failed to get element: %s" % e
return None
def check_normal_text_element_not_exist(self, text, key, display=True):
element = self.__get_element_by_key(key, display)
if element:
try:
e_list = element.find_elements_by_xpath(str(
'//*[@value="{text}"]|'
'//*[contains(normalize-space(.),"{text}") '
'and not(./*[contains(normalize-space(.),"{text}")])]'
.format(text=text)))
for i_element in e_list:
if i_element.text == text:
return False
return True
except Exception as e:
print "Failed to get element: %s" % e
return False
def __check_normal_text_element(self, text, key, display=True):
element = self.__get_element_by_key(key, display)
if element:
try:
for i_element in element.find_elements_by_xpath(str(
'//*[@value="{text}"]|'
'//*[contains(normalize-space(.),"{text}") '
'and not(./*[contains(normalize-space(.),"{text}")])]'
.format(text=text))):
if display:
try:
if i_element.is_displayed():
return i_element
except StaleElementReferenceException:
pass
else:
return i_element
except Exception as e:
print "Failed to get element: %s" % e
return None
def compare_two_values(self, first=None, second=None):
try:
if first.isdigit():
if eval(first) < eval(self.text_value[second]):
return True
else:
return False
else:
if eval(self.text_value[first]) < eval(self.text_value[second]):
return True
else:
return False
except Exception as e:
print "Failed to compare these two param: %s" % e
return False
def judge_saved_value(self, value=None, result=None):
try:
if self.text_value[value] == result:
return True
else:
return False
except Exception as e:
print "Failed to judge saved vaule: %s" % e
return False
def compare_two_values_range(self, first=None, second=None, value=None):
try:
result = eval(self.text_value[second]) - eval(self.text_value[first])
            if result >= eval(value):
return True
else:
return False
except Exception as e:
print "Failed to compare these two param with value: %s" % e
return False
def judge_value_range(self, value=None, first=None, second=None):
try:
result = eval(self.text_value[value])
            if (eval(second) >= result) and (eval(first) <= result):
                return True
            elif (eval(second) <= result) and (eval(first) >= result):
                return True
else:
return False
except Exception as e:
print "Failed to judge value range: %s" % e
return False
def save_content(self, p_name=None, key=None):
try:
js_script = 'var style=document.getElementById(\"' + \
key + '\").innerHTML; return style'
style = self.driver.execute_script(js_script)
self.text_value[p_name] = style
return True
except Exception as e:
print "Failed to get element: %s" % e
return False
def launch_app(self):
try:
desired_capabilities = self.app_config["desired-capabilities"]
self.driver = WebDriver(
str(self.app_config["driver-url"]), desired_capabilities)
except Exception as e:
print "Failed to launch %s: %s" % (self.app_name, e)
return False
return True
def switch_url(self, url, with_prefix=True):
if with_prefix:
url = urljoin(self.url_prefix, url)
try:
self.driver.get(url)
except Exception as e:
print "Failed to visit %s: %s" % (url, e)
return False
return True
def title(self):
try:
return self.driver.title
except Exception as e:
print "Failed to get title: %s" % e
return None
def current_url(self):
try:
return self.driver.current_url
except Exception as e:
print "Failed to get current url: %s" % e
return None
def reload(self):
self.driver.refresh()
return True
def back(self):
self.driver.back()
return True
def forward(self):
self.driver.forward()
return True
def check_normal_text_timeout(self, text=None, display=True, timeout=2):
end_time = time.time() + timeout
while time.time() < end_time:
if self.__check_normal_text(text, display):
return True
time.sleep(0.2)
return False
def check_normal_title_timeout(self, text=None, display=True, timeout=2):
end_time = time.time() + timeout
while time.time() < end_time:
if self.title() == text:
return True
time.sleep(0.2)
return False
def check_normal_text_element_timeout(
self, text=None, key=None, display=True, timeout=2):
end_time = time.time() + timeout
while time.time() < end_time:
if self.__check_normal_text_element(text, key, display):
return True
time.sleep(0.2)
return False
def check_normal_text_element_timeout_with_color(
self, text=None, key=None, color=None, display=True, timeout=2):
end_time = time.time() + timeout
while time.time() < end_time:
if self.__check_normal_text_element(text, key, display):
if self.check_text_color(key, color):
return True
time.sleep(0.2)
return False
def check_normal_element_timeout_with_color(
self, key=None, color=None, display=True, timeout=2):
end_time = time.time() + timeout
while time.time() < end_time:
if self.check_background_color(key, color):
return True
time.sleep(0.2)
return False
def check_background_color(self, key=None, color=None, display=True):
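        # Resolution order: inline style, then computed style (currentStyle
        # for old IE, getComputedStyle otherwise), defaulting to "white";
        # rgb()/rgba() strings are mapped to names via self.color_dict.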
try:
js_script = 'var bg_color=document.getElementById(\"' + \
key + '\").style.backgroundColor; return bg_color'
bg_color = self.driver.execute_script(js_script)
if not bg_color:
js_script = 'var element=document.getElementById(\"' + key + '\");' \
' if(element.currentStyle) {return element.currentStyle.backgroundColor;} ' \
' else { return document.defaultView.getComputedStyle(element,null).backgroundColor; } '
bg_color = self.driver.execute_script(js_script)
if not bg_color:
bg_color = "white"
number = re.match(r'[A-Za-z]+$', bg_color)
if not number:
bg_color = self.color_dict[bg_color]
if bg_color.strip() == color:
return True
except Exception as e:
print "Failed to get element color: %s" % e
return False
def check_text_color(self, key=None, color=None, display=True):
try:
js_script = 'var text_color=document.getElementById(\"' + \
key + '\").style.color; return text_color'
text_color = self.driver.execute_script(js_script)
if not text_color:
js_script = 'var element=document.getElementById(\"' + key + '\");' \
' if(element.currentStyle) {return element.currentStyle.color;} ' \
' else { return document.defaultView.getComputedStyle(element,null).color; } '
text_color = self.driver.execute_script(js_script)
if not text_color:
text_color = "black"
is_rgb = re.match(r'[A-Za-z]+$', text_color)
if not is_rgb:
text_color = self.color_dict[text_color]
if text_color.strip() == color:
return True
except Exception as e:
print "Failed to get element: %s" % e
return False
def check_content_type(self, key=None, display=True):
try:
js_script = 'var text=document.getElementById(\"' + \
key + '\").innerText; return text'
text = self.driver.execute_script(js_script)
if text.strip() == '':
return 'none'
number = re.match(r'(-?\d+)(\.\d+)?', text)
if number:
if "." in text:
return "float"
else:
return "int"
else:
if text.upper() == "TRUE" or text.upper() == "FALSE":
return "boolean"
else:
return "string"
except Exception as e:
print "Failed to get element text: %s" % e
    def press_element_by_key(self, key, display=True):
        element = self.__get_element_by_key(key, display)
        if element:
            print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
            element.click()
            return True
        return False
    def press_element_by_keys(self, key_p, key_c, display=True):
        element = self.__get_element_by_keys(key_p, key_c, display)
        if element:
            print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
            element.click()
            return True
        return False
    def press_element_by_key_attr(self, key, attr, display=True):
        element = self.__get_element_by_key_attr(key, attr, display)
        if element:
            print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
            element.click()
            return True
        return False
    def click_element_by_keys(self, key_p, key_c, display=True):
        element = self.__get_element_by_keys(key_p, key_c, display)
        if element:
            print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
            ActionChains(self.driver).click(element).perform()
            return True
        return False
    def click_element_by_key(self, key, display=True):
        element = self.__get_element_by_key(key, display)
        if element:
            print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
            ActionChains(self.driver).click(element).perform()
            return True
        return False
# * The method click_element_by_key will fail when VKB shelter the button, and js can avoid this issue.
    def click_element_by_id_with_js(self, key, display=True):
        element = self.__get_element_by_key_attr(key, "id", display)
        if element:
            print "%s == %s" % (element.get_attribute("id"), element.get_attribute("class"))
            js_script = 'document.getElementById(\"' + key + '\").click()'
            self.driver.execute_script(js_script)
            return True
        return False
def click_element_coords(self, x, y, key, display=True):
element = self.__get_element_by_key(key, display)
if element:
ActionChains(self.driver).move_to_element_with_offset(
element, x, y).click().perform()
return True
return False
def execute_js_code(self, js_code):
try:
return self.driver.execute_script(js_code)
except Exception as e:
print "Execute js code failed: %s" % e
return 0
# Calculate the location params of element
def calculate_element_location(self, key, width=0, height=0):
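        # Returns (left, top, right, bottom) of the element in CSS pixels,
        # based on getBoundingClientRect; the width/height arguments, when
        # given, override the measured size.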
try:
if width:
width = string.atoi(width)
if height:
height = string.atoi(height)
js_script = 'var top=document.getElementById(\"' + \
key + '\").getBoundingClientRect().top; return top'
top = self.execute_js_code(js_script)
js_script = 'var left=document.getElementById(\"' + \
key + '\").getBoundingClientRect().left; return left'
left = self.execute_js_code(js_script)
if not width:
js_script = 'var width=document.getElementById(\"' + \
key + '\").getBoundingClientRect().width; return width'
width = self.execute_js_code(js_script)
if not height:
js_script = 'var height=document.getElementById(\"' + \
key + '\").getBoundingClientRect().height; return height'
height = self.execute_js_code(js_script)
return (left, top, left + width, top + height)
except Exception as e:
print "Get element location failed: %s" % e
return 0
def calculate_resolution_ratio(self, pic_name):
try:
js_script = 'var width=window.screen.availWidth; return width'
body_width = self.execute_js_code(js_script)
js_script = 'var height=window.screen.availHeight; return height'
body_height = self.execute_js_code(js_script)
im = Image.open(pic_name)
w, h = im.size
ratio_w = w / body_width
ratio_h = h / body_height
            ratio = 0
if ratio_w > ratio_h:
ratio = ratio_w
else:
ratio = ratio_h
return w / ratio, h / ratio
except Exception as e:
print "Calculate page picture resolution failed: %s" % e
return 0
# Save the specified element as a single picture
def save_div_as_picture(self, key, element_pic, width=0, height=0):
try:
page_pic = "page.png"
self.driver.get_screenshot_as_file(page_pic)
self.picture_list.append(page_pic)
ratio = self.calculate_resolution_ratio(page_pic)
self.convert_pic(page_pic, ratio)
box = self.calculate_element_location(key, width, height)
self.crop_pic(page_pic, element_pic, box)
self.picture_list.append(element_pic)
return True
except Exception as e:
print "Save element picture failed: %s" % e
return False
# Remove these temporary pictures
def remove_picture(self):
try:
picture_list = list(set(self.picture_list))
for element in picture_list:
os.remove(element)
self.picture_list = []
return True
except Exception as e:
print "Remove the tmp pictures fail: %s" % e
return False
    # Check if the contents of two files are the same
def check_md5_file_same(self, file_name):
try:
result_path = self.baseline_path + "/" + file_name + ".md5"
fp_result = open(result_path, "r")
str_result = fp_result.read()
fp_result.close()
baseline_path = self.baseline_path + \
"/" + file_name + "_baseline.md5"
fp_baseline = open(baseline_path, "r")
str_baseline = fp_baseline.read()
fp_baseline.close()
index = cmp(str_result, str_baseline)
if not index:
return True
else:
return False
except Exception as e:
print "Check md5 file failed: %s" % e
return False
# Save pic as base64 data's md5
def save_base64_md5_pic(self, pic_name):
try:
md5file_path = ""
if self.test_type == "result":
md5file_path = self.baseline_path + "/" + pic_name + ".md5"
elif self.test_type == "baseline":
md5file_path = self.baseline_path + \
"/" + pic_name + "_baseline.md5"
pic_base64 = self.driver.get_screenshot_as_base64()
pic_md5 = self.get_string_md5(pic_base64)
fp = open(md5file_path, "w")
fp.write(pic_md5)
fp.close()
return True
except Exception as e:
print "Save pic as base64 failed: %s" % e
return False
# Save page as pictures
def save_page_per_conf(self, pic_name):
try:
if not os.path.exists(self.baseline_path):
os.makedirs(self.baseline_path)
if self.test_type == "result":
picname_result = self.baseline_path + "/" + pic_name + ".png"
self.driver.get_screenshot_as_file(picname_result)
return True
elif self.test_type == "baseline":
picname_baseline = self.baseline_path + \
"/" + pic_name + "_baseline.png"
self.driver.get_screenshot_as_file(picname_baseline)
return True
else:
print "Test_type is wrong. It should be baseline or result. Please check the data.config file."
return False
except Exception as e:
print "Save baseline pictures fail: %s" % e
return False
def check_base_result_similarity(self, pic_name, similarity):
resu_pic = self.baseline_path + "/" + pic_name + ".png"
base_pic = self.baseline_path + "/" + pic_name + "_baseline.png"
        if not os.path.exists(resu_pic):
            print "The result picture %s does not exist! Case fails" % pic_name
            return False
        if not os.path.exists(base_pic):
            print "The baseline picture %s does not exist! Case fails" % base_pic
            return False
return self.check_pic_same(base_pic, resu_pic, similarity)
def fill_element_by_key(self, key, text, display=True):
element = self.__get_element_by_key(key, display)
if element:
element.send_keys(text)
return True
return False
def fill_element_by_key_attr(self, key, attr, text, display=True):
element = self.__get_element_by_key_attr(key, attr, display)
if element:
element.send_keys(text)
return True
return False
def check_checkbox_by_key(self, key, display=True):
element = self.__get_element_by_xpath(str(
"//input[@id='%(key)s'][@type='checkbox']|"
"//input[@name='%(key)s'][@type='checkbox']") % {'key': key}, display)
if element:
if not element.is_selected():
element.click()
return True
return False
def uncheck_checkbox_by_key(self, key, display=True):
element = self.__get_element_by_xpath(str(
"//input[@id='%(key)s'][@type='checkbox']|"
"//input[@name='%(key)s'][@type='checkbox']") % {'key': key}, display)
if element:
if element.is_selected():
element.click()
return True
return False
def get_alert_text(self):
try:
alert_element = self.driver.switch_to_alert()
if alert_element:
return alert_element.text
except Exception as e:
print "Failed to get alert text: %s" % e
return None
def check_alert_existing(self):
try:
self.driver.switch_to_alert().text
except NoAlertPresentException:
return False
return True
def accept_alert(self):
try:
alert_element = self.driver.switch_to_alert()
alert_element.accept()
return True
except Exception as e:
print "Failed to accept alert: %s" % e
return False
def quit(self):
if self.driver:
self.driver.quit()
def launch_webapp_by_name(
context, app_name, apk_pkg_name=None, apk_activity_name=None):
if not context.bdd_config:
assert False
if app_name in context.apps:
context.apps[app_name].quit()
context.apps.update(
{app_name: WebAPP(context.bdd_config, app_name, apk_pkg_name, apk_activity_name)})
context.apps.update(
{"android": android.Android(context.bdd_config, app_name, apk_pkg_name, apk_activity_name)})
context.web = context.apps[app_name]
context.android = context.apps["android"]
context.android.turnOnScreen()
context.android.pressKeyBy("home")
context.android.setDeviceOrientation("n")
if not context.web.launch_app():
assert False
assert True
|
qiuzhong/crosswalk-test-suite
|
tools/atip/atip/web/web.py
|
Python
|
bsd-3-clause
| 31,944
|
[
"VisIt"
] |
06936f93045f56944fbf1b655344a28a509f0c4fc8894a8dc3cd735e69c7c106
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines the FeffInputSet abstract base class and a concrete
implementation for the Materials Project. The basic concept behind an input
set is to specify a scheme to generate a consistent set of Feff inputs from a
structure without further user intervention. This ensures comparability across
runs.
"""
import abc
import logging
import os
import sys
from copy import deepcopy
import numpy as np
from monty.json import MSONable
from monty.os.path import zpath
from monty.serialization import loadfn
from pymatgen.io.feff.inputs import Atoms, Header, Potential, Tags
__author__ = "Kiran Mathew"
__credits__ = "Alan Dozier, Anubhav Jain, Shyue Ping Ong"
__version__ = "1.1"
__maintainer__ = "Kiran Mathew"
__email__ = "kmathew@lbl.gov"
__date__ = "Sept 10, 2016"
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s: %(levelname)s: %(name)s: %(message)s")
sh = logging.StreamHandler(stream=sys.stdout)
sh.setFormatter(formatter)
logger.addHandler(sh)
class AbstractFeffInputSet(MSONable, metaclass=abc.ABCMeta):
"""
Abstract base class representing a set of Feff input parameters.
The idea is that using a FeffInputSet, a complete set of input files
    (feffPOT, feffXANES, feffEXAFS, ATOMS, feff.inp)
can be generated in an automated fashion for any structure.
"""
@abc.abstractmethod
def header(self):
"""
Returns header to be used in feff.inp file from a pymatgen structure
"""
pass
@property
@abc.abstractmethod
def atoms(self):
"""
Returns Atoms string from a structure that goes in feff.inp file.
Returns:
Atoms object.
"""
pass
@property
@abc.abstractmethod
def tags(self):
"""
Returns standard calculation parameters.
"""
return
@property
@abc.abstractmethod
def potential(self):
"""
Returns POTENTIAL section used in feff.inp from a structure.
"""
pass
def all_input(self):
"""
Returns all input files as a dict of {filename: feffio object}
"""
d = {"HEADER": self.header(), "PARAMETERS": self.tags}
if "RECIPROCAL" not in self.tags:
d.update({"POTENTIALS": self.potential, "ATOMS": self.atoms})
return d
def write_input(self, output_dir=".", make_dir_if_not_present=True):
"""
Writes a set of FEFF input to a directory.
Args:
output_dir: Directory to output the FEFF input files
make_dir_if_not_present: Set to True if you want the directory (
and the whole path) to be created if it is not present.
"""
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
feff = self.all_input()
feff_input = "\n\n".join(str(feff[k]) for k in ["HEADER", "PARAMETERS", "POTENTIALS", "ATOMS"] if k in feff)
for k, v in feff.items():
with open(os.path.join(output_dir, k), "w") as f:
f.write(str(v))
with open(os.path.join(output_dir, "feff.inp"), "w") as f:
f.write(feff_input)
# write the structure to cif file
if "ATOMS" not in feff:
self.atoms.struct.to(fmt="cif", filename=os.path.join(output_dir, feff["PARAMETERS"]["CIF"]))
class FEFFDictSet(AbstractFeffInputSet):
"""
Standard implementation of FeffInputSet, which can be extended by specific
implementations.
"""
def __init__(
self,
absorbing_atom,
structure,
radius,
config_dict,
edge="K",
spectrum="EXAFS",
nkpts=1000,
user_tag_settings=None,
):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input structure
radius (float): cluster radius
config_dict (dict): control tag settings dict
edge (str): absorption edge
spectrum (str): type of spectrum to calculate, available options :
EXAFS, XANES, DANES, XMCD, ELNES, EXELFS, FPRIME, NRIXS, XES.
The default is EXAFS.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings. To delete
tags, set the key '_del' in the user_tag_settings.
eg: user_tag_settings={"_del": ["COREHOLE", "EXCHANGE"]}
"""
self.absorbing_atom = absorbing_atom
self.structure = structure
self.radius = radius
self.config_dict = deepcopy(config_dict)
self.edge = edge
self.spectrum = spectrum
self.nkpts = nkpts
self.user_tag_settings = user_tag_settings or {}
self.config_dict["EDGE"] = self.edge
self.config_dict.update(self.user_tag_settings)
if "_del" in self.user_tag_settings:
for tag in self.user_tag_settings["_del"]:
if tag in self.config_dict:
del self.config_dict[tag]
del self.config_dict["_del"]
# k-space feff only for small systems. The hardcoded system size in
# feff is around 14 atoms.
self.small_system = len(self.structure) < 14 and "EXAFS" not in self.config_dict
def header(self, source="", comment=""):
"""
Creates header string from structure object
Args:
source: Source identifier used to create structure, can be defined
however user wants to organize structures, calculations, etc.
example would be Materials Project material ID number.
comment: comment to include in header
Returns:
Header
"""
return Header(self.structure, source, comment)
@property
def tags(self):
"""
FEFF job parameters.
Returns:
Tags
"""
if "RECIPROCAL" in self.config_dict:
if self.small_system:
self.config_dict["CIF"] = "{}.cif".format(self.structure.formula.replace(" ", ""))
self.config_dict["TARGET"] = self.atoms.center_index + 1
self.config_dict["COREHOLE"] = "RPA"
logger.warning("Setting COREHOLE = RPA for K-space calculation")
if not self.config_dict.get("KMESH", None):
abc = self.structure.lattice.abc
mult = (self.nkpts * abc[0] * abc[1] * abc[2]) ** (1 / 3)
self.config_dict["KMESH"] = [int(round(mult / l)) for l in abc]
else:
                logger.warning(
                    "Large system (>=14 atoms) or EXAFS calculation, "
                    "removing K-space settings"
                )
del self.config_dict["RECIPROCAL"]
self.config_dict.pop("CIF", None)
self.config_dict.pop("TARGET", None)
self.config_dict.pop("KMESH", None)
self.config_dict.pop("STRFAC", None)
return Tags(self.config_dict)
@property
def potential(self):
"""
FEFF potential
Returns:
Potential
"""
return Potential(self.structure, self.absorbing_atom)
@property
def atoms(self):
"""
absorber + the rest
Returns:
Atoms
"""
return Atoms(self.structure, self.absorbing_atom, self.radius)
def __str__(self):
output = [self.spectrum]
output.extend([f"{k} = {str(v)}" for k, v in self.config_dict.items()])
output.append("")
return "\n".join(output)
@staticmethod
def from_directory(input_dir):
"""
Read in a set of FEFF input files from a directory, which is
useful when existing FEFF input needs some adjustment.
"""
sub_d = {}
for fname, ftype in [("HEADER", Header), ("PARAMETERS", Tags)]:
fullzpath = zpath(os.path.join(input_dir, fname))
sub_d[fname.lower()] = ftype.from_file(fullzpath)
# Generation of FEFFDict set requires absorbing atom, need to search
# the index of absorption atom in the structure according to the
# distance matrix and shell species information contained in feff.inp
absorber_index = []
radius = None
feffinp = zpath(os.path.join(input_dir, "feff.inp"))
if "RECIPROCAL" not in sub_d["parameters"]:
input_atoms = Atoms.cluster_from_file(feffinp)
shell_species = np.array([x.species_string for x in input_atoms])
# First row of distance matrix represents the distance from the absorber to
# the rest atoms
distance_matrix = input_atoms.distance_matrix[0, :]
# Get radius value
from math import ceil
radius = int(
ceil(
input_atoms.get_distance(
input_atoms.index(input_atoms[0]),
input_atoms.index(input_atoms[-1]),
)
)
)
for site_index, site in enumerate(sub_d["header"].struct):
if site.specie == input_atoms[0].specie:
site_atoms = Atoms(sub_d["header"].struct, absorbing_atom=site_index, radius=radius)
site_distance = np.array(site_atoms.get_lines())[:, 5].astype(np.float64)
site_shell_species = np.array(site_atoms.get_lines())[:, 4]
shell_overlap = min(shell_species.shape[0], site_shell_species.shape[0])
if np.allclose(distance_matrix[:shell_overlap], site_distance[:shell_overlap]) and np.all(
site_shell_species[:shell_overlap] == shell_species[:shell_overlap]
):
absorber_index.append(site_index)
if "RECIPROCAL" in sub_d["parameters"]:
absorber_index = sub_d["parameters"]["TARGET"]
absorber_index[0] = int(absorber_index[0]) - 1
# Generate the input set
if "XANES" in sub_d["parameters"]:
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPXANESSet.yaml"))
if radius is None:
radius = 10
return FEFFDictSet(
absorber_index[0],
sub_d["header"].struct,
radius=radius,
config_dict=CONFIG,
edge=sub_d["parameters"]["EDGE"],
nkpts=1000,
user_tag_settings=sub_d["parameters"],
)
raise ValueError("Bad input directory.")
class MPXANESSet(FEFFDictSet):
"""
FeffDictSet for XANES spectroscopy.
"""
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPXANESSet.yaml"))
def __init__(
self,
absorbing_atom,
structure,
edge="K",
radius=10.0,
nkpts=1000,
user_tag_settings=None,
):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input
edge (str): absorption edge
radius (float): cluster radius in Angstroms.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings
"""
super().__init__(
absorbing_atom,
structure,
radius,
MPXANESSet.CONFIG,
edge=edge,
spectrum="XANES",
nkpts=nkpts,
user_tag_settings=user_tag_settings,
)
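# Example usage (hypothetical pymatgen Structure named `structure`):
#   MPXANESSet("Fe", structure).write_input(output_dir="feff_xanes_run")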
class MPEXAFSSet(FEFFDictSet):
"""
FeffDictSet for EXAFS spectroscopy.
"""
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPEXAFSSet.yaml"))
def __init__(
self,
absorbing_atom,
structure,
edge="K",
radius=10.0,
nkpts=1000,
user_tag_settings=None,
):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input structure
edge (str): absorption edge
radius (float): cluster radius in Angstroms.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings
"""
super().__init__(
absorbing_atom,
structure,
radius,
MPEXAFSSet.CONFIG,
edge=edge,
spectrum="EXAFS",
nkpts=nkpts,
user_tag_settings=user_tag_settings,
)
class MPEELSDictSet(FEFFDictSet):
"""
FeffDictSet for ELNES spectroscopy.
"""
def __init__(
self,
absorbing_atom,
structure,
edge,
spectrum,
radius,
beam_energy,
beam_direction,
collection_angle,
convergence_angle,
config_dict,
user_eels_settings=None,
nkpts=1000,
user_tag_settings=None,
):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input structure
edge (str): absorption edge
spectrum (str): ELNES or EXELFS
radius (float): cluster radius in Angstroms.
beam_energy (float): Incident beam energy in keV
beam_direction (list): Incident beam direction. If None, the
cross section will be averaged.
collection_angle (float): Detector collection angle in mrad.
convergence_angle (float): Beam convergence angle in mrad.
user_eels_settings (dict): override default EELS config.
See MPELNESSet.yaml for supported keys.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings
"""
self.beam_energy = beam_energy
self.beam_direction = beam_direction
self.collection_angle = collection_angle
self.convergence_angle = convergence_angle
self.user_eels_settings = user_eels_settings
eels_config_dict = deepcopy(config_dict)
if beam_direction:
beam_energy_list = [beam_energy, 0, 1, 1]
eels_config_dict[spectrum]["BEAM_DIRECTION"] = beam_direction
else:
beam_energy_list = [beam_energy, 1, 0, 1]
del eels_config_dict[spectrum]["BEAM_DIRECTION"]
eels_config_dict[spectrum]["BEAM_ENERGY"] = beam_energy_list
eels_config_dict[spectrum]["ANGLES"] = [collection_angle, convergence_angle]
if user_eels_settings:
eels_config_dict[spectrum].update(user_eels_settings)
super().__init__(
absorbing_atom,
structure,
radius,
eels_config_dict,
edge=edge,
spectrum=spectrum,
nkpts=nkpts,
user_tag_settings=user_tag_settings,
)
class MPELNESSet(MPEELSDictSet):
"""
FeffDictSet for ELNES spectroscopy.
"""
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPELNESSet.yaml"))
def __init__(
self,
absorbing_atom,
structure,
edge="K",
radius=10.0,
beam_energy=100,
beam_direction=None,
collection_angle=1,
convergence_angle=1,
user_eels_settings=None,
nkpts=1000,
user_tag_settings=None,
):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input structure
edge (str): absorption edge
radius (float): cluster radius in Angstroms.
beam_energy (float): Incident beam energy in keV
beam_direction (list): Incident beam direction. If None, the
cross section will be averaged.
collection_angle (float): Detector collection angle in mrad.
convergence_angle (float): Beam convergence angle in mrad.
user_eels_settings (dict): override default EELS config.
See MPELNESSet.yaml for supported keys.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings
"""
super().__init__(
absorbing_atom,
structure,
edge,
"ELNES",
radius,
beam_energy,
beam_direction,
collection_angle,
convergence_angle,
MPELNESSet.CONFIG,
user_eels_settings=user_eels_settings,
nkpts=nkpts,
user_tag_settings=user_tag_settings,
)
class MPEXELFSSet(MPEELSDictSet):
"""
FeffDictSet for EXELFS spectroscopy.
"""
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPEXELFSSet.yaml"))
def __init__(
self,
absorbing_atom,
structure,
edge="K",
radius=10.0,
beam_energy=100,
beam_direction=None,
collection_angle=1,
convergence_angle=1,
user_eels_settings=None,
nkpts=1000,
user_tag_settings=None,
):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input structure
edge (str): absorption edge
radius (float): cluster radius in Angstroms.
beam_energy (float): Incident beam energy in keV
beam_direction (list): Incident beam direction. If None, the
cross section will be averaged.
collection_angle (float): Detector collection angle in mrad.
convergence_angle (float): Beam convergence angle in mrad.
user_eels_settings (dict): override default EELS config.
See MPEXELFSSet.yaml for supported keys.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings
"""
super().__init__(
absorbing_atom,
structure,
edge,
"EXELFS",
radius,
beam_energy,
beam_direction,
collection_angle,
convergence_angle,
MPEXELFSSet.CONFIG,
user_eels_settings=user_eels_settings,
nkpts=nkpts,
user_tag_settings=user_tag_settings,
)
|
vorwerkc/pymatgen
|
pymatgen/io/feff/sets.py
|
Python
|
mit
| 19,267
|
[
"FEFF",
"pymatgen"
] |
0726940a8cc3b6c1b4f6bba5c8bba2ea4e71d814956123a14f3c5e38b6e4f384
|
#coding: utf-8
# 2006/02 Will Holcomb <wholcomb@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# 2007/07/26 Slightly modified by Brian Schneider
#
# in order to support unicode files ( multipart_encode function )
# From http://peerit.blogspot.com/2007/07/multipartposthandler-doesnt-work-for.html
#
# 2013/07 Ken Olum <kdo@cosmos.phy.tufts.edu>
#
# Removed one of \r\n and send Content-Length
#
# 2014/05 Applied Fedora rpm patch
#
# https://bugzilla.redhat.com/show_bug.cgi?id=920778
# http://pkgs.fedoraproject.org/cgit/python-MultipartPostHandler2.git/diff/python-MultipartPostHandler2-cut-out-main.patch?id=c1638bb3e45596232b4d02f1e69901db0c28cfdb
#
# 2014/05/09 Sérgio Basto <sergio@serjux.com>
#
# Better deal with None values, don't throw an exception and just send an empty string.
# Simplified text example
#
# 2014/08/28 <lemori@foxmail.com>
# Feature:
# A simple api interface with more functions;
# Supports Non-ASCII values/dict and filenames well.
"""
Supports Python 2 only.
Usage:
webutil.request(url, data=None, has_files=False|True)
When has_files is False:
data can be a relatively complicated structure, e.g.
{ "user": { "name": "bob", "age": "18"},
"colors": ["red", "blue", "green"] }
When has_files is True:
it POST with contenttype = multipart/form-data
and data should be a simple dict, e.g.
{ "username" : "bob", "password" : "riviera",
"file" : open("filepath", "rb") }
When data is None and has_files is False:
it GET resources from the url
"""
import sys
import urllib
import urllib2
import os, stat
import re
import json
from mimetypes import guess_type
from cStringIO import StringIO
class Callable:
def __init__(self, anycallable):
self.__call__ = anycallable
# Controls how sequences are encoded.
# If true, elements may be given multiple values by assigning a sequence.
doseq = 1
def has_non_ascii(value):
if re.search('\\\\x', repr(value)) or re.search('\\\\u', repr(value)):
return True
else:
return False
def encode_str(value):
if has_non_ascii(value):
value = value.decode('utf-8') if isinstance(value, str) else value
if isinstance(value, unicode):
return value.encode('utf-8')
else:
return value
def _json_dumps(obj, box):
    # Minimal hand-rolled replacement for simplejson.dumps.
if isinstance(obj, basestring):
box.append( encode_str(obj).replace('\n', '\\n').replace('"', '\\"') )
return
elif isinstance(obj, list):
c = False
box.append('[')
for v in obj:
if c:
box.append(',')
c = True
b = '"' if isinstance(v, (basestring,int,bool)) else ''
box.append(b)
_json_dumps(v, box)
box.append(b)
box.append(']')
return
elif isinstance(obj, dict):
c = False
box.append('{')
for (k,v) in obj.items():
if c:
box.append(',')
c = True
box.append('"%s":' % encode_str(k))
b = '"' if isinstance(v, (basestring,int,bool)) else ''
box.append(b)
_json_dumps(v, box)
box.append(b)
box.append('}')
return
elif isinstance(obj, bool):
box.append('%s' % ('1' if obj else '0'))
return
else:
box.append( str(obj) )
return
class FormDataHandler(urllib2.BaseHandler):
handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first
def http_request(self, request):
data = request.get_data()
if data is None:
return request
if not isinstance(data, basestring):
v_files = []
v_vars = []
try:
for(key, value) in data.items():
if type(value) == file:
v_files.append((key, value))
else:
v_vars.append((key, encode_str(value)))
except TypeError:
systype, value, traceback = sys.exc_info()
raise TypeError, "not a valid non-string sequence or mapping object", traceback
if len(v_files) == 0:
data = urllib.urlencode(v_vars, doseq)
else:
boundary, data = self.multipart_encode(v_vars, v_files)
contenttype = 'multipart/form-data; boundary=%s' % boundary
if(request.has_header('Content-Type')
and request.get_header('Content-Type').find('multipart/form-data') != 0):
print "Replacing %s with %s" % (
request.get_header('content-type'),
'multipart/form-data')
request.add_unredirected_header('Content-Type', contenttype)
request.add_data(data)
elif has_non_ascii(data):
request.add_data(encode_str(data))
return request
def multipart_encode(vars, files, boundary = None, buffer = None):
if boundary is None:
boundary = '----------ThIs_Is_tHe_bouNdaRY_$' # No guarantee
if buffer is None:
buffer = StringIO()
for(key, value) in vars:
value = "" if value is None else value
buffer.write('--%s\r\n' % boundary)
buffer.write('Content-Disposition: form-data; name="%s"' % key)
buffer.write('\r\n\r\n' + value + '\r\n')
for(key, fd) in files:
fsize = os.fstat(fd.fileno())[stat.ST_SIZE]
fname = fd.name.split('/')[-1]
contenttype = guess_type(fname)[0] or 'application/octet-stream'
fname = encode_str(fname)
buffer.write('--%s\r\n' % boundary)
buffer.write('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, fname))
buffer.write('Content-Type: %s\r\n' % contenttype)
buffer.write('Content-Length: %s\r\n' % fsize)
fd.seek(0)
buffer.write('\r\n' + fd.read() + '\r\n')
buffer.write('--' + boundary + '--\r\n')
buffer = buffer.getvalue()
return boundary, buffer
multipart_encode = Callable(multipart_encode)
https_request = http_request
class BodyPostHandler(urllib2.BaseHandler):
handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first
def http_request(self, request):
data = request.get_data()
if data is None:
return request
if not isinstance(data, basestring):
newdata = []
_json_dumps(data, newdata)
request.add_data(''.join(newdata))
elif has_non_ascii(data):
request.add_data( encode_str(data) )
https_request = http_request
def request(url, data=None, has_files=False):
'''url: request URL; query parameters are supported.
data: request body; a dict is JSON-encoded (or multipart-encoded when has_files=True).'''
if has_files:
opener = urllib2.build_opener(FormDataHandler)
else:
opener = urllib2.build_opener(BodyPostHandler)
r = opener.open(url, data)
return loads(r.read())
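# Illustrative usage (hedged sketch; the example.com endpoint and payload below
# are hypothetical, not part of the original code):
# resp = request('http://example.com/api/search?q=test', data={'page': 1})
# # resp is the JSON-decoded response body. Pass has_files=True and open file
# # objects inside data to switch to multipart/form-data via FormDataHandler.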
| lemori/pyrequest | pyrequest.py | Python | gpl-2.0 | 7,555 | ["Brian"] | 415e2eaae7cca524e577fc8d6e53a42f821bae7f43e2efdbfd21d5dcad0df3a0 |
import numpy as np
import numpy.lib.recfunctions as rec
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
from astropy import units as u
from os import listdir
from os.path import isfile, join
import scipy.stats as stats
import scipy.optimize as opt
import matplotlib.pyplot as plt
# import extreme_deconvolution as XD # required by XD_gr_rz_fit() below; uncomment once the package is installed
import confidence_contours as cc
from confidence_level_height_estimation import confidence_level_height_estimator, summed_gm, inverse_cdf_gm
# Matplot ticks
import matplotlib as mpl
mpl.rcParams['xtick.major.size'] = 15
mpl.rcParams['xtick.major.width'] = 2.
mpl.rcParams['ytick.major.size'] = 15
mpl.rcParams['ytick.major.width'] = 2.
mpl.rcParams['xtick.labelsize'] = 20
mpl.rcParams['ytick.labelsize'] = 20
colors = ["orange", "grey", "brown", "purple", "red", "salmon","black", "white","blue"]
cnames = ["Gold", "Silver", "LowOII", "NoOII", "LowZ", "NoZ", "D2reject", "DR3unmatched","D2unobserved"]
large_random_constant = -999119283571
deg2arcsec=3600
def return_file(fname):
with open (fname, "r") as myfile:
data=myfile.readlines()
return data
def HMS2deg(ra=None, dec=None):
rs, ds = 1, 1
if dec is not None:
D, M, S = [float(i) for i in dec.split(":")]
if str(D)[0] == '-':
ds, D = -1, abs(D)
dec = ds*(D + (M/60) + (S/3600)) # apply the hemisphere sign (it was computed but never applied before)
if ra is not None:
H, M, S = [float(i) for i in ra.split(":")]
if str(H)[0] == '-':
rs, H = -1, abs(H)
ra = rs*((H*15) + (M/4) + (S/240)) # apply the sign here as well
if (ra is not None) and (dec is not None):
return ra, dec
elif ra is not None:
return ra
else:
return dec
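# Quick sanity check (added example): 12h of RA is 180 deg, and -30:30:00 is
# -30.5 deg once the hemisphere sign fix above is applied.
if __name__ == "__main__":
    print(HMS2deg(ra="12:00:00"))   # 180.0
    print(HMS2deg(dec="-30:30:00")) # -30.5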
def MMT_study_color(grz, field, mask=None):
"""
field:
- 0 corresponds to 16hr
- 1 corresponds to 23hr
"""
g,r,z = grz
if mask is not None:
g = g[mask]
r = r[mask]
z = z[mask]
if field == 0:
return (g<24) & ((g-r)<0.8) & np.logical_or(((r-z)>(0.7*(g-r)+0.2)), (g-r)<0.2)
else:
return (g<24) & ((g-r)<0.8) & np.logical_or(((r-z)>(0.7*(g-r)+0.2)), (g-r)<0.2) & (g>20)
def MMT_DECaLS_quality(fits, mask=None):
gany,rany,zany = load_grz_anymask(fits)
givar, rivar, zivar = load_grz_invar(fits)
bp = load_brick_primary(fits)
if bp[0] == 0:
bp = (bp==0)
elif type(bp[0])==np.bool_:
bp = bp # Do nothing
else:
bp = bp=="T"
r_dev, r_exp = load_shape(fits)
if mask is not None:
gany, rany, zany = gany[mask], rany[mask], zany[mask]
givar, rivar, zivar =givar[mask], rivar[mask], zivar[mask]
bp = bp[mask]
r_dev, r_exp = r_dev[mask], r_exp[mask]
return (gany==0)&(rany==0)&(zany==0)&(givar>0)&(rivar>0)&(zivar>0)&(bp)&(r_dev<1.5)&(r_exp<1.5)
def load_MMT_specdata(fname, fib_idx=None):
"""
Given spHect* file address, return wavelength (x),
flux value (d), inverse variance (divar), and
AND_mask.
If fib_idx is not None, then return only spectra
indicated.
"""
table_spec = fits.open(fname)
x = table_spec[0].data
d = table_spec[1].data
divar = table_spec[2].data # Inverse variance
AND_mask = table_spec[3].data
if fib_idx is not None:
x = x[fib_idx,:]
d = d[fib_idx,:]
divar = divar[fib_idx,:]
AND_mask = AND_mask[fib_idx,:]
return x, d, divar, AND_mask
def box_car_avg(d,window_pixel_size=50,mask=None):
"""
Take a running average over window_pixel_size pixels.
Exclude masked pixels from averaging (note: d is zeroed in place where masked).
"""
# Filter
v = np.ones(window_pixel_size)
# Array that tells how many pixels were used
N_sample = np.ones(d.size)
if mask is not None:
N_sample[mask]=0
N_sample = np.convolve(N_sample, v,mode="same")
# Running sum of the data excluding masked pixels
if mask is not None:
d[mask]=0
d_boxed = np.convolve(d, v,mode="same")
# Taking average
d_boxed /= N_sample
return d_boxed
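# Illustrative check (added example): averaging a constant signal returns the
# constant even when a stretch of pixels is masked, since masked pixels are
# excluded from both the running sum and the sample count. box_car_avg zeroes
# masked entries of d in place, hence the np.copy below.
if __name__ == "__main__":
    d_demo = np.ones(100)
    mask_demo = np.zeros(100, dtype=bool)
    mask_demo[40:45] = True
    print(box_car_avg(np.copy(d_demo), window_pixel_size=10, mask=mask_demo))  # all 1.0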
def process_spec(d, divar, width_guess, x_mean, mask=None):
"""
Given the data vector d, its corresponding inverse
variance, and a width guess for the filter,
compute the integrated flux A, var(A), and (negative reduced) chi sq.
Also return S2N. width_guess is in Angstrom.
"""
# Filter
pix_sigma = width_guess/x_mean # Peak width in terms of pixels
filter_size = np.ceil(pix_sigma*4*2) # How large the filter has to be to encompass 4-sig
# If the filter size is even, add one so it has a well-defined center pixel.
if filter_size%2==0:
filter_size+=1
# Centered around the filter, create a gaussian.
v_center = int(filter_size/2)
v = np.arange(int(filter_size))-v_center
v = np.exp(-(v**2)/(2*(pix_sigma**2)))/(pix_sigma*np.sqrt(2*np.pi))
# Note: v = G(A=1)
# If mask is used, then block out the appropriate
# portion.
if mask is not None:
d[mask]=0
divar[mask]=0
# varA: Running sum of (ivar*v^2) excluding masked pixels
varA = np.convolve(divar, v**2, mode="same")
# A_numerator: Running sum of (d*v*ivar)
A_numerator = np.convolve(d*divar, v, mode="same")
A = A_numerator/varA
# SN
S2N = A/np.sqrt(varA)
# Compute reduced chi. sq.
# To do, compute the number of samples used.
# Filter
v_N = np.ones(int(filter_size))
N_sample = np.ones(d.size)
if mask is not None:
N_sample[mask]=0
N_sample = np.convolve(N_sample, v_N, mode="same")
# Chi sq. # -1 since we are only estimating one parameter.
chi = -(-2*A_numerator*A+varA*(A**2))/(N_sample-1)
return A, varA, chi, S2N
def median_filter(data, mask=None, window_pixel_size=50):
"""
Given the data array and window size, compute a running
median, excluding masked pixels.
"""
array_length = data.size
if window_pixel_size%2==0:
window_pixel_size+=1
if mask is None:
mask = np.zeros(array_length, dtype=bool) # no pixels masked by default (np.ones here would mask everything)
pass_mask = np.logical_not(mask)
ans = np.zeros(array_length)
for i in range(array_length):
idx_l = max(0, i-int(window_pixel_size/2))
idx_h = min(array_length, i+int(window_pixel_size/2))+1
# print(idx_l, idx_h)
tmp = data[idx_l:idx_h][pass_mask[idx_l:idx_h]]
# print(tmp.size)
ans[i] = np.median(tmp)
return ans
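# Illustrative check (added example): a masked spike does not bias the running
# median, because pass_mask removes it from every window it falls in.
if __name__ == "__main__":
    data_demo = np.ones(20)
    data_demo[10] = 100.
    spike_mask = np.zeros(20, dtype=bool)
    spike_mask[10] = True
    print(median_filter(data_demo, mask=spike_mask, window_pixel_size=5))  # all 1.0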
def spec_lines():
emissions = [3727.3, 4102.8, 4340, 4861.3, 4959,5006.8, 6562.8, 6716]
absorptions = [3933.7, 3968.6, 4304.4, 5175.3, 5984.0]
return emissions, absorptions
def OII_wavelength():
return 3727.3
def plot_fit(x, d, A, S2N, chi, threshold=5, mask=None, mask_caution=None, xmin=4500, xmax=8500, s=1,\
plot_show=True, plot_save=False, save_dir=None, plot_title=""):
"""
Plot a spectrum and its fits.
"""
if mask is not None:
S2N[mask] = 0
A[mask] = 0
chi[mask] = 0
d[mask] = 0
# Limit plot range
ibool = (x>xmin)&(x<xmax)
x_masked = x[ibool]
S2N_masked = S2N[ibool]
chi_masked = chi[ibool]
A_masked = A[ibool]
d_masked = d[ibool]
if mask_caution is not None:
mask_caution = mask_caution[ibool]
# Emission and absorption lines
emissions, absorptions = spec_lines()
OII_line = OII_wavelength()
# Find peaks in S2N above the given threshold (5-sigma by default).
isig5 = (S2N_masked>threshold)
# Create a vector that tells where a peak cluster starts and end.
S2N_start_end = np.zeros_like(S2N_masked)
S2N_start_end[isig5] = 1
S2N_start_end[1:] = S2N_start_end[1:]-S2N_start_end[:-1]
S2N_start_end[0] = 0
# For each [1,...,-1] cluster, finding the idx of maximum and find
# the corresponding x value.
starts = np.where(S2N_start_end==1)[0]
ends = np.where(S2N_start_end==-1)[0]
z_peak_list = []
s2n_peak_list = []
oii_flux_list = []
for i in range(len(ends)):
start = starts[i]
end = ends[i]
if (start==(end-1)) or (start==end):
val = S2N_masked[start]
else:
val = np.max(S2N_masked[start:end])
idx = np.where(S2N_masked ==val)[0]
z_peak_list.append(x_masked[idx]/OII_line-1)
s2n_peak_list.append(S2N_masked[idx])
oii_flux_list.append(A_masked[idx])
for guess_num,z_pk in enumerate(z_peak_list):
info_str = "-".join(["z%.2f"%z_pk,"oii%.2f"%oii_flux_list[guess_num], "s2n%.2f"%s2n_peak_list[guess_num]])
title_str = "-".join([plot_title, "guess%d"%guess_num , info_str])
# Create a figure where x-axis is shared
ft_size = 15
fig, (ax0, ax1,ax2,ax3) = plt.subplots(4,figsize=(12,10),sharex=True)
# Draw lines
for em in emissions:
ax0.axvline(x=(em*(z_pk+1)), ls="--", lw=2, c="red")
for ab in absorptions:
ax0.axvline(x=(ab*(z_pk+1)), ls="--", lw=2, c="green")
ax0.axvline(x=(OII_line*(z_pk+1)), ls="--", lw=2, c="blue")
ax0.set_title(title_str, fontsize=ft_size)
ax0.plot(x_masked,d_masked,lw=1, c="black")
ax0.set_xlim([xmin, xmax])
ax0.set_ylim([max(np.min(d_masked)*1.1,-2),np.max(d_masked)*1.1])
ax0.set_ylabel(r"Original Flux", fontsize=ft_size)
# Draw lines
for em in emissions:
ax1.axvline(x=(em*(z_pk+1)), ls="--", lw=2, c="red")
for ab in absorptions:
ax1.axvline(x=(ab*(z_pk+1)), ls="--", lw=2, c="green")
ax1.axvline(x=(OII_line*(z_pk+1)), ls="--", lw=2, c="blue")
ax1.scatter(x_masked,A_masked,s=s, c="black", edgecolor="none")
ax1.scatter(x_masked[isig5],A_masked[isig5],s=s, c="red", edgecolor="none")
if mask_caution is not None:
ax1.scatter(x_masked[mask_caution],A_masked[mask_caution],s=s, c="blue", edgecolor="none")
ax1.set_xlim([xmin, xmax])
ax1.set_ylim([-2,np.max(A_masked)*1.1])
ax1.set_ylabel(r"Integrated Flux", fontsize=ft_size)
# Draw lines
for em in emissions:
ax2.axvline(x=(em*(z_pk+1)), ls="--", lw=2, c="red")
for ab in absorptions:
ax2.axvline(x=(ab*(z_pk+1)), ls="--", lw=2, c="green")
ax2.axvline(x=(OII_line*(z_pk+1)), ls="--", lw=2, c="blue")
ax2.scatter(x_masked,S2N_masked,s=s, c="black", edgecolor="none")
ax2.scatter(x_masked[isig5],S2N_masked[isig5],s=s, c="red", edgecolor="none")
ax2.axhline(y=5, ls="--", lw=2, c="blue")
if mask_caution is not None:
ax2.scatter(x_masked[mask_caution],S2N_masked[mask_caution],s=s, c="blue", edgecolor="none")
ax2.set_xlim([xmin, xmax])
ax2.set_ylim([-1,np.max(S2N_masked)*1.1])
ax2.set_ylabel(r"S/N", fontsize=ft_size)
# Draw lines
for em in emissions:
ax3.axvline(x=(em*(z_pk+1)), ls="--", lw=2, c="red")
for ab in absorptions:
ax3.axvline(x=(ab*(z_pk+1)), ls="--", lw=2, c="green")
ax3.axvline(x=(OII_line*(z_pk+1)), ls="--", lw=2, c="blue")
ax3.scatter(x_masked,chi_masked,s=s, c="black", edgecolor="none")
ax3.scatter(x_masked[isig5],chi_masked[isig5],s=s, c="red", edgecolor="none")
if mask_caution is not None:
ax3.scatter(x_masked[mask_caution],chi_masked[mask_caution],s=s, c="blue", edgecolor="none")
ax3.set_xlim([xmin, xmax])
ax3.set_ylim([-0.5,np.max(chi_masked)*1.1])
ax3.set_xlabel("Wavelength ($\AA$)", fontsize=ft_size)
ax3.set_ylabel("neg. reduced $\chi^2$", fontsize=ft_size)
fig.subplots_adjust(hspace=0.05)
if plot_save:
plt.savefig(save_dir+title_str+".png", bbox_inches="tight", dpi=200)
if plot_show:
plt.show()
plt.close()
return
def process_spec_best(d, divar, width_guesses, x_mean, mask=None):
"""
The same as process_spec(), except returns A, varA, chi, S2N values for
best chi.
width_guesses is either a list or a numpy array.
"""
width_guesses = np.asarray(width_guesses)
# First
A, varA, chi, S2N = process_spec(d, divar, width_guesses[0], x_mean, mask=mask)
for i in range(1,width_guesses.size):
A_tmp, varA_tmp, chi_tmp, S2N_tmp = process_spec(d, divar, width_guesses[i], x_mean, mask=mask)
# Switch values if the chi sq. is higher. Note how chi sq. is defined (negative reduced) in process_spec().
ibool = (chi_tmp>chi) #& ~np.isnan(chi_tmp)
# ibool = (np.abs(1-chi_tmp)<np.abs(1-chi)) #& ~np.isnan(chi_tmp)
# ibool = S2N_tmp>S2N
A[ibool] = A_tmp[ibool]
varA[ibool] = varA_tmp[ibool]
chi[ibool] = chi_tmp[ibool]
S2N[ibool] = S2N_tmp[ibool]
return A, varA, chi, S2N
def plot_spectrum(x,d,x2=None,d2=None, xmin=4000, xmax=8700, lw=0.25, lw2=1, mask=None, mask2=None):
"""
Plot a spectrum given x,d.
"""
if mask is not None:
d[mask] = 0
ibool = (x>xmin)&(x<xmax)
fig = plt.figure(figsize=(10,5))
plt.plot(x[ibool],d[ibool],lw=lw, c="black")
if (x2 is not None) and (d2 is not None):
ibool = (x2>xmin)&(x2<xmax)
if mask2 is not None:
d2[mask2]=0
plt.plot(x2[ibool],d2[ibool],lw=lw2, c="red")
ft_size = 15
plt.xlim([xmin, xmax])
plt.xlabel(r"Wavelength ($\AA$)", fontsize=ft_size)
plt.ylabel(r"Flux ($10^{-17}$ ergs/cm^2/s/$\AA$)", fontsize=ft_size)
plt.show()
plt.close()
def plot_S2N(x, S2N, mask=None, xmin=4500, xmax=8500, s=1):
"""
Plot the S/N as a function of wavelength.
"""
if mask is not None:
S2N[mask] = 0
ibool = (x>xmin)&(x<xmax)
fig = plt.figure(figsize=(10,5))
S2N_masked = S2N[ibool]
plt.scatter(x[ibool],S2N_masked,s=s, c="black")
ft_size = 15
plt.xlim([xmin, xmax])
plt.ylim([np.min(S2N_masked)*1.2,np.max(S2N_masked)*1.2])
plt.xlabel(r"Wavelength ($\AA$)", fontsize=ft_size)
plt.ylabel(r"S/N", fontsize=ft_size)
plt.show()
plt.close()
def MMT_radec(field, MMT_data_directory="./MMT_data/"):
"""
field is one of [0,1,2]:
- 0: 16hr observation 1
- 1: 16hr observation 2
- 2: 23hr observation
MMT_data_directory: Where the relevant header files are stored.
"""
num_fibers = 300
if field==0:
# 16hr2_1
# Header file name
fname = MMT_data_directory+"config1FITS_Header.txt"
# Get info corresponding to the fibers
OnlyAPID = [line for line in return_file(fname) if line.startswith("APID")]
# Get the object type
APID_types = [line.split("= '")[1].split(" ")[0] for line in OnlyAPID]
# print(APID_types)
# Getting index of targets only
ibool1 = np.zeros(num_fibers,dtype=bool)
for i,e in enumerate(APID_types):
if e.startswith("5"):
ibool1[i] = True
APID_targets = [OnlyAPID[i] for i in range(num_fibers) if ibool1[i]]
fib = [i+1 for i in range(num_fibers) if ibool1[i]]
# Extract ra,dec
ra_str = [APID_targets[i].split("'")[1].split(" ")[1] for i in range(len(APID_targets))]
dec_str = [APID_targets[i].split("'")[1].split(" ")[2] for i in range(len(APID_targets))]
ra = [HMS2deg(ra=ra_str[i]) for i in range(len(ra_str))]
dec = [HMS2deg(dec=dec_str[i]) for i in range(len(ra_str))]
elif field==1:
# 16hr2_2
# Header file name
fname = MMT_data_directory+"config2FITS_Header.txt"
# Get info corresponding to the fibers
OnlyAPID = return_file(fname)[0].split("= '")[1:]
# Get the object type
APID_types = [line.split(" ")[0] for line in OnlyAPID]
# print(APID_types)
# Getting index of targets only
ibool2 = np.zeros(num_fibers,dtype=bool)
for i,e in enumerate(APID_types):
if e.startswith("5"):
ibool2[i] = True
APID_targets = [OnlyAPID[i] for i in range(num_fibers) if ibool2[i]]
fib = [i+1 for i in range(num_fibers) if ibool2[i]]
# print(APID_targets[0])
# Extract ra,dec
ra_str = [APID_targets[i].split(" ")[1] for i in range(len(APID_targets))]
dec_str = [APID_targets[i].split(" ")[2] for i in range(len(APID_targets))]
ra = [HMS2deg(ra=ra_str[i]) for i in range(len(ra_str))]
dec = [HMS2deg(dec=dec_str[i]) for i in range(len(ra_str))]
elif field==2:
# 23hr
# Header file name
fname = MMT_data_directory+"23hrs_FITSheader.txt"
# Get info corresponding to the fibers
OnlyAPID = return_file(fname)[0].split("= '")[1:]
# Get the object type
APID_types = [line.split(" ")[0] for line in OnlyAPID]
# print(APID_types)
# Getting index of targets only
ibool3 = np.zeros(num_fibers,dtype=bool)
for i,e in enumerate(APID_types):
if e.startswith("3"):
ibool3[i] = True
APID_targets = [OnlyAPID[i] for i in range(num_fibers) if ibool3[i]]
fib = [i+1 for i in range(num_fibers) if ibool3[i]]
# print(APID_targets[0])
# Extract ra,dec
ra_str = [APID_targets[i].split(" ")[1] for i in range(len(APID_targets))]
dec_str = [APID_targets[i].split(" ")[2] for i in range(len(APID_targets))]
ra = [HMS2deg(ra=ra_str[i]) for i in range(len(ra_str))]
dec = [HMS2deg(dec=dec_str[i]) for i in range(len(ra_str))]
return np.asarray(ra), np.asarray(dec), np.asarray(fib)
def plot_dNdz_selection(cn, w, iselect1, redz, area, dz=0.05, gold_eff=1, silver_eff=1, NoZ_eff=0.25, NoOII_eff=0.6,\
gold_eff2=1, silver_eff2=1, NoZ_eff2=0.25, NoOII_eff2=0.6,\
cn2=None, w2=None, iselect2=None, redz2=None, plot_total=True, fname="dNdz.png", color1="black", color2="red", color_total="green",\
label1="Selection 1", label2="Selection 2", label_total="DEEP2 Total", wNoOII=0.1, wNoZ=0.5, lw=1.5, \
label_np1="nP=1", color_np1="blue", plot_np1 = True):
"""
Given class number (cn), mask (iselect1), weights (w), redshifts, class efficiencies, plot the redshift
histogram.
dz: Histogram binwidth
**_eff: Gold and Silver efficiencies are NOT always equal to one. NoZ and NoOII are objects with no redshift
in DEEP2 but are guessed to have efficiencies of about 0.25 and 0.6, respectively.
**_eff2: The efficiencies for the second set.
iselect2: If not None, used as another set of mask to plot dNdz histogram.
plot_total: Plots total.
fname: Saves in fname.
color1: iselect1 color
color2: iselect2 color
color_total: total color
label1, label2, label_total: Legend labels.
"""
mpl.rcParams['xtick.labelsize'] = 20
mpl.rcParams['ytick.labelsize'] = 20
if plot_total:
ibool = np.logical_or((cn==0),(cn==1))
plt.hist(redz[ibool], bins = np.arange(0.6,1.7,dz), weights=w[ibool]/area,\
histtype="step", color=color_total, label=label_total, lw=lw)
# NoOII:
ibool = (cn==3)
N_NoOII = NoOII_eff*w[ibool].sum();
plt.bar(left=0.7, height =N_NoOII/(wNoOII/dz), width=wNoOII, bottom=0., alpha=0.5,color=color_total, \
edgecolor =color_total, label=label_total+" NoOII (Proj.)", hatch="*")
# NoZ:
ibool = (cn==5)
N_NoZ = NoZ_eff*w[ibool].sum();
plt.bar(left=1.4, height =N_NoZ/(wNoZ/dz), width=wNoZ, bottom=0., alpha=0.5,color=color_total, \
edgecolor =color_total, label=label_total+" NoZ (Proj.)")
if iselect2 is not None:
# If the new cn, w and redz are given, then use those values. Else, use first set.
if cn2 is None:
redz2 = np.copy(redz)
cn2 = np.copy(cn)
w2 = np.copy(w)
# appropriately weighing the objects.
w_select2 = np.copy(w2)
w_select2[cn2==0] *= gold_eff2
w_select2[cn2==1] *= silver_eff2
w_select2[cn2==3] *= NoOII_eff2
w_select2[cn2==5] *= NoZ_eff2
ibool = np.logical_or((cn2==0),(cn2==1)) & iselect2
plt.hist(redz2[ibool], bins = np.arange(0.6,1.7,dz), weights=w_select2[ibool]/area,\
histtype="step", color=color2, label=label2, lw=lw)
# NoOII:
ibool = (cn2==3) & iselect2
N_NoOII = w_select2[ibool].sum();
plt.bar(left=0.7, height =N_NoOII/(wNoOII/dz), width=wNoOII, bottom=0., alpha=0.5,color=color2, \
edgecolor =color2, label=label2+ " NoOII (Proj.)", hatch="*")
plt.plot([0.7, 0.7+wNoOII], [N_NoOII/(wNoOII/dz)/NoOII_eff2, N_NoOII/(wNoOII/dz)/NoOII_eff2], color=color2, linewidth=2.0, ls="--")
# NoZ:
ibool = (cn2==5) & iselect2
N_NoZ = w_select2[ibool].sum();
plt.bar(left=1.4, height =N_NoZ/(wNoZ/dz), width=wNoZ, bottom=0., alpha=0.5,color=color2, \
edgecolor =color2, label=label2+" NoZ (Proj.)")
plt.plot([1.4, 1.4+wNoZ], [N_NoZ/(wNoZ/dz)/NoZ_eff2, N_NoZ/(wNoZ/dz)/NoZ_eff2], color=color2, linewidth=2.0, ls="--")
# Selection 1.
# appropriately weighing the objects.
w_select1 = np.copy(w)
w_select1[cn==0] *= gold_eff
w_select1[cn==1] *= silver_eff
w_select1[cn==3] *= NoOII_eff
w_select1[cn==5] *= NoZ_eff
ibool = np.logical_or((cn==0),(cn==1)) & iselect1 # Total
plt.hist(redz[ibool], bins = np.arange(0.6,1.7,dz), weights=w_select1[ibool]/area,\
histtype="step", color=color1, label=label1, lw=lw)
# NoOII:
ibool = (cn==3) & iselect1
N_NoOII = w_select1[ibool].sum();
plt.bar(left=0.7, height =N_NoOII/(wNoOII/dz), width=wNoOII, bottom=0., alpha=0.5,color=color1, \
edgecolor =color1, label=label1+" NoOII (Proj.)", hatch="*")
plt.plot([0.7, 0.7+wNoOII], [N_NoOII/(wNoOII/dz)/NoOII_eff, N_NoOII/(wNoOII/dz)/NoOII_eff], color=color1, linewidth=2.0, ls="--")
# NoZ:
ibool = (cn==5) & iselect1
N_NoZ = w_select1[ibool].sum();
plt.bar(left=1.4, height =N_NoZ/(wNoZ/dz), width=wNoZ, bottom=0., alpha=0.5, color=color1, \
edgecolor =color1, label=label1+" NoZ (Proj.)")
plt.plot([1.4, 1.4+wNoZ], [N_NoZ/(wNoZ/dz)/NoZ_eff, N_NoZ/(wNoZ/dz)/NoZ_eff], color=color1, linewidth=2.0, ls="--")
# Plotting np=1 line
if plot_np1:
X,Y = np1_line(dz)
plt.plot(X,Y, color=color_np1, label=label_np1, lw=lw*2., ls="-.")
plt.xlim([0.5,1.4+wNoZ+0.1])
plt.legend(loc="upper right", fontsize=15)
ymax=260
if plot_total:
ymax = 450
plt.ylim([0,ymax])
# plt.legend(loc="upper left")
plt.xlabel("Redshift z", fontsize=20)
plt.ylabel("dN/d(%.3fz) per sq. degs."%dz, fontsize=20)
plt.savefig(fname, bbox_inches="tight", dpi=400)
# plt.show()
plt.close()
def np1_line(dz=0.5):
"""
Given the binwidth dz, return the nP=1 line (digitized points scaled to the binwidth).
"""
X, Y = np.asarray([[0.14538014092363039, 1.1627906976744384],
[0.17035196758073518, 2.906976744186011],
[0.20560848729069203, 5.8139534883720785],
[0.2731789775637742, 10.465116279069775],
[0.340752313629068, 15.697674418604663],
[0.4083256496943619, 20.930232558139494],
[0.4729621281972476, 26.16279069767444],
[0.5405354642625415, 31.395348837209326],
[0.6081088003278353, 36.62790697674416],
[0.6756821363931291, 41.860465116279045],
[0.7403214606882265, 47.67441860465118],
[0.8078919509613086, 52.32558139534882],
[0.8754624412343909, 56.97674418604652],
[0.9430357772996848, 62.209302325581405],
[1.0106034217805555, 66.27906976744185],
[1.0811107696160458, 70.93023255813955],
[1.1486784140969166, 75],
[1.2162432127855753, 78.48837209302326],
[1.2867448690366428, 81.97674418604649],
[1.3543096677253015, 85.46511627906978],
[1.4248084781841568, 88.37209302325581],
[1.4953072886430125, 91.27906976744185],
[1.5687401108720649, 93.6046511627907],
[1.6392389213309202, 96.51162790697674],
[1.7097320402053522, 98.2558139534884],
[1.7802280048719963, 100.58139534883719],
[1.8507211237464292, 102.32558139534885],
[1.9212113968286495, 103.48837209302326],
[1.9917045157030815, 105.23255813953489]]).T
return X, Y*dz/0.1
def FDR_cut(grz):
"""
Given a list [g,r,z] magnitudes, apply the cut and return an indexing boolean vector.
"""
g,r,z=grz; yrz = (r-z); xgr = (g-r)
ibool = (r<23.4) & (yrz>.3) & (yrz<1.6) & (xgr < (1.15*yrz)-0.15) & (xgr < (1.6-1.2*yrz))
return ibool
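# Illustrative check (added example) on two synthetic objects: the first passes
# every FDR condition, the second fails the r<23.4 magnitude cut.
if __name__ == "__main__":
    g_demo = np.array([22.0, 23.0])
    r_demo = np.array([21.5, 23.5])
    z_demo = np.array([20.8, 22.5])
    print(FDR_cut([g_demo, r_demo, z_demo]))  # [ True False]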
def plot_grz_class(grz, cn, weight, area, mask=None, pick=None,fname=None,pt_size=0.5):
"""
Given [g,r,z] list, cn, weight of objects in a catalog and a particular class number and area,
plot the selected one in its color.
fname convention:
cc-(grz or grzperp)-(mag)(lim)-(cn)(cname)-(mask1)-(mask2)-...
"""
global colors
global cnames
bnd_lw =2
# Unpack the colors.
g,r,z=grz; xrz = (r-z); ygr = (g-r)
if mask is not None:
xrz = xrz[mask]
ygr = ygr[mask]
cn = cn[mask]
weight = weight[mask]
fig = plt.figure(figsize=(5,5))
if pick is None:
plt.scatter(xrz, ygr,c="black",s=pt_size, edgecolors="none")
else:
plt.scatter(xrz[cn==pick],ygr[cn==pick], c=colors[pick],s=pt_size*6, edgecolors="none", marker="s")
raw = np.sum(cn==pick)
if pick <6:
density = np.sum(weight[cn==pick])/area
else:
density = np.sum(cn==pick)/area
title_str = "%s: Raw=%d, Density=%d" %(cnames[pick],raw, density)
plt.title(title_str,fontsize=20)
# FDR boundary practice:
plt.plot( [0.3, 0.30], [-4, 0.195],'k-', lw=bnd_lw, c="blue")
plt.plot([0.3, 0.745], [0.195, 0.706], 'k-', lw=bnd_lw, c="blue")
plt.plot( [0.745, 1.6], [0.706, -0.32],'k-', lw=bnd_lw, c="blue")
plt.plot([1.6, 1.6], [-0.32, -4],'k-', lw=bnd_lw, c="blue")
# Broad
# plt.plot(xbroad,ybroad, linewidth=bnd_lw, c='blue')
# Decoration
plt.xlabel("$r-z$",fontsize=20)
plt.ylabel("$g-r$",fontsize=20)
plt.axis("equal")
plt.axis([-.5, 2.0, -.5, 2.0])
if fname is not None:
# plt.savefig(fname+".pdf", bbox_inches="tight",dpi=200)
plt.savefig(fname+".png", bbox_inches="tight",dpi=200)
# plt.show()
plt.close()
def plot_grzflux_class(grzflux, cn, weight, area, mask=None, pick=None,fname=None,pt_size=0.5, show_plot=False, \
xmin=0, xmax=100, ymin=0, ymax=100):
"""
Given [gflux, rflux, zflux] list, cn, weight of objects in a catalog and a particular class number and area,
plot the selected one in its color.
fname convention:
cc-(grz or grzperp)-(mag)(lim)-(cn)(cname)-(mask1)-(mask2)-...
"""
global colors
global cnames
bnd_lw =2
# Unpack the colors.
gflux,rflux,zflux=grzflux; xrz = zflux/rflux; ygr = rflux/gflux
if mask is not None:
xrz = xrz[mask]
ygr = ygr[mask]
cn = cn[mask]
weight = weight[mask]
fig = plt.figure(figsize=(5,5))
if pick is None:
plt.scatter(xrz, ygr,c="black",s=pt_size, edgecolors="none")
else:
plt.scatter(xrz[cn==pick],ygr[cn==pick], c=colors[pick],s=pt_size*6, edgecolors="none", marker="s")
raw = np.sum(cn==pick)
if pick <6:
density = np.sum(weight[cn==pick])/area
else:
density = np.sum(cn==pick)/area
title_str = "%s: Raw=%d, Density=%d" %(cnames[pick],raw, density)
plt.title(title_str,fontsize=20)
# # FDR boundary practice:
# plt.plot( [0.3, 0.30], [-4, 0.195],'k-', lw=bnd_lw, c="blue")
# plt.plot([0.3, 0.745], [0.195, 0.706], 'k-', lw=bnd_lw, c="blue")
# plt.plot( [0.745, 1.6], [0.706, -0.32],'k-', lw=bnd_lw, c="blue")
# plt.plot([1.6, 1.6], [-0.32, -4],'k-', lw=bnd_lw, c="blue")
# Broad
# plt.plot(xbroad,ybroad, linewidth=bnd_lw, c='blue')
# Decoration
plt.xlabel("$r-z$ flux ratio",fontsize=20)
plt.ylabel("$g-r$ flux ratio",fontsize=20)
plt.axis("equal")
plt.axis([xmin, xmax, ymin, ymax])
if fname is not None:
# plt.savefig(fname+".pdf", bbox_inches="tight",dpi=200)
plt.savefig(fname+".png", bbox_inches="tight",dpi=200)
if show_plot:
plt.show()
plt.close()
def plot_fluxratio_class(ratio1, ratio2, cn, weight, area, mask=None, pick=None,fname=None,pt_size=0.5, show_plot=False, \
xmin=0, xmax=100, ymin=0, ymax=100, xlabel="z-w1 flux", ylabel="g-r flux"):
"""
Given two flux-ratio arrays, cn, weight of objects in a catalog and a particular class number and area,
plot the selected one in its color.
fname convention:
cc-(grz or grzperp)-(mag)(lim)-(cn)(cname)-(mask1)-(mask2)-...
"""
global colors
global cnames
bnd_lw =2
# Unpack the colors.
if mask is not None:
ratio1 = ratio1[mask]
ratio2 = ratio2[mask]
cn = cn[mask]
weight = weight[mask]
fig = plt.figure(figsize=(5,5))
if pick is None:
plt.scatter(ratio1, ratio2,c="black",s=pt_size, edgecolors="none")
else:
plt.scatter(ratio1[cn==pick],ratio2[cn==pick], c=colors[pick],s=pt_size*6, edgecolors="none", marker="s")
raw = np.sum(cn==pick)
if pick <6:
density = np.sum(weight[cn==pick])/area
else:
density = np.sum(cn==pick)/area
title_str = "%s: Raw=%d, Density=%d" %(cnames[pick],raw, density)
plt.title(title_str,fontsize=20)
# # FDR boundary practice:
# plt.plot( [0.3, 0.30], [-4, 0.195],'k-', lw=bnd_lw, c="blue")
# plt.plot([0.3, 0.745], [0.195, 0.706], 'k-', lw=bnd_lw, c="blue")
# plt.plot( [0.745, 1.6], [0.706, -0.32],'k-', lw=bnd_lw, c="blue")
# plt.plot([1.6, 1.6], [-0.32, -4],'k-', lw=bnd_lw, c="blue")
# Broad
# plt.plot(xbroad,ybroad, linewidth=bnd_lw, c='blue')
# Decoration
plt.xlabel(xlabel,fontsize=20)
plt.ylabel(ylabel,fontsize=20)
plt.axis("equal")
plt.axis([xmin, xmax, ymin, ymax])
if fname is not None:
# plt.savefig(fname+".pdf", bbox_inches="tight",dpi=200)
plt.savefig(fname+".png", bbox_inches="tight",dpi=200)
if show_plot:
plt.show()
plt.close()
def plot_grz_class_all(grz, cn, weight, area, mask=None, fname=None, pt_size1=0.5, pt_size2=0.3):
"""
Given [g,r,z] list, cn, weight of objects in a catalog and a particular class number and area,
plot all the objects in their respective colors.
fname convention:
cc-(grz or grzperp)-(mag)(lim)-cnAll-(mask1)-(mask2)-...
"""
global colors
global cnames
bnd_lw =2
# Unpack the colors.
g,r,z=grz; xrz = (r-z); ygr = (g-r)
if mask is not None:
xrz = xrz[mask]
ygr = ygr[mask]
cn = cn[mask]
weight = weight[mask]
fig = plt.figure(figsize=(5,5))
for i,e in enumerate(cnames):
if i < 6:
plt.scatter(xrz[cn==i], ygr[cn==i], c=colors[i],s=pt_size1, edgecolors="none", marker="s")
elif i ==6:
plt.scatter(xrz[cn==i], ygr[cn==i], c=colors[i],s=pt_size2, edgecolors="none", marker="s")
# FDR boundary practice:
plt.plot( [0.3, 0.30], [-4, 0.195],'k-', lw=bnd_lw, c="blue")
plt.plot([0.3, 0.745], [0.195, 0.706], 'k-', lw=bnd_lw, c="blue")
plt.plot( [0.745, 1.6], [0.706, -0.32],'k-', lw=bnd_lw, c="blue")
plt.plot([1.6, 1.6], [-0.32, -4],'k-', lw=bnd_lw, c="blue")
# Broad
# plt.plot(xbroad,ybroad, linewidth=bnd_lw, c='blue')
# Decoration
plt.xlabel("$r-z$",fontsize=20)
plt.ylabel("$g-r$",fontsize=20)
plt.axis("equal")
plt.axis([-.5, 2.0, -.5, 2.0])
if fname is not None:
# plt.savefig(fname+".pdf", bbox_inches="tight",dpi=200)
plt.savefig(fname+".png", bbox_inches="tight",dpi=200)
# plt.show()
plt.close()
def load_params_XD_fit(i,K,tag=""):
fname = ("%d-params-fit-amps-glim24-K%d"+tag+".npy") %(i, K)
amp = np.load(fname)
fname = ("%d-params-fit-means-glim24-K%d"+tag+".npy") %(i, K)
mean= np.load(fname)
fname = ("%d-params-fit-covars-glim24-K%d"+tag+".npy") %(i, K)
covar = np.load(fname)
return amp, mean, covar
def load_params_XD_init(i,K,tag=""):
fname = ("%d-params-init-amps-glim24-K%d"+tag+".npy") %(i, K)
amp = np.load(fname)
fname = ("%d-params-init-means-glim24-K%d"+tag+".npy") %(i, K)
mean= np.load(fname)
fname = ("%d-params-init-covars-glim24-K%d"+tag+".npy") %(i, K)
covar = np.load(fname)
return amp, mean, covar
def plot_XD_fit(ydata, weight, Sxamp_init, Sxmean_init, Sxcovar_init, Sxamp, Sxmean, Sxcovar, mask=None, fname=None, pt_size=5, show=False):
"""
Plot the data with initial/fitted GMM components, the marginal histograms, and the fitted density isocontours.
"""
bnd_lw =1.5
# Unpack the colors.
xrz = ydata[:,0]; ygr = ydata[:,1]
if mask is not None:
ygr = ygr[mask]
xrz = xrz[mask]
# Broad boundary
# xbroad, ybroad = generate_broad()
# Figure ranges
grmin = -.5
rzmin = -.5
grmax = 2.5
rzmax = 2.5
# histogram binwidth
bw = 0.05
# Number of components/linewidth
K = Sxamp_init.size
elw = 1.5 # ellipse linewidth
ea = 0.75 # ellipse transparency
# Create figure
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(14,14))
# 1: Plot the dots and the initial and final component gaussians.
ax1.scatter(xrz,ygr, c="black",s=pt_size, edgecolors="none")
# Initial
for i in range(K):
cc.plot_cov_ellipse(Sxcovar_init[i], Sxmean_init[i], volume=.6827, ax=ax1, ec="red", a=ea, lw=elw/2.) #1-sig
cc.plot_cov_ellipse(Sxcovar_init[i], Sxmean_init[i], volume=.9545, ax=ax1, ec="red", a=ea, lw=elw/2.) #2-sig
cc.plot_cov_ellipse(Sxcovar[i], Sxmean[i], volume=.6827, ax=ax1, ec="blue", a=ea, lw=elw)#1-sig
cc.plot_cov_ellipse(Sxcovar[i], Sxmean[i], volume=.9545, ax=ax1, ec="blue", a=ea, lw=elw)#2-sig
# FDR boundary:
ax1.plot( [0.3, 0.30], [-4, 0.195],'k-', lw=bnd_lw, c="red")
ax1.plot([0.3, 0.745], [0.195, 0.706], 'k-', lw=bnd_lw, c="red")
ax1.plot( [0.745, 1.6], [0.706, -0.32],'k-', lw=bnd_lw, c="red")
ax1.plot([1.6, 1.6], [-0.32, -4],'k-', lw=bnd_lw, c="red")
# Decoration
ax1.set_xlabel("$r-z$",fontsize=18)
ax1.set_ylabel("$g-r$",fontsize=18)
ax1.axis("equal")
ax1.axis([rzmin, rzmax, grmin, grmax])
# 2: Histogram in r-z
ax2.hist(ygr, bins = np.arange(grmin, grmax+0.9*bw, bw), weights=weight, normed=True, color="black", histtype="step", orientation="horizontal")
# Gaussian components
xvec = np.arange(-3,3,0.01) # vector range
sum_init = np.zeros_like(xvec) # place holder for gaussians.
sum_fit = np.zeros_like(xvec)
for i in range(K):
yvec = Sxamp_init[i]*stats.multivariate_normal.pdf(xvec, mean=Sxmean_init[i][1], cov=Sxcovar_init[i][1,1])
sum_init += yvec
ax2.plot(yvec, xvec,lw=elw, color ="red", alpha=0.5)
yvec = Sxamp[i]*stats.multivariate_normal.pdf(xvec, mean=Sxmean[i][1], cov=Sxcovar[i][1,1])
sum_fit += yvec
ax2.plot(yvec, xvec,lw=elw, color ="blue", alpha=0.5)
ax2.plot(sum_init, xvec,lw=elw*1.5, color ="red", alpha=1.)
ax2.plot(sum_fit, xvec,lw=elw*1.5, color ="blue", alpha=1.)
# Deocration
ax2.set_ylabel("$g-r$",fontsize=18)
ax2.set_xlabel("Normalized density", fontsize=20)
ax2.set_ylim([grmin, grmax])
# 3: Histogram in g-r
ax3.hist(xrz, bins = np.arange(grmin, grmax+0.9*bw, bw), weights=weight, normed=True, color="black", histtype="step")
# Gaussian components
xvec = np.arange(-3,3,0.01) # vector range
sum_init = np.zeros_like(xvec) # place holder for gaussians.
sum_fit = np.zeros_like(xvec)
for i in range(K):
yvec = Sxamp_init[i]*stats.multivariate_normal.pdf(xvec, mean=Sxmean_init[i][0], cov=Sxcovar_init[i][0,0])
sum_init += yvec
ax3.plot(xvec,yvec,lw=elw, color ="red", alpha=0.5)
yvec = Sxamp[i]*stats.multivariate_normal.pdf(xvec, mean=Sxmean[i][0], cov=Sxcovar[i][0,0])
sum_fit += yvec
ax3.plot(xvec, yvec,lw=elw, color ="blue", alpha=0.5)
ax3.plot(xvec,sum_init, lw=elw*1.5, color ="red", alpha=1.)
ax3.plot(xvec,sum_fit,lw=elw*1.5, color ="blue", alpha=1.)
# Decoration
ax3.set_xlabel("$r-z$",fontsize=18)
ax3.set_ylabel("Normalized density", fontsize=20)
ax3.set_xlim([rzmin, rzmax])
# 4: Plot the dots and the isocontours of GMM at 2, 10, 50, 90, 98
ax4.scatter(xrz,ygr, c="black",s=pt_size, edgecolors="none")
# Plot isocontours
magmin = min(grmin, rzmin)
magmax = max(grmax, rzmax)
vec = np.linspace(magmin, magmax, num=1000, endpoint=True) # num must be an integer
X,Y = np.meshgrid(vec, vec) # grid of point
Z = summed_gm(np.transpose(np.array([Y,X])), Sxmean, Sxcovar, Sxamp) # evaluation of the function on the grid
Xrange = Yrange = [magmin,magmax]; # Estimating density levels.
cvs = [0.98, 0.90, 0.50, 0.10, 0.02] # contour levels
cvsP =inverse_cdf_gm(cvs,Xrange, Yrange, Sxamp, Sxcovar, Sxmean, gridspacing=0.5e-2,gridnumber = 1e3)
ax4.contour(X,Y,Z,cvsP,linewidths=1.5, colors=["black", "blue", "red", "orange", "yellow"])
# FDR boundary:
ax4.plot( [0.3, 0.30], [-4, 0.195],'k-', lw=bnd_lw, c="red")
ax4.plot([0.3, 0.745], [0.195, 0.706], 'k-', lw=bnd_lw, c="red")
ax4.plot( [0.745, 1.6], [0.706, -0.32],'k-', lw=bnd_lw, c="red")
ax4.plot([1.6, 1.6], [-0.32, -4],'k-', lw=bnd_lw, c="red")
# Decoration
ax4.set_xlabel("$r-z$",fontsize=18)
ax4.set_ylabel("$g-r$",fontsize=18)
ax4.axis("equal")
ax4.axis([rzmin, rzmax, grmin, grmax])
if fname is not None:
# plt.savefig(fname+".pdf", bbox_inches="tight",dpi=200)
plt.savefig(fname+".png", bbox_inches="tight",dpi=200)
if show:
plt.show()
plt.close()
def XD_gr_rz_fit(ydata, ycovar, weight, niter, K, maxsnm=True, subsample = False, fixamp = None, snm=0, init_var=0.5**2, w_reg = 0.05**2):
"""
Given the appropriately formatted data, make fits and return the best
fit parameters (and the corresponding initial values).
K: Number of components
"""
# If subsample is true and the number of data points is greater than M ~ 3,000, then subsample before proceeding.
M = 3000
if (ydata.shape[0] > M) & subsample:
a = np.arange(ydata.shape[0], dtype=int) # draw M indices from the full sample, not just the first M points
ibool = np.random.choice(a, size=M, replace=False, p=None)
ydata = ydata[ibool]
ycovar = ycovar[ibool]
weight = weight[ibool]
# Place holder for the log-likelihood
loglike = large_random_constant
best_loglike = large_random_constant
# Make niter number of fits
for i in range(niter):
if (i%2==0) & (niter<=25):
print(i)
if (i%10==0) & (niter>25):
print(i)
# Get initial condition
xamp_init, xmean_init, xcovar_init = XD_init(K, ydata, init_var)
# Copy the initial condition.
xamp = np.copy(xamp_init); xmean = np.copy(xmean_init); xcovar = np.copy(xcovar_init)
# XD fit
loglike = XD.extreme_deconvolution(ydata, ycovar, xamp, xmean, xcovar, weight=weight, tol=1e-06, w=w_reg, maxsnm=maxsnm, fixamp=fixamp, splitnmerge=snm)
if loglike > best_loglike:
best_loglike = loglike
Sxamp_init, Sxmean_init, Sxcovar_init = xamp_init, xmean_init, xcovar_init
Sxamp, Sxmean, Sxcovar = xamp, xmean, xcovar
return Sxamp_init, Sxmean_init, Sxcovar_init, Sxamp, Sxmean, Sxcovar
def sample_GMM(Sxamp,Sxmean, Sxcovar, ycovar):
"""
Return a sample based on the GMM input.
"""
N = ycovar.shape[0] # Number of data points.
sample = []
# For each data point, generate a sample based on the specified GMM.
for i in range(N):
sample.append(sample_GMM_generate(Sxamp,Sxmean, Sxcovar, ycovar[i]))
sample = np.asarray(sample)
# print sample.shape, sample
xgr_sample, yrz_sample = sample[:,0], sample[:,1]
return xgr_sample, yrz_sample
def sample_GMM_generate(Sxamp,Sxmean, Sxcovar, cov):
"""
sample from a gaussian mixture
"""
# Number of components.
K = Sxamp.size
if K == 1:
# print(Sxmean[0], (Sxcovar+cov)
one_sample = np.random.multivariate_normal(Sxmean[0], (Sxcovar+cov)[0], size=1)[0]
return one_sample
# Choose from the number based on multinomial
m = np.where(np.random.multinomial(1,Sxamp)==1)[0][0]
# Draw from the m-th gaussian.
one_sample = np.random.multivariate_normal(Sxmean[m], Sxcovar[m]+cov, size=1)[0]
return one_sample
def plot_XD_fit_K(ydata, ycovar, Sxamp, Sxmean, Sxcovar, fname=None, pt_size=5, mask=None, show=False):
"""
Plot the data alongside three simulated draws from the fitted GMM; used for visual model selection.
"""
bnd_lw = 1.
# Unpack the colors.
xgr = ydata[:,0]; yrz = ydata[:,1]
if mask is not None:
yrz = yrz[mask]
xgr = xgr[mask]
# # Broad boundary
# xbroad, ybroad = generate_broad()
# Figure ranges
grmin = -1.
rzmin = -.75
grmax = 2.5
rzmax = 2.75
# Create figure
f, axarr = plt.subplots(2, 2, figsize=(14,14))
# First panel is the original.
axarr[0,0].scatter(xgr,yrz, c="black",s=pt_size, edgecolors="none")
# FDR boundary:
axarr[0,0].plot([-4, 0.195], [0.3, 0.30], 'k-', lw=bnd_lw, c="red")
axarr[0,0].plot([0.195, 0.706],[0.3, 0.745], 'k-', lw=bnd_lw, c="red")
axarr[0,0].plot([0.706, -0.32], [0.745, 1.6], 'k-', lw=bnd_lw, c="red")
axarr[0,0].plot([-0.32, -4],[1.6, 1.6], 'k-', lw=bnd_lw, c="red")
# # Broad
# axarr[0,0].plot(xbroad,ybroad, linewidth=bnd_lw, c='blue')
# Decoration
axarr[0,0].set_xlabel("$g-r$",fontsize=18)
axarr[0,0].set_ylabel("$r-z$",fontsize=18)
axarr[0,0].set_title("Data",fontsize=20)
axarr[0,0].axis("equal")
axarr[0,0].axis([grmin, grmax, rzmin, rzmax])
# The remaining three are simulation based on the fit.
sim_counter = 1
for i in range(1,4):
xgr_sample, yrz_sample = sample_GMM(Sxamp,Sxmean, Sxcovar, ycovar)
axarr[i//2, i%2].scatter(xgr_sample,yrz_sample, c="black",s=pt_size, edgecolors="none")
# FDR boundary:
axarr[i//2, i%2].plot([-4, 0.195], [0.3, 0.30], 'k-', lw=bnd_lw, c="red")
axarr[i//2, i%2].plot([0.195, 0.706],[0.3, 0.745], 'k-', lw=bnd_lw, c="red")
axarr[i//2, i%2].plot([0.706, -0.32], [0.745, 1.6], 'k-', lw=bnd_lw, c="red")
axarr[i//2, i%2].plot([-0.32, -4],[1.6, 1.6], 'k-', lw=bnd_lw, c="red")
# Broad
# axarr[i//2, i%2].plot(xbroad,ybroad, linewidth=bnd_lw, c='blue')
# Decoration
axarr[i//2, i%2].set_xlabel("$g-r$",fontsize=18)
axarr[i//2, i%2].set_ylabel("$r-z$",fontsize=18)
axarr[i//2, i%2].set_title("Simulation %d" % sim_counter,fontsize=20); sim_counter+=1
axarr[i//2, i%2].axis("equal")
axarr[i//2, i%2].axis([grmin, grmax, rzmin, rzmax])
if fname is not None:
# plt.savefig(fname+".pdf", bbox_inches="tight",dpi=200)
plt.savefig(fname+".png", bbox_inches="tight",dpi=200)
if show:
plt.show()
plt.close()
def save_params(Sxamp_init, Sxmean_init, Sxcovar_init, Sxamp, Sxmean, Sxcovar, i, K, tag=""):
fname = ("%d-params-fit-amps-glim24-K%d"+tag) %(i, K)
np.save(fname, Sxamp)
fname = ("%d-params-fit-means-glim24-K%d"+tag) %(i, K)
np.save(fname, Sxmean)
fname = ("%d-params-fit-covars-glim24-K%d"+tag) %(i, K)
np.save(fname, Sxcovar)
# Initi parameters
fname = ("%d-params-init-amps-glim24-K%d"+tag) %(i, K)
np.save(fname, Sxamp_init)
fname = ("%d-params-init-means-glim24-K%d"+tag) %(i, K)
np.save(fname, Sxmean_init)
fname = ("%d-params-init-covars-glim24-K%d"+tag) %(i, K)
np.save(fname, Sxcovar_init)
return
def amp_init(K):
return np.ones(K,dtype=np.float)/np.float(K)
def mean_init(K, ydata):
S = np.random.randint(low=0,high=ydata.shape[0],size=K)
return ydata[S]
def covar_init(K, init_var):
covar = np.zeros((K, 2,2))
for i in range(K):
covar[i] = np.diag((init_var, init_var))
return covar
def XD_init(K, ydata, init_var):
xamp_init = amp_init(K)
# print xamp_init, xamp_init.shape
xmean_init = mean_init(K, ydata)
# print xmean_init, xmean_init.shape
xcovar_init = covar_init(K, init_var)
# print xcovar_init, xcovar_init.shape
return xamp_init, xmean_init, xcovar_init
def grz2gr_rz(grz):
return np.transpose(np.asarray([grz[0]-grz[1], grz[1]-grz[2]]))
def grz2rz_gr(grz):
return np.transpose(np.asarray([grz[1]-grz[2], grz[0]-grz[1]]))
def fvar2mvar(f, fivar):
return (1.08574)**2/(f**2 * fivar)
def gr_rz_covariance(grzflux, grzivar):
gflux = grzflux[0]
rflux = grzflux[1]
zflux = grzflux[2]
givar = grzivar[0]
rivar = grzivar[1]
zivar = grzivar[2]
gvar = fvar2mvar(gflux,givar)
rvar = fvar2mvar(rflux,rivar)
zvar = fvar2mvar(zflux,zivar)
gr_rz_covar = np.zeros((gvar.size ,2,2))
for i in range(gvar.size):
# if i % 100 == 0:
# print i
gr_rz_covar[i] = np.asarray([[gvar[i]+rvar[i], rvar[i]],[rvar[i], rvar[i]+zvar[i]]])
return gr_rz_covar
def rz_gr_covariance(grzflux, grzivar):
gflux = grzflux[0]
rflux = grzflux[1]
zflux = grzflux[2]
givar = grzivar[0]
rivar = grzivar[1]
zivar = grzivar[2]
gvar = fvar2mvar(gflux,givar)
rvar = fvar2mvar(rflux,rivar)
zvar = fvar2mvar(zflux,zivar)
rz_gr_covar = np.zeros((gvar.size ,2,2))
for i in range(gvar.size):
# if i % 100 == 0:
# print i
rz_gr_covar[i] = np.asarray([[rvar[i]+zvar[i], rvar[i]],[rvar[i], gvar[i]+rvar[i]]])
return rz_gr_covar
def pow_legend(params_pow):
alpha, A = params_pow
return r"$A=%.2f,\,\, \alpha=%.2f$" % (A, alpha)
def broken_legend(params_broken):
alpha, beta, fs, phi = params_broken
return r"$\alpha=%.2f, \,\, \beta=%.2f, \,\, f_i=%.2f, \,\, \phi=%.2f$" % (alpha, beta, fs, phi)
def broken_pow_phi_init(flux_centers, best_params_pow, hist,bw, fluxS):
"""
Return initial guess for phi.
"""
# selecting one non-zero bin
c_S = 0;
while c_S == 0:
S = np.random.randint(low=0,high=flux_centers.size,size=1)
f_S = flux_centers[S]
c_S = hist[S]
alpha = -best_params_pow[0]
beta = best_params_pow[0]
phi = c_S/broken_pow_law([alpha, beta, fluxS, 1.], f_S)/bw
# phi
return phi[0]
def pow_law(params, flux):
A = params[1]
alpha = params[0]
return A* flux**alpha
def broken_pow_law(params, flux):
alpha = params[0]
beta = params[1]
fs = params[2]
phi = params[3]
return phi/((flux/fs)**alpha+(flux/fs)**beta + 1e-12)
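# Illustrative check (added example): pow_law with params [alpha, A] is A*f**alpha,
# and broken_pow_law evaluated exactly at the break flux fs gives phi/2 (up to
# the 1e-12 regularizer), since both power-law terms equal one there.
if __name__ == "__main__":
    print(pow_law([2.0, 1.5], 10.0))                     # 150.0
    print(broken_pow_law([1.0, -0.5, 10.0, 4.0], 10.0))  # ~2.0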
def pow_param_init(left_hist, left_f, right_hist, right_f, bw):
"""
Return initial guess for the exponent and normalization.
"""
# Select one non-zero bin from the left and one from the right.
c_L = 0; c_R = 0
while c_L==0 or c_R == 0 or c_L >= c_R:
L = np.random.randint(low=0,high=left_hist.size,size=1)
f_L = left_f[L]
c_L = left_hist[L]
R = np.random.randint(low=0,high=right_hist.size,size=1)
f_R = right_f[R]
c_R = right_hist[R]
# print(L,R)
# exponent
alpha_init = np.log(c_L/np.float(c_R))/np.log(f_L/np.float(f_R))
A_init = c_L/(f_L**alpha_init * bw)
ans = np.zeros(2, dtype=np.float)
ans[0] = alpha_init
ans[1] = A_init
return ans
def mag2flux(mag):
return 10**(0.4*(22.5-mag))
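# Sanity check (added example): in this convention 22.5 mag is one nanomaggie,
# and each 2.5 mag brighter multiplies the flux by 10.
if __name__ == "__main__":
    print(mag2flux(22.5))  # 1.0
    print(mag2flux(20.0))  # 10.0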
def dNdm_fit(mag, weight, bw, magmin, magmax, area, niter = 5, cn2fit=0, pow_tol =1e-5, broken_tol=1e-2, fname=None, lw=1.5):
"""
Given the magnitudes and the corresponding weight, and the parameters for the histogram,
return the best fit parameters for a power law and a broken power law.
Note: This function could be much more modular. But for now I keep it as it is.
"""
# Computing the histogram.
bins = np.arange(magmin, magmax+bw*0.9, bw) # I am not sure why this is necessary, but it works.
if cn2fit<6:
hist, bin_edges = np.histogram(mag, weights=weight/np.float(area), bins=bins)
else: # If D2reject, then do not weight except for the area.
hist, bin_edges = np.histogram(mag,weights=np.ones(mag.size)/np.float(area), bins=bins)
# Compute the median magnitude
magmed = np.median(mag)
# Compute bin centers. Left set and right set.
bin_centers = (bin_edges[:-1]+bin_edges[1:])/2.
ileft = bin_centers < magmed
# left and right counts
left_hist = hist[ileft]
right_hist = hist[~ileft]
# left and right flux
left_f = mag2flux(bin_centers[ileft])
right_f = mag2flux(bin_centers[~ileft])
flux_centers = mag2flux(bin_centers)
# Place holder for the best parameters
best_params_pow = np.zeros(2,dtype=np.float)
# Empty list for the negative log-likelihood
list_nloglike = []
best_nloglike = np.inf # smaller negative log-likelihood = better fit
# Define negative total loglikelihood function given the histogram.
def ntotal_loglike_pow(params):
"""
Total log likelihood.
"""
total_loglike = 0
for i in range(flux_centers.size):
total_loglike += stats.poisson.logpmf(hist[i].astype(int), pow_law(params, flux_centers[i])*bw)
return -total_loglike
# fit for niter times
print("Fitting power law")
counter = 0
while counter < niter:
# Generate initial parameters
init_params = pow_param_init(left_hist, left_f, right_hist, right_f, bw)
# print(init_param)
# Optimize the parameters.
res = opt.minimize(ntotal_loglike_pow, init_params,tol=pow_tol,method="Nelder-Mead" )
if res["success"]:
counter+=1
if counter % 2 == 0:
print(counter)
# print(counter)
# print(res["x"])
fitted_params = res["x"]
# Calculate the negative total likelihood
nloglike = ntotal_loglike_pow(fitted_params)
list_nloglike.append(nloglike)
# If this is the smallest negative log-likelihood seen (i.e., the highest likelihood), update the parameters.
if nloglike < best_nloglike:
best_nloglike = nloglike
best_params_pow = fitted_params
# print(best_params_pow)
# Place holder for the best parameters
best_params_broken = np.zeros(4,dtype=np.float)
# Empty list for the negative log-likelihood
list_nloglike = []
best_nloglike = np.inf # smaller negative log-likelihood = better fit
# Define negative total loglikelihood function given the histogram.
def ntotal_loglike_broken(params):
"""
Total log likelihood for broken power law.
"""
total_loglike = 0
for i in range(flux_centers.size):
total_loglike += stats.poisson.logpmf(hist[i].astype(int), broken_pow_law(params, flux_centers[i])*bw)
return -total_loglike
# fit for niter times
print("Fitting broken power law")
counter = 0
while counter < niter:
# Generate initial parameters
phi = broken_pow_phi_init(flux_centers, best_params_pow, hist, bw, mag2flux(magmed))
alpha = -best_params_pow[0]
beta = best_params_pow[0]
init_params = [alpha, beta,mag2flux(magmed), phi]
# print(init_params)
# Optimize the parameters.
res = opt.minimize(ntotal_loglike_broken, init_params,tol=broken_tol,method="Nelder-Mead" )
if res["success"]:
counter+=1
if counter % 2 == 0:
print(counter)
# print(res["x"])
fitted_params = res["x"]
# Calculate the negative total likelihood
nloglike = ntotal_loglike_broken(fitted_params)
list_nloglike.append(nloglike)
# If this is the smallest negative log-likelihood seen (i.e., the highest likelihood), update the parameters.
if nloglike < best_nloglike:
best_nloglike = nloglike
best_params_broken = fitted_params
# print(best_params_broken)
# power law fit
xvec = np.arange(magmin, magmax, 1e-3)
yvec = pow_law(best_params_pow, mag2flux(xvec))*np.float(bw)
pow_str = pow_legend(best_params_pow)
plt.plot(xvec,yvec, c = "red", label = pow_str, lw=lw)
# broken power law fit
yvec = broken_pow_law(best_params_broken, mag2flux(xvec))*np.float(bw)
broken_str = broken_legend(best_params_broken)
plt.plot(xvec,yvec, c = "blue", label=broken_str, lw=lw)
# hist
plt.bar(bin_edges[:-1], hist, width=bw, alpha=0.5, color="g")
# deocration
plt.legend(loc="upper left")
plt.xlim([magmin,magmax])
plt.xlabel(r"Mag")
plt.ylabel(r"Number per %.2f mag bin"%bw)
if fname is not None:
plt.savefig(fname+".png", bbox_inches="tight", dpi=400)
# plt.show()
plt.close()
return best_params_pow, best_params_broken
def combine_grz(list1,list2,list3=None):
"""
Convenience function for combining two or three sets data in a list.
"""
if list3 is not None:
g = np.concatenate((list1[0], list2[0], list3[0]))
r = np.concatenate((list1[1], list2[1], list3[1]))
z = np.concatenate((list1[2], list2[2], list3[2]))
else:
g = np.concatenate((list1[0], list2[0]))
r = np.concatenate((list1[1], list2[1]))
z = np.concatenate((list1[2], list2[2]))
return [g, r,z]
def true_false_fraction(ibool):
"""
Given boolean index array count true and false proportion and print.
"""
counts = np.bincount(ibool)
tot = np.sum(counts).astype(float)
print("True: %d (%.4f)| False: %d (%.4f)" % (counts[1], counts[1]/tot, counts[0], counts[0]/tot))
return [counts[1], counts[1]/tot, counts[0], counts[0]/tot]
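# Illustrative check (added example):
if __name__ == "__main__":
    true_false_fraction(np.array([True, True, False, True]))
    # prints: True: 3 (0.7500)| False: 1 (0.2500)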
def load_cn(fits):
return fits["cn"].astype(int)
def load_DEEP2matched(table):
return table["DEEP2_matched"][:]
def window_mask(ra, dec, w_fname):
"""
Given the ra,dec of objects and a window function file name, the function
returns a boolean array whose elements are True when the corresponding
objects lie within regions where the map is positive.
Note: the windowf.**.fits files were not set up in a convenient format
so below I had to perform a simple but tricky affine transformation.
"""
# Import the window map and get pixel limits.
window = fits.open(w_fname)[0].data.T # Note that transpose.
px_lim = window.shape[1]
py_lim = window.shape[0]
# Creating WCS object for the window.
w = WCS(w_fname)
# Convert ra/dec to pixel values and round.
px, py = w.wcs_world2pix(ra, dec, 0)
px_round = np.round(py).astype(int)
py_round = np.round(px).astype(int)
# Creating the array.
idx = np.zeros(px_round.size, dtype=bool)
for i in range(px.size):
if (px_round[i]>=0) and (px_round[i]<px_lim) and (py_round[i]>=0) and (py_round[i]<py_lim): # Check if the object lies within the window frame.
if (window[py_round[i],px_round[i]]>0): # Check if the object is in a region where there is spectroscopy.
idx[i] = True
return idx
def est_spec_area(w_fname):
"""
The following function estimates the spectroscopic area given a window
function file.
"""
# Creating WCS object for the window.
w = WCS(w_fname)
# Importing the window
window = fits.open(w_fname)[0].data
px_lim = window.shape[0]
py_lim = window.shape[1]
# Convert ra/dec to pixel values and round.
ra, dec = w.wcs_pix2world([0,px_lim], [0,py_lim], 0)
# Calculating the area
area = (ra[1]-ra[0])*(dec[1]-dec[0])
# Calculating the fraction covered by spectroscopy
frac = (window>0).sum()/(px_lim*py_lim+1e-12)
return frac*area
def import_zcat(z_fname):
"""
Given DEEP2 redshift catalog filename, import and return relevant fields.
"""
data = fits.open(z_fname)[1].data
return data["OBJNO"], data["RA"], data["DEC"], data["OII_3727"], data["OII_3727_ERR"], data["ZHELIO"], data["ZHELIO_ERR"], data["ZQUALITY"], data["TARG_WEIGHT"]
def match_objno(objno1, objno2):
"""
Given two objno arrays, return idx of items that match. The sort-and-searchsorted approach is roughly O(N log N).
The input arrays have to be sets, i.e., lists of unique items.
"""
global large_random_constant
# Finding the intersection
intersection = np.intersect1d(objno1, objno2)
print("# of elements in intersection: %d"% intersection.size)
# Creating placeholders for idx's to be returned.
idx1 = np.ones(intersection.size,dtype=int)*large_random_constant
idx2 = np.ones(intersection.size,dtype=int)*large_random_constant
# Creating objno1, objno2 copies with integer tags before sorting.
objno1_tagged = np.rec.fromarrays((objno1, range(objno1.size)),dtype=[('id', int), ('tag', int)])
objno2_tagged = np.rec.fromarrays((objno2, range(objno2.size)),dtype=[('id', int), ('tag', int)])
# Sorting according id
objno1_tagged = np.sort(objno1_tagged, axis=0, order="id")
objno2_tagged = np.sort(objno2_tagged, axis=0, order="id")
# tags
tags1 = objno1_tagged["tag"]
tags2 = objno2_tagged["tag"]
# values
objno1_vals = objno1_tagged["id"]
objno2_vals = objno2_tagged["id"]
# For each id in the intersection set, find the corresponding indices in objno1 and objno2 and save.
for i,e in enumerate(intersection):
idx1[i] = tags1[np.searchsorted(objno1_vals,e)]
idx2[i] = tags2[np.searchsorted(objno2_vals,e)]
return idx1, idx2
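# Illustrative check (added example) with small unique-ID arrays; the returned
# indices line up so that a_ids[i1] == b_ids[i2] element-wise.
if __name__ == "__main__":
    a_ids = np.array([11, 42, 7, 99])
    b_ids = np.array([42, 5, 99])
    i1, i2 = match_objno(a_ids, b_ids)
    print(a_ids[i1], b_ids[i2])  # [42 99] [42 99]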
def pcat_append(pcat, new_col, col_name, idx1, idx2):
"""
Given a DEEP2 pcat recarray, a new field column, and a field name,
append the new column to the recarray using OBJNO-matched values, naming the appended column col_name.
Must provide appropriate idx values for both pcats and additional catalogs.
"""
global large_random_constant
new_col_sorted = np.ones(pcat.shape[0])*large_random_constant
new_col_sorted[idx1] = new_col[idx2]
new_pcat = rec.append_fields(pcat, col_name, new_col_sorted, dtypes=new_col_sorted.dtype, usemask=False, asrecarray=True)
return new_pcat
def generate_class_col(pcat):
"""
Given a pcat array with the required fields, produce a column that classifies objects into the different classes.
"""
# Extracting columns
OII = pcat["OII_3727"]*1e17
Z = pcat["RED_Z"]
ZQUALITY = pcat["ZQUALITY"]
OII_ERR = pcat["OII_3727_ERR"]
BRIcut = pcat["BRI_cut"]
# Placeholder for the class column.
class_col = np.ones(pcat.shape[0],dtype=int)*large_random_constant
# Gold, CN=0: OII>8, Z in [1.1, 1.6]
ibool = (OII>8) & (Z>1.1) & (Z<1.6) & (BRIcut==1) & (ZQUALITY>=3) & (OII_ERR>0)
class_col[ibool] = 0
# Silver, CN=1: OII>8, Z in [0.6, 1.1]
ibool = (OII>8) & (Z>0.6) & (Z<1.1) & (OII_ERR>0) & (BRIcut==1) & (ZQUALITY>=3)
class_col[ibool] = 1
# LowOII, CN=2: OII<8, Z in [0.6, 1.6]
ibool = (OII<8) & (Z>0.6) & (Z<1.6) & (OII_ERR>0) & (ZQUALITY>=3) & (BRIcut==1)
class_col[ibool] = 2
# Note that many ELG objects were assigned negative OII, which are unphysical.
# NoOII, CN=3: OII=?, Z in [0.6, 1.6] and secure redshift
ibool = (Z>0.6) & (Z<1.6) & (OII_ERR<=0) & (ZQUALITY>=3) & (BRIcut==1)
class_col[ibool] = 3
# LowZ, CN=4: OII=NA, Z outside [0.6, 1.6]
ibool = np.logical_or((np.logical_or((Z>1.6), (Z<0.6)) & (ZQUALITY>=3)),(ZQUALITY==-1)) & (OII_ERR<=0) & (BRIcut==1)
class_col[ibool] = 4
# NoZ, CN=5: OII=NA, Z undetermined.
ibool = np.logical_or.reduce(((ZQUALITY==-2) , (ZQUALITY==0) , (ZQUALITY==1) ,(ZQUALITY==2)))& (BRIcut==1) & (OII_ERR<=0)
class_col[ibool] = 5
# D2reject, CN=6
ibool = BRIcut!=1
class_col[ibool] = 6
# D2unobserved, CN=8
ibool = (BRIcut==1) & (ZQUALITY<-10) # Color-selected objects with no ZQUALITY assignment are classified as unobserved.
class_col[ibool] = 8
return class_col
def count_nn(arr):
"""
Count the number of non-negative elements.
"""
return arr[arr>-1].size
def class_breakdown(fn, cn, weight, area, rwd="D"):
"""
Given a list of class fields and corresponding weights and areas, return the breakdown of objects
in each class. fn gives the field number.
"""
# Place holder for tallying
counts = np.zeros(8)
# Generate counts
for i in range(len(fn)):
# Computing counts
if rwd == "R":
tmp = generate_raw_breakdown(cn[i])
elif rwd == "W":
tmp = generate_weighted_breakdown(cn[i], weight[i])
else:
tmp = generate_density_breakdown(cn[i], weight[i], area[i])
# Tallying counts
if rwd in ["R", "W"]:
counts += tmp
else:
counts += tmp/len(fn)
# Printing counts
print(str_counts(fn[i], rwd, tmp))
# Total or average counts
if rwd in ["R", "W"]:
print(str_counts("Total", rwd, counts))
else:
print(str_counts("Avg.", rwd, counts))
def str_counts(fn, rwd_str, counts):
"""
Given the counts of the various classes of objects, return a formatted string.
"""
if type(fn)==str:
return_str = "%s & %s " % (fn,rwd_str)
else:
return_str = "%d & %s " % (fn,rwd_str)
for i in range(counts.size):
return_str += "& %d " % counts[i]
return_str += "& %d " % np.sum(counts)
return_str += latex_eol()
return return_str
def generate_raw_breakdown(cn):
return np.delete(np.bincount(cn.astype(int)),7)
def generate_weighted_breakdown(cn, weight):
counts = np.zeros(8,dtype=int)
for i,e in enumerate(np.delete(np.arange(0,9,1, dtype=int),7)):
if (e!=6) and (e!=8):
counts[i] = np.sum(weight[cn==e])
else:
counts[i] = np.sum(cn==e)
return counts
def generate_density_breakdown(cn, weight,area):
counts = np.zeros(8,dtype=int)
for i,e in enumerate(np.delete(np.arange(0,9,1, dtype=int),7)):
if (e!=6) and (e!=8):
counts[i] = np.sum(weight[cn==e])/np.float(area)
else:
counts[i] = np.sum(cn==e)/np.float(area)
return counts
def generate_table_header():
return "Field & R/W/D & "+" & ".join(class_names()) + " & Total" + latex_eol() + latex_hline()
def latex_eol():
return "\\\\ \\hline"
def latex_hline():
return "\\hline"
def class_names():
"""
Provide a list of class names.
"""
return ["Gold", "Silver", "LowOII","NoOII", "LowZ", "NoZ", "D2reject", "D2unobserved"]
def return_bricknames(ra, dec, br_name, ra_range, dec_range,tol):
ibool = (ra>(ra_range[0]-tol)) & (ra<(ra_range[1]+tol)) & (dec>(dec_range[0]-tol)) & (dec<(dec_range[1]+tol))
return br_name[ibool]
def combine_tractor(fits_directory):
"""
Given the file directory, find all Tractor fits files, combine them, and return the result as a rec-array.
"""
onlyfiles = [f for f in listdir(fits_directory) if isfile(join(fits_directory, f))]
print("Number of files in %s %d" % (fits_directory, len(onlyfiles)-1))
DR3 = None
for i,e in enumerate(onlyfiles,start=1):
# If the file ends with "fits"
if e[-4:] == "fits":
print("Combining file %d. %s" % (i,e))
# If DR3 has been set with something.
tmp_table = apply_mask(fits.open(fits_directory+e)[1].data)
if DR3 is not None:
DR3 = np.hstack((DR3, tmp_table))
else:
DR3 = tmp_table
return DR3
def combine_tractor_nocut(fits_directory):
"""
Given the file directory, find all Tractor fits files, combine them (without cuts), and return the result as a rec-array.
"""
onlyfiles = [f for f in listdir(fits_directory) if isfile(join(fits_directory, f))]
print("Number of files in %s %d" % (fits_directory, len(onlyfiles)-1))
DR3 = None
for i,e in enumerate(onlyfiles,start=1):
# If the file ends with "fits"
if e[-4:] == "fits":
print("Combining file %d. %s" % (i,e))
# If DR3 has been set with something.
tmp_table = fits.open(fits_directory+e)[1].data
if DR3 is not None:
DR3 = np.hstack((DR3, tmp_table))
else:
DR3 = tmp_table
return DR3
def apply_mask(table):
"""
Given a tractor catalog table, apply the standard mask: brick_primary and positive grz flux inverse variances.
"""
brick_primary = load_brick_primary(table)
givar, rivar, zivar = load_grz_invar(table)
ibool = (brick_primary==True) & (givar>0) & (rivar>0) &(zivar>0)
table_trimmed = np.copy(table[ibool])
return table_trimmed
def load_grz_anymask(fits):
g_anymask = fits['DECAM_ANYMASK'][:][:,1]
r_anymask = fits['DECAM_ANYMASK'][:][:,2]
z_anymask = fits['DECAM_ANYMASK'][:][:,4]
return g_anymask, r_anymask, z_anymask
def load_grz_allmask(fits):
g_allmask = fits['DECAM_ALLMASK'][:][:,1]
r_allmask = fits['DECAM_ALLMASK'][:][:,2]
z_allmask = fits['DECAM_ALLMASK'][:][:,4]
return g_allmask, r_allmask, z_allmask
def load_radec(fits):
ra = fits["ra"][:]
dec= fits["dec"][:]
return ra, dec
def load_radec_ext(pcat):
ra = pcat["RA_DEEP"]
dec = pcat["DEC_DEEP"]
return ra, dec
def cross_match_catalogs(pcat, pcat_ref, tol=0.5):
"""
Match pcat catalog to pcat_ref via ra and dec.
Incorporate astrometric correction if any.
"""
# Load radec
ra, dec = load_radec_ext(pcat)
ra_ref, dec_ref = load_radec_ext(pcat_ref)
# Create spherematch objects
c = SkyCoord(ra=ra*u.degree, dec=dec*u.degree)
c_ref = SkyCoord(ra=ra_ref*u.degree, dec=dec_ref*u.degree)
idx, idx_ref, d2d, d3d = c_ref.search_around_sky(c, 1*u.arcsec)
# Find the median difference
ra_med_diff = np.median(ra_ref[idx_ref]-ra[idx])
dec_med_diff = np.median(dec_ref[idx_ref]-dec[idx])
print("ra,dec discrepancy: %.3f, %.3f"%(ra_med_diff*3600, dec_med_diff*3600))
    # Find matches again, taking the astrometric difference into account.
c = SkyCoord(ra=(ra+ra_med_diff)*u.degree, dec=(dec+dec_med_diff)*u.degree)
c_ref = SkyCoord(ra=ra_ref*u.degree, dec=dec_ref*u.degree)
idx, idx_ref, d2d, d3d = c_ref.search_around_sky(c, 1*u.arcsec)
return idx, idx_ref
def load_brick_primary(fits):
return fits['brick_primary'][:]
def load_shape(fits):
r_dev = fits['SHAPEDEV_R'][:]
r_exp = fits['SHAPEEXP_R'][:]
return r_dev, r_exp
def load_star_mask(table):
return table["TYCHOVETO"][:].astype(int).astype(bool)
def load_oii(fits):
return fits["OII_3727"][:]
def new_oii_lim(N_new, N_old=2400):
"""
Return the new OII low threshold given the updated fiber number in units of
1e-17 ergs/A/cm^2/s
"""
return 8*np.sqrt(N_new/N_old)
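# Worked example (fiber count hypothetical): increasing the fiber number to
# 3000 scales the limit by sqrt(3000/2400), i.e.
# >>> new_oii_lim(3000)   # 8*sqrt(1.25) ~= 8.94, in 1e-17 ergs/A/cm^2/s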
def frac_above_new_oii(oii, weight, new_oii_lim):
"""
Given the oii and weights of the objects of interest and the new OII limit, return
the proportion of objects that meet the new criterion.
"""
ibool = oii>new_oii_lim
return weight[ibool].sum()/weight.sum()
def load_fits_table(fname):
"""Given the file name, load the first extension table."""
return fits.open(fname)[1].data
def save_fits(data, fname):
"""
Given a rec array and a file name (with "fits" filename), save it.
"""
cols = fits.ColDefs(np.copy(data)) # This is somehow necessary.
tbhdu = fits.BinTableHDU.from_columns(cols)
    tbhdu.writeto(fname, overwrite=True)
return
def save_fits_join(data1,data2, fname):
"""
Given a rec array and a file name (with "fits" filename), save it.
"""
data = rec.merge_arrays((data1,data2), flatten=True, usemask=False,asrecarray=True)
cols = fits.ColDefs(data)
tbhdu = fits.BinTableHDU.from_columns(cols)
    tbhdu.writeto(fname, overwrite=True)
return
def load_weight(fits):
return fits["TARG_WEIGHT"]
def fits_append(table, new_col, col_name, idx1, idx2, dtype="default", dtype_user=None):
"""
Given fits table and field column/name pair,
append the new field to the table using the idx1 and idx2 that correspond to
fits table and new column indices.
If dtype =="default", then the default of float variable type is used.
If dtype =="user", then user provided data type is used.
"""
global large_random_constant
new_col_sorted = np.ones(table.shape[0])*large_random_constant
new_col_sorted[idx1] = new_col[idx2]
if dtype=="default":
new_table = rec.append_fields(table, col_name, new_col_sorted, dtypes=new_col_sorted.dtype, usemask=False, asrecarray=True)
else:
new_table = rec.append_fields(table, col_name, new_col_sorted, dtypes=dtype_user, usemask=False, asrecarray=True)
return new_table
def apply_star_mask(fits):
ibool = ~load_star_mask(fits)
return fits[ibool]
def load_grz_flux(fits):
"""
Return raw (un-dereddened) g,r,z flux values.
"""
g = fits['decam_flux'][:][:,1]
r = fits['decam_flux'][:][:,2]
z = fits['decam_flux'][:][:,4]
return g,r,z
def load_grz_flux_dereddened(fits):
# Colors: DECam model flux in ugrizY
# mag = 22.5-2.5log10(f)
g = fits['decam_flux'][:][:,1]/fits['decam_mw_transmission'][:][:,1]
r = fits['decam_flux'][:][:,2]/fits['decam_mw_transmission'][:][:,2]
z = fits['decam_flux'][:][:,4]/fits['decam_mw_transmission'][:][:,4]
return g, r, z
def load_grz_invar(fits):
givar = fits['DECAM_FLUX_IVAR'][:][:,1]
rivar = fits['DECAM_FLUX_IVAR'][:][:,2]
zivar = fits['DECAM_FLUX_IVAR'][:][:,4]
return givar, rivar, zivar
def load_grz(fits):
# Colors: DECam model flux in ugrizY
# mag = 22.5-2.5log10(f)
g = (22.5 - 2.5*np.log10(fits['decam_flux'][:][:,1]/fits['decam_mw_transmission'][:][:,1]))
r = (22.5 - 2.5*np.log10(fits['decam_flux'][:][:,2]/fits['decam_mw_transmission'][:][:,2]))
z = (22.5 - 2.5*np.log10(fits['decam_flux'][:][:,4]/fits['decam_mw_transmission'][:][:,4]))
return g, r, z
def load_W1W2_flux(fits):
"""
Return raw (un-dereddened) w1, w2 flux values.
"""
w1flux = fits["WISE_FLUX"][:][:,1]
w2flux = fits["WISE_FLUX"][:][:,2]
return w1flux, w2flux
def load_W1W2_fluxinvar(fits):
w1_ivar = fits["WISE_FLUX_IVAR"][:][:,1]
w2_ivar = fits["WISE_FLUX_IVAR"][:][:,2]
return w1_ivar, w2_ivar
def load_W1W2(fits):
# Colors: DECam model flux in ugrizY
# mag = 22.5-2.5log10(f)
w1 = (22.5 - 2.5*np.log10(fits['WISE_FLUX'][:][:,1]/fits['WISE_MW_TRANSMISSION'][:][:,0]))
w2 = (22.5 - 2.5*np.log10(fits['WISE_FLUX'][:][:,2]/fits['WISE_MW_TRANSMISSION'][:][:,1]))
return w1, w2
def load_redz(fits):
"""
Return redshift
"""
return fits["RED_Z"]
def reasonable_mask(table, decam_mask = "all", SN = True):
"""
Given DECaLS table, return a boolean index array that indicates whether an object passed flux positivity, reasonable color range, and allmask conditions
Impose SN>2 and decam_mask cut, only if it's requested.
"""
grzflux = load_grz_flux(table)
ibool = is_grzflux_pos(grzflux)
grz = load_grz(table)
ibool &= is_reasonable_color(grz)
if decam_mask == "all":
grz_allmask = load_grz_allmask(table)
ibool &= pass_grz_decammask(grz_allmask)
elif decam_mask == "any":
grz_anymask = load_grz_anymask(table)
ibool &= pass_grz_decammask(grz_anymask)
if SN:
grzivar = load_grz_invar(table)
ibool &= pass_grz_SN(grzflux, grzivar, thres=2)
return ibool
def pass_grz_SN(grzflux, grzivar, thres=2):
gf, rf, zf = grzflux
gi, ri, zi = grzivar
return ((gf*np.sqrt(gi))>thres)&((rf*np.sqrt(ri))>thres)&((zf*np.sqrt(zi))>thres)
def grz_S2N(grzflux, grzinvar):
g,r,z = grzflux
gi,ri,zi = grzinvar
return g*np.sqrt(gi),r*np.sqrt(ri),z*np.sqrt(zi)
def grz_flux_error(grzinvar):
"""
    Given the g, r, z inverse variances, return the corresponding flux errors.
"""
gi,ri,zi = grzinvar
return np.sqrt(1/gi),np.sqrt(1/ri),np.sqrt(1/zi)
def mag_depth_Xsigma(f_err, sigma=5):
"""
    Given a flux error, return the X-sigma magnitude depth (default sigma=5).
"""
return flux2mag(f_err*sigma)
def flux2mag(flux):
return 22.5-2.5*np.log10(flux)
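# Hedged worked example (synthetic inverse variances): an object with g,r,z
# flux inverse variances of (1., 4., 25.) has flux errors (1., 0.5, 0.2), so
# its 5-sigma depths are 22.5-2.5*log10(5.0) ~= 20.75,
# 22.5-2.5*log10(2.5) ~= 21.51, and 22.5-2.5*log10(1.0) = 22.5 mag.
# >>> g_err, r_err, z_err = grz_flux_error((np.ones(1), 4*np.ones(1), 25*np.ones(1)))
# >>> mag_depth_Xsigma(g_err)   # array([ 20.75...])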
def pass_grz_decammask(grz_decammask):
gm, rm, zm = grz_decammask
return (gm==0) & (rm==0) & (zm==0)
def is_reasonable_color(grz):
"""
Given grz mag list, check whether colors lie within a reasonable range.
"""
g,r,z = grz
gr = g-r
rz = r-z
return (gr>-0.75) & (gr<2.5) & (rz>-0.5) &(rz<2.7)
def is_grzflux_pos(grzflux):
"""
Given a list [gflux, rflux, zflux], return a boolean array that tells whether each object has all good fluxes or not.
"""
ibool = (grzflux[0]>0) & (grzflux[1]>0) & (grzflux[2]>0)
return ibool
def check_astrometry(ra1,dec1,ra2,dec2,pt_size=0.3):
"""
    Given two sets of ra/dec coordinates, return the median ra and dec differences in degrees.
"""
ra_diff = ra2-ra1
dec_diff = dec2-dec1
ra_med_diff = np.median(ra_diff)
dec_med_diff = np.median(dec_diff)
return ra_med_diff, dec_med_diff
def crossmatch_cat1_to_cat2(ra1, dec1, ra2, dec2, tol=1./(deg2arcsec+1e-12)):
"""
    Return indices of cat1 (e.g., DR3) and cat2 (e.g., DEEP2) cross-matched to within the given tolerance.
    Note: This function was used to cross-match the DEEP2 and DR3 catalogs in each field
    and to test for any astrometric discrepancies. That is, for every object in
    DR3, find the nearest object in DEEP2. For each DEEP2 object matched,
    pick the DR3 object that is the closest. The objects surviving this
    matching process are the cross-matched set.
"""
# Match cat1 to cat2 using astropy functions.
idx_cat1_to_cat2, d2d = match_cat1_to_cat2(ra1, dec1, ra2, dec2)
    # Indices of unique cat2 objects that were matched.
cat2matched = np.unique(idx_cat1_to_cat2)
# For each cat2 object matched, pick cat1 object that is the closest.
    # Skip a candidate if even the closest object is more than tol away.
idx1 = [] # Place holder for indices
idx2 = []
tag = np.arange(ra1.size,dtype=int)
for e in cat2matched:
ibool = (idx_cat1_to_cat2==e)
candidates = tag[ibool]
dist2candidates = d2d[ibool]
# Index of the minimum distance cat1 object
if dist2candidates.min()<tol:
idx1.append(candidates[np.argmin(dist2candidates)])
idx2.append(e)
# Turning list of indices into numpy arrays.
idx1 = np.asarray(idx1)
idx2 = np.asarray(idx2)
# Return the indices of cat1 and cat2 of cross-matched objects.
return idx1, idx2
def match_cat1_to_cat2(ra1, dec1, ra2, dec2):
"""
"c = SkyCoord(ra=ra1*u.degree, dec=dec1*u.degree)
catalog = SkyCoord(ra=ra2*u.degree, dec=dec2*u.degree)
idx, d2d, d3d = c.match_to_catalog_sky(catalog)
idx are indices into catalog that are the closest objects to each of the coordinates in c, d2d are the on-sky distances between them, and d3d are the 3-dimensional distances." -- astropy documentation.
    For more information: http://docs.astropy.org/en/stable/coordinates/matchsep.html#astropy-coordinates-matching
"""
cat1 = SkyCoord(ra=ra1*u.degree, dec=dec1*u.degree)
cat2 = SkyCoord(ra=ra2*u.degree, dec=dec2*u.degree)
idx, d2d, d3d = cat1.match_to_catalog_sky(cat2)
return idx, d2d.degree
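# Hedged usage sketch (synthetic coordinates; assumes deg2arcsec is the
# degrees-to-arcseconds conversion defined earlier in this module):
# >>> ra1, dec1 = np.array([10.0, 10.001]), np.array([0.0, 0.0])
# >>> ra2, dec2 = np.array([10.0]), np.array([0.0])
# >>> idx1, idx2 = crossmatch_cat1_to_cat2(ra1, dec1, ra2, dec2)
# Both cat1 objects match cat2 object 0, but only the closer one (index 0)
# survives the default ~1-arcsecond tolerance: idx1 == [0], idx2 == [0].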
def closest_idx(arr, val):
return np.argmin(np.abs(arr-val))
##############################################################################
# The following is adapted from the URL indicated below.
# """
# ImagingLSS
# https://github.com/desihub/imaginglss/blob/master/imaginglss/analysis/tycho_veto.py
# veto objects based on a star catalogue.
# The tycho vetos are based on the email discussion at:
# Date: June 18, 2015 at 3:44:09 PM PDT
# To: decam-data@desi.lbl.gov
# Subject: decam-data Digest, Vol 12, Issue 29
# These objects takes a decals object and calculates the
# center and rejection radius for the catalogue in degrees.
# Note : The convention for veto flags is True for 'reject',
# False for 'preserve'.
# apply_tycho takes the galaxy catalog and appends a Tychoveto column
# the code works fine for ELG and LRGs. For other galaxy type, you need to adjust it!
# """
# Import modules
import sys
def BOSS_DR9(tycho):
bmag = tycho['BMAG']
# BOSS DR9-11
b = bmag.clip(6, 11.5)
R = (0.0802 * b ** 2 - 1.86 * b + 11.625) / 60. #
return R
def DECAM_LRG(tycho):
vtmag = tycho['VTMAG']
R = 10 ** (3.5 - 0.15 * vtmag) / 3600.
return R
DECAM_ELG = DECAM_LRG
def DECAM_QSO(tycho):
vtmag = tycho['VTMAG']
    # David Schlegel recommends not applying a bright star mask for QSOs,
    # so return a zero rejection radius for every star.
    return vtmag - vtmag
def DECAM_BGS(tycho):
vtmag = tycho['VTMAG']
R = 10 ** (2.2 - 0.15 * vtmag) / 3600.
return R
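# Worked examples of the mag-radius relations (VT magnitude hypothetical):
# for VT = 10, DECAM_LRG/DECAM_ELG give R = 10**(3.5-1.5)/3600 ~= 0.028 deg
# (~100 arcsec), while DECAM_BGS gives R = 10**(2.2-1.5)/3600 ~= 0.0014 deg
# (~5 arcsec); brighter stars get larger rejection radii.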
def radec2pos(ra, dec):
""" converting ra dec to position on a unit sphere.
ra, dec are in degrees.
"""
pos = np.empty(len(ra), dtype=('f8', 3))
ra = ra * (np.pi / 180)
dec = dec * (np.pi / 180)
pos[:, 2] = np.sin(dec)
pos[:, 0] = np.cos(dec) * np.sin(ra)
pos[:, 1] = np.cos(dec) * np.cos(ra)
return pos
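# Hedged sanity check (synthetic inputs): the returned positions are unit
# vectors, with this module's convention
# (x, y, z) = (cos(dec)sin(ra), cos(dec)cos(ra), sin(dec)).
# >>> p = radec2pos(np.array([0.0, 90.0]), np.array([0.0, 0.0]))
# >>> np.allclose(np.linalg.norm(p, axis=1), 1.0)
# True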
def tycho(filename):
"""
read the Tycho-2 catalog and prepare it for the mag-radius relation
"""
dataf = fits.open(filename)
data = dataf[1].data
tycho = np.empty(len(data),
dtype=[
('RA', 'f8'),
('DEC', 'f8'),
('VTMAG', 'f8'),
('VMAG', 'f8'),
('BMAG', 'f8'),
('BTMAG', 'f8'),
('VARFLAG', 'i8'),
])
tycho['RA'] = data['RA']
tycho['DEC'] = data['DEC']
tycho['VTMAG'] = data['MAG_VT']
tycho['BTMAG'] = data['MAG_BT']
vt = tycho['VTMAG']
bt = tycho['BTMAG']
b = vt - 0.09 * (bt - vt)
v = b - 0.85 * (bt - vt)
tycho['VMAG']=v
tycho['BMAG']=b
dataf.close()
return tycho
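# Worked example of the BT/VT -> Johnson B/V conversion above (magnitudes
# hypothetical): for VT = 10.0 and BT = 11.0, B = 10.0 - 0.09*1.0 = 9.91 and
# V = 9.91 - 0.85*1.0 = 9.06.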
def txts_read(filename):
obj = np.loadtxt(filename)
typeobj = np.dtype([
('RA','f4'), ('DEC','f4'), ('COMPETENESS','f4'),
('rflux','f4'), ('rnoise','f4'), ('gflux','f4'), ('gnoise','f4'),
('zflux','f4'), ('znoise','f4'), ('W1flux','f4'), ('W1noise','f4'),
('W2flux','f4'), ('W2noise','f4')
])
nobj = obj[:,0].size
data = np.zeros(nobj, dtype=typeobj)
data['RA'][:] = obj[:,0]
data['DEC'][:] = obj[:,1]
data['COMPETENESS'][:] = obj[:,2]
data['rflux'][:] = obj[:,3]
data['rnoise'][:] = obj[:,4]
data['gflux'][:] = obj[:,5]
data['gnoise'][:] = obj[:,6]
data['zflux'][:] = obj[:,7]
data['znoise'][:] = obj[:,8]
data['W1flux'][:] = obj[:,9]
data['W1noise'][:] = obj[:,10]
data['W2flux'][:] = obj[:,11]
data['W2noise'][:] = obj[:,12]
#datas = np.sort(data, order=['RA'])
return data
def veto(coord, center, R):
"""
Returns a veto mask for coord. any coordinate within R of center
is vet.
Parameters
----------
coord : (RA, DEC)
center : (RA, DEC)
R : degrees
Returns
-------
Vetomask : True for veto, False for keep.
"""
from sklearn.neighbors import KDTree
pos_stars = radec2pos(center[0], center[1])
R = 2 * np.sin(np.radians(R) * 0.5)
pos_obj = radec2pos(coord[0], coord[1])
tree = KDTree(pos_obj)
vetoflag = ~np.zeros(len(pos_obj), dtype='?')
arg = tree.query_radius(pos_stars, r=R)
arg = np.concatenate(arg)
vetoflag[arg] = False
return vetoflag
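# Hedged usage sketch (synthetic coordinates): one star at (10, 0) with a
# 0.5-degree rejection radius and two objects, one on the star and one two
# degrees away. As implemented, the object near the star comes back False.
# >>> keep = veto((np.array([10.0, 12.0]), np.array([0.0, 0.0])),
# ...             (np.array([10.0]), np.array([0.0])), np.array([0.5]))
# >>> keep
# array([False,  True])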
def apply_tycho(objgal, tychofn,galtype='LRG'):
# reading tycho star catalogs
tychostar = tycho(tychofn)
#
# mag-radius relation
#
if galtype == 'LRG' or galtype == 'ELG': # so far the mag-radius relation is the same for LRG and ELG
radii = DECAM_LRG(tychostar)
else:
sys.exit("Check the apply_tycho function for your galaxy type")
#
#
# coordinates of Tycho-2 stars
center = (tychostar['RA'], tychostar['DEC'])
#
#
# coordinates of objects (galaxies)
coord = (objgal['ra'], objgal['dec'])
#
#
# a 0.0 / 1.0 array (1.0: means the object is contaminated by a Tycho-2 star, so 0.0s are good)
tychomask = (~veto(coord, center, radii)).astype('f4')
objgal = rec.append_fields(objgal, ['TYCHOVETO'], data=[tychomask], dtypes=tychomask.dtype, usemask=False)
return objgal
|
jaekor91/xd-elg-scripts
|
xd_elg_utils.py
|
Python
|
gpl-3.0
| 81,633
|
[
"Galaxy",
"Gaussian"
] |
6c0d30f2c539cb1dee2eecba5148a8b3c49b3fac50ae791c2201ed34f84e65cd
|
"""
sphinx.ext.apidoc
~~~~~~~~~~~~~~~~~
Parses a directory tree looking for Python modules and packages and creates
ReST files appropriately to create code documentation with Sphinx. It also
creates a modules index (named modules.<suffix>).
This is derived from the "sphinx-autopackage" script, which is:
Copyright 2008 Société des arts technologiques (SAT),
https://sat.qc.ca/
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import argparse
import glob
import locale
import os
import sys
from copy import copy
from fnmatch import fnmatch
from importlib.machinery import EXTENSION_SUFFIXES
from os import path
from typing import Any, Generator, List, Tuple
import sphinx.locale
from sphinx import __display_version__, package_dir
from sphinx.cmd.quickstart import EXTENSIONS
from sphinx.locale import __
from sphinx.util.osutil import FileAvoidWrite, ensuredir
from sphinx.util.template import ReSTRenderer
# automodule options
if 'SPHINX_APIDOC_OPTIONS' in os.environ:
OPTIONS = os.environ['SPHINX_APIDOC_OPTIONS'].split(',')
else:
OPTIONS = [
'members',
'undoc-members',
# 'inherited-members', # disabled because there's a bug in sphinx
'show-inheritance',
]
PY_SUFFIXES = ('.py', '.pyx') + tuple(EXTENSION_SUFFIXES)
template_dir = path.join(package_dir, 'templates', 'apidoc')
def is_initpy(filename: str) -> bool:
"""Check *filename* is __init__ file or not."""
basename = path.basename(filename)
for suffix in sorted(PY_SUFFIXES, key=len, reverse=True):
if basename == '__init__' + suffix:
return True
else:
return False
def module_join(*modnames: str) -> str:
"""Join module names with dots."""
return '.'.join(filter(None, modnames))
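# Hedged examples (not part of upstream): falsy components are dropped by
# the filter, e.g.
# >>> module_join('pkg', '', 'mod')
# 'pkg.mod'
# >>> module_join(None, 'mod')
# 'mod'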
def is_packagedir(dirname: str = None, files: List[str] = None) -> bool:
"""Check given *files* contains __init__ file."""
if files is None and dirname is None:
return False
if files is None:
files = os.listdir(dirname)
return any(f for f in files if is_initpy(f))
def write_file(name: str, text: str, opts: Any) -> None:
"""Write the output file for module/package <name>."""
quiet = getattr(opts, 'quiet', None)
fname = path.join(opts.destdir, '%s.%s' % (name, opts.suffix))
if opts.dryrun:
if not quiet:
print(__('Would create file %s.') % fname)
return
if not opts.force and path.isfile(fname):
if not quiet:
print(__('File %s already exists, skipping.') % fname)
else:
if not quiet:
print(__('Creating file %s.') % fname)
with FileAvoidWrite(fname) as f:
f.write(text)
def create_module_file(package: str, basename: str, opts: Any,
user_template_dir: str = None) -> None:
"""Build the text of the file and write the file."""
options = copy(OPTIONS)
if opts.includeprivate and 'private-members' not in options:
options.append('private-members')
qualname = module_join(package, basename)
context = {
'show_headings': not opts.noheadings,
'basename': basename,
'qualname': qualname,
'automodule_options': options,
}
text = ReSTRenderer([user_template_dir, template_dir]).render('module.rst_t', context)
write_file(qualname, text, opts)
def create_package_file(root: str, master_package: str, subroot: str, py_files: List[str],
opts: Any, subs: List[str], is_namespace: bool,
excludes: List[str] = [], user_template_dir: str = None) -> None:
"""Build the text of the file and write the file."""
# build a list of sub packages (directories containing an __init__ file)
subpackages = [module_join(master_package, subroot, pkgname)
for pkgname in subs
if not is_skipped_package(path.join(root, pkgname), opts, excludes)]
# build a list of sub modules
submodules = [sub.split('.')[0] for sub in py_files
if not is_skipped_module(path.join(root, sub), opts, excludes) and
not is_initpy(sub)]
submodules = [module_join(master_package, subroot, modname)
for modname in submodules]
options = copy(OPTIONS)
if opts.includeprivate and 'private-members' not in options:
options.append('private-members')
pkgname = module_join(master_package, subroot)
context = {
'pkgname': pkgname,
'subpackages': subpackages,
'submodules': submodules,
'is_namespace': is_namespace,
'modulefirst': opts.modulefirst,
'separatemodules': opts.separatemodules,
'automodule_options': options,
'show_headings': not opts.noheadings,
'maxdepth': opts.maxdepth,
}
text = ReSTRenderer([user_template_dir, template_dir]).render('package.rst_t', context)
write_file(pkgname, text, opts)
if submodules and opts.separatemodules:
for submodule in submodules:
create_module_file(None, submodule, opts, user_template_dir)
def create_modules_toc_file(modules: List[str], opts: Any, name: str = 'modules',
user_template_dir: str = None) -> None:
"""Create the module's index."""
modules.sort()
prev_module = ''
for module in modules[:]:
# look if the module is a subpackage and, if yes, ignore it
if module.startswith(prev_module + '.'):
modules.remove(module)
else:
prev_module = module
context = {
'header': opts.header,
'maxdepth': opts.maxdepth,
'docnames': modules,
}
text = ReSTRenderer([user_template_dir, template_dir]).render('toc.rst_t', context)
write_file(name, text, opts)
def is_skipped_package(dirname: str, opts: Any, excludes: List[str] = []) -> bool:
"""Check if we want to skip this module."""
if not path.isdir(dirname):
return False
files = glob.glob(path.join(dirname, '*.py'))
regular_package = any(f for f in files if is_initpy(f))
if not regular_package and not opts.implicit_namespaces:
        # *dirname* is not a regular package and implicit namespace packages are disabled
return True
# Check there is some showable module inside package
if all(is_excluded(path.join(dirname, f), excludes) for f in files):
# all submodules are excluded
return True
else:
return False
def is_skipped_module(filename: str, opts: Any, excludes: List[str]) -> bool:
"""Check if we want to skip this module."""
if not path.exists(filename):
# skip if the file doesn't exist
return True
elif path.basename(filename).startswith('_') and not opts.includeprivate:
# skip if the module has a "private" name
return True
else:
return False
def walk(rootpath: str, excludes: List[str], opts: Any
) -> Generator[Tuple[str, List[str], List[str]], None, None]:
"""Walk through the directory and list files and subdirectories up."""
followlinks = getattr(opts, 'followlinks', False)
includeprivate = getattr(opts, 'includeprivate', False)
for root, subs, files in os.walk(rootpath, followlinks=followlinks):
# document only Python module files (that aren't excluded)
files = sorted(f for f in files
if f.endswith(PY_SUFFIXES) and
not is_excluded(path.join(root, f), excludes))
# remove hidden ('.') and private ('_') directories, as well as
# excluded dirs
if includeprivate:
exclude_prefixes: Tuple[str, ...] = ('.',)
else:
exclude_prefixes = ('.', '_')
subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and
not is_excluded(path.join(root, sub), excludes))
yield root, subs, files
def has_child_module(rootpath: str, excludes: List[str], opts: Any) -> bool:
"""Check the given directory contains child module/s (at least one)."""
for root, subs, files in walk(rootpath, excludes, opts):
if files:
return True
return False
def recurse_tree(rootpath: str, excludes: List[str], opts: Any,
user_template_dir: str = None) -> List[str]:
"""
Look for every file in the directory tree and create the corresponding
ReST files.
"""
implicit_namespaces = getattr(opts, 'implicit_namespaces', False)
# check if the base directory is a package and get its name
if is_packagedir(rootpath) or implicit_namespaces:
root_package = rootpath.split(path.sep)[-1]
else:
# otherwise, the base is a directory with packages
root_package = None
toplevels = []
for root, subs, files in walk(rootpath, excludes, opts):
is_pkg = is_packagedir(None, files)
is_namespace = not is_pkg and implicit_namespaces
if is_pkg:
for f in files[:]:
if is_initpy(f):
files.remove(f)
files.insert(0, f)
elif root != rootpath:
# only accept non-package at toplevel unless using implicit namespaces
if not implicit_namespaces:
del subs[:]
continue
if is_pkg or is_namespace:
# we are in a package with something to document
if subs or len(files) > 1 or not is_skipped_package(root, opts):
subpackage = root[len(rootpath):].lstrip(path.sep).\
replace(path.sep, '.')
# if this is not a namespace or
# a namespace and there is something there to document
if not is_namespace or has_child_module(root, excludes, opts):
create_package_file(root, root_package, subpackage,
files, opts, subs, is_namespace, excludes,
user_template_dir)
toplevels.append(module_join(root_package, subpackage))
else:
# if we are at the root level, we don't require it to be a package
assert root == rootpath and root_package is None
for py_file in files:
if not is_skipped_module(path.join(rootpath, py_file), opts, excludes):
module = py_file.split('.')[0]
create_module_file(root_package, module, opts, user_template_dir)
toplevels.append(module)
return toplevels
def is_excluded(root: str, excludes: List[str]) -> bool:
"""Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
e.g. an exclude "foo" also accidentally excluding "foobar".
"""
for exclude in excludes:
if fnmatch(root, exclude):
return True
return False
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
usage='%(prog)s [OPTIONS] -o <OUTPUT_PATH> <MODULE_PATH> '
'[EXCLUDE_PATTERN, ...]',
epilog=__('For more information, visit <https://www.sphinx-doc.org/>.'),
description=__("""
Look recursively in <MODULE_PATH> for Python modules and packages and create
one reST file with automodule directives per package in the <OUTPUT_PATH>.
The <EXCLUDE_PATTERN>s can be file and/or directory patterns that will be
excluded from generation.
Note: By default this script will not overwrite already created files."""))
parser.add_argument('--version', action='version', dest='show_version',
version='%%(prog)s %s' % __display_version__)
parser.add_argument('module_path',
help=__('path to module to document'))
parser.add_argument('exclude_pattern', nargs='*',
help=__('fnmatch-style file and/or directory patterns '
'to exclude from generation'))
parser.add_argument('-o', '--output-dir', action='store', dest='destdir',
required=True,
help=__('directory to place all output'))
parser.add_argument('-q', action='store_true', dest='quiet',
help=__('no output on stdout, just warnings on stderr'))
parser.add_argument('-d', '--maxdepth', action='store', dest='maxdepth',
type=int, default=4,
help=__('maximum depth of submodules to show in the TOC '
'(default: 4)'))
parser.add_argument('-f', '--force', action='store_true', dest='force',
help=__('overwrite existing files'))
parser.add_argument('-l', '--follow-links', action='store_true',
dest='followlinks', default=False,
help=__('follow symbolic links. Powerful when combined '
'with collective.recipe.omelette.'))
parser.add_argument('-n', '--dry-run', action='store_true', dest='dryrun',
help=__('run the script without creating files'))
parser.add_argument('-e', '--separate', action='store_true',
dest='separatemodules',
help=__('put documentation for each module on its own page'))
parser.add_argument('-P', '--private', action='store_true',
dest='includeprivate',
help=__('include "_private" modules'))
parser.add_argument('--tocfile', action='store', dest='tocfile', default='modules',
help=__("filename of table of contents (default: modules)"))
parser.add_argument('-T', '--no-toc', action='store_false', dest='tocfile',
help=__("don't create a table of contents file"))
parser.add_argument('-E', '--no-headings', action='store_true',
dest='noheadings',
help=__("don't create headings for the module/package "
"packages (e.g. when the docstrings already "
"contain them)"))
parser.add_argument('-M', '--module-first', action='store_true',
dest='modulefirst',
help=__('put module documentation before submodule '
'documentation'))
parser.add_argument('--implicit-namespaces', action='store_true',
dest='implicit_namespaces',
help=__('interpret module paths according to PEP-0420 '
'implicit namespaces specification'))
parser.add_argument('-s', '--suffix', action='store', dest='suffix',
default='rst',
help=__('file suffix (default: rst)'))
parser.add_argument('-F', '--full', action='store_true', dest='full',
help=__('generate a full project with sphinx-quickstart'))
parser.add_argument('-a', '--append-syspath', action='store_true',
dest='append_syspath',
help=__('append module_path to sys.path, used when --full is given'))
parser.add_argument('-H', '--doc-project', action='store', dest='header',
help=__('project name (default: root module name)'))
parser.add_argument('-A', '--doc-author', action='store', dest='author',
help=__('project author(s), used when --full is given'))
parser.add_argument('-V', '--doc-version', action='store', dest='version',
help=__('project version, used when --full is given'))
parser.add_argument('-R', '--doc-release', action='store', dest='release',
help=__('project release, used when --full is given, '
'defaults to --doc-version'))
group = parser.add_argument_group(__('extension options'))
group.add_argument('--extensions', metavar='EXTENSIONS', dest='extensions',
action='append', help=__('enable arbitrary extensions'))
for ext in EXTENSIONS:
group.add_argument('--ext-%s' % ext, action='append_const',
const='sphinx.ext.%s' % ext, dest='extensions',
help=__('enable %s extension') % ext)
group = parser.add_argument_group(__('Project templating'))
group.add_argument('-t', '--templatedir', metavar='TEMPLATEDIR',
dest='templatedir',
help=__('template directory for template files'))
return parser
def main(argv: List[str] = sys.argv[1:]) -> int:
"""Parse and check the command line arguments."""
sphinx.locale.setlocale(locale.LC_ALL, '')
sphinx.locale.init_console(os.path.join(package_dir, 'locale'), 'sphinx')
parser = get_parser()
args = parser.parse_args(argv)
rootpath = path.abspath(args.module_path)
# normalize opts
if args.header is None:
args.header = rootpath.split(path.sep)[-1]
if args.suffix.startswith('.'):
args.suffix = args.suffix[1:]
if not path.isdir(rootpath):
print(__('%s is not a directory.') % rootpath, file=sys.stderr)
sys.exit(1)
if not args.dryrun:
ensuredir(args.destdir)
excludes = [path.abspath(exclude) for exclude in args.exclude_pattern]
modules = recurse_tree(rootpath, excludes, args, args.templatedir)
if args.full:
from sphinx.cmd import quickstart as qs
modules.sort()
prev_module = ''
text = ''
for module in modules:
if module.startswith(prev_module + '.'):
continue
prev_module = module
text += ' %s\n' % module
d = {
'path': args.destdir,
'sep': False,
'dot': '_',
'project': args.header,
'author': args.author or 'Author',
'version': args.version or '',
'release': args.release or args.version or '',
'suffix': '.' + args.suffix,
'master': 'index',
'epub': True,
'extensions': ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
'sphinx.ext.todo'],
'makefile': True,
'batchfile': True,
'make_mode': True,
'mastertocmaxdepth': args.maxdepth,
'mastertoctree': text,
'language': 'en',
'module_path': rootpath,
'append_syspath': args.append_syspath,
}
if args.extensions:
d['extensions'].extend(args.extensions)
if args.quiet:
d['quiet'] = True
for ext in d['extensions'][:]:
if ',' in ext:
d['extensions'].remove(ext)
d['extensions'].extend(ext.split(','))
if not args.dryrun:
qs.generate(d, silent=True, overwrite=args.force,
templatedir=args.templatedir)
elif args.tocfile:
create_modules_toc_file(modules, args, args.tocfile, args.templatedir)
return 0
# So program can be started with "python -m sphinx.apidoc ..."
if __name__ == "__main__":
main()
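# Hedged usage sketch (paths hypothetical), using only the options defined
# above: generate one .rst file per package/module under docs/api, excluding
# tests, plus a modules.rst table of contents:
#
#   sphinx-apidoc -o docs/api src/mypackage "src/mypackage/tests/*"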
|
sonntagsgesicht/regtest
|
.aux/venv/lib/python3.9/site-packages/sphinx/ext/apidoc.py
|
Python
|
apache-2.0
| 19,381
|
[
"VisIt"
] |
c7962a7de784139ff934c13c4c91fd0095976167ef6cfd00e93477fd9a68e13e
|
"""Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
from sklearn.utils.deprecation import deprecated
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of Gaussian Processes
for Machine Learning (GPML) by Rasmussen and Williams.
In addition to standard scikit-learn estimator API,
GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method sample_y(X), which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method log_marginal_likelihood(theta), which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Read more in the :ref:`User Guide <gaussian_process>`.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
alpha : float or array-like, optional (default: 1e-10)
Value added to the diagonal of the kernel matrix during fitting.
Larger values correspond to increased noise level in the observations.
This can also prevent a potential numerical issue during fitting, by
ensuring that the calculated values form a positive definite matrix.
If an array is passed, it must have the same number of entries as the
data used for fitting and is used as datapoint-dependent noise level.
Note that this is equivalent to adding a WhiteKernel with c=alpha.
Allowing to specify the noise level directly as a parameter is mainly
for convenience and for consistency with Ridge.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y : boolean, optional (default: False)
        Whether the target values y are normalized, i.e., the mean of the
        observed target values becomes zero. This parameter should be set to
        True if the target values' mean is expected to differ considerably from
zero. When enabled, the normalization effectively modifies the GP's
prior based on the data, which contradicts the likelihood principle;
normalization is thus disabled per default.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, optional (default: None)
The generator used to initialize the centers. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_ : array-like, shape = (n_samples, [n_output_dims])
Target values in training data (also required for prediction)
kernel_ : kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
alpha_ : array-like, shape = (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, alpha=1e-10,
optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
normalize_y=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
@property
@deprecated("Attribute rng was deprecated in version 0.19 and "
"will be removed in 0.21.")
def rng(self):
return self._rng
@property
@deprecated("Attribute y_train_mean was deprecated in version 0.19 and "
"will be removed in 0.21.")
def y_train_mean(self):
return self._y_train_mean
def fit(self, X, y):
"""Fit Gaussian process regression model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples, [n_output_dims])
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self._rng = check_random_state(self.random_state)
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
# Normalize target value
if self.normalize_y:
self._y_train_mean = np.mean(y, axis=0)
# demean y
y = y - self._y_train_mean
else:
self._y_train_mean = np.zeros(1)
if np.iterable(self.alpha) \
and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError("alpha must be a scalar or an array"
" with same number of entries as y.(%d != %d)"
% (self.alpha.shape[0], y.shape[0]))
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self._rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
self.L_ = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError as exc:
exc.args = ("The kernel, %s, is not returning a "
"positive definite matrix. Try gradually "
"increasing the 'alpha' parameter of your "
"GaussianProcessRegressor estimator."
% self.kernel_,) + exc.args
raise
self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
        In addition to the mean of the predictive distribution, its
        standard deviation (return_std=True) or covariance (return_cov=True)
        can also be returned. Note that at most one of the two can be requested.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Query points where the GP is evaluated
return_std : bool, default: False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default: False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean
Returns
-------
y_mean : array, shape = (n_samples, [n_output_dims])
            Mean of predictive distribution at query points
y_std : array, shape = (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when return_std is True.
y_cov : array, shape = (n_samples, n_samples), optional
            Covariance of joint predictive distribution at query points.
Only returned when return_cov is True.
"""
if return_std and return_cov:
raise RuntimeError(
"Not returning standard deviation of predictions when "
"returning full covariance.")
X = check_array(X)
        if not hasattr(self, "X_train_"):  # Unfitted; predict based on GP prior
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = self.kernel(X)
return y_mean, y_cov
elif return_std:
y_var = self.kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star)
y_mean = self._y_train_mean + y_mean # undo normal.
if return_cov:
v = cho_solve((self.L_, True), K_trans.T) # Line 5
y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6
return y_mean, y_cov
elif return_std:
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
K_inv = L_inv.dot(L_inv.T)
# Compute variance of predictive distribution
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ij,ij->i", np.dot(K_trans, K_inv), K_trans)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn("Predicted variances smaller than 0. "
"Setting those variances to 0.")
y_var[y_var_negative] = 0.0
return y_mean, np.sqrt(y_var)
else:
return y_mean
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like, shape = (n_samples_X, n_features)
Query points where the GP samples are evaluated
n_samples : int, default: 1
The number of samples drawn from the Gaussian process
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the
random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
Returns
-------
y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = \
[rng.multivariate_normal(y_mean[:, i], y_cov,
n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])]
y_samples = np.hstack(y_samples)
return y_samples
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) \
if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
            warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
                          "state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
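if __name__ == "__main__":
    # Hedged demo, not part of the original module: fit the regressor on a
    # noisy 1-D sine curve and predict with uncertainties on a small grid.
    rng = check_random_state(0)
    X_demo = rng.uniform(0, 5, 20)[:, np.newaxis]
    y_demo = np.sin(X_demo).ravel() + 1e-2 * rng.randn(20)
    gpr = GaussianProcessRegressor(kernel=C(1.0) * RBF(1.0), alpha=1e-4)
    gpr.fit(X_demo, y_demo)
    y_mean, y_std = gpr.predict(np.linspace(0, 5, 5)[:, np.newaxis],
                                return_std=True)
    print(y_mean, y_std)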
|
Titan-C/scikit-learn
|
sklearn/gaussian_process/gpr.py
|
Python
|
bsd-3-clause
| 20,061
|
[
"Gaussian"
] |
1e0e7c8b7f4c73023d6e5b8fd86f8a67465ef6abbda827afbbafa048866f2a4f
|
from __future__ import annotations
import logging
logger = logging.getLogger(__name__)
class MaskerBase:
"""A root class to that does overlap masking"""
def __init__(self, experiment):
"""Initialise the overlap masking algorithm
Params:
experiment The experiment data
"""
from dials.algorithms.shoebox import MaskOverlapping
# Construct the overlapping reflection mask
self.mask_overlapping = MaskOverlapping()
def __call__(self, reflections, adjacency_list=None):
"""Mask the given reflections.
Params:
reflections The reflection list
adjacency_list The adjacency_list (optional)
Returns:
The masked reflection list
"""
# Mask the overlaps if an adjacency list is given
if adjacency_list:
logger.info("Masking overlapping reflections")
self.mask_overlapping(
reflections["shoebox"], reflections["xyzcal.px"], adjacency_list
)
logger.info("Masked %s overlapping reflections", len(adjacency_list))
# Return the reflections
return reflections
class Masker3DProfile(MaskerBase):
"""A class to perform 3D profile masking"""
def __init__(self, experiments):
"""Initialise the masking algorithms
Params:
experiment The experiment data
delta_d The extent of the reflection in reciprocal space
delta_m The extent of the reflection in reciprocal space
"""
super().__init__(experiments[0])
self._experiments = experiments
def __call__(self, reflections, adjacency_list=None):
"""Mask the given reflections.
Params:
reflections The reflection list
adjacency_list The adjacency_list (optional)
Returns:
The masked reflection list
"""
reflections = super().__call__(reflections, adjacency_list)
# Mask the foreground region
        self._experiments[0].profile.compute_mask(
            reflections,
            self._experiments[0].crystal,
            self._experiments[0].beam,
            self._experiments[0].detector,
            self._experiments[0].goniometer,
            self._experiments[0].scan,
        )
# Return the reflections
return reflections
class MaskerEmpirical(MaskerBase):
"""A class to perform empirical masking"""
def __init__(self, experiment, reference):
"""Initialise the masking algorithms
Params:
experiment The experiment data
"""
super().__init__(experiment)
from dials.algorithms.shoebox import MaskEmpirical
# Construct the foreground pixel mask
self.mask_empirical = MaskEmpirical(reference)
self._reference = reference
def __call__(self, reflections, adjacency_list=None):
"""Mask the given reflections.
Params:
reflections The reflection list
adjacency_list The adjacency_list (optional)
Returns:
The masked reflection list
"""
reflections = super().__call__(reflections, adjacency_list)
if self.mask_empirical:
# Mask the foreground region
self.mask_empirical(reflections)
# Return the reflections
return reflections
|
dials/dials
|
algorithms/shoebox/masker.py
|
Python
|
bsd-3-clause
| 3,387
|
[
"CRYSTAL"
] |
12db23c9762b9969923aade169d8b8379c1e7e8498954f06665ca3843347aa47
|
#!/usr/bin/env python
#
# Copyright 2008,2009,2011,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
DESC_KEY = 'desc'
SAMP_RATE_KEY = 'samp_rate'
LINK_RATE_KEY = 'link_rate'
GAIN_KEY = 'gain'
TX_FREQ_KEY = 'tx_freq'
DSP_FREQ_KEY = 'dsp_freq'
RF_FREQ_KEY = 'rf_freq'
AMPLITUDE_KEY = 'amplitude'
AMPL_RANGE_KEY = 'ampl_range'
WAVEFORM_FREQ_KEY = 'waveform_freq'
WAVEFORM_OFFSET_KEY = 'waveform_offset'
WAVEFORM2_FREQ_KEY = 'waveform2_freq'
FREQ_RANGE_KEY = 'freq_range'
GAIN_RANGE_KEY = 'gain_range'
TYPE_KEY = 'type'
def setter(ps, key, val): ps[key] = val
from gnuradio import gr, gru, uhd, eng_notation
from gnuradio.gr.pubsub import pubsub
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
import math
n2s = eng_notation.num_to_str
waveforms = { gr.GR_SIN_WAVE : "Complex Sinusoid",
gr.GR_CONST_WAVE : "Constant",
gr.GR_GAUSSIAN : "Gaussian Noise",
gr.GR_UNIFORM : "Uniform Noise",
"2tone" : "Two Tone",
"sweep" : "Sweep" }
#
# GUI-unaware GNU Radio flowgraph. This may be used either with command
# line applications or GUI applications.
#
class top_block(gr.top_block, pubsub):
def __init__(self, options, args):
gr.top_block.__init__(self)
pubsub.__init__(self)
self._verbose = options.verbose
#initialize values from options
self._setup_usrpx(options)
self[SAMP_RATE_KEY] = options.samp_rate
self[TX_FREQ_KEY] = options.tx_freq
self[AMPLITUDE_KEY] = options.amplitude
self[WAVEFORM_FREQ_KEY] = options.waveform_freq
self[WAVEFORM_OFFSET_KEY] = options.offset
self[WAVEFORM2_FREQ_KEY] = options.waveform2_freq
self[DSP_FREQ_KEY] = 0
self[RF_FREQ_KEY] = 0
#subscribe set methods
self.subscribe(SAMP_RATE_KEY, self.set_samp_rate)
self.subscribe(GAIN_KEY, self.set_gain)
self.subscribe(TX_FREQ_KEY, self.set_freq)
self.subscribe(AMPLITUDE_KEY, self.set_amplitude)
self.subscribe(WAVEFORM_FREQ_KEY, self.set_waveform_freq)
self.subscribe(WAVEFORM2_FREQ_KEY, self.set_waveform2_freq)
self.subscribe(TYPE_KEY, self.set_waveform)
#force update on pubsub keys
for key in (SAMP_RATE_KEY, GAIN_KEY, TX_FREQ_KEY,
AMPLITUDE_KEY, WAVEFORM_FREQ_KEY,
WAVEFORM_OFFSET_KEY, WAVEFORM2_FREQ_KEY):
self[key] = self[key]
self[TYPE_KEY] = options.type #set type last
def _setup_usrpx(self, options):
self._u = uhd.usrp_sink(device_addr=options.args, stream_args=uhd.stream_args('fc32'))
self._u.set_samp_rate(options.samp_rate)
# Set the subdevice spec
if(options.spec):
self._u.set_subdev_spec(options.spec, 0)
# Set the gain on the usrp from options
if(options.gain):
self._u.set_gain(options.gain)
# Set the antenna
if(options.antenna):
self._u.set_antenna(options.antenna, 0)
# Setup USRP Configuration value
try:
usrp_info = self._u.get_usrp_info()
mboard_id = usrp_info.get("mboard_id").split(" ")[0]
mboard_serial = usrp_info.get("mboard_serial")
if mboard_serial == "":
mboard_serial = "no serial"
dboard_id = usrp_info.get("tx_id").split(" ")[0].split(",")[0]
dboard_serial = usrp_info.get("tx_serial")
if dboard_serial == "":
dboard_serial = "no serial"
subdev = self._u.get_subdev_spec()
antenna = self._u.get_antenna()
desc_key_str = "Motherboard: %s [%s]\n" % (mboard_id, mboard_serial)
desc_key_str += "Daughterboard: %s [%s]\n" % (dboard_id, dboard_serial)
desc_key_str += "Subdev: %s\n" % subdev
desc_key_str += "Antenna: %s" % antenna
        except Exception:
desc_key_str = "USRP configuration output not implemented in this version"
self.publish(DESC_KEY, lambda: desc_key_str)
self.publish(FREQ_RANGE_KEY, self._u.get_freq_range)
self.publish(GAIN_RANGE_KEY, self._u.get_gain_range)
self.publish(GAIN_KEY, self._u.get_gain)
print "UHD Signal Generator"
print "Version: %s" % uhd.get_version_string()
print "\nUsing USRP configuration:"
print desc_key_str + "\n"
# Direct asynchronous notifications to callback function
if options.show_async_msg:
self.async_msgq = gr.msg_queue(0)
self.async_src = uhd.amsg_source("", self.async_msgq)
self.async_rcv = gru.msgq_runner(self.async_msgq, self.async_callback)
def async_callback(self, msg):
md = self.async_src.msg_to_async_metadata_t(msg)
print "Channel: %i Time: %f Event: %i" % (md.channel, md.time_spec.get_real_secs(), md.event_code)
def _set_tx_amplitude(self, ampl):
"""
Sets the transmit amplitude sent to the USRP
@param ampl the amplitude or None for automatic
"""
ampl_range = self[AMPL_RANGE_KEY]
if ampl is None:
ampl = (ampl_range[1] - ampl_range[0])*0.15 + ampl_range[0]
self[AMPLITUDE_KEY] = max(ampl_range[0], min(ampl, ampl_range[1]))
def set_samp_rate(self, sr):
self._u.set_samp_rate(sr)
sr = self._u.get_samp_rate()
if self[TYPE_KEY] in (gr.GR_SIN_WAVE, gr.GR_CONST_WAVE):
self._src.set_sampling_freq(self[SAMP_RATE_KEY])
elif self[TYPE_KEY] == "2tone":
self._src1.set_sampling_freq(self[SAMP_RATE_KEY])
self._src2.set_sampling_freq(self[SAMP_RATE_KEY])
elif self[TYPE_KEY] == "sweep":
self._src1.set_sampling_freq(self[SAMP_RATE_KEY])
self._src2.set_sampling_freq(self[WAVEFORM_FREQ_KEY]*2*math.pi/self[SAMP_RATE_KEY])
else:
return True # Waveform not yet set
if self._verbose:
print "Set sample rate to:", sr
return True
def set_gain(self, gain):
if gain is None:
g = self[GAIN_RANGE_KEY]
gain = float(g.start()+g.stop())/2
if self._verbose:
print "Using auto-calculated mid-point TX gain"
self[GAIN_KEY] = gain
return
self._u.set_gain(gain)
if self._verbose:
print "Set TX gain to:", gain
def set_freq(self, target_freq):
if target_freq is None:
f = self[FREQ_RANGE_KEY]
target_freq = float(f.start()+f.stop())/2.0
if self._verbose:
print "Using auto-calculated mid-point frequency"
self[TX_FREQ_KEY] = target_freq
return
tr = self._u.set_center_freq(target_freq)
fs = "%sHz" % (n2s(target_freq),)
if tr is not None:
self._freq = target_freq
self[DSP_FREQ_KEY] = tr.actual_dsp_freq
self[RF_FREQ_KEY] = tr.actual_rf_freq
if self._verbose:
print "Set center frequency to", self._u.get_center_freq()
print "Tx RF frequency: %sHz" % (n2s(tr.actual_rf_freq),)
print "Tx DSP frequency: %sHz" % (n2s(tr.actual_dsp_freq),)
elif self._verbose:
print "Failed to set freq."
return tr
def set_waveform_freq(self, freq):
if self[TYPE_KEY] == gr.GR_SIN_WAVE:
self._src.set_frequency(freq)
elif self[TYPE_KEY] == "2tone":
self._src1.set_frequency(freq)
elif self[TYPE_KEY] == 'sweep':
            # there is no setter for the FM sensitivity, so rebuild the flowgraph
self[TYPE_KEY] = self[TYPE_KEY]
return True
def set_waveform2_freq(self, freq):
if freq is None:
self[WAVEFORM2_FREQ_KEY] = -self[WAVEFORM_FREQ_KEY]
return
if self[TYPE_KEY] == "2tone":
self._src2.set_frequency(freq)
elif self[TYPE_KEY] == "sweep":
self._src1.set_frequency(freq)
return True
def set_waveform(self, type):
self.lock()
self.disconnect_all()
if type == gr.GR_SIN_WAVE or type == gr.GR_CONST_WAVE:
self._src = gr.sig_source_c(self[SAMP_RATE_KEY], # Sample rate
type, # Waveform type
self[WAVEFORM_FREQ_KEY], # Waveform frequency
self[AMPLITUDE_KEY], # Waveform amplitude
self[WAVEFORM_OFFSET_KEY]) # Waveform offset
elif type == gr.GR_GAUSSIAN or type == gr.GR_UNIFORM:
self._src = gr.noise_source_c(type, self[AMPLITUDE_KEY])
elif type == "2tone":
self._src1 = gr.sig_source_c(self[SAMP_RATE_KEY],
gr.GR_SIN_WAVE,
self[WAVEFORM_FREQ_KEY],
self[AMPLITUDE_KEY]/2.0,
0)
if(self[WAVEFORM2_FREQ_KEY] is None):
self[WAVEFORM2_FREQ_KEY] = -self[WAVEFORM_FREQ_KEY]
self._src2 = gr.sig_source_c(self[SAMP_RATE_KEY],
gr.GR_SIN_WAVE,
self[WAVEFORM2_FREQ_KEY],
self[AMPLITUDE_KEY]/2.0,
0)
self._src = gr.add_cc()
self.connect(self._src1,(self._src,0))
self.connect(self._src2,(self._src,1))
elif type == "sweep":
# rf freq is center frequency
# waveform_freq is total swept width
# waveform2_freq is sweep rate
# will sweep from (rf_freq-waveform_freq/2) to (rf_freq+waveform_freq/2)
if self[WAVEFORM2_FREQ_KEY] is None:
self[WAVEFORM2_FREQ_KEY] = 0.1
self._src1 = gr.sig_source_f(self[SAMP_RATE_KEY],
gr.GR_TRI_WAVE,
self[WAVEFORM2_FREQ_KEY],
1.0,
-0.5)
self._src2 = gr.frequency_modulator_fc(self[WAVEFORM_FREQ_KEY]*2*math.pi/self[SAMP_RATE_KEY])
self._src = gr.multiply_const_cc(self[AMPLITUDE_KEY])
self.connect(self._src1,self._src2,self._src)
else:
raise RuntimeError("Unknown waveform type")
self.connect(self._src, self._u)
self.unlock()
if self._verbose:
print "Set baseband modulation to:", waveforms[type]
if type == gr.GR_SIN_WAVE:
print "Modulation frequency: %sHz" % (n2s(self[WAVEFORM_FREQ_KEY]),)
print "Initial phase:", self[WAVEFORM_OFFSET_KEY]
elif type == "2tone":
print "Tone 1: %sHz" % (n2s(self[WAVEFORM_FREQ_KEY]),)
print "Tone 2: %sHz" % (n2s(self[WAVEFORM2_FREQ_KEY]),)
elif type == "sweep":
print "Sweeping across %sHz to %sHz" % (n2s(-self[WAVEFORM_FREQ_KEY]/2.0),n2s(self[WAVEFORM_FREQ_KEY]/2.0))
print "Sweep rate: %sHz" % (n2s(self[WAVEFORM2_FREQ_KEY]),)
print "TX amplitude:", self[AMPLITUDE_KEY]
def set_amplitude(self, amplitude):
if amplitude < 0.0 or amplitude > 1.0:
if self._verbose:
print "Amplitude out of range:", amplitude
return False
if self[TYPE_KEY] in (gr.GR_SIN_WAVE, gr.GR_CONST_WAVE, gr.GR_GAUSSIAN, gr.GR_UNIFORM):
self._src.set_amplitude(amplitude)
elif self[TYPE_KEY] == "2tone":
self._src1.set_amplitude(amplitude/2.0)
self._src2.set_amplitude(amplitude/2.0)
elif self[TYPE_KEY] == "sweep":
self._src.set_k(amplitude)
else:
return True # Waveform not yet set
if self._verbose:
print "Set amplitude to:", amplitude
return True
def get_options():
usage="%prog: [options]"
parser = OptionParser(option_class=eng_option, usage=usage)
parser.add_option("-a", "--args", type="string", default="",
help="UHD device address args , [default=%default]")
parser.add_option("", "--spec", type="string", default=None,
help="Subdevice of UHD device where appropriate")
parser.add_option("-A", "--antenna", type="string", default=None,
help="select Rx Antenna where appropriate")
parser.add_option("-s", "--samp-rate", type="eng_float", default=1e6,
help="set sample rate (bandwidth) [default=%default]")
parser.add_option("-g", "--gain", type="eng_float", default=None,
help="set gain in dB (default is midpoint)")
parser.add_option("-f", "--tx-freq", type="eng_float", default=None,
help="Set carrier frequency to FREQ [default=mid-point]",
metavar="FREQ")
parser.add_option("-x", "--waveform-freq", type="eng_float", default=0,
help="Set baseband waveform frequency to FREQ [default=%default]")
parser.add_option("-y", "--waveform2-freq", type="eng_float", default=None,
help="Set 2nd waveform frequency to FREQ [default=%default]")
parser.add_option("--sine", dest="type", action="store_const", const=gr.GR_SIN_WAVE,
help="Generate a carrier modulated by a complex sine wave",
default=gr.GR_SIN_WAVE)
parser.add_option("--const", dest="type", action="store_const", const=gr.GR_CONST_WAVE,
help="Generate a constant carrier")
parser.add_option("--offset", type="eng_float", default=0,
help="Set waveform phase offset to OFFSET [default=%default]")
parser.add_option("--gaussian", dest="type", action="store_const", const=gr.GR_GAUSSIAN,
help="Generate Gaussian random output")
parser.add_option("--uniform", dest="type", action="store_const", const=gr.GR_UNIFORM,
help="Generate Uniform random output")
parser.add_option("--2tone", dest="type", action="store_const", const="2tone",
help="Generate Two Tone signal for IMD testing")
parser.add_option("--sweep", dest="type", action="store_const", const="sweep",
help="Generate a swept sine wave")
parser.add_option("", "--amplitude", type="eng_float", default=0.15,
help="Set output amplitude to AMPL (0.0-1.0) [default=%default]",
metavar="AMPL")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="Use verbose console output [default=%default]")
parser.add_option("", "--show-async-msg", action="store_true", default=False,
help="Show asynchronous message notifications from UHD [default=%default]")
(options, args) = parser.parse_args()
return (options, args)
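# Example invocation (illustrative values; the device address is hypothetical):
#   uhd_siggen_base.py -a "addr=192.168.10.2" -f 2.45G -s 4M --sweep -x 1M -y 0.5
# generates a 1 MHz-wide sweep centered on 2.45 GHz at a 0.5 Hz sweep rate.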
# If this script is executed, the following runs. If it is imported,
# the below does not run.
def test_main():
if gr.enable_realtime_scheduling() != gr.RT_OK:
print "Note: failed to enable realtime scheduling, continuing"
# Grab command line options and create top block
try:
(options, args) = get_options()
tb = top_block(options, args)
except RuntimeError, e:
print e
sys.exit(1)
tb.start()
raw_input('Press Enter to quit: ')
tb.stop()
tb.wait()
# Make sure to create the top block (tb) within a function: the code in
# main lets tb go out of scope on return, which calls the destructor on
# the USRP object and stops the transmit. What's odd is that GRC works
# fine with tb in __main__; perhaps that's because of the try/except
# clauses around tb.
if __name__ == "__main__":
test_main()
|
katsikas/gnuradio
|
gr-uhd/apps/uhd_siggen_base.py
|
Python
|
gpl-3.0
| 16,762
|
[
"Gaussian"
] |
f224f14ed378e2b6fba8d1590f85666d6582c2971bc22a880da928f105e01f0b
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Codon tables based on those from the NCBI.
These tables are based on parsing the NCBI file:
ftp://ftp.ncbi.nih.gov/entrez/misc/data/gc.prt
Last updated for Version 3.9
"""
from Bio import Alphabet
from Bio.Alphabet import IUPAC
from Bio.Data import IUPACData
unambiguous_dna_by_name = {}
unambiguous_dna_by_id = {}
unambiguous_rna_by_name = {}
unambiguous_rna_by_id = {}
generic_by_name = {} # unambiguous DNA or RNA
generic_by_id = {} # unambiguous DNA or RNA
ambiguous_dna_by_name = {}
ambiguous_dna_by_id = {}
ambiguous_rna_by_name = {}
ambiguous_rna_by_id = {}
ambiguous_generic_by_name = {} # ambiguous DNA or RNA
ambiguous_generic_by_id = {} # ambiguous DNA or RNA
# standard IUPAC unambiguous codons
standard_dna_table = None
standard_rna_table = None
# In the future, the back_table could return a statistically
# appropriate distribution of codons, so do not cache the results of
# back_table lookups!
class TranslationError(Exception):
pass
class CodonTable(object):
nucleotide_alphabet = Alphabet.generic_nucleotide
protein_alphabet = Alphabet.generic_protein
forward_table = {} # only includes codons which actually code
back_table = {} # for back translations
start_codons = []
stop_codons = []
# Not always called from derived classes!
def __init__(self, nucleotide_alphabet = nucleotide_alphabet,
protein_alphabet = protein_alphabet,
forward_table = forward_table, back_table = back_table,
start_codons = start_codons, stop_codons = stop_codons):
self.nucleotide_alphabet = nucleotide_alphabet
self.protein_alphabet = protein_alphabet
self.forward_table = forward_table
self.back_table = back_table
self.start_codons = start_codons
self.stop_codons = stop_codons
def __str__(self):
"""Returns a simple text representation of the codon table
e.g.
>>> import Bio.Data.CodonTable
>>> print Bio.Data.CodonTable.standard_dna_table
>>> print Bio.Data.CodonTable.generic_by_id[1]
"""
if self.id:
answer = "Table %i" % self.id
else:
answer = "Table ID unknown"
if self.names:
answer += " " + ", ".join(filter(None, self.names))
#Use the main four letters (and the conventional ordering)
#even for ambiguous tables
letters = self.nucleotide_alphabet.letters
if isinstance(self.nucleotide_alphabet, Alphabet.DNAAlphabet) \
or (letters is not None and "T" in letters):
letters = "TCAG"
else:
#Should be either RNA or generic nucleotides,
#e.g. Bio.Data.CodonTable.generic_by_id[1]
letters = "UCAG"
#Build the table...
answer=answer + "\n\n |" + "|".join( \
[" %s " % c2 for c2 in letters] \
) + "|"
answer=answer + "\n--+" \
+ "+".join(["---------" for c2 in letters]) + "+--"
for c1 in letters:
for c3 in letters:
line = c1 + " |"
for c2 in letters:
codon = c1+c2+c3
line = line + " %s" % codon
if codon in self.stop_codons:
line = line + " Stop|"
else:
try:
amino = self.forward_table[codon]
except KeyError:
amino = "?"
except TranslationError:
amino = "?"
if codon in self.start_codons:
line = line + " %s(s)|" % amino
else:
line = line + " %s |" % amino
line = line + " " + c3
answer = answer + "\n"+ line
answer=answer + "\n--+" \
+ "+".join(["---------" for c2 in letters]) + "+--"
return answer
def make_back_table(table, default_stop_codon):
# ONLY RETURNS A SINGLE CODON
# Do the sort so changes in the hash implementation won't affect
# the result when one amino acid is coded by more than one codon.
back_table = {}
for key in sorted(table):
back_table[table[key]] = key
back_table[None] = default_stop_codon
return back_table
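# For illustration (codon values taken from the standard table): with
# table = {'TTT': 'F', 'TTC': 'F'} and default_stop_codon = 'TAA', the
# sorted iteration assigns back_table['F'] = 'TTC' and then overwrites it
# with 'TTT', so the result is deterministically {'F': 'TTT', None: 'TAA'}.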
class NCBICodonTable(CodonTable):
nucleotide_alphabet = Alphabet.generic_nucleotide
protein_alphabet = IUPAC.protein
def __init__(self, id, names, table, start_codons, stop_codons):
self.id = id
self.names = names
self.forward_table = table
self.back_table = make_back_table(table, stop_codons[0])
self.start_codons = start_codons
self.stop_codons = stop_codons
class NCBICodonTableDNA(NCBICodonTable):
nucleotide_alphabet = IUPAC.unambiguous_dna
class NCBICodonTableRNA(NCBICodonTable):
nucleotide_alphabet = IUPAC.unambiguous_rna
######### Deal with ambiguous forward translations
class AmbiguousCodonTable(CodonTable):
def __init__(self, codon_table,
ambiguous_nucleotide_alphabet,
ambiguous_nucleotide_values,
ambiguous_protein_alphabet,
ambiguous_protein_values):
CodonTable.__init__(self,
ambiguous_nucleotide_alphabet,
ambiguous_protein_alphabet,
AmbiguousForwardTable(codon_table.forward_table,
ambiguous_nucleotide_values,
ambiguous_protein_values),
codon_table.back_table,
# These two are WRONG! I need to get the
# list of ambiguous codons which code for
# the stop codons XXX
list_ambiguous_codons(codon_table.start_codons, ambiguous_nucleotide_values),
list_ambiguous_codons(codon_table.stop_codons, ambiguous_nucleotide_values)
)
self._codon_table = codon_table
# Be sneaky and forward attribute lookups to the original table.
# This lets us get the names, if the original table is an NCBI
# table.
def __getattr__(self, name):
return getattr(self._codon_table, name)
def list_possible_proteins(codon, forward_table, ambiguous_nucleotide_values):
c1, c2, c3 = codon
x1 = ambiguous_nucleotide_values[c1]
x2 = ambiguous_nucleotide_values[c2]
x3 = ambiguous_nucleotide_values[c3]
possible = {}
stops = []
for y1 in x1:
for y2 in x2:
for y3 in x3:
try:
possible[forward_table[y1+y2+y3]] = 1
except KeyError:
                    # a KeyError here means we tripped over a stop codon
stops.append(y1+y2+y3)
if stops:
if possible:
raise TranslationError("ambiguous codon '%s' codes " % codon \
+ "for both proteins and stop codons")
# This is a true stop codon - tell the caller about it
raise KeyError(codon)
return possible.keys()
def list_ambiguous_codons(codons, ambiguous_nucleotide_values):
"""Extends a codon list to include all possible ambigous codons.
e.g. ['TAG', 'TAA'] -> ['TAG', 'TAA', 'TAR']
['UAG', 'UGA'] -> ['UAG', 'UGA', 'URA']
Note that ['TAG', 'TGA'] -> ['TAG', 'TGA'], this does not add 'TRR'.
Thus only two more codons are added in the following:
e.g. ['TGA', 'TAA', 'TAG'] -> ['TGA', 'TAA', 'TAG', 'TRA', 'TAR']
Returns a new (longer) list of codon strings.
"""
#Note ambiguous_nucleotide_values['R'] = 'AG' (etc)
#This will generate things like 'TRR' from ['TAG', 'TGA'], which
#we don't want to include:
c1_list = sorted(letter for (letter, meanings) \
in ambiguous_nucleotide_values.iteritems() \
if set([codon[0] for codon in codons]).issuperset(set(meanings)))
c2_list = sorted(letter for (letter, meanings) \
in ambiguous_nucleotide_values.iteritems() \
if set([codon[1] for codon in codons]).issuperset(set(meanings)))
c3_list = sorted(letter for (letter, meanings) \
in ambiguous_nucleotide_values.iteritems() \
if set([codon[2] for codon in codons]).issuperset(set(meanings)))
#candidates is a list (not a set) to preserve the iteration order
candidates = []
for c1 in c1_list:
for c2 in c2_list:
for c3 in c3_list:
codon = c1+c2+c3
if codon not in candidates and codon not in codons:
candidates.append(codon)
answer = codons[:] #copy
#print "Have %i new candidates" % len(candidates)
for ambig_codon in candidates:
wanted = True
#e.g. 'TRR' -> 'TAA', 'TAG', 'TGA', 'TGG'
for codon in [c1+c2+c3 \
for c1 in ambiguous_nucleotide_values[ambig_codon[0]] \
for c2 in ambiguous_nucleotide_values[ambig_codon[1]] \
for c3 in ambiguous_nucleotide_values[ambig_codon[2]]]:
if codon not in codons:
#This ambiguous codon can code for a non-stop, exclude it!
wanted=False
#print "Rejecting %s" % ambig_codon
continue
if wanted:
answer.append(ambig_codon)
return answer
assert list_ambiguous_codons(['TGA', 'TAA'],IUPACData.ambiguous_dna_values) == ['TGA', 'TAA', 'TRA']
assert list_ambiguous_codons(['TAG', 'TGA'],IUPACData.ambiguous_dna_values) == ['TAG', 'TGA']
assert list_ambiguous_codons(['TAG', 'TAA'],IUPACData.ambiguous_dna_values) == ['TAG', 'TAA', 'TAR']
assert list_ambiguous_codons(['UAG', 'UAA'],IUPACData.ambiguous_rna_values) == ['UAG', 'UAA', 'UAR']
assert list_ambiguous_codons(['TGA', 'TAA', 'TAG'],IUPACData.ambiguous_dna_values) == ['TGA', 'TAA', 'TAG', 'TAR', 'TRA']
# Forward translation is "onto", that is, any given codon always maps
# to the same protein, or it doesn't map at all. Thus, I can build
# off of an existing table to produce the ambiguous mappings.
#
# This handles the general case. Perhaps it's overkill?
# >>> t = CodonTable.ambiguous_dna_by_id[1]
# >>> t.forward_table["AAT"]
# 'N'
# >>> t.forward_table["GAT"]
# 'D'
# >>> t.forward_table["RAT"]
# 'B'
# >>> t.forward_table["YTA"]
# 'L'
class AmbiguousForwardTable(object):
def __init__(self, forward_table, ambiguous_nucleotide, ambiguous_protein):
self.forward_table = forward_table
self.ambiguous_nucleotide = ambiguous_nucleotide
self.ambiguous_protein = ambiguous_protein
inverted = {}
for name, val in ambiguous_protein.iteritems():
for c in val:
x = inverted.get(c, {})
x[name] = 1
inverted[c] = x
for name, val in inverted.iteritems():
inverted[name] = val.keys()
self._inverted = inverted
self._cache = {}
def get(self, codon, failobj = None):
try:
return self.__getitem__(codon)
except KeyError:
return failobj
def __getitem__(self, codon):
try:
x = self._cache[codon]
except KeyError:
pass
else:
if x is TranslationError:
raise TranslationError(codon) # no unique translation
if x is KeyError:
raise KeyError(codon) # it's a stop codon
return x
try:
x = self.forward_table[codon]
self._cache[codon] = x
return x
except KeyError:
pass
# XXX Need to make part of this into a method which returns
# a list of all possible encodings for a codon!
try:
possible = list_possible_proteins(codon,
self.forward_table,
self.ambiguous_nucleotide)
except KeyError:
self._cache[codon] = KeyError
raise KeyError(codon) # stop codon
except TranslationError:
self._cache[codon] = TranslationError
raise TranslationError(codon) # does not code
assert len(possible) > 0, "unambiguous codons must code"
# Hah! Only one possible protein, so use it
if len(possible) == 1:
self._cache[codon] = possible[0]
return possible[0]
# See if there's an ambiguous protein encoding for the multiples.
# Find residues which exist in every coding set.
ambiguous_possible = {}
for amino in possible:
for term in self._inverted[amino]:
ambiguous_possible[term] = ambiguous_possible.get(term, 0) + 1
n = len(possible)
possible = []
for amino, val in ambiguous_possible.iteritems():
if val == n:
possible.append(amino)
# No amino acid encoding for the results
if len(possible) == 0:
self._cache[codon] = TranslationError
raise TranslationError(codon) # no valid translation
# All of these are valid, so choose one
        # To be unique, sort by smallest ambiguity then alphabetically
# Can get this if "X" encodes for everything.
#def _sort(x, y, table = self.ambiguous_protein):
# a = cmp(len(table[x]), len(table[y]))
# if a == 0:
# return cmp(x, y)
# return a
#Sort by key is 2.x and 3.x compatible
possible.sort(key=lambda x:(len(self.ambiguous_protein[x]), x))
x = possible[0]
self._cache[codon] = x
return x
def register_ncbi_table(name, alt_name, id,
table, start_codons, stop_codons):
"""Turns codon table data into objects, and stores them in the dictionaries (PRIVATE)."""
#In most cases names are divided by "; ", however there is also
#'Bacterial and Plant Plastid' (which used to be just 'Bacterial')
names = [x.strip() for x in name.replace(" and ","; ").split("; ")]
dna = NCBICodonTableDNA(id, names + [alt_name], table, start_codons,
stop_codons)
ambig_dna = AmbiguousCodonTable(dna,
IUPAC.ambiguous_dna,
IUPACData.ambiguous_dna_values,
IUPAC.extended_protein,
IUPACData.extended_protein_values)
# replace all T's with U's for the RNA tables
rna_table = {}
generic_table = {}
for codon, val in table.iteritems():
generic_table[codon] = val
codon = codon.replace("T", "U")
generic_table[codon] = val
rna_table[codon] = val
rna_start_codons = []
generic_start_codons = []
for codon in start_codons:
generic_start_codons.append(codon)
codon = codon.replace("T", "U")
generic_start_codons.append(codon)
rna_start_codons.append(codon)
rna_stop_codons = []
generic_stop_codons = []
for codon in stop_codons:
generic_stop_codons.append(codon)
codon = codon.replace("T", "U")
generic_stop_codons.append(codon)
rna_stop_codons.append(codon)
generic = NCBICodonTable(id, names + [alt_name], generic_table,
generic_start_codons, generic_stop_codons)
#The following isn't very elegant, but seems to work nicely.
_merged_values = dict(IUPACData.ambiguous_rna_values.iteritems())
_merged_values["T"] = "U"
ambig_generic = AmbiguousCodonTable(generic,
Alphabet.NucleotideAlphabet(),
_merged_values,
IUPAC.extended_protein,
IUPACData.extended_protein_values)
rna = NCBICodonTableRNA(id, names + [alt_name], rna_table,
rna_start_codons, rna_stop_codons)
ambig_rna = AmbiguousCodonTable(rna,
IUPAC.ambiguous_rna,
IUPACData.ambiguous_rna_values,
IUPAC.extended_protein,
IUPACData.extended_protein_values)
if id == 1:
global standard_dna_table, standard_rna_table
standard_dna_table = dna
standard_rna_table = rna
unambiguous_dna_by_id[id] = dna
unambiguous_rna_by_id[id] = rna
generic_by_id[id] = generic
ambiguous_dna_by_id[id] = ambig_dna
ambiguous_rna_by_id[id] = ambig_rna
ambiguous_generic_by_id[id] = ambig_generic
if alt_name is not None:
names.append(alt_name)
for name in names:
unambiguous_dna_by_name[name] = dna
unambiguous_rna_by_name[name] = rna
generic_by_name[name] = generic
ambiguous_dna_by_name[name] = ambig_dna
ambiguous_rna_by_name[name] = ambig_rna
ambiguous_generic_by_name[name] = ambig_generic
### These tables created from the data file
### ftp://ftp.ncbi.nih.gov/entrez/misc/data/gc.prt
### using the following:
##import re
##for line in open("gc.prt").readlines():
## if line[:2] == " {":
## names = []
## id = None
## aa = None
## start = None
## bases = []
## elif line[:6] == " name":
## names.append(re.search('"([^"]*)"', line).group(1))
## elif line[:8] == " name":
## names.append(re.search('"(.*)$', line).group(1))
## elif line == ' Mitochondrial; Mycoplasma; Spiroplasma" ,\n':
## names[-1] = names[-1] + " Mitochondrial; Mycoplasma; Spiroplasma"
## elif line[:4] == " id":
## id = int(re.search('(\d+)', line).group(1))
## elif line[:10] == " ncbieaa ":
## aa = line[12:12+64]
## elif line[:10] == " sncbieaa":
## start = line[12:12+64]
## elif line[:9] == " -- Base":
## bases.append(line[12:12+64])
## elif line[:2] == " }":
## assert names != [] and id is not None and aa is not None
## assert start is not None and bases != []
## if len(names) == 1:
## names.append(None)
## print "register_ncbi_table(name = %s," % repr(names[0])
## print " alt_name = %s, id = %d," % \
## (repr(names[1]), id)
## print " table = {"
## s = " "
## for i in range(64):
## if aa[i] != "*":
## t = " '%s%s%s': '%s'," % (bases[0][i], bases[1][i],
## bases[2][i], aa[i])
## if len(s) + len(t) > 75:
## print s
## s = " " + t
## else:
## s = s + t
## print s, "},"
## s = " stop_codons = ["
## for i in range(64):
## if aa[i] == "*":
## t = " '%s%s%s'," % (bases[0][i], bases[1][i], bases[2][i])
## if len(s) + len(t) > 75:
## print s
## s = " " + t
## else:
## s = s + t
## print s, "],"
## s = " start_codons = ["
## for i in range(64):
## if start[i] == "M":
## t = " '%s%s%s'," % (bases[0][i], bases[1][i], bases[2][i])
## if len(s) + len(t) > 75:
## print s
## s = " " + t
## else:
## s = s + t
## print s, "]"
## print " )"
## elif line[:2] == "--" or line == "\n" or line == "}\n" or \
## line == 'Genetic-code-table ::= {\n':
## pass
## else:
## raise Exception("Unparsed: " + repr(line))
register_ncbi_table(name = 'Standard',
alt_name = 'SGC0', id = 1,
table = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', },
stop_codons = [ 'TAA', 'TAG', 'TGA', ],
start_codons = [ 'TTG', 'CTG', 'ATG', ]
)
register_ncbi_table(name = 'Vertebrate Mitochondrial',
alt_name = 'SGC1', id = 2,
table = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'M', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'GTT': 'V',
'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A', 'GCC': 'A',
'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D', 'GAA': 'E',
'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G', },
stop_codons = [ 'TAA', 'TAG', 'AGA', 'AGG', ],
start_codons = [ 'ATT', 'ATC', 'ATA', 'ATG', 'GTG', ]
)
register_ncbi_table(name = 'Yeast Mitochondrial',
alt_name = 'SGC2', id = 3,
table = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'T',
'CTC': 'T', 'CTA': 'T', 'CTG': 'T', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'M', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons = [ 'TAA', 'TAG', ],
start_codons = [ 'ATA', 'ATG', ]
)
register_ncbi_table(name = 'Mold Mitochondrial; Protozoan Mitochondrial; Coelenterate Mitochondrial; Mycoplasma; Spiroplasma',
alt_name = 'SGC3', id = 4,
table = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons = [ 'TAA', 'TAG', ],
start_codons = [ 'TTA', 'TTG', 'CTG', 'ATT', 'ATC',
'ATA', 'ATG', 'GTG', ]
)
register_ncbi_table(name = 'Invertebrate Mitochondrial',
alt_name = 'SGC4', id = 5,
table = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'M', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'S',
'AGG': 'S', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons = [ 'TAA', 'TAG', ],
start_codons = [ 'TTG', 'ATT', 'ATC', 'ATA', 'ATG',
'GTG', ]
)
register_ncbi_table(name = 'Ciliate Nuclear; Dasycladacean Nuclear; Hexamita Nuclear',
alt_name = 'SGC5', id = 6,
table = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TAA': 'Q', 'TAG': 'Q', 'TGT': 'C', 'TGC': 'C', 'TGG': 'W',
'CTT': 'L', 'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P',
'CCC': 'P', 'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H',
'CAA': 'Q', 'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R',
'CGG': 'R', 'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M',
'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N',
'AAC': 'N', 'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S',
'AGA': 'R', 'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V',
'GTG': 'V', 'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A',
'GAT': 'D', 'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G',
'GGC': 'G', 'GGA': 'G', 'GGG': 'G', },
stop_codons = [ 'TGA', ],
start_codons = [ 'ATG', ]
)
register_ncbi_table(name = 'Echinoderm Mitochondrial; Flatworm Mitochondrial',
alt_name = 'SGC8', id = 9,
table = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'N', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'S',
'AGG': 'S', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons = [ 'TAA', 'TAG', ],
start_codons = [ 'ATG', 'GTG', ]
)
register_ncbi_table(name = 'Euplotid Nuclear',
alt_name = 'SGC9', id = 10,
table = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'C', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons = [ 'TAA', 'TAG', ],
start_codons = [ 'ATG', ]
)
register_ncbi_table(name = 'Bacterial and Plant Plastid',
alt_name = None, id = 11,
table = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', },
stop_codons = [ 'TAA', 'TAG', 'TGA', ],
start_codons = [ 'TTG', 'CTG', 'ATT', 'ATC', 'ATA',
'ATG', 'GTG', ]
)
register_ncbi_table(name = 'Alternative Yeast Nuclear',
alt_name = None, id = 12,
table = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
'CTA': 'L', 'CTG': 'S', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', },
stop_codons = [ 'TAA', 'TAG', 'TGA', ],
start_codons = [ 'CTG', 'ATG', ]
)
register_ncbi_table(name = 'Ascidian Mitochondrial',
alt_name = None, id = 13,
table = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'M', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'G',
'AGG': 'G', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons = [ 'TAA', 'TAG', ],
start_codons = [ 'TTG', 'ATA', 'ATG', 'GTG', ]
)
register_ncbi_table(name = 'Alternative Flatworm Mitochondrial',
alt_name = None, id = 14,
table = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TAA': 'Y', 'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W',
'CTT': 'L', 'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P',
'CCC': 'P', 'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H',
'CAA': 'Q', 'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R',
'CGG': 'R', 'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M',
'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N',
'AAC': 'N', 'AAA': 'N', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S',
'AGA': 'S', 'AGG': 'S', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V',
'GTG': 'V', 'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A',
'GAT': 'D', 'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G',
'GGC': 'G', 'GGA': 'G', 'GGG': 'G', },
stop_codons = [ 'TAG', ],
start_codons = [ 'ATG', ]
)
register_ncbi_table(name = 'Blepharisma Macronuclear',
alt_name = None, id = 15,
table = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TAG': 'Q', 'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons = [ 'TAA', 'TGA', ],
start_codons = [ 'ATG', ]
)
register_ncbi_table(name = 'Chlorophycean Mitochondrial',
alt_name = None, id = 16,
table = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TAG': 'L', 'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons = [ 'TAA', 'TGA', ],
start_codons = [ 'ATG', ]
)
register_ncbi_table(name = 'Trematode Mitochondrial',
alt_name = None, id = 21,
table = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'M', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'N', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'S',
'AGG': 'S', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons = [ 'TAA', 'TAG', ],
start_codons = [ 'ATG', 'GTG', ]
)
register_ncbi_table(name = 'Scenedesmus obliquus Mitochondrial',
alt_name = None, id = 22,
table = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y', 'TAG': 'L',
'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', },
stop_codons = [ 'TCA', 'TAA', 'TGA', ],
start_codons = [ 'ATG', ]
)
register_ncbi_table(name = 'Thraustochytrium Mitochondrial',
alt_name = None, id = 23,
table = {
'TTT': 'F', 'TTC': 'F', 'TTG': 'L', 'TCT': 'S', 'TCC': 'S',
'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y', 'TGT': 'C',
'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L', 'CTA': 'L',
'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P', 'CCG': 'P',
'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q', 'CGT': 'R',
'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I', 'ATC': 'I',
'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T', 'ACA': 'T',
'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K', 'AAG': 'K',
'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R', 'GTT': 'V',
'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A', 'GCC': 'A',
'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D', 'GAA': 'E',
'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G', },
stop_codons = [ 'TTA', 'TAA', 'TAG', 'TGA', ],
start_codons = [ 'ATT', 'ATG', 'GTG', ]
)
# Basic sanity test.
for key, val in generic_by_name.iteritems():
assert key in ambiguous_generic_by_name[key].names
for key, val in generic_by_id.iteritems():
assert ambiguous_generic_by_id[key].id == key
del key, val
for n in ambiguous_generic_by_id:
assert ambiguous_rna_by_id[n].forward_table["GUU"] == "V"
assert ambiguous_rna_by_id[n].forward_table["GUN"] == "V"
if n != 23 :
#For table 23, UUN = F, L or stop.
assert ambiguous_rna_by_id[n].forward_table["UUN"] == "X" #F or L
    #R = A or G, so URA = UAA or UGA / TRA = TAA or TGA = stop codons
if "UAA" in unambiguous_rna_by_id[n].stop_codons \
and "UGA" in unambiguous_rna_by_id[n].stop_codons:
try:
print ambiguous_dna_by_id[n].forward_table["TRA"]
assert False, "Should be a stop only"
except KeyError:
pass
assert "URA" in ambiguous_generic_by_id[n].stop_codons
assert "URA" in ambiguous_rna_by_id[n].stop_codons
assert "TRA" in ambiguous_generic_by_id[n].stop_codons
assert "TRA" in ambiguous_dna_by_id[n].stop_codons
del n
assert ambiguous_generic_by_id[1] == ambiguous_generic_by_name["Standard"]
assert ambiguous_generic_by_id[4] == ambiguous_generic_by_name["SGC3"]
assert ambiguous_generic_by_id[11] == ambiguous_generic_by_name["Bacterial"]
assert ambiguous_generic_by_id[11] == ambiguous_generic_by_name["Plant Plastid"]
assert ambiguous_generic_by_id[15] == ambiguous_generic_by_name['Blepharisma Macronuclear']
assert generic_by_id[1] == generic_by_name["Standard"]
assert generic_by_id[4] == generic_by_name["SGC3"]
assert generic_by_id[11] == generic_by_name["Bacterial"]
assert generic_by_id[11] == generic_by_name["Plant Plastid"]
assert generic_by_id[15] == generic_by_name['Blepharisma Macronuclear']
|
bryback/quickseq
|
genescript/Bio/Data/CodonTable.py
|
Python
|
mit
| 40,803
|
[
"Biopython"
] |
b4bb149a7146c1c872c58ac0d1302db75d5b3c3ad83103a577374a2e497a34b4
|
../../../../../share/pyshared/orca/scripts/default.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/default.py
|
Python
|
gpl-3.0
| 53
|
[
"ORCA"
] |
aced1360333ea38ab250a38c83ff239d6ead8aa7a95afb444427051fed96a6f5
|
# -*- coding: utf-8 -*-
from __future__ import division
import skimage.io
import skimage.feature
import skimage.color
import skimage.transform
import skimage.util
import skimage.segmentation
import numpy
# "Selective Search for Object Recognition" by J.R.R. Uijlings et al.
#
# - Modified version with LBP extractor for texture vectorization
def _generate_segments(im_orig, scale, sigma, min_size):
"""
        segment smallest regions by the algorithm of Felzenszwalb and
        Huttenlocher
"""
# open the Image
im_mask = skimage.segmentation.felzenszwalb(
skimage.util.img_as_float(im_orig), scale=scale, sigma=sigma,
min_size=min_size)
# merge mask channel to the image as a 4th channel
im_orig = numpy.append(
im_orig, numpy.zeros(im_orig.shape[:2])[:, :, numpy.newaxis], axis=2)
im_orig[:, :, 3] = im_mask
return im_orig
def _sim_colour(r1, r2):
"""
calculate the sum of histogram intersection of colour
"""
return sum([min(a, b) for a, b in zip(r1["hist_c"], r2["hist_c"])])
def _sim_texture(r1, r2):
"""
calculate the sum of histogram intersection of texture
"""
return sum([min(a, b) for a, b in zip(r1["hist_t"], r2["hist_t"])])
def _sim_size(r1, r2, imsize):
"""
calculate the size similarity over the image
"""
return 1.0 - (r1["size"] + r2["size"]) / imsize
def _sim_fill(r1, r2, imsize):
"""
calculate the fill similarity over the image
"""
bbsize = (
(max(r1["max_x"], r2["max_x"]) - min(r1["min_x"], r2["min_x"]))
* (max(r1["max_y"], r2["max_y"]) - min(r1["min_y"], r2["min_y"]))
)
return 1.0 - (bbsize - r1["size"] - r2["size"]) / imsize
def _calc_sim(r1, r2, imsize):
return (_sim_colour(r1, r2) + _sim_texture(r1, r2)
+ _sim_size(r1, r2, imsize) + _sim_fill(r1, r2, imsize))
def _calc_colour_hist(img):
"""
calculate colour histogram for each region
        the size of the output histogram will be BINS * COLOUR_CHANNELS(3)
        the number of bins is 25, the same as in [uijlings_ijcv2013_draft.pdf]
        computed over the HSV colour space
"""
BINS = 25
hist = numpy.array([])
for colour_channel in (0, 1, 2):
# extracting one colour channel
c = img[:, colour_channel]
# calculate histogram for each colour and join to the result
hist = numpy.concatenate(
[hist] + [numpy.histogram(c, BINS, (0.0, 255.0))[0]])
# L1 normalize
hist = hist / len(img)
return hist
def _calc_texture_gradient(img):
"""
calculate texture gradient for entire image
        The original SelectiveSearch algorithm proposed Gaussian derivatives
        for 8 orientations, but we use LBP instead.
        the output will be [height][width][channels]
"""
ret = numpy.zeros((img.shape[0], img.shape[1], img.shape[2]))
for colour_channel in (0, 1, 2):
ret[:, :, colour_channel] = skimage.feature.local_binary_pattern(
img[:, :, colour_channel], 8, 1.0)
return ret
def _calc_texture_hist(img):
"""
calculate texture histogram for each region
calculate the histogram of gradient for each colours
the size of output histogram will be
BINS * ORIENTATIONS * COLOUR_CHANNELS(3)
"""
BINS = 10
hist = numpy.array([])
for colour_channel in (0, 1, 2):
# mask by the colour channel
fd = img[:, colour_channel]
# calculate histogram for each orientation and concatenate them all
# and join to the result
hist = numpy.concatenate(
[hist] + [numpy.histogram(fd, BINS, (0.0, 1.0))[0]])
# L1 Normalize
hist = hist / len(img)
return hist
def _extract_regions(img):
R = {}
# get hsv image
hsv = skimage.color.rgb2hsv(img[:, :, :3])
# pass 1: count pixel positions
for y, i in enumerate(img):
for x, (r, g, b, l) in enumerate(i):
# initialize a new region
if l not in R:
R[l] = {
"min_x": 0xffff, "min_y": 0xffff,
"max_x": 0, "max_y": 0, "labels": [l]}
# bounding box
if R[l]["min_x"] > x:
R[l]["min_x"] = x
if R[l]["min_y"] > y:
R[l]["min_y"] = y
if R[l]["max_x"] < x:
R[l]["max_x"] = x
if R[l]["max_y"] < y:
R[l]["max_y"] = y
# pass 2: calculate texture gradient
tex_grad = _calc_texture_gradient(img)
# pass 3: calculate colour histogram of each region
for k, v in list(R.items()):
# colour histogram
masked_pixels = hsv[:, :, :][img[:, :, 3] == k]
R[k]["size"] = len(masked_pixels / 4)
R[k]["hist_c"] = _calc_colour_hist(masked_pixels)
# texture histogram
R[k]["hist_t"] = _calc_texture_hist(tex_grad[:, :][img[:, :, 3] == k])
return R
def _extract_neighbours(regions):
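    # Two regions are considered neighbours when their bounding boxes
    # partially overlap: intersect() tests whether any corner of b's box
    # falls inside a's box.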
def intersect(a, b):
if (a["min_x"] < b["min_x"] < a["max_x"]
and a["min_y"] < b["min_y"] < a["max_y"]) or (
a["min_x"] < b["max_x"] < a["max_x"]
and a["min_y"] < b["max_y"] < a["max_y"]) or (
a["min_x"] < b["min_x"] < a["max_x"]
and a["min_y"] < b["max_y"] < a["max_y"]) or (
a["min_x"] < b["max_x"] < a["max_x"]
and a["min_y"] < b["min_y"] < a["max_y"]):
return True
return False
R = list(regions.items())
neighbours = []
for cur, a in enumerate(R[:-1]):
for b in R[cur + 1:]:
if intersect(a[1], b[1]):
neighbours.append((a, b))
return neighbours
def _merge_regions(r1, r2):
new_size = r1["size"] + r2["size"]
rt = {
"min_x": min(r1["min_x"], r2["min_x"]),
"min_y": min(r1["min_y"], r2["min_y"]),
"max_x": max(r1["max_x"], r2["max_x"]),
"max_y": max(r1["max_y"], r2["max_y"]),
"size": new_size,
"hist_c": (
r1["hist_c"] * r1["size"] + r2["hist_c"] * r2["size"]) / new_size,
"hist_t": (
r1["hist_t"] * r1["size"] + r2["hist_t"] * r2["size"]) / new_size,
"labels": r1["labels"] + r2["labels"]
}
return rt
def selective_search(
im_orig, scale=1.0, sigma=0.8, min_size=50):
'''Selective Search
Parameters
----------
im_orig : ndarray
Input image
scale : int
Free parameter. Higher means larger clusters in felzenszwalb segmentation.
sigma : float
Width of Gaussian kernel for felzenszwalb segmentation.
min_size : int
Minimum component size for felzenszwalb segmentation.
Returns
-------
img : ndarray
image with region label
region label is stored in the 4th value of each pixel [r,g,b,(region)]
regions : array of dict
[
{
'rect': (left, top, width, height),
'labels': [...],
'size': component_size
},
...
]
'''
assert im_orig.shape[2] == 3, "3ch image is expected"
# load image and get smallest regions
# region label is stored in the 4th value of each pixel [r,g,b,(region)]
img = _generate_segments(im_orig, scale, sigma, min_size)
if img is None:
return None, {}
imsize = img.shape[0] * img.shape[1]
R = _extract_regions(img)
# extract neighbouring information
neighbours = _extract_neighbours(R)
# calculate initial similarities
S = {}
for (ai, ar), (bi, br) in neighbours:
S[(ai, bi)] = _calc_sim(ar, br, imsize)
    # hierarchical search
while S != {}:
# get highest similarity
i, j = sorted(S.items(), key=lambda i: i[1])[-1][0]
# merge corresponding regions
t = max(R.keys()) + 1.0
R[t] = _merge_regions(R[i], R[j])
# mark similarities for regions to be removed
key_to_delete = []
for k, v in list(S.items()):
if (i in k) or (j in k):
key_to_delete.append(k)
# remove old similarities of related regions
for k in key_to_delete:
del S[k]
# calculate similarity set with the new region
for k in [a for a in key_to_delete if a != (i, j)]:
n = k[1] if k[0] in (i, j) else k[0]
S[(t, n)] = _calc_sim(R[t], R[n], imsize)
regions = []
for k, r in list(R.items()):
regions.append({
'rect': (
r['min_x'], r['min_y'],
r['max_x'] - r['min_x'], r['max_y'] - r['min_y']),
'size': r['size'],
'labels': r['labels']
})
return img, regions
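# A minimal usage sketch (hedged: assumes scikit-image ships the `astronaut`
# sample image; parameter and threshold values below are illustrative):
if __name__ == "__main__":
    import skimage.data

    demo_img = skimage.data.astronaut()
    # each returned region dict carries 'rect' = (left, top, width, height)
    img_lbl, regions = selective_search(demo_img, scale=500, sigma=0.9, min_size=10)
    proposals = set()
    for r in regions:
        left, top, width, height = r['rect']
        # drop tiny or degenerate boxes
        if r['size'] < 2000 or width == 0 or height == 0:
            continue
        proposals.add(r['rect'])
    print("%d box proposals" % len(proposals))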
|
AlpacaDB/selectivesearch
|
selectivesearch/selectivesearch.py
|
Python
|
mit
| 8,841
|
[
"Gaussian"
] |
5c5626969ed98229b08ecc4a4b319992ebe8bb1ac37a3420c66f9f9938cc4354
|
# import_export_maplight/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .controllers import import_maplight_from_json
from .models import MapLightCandidate
from candidate.models import CandidateCampaignManager
from django.contrib import messages
from django.contrib.messages import get_messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import redirect, render
from exception.models import handle_record_not_saved_exception
import wevote_functions.admin
from wevote_functions.functions import positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
def import_export_maplight_index_view(request):
"""
Provide an index of import/export actions (for We Vote data maintenance)
"""
messages_on_stage = get_messages(request)
template_values = {
'messages_on_stage': messages_on_stage,
}
return render(request, 'import_export_maplight/index.html', template_values)
def import_maplight_from_json_view(request):
"""
    Take data from a MapLight JSON test file and store it in the local database
"""
# If person isn't signed in, we don't want to let them visit this page yet
if not request.user.is_authenticated():
return redirect('/admin')
import_maplight_from_json(request)
messages.add_message(request, messages.INFO, 'Maplight sample data imported.')
return HttpResponseRedirect(reverse('import_export:import_export_index', args=()))
def transfer_maplight_data_to_we_vote_tables(request):
# TODO We need to perhaps set up a table for these mappings that volunteers can add to?
# We need a plan for how volunteers can help us add to these mappings
# One possibility -- ask volunteers to update this Google Sheet, then write a csv importer:
# https://docs.google.com/spreadsheets/d/1havD7GCxmBhi-zLLMdOpSJlU_DtBjvb5IJNiXgno9Bk/edit#gid=0
politician_name_mapping_list = []
one_mapping = {
"google_civic_name": "Betty T. Yee",
"maplight_display_name": "Betty Yee",
"maplight_original_name": "Betty T Yee",
}
politician_name_mapping_list.append(one_mapping)
one_mapping = {
"google_civic_name": "Edmund G. \"Jerry\" Brown",
"maplight_display_name": "Jerry Brown",
"maplight_original_name": "",
}
politician_name_mapping_list.append(one_mapping)
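    # (Hedged sketch of the CSV importer suggested in the TODO above; the
    #  file name is hypothetical and the columns are assumed to match the
    #  three keys used in one_mapping:
    #
    #      import csv
    #      with open('politician_name_mappings.csv') as f:
    #          politician_name_mapping_list = list(csv.DictReader(f))
    #  )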
candidate_campaign_manager = CandidateCampaignManager()
maplight_candidates_current_query = MapLightCandidate.objects.all()
for one_candidate_from_maplight_table in maplight_candidates_current_query:
found_by_id = False
# Try to find a matching candidate
results = candidate_campaign_manager.retrieve_candidate_campaign_from_id_maplight(
one_candidate_from_maplight_table.candidate_id)
if not results['success']:
logger.warn(u"Candidate NOT found by MapLight id: {name}".format(
name=one_candidate_from_maplight_table.candidate_id
))
results = candidate_campaign_manager.retrieve_candidate_campaign_from_candidate_name(
one_candidate_from_maplight_table.display_name)
if not results['success']:
logger.warn(u"Candidate NOT found by display_name: {name}".format(
name=one_candidate_from_maplight_table.display_name
))
results = candidate_campaign_manager.retrieve_candidate_campaign_from_candidate_name(
one_candidate_from_maplight_table.original_name)
if not results['success']:
logger.warn(u"Candidate NOT found by original_name: {name}".format(
name=one_candidate_from_maplight_table.original_name
))
one_mapping_google_civic_name = ''
for one_mapping_found in politician_name_mapping_list:
if positive_value_exists(one_mapping_found['maplight_display_name']) \
and one_mapping_found['maplight_display_name'] == \
one_candidate_from_maplight_table.display_name:
one_mapping_google_civic_name = one_mapping_found['google_civic_name']
break
if positive_value_exists(one_mapping_google_civic_name):
results = candidate_campaign_manager.retrieve_candidate_campaign_from_candidate_name(
one_mapping_google_civic_name)
if not results['success'] or not positive_value_exists(one_mapping_google_civic_name):
logger.warn(u"Candidate NOT found by mapping to google_civic name: {name}".format(
name=one_mapping_google_civic_name
))
continue # Go to the next candidate
candidate_campaign_on_stage = results['candidate_campaign']
# Just in case the logic above let us through to here accidentally without a candidate_name value, don't proceed
if not positive_value_exists(candidate_campaign_on_stage.candidate_name):
continue
logger.debug(u"Candidate {name} found".format(
name=candidate_campaign_on_stage.candidate_name
))
try:
# Tie the maplight id to our record
if not found_by_id:
candidate_campaign_on_stage.id_maplight = one_candidate_from_maplight_table.candidate_id
# Bring over the photo
candidate_campaign_on_stage.photo_url_from_maplight = one_candidate_from_maplight_table.photo
# We can bring over other data as needed, like gender for example
candidate_campaign_on_stage.save()
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
messages.add_message(request, messages.INFO, 'MapLight data woven into We Vote tables.')
return HttpResponseRedirect(reverse('import_export:import_export_index', args=()))
|
wevote/WebAppPublic
|
import_export_maplight/views_admin.py
|
Python
|
bsd-3-clause
| 6,159
|
[
"VisIt"
] |
0edbb521653d44470fda32929129c5383ad4f5de04d9221c35ae6925eabdfc75
|
"""
It is used to load classes from a specific system.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import os
import six
import DIRAC
from DIRAC import gLogger
from DIRAC.Core.Utilities import List
from DIRAC.Core.Utilities.Extensions import extensionsByPriority
def loadObjects(path, reFilter=None, parentClass=None):
"""
    :param str path: the path to the system, for example: DIRAC/AccountingSystem
:param object reFilter: regular expression used to found the class
:param object parentClass: class instance
:return: dictionary containing the name of the class and its instance
"""
if not reFilter:
reFilter = re.compile(r".*[a-z1-9]\.py$")
pathList = List.fromChar(path, "/")
objectsToLoad = {}
# Find which object files match
for parentModule in extensionsByPriority():
if six.PY3:
objDir = os.path.join(os.path.dirname(os.path.dirname(DIRAC.__file__)), parentModule, *pathList)
else:
objDir = os.path.join(DIRAC.rootPath, parentModule, *pathList)
if not os.path.isdir(objDir):
continue
for objFile in os.listdir(objDir):
if reFilter.match(objFile):
pythonClassName = objFile[:-3]
if pythonClassName not in objectsToLoad:
gLogger.info("Adding to load queue %s/%s/%s" % (parentModule, path, pythonClassName))
objectsToLoad[pythonClassName] = parentModule
# Load them!
loadedObjects = {}
for pythonClassName in objectsToLoad:
parentModule = objectsToLoad[pythonClassName]
try:
# Where parentModule can be DIRAC, pathList is something like [ "AccountingSystem", "Client", "Types" ]
# And the python class name is.. well, the python class name
objPythonPath = "%s.%s.%s" % (parentModule, ".".join(pathList), pythonClassName)
objModule = __import__(objPythonPath,
globals(),
locals(), pythonClassName)
objClass = getattr(objModule, pythonClassName)
except Exception as e:
gLogger.error("Can't load type", "%s/%s: %s" % (parentModule, pythonClassName, str(e)))
continue
if parentClass == objClass:
continue
if parentClass and not issubclass(objClass, parentClass):
gLogger.warn("%s is not a subclass of %s. Skipping" % (objClass, parentClass))
continue
gLogger.info("Loaded %s" % objPythonPath)
loadedObjects[pythonClassName] = objClass
return loadedObjects
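# A hypothetical call (the path and parent class are illustrative only):
#
#   from DIRAC.AccountingSystem.Client.Types.BaseAccountingType import BaseAccountingType
#   types = loadObjects("AccountingSystem/Client/Types", parentClass=BaseAccountingType)
#   # -> {"DataOperation": <class 'DataOperation'>, ...}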
|
yujikato/DIRAC
|
src/DIRAC/Core/Utilities/Plotting/ObjectLoader.py
|
Python
|
gpl-3.0
| 2,494
|
[
"DIRAC"
] |
744c81c407b1798f8b6f4297946f1d7bfb0b55194e241fe0d85023ea43109cb7
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pylid
from collections import Counter
hu = pylid.PyLID(3)
hu.total_ngrams = 91731180
hu.lang = 'hu'
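# The profile below is a character-trigram frequency table: '#' marks a
# word boundary, so u'#az' counts occurrences of "az" at the start of a
# word. (Hedged note: a classifier typically scores text by comparing its
# trigram distribution against per-language profiles such as this one.)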
hu.ngrams = Counter({
u'#a#': 938412,
u'#az': 582709,
u'az#': 531824,
u'\xe9s#': 497619,
u'#sz': 477375,
u'gy#': 468392,
u'en#': 453228,
u'#\xe9s': 408621,
u'ek#': 370133,
u'an#': 368510,
u'#ho': 361726,
u'#el': 341638,
u'ogy': 337770,
u'hog': 325762,
u'#me': 324712,
u'k#a': 324364,
u't#a': 315713,
u'sze': 303912,
u'ak#': 299611,
u'z#e': 276527,
u'ele': 266981,
u'meg': 265698,
u'egy': 254833,
u'et#': 243899,
u's\xe9g': 239103,
u'tt#': 235865,
u'nak': 235286,
u's\xe1g': 231279,
u'#k\xf6': 221515,
u'#eg': 219879,
u'ban': 217138,
u'em#': 214893,
u'len': 214164,
u't\xe1s': 213990,
u'ben': 209010,
u's#a': 207222,
u'#ke': 207108,
u'ell': 207013,
u'ok#': 204513,
u'ni#': 200019,
u'#te': 198900,
u'#ha': 188148,
u'ra#': 187921,
u'n#a': 186609,
u'ai#': 186501,
u'nek': 185842,
u'#ne': 184342,
u'#eu': 180586,
u'a#k': 179690,
u'nk#': 179276,
u'is#': 177540,
u'int': 177201,
u'#mi': 177193,
u'zer': 176934,
u'\xe1ll': 176854,
u'ere': 174760,
u'el#': 173514,
u'ott': 171589,
u'#va': 171404,
u'#fe': 170344,
u'nem': 170092,
u't\xe9s': 167788,
u'zet': 163736,
u'y#a': 159807,
u'kel': 159360,
u'ely': 158085,
u'#fo': 157322,
u'eur': 157290,
u'ame': 157115,
u'#le': 155370,
u'ent': 154695,
u'#am': 153805,
u'mel': 152132,
u'ur\xf3': 151974,
u'k\xf6z': 151881,
u'min': 151623,
u'el\u0151': 150943,
u'nt#': 150796,
u'sz\xe1': 150261,
u'al#': 150103,
u'szt': 148460,
u'sza': 147969,
u'#ta': 145073,
u'fel': 144882,
u'biz': 143988,
u'r\xf3p': 143310,
u'ett': 141881,
u'hat': 140999,
u'at#': 139421,
u'a#t': 139078,
u'lam': 138331,
u'tel': 138306,
u'#bi': 137273,
u'eze': 136779,
u'\xe9rt': 135986,
u'tet': 134658,
u'unk': 133041,
u'#k\xe9': 132944,
u'val': 132422,
u'#is': 132403,
u're#': 131884,
u'gye': 131509,
u'\xf3pa': 131001,
u'l#a': 130785,
u'ala': 129924,
u'ete': 129218,
u'let': 127490,
u'at\xe1': 127096,
u'\xe9ny': 126858,
u'os#': 126677,
u'lla': 126456,
u'#ki': 124985,
u'esz': 124981,
u'\xe9se': 124814,
u'd\xe9s': 124026,
u'k#e': 123353,
u'a#m': 123341,
u'tos': 123064,
u'i#a': 121994,
u'#ez': 121595,
u'a#a': 120728,
u'lat': 119718,
u'ter': 119418,
u'#be': 117817,
u'\xe1t#': 116665,
u'k#m': 116608,
u'k#k': 115657,
u't#k': 115580,
u'eke': 115580,
u'nye': 114845,
u'pai': 114749,
u'a#s': 113554,
u'lye': 112809,
u'ssz': 112308,
u'sok': 111150,
u'es#': 111023,
u'lle': 110970,
u'jel': 110764,
u'll#': 110112,
u'st#': 109777,
u'ik#': 108824,
u'agy': 108741,
u'tal': 108321,
u'fog': 108245,
u'\xe1g#': 108068,
u'g\xe1l': 107913,
u'z#a': 107079,
u's#e': 106098,
u'a#h': 104453,
u'l\xe1s': 104102,
u'szo': 103548,
u't#m': 103508,
u'\xe1s#': 103355,
u'ti#': 103256,
u'\xe1s\xe1': 103223,
u'#je': 103222,
u'rt#': 102944,
u'\xe9ge': 102497,
u'ez#': 102473,
u'#\xe1l': 101875,
u'ato': 101783,
u'ket': 101379,
u'a#v': 101162,
u'zt#': 100804,
u'\xf3l#': 100237,
u't#\xe9': 99576,
u'\xe1ny': 99158,
u'kat': 98791,
u'bb#': 98470,
u't#e': 98076,
u'\xe9sz': 97232,
u'e#a': 96915,
u'a#b': 96596,
u'leg': 96369,
u'v\xe1l': 96305,
u'#al': 96280,
u'vet': 95991,
u'nt\xe9': 95927,
u'het': 94747,
u'lt#': 94725,
u'enn': 94696,
u'on#': 93336,
u'\xe1so': 92722,
u'si#': 92419,
u'z\xe1s': 92419,
u'zot': 92302,
u'i#k': 92184,
u's\xedt': 91991,
u'koz': 91991,
u'et\xe9': 91973,
u'kor': 91685,
u'ont': 91621,
u's#m': 90199,
u'#t\xe1': 89105,
u'#\xe9r': 89039,
u'ami': 88997,
u'k#\xe9': 88953,
u'hoz': 88845,
u'elm': 88389,
u'men': 88297,
u'\xe9s\xe9': 88218,
u'ind': 87537,
u'kez': 87470,
u'oga': 87406,
u'#re': 87360,
u'#ko': 87307,
u'tat': 87290,
u'\xe9t#': 87159,
u't#h': 87113,
u'\xe1lt': 86763,
u's#k': 85166,
u'zon': 84856,
u'tan': 84688,
u'l\xf3#': 84111,
u'asz': 83417,
u'oly': 83021,
u't\xe1r': 82737,
u'al\xe1': 81545,
u'\xfcnk': 81248,
u'\xe1ra': 81043,
u'k#h': 80477,
u'm\xe9n': 80462,
u'\xe9g#': 80450,
u'tek': 79972,
u'#v\xe1': 79425,
u'gya': 78812,
u'ja#': 78389,
u'a#f': 78316,
u'#cs': 78226,
u'ren': 78224,
u'et\u0151': 78023,
u'nte': 77974,
u'gi#': 77884,
u'sen': 77580,
u'\xe1k#': 77401,
u'end': 77295,
u'ors': 77226,
u'izo': 77158,
u'eln': 76971,
u'n#e': 76902,
u'ten': 76896,
u'cso': 76610,
u'a#p': 76468,
u'n#k': 75412,
u'ene': 75393,
u'#in': 75219,
u'l#k': 75125,
u'\xf6ve': 75101,
u'#po': 75061,
u'yek': 75002,
u'tts': 74159,
u'art': 74098,
u'\xe9rd': 73126,
u'nde': 72923,
u'tik': 72574,
u'#v\xe9': 72572,
u't#t': 72471,
u'#t\xf6': 72343,
u'#ma': 72075,
u'#vi': 71871,
u'lem': 71734,
u'n#m': 71699,
u'n\xf6k': 71603,
u'pol': 71509,
u'gaz': 71369,
u'#ka': 71249,
u'k\xe9p': 71072,
u'del': 71060,
u'i#s': 71054,
u'yan': 70989,
u'lap': 70968,
u'\xe9be': 70872,
u'ese': 70621,
u'a#n': 70140,
u'rsz': 69831,
u'lta': 69588,
u'mi#': 69538,
u'z\xe9s': 69361,
u'\xe1go': 69273,
u'#jo': 69265,
u'sek': 69257,
u'k\xfcl': 69238,
u'zto': 69225,
u'k#s': 67804,
u'ts\xe1': 67754,
u'a#j': 67688,
u'al\xf3': 67271,
u'lis': 66859,
u'z\xe1g': 66761,
u'tot': 66548,
u'eg#': 66260,
u'isz': 66217,
u'ker': 66175,
u'\u0151l#': 66139,
u'ava': 65769,
u'jog': 65719,
u'azo': 65302,
u'el\xe9': 65244,
u'\xe1sa': 65105,
u'i#e': 64991,
u'r\xe1n': 64889,
u's#s': 64580,
u'oka': 64577,
u'lal': 64380,
u'izt': 64375,
u'zab': 64344,
u'uk#': 64145,
u'par': 63984,
u't#s': 63899,
u's#t': 63891,
u'ked': 63694,
u'ny#': 63682,
u'\xe1n#': 63185,
u'\xfcl#': 63056,
u'k\xe9r': 62863,
u'azt': 62616,
u'ony': 62105,
u'\xf6z\xf6': 62105,
u'k#t': 61857,
u'\xe9le': 61574,
u'tar': 61497,
u't\xe9k': 61245,
u'#pa': 60600,
u'z\xe1m': 60585,
u'ez\xe9': 60580,
u'm#a': 60572,
u'olg': 60513,
u'#ve': 60271,
u'lya': 60165,
u'emb': 60161,
u'mbe': 59904,
u'#vo': 59860,
u'#na': 59774,
u'oz\xe1': 59606,
u'y#e': 59568,
u'ehe': 59508,
u'#ak': 59266,
u'uni': 59114,
u'\xe1mo': 58957,
u'sa#': 58862,
u'\xedt\xe1': 58826,
u'ln\xf6': 58807,
u't#i': 58502,
u'r\xfcl': 58408,
u'etn': 58382,
u'd\xe1s': 58337,
u'mok': 58285,
u'nto': 58074,
u'alm': 58023,
u'rt\xe9': 57993,
u'yen': 57902,
u'kap': 57872,
u't#v': 57848,
u'n#t': 57673,
u'alk': 57660,
u'g#a': 57460,
u'gok': 57323,
u'#er': 57263,
u's#h': 57095,
u'i#p': 56774,
u'ot#': 56507,
u'l#s': 56405,
u'lit': 56387,
u'#un': 56188,
u'hel': 56184,
u'z#i': 56154,
u'vel': 56110,
u'ret': 55948,
u'ni\xf3': 55931,
u'n#s': 55920,
u'a#l': 55681,
u'gat': 55558,
u'tes': 55471,
u'vez': 55253,
u'om#': 55233,
u'#k\xfc': 55227,
u'sel': 55194,
u'#mo': 55068,
u'zat': 55009,
u'vis': 54966,
u'ly#': 54882,
u'oli': 54619,
u'r\xe9s': 54572,
u'#m\xe1': 54567,
u'#\xfar': 54501,
u'\xf6k#': 54492,
u'ta#': 54312,
u'zte': 53912,
u'ond': 53766,
u'szi': 53573,
u'nag': 53527,
u'\xe1ba': 53482,
u'i#t': 53414,
u'ezt': 53396,
u'y#m': 53274,
u'or#': 53269,
u'k#v': 53227,
u'n#h': 53212,
u'van': 53127,
u'sol': 53113,
u'kal': 52989,
u'#m\xe9': 52976,
u'm\xe1r': 52868,
u'\xe1si': 52845,
u'it#': 52679,
u'i#h': 52594,
u'sz\xfc': 52481,
u'yel': 52222,
u't\xe9n': 52196,
u'\xe1ro': 51819,
u'r\xf3l': 51698,
u'ts\xe9': 51697,
u'sz\xe9': 51423,
u'leh': 51387,
u'#ol': 51213,
u'iti': 51195,
u'gyo': 51064,
u'\xe9n#': 50977,
u'ag\xe1': 50651,
u'em\xe9': 50562,
u'l#e': 50435,
u'rla': 49915,
u'tag': 49528,
u'i#m': 49167,
u'ber': 48998,
u'#em': 48923,
u'sz\xf3': 48883,
u'se#': 48828,
u'l\xe9s': 48755,
u'\xe1na': 48704,
u'\xfck#': 48657,
u's#\xe9': 48621,
u'er\xfc': 48610,
u'zel': 48595,
u'\xe9re': 48447,
u'at\xf3': 48443,
u'i\xf3#': 48366,
u'eri': 48335,
u'#de': 48291,
u't#n': 48210,
u'#pr': 47975,
u'#mu': 47911,
u'csa': 47910,
u'ege': 47771,
u'\xfar#': 47751,
u'as\xe1': 47669,
u'\xfcgy': 47658,
u'\xedt\xe9': 47527,
u'm#h': 47431,
u'ani': 47395,
u'\xe1li': 47389,
u'lko': 47258,
u'nyi': 47180,
u'#r\xe9': 47125,
u'i#\xe9': 47076,
u'ci\xf3': 46941,
u'#or': 46848,
u'os\xed': 46716,
u'ata': 46506,
u'#ny': 46493,
u'\xe1st': 46458,
u'elt': 46398,
u'kon': 46281,
u'#he': 46270,
u'zem': 46254,
u'ola': 46243,
u'eti': 46192,
u't\xe1m': 46155,
u'#l\xe9': 46086,
u'les': 46018,
u'\xe9st': 45976,
u'mun': 45944,
u'n\xe1l': 45800,
u'lel': 45696,
u'oz#': 45686,
u'l#h': 45616,
u'\xf6ss': 45476,
u'den': 45476,
u'for': 45433,
u'lg\xe1': 45349,
u'gyi': 45325,
u'pcs': 45170,
u't#f': 45163,
u'#es': 45076,
u'apc': 45002,
u'k\xf6v': 44986,
u'k#f': 44974,
u'\xe1ly': 44956,
u'elj': 44913,
u'ed\xe9': 44809,
u'yez': 44683,
u'azd': 44578,
u'fon': 44518,
u'ia#': 44453,
u'\xfcl\xf6': 44451,
u'#so': 44451,
u'ks\xe9': 44440,
u'pro': 44202,
u'ika': 44116,
u'n#\xe9': 44080,
u'r\xe1s': 43951,
u'a#e': 43876,
u'ges': 43783,
u'lyo': 43737,
u'm\xe1s': 43712,
u'tok': 43657,
u'lma': 43580,
u's#f': 43540,
u'#gy': 43517,
u'arl': 43513,
u'\xe1bb': 43367,
u'ir\xe1': 43277,
u'\xe9k#': 43222,
u'\xe9gi': 43104,
u'kra': 43011,
u'abb': 42979,
u'\xf6tt': 42957,
u'nyo': 42952,
u'yi#': 42877,
u'v\xe9g': 42772,
u'mog': 42730,
u'ann': 42696,
u'ss\xe9': 42665,
u'r\u0151l': 42664,
u'\xe1ci': 42638,
u'zta': 42556,
u'elk': 42300,
u'tte': 42256,
u'\xe9ke': 41983,
u'\xe1gi': 41923,
u'n#v': 41859,
u'a#g': 41846,
u'\xe9ne': 41465,
u't\xf6r': 41411,
u'ess': 41330,
u'l#\xe9': 41278,
u'zda': 41253,
u'n#f': 41148,
u'ise': 41103,
u'#se': 41040,
u'#ja': 41013,
u'ass': 40944,
u'lni': 40816,
u'pon': 40734,
u'#en': 40731,
u'l\xf6n': 40685,
u'#ga': 40554,
u'\xe1la': 40528,
u'er\u0151': 40491,
u'das': 40459,
u's#v': 40390,
u'ig#': 40318,
u'von': 40294,
u'a#\xe9': 40236,
u'amo': 40229,
u'it\xe1': 40172,
u'#n\xe9': 40169,
u'z\xf3#': 40126,
u't\u0151#': 40123,
u'l#m': 40110,
u'etk': 39940,
u'oz\xf3': 39912,
u'ne#': 39892,
u'b\xe1l': 39826,
u'#ir': 39742,
u'z\xe9r': 39729,
u'erm': 39704,
u'nna': 39698,
u'k#n': 39688,
u'l\xedt': 39560,
u'\xe1r#': 39521,
u'eg\xe9': 39498,
u'z\xfck': 39477,
u'arr': 39448,
u'jav': 39226,
u's\xe1t': 39155,
u'#ar': 39134,
u'dol': 39037,
u'mer': 38996,
u'mag': 38966,
u'#p\xe9': 38834,
u'mon': 38772,
u'vag': 38617,
u'fej': 38609,
u'apo': 38607,
u'be#': 38556,
u'eni': 38408,
u'\xf6n\xf6': 38341,
u'kke': 38260,
u'juk': 38251,
u'#\xf6s': 38224,
u'tko': 38040,
u'e#v': 38011,
u'#fi': 37956,
u'\xe9si': 37802,
u'ill': 37786,
u'elv': 37750,
u'r#a': 37702,
u'de#': 37684,
u'm\xe1n': 37666,
u'tn\xe9': 37645,
u'zek': 37520,
u'rd\xe9': 37509,
u'get': 37477,
u'ebb': 37462,
u'atk': 37456,
u'ha#': 37412,
u'uta': 37293,
u'han': 37173,
u'rte': 37095,
u'zen': 37084,
u'orm': 36998,
u'#tu': 36984,
u'ki#': 36963,
u'okr': 36804,
u'olt': 36762,
u'ez\u0151': 36590,
u'rat': 36435,
u'i#u': 36377,
u'sak': 36308,
u'z#\xe9': 36256,
u'rek': 36183,
u'l#v': 36175,
u'e#k': 36102,
u'n#n': 36033,
u'l#t': 36015,
u'z\xf3l': 35967,
u'rde': 35949,
u'ozz': 35927,
u'am#': 35863,
u'has': 35820,
u's#n': 35772,
u's\xe9r': 35761,
u'\xe9te': 35713,
u'fol': 35712,
u'\xe9pv': 35681,
u'zni': 35623,
u'j\xe1r': 35602,
u'sz#': 35529,
u'yes': 35373,
u'oss': 35278,
u'ndo': 35260,
u'ona': 35234,
u'tud': 35000,
u'i#f': 34958,
u'yet': 34830,
u't\xf3#': 34825,
u'\xf6bb': 34791,
u'res': 34758,
u'k#b': 34733,
u'els': 34677,
u'vas': 34667,
u't#j': 34664,
u'ov\xe1': 34654,
u'i#v': 34590,
u'#ad': 34429,
u'a#r': 34354,
u'sz\xf6': 34342,
u'#il': 34262,
u'lmi': 34225,
u'#an': 34204,
u'#\xfaj': 34149,
u't\xf6b': 34131,
u'te#': 33913,
u'rin': 33888,
u'\xe9ve': 33818,
u'n#i': 33791,
u'sz\xed': 33706,
u'ana': 33585,
u'l\xe1t': 33569,
u't\xe9r': 33538,
u'pvi': 33477,
u'nka': 33469,
u'nne': 33466,
u's#p': 33454,
u'l\u0151#': 33451,
u'tle': 33397,
u'gal': 33377,
u'osa': 33275,
u'nny': 33254,
u'bel': 33207,
u'nat': 33151,
u'z\u0151#': 33115,
u'mos': 33089,
u'att': 32944,
u'egf': 32921,
u'jes': 32855,
u'\xfcks': 32823,
u'l#f': 32780,
u'v\xe1n': 32631,
u'hez': 32594,
u'dek': 32542,
u'k#\xfa': 32541,
u'an\xe1': 32490,
u't\xe9z': 32469,
u'ken': 32356,
u't\u0151s': 32220,
u'zav': 32156,
u'zi#': 32091,
u'at\xe9': 32069,
u'zol': 32057,
u'#hi': 32030,
u'y#k': 31976,
u'dig': 31954,
u'ert': 31825,
u's#b': 31791,
u'tta': 31790,
u'sem': 31773,
u'maz': 31739,
u'k\xf6r': 31555,
u'#id': 31541,
u'#c\xe9': 31470,
u'e#e': 31415,
u'\xfagy': 31396,
u'k\xe9n': 31387,
u'ada': 31313,
u'rra': 31302,
u'i#j': 31298,
u'c\xe9l': 31285,
u'kai': 31274,
u'i#b': 31214,
u'j\xe1k': 31141,
u'#ig': 31072,
u'vol': 31061,
u'm\xf3d': 31056,
u'\xfcle': 31019,
u'rm\xe1': 30977,
u'#to': 30803,
u's\xe1r': 30782,
u'k#l': 30778,
u'g#k': 30746,
u'lje': 30746,
u'li#': 30686,
u'emz': 30573,
u'kna': 30547,
u've#': 30513,
u'n#b': 30504,
u'g#e': 30502,
u'\u0151s\xe9': 30454,
u'm\xe9g': 30427,
u'san': 30395,
u'us#': 30387,
u'lek': 30383,
u'ton': 30375,
u'n\xe1c': 30370,
u'#\xe1t': 30270,
u'ner': 30268,
u'ako': 30252,
u'ort': 30132,
u'k#i': 30070,
u'rsa': 30048,
u'ogl': 29868,
u'\xe9nt': 29860,
u'ri#': 29833,
u'kka': 29818,
u'v\xe9d': 29816,
u'z#\xe1': 29715,
u'ab\xe1': 29704,
u'nti': 29643,
u'll\xe1': 29598,
u'j\xe1t': 29552,
u'n##': 29516,
u'gla': 29489,
u'\xedte': 29428,
u'erv': 29416,
u'tha': 29394,
u'#t\xe9': 29281,
u'ss\xe1': 29280,
u'roz': 29263,
u'gad': 29251,
u'a#i': 29234,
u'z\xf6t': 29215,
u'\xe1rs': 29117,
u'dsz': 29116,
u'aka': 29043,
u'lom': 28926,
u'zak': 28871,
u'gia': 28856,
u's#i': 28856,
u'lan': 28855,
u'#\xe9l': 28764,
u'zok': 28715,
u't#b': 28699,
u'eny': 28681,
u'#as': 28666,
u'tov': 28645,
u'dem': 28623,
u'v\xe1b': 28617,
u'\xf3s#': 28612,
u'por': 28582,
u'#st': 28556,
u'e#\xe9': 28483,
u'\xf6lt': 28418,
u'#m\xf3': 28401,
u'b\xf3l': 28393,
u'toz': 28337,
u'eng': 28220,
u'\xe9ko': 28195,
u'las': 28158,
u'szn': 28122,
u'ink': 28100,
u'\xf3#e': 27970,
u'i#i': 27967,
u'eme': 27941,
u'g\xe9s': 27898,
u'gy\xfc': 27879,
u'e#h': 27805,
u'aki': 27797,
u'zt\xe1': 27791,
u'm#e': 27775,
u'\xe9ly': 27764,
u'l\u0151t': 27755,
u'ejl': 27745,
u'sor': 27737,
u'hal': 27593,
u'g\xe1r': 27561,
u'\xe1cs': 27527,
u'zik': 27517,
u'ves': 27507,
u'#ti': 27507,
u'y#t': 27501,
u'er#': 27478,
u'mze': 27476,
u'iss': 27452,
u'eg\xe1': 27336,
u'gon': 27323,
u'ros': 27267,
u'ily': 27215,
u'e#m': 27190,
u'#\xe9v': 27159,
u'ati': 26978,
u'\xe1m\xe1': 26965,
u'ede': 26916,
u'dal': 26858,
u'a#c': 26802,
u'tis': 26720,
u'l\xe9t': 26662,
u'vaz': 26620,
u'tke': 26574,
u'tak': 26518,
u'yok': 26472,
u'\xe9de': 26465,
u'kbe': 26463,
u'etl': 26462,
u'z\xf6s': 26440,
u'haj': 26420,
u'tbe': 26333,
u'lak': 26329,
u'kin': 26316,
u'gek': 26306,
u'\u0151d\xe9': 26209,
u's\xe9t': 26184,
u'vit': 26110,
u'\xe9g\xe9': 26106,
u'tja': 26074,
u'cs#': 26072,
u'obb': 26056,
u'ik\xe1': 26050,
u'g#m': 26020,
u'\xf3k#': 25998,
u'k\xf6d': 25910,
u'i\xf3s': 25876,
u'ek\xe9': 25868,
u'lke': 25783,
u'y#s': 25725,
u'lme': 25704,
u'od\xe1': 25695,
u'zz\xe1': 25673,
u'\u0171k\xf6': 25673,
u'tba': 25645,
u'vil': 25604,
u'az\xe1': 25536,
u'ld\xe1': 25521,
u'tra': 25519,
u'\xf6rt': 25503,
u'm\u0171k': 25498,
u'rve': 25388,
u'ad\xe1': 25385,
u'n\xe9m': 25299,
u'il\xe1': 25223,
u'#k\xed': 25217,
u'k\xedv': 25202,
u'ev\xe9': 25196,
u'z#o': 25191,
u'eki': 25161,
u'ba#': 25146,
u'yos': 25023,
u'y#v': 25000,
u'm#m': 24981,
u'\u0151k#': 24924,
u'a#d': 24908,
u'nni': 24901,
u'lha': 24894,
u'akk': 24856,
u'rem': 24839,
u'kba': 24773,
u'\xf3do': 24764,
u'yoz': 24745,
u'ad\xf3': 24743,
u'p\xe9n': 24740,
u'ul#': 24733,
u'mil': 24721,
u'\xfclt': 24634,
u'eli': 24625,
u'k\xf6l': 24624,
u'lyz': 24544,
u'ozn': 24514,
u'l\xe9p': 24464,
u'\xe9pe': 24383,
u's#j': 24348,
u'#\xf6n': 24325,
u'mat': 24274,
u'm#s': 24250,
u'\xedta': 24244,
u'l\u0151s': 24132,
u'ons': 24131,
u'\xe1va': 24127,
u'gsz': 24126,
u'enl': 24092,
u'tre': 24079,
u'vek': 24060,
u'ina': 24058,
u'\xe1sr': 24035,
u'oko': 24002,
u'nya': 23994,
u'ive': 23972,
u'ls\u0151': 23969,
u'egk': 23921,
u'm#t': 23862,
u'e#t': 23851,
u'igy': 23831,
u'str': 23752,
u'y#n': 23707,
u'nun': 23542,
u'set': 23527,
u'zt\xe9': 23488,
u'\xe9nz': 23468,
u'y#h': 23437,
u'ma#': 23403,
u'old': 23390,
u'n\xfcn': 23385,
u'nge': 23330,
u'##a': 23330,
u'zin': 23329,
u'ns\xe1': 23313,
u'el\xfc': 23297,
u'oro': 23281,
u'tj\xe1': 23271,
u'\xfctt': 23257,
u'kko': 23168,
u'got': 23159,
u'l\xe1l': 23122,
u'gra': 23118,
u'ang': 23115,
u'mik': 23063,
u'z#u': 23057,
u'y\xfct': 23032,
u'k\xe9t': 23011,
u'elf': 22992,
u'\xfaj#': 22931,
u'v\xe9l': 22905,
u'm\xe9l': 22902,
u'\xe1ga': 22897,
u'lyt': 22849,
u'ozo': 22845,
u'lts': 22837,
u'sba': 22829,
u'#ah': 22802,
u'z\xe9l': 22799,
u'rto': 22792,
u'kne': 22776,
u'\xf3s\xe1': 22776,
u'\xe9g\xfc': 22756,
u'k\xf6t': 22742,
u'bbe': 22738,
u'tni': 22724,
u's#l': 22700,
u'a##': 22688,
u'z#h': 22665,
u'nba': 22654,
u'yze': 22650,
u'm\xe9r': 22631,
u'ozt': 22618,
u'edi': 22584,
u'ain': 22549,
u'n\xf6s': 22543,
u'tne': 22506,
u'#do': 22503,
u'elh': 22459,
u'm#k': 22433,
u'ekt': 22428,
u'dik': 22308,
u'aba': 22274,
u'bes': 22263,
u'#h\xe1': 22248,
u'fig': 22234,
u'okk': 22234,
u'\xf3#k': 22215,
u'pia': 22173,
u'atb': 22171,
u'yon': 22169,
u'osz': 22161,
u'lte': 22148,
u'reh': 22139,
u'etb': 22134,
u'gys': 22098,
u'atn': 22095,
u'ost': 22056,
u'gen': 22038,
u'nap': 21990,
u'iac': 21983,
u'bba': 21915,
u'\xedts': 21909,
u't\xe1n': 21889,
u'bad': 21868,
u'lja': 21855,
u'\u0151sz': 21838,
u'nds': 21833,
u's\xe1n': 21811,
u'zza': 21721,
u'\xe1sb': 21713,
u'l\xe1g': 21711,
u'sta': 21706,
u'#go': 21696,
u'gre': 21690,
u'ama': 21689,
u'asl': 21680,
u'rm\xe9': 21658,
u'lka': 21616,
u'sal': 21610,
u'alo': 21594,
u'sla': 21564,
u'n#l': 21553,
u'g\xedt': 21550,
u'kre': 21476,
u's#r': 21475,
u'lj\xe1': 21433,
u'rok': 21429,
u'rom': 21419,
u't\xe9t': 21412,
u'k#j': 21371,
u'zke': 21353,
u'\xe1j\xe1': 21287,
u'opo': 21183,
u'#ut': 21172,
u'ezd': 21116,
u'#j\xf3': 21069,
u'e#s': 21049,
u'lna': 20937,
u'ara': 20917,
u'ekr': 20904,
u'onb': 20896,
u'\xe9sr': 20887,
u'erg': 20880,
u'tun': 20879,
u'lfo': 20878,
u'eve': 20849,
u'lez': 20845,
u'\xf6sz': 20827,
u'ogo': 20826,
u'teg': 20811,
u'#\xfag': 20742,
u'i#n': 20738,
u'ism': 20727,
u'g\xe9n': 20690,
u'gfe': 20662,
u's\xfal': 20646,
u'rep': 20636,
u'i#r': 20622,
u'j\xe1n': 20587,
u'szk': 20553,
u'seg': 20546,
u't##': 20542,
u'\xfaly': 20541,
u'\xe9m#': 20510,
u'tom': 20503,
u'nda': 20469,
u'z\u0151d': 20405,
u't\xe1k': 20315,
u'#pi': 20253,
u'okn': 20217,
u'im#': 20198,
u'err': 20179,
u'zor': 20141,
u'\xf6s#': 20121,
u't#l': 20080,
u'gha': 20063,
u'rgi': 20043,
u't#p': 20040,
u'v\xe9n': 20039,
u'rme': 20036,
u'gos': 20006,
u'gba': 20006,
u'r\u0151s': 19994,
u'erz': 19979,
u'zde': 19979,
u'id\u0151': 19962,
u'egi': 19900,
u'ys\xe9': 19833,
u'zn\xe1': 19821,
u'tam': 19786,
u'nys': 19779,
u'k\xe9b': 19778,
u'oza': 19778,
u'tem': 19759,
u'sop': 19743,
u'atj': 19734,
u'm#\xe9': 19726,
u'#pe': 19721,
u'\xf3l\xf3': 19709,
u'zm\xe9': 19704,
u'kik': 19651,
u'ci\xe1': 19636,
u'\xe1ln': 19621,
u'r#e': 19595,
u'bbi': 19595,
u'om\xe1': 19587,
u'pod': 19584,
u'g#t': 19528,
u't\xe9b': 19468,
u'ens': 19450,
u'okb': 19447,
u'ka#': 19437,
u'orl': 19420,
u'z\xedt': 19415,
u'ali': 19357,
u'nt\u0151': 19347,
u'\xe9lk': 19294,
u'k\xe9s': 19283,
u'nk\xe1': 19267,
u'saj': 19230,
u'eu#': 19217,
u's\u0151#': 19189,
u'#ok': 19182,
u'd\u0151#': 19172,
u'\xe9gr': 19113,
u'\xe1r\xf3': 19112,
u'gna': 19106,
u'\xf6se': 19080,
u'nia': 19072,
u'lm\xe9': 19061,
u'rit': 18997,
u'n\xe9l': 18959,
u't#\xe1': 18913,
u'ran': 18889,
u'l#i': 18826,
u'sai': 18813,
u'#sa': 18809,
u'egs': 18799,
u'ult': 18794,
u'\xedr\xe1': 18785,
u'sra': 18785,
u'rob': 18695,
u'son': 18667,
u'cse': 18666,
u'\xe9zk': 18644,
u'red': 18635,
u'eg\xed': 18605,
u'ugy': 18566,
u't\xedv': 18565,
u'gy\xe9': 18560,
u'iga': 18547,
u'n#j': 18512,
u'rta': 18505,
u'net': 18499,
u'nyu': 18492,
u'\xedto': 18488,
u'oln': 18448,
u'ysz': 18440,
u't\xf3s': 18418,
u'nos': 18402,
u'egv': 18371,
u'ssa': 18359,
u'\xe1gb': 18356,
u'#ba': 18352,
u'yei': 18325,
u'egt': 18310,
u't\xf3l': 18306,
u'fen': 18299,
u'\u0151tt': 18279,
u'ein': 18270,
u'd\xf3#': 18249,
u'\xf6ze': 18248,
u'\xe9m\xe1': 18235,
u'miv': 18228,
u'\u0151en': 18223,
u'ver': 18218,
u'\xf6zi': 18195,
u'ajt': 18188,
u't\xfcn': 18183,
u'k#r': 18177,
u'ped': 18172,
u'g#n': 18140,
u'd#a': 18139,
u'er\xe9': 18061,
u'nle': 18045,
u'g#\xe9': 17992,
u'na#': 17959,
u'#ku': 17955,
u'n\xf6v': 17931,
u'ajd': 17917,
u'le#': 17858,
u'##m': 17842,
u'yta': 17828,
u'lve': 17827,
u'ny\xfa': 17822,
u'iku': 17791,
u'kus': 17789,
u'bl\xe9': 17789,
u'met': 17764,
u'dan': 17714,
u'obl': 17703,
u'tla': 17683,
u'ekk': 17672,
u'\xf3#m': 17652,
u'ls\xe1': 17639,
u'lto': 17588,
u'sse': 17587,
u'k#\xe1': 17571,
u'l\xe9m': 17554,
u'yil': 17524,
u'aho': 17495,
u'mis': 17467,
u'#j\xf6': 17454,
u'ine': 17385,
u'dos': 17365,
u'z\xe1l': 17343,
u'dot': 17340,
u'gol': 17306,
u'\xe1lj': 17294,
u'\xedv\xe1': 17287,
u'yak': 17282,
u'l#n': 17260,
u'mit': 17259,
u'j\xfck': 17230,
u'ers': 17223,
u'pes': 17207,
u'd\xedt': 17167,
u'aza': 17158,
u'aga': 17092,
u'ike': 17082,
u'dat': 17065,
u'ezz': 17064,
u'sik': 17062,
u'rz\u0151': 17059,
u'\xf6nt': 17055,
u'eha': 17047,
u'\xe1to': 16981,
u'bev': 16978,
u'eml': 16975,
u'mpo': 16961,
u'zal': 16960,
u'nev': 16952,
u'ul\xf3': 16948,
u'ezn': 16930,
u'cs\xf6': 16930,
u'nsz': 16927,
u'kit': 16889,
u'\xe9p\xe9': 16850,
u'\xfajt': 16846,
u'\xf6d\xe9': 16816,
u'iz\xe1': 16780,
u'ide': 16759,
u'\xe1g\xe1': 16744,
u'rai': 16743,
u'p\xe9s': 16741,
u'#e#': 16741,
u'\xe9ri': 16737,
u'tti': 16731,
u'p\xe9l': 16722,
u'\xe9pp': 16721,
u'k#p': 16661,
u'\xe9ld': 16648,
u'or\xe1': 16628,
u'kis': 16601,
u'#l\xe1': 16597,
u'v\xfcl': 16581,
u'z#v': 16542,
u'#n\xf6': 16531,
u'l\xfcl': 16520,
u'jle': 16520,
u'n#r': 16514,
u'nz\xfc': 16509,
u'z\xe1r': 16470,
u'sre': 16367,
u'z\xfcg': 16361,
u'j\xf6v': 16351,
u'bi#': 16325,
u'lgo': 16322,
u'atl': 16300,
u'\xf6te': 16291,
u'\xedv\xfc': 16264,
u'k##': 16252,
u'aso': 16248,
u'lad': 16239,
u'\xe9gv': 16217,
u'\xe9r\u0151': 16211,
u'rt\xe1': 16198,
u'the': 16197,
u'kod': 16197,
u'emo': 16187,
u'kar': 16158,
u'kul': 16139,
u'sab': 16106,
u'e#n': 16043,
u'hol': 16024,
u'l#b': 16023,
u'\xf3ba': 16009,
u'\xedv#': 15998,
u's#\xfa': 15998,
u'don': 15993,
u'yt#': 15989,
u'og#': 15981,
u'ger': 15975,
u'\xe9sb': 15966,
u'\xe1r\xe1': 15949,
u'n\xe9k': 15942,
u'eje': 15906,
u'olo': 15861,
u'ge#': 15847,
u'r\xe1l': 15841,
u'ego': 15796,
u'nyt': 15792,
u'ndi': 15781,
u'l\xe9r': 15769,
u'har': 15762,
u'\xe1rt': 15759,
u'orr': 15754,
u'tve': 15719,
u'ncs': 15718,
u'kek': 15715,
u'ado': 15712,
u's#\xe1': 15704,
u'viz': 15678,
u'aj\xe1': 15670,
u'\xf6kk': 15665,
u'k#o': 15661,
u'anu': 15645,
u'yal': 15623,
u'mut': 15602,
u'#j\xe1': 15598,
u'l\xe9g': 15583,
u'ske': 15577,
u'zze': 15573,
u'iat': 15560,
u'rgy': 15535,
u'\xe1ls': 15521,
u'j\xe1b': 15520,
u'\xe1rg': 15509,
u'z\xf6n': 15501,
u'dni': 15479,
u'azz': 15469,
u'ie#': 15436,
u'egh': 15426,
u's\xe9n': 15418,
u'ite': 15415,
u'\xf3#t': 15411,
u'd\xf6n': 15407,
u'k\xf6s': 15406,
u'ke#': 15391,
u't\xe1l': 15384,
u'\xf6n#': 15335,
u'g#v': 15330,
u'yik': 15307,
u'\xf3#s': 15300,
u'izs': 15292,
u'zle': 15286,
u'lt\xe9': 15252,
u'nie': 15209,
u'seb': 15186,
u'ppe': 15170,
u'g#s': 15139,
u'tk\xf6': 15127,
u'y#f': 15114,
u'ita': 15101,
u'\xf6r\xf6': 15089,
u'va#': 15087,
u'\xe1ri': 15085,
u'esk': 15080,
u'ekb': 15063,
u'r#m': 15054,
u's\xfcl': 14990,
u'm#v': 14975,
u'\xf3#h': 14915,
u'ul\xe1': 14915,
u'll\xed': 14910,
u'\u0151s#': 14903,
u'pa#': 14902,
u'iko': 14873,
u'y\xfaj': 14849,
u'cia': 14826,
u'd\xe9k': 14802,
u'\xfcgg': 14792,
u'sme': 14780,
u'rny': 14732,
u'pen': 14729,
u'\xe1ss': 14692,
u'yam': 14686,
u'ol\xf3': 14680,
u'e#i': 14666,
u's#u': 14658,
u'#li': 14654,
u'z#\xfa': 14645,
u'kom': 14644,
u't#o': 14632,
u'nci': 14574,
u'\xf3#a': 14565,
u'n#\xe1': 14536,
u's#c': 14523,
u'ttm': 14511,
u'\xf3#j': 14509,
u'\xe1l\xe1': 14508,
u'tm\u0171': 14505,
u'\xf3na': 14491,
u'ntj': 14489,
u'dm\xe9': 14475,
u'edm': 14468,
u'##e': 14466,
u'sad': 14464,
u'r\xe1c': 14464,
u'ogr': 14455,
u'\xe9tr': 14386,
u'f\xfcg': 14357,
u'\u0151#e': 14341,
u'nta': 14319,
u'\xfcl\xe9': 14318,
u'ekn': 14299,
u'mar': 14282,
u'\u0151#k': 14277,
u'rma': 14271,
u'ol#': 14235,
u'\xf6v\u0151': 14228,
u'sg\xe1': 14213,
u'zsg': 14142,
u'l#l': 14132,
u'er\u0171': 14128,
u'ad#': 14111,
u'\xf3t#': 14069,
u'ane': 14069,
u'z#\xf6': 14068,
u'lv\xe1': 14066,
u'bet': 14037,
u'zs\xe9': 14037,
u'l\xe1n': 14032,
u'lsz': 14016,
u'#m\u0171': 13995,
u'#d\xf6': 13903,
u'y#b': 13897,
u'n#p': 13886,
u'gel': 13885,
u's#g': 13881,
u'apj': 13858,
u'z\xfcl': 13849,
u'\xf6nb': 13836,
u'y#o': 13807,
u'rog': 13797,
u'mez': 13790,
u'tor': 13777,
u'and': 13765,
u'y#g': 13764,
u'##\xe9': 13726,
u'sbe': 13721,
u'gve': 13720,
u'k\xe1r': 13703,
u't#r': 13697,
u'i#c': 13689,
u'maj': 13682,
u'g#h': 13635,
u'rel': 13619,
u's\xf6k': 13580,
u'i\xe1l': 13568,
u'h\xe1z': 13524,
u'ei#': 13521,
u'oni': 13501,
u'ged': 13484,
u'ram': 13475,
u'zk\xf6': 13448,
u'rje': 13445,
u'enk': 13445,
u'tas': 13420,
u'eit': 13416,
u't\xe9g': 13385,
u'kot': 13380,
u'v\xe1r': 13378,
u'es\xed': 13376,
u'\xf6zl': 13368,
u'm#f': 13363,
u'v\u0151#': 13358,
u't\xf3k': 13357,
u'v\xe9t': 13321,
u'n\u0151#': 13309,
u'gk\xf6': 13306,
u'erj': 13284,
u'det': 13219,
u'\xe1t\xe1': 13216,
u'gge': 13214,
u'\xedtj': 13182,
u'l\xf3s': 13175,
u'vat': 13165,
u'ait': 13162,
u'f\xe9l': 13149,
u't\u0151l': 13144,
u'any': 13141,
u'\xe1l\xf3': 13117,
u'ans': 13105,
u's#o': 13085,
u'\xf3p\xe1': 13085,
u'\xe1rm': 13085,
u'y#i': 13080,
u'sei': 13073,
u'\xf6rn': 13058,
u'efo': 13045,
u'cs\xe1': 13036,
u'iai': 13011,
u'\xe9r\xe9': 13004,
u'\u0151cs': 13000,
u'#ug': 12991,
u'goz': 12972,
u'#b\xe1': 12970,
u'nik': 12967,
u'r\xe9g': 12957,
u'#\xfcg': 12943,
u'ord': 12930,
u'k\xf6n': 12926,
u'rre': 12926,
u'\xe9lj': 12915,
u'nd#': 12882,
u'm\xe9s': 12867,
u'jd#': 12866,
u'\u0151#s': 12851,
u'#ro': 12838,
u'as#': 12829,
u't#c': 12828,
u'l\u0151c': 12828,
u'p\xe1r': 12824,
u'rr\xe1': 12824,
u'#si': 12797,
u'#eb': 12797,
u'l\u0151k': 12794,
u'ogi': 12785,
u'gja': 12774,
u'rad': 12755,
u'l\u0151d': 12739,
u'ozi': 12727,
u'k#c': 12713,
u'i#l': 12706,
u'udj': 12705,
u'itt': 12699,
u'ige': 12692,
u'es\xfc': 12686,
u'etv': 12677,
u'b#m': 12658,
u'rz\xe9': 12579,
u'ura': 12554,
u'ega': 12522,
u't#d': 12517,
u'e#f': 12516,
u'h\xedv': 12513,
u'p\xedt': 12488,
u'lk\xfc': 12488,
u'\xe1ul': 12485,
u'mek': 12453,
u'\u0151s\xed': 12445,
u'z#k': 12440,
u'ire': 12427,
u'dja': 12406,
u'job': 12404,
u'za#': 12397,
u'\xe1ru': 12376,
u'ny\xed': 12361,
u'yob': 12359,
u'lli': 12354,
u'kol': 12328,
u'yer': 12301,
u'sfo': 12243,
u'v\xe9#': 12239,
u't\u0151e': 12226,
u'\xe1ho': 12225,
u'h\xe1n': 12220,
u'zeg': 12200,
u't#g': 12185,
u'iva': 12169,
u'ar\xe1': 12140,
u'ori': 12130,
u'ize': 12115,
u'lok': 12113,
u'mol': 12096,
u'en\xfc': 12037,
u't\xe1t': 12004,
u'zt\xf3': 11991,
u'kos': 11988,
u'\xe9ss': 11982,
u'mus': 11980,
u'all': 11977,
u'i\xf3k': 11968,
u'gi\xe1': 11952,
u'omo': 11940,
u'd\xe1u': 11909,
u'i#\xe1': 11907,
u'\u0151#a': 11889,
u'rs\xe9': 11851,
u'yas': 11840,
u'y#\xe9': 11834,
u'nkr': 11827,
u'#n\u0151': 11821,
u'b\u0151l': 11815,
u'r#k': 11801,
u'#\xe1r': 11799,
u'yre': 11797,
u'zes': 11765,
u'ech': 11764,
u'\xf3#i': 11763,
u'tju': 11763,
u'gas': 11741,
u'h\xe1t': 11737,
u'dok': 11709,
u'ga#': 11703,
u'm#l': 11695,
u'ybe': 11687,
u'szs': 11651,
u'oci': 11634,
u'mad': 11626,
u'\xf6ks': 11592,
u'nke': 11587,
u'gai': 11569,
u'yul': 11569,
u'#it': 11560,
u'\xe1nt': 11541,
u'zha': 11533,
u'#ur': 11522,
u'\xe9l#': 11516,
u'bon': 11515,
u'\u0151re': 11512,
u'di#': 11483,
u'ag#': 11482,
u'jt\xe1': 11468,
u'emp': 11451,
u'n#o': 11433,
u'gte': 11414,
u'#p\xe1': 11414,
u'nis': 11410,
u'rl\xe1': 11400,
u'lik': 11385,
u'sna': 11366,
u'l\u0151a': 11351,
u'zoc': 11328,
u'teh': 11324,
u'r#h': 11317,
u'en\u0151': 11299,
u'\u0151#m': 11297,
u'#r\xe1': 11293,
u'#ab': 11283,
u'ajl': 11280,
u'epe': 11270,
u'\xedro': 11268,
u'\xe9#t': 11259,
u'\u0151ke': 11252,
u'aim': 11228,
u'egn': 11210,
u'k#g': 11207,
u'ze#': 11191,
u'z#m': 11189,
u'r#i': 11184,
u'#f\xe9': 11182,
u'ogj': 11181,
u'mai': 11162,
u'g\xe9t': 11151,
u'yom': 11148,
u'n\xe9h': 11136,
u'ari': 11115,
u'rv\xe9': 11099,
u'ap#': 11089,
u'ila': 11089,
u'rna': 11085,
u'#ci': 11074,
u'zmu': 11071,
u'kie': 11065,
u'dta': 11051,
u'\u0151se': 11033,
u'y#l': 11031,
u'jl\u0151': 11016,
u'ibe': 11009,
u'\u0151ad': 11009,
u'moz': 11004,
u'on\xe1': 11000,
u'\xedgy': 11000,
u'izm': 10999,
u't\xfal': 10992,
u'gr\xe1': 10981,
u'\xe1l#': 10962,
u'ili': 10960,
u'y\xe9r': 10953,
u'ven': 10952,
u'ini': 10948,
u'ist': 10929,
u'lda': 10928,
u'ezi': 10921,
u'kia': 10919,
u'enc': 10899,
u'tv\xe1': 10894,
u'hag': 10887,
u'emm': 10862,
u'ut\xe1': 10858,
u'mia': 10851,
u'g\xe9r': 10816,
u'gga': 10808,
u'arm': 10783,
u'h\xe9z': 10778,
u'dez': 10775,
u'\xf6ld': 10773,
u'pve': 10745,
u'lag': 10738,
u'm\xfal': 10733,
u'apv': 10725,
u'm\xe9t': 10717,
u'\xe1za': 10713,
u'je#': 10707,
u'inf': 10699,
u'\xf6zt': 10688,
u'g\xe1t': 10686,
u'\xe1sf': 10682,
u'dom': 10663,
u'l#j': 10643,
u'veg': 10629,
u'\u0151be': 10628,
u'\xe1gn': 10626,
u'ol\xe1': 10624,
u'y\xedt': 10609,
u'ern': 10606,
u'kif': 10601,
u'atu': 10594,
u'en\xe9': 10593,
u'b\xe1r': 10588,
u'rni': 10582,
u'\xf3an': 10578,
u'#\xe9n': 10570,
u'mas': 10568,
u'ndk': 10552,
u's##': 10523,
u'#\xedg': 10516,
u'\xe9he': 10509,
u'lcs': 10506,
u'#la': 10483,
u'alt': 10481,
u'\xf6ke': 10470,
u'\xe1no': 10461,
u'sod': 10452,
u'#t\xfa': 10452,
u'\xe9zm': 10451,
u'ilv': 10448,
u's#d': 10444,
u'g#f': 10441,
u'k\xe1b': 10416,
u'yne': 10413,
u'l\xf3d': 10398,
u'z\xfcn': 10393,
u'\xe1sz': 10385,
u'adn': 10373,
u'\xf3la': 10369,
u'liz': 10345,
u'v\xe9s': 10332,
u'kiv': 10331,
u'\xe9h\xe1': 10313,
u'\xf6z\xe9': 10312,
u'i#g': 10279,
u'inc': 10276,
u'tol': 10235,
u'zne': 10222,
u'ads': 10194,
u'ago': 10191,
u'dha': 10185,
u'rul': 10167,
u'rse': 10159,
u'gva': 10151,
u'abo': 10142,
u'eho': 10135,
u'\xf6r\xfc': 10126,
u'ml\xed': 10118,
u'nan': 10102,
u'pj\xe1': 10085,
u't\xfcl': 10081,
u'rik': 10077,
u'\xf3#\xe9': 10076,
u'tai': 10072,
u'n\xe9z': 10064,
u't\xfck': 10064,
u'\u0151#h': 10062,
u'i\xe1n': 10059,
u'v\xedt': 10058,
u'v\xe1s': 10057,
u'm\xe1k': 10056,
u'z#\xfc': 10038,
u'\xf3#f': 10038,
u'bs\xe9': 10026,
u'#\xe9p': 10011,
u's\xe1v': 10001,
u'jai': 9987,
u'\xe1b\xf3': 9951,
u'\xf3j\xe1': 9947,
u'lm\u0171': 9944,
u'#bo': 9932,
u'y#j': 9921,
u'k\xedt': 9906,
u'\xe9kb': 9905,
u'et\xfc': 9904,
u'\xfalt': 9898,
u'ror': 9884,
u'\u0151v\xe9': 9882,
u'r\xedt': 9866,
u'\u0151#f': 9851,
u'k\xfcz': 9851,
u'\xfczd': 9845,
u'bal': 9845,
u'jon': 9835,
u'b\xf6z': 9830,
u'rhe': 9828,
u'nb\xf6': 9828,
u'fin': 9826,
u'n#d': 9824,
u'dju': 9821,
u'g\xe1n': 9819,
u'll\xe9': 9780,
u'\xe9n\u0151': 9763,
u'lhe': 9755,
u'jeg': 9733,
u'bef': 9726,
u'\xf3#v': 9723,
u'p\xe1n': 9720,
u'bb\xe1': 9718,
u'okt': 9710,
u'#di': 9706,
u'g#i': 9705,
u'oma': 9699,
u'bil': 9697,
u'egr': 9678,
u'\u0151#p': 9673,
u'gyr': 9668,
u'suk': 9668,
u'b#k': 9667,
u'b\xe1#': 9658,
u'lob': 9656,
u'olj': 9646,
u'ogs': 9642,
u'nnt': 9638,
u'ds\xe1': 9628,
u'e#l': 9625,
u'#ra': 9611,
u'etr': 9611,
u'r\u0151f': 9607,
u'ajn': 9605,
u'\xedth': 9593,
u'ocs': 9558,
u'r#s': 9556,
u'\xfcli': 9553,
u't\xe1v': 9552,
u'eim': 9540,
u'tab': 9535,
u'n\u0151r': 9528,
u'jez': 9517,
u'l#r': 9514,
u'#f\u0151': 9472,
u'\u0151rz': 9461,
u'#\xfcl': 9450,
u'#ag': 9442,
u'ris': 9404,
u'rbe': 9392,
u'lgy': 9357,
u'\xf6nn': 9353,
u'i#o': 9350,
u'd\xe1l': 9337,
u'zve': 9325,
u'r\xe9n': 9309,
u's\xe1b': 9298,
u'lne': 9297,
u'his': 9297,
u'gy\xe1': 9277,
u'\xe1th': 9273,
u'gs\xfa': 9273,
u'#f\xfc': 9251,
u'z\xe9d': 9246,
u'ktu': 9238,
u'fiz': 9232,
u'j\xf3#': 9229,
u'szl': 9217,
u'kte': 9214,
u'f\xe9r': 9212,
u'lju': 9210,
u'f\xf6l': 9192,
u'#fr': 9187,
u'tna': 9159,
u'rt\xf3': 9157,
u'\xe1nd': 9152,
u'\xe9lt': 9137,
u'szu': 9135,
u't\u0151v': 9119,
u'ial': 9106,
u'\xfcnt': 9071,
u'l\u0151r': 9054,
u'm#i': 9051,
u'l\xf3k': 9044,
u'ak\xed': 9044,
u'z\xf6r': 9043,
u'v\xe9b': 9038,
u'k#d': 9037,
u'rr\xf3': 9036,
u'jla': 9030,
u'ela': 9029,
u'l\xe9k': 9029,
u'\xe9rn': 9028,
u'neh': 9023,
u'b#e': 9016,
u'apa': 9011,
u'anc': 9008,
u'ipa': 9003,
u'\xe9gh': 8990,
u'y#r': 8986,
u'rtj': 8985,
u'bek': 8984,
u'n\xe9p': 8969,
u'kih': 8964,
u'\u0151#t': 8957,
u'ngs': 8954,
u'\xf6m#': 8950,
u'egj': 8922,
u'pet': 8892,
u'ny\xe9': 8863,
u'z#s': 8855,
u't\u0171z': 8854,
u'y#p': 8854,
u'kr\xf3': 8848,
u't\u0151k': 8845,
u'l##': 8832,
u'tev': 8831,
u'nin': 8801,
u'nul': 8795,
u'ant': 8792,
u'm\xe9k': 8791,
u'ets': 8786,
u'nfo': 8777,
u'glo': 8773,
u'#b\xed': 8766,
u'man': 8759,
u'gi\xf3': 8755,
u'ezm': 8747,
u'kad': 8747,
u'z#n': 8742,
u'ruk': 8740,
u'\xf6r#': 8738,
u'bra': 8723,
u'emt': 8721,
u'#gl': 8698,
u'eh\xe9': 8693,
u'boc': 8647,
u'nal': 8624,
u'az\xe9': 8621,
u'z\xedr': 8593,
u'\xe1ni': 8593,
u'#ni': 8589,
u'vid': 8570,
u'r#n': 8560,
u'\xe1lk': 8559,
u'zi\xf3': 8547,
u'i#d': 8538,
u'j\xe9k': 8529,
u'b#a': 8522,
u'rtn': 8515,
u'tul': 8490,
u'bbs': 8467,
u'dul': 8462,
u'zbe': 8459,
u'sho': 8456,
u'rol': 8450,
u'\xf6lg': 8442,
u'k\xe1j': 8434,
u'udo': 8418,
u'm\xe1c': 8396,
u'usz': 8393,
u'h\xf6l': 8377,
u'rak': 8370,
u'sne': 8368,
u'\u0151bb': 8356,
u'\xe1su': 8352,
u'e##': 8331,
u'l\xf3g': 8322,
u'lt\xe1': 8321,
u'lyn': 8320,
u'mba': 8312,
u'#\xf6r': 8312,
u'\xedti': 8296,
u'l\xe9v': 8295,
u'd#e': 8283,
u'n\xe9n': 8275,
u'jek': 8266,
u'rth': 8265,
u'lyi': 8264,
u'\xf3#p': 8247,
u'jen': 8238,
u'ank': 8234,
u'\xe1gg': 8232,
u'adj': 8221,
u't#\xfa': 8204,
u'n#c': 8197,
u'ion': 8178,
u'it\u0171': 8177,
u'umo': 8175,
u'tsz': 8172,
u'yeg': 8166,
u'n\u0151k': 8164,
u'kok': 8160,
u'k\xedn': 8159,
u'bbr': 8144,
u'#\xe1g': 8143,
u'r#\xe9': 8143,
u'niu': 8141,
u'nyv': 8134,
u'\xf3gi': 8128,
u'\u0151#l': 8119,
u'lk\xe9': 8105,
u'z\xf6v': 8103,
u'tap': 8103,
u'zna': 8095,
u'onl': 8090,
u'som': 8089,
u't\xf6n': 8071,
u'z\xe1n': 8064,
u'\xedt\xf3': 8064,
u'gfo': 8058,
u'b\xedr': 8049,
u'\u0151k\xe9': 8047,
u'el\xed': 8042,
u'at\xed': 8040,
u'tru': 8022,
u'z\u0151k': 8019,
u'tt\xe9': 8018,
u'adi': 8012,
u'nn\xfc': 7987,
u'#ju': 7982,
u'\xe1gr': 7946,
u'nd\u0151': 7946,
u'\xe1nk': 7943,
u'dk\xed': 7939,
u'##s': 7937,
u'est': 7933,
u'ukt': 7928,
u'i\xf3n': 7925,
u'\xe1tl': 7919,
u'#\xe9g': 7917,
u'lv#': 7917,
u'kta': 7909,
u'ekv': 7907,
u'av\xe1': 7904,
u'#s\xfa': 7902,
u'yor': 7891,
u'sat': 7882,
u'k#\xf6': 7878,
u't#\xf6': 7875,
u'n#\xfa': 7868,
u'v\u0151b': 7852,
u'g\xfcn': 7849,
u'akt': 7845,
u'tuk': 7842,
u'd\xe9l': 7822,
u'zan': 7817,
u'nd\xf3': 7814,
u'\u0151ga': 7801,
u'ek\xfc': 7791,
u'\u0151#\xe9': 7790,
u'r\u0171#': 7789,
u'niz': 7784,
u'\u0151ss': 7782,
u'kr\u0151': 7779,
u'spo': 7774,
u'\xf6z\u0151': 7769,
u'\u0171en': 7754,
u'mme': 7754,
u'jan': 7746,
u'll\xf3': 7739,
u'\u0151t#': 7737,
u'\xe9pz': 7736,
u'kho': 7736,
u'r#t': 7733,
u'gyz': 7726,
u'yag': 7725,
u'\xe1ja': 7718,
u'\xe1ma': 7714,
u'rba': 7707,
u'g\xfa#': 7698,
u'z\u0151g': 7686,
u'n#g': 7686,
u'agj': 7672,
u'adt': 7671,
u'abi': 7657,
u'ny\xe1': 7637,
u'chn': 7614,
u'\xe1g\xfa': 7610,
u'yar': 7592,
u's\xe9v': 7583,
u'atv': 7576,
u'laj': 7576,
u't\xe1b': 7572,
u'lk\xf6': 7554,
u'\xe9rv': 7544,
u'tec': 7534,
u'nyb': 7527,
u'api': 7527,
u'r\xe9t': 7514,
u's\xe9b': 7512,
u't\xe1j': 7512,
u'fra': 7511,
u'ye#': 7510,
u'efe': 7510,
u'kes': 7504,
u'jto': 7501,
u'#\xedr': 7500,
u'z\xe9k': 7499,
u'v\xe9k': 7481,
u'\xe9nk': 7471,
u'fes': 7469,
u'akr': 7459,
u'g\xfcg': 7457,
u'gor': 7450,
u'hos': 7444,
u'egb': 7443,
u'av\xed': 7435,
u'\xe1#a': 7430,
u'n\xfcl': 7405,
u'rke': 7401,
u'h\xe1r': 7388,
u'z\xe1j': 7380,
u'ntb': 7364,
u'lyb': 7363,
u'gbe': 7360,
u'e#j': 7355,
u'zt\xfc': 7346,
u'kr\xe1': 7317,
u'agg': 7309,
u'nyk': 7295,
u'ife': 7294,
u'e#b': 7292,
u'enz': 7292,
u'fia': 7291,
u'ys\xe1': 7290,
u'z#t': 7287,
u'jut': 7284,
u'reg': 7280,
u'riz': 7277,
u'ref': 7276,
u'd\u0151s': 7272,
u'j\xf3l': 7268,
u'zom': 7266,
u'dj\xe1': 7261,
u'k\xe1t': 7259,
u'gun': 7257,
u'nki': 7252,
u'nt\xe1': 7251,
u'mmi': 7246,
u's\xe1h': 7239,
u'i\xf3t': 7234,
u'zko': 7219,
u'l\xe1b': 7211,
u'l\xe9#': 7208,
u'tt\xfc': 7197,
u'ahe': 7193,
u'i\xf3b': 7193,
u'tus': 7186,
u'ahh': 7174,
u'hho': 7159,
u's\xe9h': 7157,
u'kut': 7156,
u'\xf6ny': 7149,
u'onn': 7143,
u'eh\xe1': 7138,
u's\xfck': 7133,
u'aci': 7129,
u'hi\xe1': 7127,
u'\xe1mu': 7117,
u'l\xf3b': 7104,
u'ns\xe9': 7095,
u'\xf3#b': 7095,
u'gei': 7093,
u'ldo': 7085,
u'l\u0151z': 7071,
u'oll': 7070,
u'#\xfcz': 7061,
u'ig\xe9': 7055,
u'dve': 7054,
u'zs\xe1': 7049,
u'til': 7048,
u'aro': 7035,
u'zig': 7026,
u'sul': 7020,
u'\xe1sk': 7019,
u'pas': 7017,
u'\u0151fe': 7007,
u'#h\xf6': 7000,
u'fek': 6995,
u'i\xf3j': 6983,
u'lki': 6982,
u'g#j': 6979,
u'tv\xe9': 6972,
u'l\xe9n': 6966,
u'yol': 6957,
u'ztr': 6955,
u'li\xf3': 6955,
u'nl\u0151': 6953,
u'roj': 6951,
u'r#b': 6947,
u'egg': 6944,
u'\xedt#': 6934,
u'ius': 6931,
u'nd\xed': 6928,
u'es\xe9': 6921,
u'tur': 6915,
u'\xe1ka': 6913,
u'zhe': 6907,
u'\u0151#j': 6900,
u'gv\xe1': 6895,
u'r\xf6k': 6882,
u'or\xfa': 6876,
u'kev': 6872,
u'iak': 6859,
u'zpo': 6854,
u'\xe9s\xfc': 6853,
u'bar': 6852,
u's#\xf6': 6843,
u'\xfcln': 6842,
u'tum': 6840,
u'hiv': 6840,
u'\xfcze': 6831,
u't\xfar': 6830,
u'm#c': 6806,
u'rro': 6795,
u'kci': 6788,
u'dar': 6784,
u'b#s': 6773,
u'fok': 6772,
u'n\u0151s': 6765,
u'y\xe9b': 6762,
u'\xe9p\xed': 6751,
u'\xedt\u0151': 6750,
u'hes': 6748,
u'agu': 6745,
u'm#n': 6744,
u'gle': 6742,
u'oje': 6739,
u'in\u0151': 6739,
u'#sp': 6739,
u'\u0151z\u0151': 6738,
u'odi': 6738,
u'cik': 6734,
u'\xf3i#': 6727,
u'tr\xe1': 6723,
u'lj\xfc': 6715,
u'ut\xf3': 6697,
u'gez': 6692,
u'\u0151#i': 6686,
u'lva': 6682,
u'nk\xe9': 6678,
u'sun': 6673,
u'lun': 6672,
u'\xe9v\u0151': 6669,
u'jta': 6669,
u'yve': 6660,
u'gyn': 6659,
u'\u0151#v': 6657,
u'sm\xe9': 6656,
u'v\xf6z': 6655,
u'\xe9rs': 6651,
u'lmo': 6647,
u'onk': 6641,
u'm\xe1j': 6638,
u't\xe9m': 6632,
u'kib': 6629,
u'\xf3#n': 6622,
u'#hu': 6618,
u'inn': 6617,
u'ron': 6616,
u'zti': 6614,
u'\xf3s\xed': 6613,
u'ikk': 6607,
u'z\xe9p': 6588,
u'ral': 6583,
u'\xf6zv': 6578,
u'gta': 6570,
u'ale': 6569,
u'in#': 6568,
u'id\xe9': 6562,
u'\xfcdv': 6562,
u'dv\xf6': 6561,
u'g#l': 6560,
u'\xe1m#': 6557,
u'ob\xe1': 6555,
u'\u0171z\xe9': 6554,
u'bor': 6542,
u'dor': 6539,
u'lvi': 6535,
u'mal': 6520,
u't\xf3b': 6517,
u'\xf6re': 6498,
u'mot': 6497,
u'rea': 6486,
u'z\xe1c': 6481,
u'l\xfcg': 6476,
u'##k': 6475,
u'zum': 6471,
u'pri': 6463,
u'#\u0151k': 6462,
u't\xe9l': 6460,
u'\xf3va': 6458,
u'i\xe1b': 6451,
u'#kr': 6447,
u't\u0171#': 6442,
u'#f\xf6': 6440,
u'ksz': 6440,
u'gs\xe9': 6410,
u'l#\xf6': 6397,
u'ltu': 6386,
u'tei': 6382,
u'm\u0171v': 6380,
u'pot': 6379,
u'sas': 6379,
u'mes': 6372,
u'\xf3ta': 6371,
u'amb': 6364,
u'ggy': 6360,
u'kt\xed': 6359,
u'tl\xe1': 6357,
u'i\xe1r': 6355,
u'nol': 6350,
u'#\xfat': 6349,
u's\xe1k': 6349,
u'tt\xe1': 6346,
u'yug': 6343,
u'lm\xfa': 6341,
u'k\xe1n': 6335,
u'in\xe1': 6335,
u'kav': 6332,
u'ira': 6331,
u'orb': 6320,
u'\xe1sh': 6316,
u'tj\xfc': 6314,
u'sko': 6304,
u'\u0151#o': 6302,
u'#k\xe1': 6300,
u'##c': 6293,
u'aco': 6292,
u'ste': 6276,
u'poz': 6272,
u'p\xe1b': 6269,
u'guk': 6268,
u'i\xfck': 6263,
u'v\xedz': 6260,
u'm\xe1t': 6257,
u'tfo': 6247,
u'j\xe9n': 6245,
u'z\xedn': 6243,
u'egl': 6243,
u'lm\xe1': 6230,
u'ni\xfc': 6205,
u'nn\xe9': 6204,
u'zun': 6196,
u'yba': 6194,
u'\xe9l\xe9': 6190,
u'\xedr\xf3': 6188,
u'yit': 6177,
u'pok': 6173,
u'csi': 6168,
u'd#m': 6158,
u'i\xe1k': 6155,
u'\xedtv': 6154,
u'\xf6zb': 6150,
u'oso': 6136,
u'ttu': 6136,
u'doz': 6135,
u'\xe9ma': 6134,
u'amp': 6131,
u'gym': 6126,
u'nyz': 6126,
u'l\xfcn': 6117,
u'm#b': 6115,
u'nov': 6111,
u'uga': 6108,
u'\xe9gg': 6093,
u'pr\xf3': 6093,
u'ev\xe1': 6088,
u'\u0151#n': 6081,
u'cha': 6073,
u'uln': 6073,
u'i\xe1t': 6072,
u'yem': 6052,
u'#v\xed': 6051,
u'#gr': 6048,
u'org': 6048,
u'lyr': 6047,
u'riu': 6047,
u'dej': 6043,
u'\xe1sp': 6042,
u'adh': 6039,
u'omm': 6037,
u'z\xf6k': 6037,
u'm\u0171#': 6023,
u'hit': 6023,
u'ndj': 6020,
u'kei': 6016,
u'\xf3bb': 6011,
u'aik': 6009,
u's\xfcn': 6007,
u'kan': 5994,
u'\xf3di': 5987,
u'n#\xf6': 5982,
u'erb': 5973,
u'dna': 5964,
u'\xf6ne': 5962,
u'##h': 5951,
u'l#\xe1': 5949,
u'edn': 5939,
u'#ip': 5936,
u'ar#': 5936,
u'#t\u0171': 5934,
u'kri': 5923,
u'lid': 5921,
u'r\xf3#': 5914,
u'#tr': 5910,
u'#b\xfc': 5907,
u'n\xe1n': 5902,
u'z\xe9t': 5886,
u'g\xfcl': 5886,
u'gy\u0151': 5884,
u'ur\xe1': 5883,
u'uto': 5882,
u'iuk': 5877,
u'##p': 5875,
u'rne': 5869,
u'ndt': 5867,
u'\xf3#r': 5859,
u'zit': 5854,
u'zt\xf6': 5850,
u'ibo': 5848,
u'omb': 5846,
u'nl\xf3': 5843,
u'nyl': 5835,
u'gje': 5827,
u'st\xe1': 5816,
u'eth': 5808,
u'y\u0151z': 5788,
u'i\xe9r': 5785,
u'iv\xe1': 5781,
u'\xf3ka': 5780,
u'#os': 5765,
u'lv\xe9': 5763,
u'ula': 5757,
u'yun': 5735,
u'pel': 5725,
u'azn': 5715,
u'r\xe1b': 5715,
u'\u0151i#': 5709,
u'##t': 5699,
u'l\u0151i': 5699,
u'ium': 5689,
u'kah': 5682,
u't\u0171n': 5672,
u'\xe1rd': 5662,
u'gar': 5662,
u'r\xf3b': 5659,
u'\xf6rv': 5646,
u'\xfcl\u0151': 5641,
u'ono': 5639,
u'\xe9v\xe9': 5637,
u'uka': 5632,
u'me#': 5630,
u'l#p': 5630,
u'sk\xe9': 5628,
u'g#b': 5620,
u'erk': 5618,
u'#br': 5610,
u'p\xfcl': 5609,
u'\xe9zi': 5609,
u'ac#': 5607,
u'oda': 5603,
u'ak\xe9': 5603,
u'asa': 5603,
u'y#\xfa': 5600,
u'og\xe1': 5590,
u'nyn': 5589,
u'eik': 5588,
u'ngo': 5584,
u'um#': 5583,
u'log': 5581,
u'b#t': 5580,
u'\u0151#b': 5575,
u'\xe9sn': 5570,
u'omp': 5559,
u's#\xfc': 5553,
u'jn\xe1': 5547,
u'ap\xed': 5543,
u'z\xe1f': 5540,
u'\xe1rn': 5536,
u'rej': 5535,
u'r\xe1t': 5534,
u'ozd': 5528,
u'gyu': 5519,
u'usa': 5515,
u'#at': 5513,
u'koh': 5513,
u'\xe1ts': 5509,
u'\xe9rh': 5504,
u'kb\xf3': 5502,
u'\xe1sn': 5500,
u'ml\xe9': 5500,
u'ddi': 5499,
u'\xf3#\xe1': 5499,
u'ock': 5494,
u'g#\xe1': 5491,
u'b\u0171n': 5489,
u'koc': 5488,
u'm\xe1l': 5483,
u'z\xf6l': 5477,
u'\xe9sh': 5472,
u'\xe9ze': 5472,
u'y\xe1r': 5471,
u'g\u0171#': 5471,
u'#b\u0171': 5470,
u'sz\xfa': 5469,
u'nes': 5457,
u'#b\xe9': 5455,
u'edv': 5454,
u'ez\xe1': 5446,
u'\xfcld': 5443,
u'ih\xed': 5443,
u'tin': 5443,
u'##f': 5436,
u'b#f': 5433,
u'ved': 5426,
u'\xfa#t': 5414,
u'azs': 5407,
u'acs': 5401,
u'gho': 5398,
u'ak\xe1': 5392,
u'l\u0151n': 5387,
u'hno': 5385,
u'\xe1z\xe1': 5383,
u'rd#': 5380,
u'\xf6lc': 5377,
u'r#v': 5373,
u'\u0151te': 5359,
u'#no': 5347,
u'\xe9rz': 5331,
u'\xe1f\xe9': 5328,
u'\xe9gz': 5320,
u'm#j': 5314,
u't\xf6t': 5301,
u'onf': 5295,
u'k#u': 5294,
u'is\xe9': 5291,
u'nva': 5289,
u'ham': 5287,
u'dhe': 5286,
u'y#\xe1': 5282,
u'kr\xe9': 5270,
u'az\xf3': 5260,
u'k\xe1k': 5255,
u'ifi': 5241,
u'\xfclv': 5238,
u'\xf3ja': 5237,
u'i#\xf6': 5237,
u'r\xf3s': 5233,
u'tsa': 5232,
u'\xe9ln': 5232,
u'lnu': 5225,
u'\xe1lh': 5216,
u'ras': 5215,
u'n\xedt': 5210,
u'rag': 5206,
u'e#p': 5199,
u'ntu': 5193,
u'ida': 5188,
u'sr\xf3': 5182,
u'e#c': 5182,
u'n\xe9#': 5174,
u'\xe1j\xe9': 5171,
u'ivi': 5169,
u'kt\xf3': 5163,
u'\xe9gt': 5157,
u'nai': 5146,
u'mi\xe9': 5137,
u'\xe9lo': 5137,
u'l#d': 5137,
u'ukr': 5129,
u'\xe1rb': 5114,
u'y##': 5110,
u'sr\u0151': 5110,
u'z#f': 5110,
u'per': 5109,
u'#au': 5104,
u'g\xe1s': 5101,
u'e#\xe1': 5092,
u'##n': 5084,
u'\xf3b\xe1': 5083,
u'zef': 5083,
u'kv\xe9': 5079,
u'ad\xe9': 5078,
u'\xf3#l': 5077,
u'nno': 5073,
u'y#c': 5067,
u'jra': 5063,
u'yab': 5060,
u'msz': 5057,
u'ruh': 5055,
u'#fa': 5052,
u'ya#': 5050,
u'i##': 5039,
u'rei': 5036,
u'ot\xe1': 5035,
u'er\xe1': 5031,
u'#\xfcd': 5029,
u'###': 5015,
u'ejt': 5013,
u'b#\xe9': 5012,
u'aku': 5007,
u'#ot': 5006,
u'ns\xfa': 5005,
u'e#r': 5004,
u'uh\xe1': 5003,
u'z\xfa#': 5001,
u'apu': 5000,
u'tr\xf3': 4997,
u'alu': 4989,
u'ik\xf6': 4989,
u'rio': 4989,
u'gvi': 4970,
u'\xe9ts': 4968,
u'iem': 4967,
u'atr': 4967,
u'ed\u0151': 4964,
u'h\xe1l': 4963,
u'\xfarn': 4959,
u'ath': 4953,
u'\xe1ju': 4952,
u'khe': 4946,
u'yza': 4942,
u'mma': 4940,
u'fri': 4932,
u'yk\xe9': 4927,
u'arc': 4926,
u'dia': 4921,
u'\u0151k\xf6': 4916,
u'd#k': 4904,
u'\u0171ve': 4903,
u'i#\xfc': 4901,
u'der': 4900,
u'\xe9#a': 4899,
u'llo': 4890,
u'egm': 4887,
u'nth': 4880,
u'da#': 4878,
u'\xfajr': 4877,
u'#af': 4869,
u'ior': 4860,
u'erh': 4857,
u'uso': 4857,
u'y\xe1s': 4855,
u'b#h': 4851,
u'pul': 4848,
u'l\xf3j': 4844,
u'tse': 4843,
u'dun': 4839,
u'jun': 4838,
u'k\xe1z': 4836,
u'los': 4829,
u'\xe9v#': 4827,
u'z\xe9n': 4827,
u'ume': 4820,
u'n\xe9r': 4820,
u'tsu': 4819,
u'fer': 4818,
u'\xf6r\xe9': 4814,
u'oku': 4814,
u'nt\u0171': 4811,
u'zd\xed': 4802,
u'pan': 4796,
u'yle': 4782,
u's\xe9k': 4777,
u'b\xfcn': 4775,
u'li\xe1': 4770,
u'kum': 4766,
u'lh\xed': 4766,
u't\xe9v': 4764,
u'zv\xe9': 4755,
u'rtu': 4754,
u'nel': 4748,
u'\xf3ra': 4746,
u'din': 4745,
u'ck\xe1': 4744,
u'y#\xf6': 4739,
u'nok': 4738,
u'lev': 4737,
u'rga': 4734,
u'okh': 4733,
u'\xe9gb': 4716,
u'ano': 4713,
u'gio': 4705,
u'ev\u0151': 4704,
u'z\xe1k': 4702,
u'rd\xed': 4696,
u'l\u0151e': 4691,
u'omi': 4680,
u'\xe1ld': 4677,
u'y#d': 4669,
u'b\xe9k': 4667,
u'r#f': 4664,
u'emr': 4654,
u'#r\xf6': 4648,
u'ej\xe9': 4648,
u'zz\xfc': 4648,
u'a#\xe1': 4644,
u'era': 4641,
u'\xf3v\xe1': 4639,
u'szh': 4637,
u'k\xe1l': 4632,
u'\xe1\xe9r': 4630,
u'gny': 4629,
u'g\xe1z': 4626,
u'\xf6zp': 4622,
u'ci#': 4620,
u'\xfcke': 4620,
u'#h\xe9': 4618,
u'kir': 4610,
u'b\xedz': 4605,
u'rez': 4600,
u'gis': 4599,
u'lga': 4597,
u'\xe9z#': 4596,
u'emi': 4595,
u'rmo': 4591,
u'pze': 4589,
u'mod': 4582,
u'lot': 4575,
u'r\xf6g': 4566,
u'\xe1ta': 4565,
u'd\xedj': 4559,
u'pek': 4551,
u'yna': 4550,
u'aln': 4540,
u'\xf6z#': 4540,
u'ikt': 4536,
u'l\xf3a': 4533,
u'l#g': 4531,
u'\xfar\xe1': 4529,
u'r\xf6m': 4527,
u'an\xe9': 4524,
u'\u0151ny': 4522,
u'elo': 4514,
u'#pl': 4508,
u'gne': 4503,
u'ljo': 4503,
u'g\xe1b': 4500,
u'm#r': 4496,
u'\u0151je': 4492,
u'la#': 4491,
u'pir': 4487,
u'mra': 4483,
u'\xe1lo': 4483,
u'ser': 4466,
u'edd': 4465,
u'r#j': 4461,
u'mcs': 4460,
u'm#p': 4456,
u'zd\xe9': 4452,
u'\u0151fo': 4452,
u'ing': 4451,
u'\xe1nv': 4449,
u'yai': 4447,
u'ate': 4446,
u'#bu': 4442,
u'#t\u0151': 4441,
u'r\u0151#': 4439,
u'ik\xe9': 4437,
u'n\xf6m': 4432,
u'g##': 4428,
u'zga': 4427,
u'b\xe9#': 4426,
u'udn': 4421,
u'l\xf6l': 4412,
u'odn': 4410,
u'air': 4410,
u'z#l': 4403,
u'\xfal#': 4397,
u'g\xf3#': 4397,
u't\xf6l': 4396,
u'aut': 4392,
u'sk\xf6': 4390,
u'\u0151le': 4389,
u'\xe9gn': 4387,
u'she': 4382,
u'pus': 4378,
u'\xe1tn': 4374,
u'\u0151d\u0151': 4372,
u'dne': 4369,
u'etj': 4365,
u'j\xf6n': 4364,
u'\xfcrg': 4355,
u'nd\xe9': 4355,
u'y\xe9n': 4355,
u'add': 4349,
u'ztu': 4349,
u's\xfcr': 4348,
u'#d\xe9': 4347,
u'r\xe9v': 4347,
u'ieg': 4345,
u'\xf6vi': 4343,
u'#m\xfa': 4343,
u'\xf6z\xfc': 4335,
u'd\u0151k': 4318,
u'r\xf6v': 4310,
u'mec': 4302,
u'm\xedt': 4302,
u'h\xf3n': 4300,
u'lde': 4299,
u'rtv': 4297,
u'spe': 4288,
u'afr': 4288,
u'h\xe9t': 4287,
u'#h\xf3': 4282,
u'zzu': 4274,
u'ssu': 4270,
u'\xe9#v': 4268,
u'ast': 4265,
u'ly\xe1': 4262,
u'uro': 4253,
u'\xedn\xe1': 4253,
u'bol': 4243,
u'vbe': 4237,
u'd\xf3k': 4235,
u'zre': 4228,
u'arj': 4219,
u'\xf3l\xed': 4218,
u'eru': 4214,
u'g#r': 4208,
u'rdu': 4203,
u'r\u0171s': 4201,
u'gyb': 4197,
u'nkb': 4193,
u'gyv': 4192,
u'eir': 4190,
u'l\xe1r': 4190,
u'\xe9k\xe9': 4190,
u'\xf3sz': 4188,
u'fr#': 4182,
u'##b': 4182,
u'emc': 4177,
u'el\xf6': 4170,
u'lt\xf6': 4169,
u'm\u0171e': 4165,
u'nkn': 4164,
u't#u': 4155,
u'yis': 4155,
u'csu': 4153,
u'#g\xe1': 4151,
u'ulc': 4151,
u'z\xf3k': 4150,
u'ezh': 4145,
u'tva': 4141,
u'jak': 4141,
u'#aj': 4141,
u'\xf6ko': 4137,
u'an\xfa': 4136,
u'gjo': 4129,
u'm#\xf6': 4129,
u'vi#': 4122,
u'\xf6nz': 4120,
u'n\xf6z': 4114,
u'\xe9rj': 4111,
u'raj': 4110,
u'#s\xfc': 4101,
u's\u0151s': 4101,
u'r\xe9b': 4098,
u'ld\xf6': 4095,
u'oh\xe9': 4090,
u'e\xe1l': 4087,
u'yir': 4087,
u'#t\xfc': 4084,
u'yz\u0151': 4083,
u'gem': 4079,
u'ecs': 4079,
u'yed': 4078,
u'szr': 4074,
u'\xe1nl': 4072,
u'##d': 4069,
u'\xfclm': 4067,
u'a#o': 4066,
u'#h\xed': 4062,
u'lin': 4061,
u'\u0151t\xe1': 4061,
u'lut': 4059,
u'onz': 4053,
u'#im': 4053,
u't\xf3i': 4049,
u'had': 4039,
u'\xe9ni': 4037,
u'kiz': 4037,
u'j\xf3v': 4034,
u'a#\xfa': 4027,
u'\xf3#g': 4023,
u'cs\xfa': 4022,
u'r\u0171e': 4020,
u'\xe1m\xed': 4017,
u'\xe9me': 4009,
u'kk\xe9': 4009,
u'gk\xfc': 4008,
u'ri\xe1': 4008,
u'lln': 4005,
u'l\xe1d': 4005,
u'it\xed': 3996,
u'v#e': 3992,
u'k\xeds': 3990,
u'\xeds\xe9': 3990,
u'ekh': 3983,
u'cok': 3979,
u'l\u0151m': 3979,
u'igo': 3978,
u'\xf6k\xe9': 3971,
u'nko': 3969,
u'jdo': 3969,
u'#za': 3964,
u'sup': 3954,
u'nn\xe1': 3954,
u'\xe1vo': 3951,
u'g\u0151s': 3948,
u'olv': 3947,
u'i#\xfa': 3944,
u'nve': 3935,
u'oms': 3933,
u'znu': 3931,
u'up\xe1': 3928,
u'\xe1rh': 3927,
u'kid': 3924,
u'#ru': 3904,
u'\u0151mo': 3901,
u'tez': 3901,
u'v\xe1c': 3887,
u'b#v': 3884,
u'lys': 3883,
u'ef\xfc': 3882,
u'tit': 3881,
u'r#l': 3879,
u'zno': 3876,
u'v\xe1#': 3874,
u'aha': 3873,
u'epl': 3873,
u'z\xedv': 3871,
u'l\xf3z': 3869,
u'd\xf3n': 3868,
u'iha': 3867,
u'pl#': 3858,
u'orv': 3855,
u'ian': 3854,
u'lib': 3853,
u'ozg': 3848,
u'or\xed': 3846,
u'ode': 3845,
u'eta': 3844,
u'gj\xe1': 3843,
u'g\xe9p': 3841,
u'\xe1#v': 3839,
u'\xe9l\u0151': 3837,
u'b#p': 3829,
u'ntn': 3826,
u'g\xe9l': 3818,
u'l#c': 3817,
u'e\xe9r': 3811,
u'\xe9di': 3810,
u'n#u': 3808,
u'\u0151#r': 3804,
u'\xf3##': 3794,
u'gg\xe9': 3793,
u'#\xf3r': 3789,
u'edh': 3787,
u'tt\xf3': 3786,
u'g#p': 3782,
u'jed': 3780,
u'oha': 3777,
u'gke': 3776,
u'\xe9rk': 3775,
u'l\xfa#': 3770,
u'as\xed': 3767,
u'iad': 3760,
u'l#o': 3759,
u'\xe1#t': 3758,
u'#\xf3t': 3756,
u'szv': 3752,
u'g\xf6r': 3751,
u'rab': 3748,
u'a#u': 3743,
u'\xfacs': 3740,
u'\xe1t\xf3': 3737,
u'ael': 3733,
u'mpr': 3733,
u'g#c': 3730,
u'a#\xf6': 3729,
u'mna': 3719,
u'pja': 3712,
u'z\xf3a': 3712,
u'\xe1bo': 3710,
u's\xfac': 3710,
u't\xf3z': 3709,
u'yte': 3699,
u'r\xe1r': 3696,
u'ogu': 3695,
u'n\xe1s': 3692,
u'ntr': 3688,
u'\u0171s\xed': 3681,
u'ogn': 3675,
u'rha': 3672,
u'ngy': 3669,
u'uma': 3667,
u'g\xfck': 3657,
u'nom': 3656,
u'b\xedt': 3656,
u'zus': 3656,
u'\xe1tt': 3653,
u'kem': 3652,
u'lbe': 3652,
u'alj': 3652,
u'tiz': 3650,
u'jte': 3640,
u'tro': 3637,
u'#z\xe1': 3636,
u'\xf3#c': 3634,
u'z\xe1#': 3633,
u'rle': 3630,
u'ria': 3627,
u'gze': 3618,
u'\xfcl\xfc': 3618,
u'dje': 3618,
u'e#g': 3618,
u'aer': 3614,
u'rju': 3614,
u'nzi': 3612,
u'neg': 3609,
u'zis': 3608,
u'\xe9rl': 3606,
u'd\xf6t': 3605,
u'\xf3n#': 3600,
u'pos': 3597,
u'\xe1tr': 3589,
u'zul': 3584,
u'yib': 3583,
u'#ge': 3582,
u'ols': 3582,
u'kas': 3561,
u'akn': 3557,
u'\xe9li': 3554,
u'kae': 3553,
u'\xfclj': 3552,
u'b\xe1n': 3552,
u'ido': 3544,
u'nor': 3543,
u'iba': 3541,
u'm\xedg': 3540,
u'm#\xe1': 3539,
u'og\xf3': 3536,
u'pt#': 3535,
u'\xedg#': 3535,
u'\xf3za': 3533,
u'ts\xfc': 3533,
u'rr\u0151': 3532,
u'daz': 3530,
u'sch': 3529,
u'rmi': 3528,
u'#\xf6t': 3527,
u'\xf3kn': 3523,
u'il#': 3518,
u'mak': 3514,
u'#ek': 3514,
u'\xe9p#': 3513,
u's\xf6n': 3512,
u'akc': 3512,
u'l\u0151\xed': 3506,
u'ilt': 3499,
u'\xf3gy': 3497,
u'k\xfcn': 3497,
u'#g\xf6': 3492,
u'al\xfa': 3488,
u'kt\xfa': 3485,
u'pez': 3485,
u'elz': 3477,
u'\xe9vi': 3472,
u'ull': 3471,
u'nz\xe9': 3469,
u'z\xe1z': 3468,
u'tb\xf3': 3463,
u'nfl': 3456,
u'\xe9d#': 3454,
u'uda': 3453,
u'ep\xfc': 3452,
u'zl\xf6': 3448,
u'##v': 3448,
u'r#\xfa': 3448,
u'rts': 3443,
u'l\xf6m': 3440,
u'var': 3438,
u'\xf3d\xf3': 3437,
u'gy\u0171': 3433,
u'nit': 3429,
u'it\xe9': 3427,
u'elg': 3425,
u'edj': 3422,
u'deg': 3422,
u'gin': 3420,
u'lej': 3419,
u'b#l': 3416,
u'a#z': 3411,
u'gak': 3409,
u'ile': 3408,
u'\xe9gs': 3400,
u'\u0151#c': 3396,
u'al\xe9': 3394,
u'c\xedm': 3393,
u'\xedna': 3392,
u'#ed': 3386,
u'v#a': 3386,
u'omr': 3382,
u'ece': 3382,
u'sd#': 3379,
u'aiv': 3378,
u'ito': 3376,
u'rdi': 3373,
u'hhe': 3373,
u'ep\xe9': 3372,
u'isa': 3370,
u'pl\u0151': 3369,
u'ehh': 3368,
u'm\xe9b': 3361,
u'esn': 3357,
u'ndu': 3356,
u'\u0151\xedr': 3355,
u'lly': 3348,
u'zeh': 3348,
u'mre': 3346,
u'\u0171ni': 3346,
u'f\u0151#': 3346,
u'afo': 3344,
u'zd\xe1': 3342,
u'hor': 3340,
u'oba': 3339,
u'j\xe9t': 3338,
u'\xe1rj': 3338,
u'lor': 3337,
u'\xf6nk': 3331,
u'gt\xf6': 3329,
u'gk\xe9': 3329,
u'hul': 3325,
u'\xedr#': 3324,
u'\xe9g\xe1': 3321,
u't#\xed': 3319,
u'r\xe1m': 3317,
u'\xe1tf': 3316,
u'lep': 3315,
u'kto': 3314,
u'd\xfcl': 3308,
u'y\xe9t': 3305,
u'iel': 3302,
u'cst': 3295,
u'f\u0151k': 3294,
u'lac': 3293,
u'elb': 3290,
u'ro#': 3287,
u'\xe1gh': 3285,
u'nzu': 3283,
u'ema': 3282,
u'dte': 3280,
u'run': 3278,
u'\xe1v\xfa': 3277,
u'\xe1gu': 3271,
u'env': 3269,
u'#sv': 3261,
u'rge': 3258,
u'mt\xe9': 3258,
u'ozh': 3256,
u'v\xfa#': 3256,
u'ije': 3255,
u'isk': 3253,
u'r\xe1k': 3251,
u'ym\xe1': 3248,
u'fik': 3245,
u'm##': 3242,
u'osn': 3241,
u'bio': 3237,
u'kij': 3235,
u'r#\xe1': 3235,
u'bsz': 3233,
u'dke': 3227,
u'b#i': 3225,
u'tt\u0151': 3223,
u'koo': 3223,
u'i\xf3r': 3221,
u'nfe': 3220,
u'n\xe1#': 3217,
u'vve': 3214,
u'bb\xe9': 3211,
u'rt\u0151': 3207,
u'j#t': 3201,
u'rj\xfc': 3199,
u'ora': 3196,
u't#\xfc': 3194,
u'\u0151ri': 3188,
u'ns#': 3187,
u'her': 3183,
u'che': 3183,
u'ime': 3183,
u'oor': 3179,
u'b\u0151v': 3178,
u's\xfa#': 3177,
u'id#': 3175,
u'\u0171#k': 3173,
u'v#m': 3164,
u'\xf3ds': 3161,
u'agr': 3159,
u'z#j': 3156,
u'\xe9gy': 3155,
u'gr\xf3': 3153,
u'akb': 3149,
u'\xf3ri': 3148,
u'haz': 3145,
u'y\xfcn': 3145,
u'\xe1z#': 3143,
u'god': 3143,
u'z#\xed': 3141,
u'psz': 3140,
u'\xedzu': 3138,
u'rm\u0171': 3135,
u'nze': 3133,
u'pil': 3133,
u'\xfaja': 3124,
u'pte': 3122,
u'r\xfa#': 3118,
u'tho': 3115,
u'ods': 3113,
u'\xf3dj': 3109,
u'\xe1ns': 3109,
u'\xf6r\u0171': 3108,
u'p#a': 3105,
u'ome': 3104,
u'#z\xf6': 3104,
u'\u0151z\xe9': 3103,
u'eg\xfa': 3102,
u'gyh': 3100,
u'rt\xfc': 3100,
u'faj': 3098,
u'aj\xf3': 3094,
u'ls\xf3': 3093,
u'lt\xfc': 3092,
u'l#\xfa': 3090,
u'lef': 3088,
u'r\xe1j': 3088,
u'rja': 3087,
u'yn\xf6': 3084,
u'dt#': 3080,
u'iza': 3074,
u'eks': 3072,
u'k\u0171#': 3071,
u'zn\xfc': 3071,
u'z##': 3068,
u'lj\xf6': 3068,
u'z\xf3t': 3066,
u'pz\xe9': 3066,
u'\xe1gs': 3065,
u'nhe': 3062,
u'g\xfaj': 3058,
u'jt\xf3': 3056,
u'pal': 3056,
u'rze': 3055,
u'nez': 3048,
u'mir': 3047,
u'azg': 3045,
u'yeb': 3045,
u'\xf3#d': 3037,
u'\xe9tl': 3030,
u'lei': 3029,
u'y\xfck': 3022,
u'n#\xfc': 3021,
u'enf': 3021,
u'##i': 3018,
u'imp': 3017,
u'rv\xe1': 3017,
u'ngb': 3016,
u'feg': 3008,
u'\xe9p\xfc': 3004,
u'#lo': 3004,
u'zuk': 3002,
u'#c\xed': 2999,
u'\u0151so': 2998,
u'rug': 2995,
u'r\xf3f': 2992,
u'ics': 2987,
u'ly\xe9': 2985,
u'vak': 2984,
u'ogv': 2981,
u'ln\xfc': 2979,
u'nst': 2976,
u'tme': 2975,
u'eun': 2971,
u'\xf3zk': 2970,
u'\xe9lv': 2969,
u'nk\xed': 2967,
u'\xe9r#': 2966,
u'rti': 2965,
u'vev': 2956,
u'eg\u0171': 2956,
u'ek\xf6': 2951,
u'\xe9g\u0171': 2950,
u'i\xe1v': 2948,
u'l\xe1#': 2946,
u'szp': 2945,
u'#eh': 2945,
u'\xe9gl': 2942,
u'erc': 2939,
u'\u0151#\xe1': 2935,
u'zz\xe9': 2930,
u'cio': 2928,
u'\xe9pj': 2927,
u'l\xf6k': 2926,
u'i\xe1j': 2926,
u'kam': 2925,
u'#b\u0151': 2922,
u'\xfcls': 2922,
u'obi': 2916,
u's\xf3#': 2916,
u'ggo': 2914,
u'erd': 2912,
u'gy\xf3': 2912,
u'ark': 2908,
u'\xfckr': 2906,
u'zil': 2896,
u's#\xed': 2894,
u'una': 2893,
u'gl\xe9': 2892,
u'ova': 2889,
u'd#t': 2889,
u'\u0151kn': 2885,
u'tn\xe1': 2884,
u'z\xf3b': 2884,
u'#iz': 2882,
u'kt\u0151': 2882,
u'k#\u0151': 2880,
u'rg\u0151': 2879,
u'#sc': 2877,
u'\xedci': 2877,
u'\xf3be': 2877,
u'llg': 2870,
u'z#b': 2867,
u'tnu': 2867,
u'led': 2864,
u'hum': 2864,
u'lt\xfa': 2863,
u'hib': 2863,
u'\xe1zt': 2861,
u'iah': 2861,
u'nyh': 2856,
u'\xe9b#': 2855,
u'\xe9k\u0171': 2842,
u'llj': 2839,
u'h\xe1b': 2838,
u'\xe9tf': 2832,
u'z#p': 2831,
u'\xe9lz': 2826,
u'dd#': 2824,
u'ote': 2819,
u'jab': 2815,
u'#co': 2814,
u'\xe1sd': 2814,
u't\u0151i': 2814,
u'lti': 2813,
u'\xf3#o': 2811,
u'e#\xfa': 2809,
u're\xe1': 2809,
u'b\xe1t': 2808,
u'kim': 2803,
u'yha': 2801,
u'\xfclh': 2800,
u'e#d': 2800,
u'yz\xe9': 2799,
u'#pu': 2799,
u'ndh': 2798,
u'iv\xe9': 2795,
u'dnu': 2793,
u'\xe9kk': 2792,
u'ltb': 2791,
u'v#s': 2788,
u'r\xe1g': 2788,
u'nyr': 2785,
u'r#r': 2783,
u'd\xf3s': 2781,
u'#\xedt': 2778,
u'er\xed': 2775,
u'rd\u0151': 2775,
u'\u0151j\xe9': 2772,
u'v\xe9v': 2772,
u'spa': 2771,
u'\u0151ne': 2769,
u'fli': 2767,
u'zed': 2763,
u'iaf': 2757,
u'lre': 2756,
u'\xe1t\xe9': 2754,
u'\xfati': 2753,
u'\xe1tu': 2752,
u'g\xe9b': 2751,
u'\xe1#h': 2742,
u'mig': 2741,
u'v\xe1t': 2740,
u's\xe9#': 2738,
u'##j': 2737,
u'\xe9sk': 2737,
u'evo': 2735,
u'mte': 2735,
u't\xedt': 2733,
u'lt\xf3': 2731,
u'g\xe9v': 2731,
u'zsi': 2729,
u'rv#': 2729,
u'v#k': 2728,
u'y\xe9k': 2726,
u'y\xf3g': 2724,
u'ir\u0151': 2723,
u'#\u0151s': 2723,
u'\xfa#k': 2723,
u'rev': 2723,
u'\xf6mm': 2721,
u'vei': 2720,
u'l\xe1m': 2720,
u'\xe1nn': 2717,
u'ld#': 2716,
u'aps': 2715,
u'\xf6d\u0151': 2713,
u'ni\xe1': 2710,
u'\xe1kr': 2709,
u'\xfara': 2708,
u'#et': 2705,
u'j\xfcn': 2704,
u'nts': 2702,
u'ust': 2702,
u'ss\xfc': 2701,
u'k\xe9l': 2700,
u'sar': 2699,
u'k\xe1s': 2697,
u'\xedn\u0171': 2696,
u'n\xe1r': 2695,
u'civ': 2694,
u'rl\xf3': 2694,
u'tny': 2691,
u'git': 2691,
u'\xe9ki': 2687,
u'\u0171#e': 2686,
u'jno': 2684,
u'zaj': 2683,
u'j\xe1v': 2679,
u'r\xe1#': 2678,
u'\u0151v\xed': 2675,
u's\u0151b': 2672,
u'\xe1gp': 2671,
u'rs#': 2670,
u'kil': 2670,
u'tm\xe1': 2669,
u'\xe9pn': 2663,
u'lov': 2658,
u'lig': 2658,
u'uba': 2656,
u'rae': 2649,
u'ozu': 2647,
u'dio': 2647,
u'zg\xe1': 2645,
u'apr': 2640,
u'l\u0151l': 2638,
u'\u0151#g': 2635,
u'apn': 2634,
u'rpo': 2633,
u'#uk': 2629,
u'pak': 2628,
u'\xf6t#': 2628,
u'kle': 2627,
u'et\xe1': 2625,
u'mbi': 2623,
u'im\xe1': 2623,
u'nl\xe1': 2623,
u'rbi': 2620,
u'm#o': 2620,
u'eag': 2617,
u'con': 2616,
u'szb': 2613,
u'g#o': 2611,
u'osk': 2610,
u'\xe1ha': 2610,
u'\xf3kr': 2604,
u'gan': 2600,
u'ktr': 2600,
u'bec': 2598,
u'd#s': 2598,
u'alh': 2597,
u'eg\u0151': 2596,
u'csf': 2595,
u'\xe9rf': 2593,
u'ml\xe1': 2592,
u'hon': 2591,
u'b#b': 2591,
u'zlo': 2591,
u'gpo': 2588,
u'\xe9vb': 2585,
u'mmu': 2585,
u'dei': 2585,
u'ln\xe9': 2585,
u'nkc': 2583,
u'v\xe1h': 2583,
u'lul': 2583,
u'j#k': 2581,
u'zaz': 2581,
u'gju': 2575,
u'\u0171#h': 2574,
u'le\xe9': 2567,
u'ok\xe1': 2567,
u'gb\xed': 2566,
u'dtu': 2566,
u'ny\u0171': 2565,
u'\xedrt': 2564,
u'd\u0151r': 2563,
u'r\u0151t': 2561,
u'\xe9l\xfc': 2554,
u't\xf6k': 2553,
u'\xe1#k': 2549,
u'u#k': 2548,
u'ekm': 2548,
u'pe#': 2547,
u'zra': 2543,
u'\u0151tl': 2543,
u'jts': 2541,
u'yia': 2539,
u'\xe9pt': 2534,
u'u\xe1l': 2533,
u'\xedja': 2531,
u'ebe': 2531,
u'aru': 2524,
u'djo': 2521,
u'az\xed': 2520,
u'kv\u0151': 2520,
u'nfr': 2520,
u'eiv': 2519,
u'pv\xe1': 2519,
u'#j\xfa': 2517,
u'nbs': 2513,
u'dec': 2511,
u'vre': 2506,
u'ug\xe1': 2504,
u'ajo': 2504,
u'mob': 2503,
u'd#h': 2501,
u'hen': 2499,
u'\u0171#m': 2498,
u'bia': 2494,
u'aia': 2493,
u'\xf6dn': 2492,
u'eld': 2491,
u'b#r': 2488,
u'\xe1tj': 2485,
u'y\xfal': 2481,
u'\xe1kk': 2480,
u'y\xe1t': 2479,
u'pp#': 2475,
u'ici': 2472,
u'dj\xe9': 2471,
u'ape': 2470,
u'zei': 2470,
u'odo': 2469,
u'\xe1ad': 2468,
u'#ca': 2466,
u'gv\xe9': 2466,
u'\xe9ps': 2465,
u'sle': 2464,
u'\xe1nj': 2457,
u'yra': 2455,
u'bri': 2454,
u'e#o': 2454,
u'\xf6nh': 2453,
u'lpo': 2452,
u'sso': 2452,
u'des': 2451,
u'ztj': 2448,
u'g#g': 2446,
u'v\u0151j': 2445,
u'\xf6zz': 2445,
u'\xedvn': 2442,
u'ekl': 2442,
u'd\xe1t': 2441,
u'\xe9zv': 2439,
u'z\xe9#': 2435,
u'nce': 2429,
u'\xe9lh': 2427,
u'\xe9#k': 2425,
u'd\u0151t': 2424,
u'rj\xe1': 2423,
u'\xf6zn': 2421,
u'bb\u0151': 2419,
u'sos': 2412,
u'edt': 2410,
u'jus': 2410,
u'km\xe9': 2408,
u'\xe9zn': 2406,
u'j\xf6t': 2404,
u'r\u0151p': 2402,
u'gg\u0151': 2402,
u'olh': 2401,
u'jul': 2397,
u'v#h': 2395,
u'uss': 2393,
u'\xe1gt': 2390,
u'pni': 2389,
u'otm': 2389,
u'nim': 2383,
u'k\xe9z': 2382,
u'od\xf3': 2382,
u'e#u': 2380,
u'\u0171#t': 2378,
u'#da': 2378,
u'sb\xe9': 2377,
u'm\xe1i': 2377,
u'ny\xf6': 2376,
u'y\xedl': 2373,
u'#ch': 2371,
u'ikn': 2371,
u'elu': 2370,
u'\u0151pi': 2369,
u'n\xfas': 2368,
u'gfi': 2367,
u'\xfa#e': 2367,
u'l\u0151b': 2366,
u'r#p': 2366,
u'#kl': 2363,
u'\xe9s\u0151': 2360,
u'di\xe1': 2359,
u'egp': 2358,
u'fed': 2358,
u'ulm': 2357,
u'#t\xed': 2357,
u'gyj': 2355,
u'seh': 2354,
u'rs\xe1': 2351,
u'mor': 2349,
u'enh': 2349,
u'utt': 2345,
u'u\xe1r': 2343,
u'ler': 2343,
u'#pt': 2343,
u'nd\xe1': 2340,
u'd\u0151b': 2340,
u'ap\xe1': 2339,
u'apt': 2338,
u'd#\xe9': 2336,
u'taz': 2336,
u'\xe9\xe9r': 2335,
u'en\xe1': 2335,
u'r##': 2333,
u'tj\xe9': 2327,
u'olu': 2327,
u'zba': 2324,
u'z\xf3d': 2323,
u'aib': 2322,
u'lon': 2320,
u'et\xed': 2316,
u'#r\xf3': 2310,
u'el\xe1': 2309,
u'kn\xe1': 2309,
u'#g\xe9': 2308,
u'gam': 2308,
u'igr': 2304,
u'sap': 2302,
u'onc': 2301,
u'\xfaju': 2299,
u'rfi': 2299,
u'k\xe1c': 2288,
u'ngz': 2286,
u'\xf6nl': 2285,
u'pi#': 2285,
u'\u0171n\xf6': 2284,
u'b#n': 2283,
u'le\xe1': 2281,
u'cs\xe9': 2280,
u'nf\xe9': 2280,
u'e#\xf6': 2279,
u'ihe': 2279,
u'\xe1\xedr': 2276,
u'\xe1zo': 2275,
u't\xe1z': 2275,
u'n\xe9g': 2274,
u'm\xe1b': 2272,
u'nma': 2271,
u'#fu': 2268,
u's\u0151t': 2266,
u'rco': 2266,
u'u#\xe9': 2266,
u'osu': 2264,
u'\xe9zs': 2264,
u'k#\xed': 2261,
u'\xf6go': 2261,
u'l\xe1\xed': 2260,
u'r\xe1z': 2259,
u'ias': 2257,
u'\xe9vv': 2257,
u'\xe9nn': 2256,
u'rt\xf6': 2255,
u'm#g': 2254,
u'cem': 2249,
u'\xfczl': 2248,
u'\xfal\xf3': 2248,
u'\xe1n\xe1': 2247,
u'y#u': 2247,
u'yat': 2243,
u'gd\xed': 2242,
u'sin': 2241,
u'itk': 2240,
u'kb\u0151': 2240,
u'n\xe1k': 2238,
u'\xfcsz': 2231,
u'\xedva': 2229,
u'zep': 2228,
u'\xe1rp': 2226,
u'ib\xe1': 2225,
u'tug': 2223,
u'd\xe1n': 2222,
u'bak': 2220,
u'y\xe1b': 2219,
u'gpr': 2218,
u'tri': 2217,
u'y\u0171#': 2216,
u'gg\xe1': 2216,
u'imi': 2214,
u't\xf3a': 2213,
u't\xe1c': 2212,
u'arn': 2210,
u'bea': 2208,
u'ly\xf3': 2207,
u'\u0171s\xe9': 2204,
u'one': 2203,
u'b#o': 2203,
u'##r': 2202,
u'zad': 2199,
u'ugd': 2198,
u'#i#': 2198,
u'v#v': 2198,
u'i\xf3v': 2198,
u'z#r': 2197,
u'onj': 2195,
u'lz\xf3': 2195,
u'o#\xfa': 2195,
u'ima': 2193,
u'izr': 2192,
u'p#e': 2191,
u'dre': 2190,
u'j#s': 2188,
u'nbe': 2188,
u'dk\xe9': 2187,
u'eub': 2186,
u'tav': 2183,
u'ulj': 2179,
u'ikr': 2179,
u'h\xe9r': 2175,
u'cs\xfc': 2175,
u'k#\xfc': 2172,
u'rus': 2172,
u'yz\xe1': 2171,
u'ed#': 2171,
u'dt\xe1': 2167,
u'k#\xf3': 2165,
u'med': 2163,
u'cen': 2160,
u'pre': 2159,
u'ud\xe1': 2159,
u'rvo': 2157,
u'oxi': 2156,
u'lmu': 2154,
u'alb': 2153,
u'd#p': 2152,
u'gb\xf3': 2150,
u'\xfas\xed': 2146,
u'g#\xfa': 2145,
u'\xfa#m': 2143,
u'\xe1ig': 2142,
u'nen': 2142,
u'ulh': 2142,
u'kii': 2140,
u'#s\xe9': 2139,
u'nam': 2136,
u'tej': 2134,
u'zai': 2129,
u'\xe1in': 2125,
u'zov': 2124,
u'hni': 2124,
u'b#j': 2123,
u'\xedtm': 2122,
u'lia': 2121,
u'sr\xe9': 2121,
u'jt#': 2120,
u'zzo': 2120,
u'feh': 2119,
u'ghe': 2116,
u'llt': 2112,
u'n\xe9s': 2112,
u'cot': 2110,
u'sv\xe9': 2109,
u'olk': 2107,
u'kt#': 2106,
u'd\xe9z': 2102,
u'dj\xfc': 2101,
u'm\xe9d': 2097,
u'\xe1s\xfa': 2094,
u'igi': 2094,
u'\xe9zz': 2090,
u'\xfat#': 2087,
u'i\xf3i': 2087,
u'bej': 2086,
u'ash': 2084,
u'\xf3#\xf6': 2081,
u'oks': 2080,
u'st\xf3': 2078,
u'iek': 2078,
u'\xe9ka': 2078,
u'cit': 2077,
u'naz': 2077,
u'us\xe1': 2075,
u'sam': 2074,
u'leb': 2073,
u'r\u0151k': 2072,
u'm\xe1v': 2071,
u'j#m': 2069,
u'#nu': 2065,
u'nuk': 2064,
u'ake': 2060,
u'g#d': 2059,
u'tje': 2058,
u'muk': 2057,
u'nje': 2057,
u'f\u0151b': 2055,
u'\xe9#h': 2053,
u'tso': 2050,
u'r\xf3t': 2046,
u'#od': 2045,
u'\xf3su': 2045,
u'rzi': 2044,
u't\xf3v': 2044,
u'z\xe9b': 2043,
u'l\u0151j': 2040,
u'nkk': 2039,
u'tog': 2037,
u'\u0151##': 2037,
u'\xf3d\xe1': 2037,
u'\xf3in': 2036,
u'ny\xfc': 2036,
u'ldi': 2035,
u'csn': 2034,
u'ldr': 2032,
u'#kk': 2030,
u'tim': 2027,
u'v#t': 2026,
u'\xf6kn': 2026,
u'\xe1rc': 2026,
u'ps#': 2025,
u'\xfa#h': 2025,
u'lfe': 2024,
u'i\xe1s': 2021,
u'exp': 2017,
u'l\xe1c': 2015,
u's\xe9l': 2009,
u'rod': 2009,
u'\xf6di': 2003,
u'\xe1zi': 2001,
u'tex': 2001,
u'n\xe1m': 1999,
u'is\xe1': 1999,
u'usi': 1998,
u'e\xe1r': 1995,
u'\xfa#a': 1994,
u'#ex': 1994,
u'\xf6t\xf6': 1994,
u'eh#': 1993,
u'ple': 1993,
u'y\xe1l': 1992,
u'\xe9lr': 1992,
u'\xf3l\xe9': 1991,
u'dit': 1991,
u'tif': 1991,
u'tm\xe9': 1990,
u'\xedg\xe9': 1987,
u'yr\u0151': 1985,
u'j#e': 1982,
u'p\xe1t': 1977,
u'kro': 1976,
u'#f\u0171': 1975,
u'nt\xf6': 1974,
u'ir\xf3': 1968,
u'ng#': 1967,
u'#gu': 1966,
u'zsz': 1965,
u'soh': 1964,
u'rof': 1959,
u'kop': 1956,
u'sed': 1955,
u'g\u0151r': 1955,
u'h\xedr': 1950,
u'et\u0171': 1950,
u'\xf3ho': 1949,
u'z\xfat': 1947,
u'zas': 1945,
u'\xf3#\xfa': 1944,
u'eav': 1943,
u'b\xe9r': 1943,
u'\xe1zn': 1942,
u'iox': 1941,
u'\u0151in': 1937,
u'lzo': 1935,
u'dra': 1933,
u'xpo': 1932,
u's\xfaj': 1931,
u'rva': 1929,
u'eto': 1926,
u'mj\xe1': 1925,
u'tr\u0151': 1922,
u'l\xfck': 1922,
u'nja': 1921,
u'yk\xf6': 1918,
u'\xf6nf': 1916,
u'gs\u0151': 1915,
u'sz\u0171': 1914,
u'\xe9nd': 1913,
u'vni': 1911,
u'zia': 1911,
u'lny': 1910,
u'gik': 1908,
u'y\xf6k': 1908,
u'opp': 1907,
u'xid': 1906,
u'zd\u0151': 1905,
u'mul': 1905,
u'egu': 1905,
u'bul': 1902,
u'\xf3dn': 1899,
u'\u0171#v': 1897,
u'pat': 1895,
u'odt': 1894,
u'm#d': 1891,
u'tk\xe9': 1891,
u'vos': 1888,
u'\u0171#s': 1886,
u'vul': 1885,
u'\xf6k\xf6': 1884,
u'lus': 1882,
u't\xf6m': 1881,
u'p\xe9t': 1880,
u'u#a': 1879,
u'mk\xe9': 1879,
u'\xe9gk': 1879,
u'm#u': 1878,
u'l\u0151f': 1873,
u'h\xe1g': 1872,
u'oml': 1872,
u'\xfatj': 1871,
u'mbu': 1870,
u'bur': 1868,
u'gma': 1867,
u'ztv': 1867,
u'usb': 1867,
u'yr\xe9': 1863,
u'z#c': 1862,
u'ln\xe1': 1859,
u'b#g': 1854,
u'gom': 1852,
u'so#': 1849,
u'udu': 1848,
u'#b\xf6': 1847,
u'u#t': 1847,
u'et\xf6': 1846,
u'nn#': 1846,
u'r#\xf6': 1844,
u'am\xe1': 1842,
u'sv\xe1': 1842,
u'elr': 1840,
u'#\xe1n': 1840,
u'\xfani': 1839,
u'\u0151#d': 1839,
u'p#m': 1839,
u'pci': 1833,
u'\xe1tm': 1833,
u'lf\xf6': 1832,
u'iap': 1830,
u'\xe1rv': 1830,
u'rot': 1827,
u'\u0171nc': 1825,
u'\xf3r\xe1': 1817,
u'r\xf6z': 1816,
u'in\xe9': 1815,
u'\u0171#j': 1814,
u'\xe1g\xed': 1813,
u'##l': 1813,
u'mom': 1811,
u'b\xfcr': 1810,
u'n#\xed': 1810,
u'gzo': 1809,
u'\xedve': 1808,
u'\xfcro': 1807,
u'bb\xed': 1803,
u'k\xe1v': 1801,
u'b#\xe1': 1801,
u'to#': 1799,
u'd#v': 1799,
u'utn': 1798,
u'azk': 1797,
u'u#e': 1797,
u'#ri': 1793,
u'oto': 1793,
u'akm': 1790,
u'nd\xfc': 1790,
u'eib': 1789,
u'\xe1n\xed': 1789,
u'\xe9ls': 1789,
u'\u0151#\xfc': 1787,
u'azu': 1785,
u'\xe9t\u0151': 1785,
u'avu': 1785,
u'rce': 1782,
u'\xe9t\xe9': 1782,
u'azi': 1780,
u'\xfa#s': 1778,
u'ned': 1778,
u'kr\xf6': 1770,
u'\xf3\xf6v': 1770,
u'gyf': 1767,
u'\u0171#\xe9': 1765,
u'vem': 1762,
u'z#\u0151': 1761,
u'#\u0151#': 1760,
u'ntt': 1759,
u'ntv': 1758,
u'kkv': 1758,
u'r\xf3\xf6': 1757,
u'rci': 1754,
u'app': 1754,
u'v\xe1m': 1752,
u'nl#': 1752,
u'll\u0151': 1751,
u'dim': 1751,
u'lz\xe9': 1749,
u'ppa': 1749,
u'\u0171#a': 1748,
u'esl': 1748,
u'sot': 1746,
u'mie': 1746,
u'imo': 1746,
u'l\xe9l': 1743,
u'\xedzh': 1742,
u'amn': 1742,
u'#m\xed': 1742,
u'if\xe9': 1741,
u'z\xe1t': 1741,
u'tiv': 1740,
u'oru': 1739,
u'ukl': 1734,
u'ept': 1732,
u'\xe9do': 1731,
u'pra': 1731,
u'f\u0171z': 1729,
u'iao': 1728,
u'\u0151#\xfa': 1726,
u'#op': 1724,
u'\xe9r\xfc': 1718,
u'nh\xe1': 1718,
u'#d\xed': 1718,
u'\xe9b\u0151': 1716,
u'#du': 1716,
u'osi': 1715,
u'y\u0171j': 1715,
u's\xfat': 1715,
u'\u0171jt': 1714,
u'j\xfan': 1713,
u's\u0151d': 1711,
u'nym': 1710,
u'g\u0151#': 1708,
u'\u0171le': 1704,
u'\xfali': 1704,
u's\xe9s': 1703,
u'as\xfa': 1703,
u'sm\xf3': 1701,
u'lk\xe1': 1700,
u'bru': 1697,
u'bic': 1697,
u'ed\xfc': 1696,
u'ukb': 1695,
u'aor': 1695,
u'irt': 1694,
u'nas': 1694,
u'd#f': 1694,
u'f\xe9n': 1693,
u'\xf3zu': 1691,
u'r\xfcn': 1690,
u'i\xf3z': 1690,
u'\xe1sm': 1690,
u'\xf3f\xe1': 1689,
u'\xe1do': 1689,
u'eig': 1688,
u'r\xfaz': 1687,
u'\xe9lb': 1687,
u'r#c': 1687,
u'acr': 1686,
u'abv': 1686,
u'tvi': 1685,
u'#s\u0151': 1685,
u'\xf3k\xe9': 1680,
u'\xf6l\xe9': 1678,
u'd\u0151n': 1677,
u'ki\xe1': 1676,
u'\xe9kt': 1676,
u'bv\xe1': 1675,
u'\xfazi': 1675,
u'vaj': 1674,
u'v#\xe9': 1673,
u'gmu': 1673,
u'eeg': 1670,
u'st\xfc': 1670,
u'gr\xfa': 1668,
u'gut': 1666,
u'rop': 1666,
u'rsu': 1663,
u'z\xf3n': 1660,
u'd\xf3j': 1660,
u'#dr': 1659,
u'\xedlt': 1658,
u'h\xf6z': 1657,
u'\u0151\xe1l': 1657,
u'\xfalz': 1657,
u'amj': 1656,
u'vai': 1656,
u'ald': 1652,
u'zex': 1652,
u'\u0171v\xe9': 1651,
u'zam': 1650,
u'zim': 1649,
u'mp\xe1': 1649,
u'a#w': 1648,
u'doh': 1644,
u'gt\xf3': 1644,
u'zn\xe9': 1643,
u'l\u0151\xe1': 1640,
u'm\xfa#': 1638,
u'lol': 1636,
u'afe': 1636,
u's\xe1#': 1635,
u'v\xe1k': 1634,
u'udt': 1634,
u'n\xe1b': 1633,
u'ej\xf6': 1631,
u'\xe9to': 1630,
u'\xe1rk': 1630,
u'##o': 1626,
u'v#f': 1626,
u'gme': 1623,
u'#h\xfa': 1623,
u'our': 1623,
u'adm': 1621,
u'i\xf3h': 1621,
u'dmi': 1621,
u'cra': 1620,
u'adu': 1620,
u'rid': 1620,
u'am\xed': 1619,
u'\u0151ve': 1618,
u'\xe1nb': 1617,
u'a\xfcg': 1614,
u'kma': 1611,
u'\xfclf': 1611,
u'rmj': 1611,
u'y\u0171l': 1611,
u'\xf3kk': 1611,
u'gs\xe1': 1610,
u't\xedp': 1609,
u'pjo': 1608,
u'ymi': 1608,
u'o#a': 1608,
u'zr\u0151': 1606,
u'ok\xe9': 1606,
u'##\xf3': 1605,
u'lm\xfc': 1605,
u'm#\xfa': 1604,
u'\xe9z\xe9': 1602,
u'gf\u0151': 1600,
u'iut': 1600,
u'ciu': 1599,
u'm\u0171s': 1596,
u'vig': 1596,
u'olc': 1596,
u'exu': 1595,
u'z\xf3i': 1593,
u'dn\xe1': 1592,
u'mz\xe9': 1592,
u'gil': 1589,
u'gh\xe1': 1588,
u'rn\xfc': 1585,
u'\xf6nm': 1584,
u'hai': 1579,
u'ove': 1579,
u'idi': 1576,
u'\xe9mi': 1575,
u'sma': 1575,
u'orn': 1570,
u'ogh': 1570,
u'ong': 1569,
u'ov\xf3': 1569,
u'ot\xf3': 1569,
u'nzt': 1568,
u'\xfa#i': 1568,
u'edk': 1566,
u'raz': 1566,
u'fi#': 1564,
u'km\xe1': 1563,
u'\u0151ir': 1563,
u'\xe1kn': 1563,
u'zl\xe1': 1562,
u'liu': 1561,
u'kvk': 1559,
u'pac': 1557,
u'ymu': 1554,
u'dag': 1554,
u'gyt': 1552,
u'\xf3r\xf3': 1552,
u'v\xe1g': 1551,
u'\xe1mv': 1550,
u'\u0151ze': 1549,
u'k\xe9k': 1548,
u'nvo': 1544,
u'\xe9lu': 1541,
u'mja': 1540,
u'dbe': 1540,
u'\xf6ln': 1539,
u'l\xf3t': 1539,
u'\xe1kb': 1539,
u'zva': 1539,
u'ifo': 1538,
u'mif': 1536,
u'\xe1lu': 1536,
u'\xf3zt': 1535,
u'\xe9g\xed': 1534,
u'ez\xfc': 1533,
u'kk#': 1533,
u'\xednv': 1531,
u'\xf6me': 1531,
u'o#e': 1531,
u'\xe1di': 1531,
u'ud#': 1530,
u'i#z': 1529,
u'\xf3l\xe1': 1527,
u'\xe1zs': 1525,
u'ore': 1525,
u'apb': 1525,
u'#us': 1525,
u'l#u': 1524,
u'\xe1zb': 1523,
u'\xf6v\xe9': 1522,
u'bez': 1522,
u'\u0151kk': 1520,
u'orz': 1519,
u'j\xe9r': 1519,
u'urg': 1518,
u'\u0151m\u0171': 1518,
u'\xe1tv': 1517,
u'v\xf3t': 1517,
u'ut#': 1516,
u'\xfato': 1514,
u'\xedz#': 1512,
u'ccs': 1512,
u'r\u0151m': 1510,
u'\u0151di': 1510,
u'tmo': 1509,
u'#\xfct': 1508,
u'plo': 1507,
u'are': 1507,
u'\xfcss': 1506,
u's\xe1\xe9': 1505,
u'#f\xe1': 1503,
u'ofi': 1499,
u'yv\xe9': 1497,
u'm\xf6g': 1494,
u'r#o': 1489,
u'z\xf3v': 1485,
u'u#s': 1485,
u'#m\xf6': 1484,
u'\xe9vr': 1483,
u'mr\xe9': 1483,
u'\xfclp': 1483,
u'pta': 1483,
u'okf': 1483,
u'nz\u0151': 1482,
u'\u0151ek': 1482,
u'#pp': 1480,
u'odu': 1478,
u'g#\xf6': 1476,
u'n\xe1t': 1476,
u'\xf3fi': 1475,
u'gab': 1474,
u'br\xfc': 1473,
u'\xf6t\xe9': 1473,
u'ng\xe9': 1471,
u'rum': 1468,
u'rdo': 1467,
u'mpe': 1467,
u'etm': 1466,
u'r\xfcs': 1465,
u'd\u0151d': 1465,
u'\xfals': 1463,
u'\xe1zz': 1462,
u'geg': 1462,
u'luk': 1458,
u'lra': 1457,
u'ero': 1457,
u'kfe': 1455,
u'hek': 1455,
u'nu\xe1': 1455,
u'g\xe1v': 1454,
u'lj\xe9': 1454,
u'h\xfaz': 1453,
u'y\xf3#': 1453,
u'scs': 1452,
u'\xe1v\xe1': 1452,
u'kf\xe9': 1451,
u't\u0151b': 1449,
u'fa#': 1448,
u'gse': 1447,
u'\xedmk': 1447,
u'\xf3ak': 1446,
u'eka': 1446,
u'nl\xed': 1445,
u'sim': 1444,
u'eci': 1444,
u't\u0151j': 1444,
u'v\xe9r': 1442,
u'\xf3k\xf6': 1442,
u'aly': 1440,
u'jtj': 1439,
u'jna': 1439,
u'\u0171#p': 1439,
u'rko': 1437,
u'gt\xe9': 1436,
u'\xf6g\xf6': 1432,
u'z#d': 1432,
u'tth': 1431,
u'nha': 1429,
u'\xf6t\u0151': 1427,
u'llu': 1426,
u'\xe9#f': 1425,
u'ilk': 1424,
u'bud': 1423,
u'\xedli': 1422,
u'\xf3ss': 1421,
u'ipl': 1418,
u'\xf3v\xe9': 1416,
u'z\xe1a': 1414,
u'\xe9ro': 1413,
u'xu\xe1': 1411,
u'\xedpu': 1409,
u'b\xf6r': 1409,
u'gbi': 1408,
u'\xedvj': 1408,
u'd\xf3z': 1408,
u'n#z': 1408,
u'lba': 1402,
u'is\xed': 1402,
u'bem': 1401,
u'ich': 1401,
u'\u0151kr': 1400,
u'ail': 1398,
u'col': 1397,
u'dn\xfc': 1394,
u'\xf3#u': 1394,
u'\xe1g\xfc': 1393,
u'eut': 1393,
u'itv': 1393,
u'dic': 1393,
u'nza': 1392,
u'\xe1nu': 1392,
u'kv\xf3': 1391,
u'dis': 1391,
u'vin': 1390,
u'\xe1m\xfa': 1389,
u'beh': 1389,
u'pna': 1388,
u'n\u0151t': 1387,
u'b#c': 1386,
u'j\xe1\xe9': 1384,
u'hu#': 1383,
u'ard': 1382,
u't\xedz': 1381,
u'#\u0151r': 1381,
u'ags': 1379,
u'kl\xe1': 1377,
u'zt\u0151': 1376,
u's\xe9\xe9': 1374,
u'\xf3nk': 1373,
u'gg#': 1373,
u'pec': 1373,
u'\u0151l\xfc': 1369,
u'gr\u0151': 1367,
u'anz': 1367,
u'r\xf3n': 1365,
u'b\xf6l': 1365,
u'tad': 1365,
u'kic': 1364,
u'bot': 1364,
u'd#b': 1363,
u'jam': 1362,
u'id\xe1': 1361,
u'rup': 1361,
u'afg': 1361,
u'aj#': 1357,
u'g\xf6t': 1356,
u'\xe1lb': 1354,
u'd\xe1k': 1354,
u'bis': 1349,
u'k\xe1i': 1348,
u'yiu': 1348,
u'p#\xe9': 1348,
u'r#g': 1348,
u'\xe1rl': 1347,
u'gyk': 1346,
u'rk\xe9': 1346,
u'yt\xe1': 1343,
u'\u0171#f': 1342,
u'agi': 1341,
u'dip': 1341,
u'ar\xed': 1340,
u'arb': 1338,
u'd#i': 1337,
u'\xf6z\xfa': 1336,
u'\xe1g\xe9': 1336,
u'ien': 1334,
u'r\xe9r': 1334,
u'\xe1i#': 1333,
u'yhe': 1333,
u'\xe1zh': 1332,
u'ntm': 1332,
u'\xe1t\xfc': 1332,
u'aid': 1329,
u'uri': 1329,
u'\xf6r\u0151': 1327,
u'f\xe1k': 1327,
u'azh': 1325,
u'rov': 1324,
u'dle': 1324,
u'#om': 1323,
u'emu': 1323,
u'ndr': 1320,
u'ulg': 1319,
u'r\xe9k': 1319,
u'p\xe1l': 1317,
u'gz\xe9': 1316,
u'umb': 1316,
u'bir': 1316,
u'\xf3so': 1316,
u'kl\u0151': 1312,
u'imb': 1311,
u'\xedtu': 1310,
u'#\xe1p': 1309,
u'ezs': 1308,
u'z\u0151e': 1307,
u'l\xe1z': 1307,
u'c\xe9g': 1306,
u'ej\xe1': 1305,
u'ej\u0171': 1305,
u'ext': 1304,
u'tib': 1304,
u'tn\xfc': 1303,
u'j\xf3k': 1303,
u'\xedzi': 1302,
u'ier': 1302,
u'\xe9gp': 1301,
u'miu': 1300,
u'b\xe1z': 1300,
u'an\xed': 1300,
u'oh\xe1': 1298,
u'#\xf6k': 1297,
u'ekc': 1296,
u'ilo': 1296,
u'\xe9kl': 1295,
u'loz': 1292,
u'sv#': 1292,
u't\xf3d': 1292,
u'um\xe1': 1292,
u'kak': 1291,
u'\u0151d\xf6': 1291,
u'odj': 1290,
u'ope': 1290,
u'yv#': 1289,
u'ajz': 1288,
u'rru': 1287,
u'y\xe1n': 1286,
u'iar': 1286,
u'lai': 1286,
u'\xf3li': 1286,
u'bre': 1285,
u'ikl': 1285,
u'\xe1#e': 1284,
u'\xf6rb': 1282,
u's#z': 1282,
u'zid': 1282,
u'v\u0151k': 1282,
u'uko': 1281,
u'v\xe1d': 1281,
u'k\xfcs': 1281,
u'ng\xfa': 1279,
u'rs\xed': 1278,
u'a\xe9l': 1277,
u'\u0171z\xf6': 1277,
u'\u0171bb': 1277,
u'iro': 1274,
u'ex#': 1273,
u'\xe1nc': 1273,
u'z\u0151i': 1272,
u'sto': 1272,
u't\u0151t': 1270,
u'\xfa#\xe9': 1269,
u'klu': 1269,
u'#lu': 1269,
u'd\xfcn': 1269,
u'rk\xf6': 1268,
u'p#k': 1266,
u'bbn': 1266,
u'imu': 1265,
u'izn': 1265,
u'ak\xf6': 1264,
u'\xe9ds': 1264,
u'\u0151dn': 1264,
u'n\u0171l': 1264,
u'z#g': 1263,
u'mz\u0151': 1261,
u'nad': 1261,
u'bei': 1260,
u'g\xe1m': 1257,
u'eg\xf3': 1256,
u'\xe9ll': 1254,
u'mbo': 1253,
u'lbo': 1252,
u'sb\u0151': 1250,
u'h\xfas': 1249,
u'szm': 1248,
u'\xe9dj': 1246,
u'r\xfaa': 1246,
u'rcs': 1242,
u'd\xe9n': 1242,
u'hir': 1242,
u'g#u': 1241,
u'gk\xed': 1241,
u'uva': 1240,
u'eku': 1240,
u'\xe9d\u0151': 1238,
u'd\xf3f': 1237,
u'zth': 1236,
u'ril': 1236,
u'jt\xe9': 1235,
u'#ce': 1234,
u'yke': 1234,
u'\xfaak': 1232,
u'za\xe9': 1231,
u'\xf3fa': 1230,
u'r\xe1f': 1230,
u'\u0171nt': 1228,
u'ldj': 1227,
u'#\xf6v': 1227,
u'rec': 1226,
u'\xf6lj': 1226,
u'p#s': 1226,
u'\xe9vt': 1225,
u'd\xf3a': 1225,
u'sti': 1225,
u'#kv': 1224,
u'ced': 1223,
u'\xe1gk': 1223,
u'abs': 1221,
u'\xf3t\xe1': 1220,
u'zub': 1219,
u'em\u0171': 1218,
u'nac': 1218,
u'\xe1cc': 1216,
u'kti': 1215,
u'rie': 1215,
u'fro': 1214,
u'\xedrj': 1214,
u't\xf3n': 1214,
u'\xe9k\xe1': 1214,
u'csb': 1214,
u'd\u0151j': 1213,
u't\u0151d': 1213,
u'\xe9r\xed': 1212,
u'l\xf3n': 1211,
u'd\xe1j': 1211,
u'\xedzo': 1210,
u'\xf3ln': 1209,
u'mve': 1207,
u'iun': 1206,
u'ses': 1206,
u'\u0151dl': 1206,
u'uds': 1206,
u'kig': 1205,
u'dsm': 1205,
u'dr\xe1': 1204,
u'g\xe1i': 1204,
u'\xfa#p': 1203,
u'#ii': 1203,
u'\xf6zs': 1202,
u'i#\u0151': 1202,
u'ppo': 1200,
u'ade': 1199,
u'u#m': 1199,
u'#f\xf3': 1198,
u'ac\xe1': 1196,
u'rk\xf3': 1195,
u'\xf3d#': 1195,
u'nz#': 1194,
u'\u0151#u': 1194,
u'ins': 1193,
u'ap\xfa': 1193,
u'z\u0151t': 1192,
u'\xf3kb': 1192,
u'b##': 1191,
u'nl\xe9': 1190,
u'uen': 1189,
u'onh': 1188,
u'nla': 1188,
u'r\xe1v': 1188,
u'\xfas\xe1': 1188,
u'zk\xe9': 1187,
u'pti': 1187,
u'nz\xe1': 1186,
u'zut': 1186,
u'iol': 1186,
u'z\xe1b': 1184,
u'uve': 1182,
u'\xe1sv': 1182,
u'j#j': 1181,
u'adk': 1181,
u'zdj': 1179,
u'imm': 1178,
u'yni': 1178,
u'\xf3z\xe1': 1176,
u'\xe1\xe1l': 1176,
u'umm': 1176,
u'j#p': 1176,
u'l\xe1k': 1175,
u'rho': 1174,
u'd#n': 1174,
u'z\xe1\xe1': 1173,
u'gpe': 1173,
u'\u0151nk': 1171,
u'#zs': 1170,
u'\xe9ta': 1170,
u'\xf3ni': 1170,
u'mei': 1169,
u'jei': 1168,
u'zt\xed': 1167,
u'b#d': 1167,
u'v\u0151s': 1167,
u'\xf3cs': 1165,
u'\u0171ek': 1165,
u'ldk': 1164,
u'\xf6rl': 1163,
u'\xe9#m': 1163,
u'#gd': 1162,
u'gig': 1162,
u'ntk': 1162,
u'j#\xe9': 1159,
u'kla': 1157,
u'pto': 1156,
u'vti': 1156,
u'r#d': 1154,
u'ace': 1153,
u'j#f': 1152,
u'l\xf3i': 1150,
u'mne': 1149,
u'nyf': 1147,
u'pp\xe1': 1145,
u'pap': 1145,
u'\xfa#f': 1144,
u'zuv': 1144,
u'kac': 1143,
u'opt': 1143,
u't\u0151z': 1143,
u'\xf6n\xe1': 1141,
u't#z': 1138,
u'jap': 1138,
u'gy\xf6': 1138,
u'\xe1ko': 1138,
u'ur#': 1136,
u'eal': 1136,
u'aus': 1136,
u'bou': 1136,
u'bk\xe9': 1136,
u'nse': 1136,
u'jem': 1135,
u'vk#': 1134,
u'tk\xe1': 1134,
u'lah': 1134,
u'j#i': 1133,
u'##u': 1133,
u'#\xf3v': 1132,
u'\xe1mb': 1132,
u'\u0151po': 1132,
u'ii#': 1130,
u'iho': 1129,
u'usr': 1129,
u'zla': 1128,
u'igl': 1128,
u'iab': 1127,
u'lyh': 1126,
u'eko': 1124,
u'ugo': 1123,
u'r\xf3k': 1123,
u't\xf3j': 1123,
u'yot': 1123,
u'\xedrn': 1122,
u'wto': 1122,
u'ki\xe9': 1119,
u'p\xfa#': 1117,
u'oki': 1117,
u'r\xfab': 1116,
u'\xfajs': 1115,
u'v\xf3#': 1115,
u'\xf6g#': 1114,
u'eol': 1114,
u'mt#': 1111,
u'y#\xed': 1110,
u'fos': 1110,
u'\xe9kf': 1109,
u'#wt': 1108,
u'##\xe1': 1108,
u'zar': 1107,
u'esi': 1107,
u'ib\u0151': 1106,
u'hom': 1106,
u'l#\xed': 1105,
u'gt\xe1': 1103,
u'hto': 1103,
u'sht': 1103,
u'kud': 1102,
u'olm': 1102,
u'lvr': 1101,
u'veh': 1100,
u'zi\xe1': 1098,
u'rim': 1098,
u'mta': 1097,
u'ebr': 1096,
u'upc': 1095,
u'cip': 1094,
u'ces': 1093,
u'b\xe1s': 1093,
u'ubs': 1092,
u'amm': 1090,
u'tcs': 1090,
u'\xfarr': 1089,
u'ogg': 1088,
u'\xfatt': 1087,
u'beb': 1087,
u'z\xf6b': 1087,
u'yip': 1086,
u'\u0171#b': 1085,
u'kl\xed': 1085,
u'm\xfck': 1084,
u't\xfcg': 1083,
u'd#l': 1083,
u'pba': 1082,
u'dva': 1081,
u'rg\xf3': 1081,
u'd\xf6s': 1081,
u'\xfaan': 1080,
u'ovi': 1080,
u'l\xedm': 1080,
u'ngl': 1079,
u'idk': 1078,
u'vja': 1078,
u'yja': 1078,
u'l\xf3l': 1076,
u'nju': 1076,
u'\u0151s\xf6': 1075,
u'lb\xfc': 1075,
u'\u0171n\u0151': 1074,
u'mac': 1073,
u'zah': 1073,
u'aml': 1072,
u'ov\xe9': 1072,
u'v#j': 1071,
u'\xf6zr': 1071,
u'kcs': 1071,
u'sb\xf3': 1071,
u'jb\xf3': 1070,
u'szf': 1069,
u'zaf': 1069,
u'\xe1fi': 1069,
u'nj\xe1': 1068,
u'rn\xe9': 1066,
u'at\xfa': 1065,
u'l\xe1v': 1065,
u'ia\xfc': 1064,
u'j\xedt': 1064,
u'\xf6tl': 1064,
u'kt\xe1': 1063,
u'isv': 1063,
u'urm': 1062,
u'j#a': 1060,
u'c#m': 1059,
u'axi': 1059,
u'\u0151rs': 1058,
u'\xe9#\xe9': 1058,
u'j\xe1h': 1058,
u'nep': 1057,
u'r\xe1a': 1057,
u'dk\xf6': 1056,
u'avi': 1056,
u'\xe1s\xe9': 1055,
u'dki': 1053,
u'eak': 1052,
u'\xedma': 1051,
u'#ob': 1050,
u'\xe1it': 1050,
u'nd\xf6': 1050,
u'vad': 1049,
u'v\xedv': 1048,
u'rn\xe1': 1047,
u'umk': 1047,
u'\xe9rb': 1046,
u'baj': 1045,
u'ose': 1044,
u'ym\xe9': 1044,
u'nga': 1044,
u'\xfcve': 1044,
u'bb\xf3': 1043,
u'umi': 1042,
u'#k\xf3': 1042,
u'bbo': 1042,
u'jzi': 1040,
u'un#': 1039,
u'\xfaj\xed': 1037,
u'dov': 1032,
u'z\u0151l': 1032,
u'sfe': 1032,
u'lf\xfc': 1032,
u't\xf3t': 1031,
u'\xfclb': 1030,
u'hid': 1030,
u'dai': 1029,
u'dzs': 1028,
u'rl\xe9': 1026,
u'\xe9tb': 1025,
u'#ec': 1024,
u'hab': 1023,
u'\xfa#v': 1023,
u'v#p': 1023,
u'js\xe1': 1022,
u'\xfa#g': 1021,
u'zl\xe9': 1021,
u'\xf3#\xfc': 1021,
u'div': 1021,
u'\xfajb': 1020,
u'\xe9s\u0171': 1020,
u'gdp': 1019,
u'asp': 1019,
u'fit': 1018,
u'#\xe1m': 1017,
u'xim': 1017,
u'ogb': 1017,
u'max': 1017,
u'sk#': 1017,
u'jth': 1016,
u'd\xf6z': 1016,
u'akh': 1016,
u'mpi': 1016,
u'f\u0151l': 1016,
u'svi': 1015,
u'\xf3ma': 1015,
u'lee': 1015,
u'\u0151#\xf6': 1014,
u'\u0151n#': 1013,
u'aks': 1013,
u'ly\xfc': 1013,
u'llh': 1010,
u'\xfabb': 1010,
u'fga': 1010,
u'lie': 1010,
u'ino': 1009,
u'\xe9ig': 1009,
u'lar': 1009,
u'\xe1ml': 1008,
u'ozv': 1007,
u'ltj': 1007,
u'\u0171#i': 1006,
u'bi\xe1': 1006,
u's\u0171#': 1005,
u'kvi': 1004,
u'h\xedt': 1003,
u'rki': 1002,
u'sbo': 999,
u'\xe9#e': 998,
u'l#\xfc': 998,
u'bos': 997,
u'\u0151jo': 997,
u'ipr': 997,
u'ole': 996,
u'z\u0151j': 993,
u'yh\xed': 993,
u'dde': 991,
u'nre': 990,
u'#fl': 990,
u'z\xf3j': 989,
u'\xf3ru': 989,
u'ukn': 989,
u'efi': 989,
u'g\xf3d': 989,
u'enj': 989,
u'\xe1#m': 988,
u'onu': 988,
u'\xedvo': 984,
u'l#\u0151': 984,
u'#\xfcv': 984,
u'kpo': 983,
u'nt\xfc': 983,
u'\xe1b\xed': 983,
u'y#\xfc': 982,
u'g\xe9#': 982,
u'##g': 981,
u'nra': 980,
u'#wa': 978,
u'mib': 978,
u'mb\xf3': 977,
u'rut': 976,
u'noh': 975,
u'bus': 975,
u'buk': 975,
u'lvb': 975,
u'ipt': 974,
u'niv': 974,
u'gg\xf3': 974,
u'\xf6m\xf6': 973,
u'\xe1pr': 973,
u'\xe1dj': 972,
u'#ps': 971,
u'mr\u0151': 969,
u'sia': 969,
u'\xf3ll': 969,
u'ric': 967,
u'\xe9bk': 965,
u'z\xedc': 965,
u'\xe9lf': 965,
u'n\xe1z': 964,
u'zeb': 964,
u'iso': 964,
u'iae': 963,
u'\xfalm': 963,
u'oz\xed': 962,
u'mho': 961,
u'm\xe9h': 960,
u'ca#': 958,
u'\xf6b\xf6': 958,
u'ol\xed': 958,
u'yho': 957,
u'kn\xe9': 956,
u's\xf6d': 954,
u'alv': 954,
u'd\xf3d': 953,
u'\xfcte': 952,
u'g\xf3t': 950,
u'omn': 949,
u'gro': 949,
u'k\xfa#': 948,
u'\u0151zn': 948,
u'b\xfcs': 948,
u'asb': 948,
u'jli': 947,
u'\xe9kr': 947,
u'h#k': 946,
u'osb': 944,
u'm#\xed': 944,
u'\xfcnn': 944,
u'ble': 943,
u'\xf6zh': 943,
u'ezv': 942,
u'ski': 942,
u'teb': 941,
u'vne': 940,
u'n\u0171s': 939,
u't\xf3r': 937,
u'#ap': 937,
u'l\xf3r': 936,
u'd\xe1i': 936,
u'vok': 934,
u'lum': 934,
u'd#o': 934,
u'\xedre': 933,
u'csk': 933,
u'm\xfcn': 931,
u'kh\xf6': 930,
u'iny': 929,
u'stb': 928,
u'j\xfal': 928,
u'ugt': 927,
u'y\xe9s': 927,
u'#ef': 925,
u'rc#': 924,
u'#sk': 923,
u'\xfckb': 923,
u'asm': 922,
u'mli': 921,
u'rn#': 919,
u'phe': 919,
u'b\xe1k': 918,
u'\u0151t\xe9': 918,
u'rm#': 917,
u't\xedc': 916,
u'k#z': 916,
u'non': 915,
u'#s#': 914,
u'rty': 914,
u'egd': 914,
u'\xfasz': 914,
u'ndb': 913,
u'\xe1gy': 912,
u'\xe1km': 912,
u'ru\xe1': 912,
u'sha': 912,
u'\u0171sz': 911,
u'e\xe1n': 909,
u'amh': 909,
u'v\u0151r': 909,
u'nyd': 909,
u'fal': 907,
u'us\xfa': 907,
u'\xedbi': 906,
u'gue': 905,
u'y\xe1z': 904,
u'epr': 904,
u'amr': 903,
u'de\xe9': 903,
u'mum': 902,
u'agn': 902,
u'e\xe9p': 901,
u'laf': 901,
u'ik\xfc': 901,
u'ase': 900,
u't\xfcz': 899,
u'zkr': 897,
u'y\xf6s': 897,
u'#on': 896,
u'ep\xed': 896,
u'feb': 895,
u'z\xf6g': 895,
u'\xe1lv': 894,
u'fak': 894,
u'eat': 893,
u'p#f': 893,
u'j\xf3z': 892,
u'yde': 891,
u'l\xedb': 891,
u't\xe1p': 890,
u'zud': 890,
u'f\xf3r': 889,
u'l#z': 889,
u'cs\u0151': 889,
u'\xe9nv': 888,
u'pla': 888,
u'ae#': 888,
u'irk': 887,
u'\xe1te': 887,
u'm\xf6l': 886,
u'z\xe9g': 886,
u'tto': 886,
u'k\xf3z': 886,
u'\u0171l\xe9': 885,
u'lim': 884,
u'\xf6tv': 884,
u'tb#': 883,
u'\xe9sc': 883,
u'etp': 881,
u'z\u0171n': 880,
u'dba': 880,
u'gn\xf6': 879,
u'om\xf3': 879,
u'#\xf6l': 878,
u'\xe9#s': 878,
u'#l\xed': 878,
u'fuv': 878,
u'p\xe1s': 878,
u'umr': 877,
u'afi': 877,
u'k\xe9v': 876,
u'd\xf3t': 875,
u'izz': 875,
u'gib': 874,
u'\u0151it': 874,
u'k\xf3r': 873,
u'jl\xf3': 872,
u'#\xfcr': 872,
u'\xe1re': 872,
u'ak\xf3': 869,
u'bbl': 869,
u'un\xe9': 868,
u'g\xedr': 868,
u'\xe9j\xe9': 867,
u'aca': 867,
u'teu': 867,
u'zee': 866,
u'gl\xe1': 866,
u'cel': 865,
u'chi': 864,
u'pru': 864,
u'u#p': 863,
u'sbi': 862,
u'\xfas#': 862,
u'#su': 861,
u'\xfcm\xf6': 861,
u'st\u0151': 861,
u'\xfajj': 860,
u'ed\xf3': 860,
u'lab': 860,
u'y\xfcm': 860,
u'ikb': 860,
u'gki': 860,
u'v#l': 859,
u'a#\xed': 859,
u't#\u0151': 858,
u'lzi': 858,
u'yla': 858,
u'z\xedl': 855,
u'\xe9g\xf3': 855,
u'sis': 854,
u'gaa': 853,
u'pl\xe1': 853,
u'j#h': 852,
u'dam': 852,
u'emn': 851,
u'cho': 851,
u'zdv': 851,
u'hov': 850,
u'd\u0151p': 850,
u'l\xe9b': 849,
u'ats': 848,
u'tfe': 847,
u'rip': 846,
u'\u0151kb': 845,
u'uk\xe1': 845,
u'\xe9ti': 845,
u'mm\xe1': 844,
u'\xfcre': 844,
u'r\u0171b': 843,
u'lt\u0151': 843,
u'lt\u0171': 843,
u'\u0171#l': 841,
u'pit': 841,
u'fil': 840,
u'zdu': 840,
u'rtt': 839,
u'lb\xe1': 839,
u'ebo': 839,
u'arg': 838,
u'i\xf3c': 837,
u'jj\xe1': 836,
u'kio': 836,
u'\xf6dj': 836,
u'r\xf6l': 836,
u'eil': 835,
u'lho': 835,
u'dog': 834,
u'ach': 834,
u'rh\xe1': 834,
u'k\xe9#': 832,
u'\xe1#\xe9': 831,
u'\xf6gz': 831,
u'g\xe9d': 831,
u'ok\xfa': 830,
u'v#n': 829,
u'ag\xe9': 829,
u'yj\xe1': 829,
u'emh': 828,
u'kv\xe1': 828,
u'be\xe9': 828,
u'r\xe9l': 828,
u'v#i': 827,
u'\xe1im': 825,
u'zli': 825,
u'\xe1zu': 824,
u'l\xf6z': 824,
u'ean': 824,
u'on\xf3': 823,
u'v\xe1z': 823,
u'\xe9kn': 823,
u'ork': 822,
u'gti': 822,
u'im\xe9': 821,
u'\xf6kr': 821,
u'obo': 820,
u'\xe1mt': 820,
u'rda': 820,
u'n#\u0151': 820,
u'u#h': 819,
u'\xe1#f': 819,
u'cs\xf3': 819,
u'plu': 818,
u'yfe': 816,
u't\xf3p': 816,
u'sb\xed': 815,
u'\xe1n\xe9': 815,
u'sl\xe9': 814,
u'ar\xe9': 814,
u'mr\xf3': 813,
u'iig': 813,
u'tu\xe1': 813,
u'sit': 812,
u'gul': 812,
u'cos': 812,
u'hin': 812,
u'\xe1vi': 812,
u'\xe9ha': 810,
u'gka': 810,
u'#\xe9h': 809,
u'\xf3po': 809,
u'n\xe9v': 808,
u'\xf6n\xed': 807,
u'iin': 807,
u'\xf6zg': 807,
u'mhe': 805,
u'avo': 805,
u'\xe1gj': 804,
u'j\u0171#': 803,
u'lh\xe1': 802,
u'j#l': 801,
u'csl': 801,
u'erp': 799,
u'#\xfcn': 797,
u'gz\xed': 796,
u'v\xe1j': 796,
u'imn': 796,
u'#d\xe1': 795,
u'car': 795,
u'\xf6kb': 794,
u'og\xe9': 794,
u'h#e': 793,
u'nz\xed': 792,
u'cht': 792,
u'y\xe1k': 792,
u'bin': 792,
u'ss#': 792,
u'n\u0151i': 791,
u'y#\u0151': 790,
u'y\xe1j': 790,
u'cas': 789,
u'usn': 789,
u'dny': 788,
u'ezr': 788,
u'z\xe9v': 787,
u'fun': 787,
u'g\xf3r': 787,
u'ezu': 786,
u'l\xf6t': 786,
u'kun': 786,
u'iot': 785,
u'nc#': 784,
u'a#\xfc': 783,
u'#bl': 783,
u'##\xfa': 783,
u'oho': 783,
u'\xe1ki': 783,
u'k\xe1h': 782,
u'ogk': 782,
u'\xfa#b': 782,
u'mle': 782,
u'erl': 781,
u'flo': 781,
u'#ep': 780,
u'aih': 779,
u'\xe1d#': 779,
u'blo': 779,
u'\xe9tt': 779,
u'oti': 779,
u'\xf6dt': 778,
u'njo': 777,
u'\xe9bb': 776,
u'\xf3ce': 775,
u'mav': 775,
u'zdt': 774,
u'g\xe1c': 774,
u'v#b': 774,
u'onv': 773,
u'cai': 772,
u'\xf6ge': 772,
u'b#\xf6': 772,
u'en\xed': 772,
u's#\u0151': 772,
u'uza': 771,
u'dj\xf6': 771,
u'huz': 771,
u'ukk': 770,
u'lso': 770,
u'alr': 770,
u'l\u0151v': 770,
u'enr': 770,
u'z\xf3r': 769,
u'ep#': 769,
u'dak': 769,
u'\xf6l\xf6': 769,
u'#th': 769,
u'aze': 768,
u'rza': 768,
u'j\xf3t': 768,
u'yl\u0151': 767,
u'j#b': 765,
u'hei': 765,
u'ce\xe1': 765,
u'#j\xe9': 764,
u'kha': 764,
u'eva': 763,
u'tvo': 763,
u'pje': 762,
u'ndn': 762,
u'rib': 762,
u'\xedvt': 761,
u'nt\xf3': 760,
u'yfo': 759,
u'y\xfa#': 759,
u'leo': 758,
u'\xf6le': 757,
u'eom': 756,
u'es\u0151': 756,
u'j#r': 755,
u'n\xe1v': 755,
u'\xe9d\xe9': 754,
u'lr\xf3': 753,
u'p\xedr': 753,
u'k\xe1\xe9': 753,
u'wal': 753,
u'gmo': 753,
u't\u0151n': 752,
u'ota': 751,
u'nso': 749,
u'zgy': 749,
u'#we': 748,
u's\xfct': 747,
u'tpr': 747,
u'akp': 747,
u'def': 747,
u'\xe9kh': 746,
u'c#\xe9': 745,
u'a#\u0151': 745,
u'eih': 743,
u'co#': 743,
u'rri': 743,
u'#nl': 742,
u'izi': 742,
u'ldt': 741,
u'rno': 741,
u'gzi': 739,
u'c#a': 739,
u'kaf': 738,
u'ab#': 738,
u'f\u0151i': 738,
u't\xe1h': 736,
u'c#k': 736,
u'b\xe9v': 735,
u'p#t': 735,
u'yzi': 734,
u'gyl': 734,
u'raf': 734,
u'z\u0171k': 733,
u'cat': 733,
u'zd#': 733,
u'\xfct\xf6': 733,
u'p#n': 733,
u'ldg': 732,
u'zez': 731,
u'bro': 730,
u'd#r': 730,
u'r#u': 730,
u'gui': 729,
u'\u0151dv': 729,
u'ty\xe1': 728,
u'j#v': 727,
u'abe': 727,
u'\xedvm': 726,
u'ng\xf3': 726,
u'vm\xe1': 726,
u'egz': 726,
u'n\u0151e': 725,
u'ies': 724,
u'zme': 724,
u'vr\u0151': 723,
u'\xf3n\u0151': 722,
u'bje': 721,
u'ohe': 721,
u'uls': 721,
u'ebi': 721,
u'p\xe9r': 720,
u's\u0151k': 720,
u'pne': 720,
u'u#n': 720,
u'eus': 719,
u'giu': 719,
u'y\xe9v': 718,
u'aph': 718,
u'o\xfcz': 717,
u'pin': 716,
u'c\xe1n': 716,
u'jet': 716,
u'tmi': 716,
u'mk\xf6': 715,
u'und': 714,
u'z#\xf3': 714,
u'uli': 713,
u'\xe9#n': 712,
u'm\xe9v': 712,
u'k\xf6k': 711,
u'\xedm\u0171': 710,
u'it\xf3': 710,
u'k\xf3d': 710,
u't\u0151r': 710,
u'\xe1ze': 709,
u'on\xed': 709,
u'\xf3ip': 709,
u'zf\xe9': 709,
u'geo': 708,
u'#if': 708,
u'\xe9ks': 708,
u'j\xe9v': 708,
u'#cu': 707,
u'\xf3it': 705,
u'\xe9l\xfa': 704,
u'd\xf3v': 704,
u'i\xe9p': 703,
u'dg\xe1': 703,
u'm\xf6m': 703,
u'u#v': 702,
u'\xfatv': 701,
u'aes': 701,
u'\xe1\xe1s': 701,
u'iet': 701,
u'zpi': 701,
u'l\xe1\xe1': 701,
u'm#\xfc': 700,
u'vir': 699,
u'io\xfc': 699,
u'cba': 698,
u'gn\xe9': 697,
u'mih': 697,
u'taj': 697,
u'\u0171ne': 696,
u'\xe9pk': 696,
u'jlo': 696,
u'bab': 696,
u'tia': 696,
u'\xe9pi': 695,
u'gy\xfa': 694,
u'kub': 693,
u'nab': 693,
u'jat': 692,
u'c##': 692,
u'tie': 692,
u'uin': 690,
u'\u0171zn': 690,
u'om\xe9': 689,
u'elp': 688,
u'atf': 688,
u'\u0171#c': 686,
u'#c#': 685,
u'\xedne': 684,
u'dso': 684,
u'v#\xe1': 684,
u'nkh': 683,
u'cep': 683,
u'\xfaz\xf3': 683,
u'pse': 683,
u'ajv': 683,
u'cin': 681,
u'\u0171#n': 681,
u'it\u0151': 681,
u'p#v': 681,
u'bbf': 681,
u'tig': 680,
u'\u0171z\u0151': 679,
u'urd': 679,
u'zeo': 679,
u'\xe1#s': 678,
u'i#\xed': 678,
u'gcs': 678,
u'\xe1ve': 677,
u'\xe9#i': 676,
u'\xe9ph': 676,
u'riv': 676,
u'rtb': 675,
u'\xedze': 674,
u'e#\xf3': 674,
u'g#\xfc': 674,
u'bna': 673,
u'nil': 672,
u'mts': 672,
u'rhu': 672,
u'#\xe9t': 671,
u'u#b': 671,
u'hig': 671,
u't\xe1g': 671,
u'\xf6mr': 670,
u'yv\xe1': 669,
u'yny': 669,
u'gua': 668,
u'dt\xfc': 668,
u'gus': 666,
u'd\u0151e': 666,
u'lil': 666,
u'rif': 666,
u'exi': 665,
u'ird': 664,
u'\u0171#r': 664,
u'\xe1r\xe9': 664,
u'amu': 663,
u'y\xe1v': 663,
u'\xfctk': 663,
u'kjo': 662,
u'obj': 662,
u'\xe1ke': 662,
u'lr\u0151': 661,
u'yko': 661,
u'f\xe1j': 660,
u'ttd': 659,
u'bat': 659,
u'ulu': 659,
u'gb\u0151': 658,
u'u##': 658,
u'\xe1pl': 657,
u'd\xf6b': 656,
u'ce#': 654,
u'tpo': 654,
u'f\xe1l': 654,
u'nz\xf6': 653,
u'gb\xe9': 653,
u'chu': 653,
u'zae': 653,
u'sej': 652,
u's\xf3s': 652,
u'deo': 652,
u'u#\xe1': 651,
u'd\xf3i': 651,
u'slo': 651,
u'd#d': 651,
u'yfa': 650,
u'v#r': 650,
u'd#j': 650,
u'cor': 650,
u'yju': 650,
u'c#e': 649,
u'\u0151ig': 649,
u'kli': 649,
u'tb\u0151': 648,
u'toc': 648,
u'ttn': 648,
u'p\xf3t': 647,
u'smi': 647,
u'\xe9nj': 646,
u'p\u0151#': 646,
u'r\xfas': 646,
u'n\xf3m': 645,
u'dex': 645,
u'jae': 644,
u'e#\xed': 644,
u'ad\xed': 644,
u'\xf3ha': 644,
u'rig': 644,
u'pei': 644,
u'lz\xe1': 643,
u'adv': 643,
u'zki': 643,
u'\u0151dj': 643,
u'yf\u0151': 642,
u'uk\xe9': 642,
u'cuk': 642,
u'dp#': 642,
u'gtu': 642,
u'#ac': 641,
u'xti': 641,
u'#n\xe1': 640,
u'erf': 638,
u'nco': 638,
u'pt\xe1': 638,
u'rli': 638,
u'\xe9n\xe9': 637,
u'kau': 637,
u'st\xe9': 637,
u'\xe9kp': 637,
u'ica': 636,
u'oge': 635,
u'itj': 635,
u'kb#': 635,
u'sk\xe1': 635,
u'mla': 634,
u'tir': 634,
u'\xf3zi': 633,
u'vsz': 633,
u'c#s': 633,
u'l\xf3p': 633,
u'jdn': 633,
u'xik': 632,
u'ze\xe1': 632,
u'td\xf6': 632,
u'bom': 632,
u'yie': 631,
u'd\xf6m': 631,
u'\xe9tv': 631,
u'aje': 631,
u'neb': 630,
u'dob': 629,
u'ezk': 628,
u'r\xf3r': 628,
u'sug': 628,
u'i\xe1h': 627,
u'ibi': 625,
u'rkv': 625,
u'gl#': 625,
u'nnm': 625,
u'j\xe9b': 625,
u'lex': 624,
u'\xe1r\xed': 623,
u'nop': 622,
u'bod': 622,
u'o#m': 622,
u'nli': 622,
u'\xf6sk': 622,
u'p#i': 622,
u'ch#': 621,
u'sni': 621,
u'g\xe1k': 621,
u'lth': 621,
u's#\xf3': 621,
u'vab': 620,
u'\xe1\xe9p': 620,
u'duk': 620,
u'\xe9sm': 619,
u'jug': 618,
u'een': 617,
u'd#g': 617,
u'izl': 617,
u'utc': 615,
u'keg': 614,
u'r\xfcg': 613,
u'g\xe9h': 613,
u'lyg': 613,
u'aja': 613,
u'os\xe1': 611,
u'ifj': 611,
u'bie': 611,
u'u#f': 610,
u't\xe1i': 609,
u'ceg': 609,
u'yzo': 608,
u'a#\xf3': 608,
u'v\u0171#': 608,
u'apk': 608,
u'k\xf3i': 608,
u'osl': 607,
u'n\u0151n': 607,
u'u#i': 606,
u'kva': 605,
u't\xfcr': 605,
u'fut': 604,
u'ig\xe1': 604,
u'\xe9la': 603,
u'aug': 602,
u'f\xe1r': 602,
u'abu': 601,
u'm#z': 600,
u'fj\xfa': 600,
u'mh\xe1': 600,
u'tdo': 600,
u'otr': 600,
u'zy#': 600,
u'p#h': 600,
u'g\xfaa': 599,
u'ndd': 598,
u'uel': 598,
u'rac': 598,
u'j\xf6j': 597,
u'aa#': 597,
u'\xf3zn': 597,
u'o#s': 597,
u'\xf6jj': 597,
u'\u0171#d': 596,
u'#zi': 596,
u'pko': 595,
u'abl': 594,
u'lb\xf3': 594,
u'\xe1z\xf3': 593,
u'\u0171l\xf6': 593,
u'p#l': 593,
u'ttj': 592,
u'ck#': 591,
u'shi': 590,
u'nar': 590,
u'aud': 589,
u'jsz': 589,
u'kef': 588,
u'b\xe9l': 588,
u'vje': 588,
u'ngj': 587,
u'uth': 587,
u'rd\xe1': 587,
u'\xf3b\xf3': 587,
u'o#k': 586,
u'nt\xfa': 585,
u'n#\xf3': 585,
u'\xf3de': 584,
u'rb\xe1': 583,
u'jda': 582,
u'efa': 582,
u'j\xfas': 581,
u'lvn': 581,
u'heg': 580,
u'ted': 580,
u'f\u0151t': 580,
u'dse': 579,
u'atc': 579,
u'\xf6l#': 579,
u'yid': 577,
u'ak\xfa': 577,
u'p\xf3l': 576,
u'ep\u0151': 576,
u'mpl': 576,
u'lgi': 575,
u'nic': 575,
u'asn': 575,
u't\xf3g': 574,
u'gd\xf6': 574,
u'\xfcne': 574,
u'okm': 574,
u'rka': 573,
u'pha': 573,
u'auk': 572,
u'\xfall': 572,
u'kov': 572,
u'j\xf3m': 571,
u'zif': 571,
u'y\xfcl': 571,
u'k\xfag': 570,
u'l\xfat': 570,
u'r\u0151d': 570,
u'dur': 570,
u'yad': 569,
u'd\u0151i': 569,
u'\xfaln': 569,
u'ik\xf3': 569,
u'g\xf3v': 569,
u'\xe9kj': 567,
u'ska': 567,
u'ezl': 566,
u'esb': 566,
u'bee': 566,
u'enb': 566,
u'ib\xf3': 566,
u'lec': 565,
u'abw': 565,
u'\u0151zz': 564,
u'\xedza': 564,
u'ovj': 564,
u'lau': 564,
u'\xe9sl': 564,
u'ia\xe1': 563,
u'vfo': 562,
u'e#\u0151': 561,
u'zsa': 561,
u'laz': 561,
u'\xf6d\xf6': 561,
u'lc#': 560,
u'kep': 560,
u'act': 560,
u'hih': 560,
u'\u0151id': 559,
u'rj\xe9': 558,
u'ure': 558,
u'\xe9vf': 557,
u'kaz': 557,
u'sil': 557,
u'\xe9nh': 556,
u'\u0171nn': 556,
u'\xf3kh': 556,
u'j#g': 555,
u'\xfa#l': 555,
u'\xfa#r': 555,
u's\xe1s': 555,
u'agb': 555,
u'\xe1po': 554,
u'jok': 554,
u'\xf6tn': 554,
u'dad': 554,
u'i\xe1i': 552,
u'ozy': 552,
u'\xedrs': 551,
u'yvo': 551,
u'eza': 550,
u'#av': 550,
u'sn\xe1': 550,
u'\u0151ta': 549,
u'pn\xfc': 548,
u'#b#': 548,
u'vta': 547,
u'ldh': 546,
u'nho': 546,
u'itr': 545,
u'lav': 545,
u'g\xe1h': 544,
u'epc': 544,
u'css': 544,
u'\xe1ik': 543,
u'\xf3lv': 543,
u'\xe1jc': 543,
u'\xe1td': 542,
u'u#j': 542,
u'\xf3lu': 542,
u'r\xe1d': 542,
u'not': 541,
u'mex': 541,
u'\xf6nj': 541,
u'\xe9z\u0151': 541,
u'sk\xfc': 541,
u'cal': 540,
u'ol\xfa': 540,
u'uan': 540,
u'rnu': 540,
u'gko': 540,
u'av\xe9': 540,
u'\xedjr': 539,
u'evi': 539,
u'tr\xf6': 539,
u'c#t': 539,
u'g#\xed': 539,
u'\xe1rr': 539,
u'\xfclk': 538,
u'gov': 538,
u'gly': 537,
u'op\xf3': 537,
u'iam': 536,
u'\u0171#\xe1': 535,
u'lvv': 535,
u'rgb': 534,
u'd#\xe1': 534,
u'\xf3t\xf3': 533,
u'gri': 533,
u'ptu': 533,
u'ud\xf3': 533,
u'p#\xe1': 533,
u'bli': 533,
u'ugu': 532,
u'ev\xed': 532,
u'n\xfag': 531,
u'oak': 530,
u'it\xf6': 530,
u'p\xe1j': 530,
u'gni': 529,
u'sie': 529,
u'r\u0151e': 529,
u'\xe9zt': 529,
u'd#u': 528,
u'vie': 528,
u'j\xf3g': 527,
u'\xe1tk': 527,
u'ivo': 526,
u'mic': 526,
u'\u0151v\xe1': 525,
u'l\u0151h': 525,
u'\xfalv': 524,
u'rvi': 524,
u'nz\xf3': 523,
u'z\u0171r': 523,
u'ka\xfc': 523,
u'\xe9tk': 523,
u'\u0151r\xf6': 522,
u'l\xf3v': 522,
u'zau': 522,
u'deb': 522,
u'm\xf3#': 521,
u't\u0171r': 521,
u'r\xf3m': 521,
u'c\xe1r': 521,
u'r\xf6s': 520,
u'pig': 520,
u'dro': 519,
u'rfo': 519,
u'bne': 519,
u'iki': 519,
u'hn#': 518,
u'cca': 518,
u'ogt': 518,
u'adr': 517,
u'kl\xf3': 517,
u'\xf3lt': 517,
u'bla': 516,
u'fl\xe1': 516,
u'ush': 516,
u'he#': 515,
u't#\xf3': 515,
u'jtu': 515,
u'e#\xfc': 515,
u'ubl': 515,
u'zev': 515,
u'\xe1kh': 515,
u'd#\xfa': 515,
u'gbo': 514,
u'sig': 514,
u'\xe1zv': 512,
u'k\xfck': 512,
u'#p\xf3': 512,
u'do#': 511,
u'\xfana': 511,
u'rcb': 511,
u'tob': 511,
u'y#z': 511,
u'ulz': 511,
u'okl': 511,
u'lz#': 509,
u'ee#': 509,
u'jre': 508,
u'y\xfat': 508,
u'vih': 507,
u'\xe1ne': 507,
u'\xf3ko': 507,
u'of\xe1': 506,
u'sfi': 506,
u'h\xe9a': 506,
u'bl\xe1': 506,
u'tt\xf6': 506,
u'ln\u0151': 506,
u'cr#': 505,
u'\xedz\xe1': 505,
u'o#\xe9': 505,
u'r\xe9j': 505,
u'fur': 505,
u'lv\u0171': 505,
u't\xe9#': 504,
u'ltt': 504,
u'#ej': 504,
u'zir': 504,
u'\xf3#z': 504,
u'\u0151g\xe9': 504,
u'web': 502,
u'bam': 502,
u'\xe1v#': 502,
u'sva': 501,
u'g\xe1#': 501,
u'aal': 500,
u'ucc': 500,
u'ng\xed': 500,
u'roh': 500,
u'\xe1gv': 500,
u'\u0151iv': 500,
u'\xe1#n': 499,
u't\xf3e': 499,
u'tka': 499,
u'rtr': 499,
u'r\xfck': 498,
u'ckh': 498,
u'lyk': 498,
u'a\xe1l': 498,
u'\u0171#g': 496,
u'ehn': 496,
u'bfe': 496,
u'e\xedr': 495,
u'\u0151an': 494,
u'ems': 493,
u'\xf6kh': 493,
u'otj': 493,
u'n\xfai': 493,
u'yin': 492,
u'\xfatl': 491,
u'\xfai#': 491,
u'lo#': 491,
u'cco': 491,
u'nfi': 491,
u'nzp': 490,
u'g\xe9i': 490,
u'\xe9da': 490,
u'f\u0151v': 490,
u'g\xf3i': 490,
u'lds': 489,
u'ts#': 489,
u'sur': 488,
u'lux': 488,
u'l\u0151g': 488,
u'\xednz': 487,
u'kur': 487,
u'\xfa#c': 486,
u'\xf6rr': 486,
u'\xe1le': 486,
u'arh': 486,
u'\xf6ki': 485,
u'\xfal\xe9': 485,
u'rjo': 484,
u'\xf3ik': 484,
u'ecr': 484,
u'dif': 484,
u'\xf6ns': 483,
u'rah': 483,
u'\xf6zk': 483,
u'thi': 482,
u'far': 482,
u'\xedme': 481,
u'ube': 481,
u'ah\xe1': 481,
u'\xf3ke': 481,
u'noz': 480,
u'm\u0171h': 480,
u'#\xe1z': 480,
u'd\xf3p': 480,
u'\xe1rf': 479,
u'\xfckk': 479,
u'\xfclr': 478,
u'ea#': 478,
u'z\u0151b': 478,
u'sz\u0151': 477,
u'row': 477,
u'rku': 477,
u'le\xed': 477,
u'nyp': 477,
u'roc': 476,
u'\xe1ir': 475,
u'#sw': 475,
u'#\u0151t': 475,
u'\u0171#o': 475,
u'yuk': 475,
u'g\xe1\xe9': 475,
u'#h\u0151': 475,
u'csh': 475,
u'\xe9ra': 474,
u'jal': 474,
u'gto': 474,
u'\xf3m#': 474,
u'igh': 474,
u'vk\xf6': 473,
u'gie': 473,
u'\xe1sc': 473,
u'fag': 473,
u'yb\u0151': 473,
u'z\u0151v': 472,
u'm#\u0151': 471,
u'\xf3no': 471,
u'\xf3n\xe1': 471,
u'asi': 471,
u'io#': 471,
u'p\xfcn': 470,
u'ez\xed': 469,
u'acc': 469,
u'b#u': 469,
u'neo': 469,
u'\xe9rr': 468,
u't\xe1#': 468,
u'egc': 468,
u'cga': 467,
u'acg': 467,
u'n\xe1i': 467,
u'lip': 467,
u'kk\xe1': 467,
u'rap': 467,
u'fis': 466,
u'rg#': 466,
u'#\xe9b': 465,
u'ack': 465,
u'zeu': 465,
u'gf\xe9': 464,
u'v##': 464,
u'f\xe9k': 464,
u't\xfa#': 462,
u'ibb': 462,
u'c#f': 462,
u'dla': 462,
u'p##': 462,
u'can': 462,
u'jj\xf6': 461,
u'\xf3mi': 461,
u'epa': 461,
u'jas': 460,
u'nk\xf6': 460,
u'kme': 460,
u'etf': 460,
u'ulo': 460,
u'\xe1vk': 460,
u'\xf3re': 460,
u'\xe9rc': 459,
u'\xfa#n': 459,
u'sut': 459,
u'mpa': 459,
u'ave': 459,
u'miz': 458,
u'o#t': 457,
u'\xedt\xfc': 457,
u'nus': 457,
u'nkt': 456,
u'gm\xe9': 456,
u'ghi': 456,
u'kk\xf3': 456,
u'\u0171so': 455,
u'imf': 455,
u'tgy': 455,
u'lur': 455,
u'd#c': 454,
u'zg\xf3': 453,
u'pb\xf3': 452,
u'\xf3ju': 452,
u'\xe1lp': 452,
u'#uj': 452,
u'sk\xf3': 452,
u'y#\xf3': 451,
u'eai': 451,
u'r\u0151i': 451,
u'ir#': 450,
u'rsf': 450,
u'urc': 450,
u'rch': 450,
u'\xfcr\xfc': 450,
u'tl\u0151': 450,
u'zap': 450,
u't\xf6d': 449,
u'tb\xe1': 449,
u'\xe1tg': 448,
u'ars': 448,
u'mam': 447,
u'rz\xe1': 446,
u'paz': 446,
u'iom': 446,
u'dus': 445,
u'rzu': 444,
u'\xf6mp': 444,
u'#sd': 444,
u'dri': 444,
u'\xf6nr': 444,
u'##\xf6': 444,
u'p\xe1\xe9': 444,
u'ly\xed': 444,
u'r#\xfc': 444,
u'gt\u0151': 443,
u'nlo': 443,
u'rvr': 442,
u'd\xf3r': 442,
u'idr': 442,
u'\xf6de': 442,
u'bai': 442,
u'lku': 441,
u'rg\xe1': 441,
u'\xe9th': 441,
u'ead': 440,
u'f\u0171t': 440,
u'dda': 440,
u'mp\xe9': 440,
u'\xfckn': 440,
u'ybr': 440,
u'dv\xe9': 439,
u'sl#': 439,
u'kbi': 439,
u'tch': 438,
u'rms': 438,
u'#ai': 437,
u'rsp': 437,
u'j\xfa#': 437,
u'xtu': 436,
u'itn': 436,
u'bny': 435,
u'\xednt': 435,
u'z\u0151n': 435,
u'buz': 435,
u'obe': 435,
u'd\xe1r': 435,
u'mb\xe1': 434,
u'cid': 434,
u'\xfa#j': 434,
u'tgo': 434,
u'u#g': 434,
u'b#\xfa': 434,
u'gz\u0151': 433,
u'lea': 433,
u'hev': 432,
u'akl': 432,
u'opa': 432,
u'ios': 432,
u'tl\xe9': 432,
u'kl\xe9': 431,
u'ual': 431,
u'yr\xf3': 430,
u'c#l': 430,
u'dt\xe9': 430,
u'\xe9vs': 429,
u'lod': 429,
u'orh': 429,
u'\xe1nr': 429,
u'\xe9#\xe1': 429,
u'urt': 428,
u'jko': 428,
u'\xf3er': 428,
u'lg\u0151': 428,
u'\xf3ci': 427,
u'ez\xfa': 427,
u'aad': 427,
u'\u0151r\u0151': 427,
u'aur': 427,
u'\xf6rd': 427,
u'j\xf3s': 427,
u'o#p': 427,
u'k\xf3#': 427,
u'rk\xe1': 426,
u'\xfala': 426,
u'pie': 426,
u'f\xfar': 426,
u'dn\xe9': 425,
u'r\xf3v': 425,
u'\xe1g\xf3': 425,
u'b\xe1j': 425,
u'ev#': 424,
u'r\xf3z': 424,
u'lge': 424,
u'smu': 424,
u'\xe1se': 424,
u'be\xe1': 424,
u'mb\xed': 423,
u'odh': 423,
u'tee': 423,
u'b\xedc': 423,
u'kk\xf6': 423,
u'di\xf3': 423,
u'ees': 422,
u'omt': 422,
u'\xe9ku': 422,
u'ngr': 421,
u'orj': 421,
u'd\u0151v': 421,
u'#gi': 420,
u'am\xf3': 420,
u'ko#': 420,
u'c\xe1t': 419,
u'\xe1lg': 419,
u'mi\xe1': 418,
u'kuk': 418,
u'\xf6rz': 417,
u'v\xfcn': 417,
u'\xedj#': 416,
u'oel': 416,
u'r\u0151v': 416,
u'\xe1bl': 416,
u'\xe1lm': 415,
u're\xe9': 415,
u'\u0171n\xfc': 415,
u'mvi': 414,
u'\xf3rh': 414,
u'j\u0171l': 414,
u'ui#': 414,
u'beg': 414,
u'#lt': 414,
u'\xf6dh': 414,
u'e#z': 413,
u'umn': 413,
u'lsi': 412,
u'tya': 412,
u'bas': 412,
u'\xf3du': 412,
u'\xfa#\xe1': 411,
u'isb': 411,
u'cam': 410,
u'v\u0151t': 410,
u'kab': 409,
u'age': 409,
u'tr\xe9': 408,
u'bg#': 408,
u'alz': 408,
u'eo#': 408,
u'\xe1sg': 408,
u'm\u0171b': 407,
u'at\xf6': 407,
u'sgy': 406,
u'amt': 406,
u'hne': 406,
u'geb': 406,
u'rc\xe1': 406,
u's\xe1d': 406,
u'udi': 406,
u'\xe1da': 405,
u'r\xe1i': 405,
u'\u0151dt': 405,
u's\xf3b': 405,
u'udh': 405,
u'\xfck\xf6': 405,
u'\xe1jd': 405,
u'sje': 404,
u'iis': 404,
u'anr': 404,
u'tc\xe1': 403,
u'#mc': 403,
u'\u0151ti': 403,
u'g\xf3s': 403,
u'#g#': 403,
u'\xf3g\xe9': 402,
u'kiu': 402,
u'v#d': 402,
u'eco': 402,
u'fan': 402,
u'g\xfal': 401,
u'idl': 401,
u'f\xe1n': 401,
u'tio': 401,
u'ej#': 401,
u'lvt': 401,
u'yh\xe1': 400,
u'mti': 400,
u'mt\xf3': 400,
u'c#h': 399,
u'agm': 399,
u'cr\xf3': 398,
u'v#c': 398,
u'vb\u0151': 398,
u'mb\u0151': 397,
u'tbi': 397,
u'bwe': 396,
u'vt\xe1': 396,
u'akv': 396,
u'i\xf3m': 396,
u'za\xe1': 396,
u'r#\xed': 396,
u'sav': 396,
u'd\xe1v': 396,
u'pj\xfc': 395,
u'\xedri': 395,
u'#s\xe1': 395,
u'kip': 395,
u'n\u0151j': 395,
u'u#r': 395,
u'agk': 395,
u'af#': 395,
u'p#b': 395,
u'nnu': 395,
u'sny': 394,
u'n\xfck': 394,
u'\xf6rm': 394,
u'sep': 394,
u'\xe1t\xed': 393,
u'b#\xfc': 393,
u'yz\xf3': 392,
u'#sr': 392,
u'\u0151el': 392,
u'ena': 392,
u'jva': 391,
u'zp\xe9': 391,
u'mlo': 391,
u'\xe9dr': 391,
u'zur': 391,
u'buc': 391,
u'jer': 390,
u'ou#': 390,
u'#\u0171r': 390,
u'\xe9rg': 389,
u'a\xe9r': 389,
u'gn\xe1': 389,
u'\u0151me': 389,
u'\u0151v\xfc': 389,
u'j#\xe1': 388,
u'pun': 388,
u'\xedzt': 388,
u'iag': 388,
u'g\xf3b': 388,
u'ej\xfc': 388,
u'uy#': 387,
u'gh\xed': 387,
u'#ue': 387,
u'pad': 387,
u'kny': 386,
u'f\xf3k': 386,
u'f\xfcl': 386,
u'nc\xe9': 385,
u'alp': 385,
u'\xe9p\xe1': 385,
u'ypo': 385,
u'\xfa#d': 384,
u'hun': 384,
u'jeb': 383,
u'siv': 383,
u'uho': 383,
u'mt\u0151': 383,
u'vot': 382,
u'alg': 382,
u'nav': 382,
u'tip': 382,
u'\xedzn': 381,
u'ulv': 381,
u'wsk': 381,
u'p#j': 381,
u'f\xf6d': 381,
u'ngu': 380,
u'\xfa#u': 380,
u'u#\xfa': 380,
u'\xe9sv': 380,
u'arp': 380,
u'zod': 379,
u'cre': 379,
u'epp': 379,
u'eck': 379,
u'nyj': 379,
u'd##': 379,
u'yvi': 378,
u'uha': 378,
u'\xe1k\xe9': 378,
u'\xf3he': 378,
u'\u0171ho': 378,
u'r\xe9m': 378,
u'#ze': 377,
u'a#x': 377,
u'\u0151ik': 377,
u'neu': 377,
u'fre': 376,
u'azv': 376,
u'os\xf3': 376,
u'ukh': 376,
u's\xf6p': 376,
u'#ei': 376,
u'otn': 376,
u'zdh': 375,
u'\xfajo': 375,
u'gpi': 375,
u'pk\xf6': 375,
u'pop': 375,
u'eds': 375,
u'ho#': 375,
u'p\xfck': 375,
u'\u0171zt': 374,
u'i\xe9n': 374,
u'agh': 374,
u'bah': 374,
u'\xe9zh': 374,
u'p\xe1z': 374,
u'gli': 373,
u'mpu': 373,
u'eg\xfc': 373,
u'vju': 373,
u'y\xf3l': 373,
u'\u0151vi': 372,
u'\xe1zk': 371,
u'eam': 371,
u'\xe1gm': 371,
u'ekf': 371,
u'nea': 371,
u'\xfck\xe9': 371,
u'rvb': 371,
u'#sh': 370,
u's\xedr': 369,
u'yt\u0151': 369,
u'lvo': 369,
u't\xe9h': 368,
u'f\xe1z': 368,
u'ffe': 367,
u'\xf3dh': 367,
u'mau': 366,
u'zts': 366,
u'xem': 366,
u'uxe': 366,
u'hav': 365,
u'uny': 365,
u'\u0171n\xe9': 365,
u'sec': 365,
u'isi': 365,
u'rau': 365,
u'uke': 364,
u'k\xe1#': 364,
u'o#b': 364,
u'rpe': 364,
u'\xe9p\u0151': 364,
u'ziv': 364,
u'kkb': 364,
u'ioa': 364,
u'uge': 363,
u'il\xe9': 363,
u'tub': 362,
u'ov#': 361,
u'gyp': 361,
u'fg\xe1': 361,
u'n\u0171#': 361,
u'nee': 361,
u'opu': 361,
u'sid': 360,
u'g\xfan': 360,
u'lno': 360,
u'tmu': 359,
u'\xfcrk': 359,
u'ds\xe9': 359,
u'\xf3el': 359,
u'tm\xf3': 359,
u'ss\xed': 359,
u'ar\xf3': 359,
u'jba': 358,
u'zec': 358,
u'\u0151he': 358,
u'iod': 358,
u'p#r': 358,
u'eff': 357,
u'dub': 357,
u'yvb': 356,
u'\xfart': 356,
u'bha': 356,
u'nig': 356,
u'jt\u0151': 356,
u'yg\xf3': 356,
u'ssi': 356,
u'\xe9zb': 356,
u'ujj': 355,
u'l\xe1j': 355,
u'dac': 355,
u'igg': 355,
u'eb\xf6': 355,
u'v\xf3i': 355,
u'ef\xe1': 354,
u'sir': 354,
u'\xe1mn': 353,
u'sjo': 353,
u'gir': 353,
u'\xedtk': 353,
u'rer': 353,
u'zse': 352,
u'ktj': 352,
u'\u0151lt': 352,
u'pi\xe1': 352,
u'ukc': 351,
u'v\xfck': 351,
u'\xe9br': 351,
u'ps\xe1': 351,
u'esa': 351,
u'eso': 351,
u'\xfcl\xe1': 350,
u'fic': 350,
u'utu': 350,
u'\xf3dt': 350,
u'd\xe1c': 350,
u'#oe': 349,
u'g#z': 349,
u'atm': 349,
u'\xfarh': 349,
u'mzi': 348,
u'c#n': 348,
u'puy': 348,
u'\xf6dv': 348,
u'kve': 346,
u'#s\xed': 346,
u'eum': 346,
u'als': 346,
u'\xe9#b': 346,
u'atg': 346,
u'j\xe1u': 346,
u'd\xe1b': 346,
u'wei': 345,
u'pju': 345,
u'oui': 345,
u'#gm': 344,
u'#wi': 344,
u'aul': 344,
u'nv\xe1': 344,
u'rz\xed': 343,
u'uzu': 343,
u'o#v': 343,
u'rpi': 343,
u'd\xf3c': 343,
u'daf': 343,
u'ti\xf3': 343,
u'asv': 342,
u'\u0171#\xfc': 342,
u'c#v': 342,
u'kso': 342,
u'oul': 342,
u'tic': 342,
u'a\xe1r': 342,
u'sco': 341,
u'n\xf3#': 341,
u'meh': 341,
u'rou': 341,
u'de\xe1': 341,
u'dav': 341,
u'cna': 341,
u'ude': 340,
u'tod': 340,
u'lhi': 340,
u'\xe1tc': 340,
u'usk': 340,
u'cif': 339,
u'rca': 339,
u'ipu': 339,
u'gsp': 339,
u'\xe9r\xe1': 338,
u'no#': 338,
u'tf\u0151': 338,
u'v\u0151n': 338,
u'new': 338,
u'#pn': 338,
u'tj\xf3': 337,
u'put': 337,
u'jor': 337,
u'ids': 337,
u'ltn': 336,
u'spi': 336,
u'ry#': 336,
u'eug': 335,
u'u#l': 335,
u'zic': 335,
u'\xe9k\xfa': 335,
u'ikv': 335,
u'ew#': 335,
u't#w': 334,
u'r\xf3i': 334,
u'pnr': 334,
u'j#n': 334,
u'rc\xe9': 334,
u'#\xe1s': 334,
u'roa': 334,
u'nj\xfc': 334,
u'izh': 334,
u'taa': 334,
u'ows': 334,
u'\xe1dt': 333,
u'z\u0151p': 333,
u'ro\xf6': 333,
u'zue': 333,
u'f\xe1t': 333,
u'adb': 332,
u'#ik': 332,
u'#iv': 332,
u'\xe9pa': 332,
u's\xf3c': 332,
u'vte': 331,
u'\xe9pm': 331,
u'cta': 331,
u'rtm': 330,
u'\u0171tl': 330,
u'dv\xe1': 329,
u'mem': 329,
u'epv': 329,
u'hil': 329,
u'\xf6g\xe9': 328,
u'euh': 328,
u'z\xfar': 328,
u'ary': 328,
u'irb': 327,
u'oj\xe1': 327,
u'rb#': 327,
u'#ic': 327,
u'ty#': 327,
u'th\xe1': 327,
u'\xf3te': 327,
u'vj\xe1': 327,
u'use': 327,
u'eba': 327,
u'edo': 326,
u'si\xe1': 326,
u'bok': 326,
u'mst': 326,
u'k\xedr': 325,
u'kpa': 325,
u'l\xe1h': 325,
u'\u0151dh': 325,
u'csr': 325,
u'ji#': 324,
u'\u0151zt': 324,
u'epi': 324,
u'jti': 324,
u'\xf3tl': 324,
u'k\xf3h': 324,
u'z\xf6m': 324,
u'\xf3ku': 324,
u'lzh': 323,
u'hre': 323,
u'zl\xf3': 323,
u'irr': 322,
u'rk#': 322,
u'\xe9z\xfc': 322,
u'mje': 321,
u'gno': 321,
u'nch': 321,
u'muz': 321,
u'jos': 321,
u'\xe1mm': 320,
u'hsz': 320,
u'\xe9#l': 320,
u'ish': 320,
u'abh': 320,
u'esm': 320,
u'vom': 319,
u'lgu': 319,
u'#s\xf6': 319,
u'\u0151z\xf6': 319,
u'##\xfc': 318,
u'vna': 318,
u'\xfajf': 317,
u'v#\xfa': 317,
u'#h\u0171': 317,
u'\u0151ol': 316,
u'\xf3zo': 316,
u'cet': 316,
u'#wo': 316,
u'\u0151s\xfc': 316,
u'\xedm\xe9': 315,
u'o#c': 315,
u'pea': 315,
u'lb\xe9': 315,
u'j#d': 314,
u'emk': 314,
u'nob': 314,
u'\xf6he': 314,
u'esc': 314,
u'j\xf6h': 313,
u'heu': 313,
u'#\xf3h': 313,
u'jea': 313,
u'ngt': 313,
u'bow': 313,
u'ld\xf3': 313,
u'g#\u0151': 313,
u'\u0151ip': 313,
u'd\xf3b': 313,
u'nme': 313,
u'upe': 313,
u'jdz': 313,
u'rb\u0151': 312,
u'\xedvb': 312,
u'\xfazn': 312,
u'zmi': 312,
u'r\xe9#': 312,
u'eju': 312,
u'owa': 312,
u'jaz': 311,
u'nc\xfa': 311,
u'o\xf6v': 311,
u'abn': 311,
u'gr\xf6': 311,
u'\xe9je': 310,
u'ahi': 310,
u'veb': 310,
u'ipo': 310,
u'lms': 310,
u'tss': 310,
u'l\xedc': 310,
u'zf\xf6': 310,
u'azf': 309,
u'yi\xfc': 309,
u'tou': 309,
u'g\u0171e': 309,
u'nip': 309,
u'eot': 309,
u'zog': 308,
u'\xe1iv': 308,
u'\xedrk': 308,
u'sve': 308,
u'rg\xe9': 308,
u'\xfaz#': 308,
u'\xf3to': 308,
u'r\u0151r': 308,
u'ree': 308,
u'd\xe9b': 308,
u'fi\xe1': 307,
u'nfa': 307,
u'cig': 306,
u'\xe1#i': 306,
u'orf': 306,
u'#\xe9j': 306,
u'amk': 306,
u'v#g': 306,
u'ohn': 306,
u'ziz': 306,
u'lbu': 306,
u'c\xfan': 305,
u't\xfat': 305,
u'rfe': 305,
u'#ev': 305,
u'usc': 305,
u's\u0151o': 304,
u'\xe1ti': 304,
u'\xe9lg': 304,
u'yto': 304,
u'zup': 304,
u'zfo': 304,
u'f\xe9m': 304,
u'd#\xf6': 303,
u'og\xfa': 303,
u'n\xfab': 303,
u'i\xe1\xe9': 302,
u'f\u0151r': 302,
u'mf#': 302,
u'aip': 301,
u'\xedzz': 301,
u'via': 301,
u'acn': 301,
u'etu': 301,
u'isc': 301,
u'reb': 301,
u'gza': 300,
u'zdi': 300,
u'eop': 300,
u'ulk': 300,
u'h#\xfa': 300,
u'h\xedz': 299,
u'k\xe9h': 299,
u'\xf6b\xe9': 299,
u'#b\xfa': 299,
u'\xfabi': 299,
u'ds#': 299,
u'ul\xe9': 299,
u'abr': 299,
u'kje': 298,
u'\u0151ok': 298,
u'\xedvu': 298,
u'jje': 298,
u'mug': 298,
u'd\xf3m': 298,
u'pih': 298,
u'j\xe1s': 298,
u'ynu': 298,
u'\xe9vn': 297,
u'em\xfc': 297,
u'sn\xe9': 297,
u'\u0171ip': 297,
u'\xf6kv': 297,
u'\xf3lj': 297,
u'\xf3sa': 297,
u'hus': 297,
u'stu': 296,
u'mfo': 296,
u'nzb': 295,
u'#cr': 295,
u'm\u0171i': 295,
u'une': 295,
u'yt\xf3': 295,
u'pag': 295,
u'#k\u0151': 295,
u'lze': 294,
u'urv': 294,
u'\xfa#o': 294,
u'el\xfa': 294,
u'ek\u0171': 294,
u'a\xe1g': 294,
u'hma': 294,
u'euv': 293,
u'que': 293,
u'\u0171t\u0151': 293,
u'sp\xe1': 293,
u'mke': 293,
u'ok\xf6': 293,
u'\u0151b\xed': 292,
u'dr\xf3': 292,
u'cke': 292,
u'\u0151kt': 291,
u'l\xfaj': 291,
u'dox': 291,
u'alf': 291,
u'\xe1kt': 291,
u'\xe1#l': 291,
u'\xe1br': 291,
u'g\xf3n': 291,
u'kja': 290,
u'jev': 290,
u'v#u': 290,
u'ejk': 290,
u'iri': 289,
u'ofe': 289,
u'dr\xe9': 289,
u'\xfan#': 289,
u'top': 289,
u'iec': 289,
u'd\xe9m': 289,
u'ump': 289,
u'\xf6lh': 289,
u'itu': 288,
u'odv': 288,
u'bik': 288,
u'\xf3kt': 288,
u'nog': 287,
u'am\xe9': 287,
u'edu': 287,
u'boz': 287,
u'#xx': 287,
u'com': 287,
u'p#p': 287,
u'd\xe9t': 287,
u'efd': 287,
u'a\xedt': 286,
u'#gs': 286,
u'if\xe1': 286,
u'sf\xe9': 286,
u'ac\xe9': 286,
u'i#\xf3': 286,
u'iog': 286,
u'ss\xfa': 286,
u't\xf6z': 285,
u'aar': 285,
u'u#c': 285,
u'epo': 285,
u'hte': 285,
u'y\xf6r': 285,
u'ajk': 285,
u'taf': 285,
u'kaj': 284,
u's\xf6t': 283,
u'l\xf6r': 283,
u'er\xf6': 283,
u'\xedrh': 283,
u'dr\u0151': 283,
u'ldn': 283,
u'ntp': 283,
u'ojt': 283,
u'mso': 283,
u'pr\xe1': 283,
u'\xe9gm': 283,
u'rzs': 282,
u'was': 282,
u'nsa': 282,
u'vla': 282,
u'eos': 282,
u'es\xf3': 282,
u'ei\xe9': 281,
u'eac': 281,
u'\xf3bi': 281,
u'al\xed': 281,
u'\xe1gz': 281,
u'fd#': 281,
u'fle': 281,
u'csz': 281,
u'\xf3dr': 281,
u'bit': 281,
u'ugs': 280,
u'omh': 280,
u'o#f': 280,
u'cla': 280,
u'pt\xe9': 280,
u'zkv': 279,
u'wat': 279,
u'zd\xfc': 279,
u'u#o': 279,
u'\xe9lm': 279,
u'nei': 279,
u'l\xe9h': 279,
u'\xfatm': 278,
u'z\xf3f': 278,
u'orp': 278,
u'sea': 278,
u'n#w': 278,
u'\xfask': 278,
u'omf': 277,
u'n\u0151m': 277,
u'g\xedg': 277,
u'ndv': 277,
u'z\xe1v': 277,
u'mth': 277,
u'ruc': 277,
u'iov': 277,
u'yjo': 277,
u'fas': 277,
u'bbk': 277,
u'vic': 276,
u'y\xfcg': 276,
u's\xe1l': 276,
u'foj': 276,
u'ovo': 276,
u'ntc': 276,
u'ox#': 276,
u'arv': 276,
u'swi': 276,
u'hac': 275,
u'ilm': 275,
u'tga': 275,
u'rpa': 275,
u'da\xed': 275,
u'juh': 275,
u'toj': 274,
u'uer': 274,
u'tpa': 274,
u'\xf6ti': 274,
u'hie': 274,
u'\xf3z\xf3': 273,
u'gur': 273,
u'ift': 273,
u'\xe1db': 273,
u'h\xe9s': 273,
u'pmu': 273,
u'\xfatr': 272,
u'gz\xf3': 272,
u'z\u0151s': 272,
u'br\xe1': 272,
u'c#i': 272,
u'lic': 272,
u'#l\u0151': 272,
u'\xf6mb': 271,
u'unt': 271,
u'y\xf6n': 271,
u'\xf3lo': 271,
u'ikh': 271,
u'\xe1n\xf3': 271,
u'rbo': 270,
u'\xf3bu': 270,
u'\xf3ir': 270,
u'eph': 270,
u'izu': 270,
u'j#c': 269,
u'ipi': 269,
u'\xe9t\xfc': 269,
u'hle': 269,
u'ub\xe1': 268,
u'#yo': 268,
u'tl\xf3': 268,
u'yvv': 268,
u'h#a': 268,
u'nks': 267,
u'am\xfa': 267,
u'ycs': 267,
u'sh#': 267,
u'ajs': 267,
u'pr\u0151': 267,
u'r\xf3j': 266,
u'orc': 266,
u'#vu': 266,
u'hti': 266,
u'zu\xe1': 266,
u'\u0151h\xe1': 266,
u'f\u0151n': 266,
u'\xedv\xe9': 265,
u'ung': 265,
u'#\xedz': 265,
u'ith': 265,
u'exe': 265,
u'\xe9db': 265,
u'#of': 264,
u'\xfatu': 264,
u'ngi': 264,
u'c#\xfa': 264,
u'ohi': 264,
u'ojk': 264,
u'g\xf3a': 264,
u'\xe8s#': 264,
u'v\xf3v': 264,
u'zfe': 263,
u'ey#': 263,
u'mid': 263,
u'\xedj\xe1': 263,
u'zag': 263,
u'hah': 262,
u'uca': 262,
u'#z\xf3': 262,
u'pik': 262,
u'tae': 262,
u'iii': 261,
u'out': 261,
u'n\xedg': 261,
u'b\xe1i': 261,
u'rav': 261,
u'#sl': 260,
u'ubo': 260,
u'rst': 260,
u'cof': 260,
u'idt': 260,
u'mcc': 260,
u'mo#': 260,
u'udv': 260,
u'gk#': 260,
u'nou': 259,
u'cun': 259,
u'\xf3j\xfa': 259,
u'rgo': 259,
u'coe': 259,
u'abd': 259,
u'ums': 258,
u'wif': 258,
u'\xe9dh': 258,
u'voz': 258,
u'mha': 257,
u'\xe9na': 257,
u'lyu': 257,
u'\xe1jk': 257,
u'ajb': 257,
u'pr\xe9': 257,
u'rvv': 257,
u'r\u0171t': 256,
u'sca': 256,
u'ppi': 256,
u'lpu': 256,
u'v\u0151i': 256,
u'b\xe9n': 256,
u'p#d': 256,
u'r\xfag': 256,
u's#w': 256,
u'l\xf3e': 255,
u'\u0151zh': 254,
u'\xedru': 254,
u'eis': 254,
u'keh': 254,
u'ieb': 254,
u'olr': 254,
u'd\u0151z': 254,
u'g\u0151e': 254,
u'tma': 254,
u'lbi': 254,
u'j\xe1g': 254,
u'yva': 253,
u'\xf6zj': 253,
u'nd\u0171': 253,
u'k\xf3t': 253,
u'\xe9sp': 253,
u'reo': 253,
u'\xfath': 252,
u'jkv': 252,
u'\xe9t\xf3': 252,
u'lub': 252,
u'amf': 251,
u'kaa': 251,
u'l\xfaa': 251,
u'urk': 251,
u'jja': 251,
u'j\xf3n': 251,
u'mi\xf3': 251,
u'\xe1be': 251,
u'ks\xe1': 251,
u'tid': 251,
u'anm': 251,
u'maa': 250,
u'#\xe1b': 250,
u'isn': 250,
u'pli': 250,
u'aji': 250,
u'mfe': 250,
u'inb': 249,
u'\xf3al': 249,
u'ask': 249,
u'oom': 249,
u'gvo': 249,
u'arz': 249,
u'y\xf3s': 249,
u'nr\xf3': 248,
u'ija': 248,
u'urr': 248,
u'chr': 248,
u'\xe9h\xe9': 248,
u'd\xf3h': 248,
u'th#': 248,
u'nid': 248,
u'rui': 248,
u'ak\xfc': 248,
u'bog': 248,
u'pid': 248,
u'k\xe9j': 247,
u'\u0151bi': 247,
u'nss': 247,
u'i\xfcn': 247,
u'rcr': 247,
u'rtf': 247,
u'rf\xf6': 247,
u'etg': 247,
u'ype': 247,
u'flu': 247,
u'k\xf3c': 247,
u'r\xe9h': 247,
u'rar': 247,
u'gi\xe9': 246,
u'\xf3me': 246,
u'\xf6kt': 246,
u'\xe1fo': 246,
u'\u0171rh': 245,
u'vhe': 245,
u'gau': 245,
u'nh\xf6': 245,
u'l#\xf3': 245,
u'oth': 245,
u'r\xfat': 245,
u'uor': 244,
u'\xedv\xf3': 244,
u'jfe': 244,
u'#f\xfa': 244,
u'aas': 244,
u'\xe9t\xe1': 244,
u'#ly': 244,
u'hi#': 244,
u'#kb': 244,
u'\xedzb': 243,
u'\u0151j\xe1': 243,
u'zso': 243,
u'##\xed': 243,
u'\u0171t\xe9': 243,
u'\xf3db': 243,
u'\xe1ms': 242,
u'asu': 242,
u'jci': 242,
u'r\u0151n': 242,
u'\xf6gt': 241,
u'ld\xe9': 241,
u'm\xe1h': 241,
u'\xe9dt': 241,
u'ot\xed': 241,
u'kgo': 241,
u'ice': 241,
u'#kh': 241,
u'v#o': 240,
u'fla': 240,
u'peu': 240,
u'k\xfas': 239,
u'j#u': 239,
u'ps\xe9': 239,
u'ff\xe9': 239,
u'ip#': 239,
u'jt\xf6': 239,
u'\xf6lb': 239,
u'\xf3vi': 238,
u'z\u0151r': 238,
u'olf': 238,
u'eor': 238,
u'\xf3t\xe9': 238,
u'dpj': 238,
u'\xf3\xfcg': 238,
u'zb\u0151': 238,
u'rvk': 238,
u'onr': 237,
u'uci': 237,
u'ns\xfc': 237,
u'rf\xfa': 237,
u'spr': 237,
u'eg\xf6': 237,
u'zbi': 237,
u'ev\u0171': 236,
u'p\xfag': 236,
u'c\xe9t': 236,
u'yaz': 236,
u'\u0151j\xfc': 236,
u'y\xe9#': 236,
u'zd\xf6': 236,
u'pp\xfa': 236,
u'pou': 236,
u'n\xfcg': 236,
u'ekg': 236,
u'ugr': 235,
u'nkl': 235,
u'uze': 235,
u'\xe1mh': 235,
u'l\xfaz': 235,
u'oal': 235,
u'isr': 235,
u'rmu': 235,
u'ams': 234,
u'ng\xe1': 234,
u'euo': 234,
u'\xedns': 234,
u'ecd': 234,
u'sl\xe1': 234,
u'abj': 234,
u'\xfckh': 234,
u'x#m': 233,
u'b\xfaj': 233,
u'\xe1#b': 233,
u'#z\u0171': 233,
u'gy\xed': 233,
u'lb\xed': 233,
u'o#n': 233,
u'nsp': 233,
u'roe': 233,
u'bh\xe1': 233,
u'ytu': 233,
u'rtk': 233,
u'\u0171r\u0171': 232,
u'\u0151r#': 232,
u'n\xfct': 232,
u'ulr': 232,
u'izg': 232,
u'lv\xfc': 232,
u'r\u0171n': 231,
u'\xf6ng': 231,
u'#gc': 231,
u'g\xe9\xe9': 231,
u'n\xe9b': 231,
u'zio': 231,
u'lz\u0151': 230,
u'pj\xe9': 230,
u'omj': 230,
u'nlj': 230,
u'jgu': 230,
u'\xf6d\xfc': 230,
u'\xe9va': 229,
u'zos': 229,
u'#cl': 229,
u'pso': 229,
u'c#\xe1': 229,
u'ltr': 229,
u'eps': 229,
u'ruz': 229,
u'd\xe1z': 229,
u'gn\u0151': 228,
u'sr\xed': 228,
u's\xe9i': 228,
u'rkm': 228,
u'\xe9##': 228,
u'uty': 228,
u'\u0171rz': 227,
u'g\xfas': 227,
u'\xed#l': 227,
u'lk\xed': 227,
u'cap': 227,
u'\xfazz': 227,
u'#v\xf6': 227,
u'b\xe1b': 227,
u'ec#': 227,
u'dep': 227,
u'\xf6li': 227,
u'uch': 226,
u'yme': 226,
u'\xf6b#': 226,
u'ls\xfa': 226,
u'acf': 226,
u'sev': 226,
u'\xfak#': 226,
u'uti': 226,
u'yg\xe9': 226,
u'\xedjk': 225,
u'\u0171zs': 225,
u'gb\xfc': 225,
u'lop': 225,
u'eup': 225,
u'ahr': 225,
u'lpr': 225,
u'r\xe1h': 225,
u'r\xed#': 225,
u'r\xf6n': 225,
u'\u0151sk': 225,
u'hed': 224,
u'\xf3zz': 224,
u'euk': 224,
u'uml': 224,
u'gbu': 224,
u'cfe': 224,
u'm\xf6r': 223,
u'vd\xed': 223,
u'iav': 223,
u'eh\xed': 223,
u'#l\xf6': 223,
u'cs\xed': 223,
u'r\xfak': 223,
u't\xe1\xe9': 222,
u'jv\xe9': 222,
u'c#r': 222,
u'ovd': 222,
u'\xfcll': 222,
u'zhi': 222,
u'\xe9h\xed': 222,
u'ze\xfc': 222,
u'\xf3pi': 222,
u'ht#': 222,
u'p\xe1v': 222,
u'h\xedn': 221,
u'\xf3#\xed': 221,
u'atp': 221,
u'xi#': 221,
u'ccr': 221,
u'kkr': 221,
u'gc\xe9': 221,
u'h\xe9n': 220,
u'\u0171zz': 220,
u'\xe1mr': 220,
u'qui': 220,
u'pk\xe9': 220,
u'wob': 220,
u'swo': 220,
u'koa': 220,
u'rr\xe9': 220,
u'usu': 220,
u'c#j': 219,
u'yhi': 219,
u'vum': 219,
u's\u0151n': 219,
u'\xe9gf': 219,
u'zvi': 219,
u'zk\xe1': 218,
u'chm': 218,
u's\xedn': 218,
u'lls': 218,
u'ruf': 218,
u'ded': 218,
u'v\xf3b': 218,
u'd\u0171#': 217,
u'boe': 217,
u'aif': 217,
u'\xf6nv': 217,
u'odl': 217,
u'\xe9k\xfc': 217,
u'ign': 217,
u'kr\xed': 216,
u'lst': 216,
u'm\xfag': 216,
u'zmo': 216,
u'jc#': 216,
u'leu': 216,
u'lir': 216,
u'i\xf6b': 216,
u'arf': 216,
u'\xf3ki': 216,
u'vk\xe9': 215,
u'#qu': 215,
u'k\xedm': 215,
u'j\xf3i': 215,
u'ts\xf6': 215,
u'\xf6pr': 215,
u'ai\xe9': 214,
u'rss': 214,
u'giz': 214,
u's\xe1i': 214,
u'\u0151\xedt': 214,
u'\xe9tm': 214,
u'acu': 214,
u'odr': 214,
u'#l\xf3': 214,
u'dup': 214,
u'\xedm#': 213,
u'ltv': 213,
u'uis': 213,
u'rlo': 213,
u'x#s': 213,
u'v\xf6r': 213,
u'dy#': 213,
u'\xe1rz': 213,
u'\u0171k#': 213,
u'bbv': 213,
u'uzd': 212,
u'umh': 212,
u'\u0151m\xe9': 212,
u'ley': 212,
u'\xe9kv': 212,
u'vn\xe1': 212,
u'icc': 212,
u'anj': 212,
u'osr': 210,
u'eas': 210,
u'oet': 210,
u'oec': 210,
u'\xe1lr': 210,
u'\u0171nh': 210,
u'upl': 210,
u'\xe9lp': 210,
u'dap': 210,
u'rvn': 210,
u'on\xe9': 209,
u'\u0151zi': 209,
u'en\xf6': 209,
u'ms#': 209,
u'rz\xf6': 208,
u'#gn': 208,
u'ij\xe1': 208,
u'\xe1ib': 208,
u'oju': 208,
u'it\xfa': 208,
u'ule': 208,
u'diu': 208,
u'dab': 208,
u'ri\xf3': 208,
u'hmi': 208,
u'nkj': 207,
u's\u0171r': 207,
u'iff': 207,
u'fat': 207,
u'llv': 207,
u'sip': 207,
u'sac': 207,
u'coh': 207,
u'id\xf3': 207,
u'g\u0151b': 207,
u'eb\xe9': 207,
u's\xf6z': 206,
u'edb': 206,
u'apd': 206,
u'\xf3\xe9r': 206,
u'rd\xf6': 206,
u'\u0151t\xfc': 206,
u'id\xed': 206,
u'otf': 206,
u'die': 206,
u'dau': 206,
u'lfa': 206,
u'p\xe1h': 206,
u'\xf3gu': 205,
u'ng\xfc': 205,
u'\u0171##': 205,
u'uts': 205,
u'eon': 205,
u'csp': 205,
u'rhi': 205,
u'dev': 205,
u'enp': 205,
u'h\u0151m': 205,
u'hau': 204,
u'iv#': 204,
u'zho': 204,
u'd\xe9g': 204,
u'#\xe1\xe9': 204,
u'dh\xe1': 204,
u'umu': 204,
u'ao#': 204,
u'liv': 204,
u'lue': 204,
u'mko': 204,
u'rv\xed': 204,
u't\u0171e': 203,
u'jac': 203,
u'aig': 203,
u'v\u0171s': 203,
u'\xf3iv': 203,
u'd\u0151l': 203,
u'lfi': 203,
u'd\xf3\xfc': 203,
u's\xfas': 203,
u'tjo': 203,
u'\xf3ve': 202,
u'rsj': 202,
u'zs#': 202,
u'its': 202,
u'ktb': 202,
u'nej': 202,
u'r\xedz': 202,
u'ef\xe9': 202,
u'mfi': 202,
u'osp': 201,
u'\xe1##': 201,
u'eua': 201,
u'esh': 201,
u'jjl': 201,
u'xxi': 201,
u'\u0151#\xed': 201,
u'\xedpi': 201,
u'rh\xe9': 201,
u'\xe1j#': 201,
u'go#': 201,
u'p#c': 201,
u'lvj': 201,
u'rb\xf3': 200,
u'\xe1mk': 200,
u'\u0151r\xfc': 200,
u'\xedzs': 200,
u'aue': 200,
u'or\xe9': 200,
u'ajr': 200,
u'euu': 200,
u'lh\xfa': 200,
u'\xe9pr': 200,
u'epk': 200,
u'\xfaci': 200,
u'\xf3hi': 200,
u'aju': 200,
u'#zu': 199,
u'viv': 199,
u'z\xe1\xe9': 199,
u'\u0151t\u0151': 199,
u'\xfast': 199,
u'uku': 198,
u'rbu': 198,
u'lou': 198,
u'zs\xed': 198,
u'eye': 198,
u'd\u0151m': 198,
u'sez': 198,
u'tac': 198,
u'sou': 197,
u'iv\xf3': 197,
u'ciz': 197,
u'l\xf6v': 197,
u'emf': 197,
u'eev': 197,
u'#\xfan': 197,
u'zsd': 197,
u'd\xf6g': 197,
u'mp\xf3': 197,
u'beo': 197,
u'otb': 197,
u'cop': 197,
u'\u0171#\xf6': 196,
u'scu': 196,
u'\xf3jo': 196,
u'ls\xe9': 196,
u'\xf3e#': 196,
u't\xe1d': 196,
u'\xe9dn': 196,
u'opr': 196,
u'mou': 196,
u'fuk': 196,
u'aff': 196,
u'igu': 196,
u'igm': 196,
u'gfr': 195,
u'kmu': 195,
u'rcc': 195,
u'ahm': 195,
u'#gp': 195,
u'z\xe1i': 195,
u'rmf': 195,
u'#dz': 195,
u'nyg': 195,
u'\xe9kg': 195,
u'ggn': 195,
u'#\xf3c': 194,
u'ufu': 194,
u'm\xfcg': 194,
u'n\xedc': 194,
u'it\xfc': 194,
u'd\xf3e': 194,
u'ekp': 194,
u'tno': 194,
u's\xf3k': 194,
u'p\xe1p': 194,
u'\u0151ki': 193,
u'uki': 193,
u'r\xf3d': 193,
u'bni': 193,
u'ais': 193,
u'uzs': 193,
u'ozm': 193,
u'el\u0171': 193,
u'\xf3im': 193,
u'ltp': 193,
u'tuc': 193,
u'lym': 193,
u'eul': 192,
u'hri': 192,
u'ah\xfa': 192,
u'v\xe9\xe9': 192,
u'\xfari': 192,
u'acb': 192,
u'zej': 192,
u'ra\xe9': 192,
u'who': 192,
u'fac': 192,
u'\xe9rm': 191,
u'g\xf6l': 191,
u'bvi': 191,
u'\u0151f\xf6': 191,
u'dil': 191,
u'naf': 191,
u'\xfalh': 191,
u'n\xfaj': 191,
u'cbe': 191,
u'cer': 190,
u'o#d': 190,
u'o#r': 190,
u'elc': 190,
u'v\xedr': 190,
u'yse': 190,
u'ico': 190,
u'j\xe9h': 190,
u'gr\xe9': 190,
u'but': 189,
u'pke': 189,
u'odp': 189,
u'w#y': 189,
u'szj': 189,
u'yn\xe1': 189,
u'\u0151kh': 188,
u'\u0151r\xe9': 188,
u'yli': 188,
u's\xfcg': 188,
u'zie': 188,
u'cce': 188,
u'dpe': 188,
u'\xe1f\u0171': 188,
u'vvi': 188,
u's\xfaf': 188,
u'wan': 187,
u'pn\xe1': 187,
u'n\xf3z': 187,
u'urb': 187,
u'\u0161pi': 187,
u'xan': 187,
u'p\u0151d': 187,
u'isg': 187,
u'\u0151h\xf6': 187,
u'f#\xfa': 187,
u'yvt': 187,
u'bja': 186,
u'evy': 186,
u'asf': 186,
u'ngn': 186,
u'hnb': 186,
u'\xf6n\xe9': 186,
u'\xf3ib': 186,
u'nut': 186,
u'ick': 186,
u'hur': 186,
u'h#\xe9': 186,
u'p\xe9v': 185,
u'lsk': 185,
u'#\xe9d': 185,
u'sfa': 185,
u'ltk': 185,
u'opc': 185,
u'\xf6th': 185,
u'nz\xfa': 184,
u'\xedn#': 184,
u'in\xed': 184,
u'auz': 184,
u'\xe1ng': 184,
u'rtp': 184,
u'jou': 184,
u'vi\xe1': 184,
u'vy#': 184,
u'gs#': 184,
u'af\xfc': 184,
u'bvp': 183,
u'\u0171zo': 183,
u'\u0151zs': 183,
u'\xf3ro': 183,
u'ah#': 183,
u'exa': 183,
u'\xedtt': 183,
u'st\xed': 183,
u'g\u0151v': 183,
u'lvh': 183,
u'\u0151z\xed': 182,
u'ibr': 182,
u'kec': 182,
u'ng\u0151': 182,
u'tst': 182,
u'ag\xf3': 182,
u'\u0151db': 182,
u'tut': 182,
u'ajf': 182,
u'rmr': 182,
u'nb\xf3': 182,
u'\u0171r\xe9': 181,
u'\u0171z#': 181,
u'gn\xf3': 181,
u't\xfac': 181,
u'\xf3r#': 181,
u'urn': 181,
u'vha': 181,
u'o#g': 181,
u'h\u0151s': 181,
u'ck\xe9': 181,
u'dst': 181,
u'k\u0151o': 181,
u'mf\u0151': 181,
u'os\xe9': 180,
u'm\xf3c': 180,
u'jtv': 180,
u'\xe9js': 180,
u'j#o': 180,
u'ihi': 180,
u'lio': 180,
u'f\u0151s': 180,
u'him': 180,
u'\xedni': 179,
u'nru': 179,
u'aya': 179,
u'maf': 179,
u'pd\xe1': 179,
u'cki': 179,
u'is\xfc': 179,
u'eno': 179,
u'efl': 179,
u'irg': 178,
u'p\xf6r': 178,
u'e\xe1b': 178,
u'm\u0171t': 178,
u'ff#': 178,
u'#ji': 178,
u'iir': 178,
u'bda': 178,
u'\u0151m#': 178,
u'rlu': 178,
u'ru#': 178,
u'bau': 178,
u'deh': 178,
u'\xf3r\xe9': 177,
u'yev': 177,
u'e\xfct': 177,
u'aty': 177,
u'tli': 177,
u'oc\xed': 177,
u'k#w': 177,
u'chw': 176,
u'\xedzm': 176,
u'z#\u0171': 176,
u'nzz': 175,
u'lgs': 175,
u'kea': 175,
u'cqu': 175,
u'\u0151ni': 175,
u'\xfafo': 175,
u'\xe1ku': 175,
u'cc#': 175,
u'stv': 175,
u'tua': 175,
u'pam': 175,
u'kbp': 175,
u'\u0171#u': 174,
u'\xf3v\xed': 174,
u'ch\xe1': 174,
u'acq': 174,
u'gso': 174,
u'pae': 174,
u't\xfas': 173,
u'hof': 173,
u'cto': 173,
u'\xf6p\xf6': 173,
u'nzr': 172,
u'rfa': 172,
u'\u0144sk': 172,
u'lk\u0171': 172,
u'o#h': 172,
u'v#\xfc': 172,
u'rd\xfc': 172,
u'tyi': 172,
u'b\xf6s': 172,
u'yb\xed': 172,
u'zv\xe1': 172,
u'mb\xf6': 171,
u'rn\xf6': 171,
u'stm': 171,
u'rf\xfc': 171,
u'ml\xf3': 171,
u'agf': 171,
u'agt': 171,
u'r\u0151b': 171,
u'#km': 171,
u't\xedl': 170,
u'\xedjb': 170,
u'\xe9nb': 170,
u'nk\xfc': 170,
u'ffi': 170,
u'ld\xfc': 170,
u'rcu': 170,
u'how': 170,
u'\u0151es': 170,
u'g\xe9k': 170,
u'r\xe8s': 170,
u'pho': 170,
u'hi\xfa': 170,
u'\xedny': 169,
u'nk\xf3': 169,
u'noc': 169,
u'\xedno': 169,
u'o#i': 169,
u'keb': 169,
u'edz': 169,
u't\xf3h': 169,
u'\u0151e#': 169,
u'itc': 169,
u'x#a': 169,
u'dea': 169,
u's\u0151v': 169,
u'vra': 169,
u'\xfcst': 169,
u'bp#': 169,
u'vke': 168,
u'ij#': 168,
u'r\xf3a': 168,
u'ofs': 168,
u'\xf3vj': 168,
u'fst': 168,
u'ld\xed': 168,
u'\u0171nj': 168,
u'in\xf3': 168,
u'ua#': 168,
u'j\xe9g': 168,
u'r#\u0151': 168,
u'\xe9r\xf3': 167,
u'cej': 167,
u'rsk': 167,
u'rsr': 167,
u'y\xe9m': 167,
u'o#l': 167,
u'\u0151#z': 167,
u'\xe9#d': 167,
u'\xf3pu': 167,
u'\xf3tt': 167,
u'wor': 167,
u'upo': 167,
u'l\xe1i': 167,
u'ejp': 167,
u'iz#': 167,
u'#t\xf3': 167,
u'm\xf3i': 166,
u'ogm': 166,
u'may': 166,
u'j\xf3r': 166,
u'gda': 166,
u'll\xfa': 166,
u'#r\xfc': 166,
u'etc': 166,
u'ekj': 166,
u'esp': 166,
u'\xe1j\xf6': 166,
u'bbj': 166,
u'\u0151b\u0151': 165,
u'doy': 165,
u'owi': 165,
u'bw\xe9': 165,
u'oyl': 165,
u'#ia': 165,
u'joh': 165,
u'm\xe1#': 165,
u'upt': 165,
u'bag': 165,
u'#\u0161p': 165,
u't\xe1u': 164,
u'br\u0151': 164,
u's\xe9p': 164,
u'\xe1h\xfa': 164,
u'sey': 164,
u'ypr': 164,
u'op#': 164,
u'ab\xe9': 164,
u'\xfckt': 164,
u'ebh': 164,
u'lv\xed': 164,
u'we#': 163,
u'yvr': 163,
u'keu': 163,
u'\xf3ny': 163,
u'd\xf6k': 163,
u'uye': 163,
u'\xe1pa': 163,
u'eda': 163,
u'vah': 163,
u'gs\xfc': 163,
u'luz': 163,
u'zi\xe9': 163,
u't\u0151m': 163,
u'fid': 162,
u'zka': 162,
u'szc': 162,
u'aun': 162,
u'\xe9#j': 162,
u'imh': 162,
u'uus': 162,
u'\u0151er': 162,
u'\xe9\xfcl': 162,
u'i\xf3\xe9': 162,
u'pis': 162,
u'j\xe9i': 162,
u'\xf3vo': 161,
u'\xfa#\xfa': 161,
u'\xf6k\xfc': 161,
u'thy': 161,
u'dum': 161,
u'\xe1zm': 160,
u'aak': 160,
u'nr\xe9': 160,
u'\xe9nm': 160,
u'off': 160,
u'ouy': 160,
u't\xf3m': 160,
u'ktt': 160,
u'did': 160,
u'b\xf6n': 160,
u'x#k': 159,
u'g\xf6n': 159,
u'\u0171zi': 159,
u'er\xe8': 159,
u'ipe': 159,
u'ufo': 159,
u'ap\xe9': 159,
u'i\xf3a': 159,
u'sts': 159,
u'goo': 159,
u'd\xe9v': 159,
u'rv\xfc': 159,
u'bf\xe9': 159,
u'cek': 158,
u'\xfaba': 158,
u'lb\u0151': 158,
u'vun': 158,
u'ay#': 158,
u'ulf': 158,
u'lud': 158,
u'nue': 158,
u'lro': 157,
u'k\xe1d': 157,
u'dfo': 157,
u'ls\xf6': 157,
u'c\xe1b': 157,
u'roo': 157,
u'\xfa#\xfc': 157,
u'moh': 157,
u'pl\xe9': 157,
u'puk': 157,
u'wol': 157,
u'bav': 157,
u'ggv': 157,
u'ktk': 157,
u'oc#': 157,
u'ti\xe1': 157,
u'l\xf6s': 156,
u'ls#': 156,
u'n\u0151v': 156,
u'#\xf6b': 156,
u'ue#': 156,
u'ktn': 156,
u'lif': 156,
u'ag\xfa': 156,
u'g\xf3g': 156,
u'aeg': 155,
u'c#b': 155,
u'lsa': 155,
u'o##': 155,
u'#ea': 155,
u'uit': 155,
u'\xe1os': 155,
u'aol': 155,
u'b#\xed': 155,
u'otu': 155,
u'ygy': 155,
u't\u0151a': 155,
u'rdt': 155,
u'nzo': 154,
u'\xednr': 154,
u'k\xe1o': 154,
u'ug\xf3': 154,
u'f#a': 154,
u'eid': 154,
u'#s\xf3': 154,
u'sty': 154,
u'#wh': 154,
u'sno': 154,
u'\xf6ri': 154,
u'shm': 154,
u'wit': 154,
u'\xe9hs': 154,
u'dts': 154,
u'afu': 154,
u'nb\xfc': 154,
u'ijt': 153,
u'cad': 153,
u'if\xf6': 153,
u'oan': 153,
u'hs\xe9': 153,
u'#ij': 153,
u'\xe9gc': 153,
u'ues': 153,
u'\xf3h\xe1': 153,
u'p\xe1k': 153,
u'osc': 152,
u'\xedn\xe9': 152,
u'\xe9j\xfc': 152,
u'ugv': 152,
u'p\xf6t': 152,
u'hra': 152,
u'sof': 152,
u'stn': 152,
u'gm\xe1': 152,
u'urj': 152,
u'o#\xe1': 152,
u'tki': 152,
u'ipc': 152,
u'sau': 152,
u'lm#': 152,
u'\xe9l\xe1': 152,
u'zug': 152,
u'\xe9za': 152,
u'ifa': 151,
u'm#w': 151,
u'r\xfan': 151,
u'euf': 151,
u'e\xfcl': 151,
u'dho': 151,
u'd\u0151h': 151,
u'uij': 151,
u'agp': 151,
u'dpi': 151,
u'xua': 151,
u'\xe9ga': 151,
u'\xe1vl': 151,
u'own': 151,
u'e\xe1v': 150,
u'h\xedg': 150,
u'm#\xf3': 150,
u'n\xfa#': 150,
u'ied': 150,
u'm\xe9j': 150,
u'\xe9sf': 150,
u'h\xedj': 149,
u'inj': 149,
u'\xf3vn': 149,
u'rso': 149,
u'yap': 149,
u'rgh': 149,
u'ia\xe9': 149,
u'g\xe1j': 149,
u'npi': 149,
u'k\xf3k': 149,
u'beu': 149,
u'l\u0171#': 148,
u'nr\u0151': 148,
u'\xedjj': 148,
u'lca': 148,
u'uz\xed': 148,
u'ncb': 148,
u'ear': 148,
u'iu#': 148,
u'g#\xf3': 148,
u'xin': 148,
u'umt': 148,
u'xet': 148,
u'gh\xfa': 148,
u'#ud': 148,
u'vny': 148,
u'nbi': 148,
u'mbr': 147,
u't\xe9p': 147,
u'pbe': 147,
u'\xe1mj': 147,
u'asr': 147,
u'c#p': 147,
u'yur': 147,
u's\u0151f': 147,
u'tk\xed': 147,
u'\xedlu': 147,
u'ml\u0151': 147,
u'\xe1#p': 147,
u'uaf': 147,
u'ep\xe1': 147,
u'b\u0151r': 147,
u'av#': 147,
u'lru': 146,
u'cie': 146,
u'ev\xfc': 146,
u'amc': 146,
u'\xf3pe': 146,
u'zje': 146,
u't\u0151c': 146,
u'lyp': 146,
u'hut': 146,
u'avr': 146,
u'vka': 145,
u'k\xe9m': 145,
u'yma': 145,
u'sn\xfc': 145,
u'zsb': 145,
u'y\xe9h': 145,
u'sbu': 145,
u'rc\xed': 145,
u'm\xe1g': 145,
u'dl\xe1': 145,
u'vtu': 145,
u'\xe9pc': 145,
u'\xfar\xf3': 145,
u'ful': 145,
u'bun': 145,
u'p#o': 145,
u'ic#': 145,
u'h#m': 145,
u'nzn': 144,
u'azb': 144,
u'\xedvv': 144,
u'em\u0151': 144,
u'f\xf3b': 144,
u'oz\xe9': 144,
u'\xedzk': 144,
u'apf': 144,
u'f\xfcs': 144,
u'zpr': 144,
u'k\xf3b': 144,
u'gg\xf6': 144,
u'uru': 143,
u'cu#': 143,
u'ahs': 143,
u'\xe1l\xe9': 143,
u'i\xfas': 143,
u'f#e': 143,
u'##\u0151': 143,
u'kj\xe1': 142,
u'cis': 142,
u'z\xf3s': 142,
u'i#w': 142,
u'u#\xf6': 142,
u'\xe1#j': 142,
u'zaa': 142,
u'ne\xe1': 142,
u'eja': 142,
u'an\xf3': 142,
u'\xe1mi': 141,
u'\xedzg': 141,
u'ovs': 141,
u'unc': 141,
u'oi#': 141,
u's\xedk': 141,
u'yh\xf6': 141,
u'yti': 141,
u'bac': 141,
u'\u0151sl': 141,
u'g\xf3j': 141,
u'#p\xf6': 141,
u'ezo': 140,
u'sga': 140,
u'\xe1#o': 140,
u'nck': 140,
u'\xedzv': 140,
u'map': 140,
u'iv\u0151': 140,
u'\xe1dz': 140,
u'\xe9#r': 140,
u'rt\xed': 140,
u'v#\xf6': 140,
u'agl': 140,
u'kt\xe9': 140,
u'yk\xfc': 140,
u'mt\xe1': 140,
u'ute': 140,
u'\xf3lh': 140,
u'\u0151t\xf6': 140,
u'#dn': 140,
u'\xe9cs': 140,
u'goe': 140,
u'b\xf6g': 140,
u'haf': 139,
u'j#\xf6': 139,
u'\xfat\xe1': 139,
u's\u0151j': 139,
u'b\xe9c': 139,
u'yaf': 139,
u'zsu': 139,
u'#z\xe9': 139,
u'llr': 139,
u'yus': 139,
u'wle': 139,
u'\xe1ge': 139,
u'd\xfch': 139,
u'k\u0151#': 139,
u'za\xfa': 138,
u'rz\xfc': 138,
u'#gb': 138,
u'of#': 138,
u'ym\xf3': 138,
u'oes': 138,
u'\xf6nd': 138,
u'b\u0151s': 138,
u'ych': 138,
u'\xedmz': 138,
u'ly\xfa': 138,
u'owl': 138,
u'gum': 137,
u'eui': 137,
u'tox': 137,
u'pt\xf3': 137,
u'cd#': 137,
u'kp\xe1': 137,
u'nyc': 137,
u'ois': 137,
u'\xe1j\xfa': 137,
u'r#z': 137,
u'hia': 137,
u'\xe1zp': 136,
u'\xe9v\xe1': 136,
u'\xe1m\u0171': 136,
u'nca': 136,
u'au#': 136,
u'jfa': 136,
u'nae': 136,
u'hwa': 136,
u'l\xedr': 136,
u'kbv': 136,
u'hae': 135,
u's\xf6s': 135,
u'm\u0171n': 135,
u'm\u0171z': 135,
u'yiv': 135,
u'eaz': 135,
u'ayo': 135,
u'y\xf6t': 135,
u'c\xe1k': 135,
u'tca': 135,
u'h\u0171t': 135,
u'i\xf3f': 135,
u'\u0151sn': 135,
u's\u0151r': 135,
u'mt\xfc': 135,
u'\xe1vr': 135,
u'h#t': 135,
u'emv': 134,
u'\xedra': 134,
u'\xf3as': 134,
u'omu': 134,
u'n\u0151d': 134,
u'roi': 134,
u'\xf3i\xf6': 134,
u'xtr': 134,
u'l\xf3m': 134,
u'i\xf3p': 134,
u'\xe1bi': 134,
u'gv\xf3': 134,
u'tv\xf6': 133,
u'\xf3fo': 133,
u'\xf6zf': 133,
u'jka': 133,
u'\xf6sn': 133,
u'g\xf3k': 133,
u'lyf': 133,
u'anh': 133,
u'fi\xfa': 132,
u'inz': 132,
u'#r\xfa': 132,
u'edr': 132,
u'sib': 132,
u'd\u0151g': 132,
u'\xf6kl': 132,
u'mps': 132,
u'\xf6zm': 132,
u'#hs': 132,
u'\xfalj': 132,
u'vnu': 132,
u'\xf3m\u0171': 132,
u'buj': 132,
u'och': 132,
u'ic\xe1': 132,
u'rmp': 132,
u'#oo': 132,
u'tpi': 132,
u'guc': 131,
u'ib\xe9': 131,
u'z\xfcr': 131,
u'\xe1dl': 131,
u'rck': 131,
u'\xe1nh': 131,
u'\xf6gb': 131,
u'ttk': 131,
u'ity': 131,
u'oog': 131,
u'ool': 131,
u'p\xf3r': 131,
u'an\u0151': 131,
u'o#j': 131,
u'nkf': 130,
u'uz\xe8': 130,
u'pog': 130,
u'\xe9#p': 130,
u'z\xe8s': 130,
u'\xf6sl': 130,
u'yb\xf3': 130,
u'z\xf3h': 129,
u'eau': 129,
u'j\xf3h': 129,
u'\xfajd': 129,
u'rkb': 129,
u'hoc': 129,
u'asj': 129,
u'of\xf3': 129,
u'utr': 129,
u'k\xf3v': 129,
u'\xe9km': 129,
u'ebv': 129,
u'\xe1#r': 128,
u'wes': 128,
u'dow': 128,
u'\u0151na': 128,
u'\xe9a#': 128,
u'ous': 128,
u'ih\xe1': 128,
u'alc': 128,
u'zuh': 128,
u'lyv': 128,
u'\u0151l\u0151': 128,
u'atz': 127,
u'wer': 127,
u'h\xe1v': 127,
u'boj': 127,
u'\xf6nc': 127,
u'iaa': 127,
u'yh\xe9': 127,
u'saa': 127,
u'u#d': 127,
u'ft#': 127,
u'\xe3o#': 127,
u'puh': 127,
u'#ul': 127,
u'ty\xfa': 127,
u'mcg': 127,
u'kob': 127,
u'reu': 127,
u'p#\xfa': 127,
u'yn\xe9': 127,
u'zob': 126,
u'#s\u0171': 126,
u'dr#': 126,
u'ujg': 126,
u'\xfczb': 126,
u'\u0151\xfcg': 126,
u'\xe9ho': 126,
u'rdj': 126,
u'np#': 126,
u'akj': 126,
u'sp#': 126,
u'yog': 126,
u'sl\xf3': 126,
u'ggl': 126,
u'ewa': 126,
u'lyc': 126,
u'tz#': 125,
u'\u0171ze': 125,
u'tr\xfc': 125,
u'szw': 125,
u'jar': 125,
u'ucs': 125,
u'zwo': 125,
u'zs\xfa': 125,
u'eyc': 125,
u'o#u': 125,
u'y\xe1#': 125,
u'gif': 125,
u'nt\xed': 125,
u'cgu': 125,
u'okv': 125,
u'nj\xe9': 125,
u'fav': 125,
u'r#\xf3': 125,
u'y\u0171g': 125,
u'bbh': 125,
u'zof': 124,
u'\u0151ra': 124,
u'h\xedd': 124,
u'\xedrp': 124,
u'm\u0171g': 124,
u'wie': 124,
u'qua': 124,
u'ztm': 124,
u'anb': 124,
u'uil': 124,
u'ild': 124,
u'pub': 124,
u'zja': 124,
u'big': 124,
u'd\xe9r': 124,
u'fai': 124,
u'\u0171r\u0151': 123,
u'vkn': 123,
u'rfu': 123,
u'r\xf3h': 123,
u'inv': 123,
u'\xe1lc': 123,
u'\xedz\xfc': 123,
u'\xeda#': 123,
u'p#g': 123,
u'\xfajg': 123,
u'vp#': 123,
u'\xf3m\xe1': 123,
u'th\xe9': 123,
u'ryu': 123,
u'\u0171gy': 123,
u'\xe1zr': 122,
u'j#\xfc': 122,
u'\xfa##': 122,
u'om\xed': 122,
u'\xfanb': 122,
u'cul': 122,
u'pt\xfc': 122,
u'sio': 122,
u'm\xe9\xe9': 122,
u'\xe9tj': 122,
u'ag\xed': 122,
u'isl': 122,
u'hy#': 122,
u'\u0151si': 122,
u'anf': 122,
u'h#s': 122,
u'bb\xfc': 122,
u'osf': 121,
u'#c\xe1': 121,
u'g\xf6z': 121,
u'ifu': 121,
u'omk': 121,
u'nhi': 121,
u'aou': 121,
u'utb': 121,
u's\xf3n': 121,
u'usd': 121,
u'mki': 121,
u'\u0151l\xe9': 121,
u'zbo': 121,
u'\u0171g\xf6': 121,
u'dug': 121,
u'wn#': 121,
u'#ky': 121,
u'n\xf6t': 121,
u'\u0171rt': 120,
u'\u0171r#': 120,
u'sov': 120,
u'c\xe9j': 120,
u'iqu': 120,
u'ldu': 120,
u'ub\xf3': 120,
u'vik': 120,
u'\xe1nf': 120,
u'v\xe9h': 120,
u'sus': 120,
u'#ib': 120,
u'vi\u010d': 120,
u'nio': 120,
u'ab\xf3': 120,
u't\u0151\xfc': 120,
u'#d\xfc': 120,
u'rex': 120,
u'y#w': 120,
u't\xedn': 119,
u'pfo': 119,
u'aum': 119,
u'\u0151nn': 119,
u'klo': 119,
u'lhu': 119,
u'\xe9#c': 119,
u'seu': 119,
u'asc': 119,
u'oft': 119,
u'\xf3#\xf3': 119,
u'eji': 119,
u'aan': 118,
u'ai\xf6': 118,
u'loj': 118,
u'i\xe9g': 118,
u'um\xe9': 118,
u'\xf6kj': 118,
u'\xe9dd': 118,
u'lyj': 118,
u'an\xe7': 118,
u'rsm': 117,
u'zk#': 117,
u'zdo': 117,
u'#vl': 117,
u'dlo': 117,
u'n\xe1j': 117,
u'ct#': 117,
u'od#': 117,
u'rb\xed': 117,
u'#\u0161e': 117,
u'\xf6l\u0151': 117,
u'ib\xfa': 117,
u'nzf': 116,
u'k\xe1m': 116,
u'kni': 116,
u'\xe9n\xed': 116,
u'inu': 116,
u'\xe9ba': 116,
u'tb\xe9': 116,
u'bo#': 116,
u'sf\xfc': 116,
u'#ay': 116,
u'\xe1gf': 116,
u'\xe9t\u0171': 116,
u'gdi': 116,
u'zma': 116,
u'npa': 116,
u'rl\u0151': 116,
u'\xe9dz': 116,
u'y\xf3k': 116,
u'm\xf3n': 115,
u'\u0171zv': 115,
u'\xe1#g': 115,
u'n\xf3d': 115,
u'e#w': 115,
u'l\xf3h': 115,
u's\xfcz': 115,
u'z\xe1h': 115,
u'lew': 115,
u'nb\xe1': 115,
u'uad': 115,
u'\u0171nb': 115,
u'\xf6s\xf6': 115,
u'il\xf3': 115,
u'\xf3tu': 115,
u'\xf6gi': 115,
u'\xe1v\xe9': 115,
u'#cd': 115,
u'us\xed': 115,
u'ug#': 114,
u'gj\xf3': 114,
u'key': 114,
u'pcc': 114,
u'nsr': 114,
u'ubb': 114,
u'yl\xe9': 114,
u'ix#': 114,
u'rpr': 114,
u'd\u0151o': 114,
u'\xe9tu': 114,
u'n\xe1h': 114,
u'paf': 114,
u'\xfalr': 114,
u'kou': 114,
u'iou': 114,
u'y\u0171s': 114,
u'\xe1r\u0151': 114,
u'g\xf6k': 113,
u'\xe1#\xfa': 113,
u'wab': 113,
u'loo': 113,
u'\xedmm': 113,
u'ldf': 113,
u'mu#': 113,
u'ahn': 113,
u'\xfaz\xe1': 113,
u'jki': 113,
u'ttr': 113,
u'zm\xe1': 113,
u'zib': 113,
u'rvh': 113,
u'\xe8re': 113,
u'rud': 113,
u'gpm': 113,
u'sde': 113,
u'mk\xed': 113,
u'vro': 113,
u'zjo': 113,
u'nzm': 112,
u'\u0171rn': 112,
u'osh': 112,
u'\xfatn': 112,
u'oeg': 112,
u'\u0171#\xfa': 112,
u'mnu': 112,
u'fbe': 112,
u'now': 112,
u'urz': 112,
u'dk\u0151': 112,
u'\xedmj': 112,
u'#\xe9k': 112,
u'tsb': 112,
u'yje': 112,
u'm\xe9z': 112,
u'b\xfcl': 112,
u'\xf6k\u0151': 112,
u'\xedln': 112,
u'i\xf3e': 112,
u'yp\xe1': 112,
u'luc': 112,
u'ney': 112,
u'dir': 112,
u'#d\xfa': 112,
u'rey': 112,
u'p\xe9b': 111,
u'zr\xf3': 111,
u'hee': 111,
u'e\xe1g': 111,
u'war': 111,
u'eem': 111,
u'wis': 111,
u'\u0171n#': 111,
u'hbe': 111,
u'oin': 111,
u'gdu': 111,
u'oei': 111,
u'odz': 111,
u'kga': 111,
u'ews': 111,
u'afa': 111,
u's#q': 111,
u'dno': 110,
u'aen': 110,
u'oun': 110,
u'ndm': 110,
u'x#e': 110,
u'\xedg\xed': 110,
u'r\xe1\xe9': 110,
u'kku': 110,
u'pau': 110,
u'f#k': 110,
u'oup': 110,
u'anp': 110,
u'fao': 110,
u'x#h': 109,
u'vkr': 109,
u'\xe1m\xfc': 109,
u'ymo': 109,
u'imr': 109,
u'toh': 109,
u'ieu': 109,
u'i\xe8r': 109,
u'\xe9tn': 109,
u'exr': 109,
u'x#\xfa': 109,
u'puc': 109,
u'\u0151d#': 109,
u'ny\u0151': 109,
u'fus': 109,
u'cou': 109,
u'd\xedz': 109,
u'ar\xfa': 109,
u'#t#': 109,
u'h#p': 109,
u'bbm': 109,
u'bj\xe1': 108,
u'dz\xe1': 108,
u'rsi': 108,
u'c\xeda': 108,
u'c\xe1f': 108,
u'dc#': 108,
u'y\xe1h': 108,
u'rp\xf3': 108,
u'f\xfcr': 108,
u'tp#': 108,
u'eok': 108,
u'upr': 108,
u'kgy': 108,
u'paj': 108,
u'yst': 108,
u'esr': 108,
u'ouz': 108,
u'ajh': 108,
u'rm\u0151': 108,
u'yf\xe9': 107,
u'j#\xfa': 107,
u'm\xf3r': 107,
u'aat': 107,
u'#gh': 107,
u'cil': 107,
u'er\xf3': 107,
u'eer': 107,
u'ke\xe1': 107,
u'#\xb0c': 107,
u'mz\xe1': 107,
u'c\u0103u': 107,
u'nsn': 107,
u'adp': 107,
u'pom': 107,
u'ic\u0103': 107,
u'j##': 107,
u'\xf6zu': 107,
u'vod': 107,
u'bt#': 107,
u'nij': 107,
u'jpi': 107,
u'i#\u0171': 107,
u'\u0151ha': 107,
u'oke': 107,
u'zj\xf3': 107,
u'hip': 107,
u'i\u010d#': 107,
u'g\xfat': 106,
u'\xe1e#': 106,
u'ev\xf3': 106,
u'aup': 106,
u'#b\xf3': 106,
u'll\xfc': 106,
u'\xe1pu': 106,
u'\xf3mb': 106,
u'xus': 106,
u'n\xe1e': 106,
u'pua': 106,
u'rua': 106,
u'\u0163ic': 106,
u'ood': 106,
u'bib': 106,
u'rij': 106,
u'#kn': 106,
u'cir': 105,
u'pfe': 105,
u'ib#': 105,
u'sri': 105,
u'tf\xe9': 105,
u'n\xf3k': 105,
u'ozl': 105,
u'nsi': 105,
u'g#w': 105,
u'l\xedv': 105,
u'vn\xe9': 105,
u'ajm': 105,
u'a\xfad': 105,
u'y\u0151r': 105,
u'd\xe1m': 105,
u'bbu': 105,
u'aeu': 104,
u'jzs': 104,
u'nku': 104,
u'lc\xe1': 104,
u'dou': 104,
u'#zo': 104,
u'd\xfas': 104,
u'\xf6v\xf6': 104,
u'thu': 104,
u'oub': 104,
u'vac': 104,
u'hop': 104,
u'h\xf3d': 104,
u'\xe1gc': 104,
u'ihl': 104,
u'i\xf3\xe1': 104,
u'yki': 104,
u'yku': 104,
u'nm\xf3': 104,
u'\u0103u#': 104,
u'sd\xe9': 104,
u'r#w': 104,
u'#ou': 103,
u'azp': 103,
u'af\xe9': 103,
u'lgr': 103,
u'\xf6m\xfc': 103,
u'\xf6ml': 103,
u'\xe1me': 103,
u'poc': 103,
u'tty': 103,
u'agc': 103,
u'bta': 103,
u'nau': 103,
u't\u0151h': 103,
u'usj': 103,
u'mk\xf3': 103,
u'zfl': 103,
u'esj': 103,
u'\u0151ko': 102,
u'uc#': 102,
u'yv\xed': 102,
u'#sm': 102,
u'wil': 102,
u'\xe1\xe9l': 102,
u'olb': 102,
u'idd': 102,
u'dm\u0171': 102,
u'deu': 102,
u'usm': 102,
u'\xf3k\xe1': 102,
u'gns': 101,
u'amz': 101,
u'ip\u0151': 101,
u'eys': 101,
u'euc': 101,
u'adz': 101,
u'a\u0144s': 101,
u'tk\xfc': 101,
u'gpu': 101,
u'n\xe1p': 101,
u'rdr': 101,
u'bep': 101,
u'dme': 101,
u'\xf6ds': 101,
u'r\xedn': 101,
u'r\xe9d': 101,
u'rao': 101,
u'mse': 101,
u'#ph': 101,
u'tah': 101,
u'#cc': 100,
u'\u0151rt': 100,
u'h\xe1k': 100,
u'ngv': 100,
u'nsk': 100,
u'z\u0151a': 100,
u'\xf6rg': 100,
u'\xfav\xe1': 100,
u'\xf3\xe9p': 100,
u'z#z': 100,
u'ooi': 100,
u'abt': 100,
u'uh#': 100,
u'ys#': 100,
u'pme': 100,
u'ynn': 100,
u'tzi': 99,
u't\xe9j': 99,
u'k\xe1g': 99,
u'r\xf3c': 99,
u'f#\xe9': 99,
u'nv\xe9': 99,
u'ffa': 99,
u'uno': 99,
u'\xe1de': 99,
u'm\xe1m': 99,
u'n\xfcz': 99,
u'\xf3ig': 99,
u'\xe1nz': 99,
u'gsi': 99,
u'km#': 99,
u'ils': 99,
u'\u0171ke': 99,
u'lza': 98,
u'ngh': 98,
u'#bh': 98,
u'j\xf3b': 98,
u'llm': 98,
u'f#t': 98,
u'y\u0171r': 98,
u'ih\xfa': 98,
u'\xe9du': 98,
u'mof': 98,
u't\xe9\xe9': 97,
u'jip': 97,
u'#\xf3l': 97,
u'\xe9jj': 97,
u'\xf3zh': 97,
u'\u0171be': 97,
u'v\xe1v': 97,
u'win': 97,
u'oit': 97,
u'ed\xed': 97,
u'umc': 97,
u'zpl': 97,
u'gdo': 97,
u'kts': 97,
u'b#z': 97,
u'\u0171lt': 97,
u'esv': 97,
u'ict': 97,
u'\u0171k\xed': 97,
u'#mf': 97,
u'ays': 96,
u's\xe9m': 96,
u'gt#': 96,
u'lpa': 96,
u'uid': 96,
u'lao': 96,
u'lij': 96,
u'utm': 96,
u'jho': 96,
u'k\u0151z': 96,
u'gru': 96,
u'\xfass': 96,
u'\xfalo': 96,
u'da\xe9': 96,
u's\u0151i': 96,
u'nf\xf6': 96,
u'gz\xfc': 95,
u'oja': 95,
u'cve': 95,
u'l\xf6p': 95,
u'\xeate': 95,
u'cei': 95,
u'\xe1\xe9#': 95,
u'adl': 95,
u'\xf6zo': 95,
u'uu#': 95,
u'rd\xf3': 95,
u'lay': 95,
u'mtv': 95,
u'nuc': 95,
u'eb#': 95,
u'cz#': 94,
u'nzh': 94,
u'\u0171ri': 94,
u'\xe1#c': 94,
u'ofo': 94,
u'jz\xe9': 94,
u'es\xfa': 94,
u'k+f': 94,
u't\xeat': 94,
u'\xfajn': 94,
u'pka': 94,
u'cro': 94,
u'hok': 94,
u'n\xe9t': 94,
u'et\xea': 94,
u'uxu': 94,
u'boo': 94,
u'#h\xfc': 94,
u'jl\xe9': 94,
u'sli': 94,
u'nj\xf6': 94,
u'ajj': 94,
u'm\u0151k': 94,
u'oco': 94,
u'uzz': 94,
u'#k+': 94,
u'zop': 93,
u'\u0151rk': 93,
u'of\u0151': 93,
u'\xf6rh': 93,
u'o#o': 93,
u'tsc': 93,
u'fje': 93,
u'\xfazt': 93,
u'w\xe9b': 93,
u'itz': 93,
u'dpn': 93,
u'sdo': 93,
u'\xf3sp': 93,
u'cza': 92,
u'\xe1ak': 92,
u'\xedvh': 92,
u'cm#': 92,
u'ts\xf3': 92,
u'lco': 92,
u'dof': 92,
u'#r\u0151': 92,
u'z\xe9h': 92,
u'vt\u0151': 92,
u'mde': 92,
u'\u0151ib': 92,
u'ddh': 92,
u'itb': 92,
u'l\xe1a': 92,
u'jha': 92,
u'ook': 92,
u'f#s': 92,
u'tm\xfa': 92,
u'\xf6ps': 92,
u'fam': 92,
u'a\xe1t': 92,
u'fe#': 92,
u'nzg': 91,
u'hec': 91,
u'g\xf6s': 91,
u'\xf3rr': 91,
u'rj\xfa': 91,
u'\xedzf': 91,
u'\xfarb': 91,
u'\xe9in': 91,
u'\xf6n\xfc': 91,
u'mub': 91,
u'm\xe9#': 91,
u'tci': 91,
u'ipp': 91,
u'ftm': 91,
u'#pm': 91,
u'phi': 91,
u'baa': 91,
u'f#m': 91,
u'udy': 91,
u'\xf3dd': 91,
u'f\xfaj': 91,
u'x#\xe9': 91,
u'k\u0171s': 90,
u'i\xe9#': 90,
u'c\xe9v': 90,
u'sj\xf3': 90,
u'\xf3fe': 90,
u'\xfajz': 90,
u'm\xe1d': 90,
u'po#': 90,
u'lh\u0151': 90,
u'sm\xe1': 90,
u'at\xfc': 90,
u'#ee': 90,
u'b\xe1v': 90,
u'\xe1je': 90,
u'\xfalk': 90,
u'hea': 89,
u'bve': 89,
u'\xf6mt': 89,
u'noa': 89,
u'\xfada': 89,
u'euj': 89,
u'omd': 89,
u'unn': 89,
u'apl': 89,
u'ap\xf3': 89,
u'ztt': 89,
u'lae': 89,
u'aop': 89,
u'\xe1s\xfc': 89,
u'gst': 89,
u'rub': 89,
u'k\xf3s': 89,
u'd#\xed': 89,
u'ggi': 89,
u'ks#': 89,
u'p#u': 89,
u'efs': 89,
u'#kg': 89,
u'\xf3zs': 88,
u'n\xe7o': 88,
u'gnu': 88,
u'zcs': 88,
u'jve': 88,
u'\xe1mf': 88,
u'\xe1mp': 88,
u'ifr': 88,
u'\xe7oi': 88,
u'ujt': 88,
u'#\u0163i': 88,
u'n\u0151b': 88,
u'uff': 88,
u'vii': 88,
u'\xe1np': 88,
u'\xe9pl': 88,
u'vej': 88,
u'\u0151mi': 88,
u'zip': 88,
u'aos': 88,
u'pab': 88,
u'g\xf3h': 88,
u'af\xf6': 88,
u'dah': 88,
u'udf': 88,
u'ioc': 88,
u'\xf3dv': 88,
u'\xe9n\xfc': 87,
u'kv#': 87,
u'eic': 87,
u'#sy': 87,
u'rs\xf3': 87,
u'c\xe1i': 87,
u'h\xf6k': 87,
u'ufe': 87,
u'\xf6rj': 87,
u'cli': 87,
u'\xe9#o': 87,
u'mdo': 87,
u'\u0171tr': 87,
u'npr': 87,
u'upi': 87,
u'uld': 87,
u'nnk': 87,
u'\xe9go': 87,
u'hme': 87,
u'x#t': 86,
u'uei': 86,
u'mni': 86,
u'vo#': 86,
u'umj': 86,
u's\u0151z': 86,
u'gz\xe1': 86,
u'jjo': 86,
u'oep': 86,
u'rf#': 86,
u'hou': 86,
u'nih': 86,
u'op\xe1': 86,
u'#xi': 86,
u'p\xe1c': 86,
u'tef': 86,
u't\xedr': 85,
u'soz': 85,
u'\xfatb': 85,
u'zku': 85,
u'jzo': 85,
u'ncl': 85,
u'esf': 85,
u'c#c': 85,
u'rcn': 85,
u'ehr': 85,
u'g\u0171n': 85,
u'ips': 85,
u'hth': 85,
u'sp\xf3': 85,
u'vuk': 85,
u'\xe1sj': 85,
u'lg\xe9': 85,
u'rue': 85,
u'kki': 85,
u'\xedkr': 85,
u'daa': 85,
u'fae': 85,
u'tao': 85,
u'ebl': 85,
u'\u0171rp': 84,
u'nr#': 84,
u'\xe1#d': 84,
u'\xednp': 84,
u'bk\xf6': 84,
u'j\xf3f': 84,
u'quo': 84,
u'y\xe1i': 84,
u'xx#': 84,
u'#+#': 84,
u'sah': 84,
u'\xe1s\xf6': 84,
u'ulb': 84,
u'#hn': 84,
u'#dh': 84,
u'l\xe9j': 84,
u'fei': 84,
u'\xfct\u0151': 84,
u'zfi': 84,
u'\xf6lm': 84,
u'hug': 84,
u'\xfag\xe1': 84,
u'#\u0171z': 84,
u'teo': 84,
u'v\xf3r': 84,
u'frs': 83,
u'\xedjp': 83,
u'a#q': 83,
u'hr#': 83,
u'fot': 83,
u'tf\xf3': 83,
u'v\xe1i': 83,
u'r\xfcb': 83,
u'i\u0107#': 83,
u'#ya': 83,
u'\xe9n\xe1': 83,
u'\xe9d\xfc': 83,
u'ecc': 83,
u'yga': 83,
u'isp': 83,
u'\xe1b\xe1': 83,
u'f\xe1v': 83,
u'rdn': 83,
u'rmm': 83,
u'lj#': 83,
u'gr\xe4': 83,
u'bbt': 83,
u'mf\xe9': 83,
u'nzk': 82,
u'e\xe1t': 82,
u'k\xe9\xe9': 82,
u'f\xf3l': 82,
u'\xf3bo': 82,
u'twe': 82,
u'mur': 82,
u'ndz': 82,
u'\xfcbi': 82,
u'itl': 82,
u'uat': 82,
u'upp': 82,
u'isj': 82,
u'moc': 82,
u'\xf3da': 82,
u'#dt': 82,
u'ioe': 82,
u'nb\u0151': 82,
u'#d\u0151': 82,
u'yf\xfc': 81,
u'osj': 81,
u'ezb': 81,
u'\xe9nr': 81,
u'szg': 81,
u'eey': 81,
u'dvt': 81,
u'\xedz\xe9': 81,
u'c#o': 81,
u'iev': 81,
u'i\xf6v': 81,
u'mhi': 81,
u'uar': 81,
u'ypi': 81,
u'ek\xe1': 81,
u'\xe1bn': 81,
u'd#\xfc': 81,
u'bju': 80,
u'inh': 80,
u'\xedrm': 80,
u'loe': 80,
u'\xfa#\xf6': 80,
u'omc': 80,
u'mah': 80,
u'\xe9ac': 80,
u'#m\xfc': 80,
u'hot': 80,
u'khp': 80,
u'oh\xf3': 80,
u'uiz': 80,
u'ftv': 80,
u'nbu': 80,
u'pui': 80,
u'shr': 80,
u'gg\xfc': 80,
u'aby': 80,
u'bih': 80,
u'ly\u0171': 80,
u'p#\xf6': 80,
u'csm': 80,
u'\u010dov': 80,
u'\xfcd\xfc': 80,
u'yb\xe1': 80,
u'mbs': 79,
u'\xe9vh': 79,
u'iv\xed': 79,
u'b\xfaz': 79,
u'dfe': 79,
u'h\xe9\xe1': 79,
u'o#z': 79,
u'\u0151\xe1r': 79,
u'nh\xe9': 79,
u'\u0151#\xf3': 79,
u'\xe9e#': 79,
u'lpe': 79,
u'zpa': 79,
u'ml#': 79,
u'epn': 79,
u'jpo': 79,
u'hl#': 79,
u'aw#': 79,
u'g\u0151m': 79,
u'v\xf6l': 79,
u'f\xe1b': 79,
u'\xe9kc': 79,
u'#d#': 79,
u's\u0151e': 79,
u'\xe9gj': 79,
u'#pv': 79,
u'\xe1zf': 78,
u'\u0161ef': 78,
u'kno': 78,
u'aem': 78,
u'guy': 78,
u'\xe1md': 78,
u'\xfcbn': 78,
u'\xedzr': 78,
u'xsz': 78,
u'o#\xf6': 78,
u'eue': 78,
u'rcz': 78,
u'ahl': 78,
u'#mr': 78,
u'at\u0151': 78,
u'exn': 78,
u'kt\xfc': 78,
u'v\u0151e': 78,
u'#um': 78,
u'h\xfcb': 78,
u'r\xe1p': 78,
u'zu#': 78,
u'oop': 78,
u'xne': 78,
u'\u0151sh': 78,
u'zb\xf3': 78,
u'\xf6ts': 78,
u'f\xfaz': 78,
u'#tf': 78,
u'owh': 78,
u'\xe1pi': 77,
u'\xedrr': 77,
u'eel': 77,
u'ady': 77,
u'#\xf3z': 77,
u'#gf': 77,
u'fta': 77,
u'\u0151pa': 77,
u'ntl': 77,
u'ect': 77,
u'r\xe1e': 77,
u'kkt': 77,
u'\xfalb': 77,
u'fet': 77,
u'r\xfar': 77,
u'rm\xf3': 77,
u'igv': 77,
u'faa': 77,
u'ef\u010d': 77,
u'v\u0151v': 77,
u'duc': 77,
u'du#': 77,
u'f\u010do': 77,
u'\xf6m\xe9': 76,
u'mvo': 76,
u'pnu': 76,
u'c\xe9r': 76,
u'rsh': 76,
u'c#g': 76,
u'\xe1\xe9v': 76,
u'omv': 76,
u'fsa': 76,
u'p\xf3s': 76,
u'd\xfal': 76,
u'sb#': 76,
u'gap': 76,
u'mio': 76,
u'\xe9mh': 76,
u'\xfcrd': 76,
u'ehl': 76,
u'\xe9#u': 76,
u'\xfazo': 76,
u'jk#': 76,
u'dhi': 76,
u'\xfcnc': 76,
u'itm': 76,
u'exs': 76,
u'ckm': 76,
u'niq': 76,
u'rl#': 76,
u'oon': 76,
u'd#z': 76,
u'r\xe9p': 76,
u'nfb': 76,
u'hic': 76,
u'duj': 76,
u'e\xf6l': 76,
u'#tb': 76,
u'jme': 75,
u'chl': 75,
u'h\xe1s': 75,
u'\xf3rv': 75,
u'pny': 75,
u'tfr': 75,
u'un\xe1': 75,
u'zsn': 75,
u'\xfany': 75,
u'iaj': 75,
u'\xe9pg': 75,
u'le\xf6': 75,
u't\xfcd': 75,
u'ety': 75,
u'ntw': 75,
u'kth': 75,
u'b\xe9t': 75,
u'l#w': 75,
u'r\u0151h': 75,
u'jl\xe1': 75,
u'v\xf6k': 75,
u'\u0151de': 75,
u'bme': 75,
u'#bg': 75,
u'iok': 75,
u'pic': 75,
u'd\xedc': 75,
u'pz\u0151': 75,
u'ivu': 74,
u'ez\u0171': 74,
u'sih': 74,
u'pn\xe9': 74,
u't\xf6s': 74,
u'#aa': 74,
u'cec': 74,
u'rsc': 74,
u'i\xedr': 74,
u'dop': 74,
u'ki\xed': 74,
u'jf\xfa': 74,
u'#\xe1v': 74,
u'poo': 74,
u'\xe9#g': 74,
u'apz': 74,
u'\xf3#\u0151': 74,
u'ouc': 74,
u'jol': 74,
u'\xfar\xe9': 74,
u'rds': 74,
u'up#': 74,
u'\u0171he': 74,
u'\u0151d\xfc': 74,
u'goj': 74,
u's\xfar': 74,
u'ow#': 74,
u'df\xe9': 73,
u'rff': 73,
u'ngd': 73,
u'c#d': 73,
u'#ng': 73,
u'\xf6r\xed': 73,
u'muf': 73,
u'vt\xf6': 73,
u'llb': 73,
u'g\u0171r': 73,
u'siz': 73,
u'oua': 73,
u'hod': 73,
u'lpi': 73,
u'\xfava': 73,
u'umd': 73,
u'\u0151\xfcl': 73,
u'ckx': 73,
u'iln': 73,
u'ght': 73,
u'\xe9dv': 73,
u'b\xe9b': 73,
u'awi': 73,
u'gps': 73,
u'pmo': 73,
u'zz\xf6': 73,
u'zac': 72,
u'kfu': 72,
u'\u0171re': 72,
u'jia': 72,
u'dj#': 72,
u'mn\xe9': 72,
u'yig': 72,
u'y\xfak': 72,
u'ka\xe9': 72,
u'n\xf3h': 72,
u'ibu': 72,
u'#\xedv': 72,
u'm\xfaz': 72,
u'ah\xed': 72,
u'm\xedr': 72,
u'\xe1g\u0171': 72,
u'ph#': 72,
u'lmb': 72,
u'bed': 72,
u'awa': 72,
u'vfe': 72,
u'#lv': 72,
u'\xe1bu': 72,
u'kof': 72,
u'r\xe9i': 72,
u'biv': 72,
u'm\u0151f': 72,
u'\xfaze': 72,
u'fec': 72,
u'fru': 71,
u'lri': 71,
u'onp': 71,
u't#\u0171': 71,
u'b\xfav': 71,
u'\xf3g#': 71,
u'p\u0151k': 71,
u'nvi': 71,
u'k\xe9i': 71,
u'uzg': 71,
u'nox': 71,
u'\xf3rb': 71,
u'eie': 71,
u'tfa': 71,
u'ngg': 71,
u'unh': 71,
u'\xednh': 71,
u'mm#': 71,
u'\xe1d\xe1': 71,
u'#\xf6z': 71,
u'toi': 71,
u'dih': 71,
u'llk': 71,
u'cko': 71,
u'bte': 71,
u'\u0171nm': 71,
u'gh\xe9': 71,
u'pum': 71,
u'nay': 71,
u'ec\xed': 71,
u'mfc': 71,
u'czo': 70,
u'\xe4\xdfl': 70,
u'cri': 70,
u'k\u0171e': 70,
u'h\xe9b': 70,
u'emd': 70,
u'pn#': 70,
u'ka\xf3': 70,
u'c\xe1j': 70,
u'kmi': 70,
u'vhi': 70,
u'\u0151n\xe9': 70,
u'im\xed': 70,
u'bsd': 70,
u'poh': 70,
u'#io': 70,
u'#iu': 70,
u'r\xe4\xdf': 70,
u'\xe9pf': 70,
u'ex\xe9': 70,
u'hp#': 70,
u'\xe9lc': 70,
u'\xf3\xe1r': 70,
u'yod': 70,
u'\xdfle': 70,
u'b\xe9d': 70,
u'idb': 70,
u'\u0151hi': 70,
u'\xfag\xf3': 70,
u'f#f': 70,
u'h\u0151#': 70,
u'efr': 70,
u'ef#': 70,
u'rr#': 70,
u'xre': 70,
u'lvk': 70,
u'mn\xe1': 69,
u'sge': 69,
u'mju': 69,
u'\xe9nu': 69,
u'loc': 69,
u'i\xe9l': 69,
u'wik': 69,
u'\u0151fi': 69,
u'\xe1\xe9n': 69,
u'ki\xfa': 69,
u'ls\xfc': 69,
u'mmo': 69,
u'mip': 69,
u'sp+': 69,
u'kuv': 69,
u'\xf6rk': 69,
u'\xf3m\xe9': 69,
u'gk\xe1': 69,
u'\xe9pb': 69,
u'jk\xe1': 69,
u'\xedlj': 69,
u'ghr': 69,
u'idz': 69,
u'\xfasu': 69,
u'\xf6lv': 69,
u'enw': 69,
u'\xf6tm': 69,
u's\xfaa': 69,
u'#ml': 69,
u'mfa': 69,
u'gfc': 68,
u't\u0171d': 68,
u'rbl': 68,
u'\xedrv': 68,
u'wic': 68,
u'ip\xe1': 68,
u'jny': 68,
u'c\xedz': 68,
u'\xe1\xe9b': 68,
u'gop': 68,
u'uum': 68,
u'#ih': 68,
u'sag': 68,
u'ftp': 68,
u't\xfct': 68,
u'nec': 68,
u'zny': 68,
u'\xf3d\xed': 68,
u'epu': 68,
u'da\u0144': 68,
u'bu#': 68,
u'ar\xf6': 68,
u'kf\xf6': 67,
u'h\xe9j': 67,
u'#o#': 67,
u'jad': 67,
u'obz': 67,
u'\xf3rt': 67,
u'tft': 67,
u'r\xfcz': 67,
u'adc': 67,
u'y\xe1m': 67,
u'ubj': 67,
u'kuu': 67,
u'rsn': 67,
u'\xfa#\xf3': 67,
u'fcm': 67,
u'iew': 67,
u'jk\xe9': 67,
u'gdr': 67,
u'law': 67,
u'ilb': 67,
u'coc': 67,
u'b\xedn': 67,
u'idm': 67,
u'h\xfcl': 67,
u'\xe1b#': 67,
u'\xe9k\xed': 67,
u'af\xe1': 67,
u'pm#': 67,
u'#tv': 67,
u'tze': 66,
u'\xedjs': 66,
u'\xfchl': 66,
u'\xe1er': 66,
u'wel': 66,
u'\xe1ih': 66,
u'eio': 66,
u'r\xe3o': 66,
u'\xedn\xf3': 66,
u'\xe1\xe9k': 66,
u'\xf3f\xe9': 66,
u'\xe1dn': 66,
u'\xf6v\xfc': 66,
u'si\xe8': 66,
u'cti': 66,
u'\xfaza': 66,
u'\xe1cl': 66,
u'see': 66,
u'n\u0171b': 66,
u'\xf6sv': 66,
u'#qi': 66,
u'lui': 66,
u'sh\xfa': 66,
u'\u0171s\xf6': 66,
u'pao': 66,
u'bui': 66,
u'nnh': 66,
u'm\xe9p': 66,
u'h#l': 66,
u'#kp': 66,
u'\xfcd\u0151': 66,
u'\u0151r\xe1': 65,
u'\xfch\xf6': 65,
u'\u0171zh': 65,
u'\xedv\u0151': 65,
u'\u0151zv': 65,
u'm\u0171a': 65,
u'lse': 65,
u'dko': 65,
u'aj\xf6': 65,
u'oij': 65,
u'\xe7a#': 65,
u'oen': 65,
u'oat': 65,
u'\xf6rf': 65,
u'hs#': 65,
u'gid': 65,
u'bhu': 65,
u'\xfav\xf3': 65,
u'kh\xe1': 65,
u'n\xe1g': 65,
u'akd': 65,
u'ilj': 65,
u'nir': 65,
u'sh\xe1': 65,
u'mcd': 65,
u'kkn': 65,
u'moo': 65,
u'udd': 65,
u'enm': 65,
u'm\u0171p': 65,
u'zzs': 65,
u'usp': 65,
u'tau': 65,
u'x#\xfc': 65,
u'a\xe9p': 64,
u'jim': 64,
u'rz#': 64,
u'ivf': 64,
u'aab': 64,
u'ugh': 64,
u'iju': 64,
u'w#d': 64,
u'am\u0171': 64,
u'soc': 64,
u'tfi': 64,
u'dtp': 64,
u'\u0171ns': 64,
u'to\u010d': 64,
u'\xedd#': 64,
u'd\u0151\xed': 64,
u'\u0151ma': 64,
u'agd': 64,
u'cty': 64,
u'\u0151pr': 64,
u'i\xf3d': 64,
u'kx#': 64,
u'puf': 64,
u'woo': 64,
u'bub': 64,
u'j\xe1l': 64,
u'oce': 64,
u'jub': 64,
u'o#\xfc': 64,
u'p+#': 64,
u'\xfaj\xf3': 63,
u's\xf6v': 63,
u'\xe1#\xe1': 63,
u'obn': 63,
u'\xe9mo': 63,
u'lo\u015f': 63,
u'dzk': 63,
u'\u010dni': 63,
u'kao': 63,
u'n\xf3r': 63,
u'\xe1\xe9t': 63,
u'zst': 63,
u'j\xf3\xe9': 63,
u'\xf3fr': 63,
u'\xfaj\xe1': 63,
u'pgy': 63,
u'ppf': 63,
u'rtz': 63,
u'\u0151\xedv': 63,
u'ohl': 63,
u'\xf6km': 63,
u'bhe': 63,
u'agv': 63,
u'o\u010dn': 63,
u'\xf3le': 63,
u'i\xfak': 63,
u'l\xe9\xe9': 63,
u'pex': 63,
u'o\u015f#': 63,
u'ssy': 63,
u'tep': 63,
u'uo#': 62,
u'\xfcti': 62,
u'uk\xed': 62,
u'zty': 62,
u'r\xf3\xe9': 62,
u'mj\xf6': 62,
u'db\xf3': 62,
u'hro': 62,
u'ki\xf6': 62,
u'n\u0151l': 62,
u'ozs': 62,
u'\xe9mj': 62,
u'dcs': 62,
u'gij': 62,
u'icz': 62,
u'rkh': 62,
u'jke': 62,
u'qim': 62,
u'uez': 62,
u'npo': 62,
u'kpi': 62,
u'h\u0171#': 62,
u'yk#': 62,
u'mt\xf6': 62,
u'\xf3\xe1l': 62,
u'ux#': 62,
u'wog': 62,
u'hba': 62,
u'vva': 62,
u'ecu': 62,
u'oot': 62,
u'paa': 62,
u'i\xfa#': 62,
u'z\xfas': 62,
u'\xfct\xe9': 62,
u'n\xfak': 62,
u'cny': 62,
u'zci': 62,
u'rmc': 62,
u'\xe9zf': 62,
u'td\xed': 62,
u'v\xf3n': 62,
u'fi\xf3': 61,
u'r\u0171z': 61,
u'lr\xe1': 61,
u'\xf3tk': 61,
u'\xe9ns': 61,
u'\xedvi': 61,
u'mva': 61,
u'z\xf3c': 61,
u'gj\xf6': 61,
u'kzs': 61,
u'l\xfas': 61,
u'aub': 61,
u'\xedaz': 61,
u'boh': 61,
u'dk#': 61,
u'zl\u0151': 61,
u'f#p': 61,
u'gah': 61,
u'\xf6n\u0151': 61,
u'r\xfch': 61,
u't\xf3\xfc': 61,
u'm\xe9i': 61,
u'md\xed': 61,
u'ku#': 61,
u'\xf3en': 61,
u'n\u0171t': 61,
u'h\u0171e': 61,
u'\xf3t\xf6': 61,
u'gs\xf6': 61,
u'opj': 61,
u'oo#': 61,
u'\xf6lk': 61,
u'd\xeda': 61,
u'tih': 61,
u'dns': 60,
u'fir': 60,
u'cut': 60,
u'\xfcl\xed': 60,
u'rf\xe9': 60,
u's\xf6r': 60,
u'tr\xed': 60,
u'by#': 60,
u'auv': 60,
u'p\xf3#': 60,
u'iik': 60,
u'\xe1dr': 60,
u'z\u0151z': 60,
u'\xe9ak': 60,
u'\xe9ar': 60,
u'fc#': 60,
u'yh\xfc': 60,
u'#v#': 60,
u'rmt': 60,
u'agz': 60,
u'th\xf3': 60,
u'yim': 60,
u'htt': 60,
u'stt': 60,
u'k\xf3n': 60,
u'ra\xe1': 60,
u'zb\xed': 60,
u'vsk': 60,
u'ejj': 60,
u'duf': 60,
u'fem': 60,
u'ir\xed': 59,
u'cur': 59,
u'mb#': 59,
u'zh\xe1': 59,
u'#\xf3p': 59,
u'#gk': 59,
u'\xe9no': 59,
u'p\xfar': 59,
u'uv\xe1': 59,
u'kag': 59,
u'na\u0144': 59,
u'ujo': 59,
u'uje': 59,
u'iuu': 59,
u'ldv': 59,
u'ufm': 59,
u'a\xf3r': 59,
u'ehm': 59,
u'cdd': 59,
u'\xf3#w': 59,
u'joe': 59,
u'#\xe1d': 59,
u'n\xe9e': 59,
u'le\xf3': 59,
u'le\xfc': 59,
u'\u0151pu': 59,
u'il\xed': 59,
u's\xf3i': 59,
u'shg': 59,
u'u\xe1c': 59,
u'dij': 59,
u'r\xedv': 59,
u'k\u0151\xe1': 59,
u'gou': 59,
u'lbr': 59,
u'oca': 59,
u'hik': 59,
u'b\xf6k': 59,
u'bye': 59,
u'hak': 58,
u'fma': 58,
u'\xednl': 58,
u'szd': 58,
u'ibl': 58,
u'ibt': 58,
u'ceb': 58,
u'if#': 58,
u'rwa': 58,
u'tms': 58,
u'eaf': 58,
u'mev': 58,
u'c\xe1v': 58,
u'ehi': 58,
u'u#\xfc': 58,
u'gde': 58,
u'hga': 58,
u'\xfaka': 58,
u'aot': 58,
u'g\u0151t': 58,
u'\xe1jl': 58,
u'piu': 58,
u'te\xe1': 58,
u'b\xf6t': 58,
u'h#\xe1': 58,
u'x#f': 57,
u'czy': 57,
u'\xe9vk': 57,
u'aef': 57,
u'+f#': 57,
u'h\xe1d': 57,
u'j#z': 57,
u'\xf3je': 57,
u'jjn': 57,
u'c\xe1h': 57,
u'\xedml': 57,
u'j\xf3c': 57,
u'ol\xe9': 57,
u'xek': 57,
u'x#p': 57,
u'you': 57,
u'mpt': 57,
u'na\xe1': 57,
u'\xe9s\xed': 57,
u'#dc': 57,
u'z\xfad': 57,
u'pem': 57,
u'f#i': 57,
u'#pc': 57,
u'rir': 57,
u'bbp': 57,
u'mff': 57,
u'sky': 57,
u'rz\xf3': 56,
u'\xf6et': 56,
u'sci': 56,
u'ojc': 56,
u'#g\xf3': 56,
u'mv\xe9': 56,
u'aqu': 56,
u'c#\xf6': 56,
u'eyn': 56,
u'\xe9i#': 56,
u'jbe': 56,
u'ldm': 56,
u'dpo': 56,
u'jo#': 56,
u'mda': 56,
u'f\u0171s': 56,
u'ttp': 56,
u'acz': 56,
u'#ey': 56,
u'zm\u0171': 56,
u'\xf3pr': 56,
u'szz': 56,
u'\xe9dk': 56,
u'woj': 56,
u'a#y': 56,
u'd#\u0151': 56,
u'f\xe1h': 56,
u'ogp': 56,
u'pr\xfc': 56,
u'fah': 56,
u'fab': 56,
u'#tg': 56,
u'jmi': 55,
u'rzo': 55,
u'#cb': 55,
u'\xe1p\xe9': 55,
u'xon': 55,
u'oj\xf3': 55,
u'#g\xfa': 55,
u'inr': 55,
u'm\u0171f': 55,
u'uvi': 55,
u'i\xe9t': 55,
u'yav': 55,
u'\xf3nu': 55,
u'c\xe1z': 55,
u'kmo': 55,
u'mud': 55,
u'gih': 55,
u'\xe9#\xfa': 55,
u'\xe1p\xe1': 55,
u'rp\xe1': 55,
u'g\xe9g': 55,
u'ttv': 55,
u'jcb': 55,
u'h\u0171s': 55,
u'alq': 55,
u'tea': 55,
u'b#\xf3': 55,
u'b\xe9g': 55,
u'tu#': 55,
u'z\xfaz': 55,
u'bid': 55,
u'gc\xed': 55,
u'uht': 55,
u'fap': 55,
u'pzs': 55,
u'nm\xe9': 55,
u'\xf3k\xed': 55,
u'h#b': 55,
u'h#n': 55,
u'tv#': 54,
u'hem': 54,
u'zk\xf3': 54,
u'nrm': 54,
u'\xe1#\xf6': 54,
u'nkv': 54,
u'\xe9bo': 54,
u'cao': 54,
u'm\u0171r': 54,
u'c\xe9k': 54,
u'b\xf3n': 54,
u'uja': 54,
u'\u0151#\u0151': 54,
u'imt': 54,
u'r\xfcm': 54,
u'ah\xe9': 54,
u'\u010d#\xfa': 54,
u'ed\xe1': 54,
u'##z': 54,
u'\xf8rg': 54,
u'j\xf8r': 54,
u'exm': 54,
u'\u0171nr': 54,
u'x#\xe1': 54,
u'cch': 54,
u'yov': 54,
u'#ht': 54,
u's\xf3t': 54,
u'mop': 54,
u'okj': 54,
u'pip': 54,
u'\xf3sl': 54,
u'ns\u0151': 53,
u'ijo': 53,
u'wai': 53,
u'\xedr\xe9': 53,
u'\xf3rm': 53,
u'mr#': 53,
u'auf': 53,
u'uji': 53,
u'gae': 53,
u'ldb': 53,
u'nsu': 53,
u'#\xf6e': 53,
u'mny': 53,
u'\u010dek': 53,
u'#\xe1h': 53,
u'e\xfcg': 53,
u'ky#': 53,
u'\xfcz\xe9': 53,
u'kd\xe9': 53,
u'hcr': 53,
u'u#u': 53,
u'\u0151ih': 53,
u'\u0151mb': 53,
u'kpr': 53,
u'#nb': 53,
u'#\xe0#': 53,
u'l\xfct': 53,
u'be\xfc': 53,
u'#lj': 53,
u'tye': 53,
u'mka': 53,
u'f#v': 53,
u'thb': 53,
u'a\xfat': 53,
u'yvn': 53,
u'#ts': 53,
u'\xf3cr': 52,
u'#fd': 52,
u'\u0161em': 52,
u'\xedjm': 52,
u'udz': 52,
u'm\u0171\xe1': 52,
u'sob': 52,
u'g\xfav': 52,
u'eet': 52,
u'dvi': 52,
u'zsm': 52,
u'j\xf3a': 52,
u'oed': 52,
u'#\xf6m': 52,
u'\xe1he': 52,
u'suu': 52,
u'b\u0151k': 52,
u't\xf3f': 52,
u't\xf3c': 52,
u'hob': 52,
u'dh#': 52,
u'm\xe1\xe9': 52,
u'saf': 52,
u'jcc': 52,
u'\u0171t#': 52,
u'fne': 52,
u'epb': 52,
u'spl': 52,
u'tyj': 52,
u'v\xf6s': 52,
u'www': 52,
u'gcc': 52,
u'f\xe9t': 52,
u'p#\xfc': 52,
u'rya': 52,
u'ejr': 52,
u'gku': 52,
u'ssn': 52,
u'nbb': 52,
u'h#r': 52,
u'y\xf3i': 52,
u'x#r': 51,
u'\xe1zl': 51,
u'l\u0171z': 51,
u'on\xfa': 51,
u'vle': 51,
u'\xedju': 51,
u't\u0171s': 51,
u'\xe1#\u0151': 51,
u'p\u0151b': 51,
u'brn': 51,
u'em\xe1': 51,
u'dz\u0151': 51,
u'jr\xf3': 51,
u'ffs': 51,
u'bz\xe1': 51,
u'kay': 51,
u'yah': 51,
u'do\u011f': 51,
u'ma\u0148': 51,
u'c\xe1p': 51,
u'o\u011fa': 51,
u'z\xfcz': 51,
u'nsh': 51,
u'imj': 51,
u'nhc': 51,
u'\xe9am': 51,
u'ahb': 51,
u'kyi': 51,
u'a\u0148k': 51,
u'rkt': 51,
u'\xe1pt': 51,
u'dsb': 51,
u'\xe9tz': 51,
u'#eq': 51,
u'sex': 51,
u'ueg': 51,
u'cks': 51,
u'nif': 51,
u'g\u0151z': 51,
u'upa': 51,
u'lqu': 51,
u's\xf3h': 51,
u'i\xfat': 51,
u'ewi': 51,
u'udr': 51,
u'x#j': 51,
u'\u011fan': 51,
u'cav': 51,
u'h#v': 51,
u'iru': 50,
u'b\xfac': 50,
u'db\u0151': 50,
u'br\xf3': 50,
u'obr': 50,
u'gbp': 50,
u'low': 50,
u'rwe': 50,
u'eed': 50,
u'rsd': 50,
u'ngk': 50,
u'fsh': 50,
u'n\u0151h': 50,
u'ldc': 50,
u'\u0171#z': 50,
u'\xe1dk': 50,
u'tsi': 50,
u'r\xfcp': 50,
u'eht': 50,
u'rk\xfc': 50,
u'#ry': 50,
u'\xe1tp': 50,
u'khi': 50,
u'ttb': 50,
u'v#\xf3': 50,
u'\xf3es': 50,
u'k\xfcv': 50,
u'cb\xf3': 50,
u'eou': 50,
u'jp\xe9': 50,
u'gs\xed': 50,
u'fou': 50,
u'ryb': 50,
u'\xedko': 50,
u'vno': 50,
u'cov': 50,
u'c\xf3r': 50,
u'us\xe9': 50,
u'\u0171k\xfc': 50,
u'bbg': 50,
u'r\u0171v': 49,
u'irs': 49,
u'fr\xe1': 49,
u'\u0171rj': 49,
u'#cf': 49,
u'b\xfat': 49,
u'nri': 49,
u'cmi': 49,
u'ogf': 49,
u'\u0151z#': 49,
u'#sn': 49,
u'm\xf6t': 49,
u'mab': 49,
u'gej': 49,
u'cua': 49,
u'\u0151\xe9r': 49,
u'j\xe9\xe9': 49,
u'\xe9#\u0151': 49,
u'gp\xe1': 49,
u'\xe9#\xf6': 49,
u'kdk': 49,
u'ndf': 49,
u'blj': 49,
u'rmn': 49,
u'\u0148ka': 49,
u'\xf6kz': 49,
u'cbr': 49,
u'ki\xfc': 49,
u'\xf6s\xed': 49,
u'stl': 49,
u'idp': 49,
u'\xedgu': 49,
u'#h#': 49,
u'nex': 49,
u's\xf3a': 49,
u'r\xeda': 49,
u'\xfasi': 49,
u'ws#': 49,
u'\xf3d\xfa': 49,
u'\xfctn': 49,
u'sda': 49,
u's\u0151\xed': 49,
u'vri': 49,
u'piz': 49,
u'#kd': 49,
u'\xfcpe': 49,
u'gce': 49,
u'num': 49,
u'eyf': 49,
u'ejv': 49,
u'ta\xe1': 49,
u'zz#': 49,
u'taw': 49,
u'y\u0171b': 49,
u'fih': 48,
u'rzy': 48,
u'zo#': 48,
u'#cn': 48,
u'jee': 48,
u'\xe9nl': 48,
u'\xe9nf': 48,
u'cef': 48,
u'ps\u0151': 48,
u'nc\xe1': 48,
u'\u0151ba': 48,
u'uk\xf6': 48,
u'aye': 48,
u'#bb': 48,
u'ajp': 48,
u'eyr': 48,
u'kug': 48,
u'#\xe1f': 48,
u'rks': 48,
u'sic': 48,
u'oud': 48,
u'ztk': 48,
u'umv': 48,
u'tp\xe1': 48,
u'b\xe1c': 48,
u'htf': 48,
u'x#v': 48,
u'ccp': 48,
u'oxo': 48,
u'\xf3h\xed': 48,
u'rtl': 48,
u'k\xf3\xe9': 48,
u'hly': 48,
u'dm\xe1': 48,
u'\xfasr': 48,
u'\xfal\xe1': 48,
u'ksh': 48,
u'obs': 48,
u'pvv': 48,
u'swa': 48,
u'rry': 48,
u'\u0171kl': 48,
u'j\xe1#': 48,
u'cze': 47,
u'so\xe1': 47,
u'os\xfa': 47,
u'm\xf3b': 47,
u'\xednk': 47,
u'bj\xfc': 47,
u'tsv': 47,
u'\xe9\xe1t': 47,
u'\u0171fe': 47,
u'n\xf3a': 47,
u'jni': 47,
u'i\xe1u': 47,
u'jf\xe9': 47,
u'\xe1dp': 47,
u'gm\u0171': 47,
u'oam': 47,
u'\xfcrt': 47,
u'aht': 47,
u'\xfa#\xed': 47,
u'tku': 47,
u'y\u0171e': 47,
u'g\xe1g': 47,
u'\xf3m\xf3': 47,
u'xte': 47,
u'h\xf3#': 47,
u'\xe1gl': 47,
u'ued': 47,
u'b\xe1m': 47,
u'ut\xe9': 47,
u'dma': 47,
u'l\xe1\u0161': 47,
u'kog': 47,
u'\xfct#': 47,
u'g\xf3l': 47,
u'f#l': 47,
u'j\xe1i': 47,
u'vv#': 47,
u'rv\u0171': 47,
u'#ty': 47,
u'bbc': 47,
u'owe': 47,
u'\u0171rk': 46,
u'k\xf6b': 46,
u'#cm': 46,
u'kju': 46,
u'\xfama': 46,
u'lch': 46,
u'zcz': 46,
u'obt': 46,
u'\xfatk': 46,
u'\xe1jt': 46,
u'\xb0co': 46,
u'eej': 46,
u'#\xfas': 46,
u'nwe': 46,
u'gsa': 46,
u'km\u0171': 46,
u'ai\xf3': 46,
u'ub#': 46,
u'ubr': 46,
u'sue': 46,
u'm\xedn': 46,
u'\xf3ih': 46,
u'\u0151as': 46,
u'\xfazh': 46,
u'\xf3mo': 46,
u'\u0171pa': 46,
u'dd\xe1': 46,
u'ohu': 46,
u'\xe1g\xf6': 46,
u'\xfako': 46,
u'v\u0151d': 46,
u'usf': 46,
u'mp\xed': 46,
u'mcz': 46,
u'oph': 46,
u'bm\xe9': 46,
u'ggj': 46,
u'jdi': 46,
u'peg': 46,
u'k\u0171n': 46,
u'm\xfcl': 46,
u'igs': 46,
u'eft': 46,
u's\xfan': 46,
u'\u0151gy': 46,
u'dn\xf6': 45,
u'r\u0171l': 45,
u'nzs': 45,
u'lzs': 45,
u'uck': 45,
u'\xf6mf': 45,
u'brd': 45,
u'\u0151zm': 45,
u'y\xfam': 45,
u'ruv': 45,
u'eeb': 45,
u'#j\xf8': 45,
u'\xf3ne': 45,
u'sn\xf6': 45,
u'mm\xe9': 45,
u'ubv': 45,
u'bse': 45,
u'ehs': 45,
u'eh\xfa': 45,
u'#v\u0103': 45,
u'lp#': 45,
u'v\u0103l': 45,
u'kly': 45,
u'mtj': 45,
u'f\xe1i': 45,
u'\u0103le': 45,
u'afc': 45,
u'zz\xf3': 45,
u'h#c': 45,
u'h#h': 45,
u'\u0171k\xe9': 45,
u'mf\xe1': 45,
u'\xednb': 44,
u'#cz': 44,
u'#fj': 44,
u'z\u0171#': 44,
u'zkb': 44,
u'evs': 44,
u'uce': 44,
u'cva': 44,
u'\xedv\u0171': 44,
u'em\xf3': 44,
u'\xe0#\xfa': 44,
u'noo': 44,
u'n\xf3b': 44,
u'c#u': 44,
u'dru': 44,
u'ajg': 44,
u'twi': 44,
u'vba': 44,
u'\xedit': 44,
u's\xedi': 44,
u'y\xe1g': 44,
u'm\xe1z': 44,
u'rk\u0151': 44,
u'yll': 44,
u'\xfaro': 44,
u'ztf': 44,
u'umf': 44,
u'sae': 44,
u'\xf6ku': 44,
u'bt\xe9': 44,
u'mp#': 44,
u'as\xf3': 44,
u'#ub': 44,
u'ots': 44,
u'kkj': 44,
u'\u0142aw': 44,
u'moi': 44,
u't\u0151p': 44,
u'f#\xe1': 44,
u'zfa': 44,
u'anl': 44,
u'ss\xf6': 44,
u'\xf6t\xfc': 44,
u'iz\xf3': 44,
u'duz': 44,
u'#ox': 44,
u'h#j': 44,
u'kfo': 43,
u'\xe9v\xfc': 43,
u'wro': 43,
u'df#': 43,
u'nrr': 43,
u'\xedjh': 43,
u'ijk': 43,
u'weg': 43,
u'k\xe9g': 43,
u'obu': 43,
u'guw': 43,
u'sog': 43,
u'eij': 43,
u'\xf6ul': 43,
u'ncc': 43,
u'z\xf6u': 43,
u'cy#': 43,
u'kee': 43,
u'\xf6bm': 43,
u'o\xe1#': 43,
u'pcb': 43,
u'#z\xfa': 43,
u'\xe1h\xed': 43,
u'l\xf3f': 43,
u'ehu': 43,
u'tgt': 43,
u'smo': 43,
u'jga': 43,
u'k\xfcg': 43,
u'gv\xed': 43,
u'ih#': 43,
u'eoj': 43,
u'hts': 43,
u'l\xfcz': 43,
u'pu#': 43,
u'idn': 43,
u'\xf3ly': 43,
u's\xf3v': 43,
u'd#w': 43,
u'aft': 43,
u'\xf6l\xfc': 43,
u'r\xfav': 43,
u'ssc': 43,
u's\xe3o': 43,
u'uwu': 43,
u'kbn': 43,
u'\xfatd': 42,
u'szy': 42,
u'zg\xe9': 42,
u'tn\xf6': 42,
u'mv\xe1': 42,
u'aio': 42,
u'\xedr\xfc': 42,
u'#wr': 42,
u'bc#': 42,
u'amv': 42,
u'\u0151j\xf6': 42,
u'h\xf6s': 42,
u'j\xf3j': 42,
u'y\xedr': 42,
u'wi#': 42,
u'chs': 42,
u'tsd': 42,
u'dib': 42,
u'tk#': 42,
u'rt\xfa': 42,
u'tge': 42,
u'\xe9tp': 42,
u'isu': 42,
u'eof': 42,
u'uay': 42,
u'plp': 42,
u'hta': 42,
u'st\xf6': 42,
u'jpr': 42,
u'nah': 42,
u'ecy': 42,
u'be\xf6': 42,
u'g\u0151k': 42,
u's\xf3r': 42,
u'wa#': 42,
u'f\u0151\xfc': 42,
u'\xfalf': 42,
u'daw': 42,
u'day': 42,
u'f#j': 42,
u'es\xf6': 42,
u't\xe4l': 42,
u'msa': 42,
u'ssj': 42,
u'\xf6tr': 42,
u'ynt': 42,
u'#tw': 42,
u'i\u010di': 42,
u'ebt': 42,
u'kfi': 41,
u'b\u0171v': 41,
u'\xf3fu': 41,
u'hew': 41,
u'\xe9jf': 41,
u'\u0151rr': 41,
u'p\xf6k': 41,
u'gnp': 41,
u'lcv': 41,
u'dze': 41,
u'pww': 41,
u'j\u0171e': 41,
u'ku\xe1': 41,
u'\xfcmi': 41,
u'\u0171\xe1g': 41,
u'rgn': 41,
u'\xfajh': 41,
u'clu': 41,
u'cdm': 41,
u'uuk': 41,
u'dwa': 41,
u'#g\u0151': 41,
u'vao': 41,
u'xis': 41,
u'\xe9po': 41,
u'tpw': 41,
u'jcs': 41,
u'ct\xf3': 41,
u'nib': 41,
u'yk\xe1': 41,
u'ghs': 41,
u'\u015f#\xfa': 41,
u'jfo': 41,
u'\xe1f#': 41,
u'yss': 41,
u'zb\xfc': 41,
u'vv\xe9': 41,
u'crs': 41,
u'\xe9rp': 40,
u'fif': 40,
u'i\u0171z': 40,
u't#\u0161': 40,
u'zh\xf6': 40,
u'ijs': 40,
u't\u0171j': 40,
u'zge': 40,
u'way': 40,
u'eep': 40,
u'tf\xfc': 40,
u'svo': 40,
u'\xedzl': 40,
u'wur': 40,
u'ki\u0171': 40,
u'd\xfa#': 40,
u'gag': 40,
u'\xe1dh': 40,
u'eud': 40,
u'd\xf6r': 40,
u'kup': 40,
u'tof': 40,
u'toe': 40,
u'hys': 40,
u'sui': 40,
u'cdc': 40,
u'\xf3ia': 40,
u'z\xe9j': 40,
u's\xe1j': 40,
u'my#': 40,
u'\xedjf': 40,
u'xit': 40,
u'it\xe4': 40,
u'#p\xfc': 40,
u'\xe1kj': 40,
u'la\xe1': 40,
u'uai': 40,
u'\xf6si': 40,
u'pur': 40,
u'#hl': 40,
u'be\xed': 40,
u'oty': 40,
u'\xfag#': 40,
u'\xfasf': 40,
u'ab\xf6': 40,
u'#bt': 40,
u'cne': 40,
u'pm\xe9': 40,
u'ejf': 40,
u'faf': 40,
u'h#d': 40,
u'oer': 40,
u'\xe4l\xe4': 39,
u'fix': 39,
u'dgo': 39,
u'\u0151rm': 39,
u'sr#': 39,
u'dza': 39,
u'#sb': 39,
u'ncv': 39,
u'c\xe9b': 39,
u'c#z': 39,
u'ayl': 39,
u'ory': 39,
u's\xed#': 39,
u'r\xfcf': 39,
u'roy': 39,
u'##\xb0': 39,
u'tc#': 39,
u'\xfcff': 39,
u'sms': 39,
u'olz': 39,
u'bl\xf3': 39,
u'xmi': 39,
u'\xe9td': 39,
u'kl\xfa': 39,
u'\xe1ks': 39,
u'sud': 39,
u'p\xfcs': 39,
u'cow': 39,
u'\xfcsp': 39,
u'v\u0151l': 39,
u'sp\xf6': 39,
u'spp': 39,
u'aoi': 39,
u'x#n': 39,
u'vu#': 39,
u'hha': 39,
u'i\u0144s': 39,
u'dpt': 39,
u'tyk': 39,
u'\xe1bj': 39,
u'dee': 39,
u'\xf3dz': 39,
u'koi': 39,
u'iks': 39,
u'l\xe9z': 39,
u'\u0151li': 39,
u'rew': 39,
u'ic\xed': 39,
u'thr': 39,
u'ti\xe9': 39,
u'\xe1n\xfc': 39,
u'pt\u0151': 39,
u'r#\u0161': 39,
u'gry': 39,
u'jkh': 39,
u'v\xf3k': 39,
u'p\xe9h': 38,
u'\u0171rb': 38,
u'onm': 38,
u'fiu': 38,
u'sc\xe9': 38,
u'\xf6na': 38,
u'vor': 38,
u'\u0171z\xfc': 38,
u'dzi': 38,
u'f#r': 38,
u'yro': 38,
u'erw': 38,
u'zca': 38,
u'gug': 38,
u's\u0151l': 38,
u'eia': 38,
u'#\xedm': 38,
u'm#\u0161': 38,
u'#wu': 38,
u'rct': 38,
u'gyg': 38,
u'zsk': 38,
u'kmr': 38,
u'p\xf3d': 38,
u'\u0151n\xf6': 38,
u'oe#': 38,
u'eux': 38,
u'dd\u0151': 38,
u'\xe9mk': 38,
u'gm#': 38,
u'k\xf3j': 38,
u'pkk': 38,
u'\xe9as': 38,
u'ppl': 38,
u'nlh': 38,
u'oya': 38,
u'z\xe9\xe9': 38,
u'l\xe4#': 38,
u'ncr': 38,
u'sm\xed': 38,
u'fo#': 38,
u'fp#': 38,
u'ax#': 38,
u'#pk': 38,
u'uik': 38,
u'sef': 38,
u'\xe1k\xf3': 38,
u'g\xfcr': 38,
u'xyn': 38,
u'#h\xe4': 38,
u'lu#': 38,
u'slu': 38,
u'wwe': 38,
u'i\xfaj': 38,
u'\xedtg': 38,
u'ik\u0151': 38,
u'feu': 38,
u'ksu': 38,
u'#xy': 38,
u'f\xe9s': 38,
u'rnk': 38,
u'r\xfaj': 38,
u'\xe1r\xfa': 38,
u'l\xe1\xe9': 38,
u'vcs': 38,
u'\xe1z\xe9': 37,
u'tza': 37,
u'zri': 37,
u'm\xf3p': 37,
u's\xe9j': 37,
u'#fp': 37,
u'weu': 37,
u'br\xfa': 37,
u'zco': 37,
u'noi': 37,
u'#s\xe3': 37,
u'ur\xe3': 37,
u'yau': 37,
u'drz': 37,
u'lwa': 37,
u'c\xedl': 37,
u'zsr': 37,
u'y\xe9\xe9': 37,
u'\xedms': 37,
u'\xe1\u0161i': 37,
u'ld\u0151': 37,
u'\xf6ni': 37,
u'\xe1dd': 37,
u'\xe9m\xed': 37,
u'kuh': 37,
u'gtc': 37,
u'shu': 37,
u'jst': 37,
u'hko': 37,
u'sij': 37,
u'\xedl\xf3': 37,
u'ody': 37,
u'mh\xed': 37,
u'i\xf3g': 37,
u'zea': 37,
u'z#w': 37,
u'kru': 37,
u'eca': 37,
u'ot\xe9': 37,
u'\u0161ik': 37,
u'k\u0151n': 37,
u'\xfasb': 37,
u'\xf6g\xfc': 37,
u'uhn': 37,
u'en\u0171': 37,
u'd\xedv': 37,
u'n\xfav': 37,
u'n\xfat': 37,
u'dt\u0151': 37,
u'\xedsz': 37,
u'\xfadu': 37,
u'rns': 37,
u'\xf3st': 37,
u'd\xe9#': 37,
u'i\xf6m': 37,
u'bbd': 37,
u'#oc': 36,
u'\u0151\xfat': 36,
u'hoo': 36,
u'vko': 36,
u'gf#': 36,
u'\xedjt': 36,
u'bnu': 36,
u'cea': 36,
u'hje': 36,
u'dv\xfc': 36,
u'b\xf3#': 36,
u'cyk': 36,
u'eyb': 36,
u'ldd': 36,
u'iit': 36,
u'ch\xed': 36,
u'dcm': 36,
u'imc': 36,
u'clo': 36,
u'shk': 36,
u'eh\xf3': 36,
u'iei': 36,
u'a\xe7a': 36,
u'bdi': 36,
u'\xe1ty': 36,
u'ztn': 36,
u'fte': 36,
u'tp\xe9': 36,
u'eog': 36,
u'm\xe4k': 36,
u'yka': 36,
u'lih': 36,
u'ty\u0171': 36,
u'baz': 36,
u'l\xedz': 36,
u'rys': 36,
u'gci': 36,
u'ogc': 36,
u'd\xeds': 36,
u'goi': 36,
u'ra\xe7': 36,
u'nur': 36,
u'h\u0151e': 36,
u'nnj': 36,
u'tij': 36,
u'gkr': 36,
u'gkm': 36,
u'efu': 36,
u'ljt': 36,
u'y\xf3n': 36,
u'l\xe9\xfc': 35,
u'nzj': 35,
u'\u0151ka': 35,
u'm\xf3t': 35,
u'\xednj': 35,
u'jtk': 35,
u'\xf3go': 35,
u'evb': 35,
u'mjo': 35,
u'n\xe7a': 35,
u'wen': 35,
u'lc\xed': 35,
u'\xedrb': 35,
u'gu\xe1': 35,
u'tfu': 35,
u'glu': 35,
u'\u0151ut': 35,
u'unr': 35,
u'\u0103se': 35,
u'mao': 35,
u'orw': 35,
u'gee': 35,
u'kji': 35,
u'#<>': 35,
u'oeb': 35,
u'iiv': 35,
u'iip': 35,
u'\xfcv\xe9': 35,
u'#\xf3d': 35,
u'oa#': 35,
u'\xf6za': 35,
u'\xf6zd': 35,
u'wu#': 35,
u'\u0151al': 35,
u'\xe9pu': 35,
u'um\xfa': 35,
u'hcs': 35,
u'cpr': 35,
u'ntg': 35,
u'il\xf6': 35,
u'nm\xe4': 35,
u'gpa': 35,
u'jpa': 35,
u'<>#': 35,
u'\xf3h\xe9': 35,
u'otl': 35,
u'rlg': 35,
u'la\xdf': 35,
u'di\u0107': 35,
u'xpe': 35,
u'k\u0151b': 35,
u'r\xf6d': 35,
u'\u0151ho': 35,
u'\xfaso': 35,
u'\xe1fa': 35,
u'\u0151ln': 35,
u'\xfaho': 35,
u'y\u0171v': 35,
u'\xf3sh': 35,
u'\xe4ki': 35,
u'#kj': 35,
u'sku': 35,
u'\u015foi': 34,
u'x#l': 34,
u'fr\xe9': 34,
u'ezg': 34,
u'scf': 34,
u'#g\xfc': 34,
u'bva': 34,
u'\xe1#u': 34,
u'gb#': 34,
u'br\xe9': 34,
u'zc\xe9': 34,
u'\xe1mc': 34,
u'hvi': 34,
u'esu': 34,
u'ozk': 34,
u'\u0151fu': 34,
u'fna': 34,
u'\xednf': 34,
u'\u0171nv': 34,
u'#nd': 34,
u'oib': 34,
u'\xe4tt': 34,
u'k\xfd#': 34,
u'bko': 34,
u'u\u015fo': 34,
u'\xe9it': 34,
u'mix': 34,
u'equ': 34,
u'+#r': 34,
u'gip': 34,
u'pt\xf6': 34,
u'jeh': 34,
u'a\xfcl': 34,
u'joa': 34,
u'vam': 34,
u'n\u0171n': 34,
u'doc': 34,
u'uef': 34,
u'kps': 34,
u'cka': 34,
u'xe#': 34,
u'th\xfa': 34,
u'ghb': 34,
u'nmo': 34,
u'j\xe4\xe4': 34,
u'\u0171d\xf6': 34,
u'ecb': 34,
u'g\u0151d': 34,
u'zym': 34,
u'\xe9sg': 34,
u'kgb': 34,
u'v\xf6n': 34,
u'opi': 34,
u'f\u0151p': 34,
u'\xf6sc': 34,
u'mk\xfc': 34,
u'ysu': 34,
u'bu\u015f': 34,
u'esd': 34,
u'nn\xf6': 34,
u'pmi': 34,
u'ssb': 34,
u'na\xfc': 34,
u'\xe4\xe4t': 34,
u'yn#': 34,
u'cf#': 34,
u'n\xf6n': 34,
u't\xe9i': 33,
u'\u0161a#': 33,
u'\u0171v\xf6': 33,
u'\u0151or': 33,
u'\xe1sl': 33,
u'bv#': 33,
u'mj\xe9': 33,
u'w#f': 33,
u'gb\xfa': 33,
u'\xedvs': 33,
u'ob#': 33,
u'ffl': 33,
u'\xb0c#': 33,
u'i\xe9v': 33,
u'c\xe9n': 33,
u'\xedz\xf3': 33,
u'j\u0171v': 33,
u'urs': 33,
u'\u0171j\xe9': 33,
u'yao': 33,
u'dod': 33,
u'ma\xfc': 33,
u'dka': 33,
u'uj\xe1': 33,
u'oiz': 33,
u'zl\xfc': 33,
u'iim': 33,
u'ch\xe9': 33,
u'r\u0151j': 33,
u's\xedp': 33,
u'bst': 33,
u'giv': 33,
u'kyp': 33,
u'ia\xf6': 33,
u'nlu': 33,
u'#ie': 33,
u'dl\xf3': 33,
u'\u010di\u016b': 33,
u'\xe1tb': 33,
u'#ae': 33,
u'sa\xef': 33,
u'i\u016bt': 33,
u'rdy': 33,
u'npu': 33,
u'bt\xe1': 33,
u'\xf6s\xe9': 33,
u'cod': 33,
u'ghu': 33,
u'#uh': 33,
u'ut\xed': 33,
u'\xe1ps': 33,
u'l\u0151\xfc': 33,
u'l\u0151\xe9': 33,
u'\u0171s#': 33,
u'mov': 33,
u'f\u0151e': 33,
u're\xfc': 33,
u'daj': 33,
u'f#b': 33,
u'f#h': 33,
u'ogd': 33,
u'\xe1nm': 33,
u'bp\xe1': 33,
u'faz': 33,
u'cag': 33,
u'\xe1vf': 33,
u'h#f': 33,
u'xr\u0151': 33,
u'#mb': 33,
u'd\xe1#': 33,
u'x#i': 32,
u'fie': 32,
u'ukj': 32,
u'\xednc': 32,
u'gf\xf3': 32,
u'\xfchr': 32,
u'cic': 32,
u'fdp': 32,
u'of\xe9': 32,
u'of\xed': 32,
u'\xf6mn': 32,
u'lfr': 32,
u'inp': 32,
u'wag': 32,
u'\xedrl': 32,
u'h\xe1m': 32,
u'cay': 32,
u'z\xf3e': 32,
u'eip': 32,
u'am\xfc': 32,
u'\u0144ba': 32,
u'y\xe1\xe9': 32,
u'n\xf3i': 32,
u'wir': 32,
u'b\u0103s': 32,
u'tb\xed': 32,
u'bcs': 32,
u'aux': 32,
u'bob': 32,
u'uft': 32,
u'\xf3f\xf6': 32,
u'tsp': 32,
u'mim': 32,
u'adw': 32,
u'clt': 32,
u'gim': 32,
u'tbo': 32,
u'oy#': 32,
u'i#\u0161': 32,
u'whi': 32,
u'fka': 32,
u'jk\xfa': 32,
u'ttl': 32,
u'jgy': 32,
u'vi\u0107': 32,
u'\u0171te': 32,
u'phn': 32,
u'uag': 32,
u'\xf6st': 32,
u'\xf3ty': 32,
u'nao': 32,
u'\u0161ev': 32,
u'jhe': 32,
u'diz': 32,
u'uly': 32,
u'ncz': 32,
u'r\xe9f': 32,
u'r\xe9\xe9': 32,
u'f#n': 32,
u'\xf6lr': 32,
u'cct': 32,
u'nu#': 32,
u'vv\xe1': 32,
u'h\u0151k': 32,
u'f\xedl': 32,
u'pr\xed': 32,
u'sst': 32,
u'rmb': 32,
u'nbo': 32,
u'izc': 32,
u'cfr': 32,
u'p\xe9i': 31,
u'fiv': 31,
u'#oi': 31,
u'#oh': 31,
u'#\xf3#': 31,
u'jew': 31,
u'#gt': 31,
u'gb\xe1': 31,
u'\xedvr': 31,
u'in\xfc': 31,
u'nod': 31,
u'jro': 31,
u'ncy': 31,
u'ymp': 31,
u'bgy': 31,
u'\xf3g\xe1': 31,
u'#j\xe4': 31,
u'meo': 31,
u'ma\u0144': 31,
u'zsc': 31,
u'jfi': 31,
u'oia': 31,
u'\xedm\xe1': 31,
u'\xfarl': 31,
u'mmr': 31,
u'r\xfah': 31,
u'\xfajv': 31,
u'iia': 31,
u'ad\u0151': 31,
u'pku': 31,
u'pk#': 31,
u'vip': 31,
u'yh\xf3': 31,
u'nns': 31,
u'hwo': 31,
u'hwi': 31,
u'sum': 31,
u'\xf6zy': 31,
u's#\u0161': 31,
u'#mt': 31,
u'hoa': 31,
u'dsi': 31,
u'sm#': 31,
u'ztb': 31,
u'vf\u0171': 31,
u'\xe1kc': 31,
u'\u0151is': 31,
u'odk': 31,
u'kpk': 31,
u'etz': 31,
u'nmi': 31,
u'puj': 31,
u'stg': 31,
u'idh': 31,
u'ief': 31,
u'mco': 31,
u'\u0151dz': 31,
u'zn\xf3': 31,
u'pay': 31,
u'zj\xe1': 31,
u'sd\xf6': 31,
u'gc\xe1': 31,
u'#xv': 31,
u'aiz': 31,
u'ejs': 31,
u'\xe9gu': 31,
u'p\xe1m': 31,
u'ef\xf6': 31,
u'hio': 31,
u'tax': 31,
u'v\xf3s': 31,
u'sk\xed': 31,
u'p\xe9\xe9': 30,
u'uon': 30,
u'tz\xfa': 30,
u'tzb': 30,
u'a\u0142o': 30,
u'\u0171r\xed': 30,
u'\xf6er': 30,
u'ezc': 30,
u'gfu': 30,
u'y\u0144s': 30,
u'\xf3ze': 30,
u'o\xf6k': 30,
u'cmr': 30,
u'hrk': 30,
u'z\xf3g': 30,
u'gj\xfc': 30,
u'nct': 30,
u'ppc': 30,
u'rsb': 30,
u'\xfa#z': 30,
u'sj\xe1': 30,
u'vd#': 30,
u'jf\xe1': 30,
u'oir': 30,
u'\xf8nd': 30,
u'rgg': 30,
u'ufi': 30,
u'nsc': 30,
u'\xfaj\xe9': 30,
u'uhr': 30,
u'ubi': 30,
u'\u0171an': 30,
u'su#': 30,
u'rkk': 30,
u'kza': 30,
u'iep': 30,
u's\xf8n': 30,
u'edf': 30,
u'vub': 30,
u'si\xf3': 30,
u'\u0161ti': 30,
u'ouv': 30,
u'rpc': 30,
u'an\u0161': 30,
u'#m#': 30,
u'khu': 30,
u'\xf3\xe9#': 30,
u'b\xfck': 30,
u'n\xe1\xe9': 30,
u'od\xed': 30,
u'zij': 30,
u'h\xe4u': 30,
u'np\xf3': 30,
u'btu': 30,
u'lax': 30,
u'coo': 30,
u'jt\xfc': 30,
u'dtv': 30,
u'b\xe9s': 30,
u'bey': 30,
u'mgy': 30,
u'zy\u0144': 30,
u'#ln': 30,
u'isf': 30,
u'r\xf6e': 30,
u'f\u0151h': 30,
u'f\u0151z': 30,
u'\u0151st': 30,
u'okz': 30,
u'a\xf6s': 30,
u'kst': 30,
u'zb\xe9': 30,
u'tiu': 30,
u'iz\xe9': 30,
u'yn\u0151': 30,
u'z\u0151h': 30,
u'b\xf6b': 30,
u'#mh': 30,
u'ybi': 30,
u'a\xfct': 30,
u'bn\xe1': 29,
u'azl': 29,
u'osv': 29,
u'oeu': 29,
u'jin': 29,
u'hep': 29,
u'\xe9ja': 29,
u't\u0171v': 29,
u'tr\xe3': 29,
u'uct': 29,
u'cme': 29,
u'\xe1i\xe9': 29,
u'\xe9by': 29,
u'uzi': 29,
u'uzk': 29,
u's\xe2r': 29,
u'\xe9mt': 29,
u'iya': 29,
u'tf\xe1': 29,
u'dv#': 29,
u'd\u0171s': 29,
u'\xednu': 29,
u'\xedam': 29,
u'\xf6rp': 29,
u'\xfclg': 29,
u'ge=': 29,
u'pc#': 29,
u'rg\xf6': 29,
u'i\xfcr': 29,
u'l\xf3c': 29,
u'uya': 29,
u'fck': 29,
u'ptj': 29,
u'al\xfc': 29,
u'\xe9#\xed': 29,
u'n\u0161a': 29,
u'\xe9\xe9#': 29,
u'yl\xf3': 29,
u'\xe9sj': 29,
u'#vv': 29,
u'khv': 29,
u'h\xf3t': 29,
u'y\u0151j': 29,
u'\u0171t\xf6': 29,
u'tpe': 29,
u'rdd': 29,
u'\u0151mu': 29,
u'\xe9hp': 29,
u'uhh': 29,
u'g\xfct': 29,
u'\xe2rb': 29,
u'vdo': 29,
u'v\u0151h': 29,
u'v\u0151o': 29,
u'z\xe9\xfc': 29,
u'hh\xfa': 29,
u'x\xe1l': 29,
u'#hy': 29,
u'upu': 29,
u'kg#': 29,
u'ne\u0161': 29,
u'tyv': 29,
u'hdo': 29,
u'\xfasa': 29,
u't\u0151f': 29,
u'ggh': 29,
u'=hu': 29,
u'\xf6lz': 29,
u'ra\xed': 29,
u'nui': 29,
u'p\xf3b': 29,
u'ejg': 29,
u'ejb': 29,
u'lng': 29,
u'fau': 29,
u'izv': 29,
u'arx': 29,
u'dut': 29,
u'd\xe1h': 29,
u'dph': 29,
u'rlh': 28,
u'a\xe9v': 28,
u'\u0171r\xfc': 28,
u'\u0171#\xed': 28,
u'fio': 28,
u'g\xf6m': 28,
u'ugj': 28,
u'zkf': 28,
u'chj': 28,
u't\u0171k': 28,
u'w#s': 28,
u'xvi': 28,
u'p\u0151n': 28,
u'dgr': 28,
u'nqu': 28,
u'jv\xed': 28,
u'mrl': 28,
u'\xe9m\xfc': 28,
u'\xf3vh': 28,
u'gmi': 28,
u'oze': 28,
u'\xe1ur': 28,
u'ovn': 28,
u'unb': 28,
u'\xf3b#': 28,
u'#b\u0103': 28,
u'two': 28,
u'eyt': 28,
u'gaj': 28,
u'rgr': 28,
u'nsb': 28,
u'a\u0144b': 28,
u'\xf6v#': 28,
u'z\xedj': 28,
u'rtc': 28,
u'cdu': 28,
u'g\u0171v': 28,
u'ylw': 28,
u'zk\xfd': 28,
u'siu': 28,
u'sii': 28,
u'ouf': 28,
u't\xf3o': 28,
u'jom': 28,
u'ndg': 28,
u'ndp': 28,
u'dt\xf3': 28,
u'lp\xe1': 28,
u'vee': 28,
u'tt\xed': 28,
u'k\xfcd': 28,
u'n\xe1d': 28,
u'cte': 28,
u'ckb': 28,
u'etd': 28,
u'mhz': 28,
u'x#b': 28,
u'\u015f#b': 28,
u'\u016bta': 28,
u'cc\xf3': 28,
u'dto': 28,
u'\xe9#z': 28,
u'id\xfc': 28,
u'shw': 28,
u'luh': 28,
u'f\xe1s': 28,
u'f\u0151j': 28,
u'dge': 28,
u'lf#': 28,
u'j\u0119d': 28,
u'raa': 28,
u'vvd': 28,
u'\xf3sb': 28,
u'an\xf6': 28,
u'hub': 28,
u'\xe8sj': 28,
u'dys': 28,
u'\u0119dr': 28,
u'rvt': 28,
u'kbt': 28,
u's\xfag': 28,
u'#mm': 28,
u'eb\xfc': 28,
u'\xe1z\xed': 27,
u'kf\xfc': 27,
u'#ov': 27,
u'lr\xe9': 27,
u'm\u0151#': 27,
u'e\xe9g': 27,
u'\xf3z#': 27,
u'sgo': 27,
u'lzu': 27,
u'aek': 27,
u'rbr': 27,
u'\xedvl': 27,
u'gu\xe9': 27,
u'z\xf3p': 27,
u'rh\xf6': 27,
u'lkl': 27,
u'\xedzp': 27,
u'#j\u0119': 27,
u'hna': 27,
u'gyc': 27,
u'or\xf3': 27,
u'#bs': 27,
u'p\xf3z': 27,
u'syf': 27,
u'syl': 27,
u'nsg': 27,
u'v\xfcg': 27,
u'a\xdf#': 27,
u'vio': 27,
u'ppr': 27,
u'rox': 27,
u'gtr': 27,
u'poi': 27,
u'\u0171em': 27,
u'vli': 27,
u'\u010di\u010d': 27,
u'\xf3mu': 27,
u's#\u0171': 27,
u'#my': 27,
u'hoh': 27,
u'xix': 27,
u'\u0151eg': 27,
u'anq': 27,
u'jkj': 27,
u'h\xf3s': 27,
u'dd\xe9': 27,
u'#pf': 27,
u'uev': 27,
u'exc': 27,
u'\xf6tj': 27,
u'#y#': 27,
u'ihr': 27,
u'rvd': 27,
u'\xfagn': 27,
u'\xfago': 27,
u'o\u0161e': 27,
u'\u0151t\u0171': 27,
u'x\xe9n': 27,
u'gh\xf6': 27,
u'tda': 27,
u'yo#': 27,
u'sth': 27,
u'y\xfcz': 27,
u'l\u0151u': 27,
u'mca': 27,
u'\u0151d\xed': 27,
u'\xe1b\xfa': 27,
u'tuo': 27,
u'r\xf6p': 27,
u'usl': 27,
u'lf\xe9': 27,
u'ioj': 27,
u'ltz': 27,
u'icr': 27,
u'ejc': 27,
u'cb#': 27,
u'pvc': 27,
u'dua': 27,
u'y\u0171n': 27,
u'us\xf6': 27,
u'jik': 26,
u'e\xedg': 26,
u'dbi': 26,
u'\xe9bi': 26,
u'em\xf6': 26,
u'rnb': 26,
u'z\xf3m': 26,
u'yij': 26,
u'psm': 26,
u'#s\xe2': 26,
u'iop': 26,
u'\u0151b\xe1': 26,
u'h\xf3z': 26,
u'#nt': 26,
u'vpo': 26,
u'mae': 26,
u'aii': 26,
u'sym': 26,
u'\xe1d\xe9': 26,
u'\xf6zc': 26,
u'pgo': 26,
u'hmo': 26,
u'kyo': 26,
u'\xfab\xf3': 26,
u'\u0171e#': 26,
u'yl\xe1': 26,
u'bd\xe1': 26,
u'r\u010di': 26,
u'apg': 26,
u'\u0151em': 26,
u'\xe1lf': 26,
u'dyn': 26,
u'ttt': 26,
u'cp#': 26,
u'cpo': 26,
u'\u0171zb': 26,
u'ex\xe1': 26,
u'exh': 26,
u'tlu': 26,
u'xen': 26,
u'\xdf#a': 26,
u'x\xe9t': 26,
u'hlr': 26,
u'ecv': 26,
u'rqu': 26,
u'\xe8ne': 26,
u'bay': 26,
u'ops': 26,
u'kkh': 26,
u'd#\xf3': 26,
u'\xfasd': 26,
u't\u0151\xf6': 26,
u'r\xe9u': 26,
u'bij': 26,
u'sdr': 26,
u'e=h': 26,
u'af\xf3': 26,
u'afn': 26,
u'f\xe9i': 26,
u'raw': 26,
u'\xf3sn': 26,
u'f\xf6n': 26,
u'efk': 26,
u'ar\u010d': 26,
u'\xfak\xe9': 26,
u'e\xf6t': 26,
u'y#\u0161': 26,
u'h#g': 26,
u'h#i': 26,
u'#md': 26,
u'\xe9ry': 25,
u'\xf3or': 25,
u'\xe1zg': 25,
u'irw': 25,
u'haa': 25,
u'osm': 25,
u'\u0151k\xe1': 25,
u'on\xf6': 25,
u'#\xf3k': 25,
u'hze': 25,
u'lb\xfa': 25,
u'rfm': 25,
u'cim': 25,
u'jz\xe1': 25,
u'c\xfa#': 25,
u'lce': 25,
u'jv\xe1': 25,
u'yiz': 25,
u'lo\u0161': 25,
u'psc': 25,
u'psd': 25,
u'meb': 25,
u'mey': 25,
u'#n\xf3': 25,
u'vpm': 25,
u'ayt': 25,
u'\xe9un': 25,
u'\xedmr': 25,
u'\xfd#\xfa': 25,
u'iul': 25,
u'gay': 25,
u'p=l': 25,
u'\xfajk': 25,
u'#f#': 25,
u'\u0151\xe1g': 25,
u'chd': 25,
u'\xe1du': 25,
u'cle': 25,
u'\u0151#w': 25,
u'nnb': 25,
u'cdo': 25,
u'\xfazs': 25,
u'xt\xed': 25,
u'hoj': 25,
u'xic': 25,
u'ttf': 25,
u'tt\xe0': 25,
u'rmk': 25,
u'\xf3em': 25,
u'pda': 25,
u'\xe9hc': 25,
u'et\xf3': 25,
u'\xe1k\xf6': 25,
u'laa': 25,
u'laq': 25,
u'gh#': 25,
u'cc\xe1': 25,
u'n+#': 25,
u'oxa': 25,
u'\xf3\xfcz': 25,
u'up=': 25,
u'l\u0151\xfa': 25,
u'\xfcge': 25,
u'#lh': 25,
u's\u0142a': 25,
u'oum': 25,
u'\u0151h\xfa': 25,
u'f#c': 25,
u'enu': 25,
u'ajc': 25,
u'=la': 25,
u'\xe9gd': 25,
u'\xe9zr': 25,
u'igf': 25,
u'\xedk#': 25,
u'#tz': 25,
u'nfu': 25,
u'j\xfat': 25,
u'\xf3ok': 24,
u'irl': 24,
u'mbl': 24,
u'bj\xe9': 24,
u'sc#': 24,
u'\u0151rj': 24,
u'ij\xf6': 24,
u'\xeffi': 24,
u'w#a': 24,
u'kr#': 24,
u'gn#': 24,
u'lc\xe9': 24,
u'yvh': 24,
u'foc': 24,
u'caj': 24,
u'\xf3rd': 24,
u'\xf6ls': 24,
u'eiz': 24,
u'rj#': 24,
u'\xf6gn': 24,
u'ngf': 24,
u'amy': 24,
u'sja': 24,
u'ovt': 24,
u'yeu': 24,
u'#n+': 24,
u'#nk': 24,
u'c\xe1#': 24,
u'zsp': 24,
u'e#\xe0': 24,
u'sv\xed': 24,
u'jfu': 24,
u'#bm': 24,
u'#bp': 24,
u'vh\xf6': 24,
u'goc': 24,
u'sy#': 24,
u'#zw': 24,
u'xpl': 24,
u'a\xfc#': 24,
u'sif': 24,
u'jku': 24,
u'a\xeff': 24,
u'ttg': 24,
u'gdy': 24,
u'ohr': 24,
u'l\xe8n': 24,
u'exk': 24,
u'ctb': 24,
u'doo': 24,
u'rdf': 24,
u'npt': 24,
u'kp#': 24,
u'\xe1jf': 24,
u'x#c': 24,
u'b#\u0151': 24,
u'dtn': 24,
u'p\u0151e': 24,
u'#uc': 24,
u'idu': 24,
u'utz': 24,
u'#hr': 24,
u'kkk': 24,
u'\xe9so': 24,
u'opl': 24,
u's\xf3l': 24,
u'pav': 24,
u'r\xf6t': 24,
u'\u0151sr': 24,
u'sd\xed': 24,
u'zf\u0151': 24,
u'hr\xf6': 24,
u'ksa': 24,
u'#x#': 24,
u'\xf6ll': 24,
u'cje': 24,
u'\xeds#': 24,
u'ic\xe9': 24,
u'\xf3se': 24,
u'ms\xe9': 24,
u'ejn': 24,
u'gkl': 24,
u'yno': 24,
u'\u0151\xf6v': 24,
u'j\xfab': 24,
u'mfn': 24,
u'y\xf3j': 24,
u'y\xf3t': 24,
u'yf\xf6': 23,
u'\xe9vl': 23,
u'ivv': 23,
u'e\u0161d': 23,
u'pb\u0151': 23,
u'\xe9jb': 23,
u'gfa': 23,
u'voj': 23,
u'pf\xf6': 23,
u'\xe1#\xfc': 23,
u'a#\u0163': 23,
u'inm': 23,
u'aie': 23,
u'\xe0#l': 23,
u'h\xe1j': 23,
u'cau': 23,
u'n\xf3t': 23,
u'pwa': 23,
u'zwi': 23,
u'doj': 23,
u'e#q': 23,
u'zss': 23,
u'zsl': 23,
u'uj#': 23,
u'oic': 23,
u'\xedmb': 23,
u'p\xf3j': 23,
u'o#y': 23,
u'\u0161de': 23,
u'chb': 23,
u'eu\xe1': 23,
u'ad\xfa': 23,
u'oad': 23,
u'rc\xfa': 23,
u'\xe9ab': 23,
u'\xf3ur': 23,
u'n\xfcr': 23,
u'iej': 23,
u'#ix': 23,
u'ylv': 23,
u'bdu': 23,
u'\xfarj': 23,
u's#+': 23,
u'n\xe9j': 23,
u'fop': 23,
u'ovg': 23,
u'ac\xed': 23,
u'#\xe8#': 23,
u'pfl': 23,
u'\xf6tb': 23,
u'rdm': 23,
u'\xfakb': 23,
u'y\xe9j': 23,
u'ssr': 23,
u'\xfagj': 23,
u'ilg': 23,
u'epw': 23,
u'bpt': 23,
u'mtu': 23,
u'ek\xed': 23,
u'gh\u0151': 23,
u'aex': 23,
u'yts': 23,
u'u\xe9l': 23,
u'ofa': 23,
u'h\xfcv': 23,
u'\xf3mn': 23,
u'dmo': 23,
u'n#\u0161': 23,
u'zn\xf6': 23,
u's\xf3d': 23,
u's\xf3g': 23,
u'iwi': 23,
u'bmi': 23,
u'hdi': 23,
u'de\xf3': 23,
u'g\xf3c': 23,
u'\u0151lh': 23,
u't\xe0#': 23,
u'fyr': 23,
u'esg': 23,
u'aj\xed': 23,
u'\xfad#': 23,
u'jde': 23,
u'd\xe9d': 23,
u'\xe9g\xf6': 23,
u'rm\xe0': 23,
u'\xfat\xe9': 23,
u'jud': 23,
u'jur': 23,
u'p\xe1#': 23,
u'rvg': 23,
u'pva': 23,
u'eb\xe1': 23,
u'jmo': 22,
u'vgo': 22,
u'tzu': 22,
u'\u0171rl': 22,
u'fib': 22,
u'k\xfat': 22,
u'k\xfaa': 22,
u'#fg': 22,
u'#fy': 22,
u'b\xfaa': 22,
u'\u0151rn': 22,
u'\xedje': 22,
u'h\xe9l': 22,
u'r\xf3o': 22,
u'jah': 22,
u'h\xedm': 22,
u'a#\xb0': 22,
u'rnd': 22,
u't\xfam': 22,
u'cak': 22,
u'cab': 22,
u'rj\xf6': 22,
u'i\xe9k': 22,
u'c#\xf3': 22,
u'dr\xed': 22,
u'xhe': 22,
u'#n\xfc': 22,
u'ztg': 22,
u'aym': 22,
u'z\xfcs': 22,
u'\u0151n\u0151': 22,
u'\xf3\xf6s': 22,
u'dg#': 22,
u'rgu': 22,
u'\xf6np': 22,
u'#ft': 22,
u'yqu': 22,
u'too': 22,
u'nh\xed': 22,
u'pk\xe1': 22,
u'muh': 22,
u'+##': 22,
u'\xe9at': 22,
u'gt\xfc': 22,
u'iaz': 22,
u'uyq': 22,
u'#rf': 22,
u'ed\xf6': 22,
u'apm': 22,
u'#\u010de': 22,
u'\u0151ak': 22,
u'#mg': 22,
u'jot': 22,
u'\xe4us': 22,
u'dhr': 22,
u'v#\u0151': 22,
u'rmh': 22,
u'\u0151ie': 22,
u'b\xfct': 22,
u'acv': 22,
u'uim': 22,
u'#eo': 22,
u'k\xfct': 22,
u'exb': 22,
u'\xe9hh': 22,
u'odd': 22,
u'\xe1kf': 22,
u'eod': 22,
u'uae': 22,
u'\xf6sb': 22,
u'p\xfcz': 22,
u'pl\xf3': 22,
u'bpr': 22,
u'ytr': 22,
u'ccc': 22,
u'\xe1s\xf3': 22,
u'oxf': 22,
u'idj': 22,
u'ecp': 22,
u'e\xf3n': 22,
u'\xf3\xfcn': 22,
u'\xe9s\xf3': 22,
u'dm#': 22,
u'l\u0151o': 22,
u'\u0171l\xe1': 22,
u'r\xedg': 22,
u'op\xe9': 22,
u'jh\xe1': 22,
u't\u0151\xe1': 22,
u'\xedmn': 22,
u'sdt': 22,
u'bi\u0144': 22,
u'xbe': 22,
u'yso': 22,
u'\xe1jo': 22,
u'pio': 22,
u'p#\xed': 22,
u'ocl': 22,
u'ocr': 22,
u'\xf6pp': 22,
u'ms\xe1': 22,
u'ejh': 22,
u'hiz': 22,
u'hij': 22,
u'riq': 22,
u'h#o': 22,
u'vc#': 22,
u'ebu': 22,
u'\xf3os': 21,
u'k\xf6p': 21,
u'#c\xf3': 21,
u'ezf': 21,
u'\u0151os': 21,
u'\xe9j\xe1': 21,
u'knu': 21,
u'\u0171zr': 21,
u'sg\u0151': 21,
u'ch\xf3': 21,
u'lz\xfa': 21,
u'\xedr\u0171': 21,
u'f\xf3n': 21,
u'yv\xf6': 21,
u'\xf3rc': 21,
u'\xf3rj': 21,
u'j#w': 21,
u'dz#': 21,
u'#ss': 21,
u'#s\xf8': 21,
u'\xed\u010de': 21,
u'n\xf3f': 21,
u'jju': 21,
u'v#z': 21,
u'fsf': 21,
u'ayr': 21,
u'vkk': 21,
u'n\u0151g': 21,
u'nsj': 21,
u'\xfaji': 21,
u'tsr': 21,
u'sf#': 21,
u'adg': 21,
u'+#a': 21,
u'ppn': 21,
u'\xe9an': 21,
u'k\xe1p': 21,
u'rkr': 21,
u'h\u0151h': 21,
u'rk\xed': 21,
u'fgm': 21,
u'\u010d#b': 21,
u'\xf3ga': 21,
u'##w': 21,
u'\xeddi': 21,
u'#up': 21,
u'xia': 21,
u'zt\xfa': 21,
u'dh\xf6': 21,
u'acp': 21,
u'ml\xf6': 21,
u'\xf3\xe9t': 21,
u'\xfakn': 21,
u'ag\xf6': 21,
u'h\u0171h': 21,
u'h\u0171l': 21,
u'aky': 21,
u'ntd': 21,
u'bp\xf3': 21,
u'x##': 21,
u'yt\xf6': 21,
u'\xe9d\xe1': 21,
u'idg': 21,
u'hl\xed': 21,
u'zyk': 21,
u'is\xfa': 21,
u'u\xe1n': 21,
u'xje': 21,
u'v\xfaa': 21,
u'l\xed\u010d': 21,
u'bmw': 21,
u'jdh': 21,
u'\xfalp': 21,
u'dae': 21,
u'm\xfcv': 21,
u's\u0151h': 21,
u'\xf6ly': 21,
u'\xe9f\xe1': 21,
u'\u0107#\xfa': 21,
u'p#\xf3': 21,
u'cnd': 21,
u'icu': 21,
u'd\xe9\xe9': 21,
u'anv': 21,
u'igk': 21,
u'\xfcd\xed': 21,
u'#k#': 21,
u'z\xf3u': 21,
u'v\xf3a': 21,
u'avk': 21,
u'j\xfak': 21,
u'p\xe9k': 20,
u'p\xe9#': 20,
u't#x': 20,
u'\xe1zc': 20,
u'ir\xe9': 20,
u'yfu': 20,
u'mbt': 20,
u't\xe9o': 20,
u'gzs': 20,
u'bjo': 20,
u'sce': 20,
u'gf\xfc': 20,
u'ugn': 20,
u'\u0151ry': 20,
u'evg': 20,
u'pf#': 20,
u'\xe1#\xed': 20,
u'rb\u0171': 20,
u'ofr': 20,
u'rb\xe9': 20,
u'wee': 20,
u'yru': 20,
u'em\xfa': 20,
u'k#\u0161': 20,
u'wad': 20,
u'i\u0161t': 20,
u'#k\xfa': 20,
u'rwi': 20,
u'gj\xe9': 20,
u'loh': 20,
u'ps\u0171': 20,
u'\xf3vs': 20,
u'bzo': 20,
u'jne': 20,
u'#w#': 20,
u'#ww': 20,
u'ngp': 20,
u'#jp': 20,
u'ovb': 20,
u'm\xf6s': 20,
u'#np': 20,
u'sn\u0151': 20,
u'ayi': 20,
u'p\xf3k': 20,
u'j\xf3e': 20,
u'\xe4#\xfa': 20,
u'\xe1d\xf6': 20,
u'\xe1d\xf3': 20,
u'm\xfat': 20,
u'oaq': 20,
u'nw\xfc': 20,
u'#zv': 20,
u'\xfcrn': 20,
u'+#k': 20,
u'+#s': 20,
u'hsh': 20,
u'ppp': 20,
u'fja': 20,
u'kye': 20,
u'u\u010de': 20,
u'\u0171al': 20,
u'sub': 20,
u'sua': 20,
u'kf\u0151': 20,
u'#r#': 20,
u'w\xfcr': 20,
u'ou\u010d': 20,
u'rpu': 20,
u'#mp': 20,
u'xt#': 20,
u'mdc': 20,
u'xi\xe1': 20,
u'jk\xf6': 20,
u'ssk': 20,
u'\xe1c\xe1': 20,
u'v#\xed': 20,
u'w\xe9n': 20,
u'\xedlh': 20,
u'\xf6kf': 20,
u'kl\xf6': 20,
u'\xe1gd': 20,
u'bho': 20,
u'#pd': 20,
u'mhu': 20,
u'ktf': 20,
u'\xf3p#': 20,
u'ni\xf6': 20,
u'mto': 20,
u'v\u0151\xe9': 20,
u'tde': 20,
u'\xedvk': 20,
u'\u0171ln': 20,
u'\xf6gl': 20,
u'\u0171h\xf3': 20,
u'#lm': 20,
u'tyo': 20,
u'\u0151do': 20,
u'oos': 20,
u'r\xedm': 20,
u'\u0151hu': 20,
u'\xfasv': 20,
u'f\u0151c': 20,
u'f\u0151\xe1': 20,
u'\u0142o#': 20,
u'kow': 20,
u'l\xe9c': 20,
u'z\xfaa': 20,
u'peo': 20,
u'\xe9os': 20,
u'lf\xe1': 20,
u's\u0151p': 20,
u'\xe1j\xf3': 20,
u'j\xe1z': 20,
u'd\xedn': 20,
u'gob': 20,
u'wa\u0142': 20,
u'\xfad\xed': 20,
u'icl': 20,
u'j\xe9z': 20,
u'\xe9zu': 20,
u'ynd': 20,
u'ri\u0161': 20,
u'ta\u0142': 20,
u'h#w': 20,
u'd\xe1\xe9': 20,
u'v\xf3d': 20,
u'y\xf3b': 20,
u'czm': 19,
u'uot': 19,
u'\xe1zd': 19,
u'vge': 19,
u'gz\xf6': 19,
u'ji\xe1': 19,
u'\xf3c\xe9': 19,
u'\u0171v\xfc': 19,
u'gf\xe1': 19,
u'ij\xfc': 19,
u'w\xe9r': 19,
u'xce': 19,
u'w#\xe9': 19,
u'aet': 19,
u'yri': 19,
u'\xe1il': 19,
u'hrc': 19,
u'lci': 19,
u't\xfaa': 19,
u'kvt': 19,
u'kvs': 19,
u'g\xfaz': 19,
u'psi': 19,
u'ps\xfc': 19,
u'\xe9mb': 19,
u'l\xf5c': 19,
u'rsv': 19,
u'l\xfab': 19,
u'ke\xe9': 19,
u'mza': 19,
u'meu': 19,
u'xho': 19,
u'yep': 19,
u'\xf6z\xe1': 19,
u'vpr': 19,
u'bov': 19,
u'i\xe1c': 19,
u'lsh': 19,
u'y\xe9\xfc': 19,
u'iug': 19,
u'gaf': 19,
u'cus': 19,
u'uf#': 19,
u'ld\xfa': 19,
u'qu\xed': 19,
u'm\xfad': 19,
u'rc\xfc': 19,
u'zdr': 19,
u'toy': 19,
u'el\xf5': 19,
u'fcs': 19,
u'uyt': 19,
u'eh\u0171': 19,
u'ehd': 19,
u'y\u0171t': 19,
u'#rc': 19,
u'xpa': 19,
u'kvo': 19,
u't\xf3u': 19,
u'#vr': 19,
u'tc\xed': 19,
u'mya': 19,
u's#y': 19,
u'f\xfct': 19,
u'lph': 19,
u'wt#': 19,
u'\xf3ad': 19,
u'd\u0151f': 19,
u'n\xed#': 19,
u'zp\xf3': 19,
u'acl': 19,
u'\xf3eg': 19,
u'itf': 19,
u'itp': 19,
u'ftr': 19,
u'\u0151\xfcz': 19,
u'\xf5cs': 19,
u'\xfakk': 19,
u'phy': 19,
u'ki\xf3': 19,
u'\xe9l\xe8': 19,
u'd\xf3\xf6': 19,
u'epm': 19,
u'epj': 19,
u'u\xedn': 19,
u'rk\xfa': 19,
u'rld': 19,
u'x#o': 19,
u'ytt': 19,
u'm\xe0#': 19,
u'\xe1s\xed': 19,
u'dtj': 19,
u'#uf': 19,
u'ut\xf6': 19,
u'snu': 19,
u'dpr': 19,
u'shv': 19,
u'luo': 19,
u'xum': 19,
u'mcm': 19,
u'fl\xfc': 19,
u'k\u0151s': 19,
u'l\xedd': 19,
u'ouk': 19,
u'\xfase': 19,
u'\xf3dk': 19,
u'ikc': 19,
u'z\xfan': 19,
u'lyd': 19,
u'mm\xf3': 19,
u'\u0151lj': 19,
u'ud\xe9': 19,
u'a\xf6v': 19,
u'\u0144i#': 19,
u'm\u0151t': 19,
u'cni': 19,
u'tii': 19,
u'pr#': 19,
u'unf': 19,
u'jua': 19,
u'juv': 19,
u'rvc': 19,
u'igj': 19,
u'jvi': 19,
u'zzi': 19,
u'lj\xf3': 19,
u'pza': 19,
u'hm\xfa': 19,
u'owt': 19,
u'i\xfcg': 19,
u'nzv': 18,
u'soi': 18,
u'\xe9vj': 18,
u'm\xf3k': 18,
u'fm#': 18,
u'a\xedr': 18,
u'ivn': 18,
u't#y': 18,
u'k\xe1u': 18,
u'aaa': 18,
u'nry': 18,
u'\xedjn': 18,
u'tdt': 18,
u'aea': 18,
u'\xf6ms': 18,
u'jz#': 18,
u'eu\xe9': 18,
u'\u0151zk': 18,
u'nof': 18,
u'tj\xf6': 18,
u'\u0171bi': 18,
u'yic': 18,
u'o\xe9r': 18,
u't\xf6p': 18,
u'zr\xe9': 18,
u'yd\xed': 18,
u'eea': 18,
u'hny': 18,
u'\u0151f\xe9': 18,
u'\xednn': 18,
u'jf\xfc': 18,
u'ge\u013e': 18,
u'oim': 18,
u'kmh': 18,
u'd\xfar': 18,
u'gac': 18,
u'jbi': 18,
u'\xfcv\u0151': 18,
u'dci': 18,
u'rcv': 18,
u'r\u0151\xf3': 18,
u'v\u0171e': 18,
u'im\xf3': 18,
u'bs#': 18,
u'muv': 18,
u'+#\xe9': 18,
u'ro\xe9': 18,
u'dpa': 18,
u'gt\u0171': 18,
u'v\xe9z': 18,
u'#rt': 18,
u'siw': 18,
u't\xf3\xe1': 18,
u'xi\xf3': 18,
u'khs': 18,
u'kh#': 18,
u'vey': 18,
u'ttw': 18,
u'axh': 18,
u'ui\xe8': 18,
u'uir': 18,
u'rxi': 18,
u'fti': 18,
u'fto': 18,
u'ag\xfc': 18,
u'\xfakr': 18,
u'npe': 18,
u'pf\xe9': 18,
u'ilu': 18,
u'cog': 18,
u'nii': 18,
u'za\xfc': 18,
u'tlo': 18,
u'td#': 18,
u'\u0151\xf3r': 18,
u'dtt': 18,
u'mga': 18,
u'luf': 18,
u'otc': 18,
u'\xe9sa': 18,
u'\xf3lm': 18,
u'gpl': 18,
u'tyn': 18,
u'zui': 18,
u'\u0171l\u0151': 18,
u'cs\u0171': 18,
u'\xedtn': 18,
u'#dg': 18,
u'kdo': 18,
u'da\xe1': 18,
u's\u0151m': 18,
u'tm#': 18,
u't\u0151g': 18,
u'm\u0151h': 18,
u'\xf3d\xe9': 18,
u'cn\xe9': 18,
u'h\u0151n': 18,
u'i\xf6n': 18,
u'y\u0151s': 18,
u'nb\xe9': 18,
u'h##': 18,
u'\xfcdt': 18,
u'vau': 18,
u'ebn': 18,
u'\xe1z\xfc': 17,
u'vga': 17,
u'a\xe9n': 17,
u'\u0171vi': 17,
u'hex': 17,
u'dsh': 17,
u'z\u0171e': 17,
u'\u0151rb': 17,
u'\xfch\xed': 17,
u'w\xe9v': 17,
u'\xe9np': 17,
u'w#r': 17,
u'\u0142\u0119s': 17,
u'uco': 17,
u'e\xe1j': 17,
u'a#\xe8': 17,
u'i\xf1a': 17,
u'k#\u0163': 17,
u'lcc': 17,
u't\xfaj': 17,
u'yv\xfc': 17,
u'\xe1mz': 17,
u'mr\xe1': 17,
u'pn\xf6': 17,
u'k\xedg': 17,
u'loi': 17,
u'jr\u0151': 17,
u'uvl': 17,
u'\u0171fo': 17,
u'i\xe9h': 17,
u'dry': 17,
u'y\xf6g': 17,
u'zsv': 17,
u'zs\xfc': 17,
u'fdl': 17,
u'sb\xe1': 17,
u'\xe9iv': 17,
u'dgy': 17,
u'\xe1ds': 17,
u'qu\xf3': 17,
u'dct': 17,
u'#zd': 17,
u'a\u0144#': 17,
u'\xfcrr': 17,
u'\xfcr\xed': 17,
u'gic': 17,
u'ro\xfc': 17,
u'ahu': 17,
u'suy': 17,
u'\xf1as': 17,
u'#rh': 17,
u'cda': 17,
u'rf\u0151': 17,
u'ylo': 17,
u'ul\xfc': 17,
u'edp': 17,
u'si\xf6': 17,
u'd\xe9j': 17,
u'r\xfai': 17,
u'dlr': 17,
u'\u0151ru': 17,
u'voi': 17,
u'dss': 17,
u'jkb': 17,
u'u#\xed': 17,
u'zpe': 17,
u'jge': 17,
u'pd#': 17,
u'\xe9hn': 17,
u'\u0161ko': 17,
u'npj': 17,
u'\xe1kv': 17,
u'hpo': 17,
u'ntf': 17,
u'ilf': 17,
u'coa': 17,
u'\xf3p\xe9': 17,
u'mtm': 17,
u'ekd': 17,
u'sp\xe9': 17,
u'idf': 17,
u'naj': 17,
u'yc\xe9': 17,
u'i\xe1m': 17,
u'e\xf3t': 17,
u'c\u0142a': 17,
u'up\xe9': 17,
u's\xf3j': 17,
u'n#y': 17,
u'n#q': 17,
u'bao': 17,
u'vjo': 17,
u'v\xfat': 17,
u'\xfasn': 17,
u'hyl': 17,
u'csg': 17,
u'okp': 17,
u'a\u0142\u0119': 17,
u'e\u013e#': 17,
u'f##': 17,
u'y\u0171f': 17,
u'ioh': 17,
u'd\xedr': 17,
u'\xe9c\xe9': 17,
u'aj\u010d': 17,
u'nup': 17,
u'oc\u0142': 17,
u'eyj': 17,
u'ti\xfc': 17,
u'i\xf6l': 17,
u'ssl': 17,
u'cbk': 17,
u'duy': 17,
u'ta\u015f': 17,
u'ta\xfc': 17,
u'\xe1vv': 17,
u'tay': 17,
u's\xfaz': 17,
u'#ms': 17,
u'kfa': 16,
u'yfi': 16,
u'\u0171rr': 16,
u'lr#': 16,
u'os\xfc': 16,
u'\xe9v\xfa': 16,
u'm\xf3s': 16,
u'mno': 16,
u'mn\xf6': 16,
u'aaf': 16,
u'zk\xfc': 16,
u'oj#': 16,
u'try': 16,
u'w#b': 16,
u'w#l': 16,
u'dbo': 16,
u'zgo': 16,
u'\xf6mk': 16,
u'bry': 16,
u'\xe9b\xf3': 16,
u'lc\xf3': 16,
u'y\xfaz': 16,
u'ncm': 16,
u'wij': 16,
u'tbc': 16,
u'hns': 16,
u'xha': 16,
u'\u0151fa': 16,
u'gyd': 16,
u'uns': 16,
u'eeh': 16,
u'ayj': 16,
u'\u010d\xe1k': 16,
u'gec': 16,
u'ujk': 16,
u'kmn': 16,
u'p\xf3n': 16,
u'eqf': 16,
u'\xe4ne': 16,
u'ezj': 16,
u's\xe9a': 16,
u'iil': 16,
u'v\xe1\xed': 16,
u'+#f': 16,
u'a\u0144i': 16,
u'pki': 16,
u'd\u017ei': 16,
u'pph': 16,
u'pp\xe9': 16,
u'gtn': 16,
u'l\xf3\xfc': 16,
u'\u0171ad': 16,
u'pow': 16,
u'pof': 16,
u'yhu': 16,
u'cds': 16,
u'ed\xfa': 16,
u'j\xfcg': 16,
u'dwi': 16,
u'crc': 16,
u'cr\xe1': 16,
u'nd\xfa': 16,
u'vax': 16,
u'\u0171ha': 16,
u'h\xf3l': 16,
u'g\xe9j': 16,
u'ydo': 16,
u'ip\xe9': 16,
u'\u0171pi': 16,
u'\xf3zv': 16,
u'b\xfcd': 16,
u'\xfcnd': 16,
u'ft\xe1': 16,
u'ftt': 16,
u'\xe9z\xf3': 16,
u'odb': 16,
u'#yu': 16,
u'ig\xf3': 16,
u'ihu': 16,
u'ckn': 16,
u'\xe1kl': 16,
u'mh#': 16,
u'gl\xf6': 16,
u'b\xe1d': 16,
u'c\xfcn': 16,
u'\xf3\xe1g': 16,
u'gh\xf3': 16,
u'x#\xf6': 16,
u'cvo': 16,
u'vs\xe9': 16,
u'gs\u0171': 16,
u'rur': 16,
u'z#x': 16,
u'kc\xed': 16,
u'kry': 16,
u'ecj': 16,
u'lug': 16,
u'j\u010d\xe1': 16,
u'zyn': 16,
u'\xe9s\xe1': 16,
u'#ll': 16,
u'du\xe1': 16,
u'n#\u0171': 16,
u'ty\xfc': 16,
u'#\xfcs': 16,
u'mc\xe9': 16,
u'\u0151c\xed': 16,
u'\xe1j\xed': 16,
u'gl\u0151': 16,
u'k\u0151k': 16,
u'y\xe9p': 16,
u'abc': 16,
u'csc': 16,
u'ryt': 16,
u'\u0151sg': 16,
u'r\xe9z': 16,
u'ikm': 16,
u'pej': 16,
u'\xfcty': 16,
u'vr\xf3': 16,
u'ilc': 16,
u'ilh': 16,
u'cob': 16,
u'bum': 16,
u'j\xedz': 16,
u'aij': 16,
u'\xe9ce': 16,
u'aj\xe9': 16,
u'ra\xfc': 16,
u'p#z': 16,
u'ray': 16,
u'\xfadi': 16,
u'cn#': 16,
u'\xf3sc': 16,
u'ejm': 16,
u'an\xfc': 16,
u'hud': 16,
u'ssp': 16,
u'\xe9zg': 16,
u'wba': 16,
u'bfo': 16,
u'ri\xf1': 16,
u'dui': 16,
u'gh\u0171': 16,
u'\xe1vb': 16,
u'usv': 16,
u's\xfab': 16,
u'#kf': 16,
u'av\xf6': 16,
u'owj': 16,
u'irm': 15,
u'lr\xfa': 15,
u'vkb': 15,
u'k\xfaj': 15,
u'#\xf3s': 15,
u'\xf8me': 15,
u'aam': 15,
u'nro': 15,
u'nrw': 15,
u'ijj': 15,
u'cij': 15,
u'w\xe9t': 15,
u'bvk': 15,
u'w#k': 15,
u'jaj': 15,
u'zgr': 15,
u'in\u0103': 15,
u'jza': 15,
u'ery': 15,
u'er\xfa': 15,
u's\u0171e': 15,
u'h\xe1i': 15,
u'\xe1m\xe9': 15,
u'mrs': 15,
u'dz\xe9': 15,
u'y\xfaa': 15,
u'ffn': 15,
u'ncn': 15,
u'n\xf3s': 15,
u'bgo': 15,
u'lks': 15,
u'lk\xf3': 15,
u'c#\xfc': 15,
u'eaj': 15,
u'\u0119sa': 15,
u'\xf3n\xe9': 15,
u'ayy': 15,
u'yya': 15,
u'\xe9ir': 15,
u'n\u0103u': 15,
u'uf\xf3': 15,
u'nsm': 15,
u'sf\xf6': 15,
u'sff': 15,
u'#\xedn': 15,
u'adf': 15,
u'i\xfct': 15,
u'ubn': 15,
u'nhr': 15,
u'mup': 15,
u'vib': 15,
u'squ': 15,
u's\xe1u': 15,
u'dps': 15,
u'ia\xfa': 15,
u'\xf3ut': 15,
u'pp\xe4': 15,
u'r\xf8m': 15,
u'#j#': 15,
u'm\xedb': 15,
u'ap\xf6': 15,
u'\xf3#\u0171': 15,
u'oug': 15,
u'rp\xfc': 15,
u's#x': 15,
u'upf': 15,
u'xig': 15,
u'xiz': 15,
u'\u015fin': 15,
u'cdt': 15,
u'jkm': 15,
u'um\xed': 15,
u'vec': 15,
u'u#w': 15,
u'\xfarp': 15,
u'xa\xe9': 15,
u'\xe9hb': 15,
u'rdv': 15,
u'np\xe1': 15,
u'bt\xf3': 15,
u'gl\xf3': 15,
u'ypu': 15,
u'sps': 15,
u'uxh': 15,
u'pud': 15,
u'gp#': 15,
u'p\xe4n': 15,
u'ruj': 15,
u'utv': 15,
u'snc': 15,
u'ecl': 15,
u'lsp': 15,
u'sh\xe9': 15,
u'\u017ei\u0107': 15,
u'ot\xfa': 15,
u'pee': 15,
u'f#d': 15,
u'ksp': 15,
u'iob': 15,
u'xfa': 15,
u'i\u015fi': 15,
u'rax': 15,
u'jyl': 15,
u'twa': 15,
u'h\u0151t': 15,
u'sro': 15,
u'\xe9g\u0151': 15,
u'prz': 15,
u'y\u0151n': 15,
u'#d\u0171': 15,
u'hi\xf6': 15,
u'hi\u015f': 15,
u'mfb': 15,
u'mft': 15,
u'zvo': 15,
u'lv\xf3': 15,
u'\xe1f\xe1': 14,
u'irc': 14,
u'\xfatf': 14,
u'ivp': 14,
u'gsm': 14,
u'cry': 14,
u'\u0151rh': 14,
u'pss': 14,
u'evv': 14,
u't\u0171t': 14,
u'r\xf3e': 14,
u'w#c': 14,
u'w#m': 14,
u'jaf': 14,
u'\xf6mj': 14,
u'\u016bna': 14,
u'rn\xf3': 14,
u't\xfan': 14,
u'gud': 14,
u'ib\xed': 14,
u'y\xfan': 14,
u'dzu': 14,
u'loy': 14,
u'ffr': 14,
u'ffj': 14,
u'tf\xf6': 14,
u'wiz': 14,
u'l\xfan': 14,
u'ngw': 14,
u'djy': 14,
u'd\u0171l': 14,
u'omg': 14,
u'hbi': 14,
u'ayb': 14,
u'fee': 14,
u'zs\xf3': 14,
u'y\xe9i': 14,
u'dkb': 14,
u'uju': 14,
u'eyi': 14,
u'bk#': 14,
u'pc\xed': 14,
u'gav': 14,
u'gao': 14,
u'cum': 14,
u'cuj': 14,
u'pga': 14,
u't\xe2n': 14,
u'mku': 14,
u'\xf6r\xe1': 14,
u'bsb': 14,
u'mu\xf1': 14,
u'+#m': 14,
u'\xe9ai': 14,
u'#\xe1i': 14,
u'hst': 14,
u'kyr': 14,
u'vt#': 14,
u'l\xf3\xe9': 14,
u'\u0151\xe9l': 14,
u'\xf3sk': 14,
u'#rs': 14,
u'js#': 14,
u'#r\xf8': 14,
u'\xe9#\xfc': 14,
u'cdk': 14,
u'g\xe1p': 14,
u'i#y': 14,
u'bde': 14,
u'bd#': 14,
u'\u0151au': 14,
u'ltl': 14,
u't\xf3\xe9': 14,
u'#vm': 14,
u'rps': 14,
u'tc\xe9': 14,
u'\xf1iz': 14,
u'lp\xf3': 14,
u'sm\u0171': 14,
u'n\xe9\xfc': 14,
u'umg': 14,
u'#aq': 14,
u'say': 14,
u'n\xedk': 14,
u'\u0151im': 14,
u'zp#': 14,
u'zp\xe1': 14,
u'#e\xe9': 14,
u'fth': 14,
u'evr': 14,
u'seo': 14,
u'f\xe4r': 14,
u'\xe9h#': 14,
u'od\xe9': 14,
u'rdb': 14,
u'un\xf3': 14,
u'u\xf1i': 14,
u'eoi': 14,
u'akf': 14,
u'uak': 14,
u'\xfagt': 14,
u'\u0151tu': 14,
u'nty': 14,
u'yp\xf3': 14,
u'\xe1r\xf6': 14,
u'd\xf3\xe9': 14,
u'ykj': 14,
u'spt': 14,
u'gbr': 14,
u'x#g': 14,
u'x#d': 14,
u'ccn': 14,
u'\xf3t\xed': 14,
u'stk': 14,
u'st\xfa': 14,
u'wom': 14,
u'idc': 14,
u'utj': 14,
u'\u0171dj': 14,
u'#hf': 14,
u'hlm': 14,
u'hli': 14,
u'awe': 14,
u'g\u0151n': 14,
u'dpv': 14,
u'e\xf3r': 14,
u'fhe': 14,
u'd\xfck': 14,
u'u\xe1t': 14,
u'u\xe1b': 14,
u'#\u03b1#': 14,
u'tyl': 14,
u'\xe4rm': 14,
u'ny\xf3': 14,
u'fl\xf3': 14,
u'k\u0151r': 14,
u'tuz': 14,
u'pah': 14,
u't\u0151\xed': 14,
u'f\u0151o': 14,
u'f\u0151m': 14,
u'ryc': 14,
u'csv': 14,
u'oig': 14,
u'#d\xf3': 14,
u'#dd': 14,
u'ewt': 14,
u'\xfcth': 14,
u'sdi': 14,
u'lfu': 14,
u'#xe': 14,
u'y\u0171z': 14,
u'iof': 14,
u'nfp': 14,
u'ouj': 14,
u'occ': 14,
u'zb\xe1': 14,
u'ti\xe8': 14,
u'j\xe9\xfc': 14,
u'#vh': 14,
u'ejo': 14,
u'hua': 14,
u'\xe9g\xfa': 14,
u'sss': 14,
u'\xf3d\xf6': 14,
u'uhu': 14,
u'ig\xfa': 14,
u'fay': 14,
u'ta\xe9': 14,
u'iz\xf6': 14,
u'rii': 14,
u'#dv': 14,
u'rrn': 14,
u'\u0171km': 14,
u'byt': 14,
u'h#u': 14,
u'ebs': 14,
u'mfr': 14,
u'skr': 14,
u'l\xe9i': 14,
u'lmj': 13,
u'rls': 13,
u'\xe1z\xf6': 13,
u'\u0161\xedk': 13,
u'hap': 13,
u'\u0171r\xf6': 13,
u'mbh': 13,
u'mbj': 13,
u't#\xb0': 13,
u'\u0171#\xf3': 13,
u'#cp': 13,
u'heo': 13,
u'k\xfar': 13,
u'#\xf3m': 13,
u'#fs': 13,
u'jec': 13,
u'aac': 13,
u'k\u0171v': 13,
u'zkk': 13,
u'\u0151r\u0171': 13,
u'cib': 13,
u'\u0171zm': 13,
u'pfi': 13,
u'nkm': 13,
u'br#': 13,
u'lcm': 13,
u'h\xe1c': 13,
u'kvb': 13,
u'\xf3rn': 13,
u'm\u0171m': 13,
u'pst': 13,
u'if\xfc': 13,
u'\xf3v#': 13,
u'stj': 13,
u'\xe1jr': 13,
u'dvd': 13,
u'nf#': 13,
u'e+#': 13,
u'j\u0171s': 13,
u'ur\xe9': 13,
u'urq': 13,
u'#j\xfc': 13,
u'd\u0171n': 13,
u'b\xf3k': 13,
u'\xe1\xe9h': 13,
u'#nx': 13,
u'c\xe1\xe9': 13,
u'dkv': 13,
u'\xe1\u0161#': 13,
u'pc\xe9': 13,
u'ozc': 13,
u'dga': 13,
u'sye': 13,
u'oem': 13,
u's\xe9e': 13,
u'iib': 13,
u'eu\xfc': 13,
u'ts\u0171': 13,
u'mij': 13,
u'ad\u017e': 13,
u'#zh': 13,
u'i+#': 13,
u'\xe9av': 13,
u'pps': 13,
u'jef': 13,
u'\u010dar': 13,
u'fco': 13,
u'tkl': 13,
u'\xf3ul': 13,
u'yh\xfa': 13,
u'n\xfcs': 13,
u'hwe': 13,
u'jo\xe3': 13,
u'\xeddr': 13,
u'hoe': 13,
u'atd': 13,
u'xib': 13,
u'n\xe9d': 13,
u'b\u0159e': 13,
u'olp': 13,
u'khb': 13,
u'h\xf3b': 13,
u'd\u0151a': 13,
u'it\u0117': 13,
u'\u0151mm': 13,
u'kb\xe1': 13,
u'ziu': 13,
u'jcn': 13,
u'jco': 13,
u'uek': 13,
u't\xfcs': 13,
u'tl\xf6': 13,
u'btk': 13,
u'llp': 13,
u'uk\xfc': 13,
u'aoc': 13,
u'jtt': 13,
u'\xf3t\u0151': 13,
u'\xe9dm': 13,
u'asd': 13,
u'dt\xf6': 13,
u'naa': 13,
u'hlo': 13,
u'k\xf3p': 13,
u'g\u0151j': 13,
u'dpb': 13,
u'duh': 13,
u'\xe9s\xf6': 13,
u'yg\xe1': 13,
u'd\xfcg': 13,
u'o\xe3o': 13,
u'mc#': 13,
u'opm': 13,
u'opn': 13,
u'k\u0151t': 13,
u'r\xe4s': 13,
u'moa': 13,
u'f\u0151\xfa': 13,
u'csd': 13,
u'de\xdf': 13,
u'okg': 13,
u'a\u015f#': 13,
u'#dm': 13,
u'ikj': 13,
u'ly\xf6': 13,
u'lyl': 13,
u'dk\xe1': 13,
u'f#g': 13,
u'zf\xf3': 13,
u'lf\u0171': 13,
u'\xfcdz': 13,
u'ocn': 13,
u'e\xdf#': 13,
u'rvs': 13,
u'igc': 13,
u'r#y': 13,
u'nbr': 13,
u'oar': 13,
u'efh': 13,
u'\xeddo': 13,
u'ljh': 13,
u'cfc': 13,
u'caf': 13,
u'fe+': 13,
u'bby': 13,
u'y\xf3v': 13,
u'lvs': 13,
u'cfi': 13,
u'jma': 12,
u'\xfacc': 12,
u'\xf3ol': 12,
u'nzc': 12,
u'irp': 12,
u'irn': 12,
u'uet': 12,
u'sow': 12,
u'#m\xe3': 12,
u'g\xf6d': 12,
u'wra': 12,
u'z\u0171b': 12,
u'#f\xe4': 12,
u'\xfam#': 12,
u'kn\xf6': 12,
u'ijm': 12,
u't\u0171m': 12,
u'\xe1#z': 12,
u'\xe8#t': 12,
u'aey': 12,
u'tsk': 12,
u'e\xe1k': 12,
u'e\xe1c': 12,
u'rb\xfc': 12,
u'\xf6mi': 12,
u'krz': 12,
u'p\xfat': 12,
u'a#+': 12,
u'yr#': 12,
u'+fb': 12,
u'tn#': 12,
u'emj': 12,
u'\xe0#h': 12,
u'obh': 12,
u'b\u0171z': 12,
u'pnk': 12,
u'#fb': 12,
u'psk': 12,
u'psu': 12,
u'odf': 12,
u'rjh': 12,
u'\xe9\xe1r': 12,
u'ffo': 12,
u'\xfats': 12,
u'gms': 12,
u'st\xe2': 12,
u'#\u0151f': 12,
u'wig': 12,
u'cyb': 12,
u'#\xb0#': 12,
u'yae': 12,
u'mz\xf3': 12,
u'mzs': 12,
u'auc': 12,
u'ovr': 12,
u'c\xedd': 12,
u'#n#': 12,
u'\xf6ff': 12,
u'i\xe1g': 12,
u'c\xe1s': 12,
u'#cy': 12,
u'zs\xf6': 12,
u'\u0159ez': 12,
u'gea': 12,
u'aj\xfa': 12,
u'oi\xe1': 12,
u'vh\u0171': 12,
u'#b\u0159': 12,
u'\xf6jt': 12,
u'n\u0151\xfc': 12,
u'pca': 12,
u'\u0151n\xfc': 12,
u'\xfaje': 12,
u'\xf3f\xfc': 12,
u'\xe1dm': 12,
u'i\xfcl': 12,
u'r\u0151\xe1': 12,
u'd\xe9e': 12,
u'#ew': 12,
u'gii': 12,
u'llc': 12,
u'j\xe1m': 12,
u'g#+': 12,
u'gt\xed': 12,
u'v\xe9c': 12,
u'lh\xf3': 12,
u'\xf6z\xed': 12,
u'\xe9#\xf3': 12,
u'xp#': 12,
u'cts': 12,
u'#mk': 12,
u'ltd': 12,
u'r\xfal': 12,
u'joz': 12,
u'joo': 12,
u'rp#': 12,
u'tce': 12,
u'\xf3mr': 12,
u'\xe2yo': 12,
u'ndl': 12,
u'dsf': 12,
u'um\xfc': 12,
u'fox': 12,
u'\u0171m\xe9': 12,
u'nxp': 12,
u'ipk': 12,
u'dzl': 12,
u'#l#': 12,
u'\xe2nc': 12,
u'tt\u0171': 12,
u'tk\u0151': 12,
u'\xedl\xe1': 12,
u'\u013e#b': 12,
u'\xe9hk': 12,
u'\xe4fn': 12,
u'odg': 12,
u'ctr': 12,
u'rdh': 12,
u'vms': 12,
u'\xfami': 12,
u'm\xf6n': 12,
u'etw': 12,
u'cbl': 12,
u'phd': 12,
u'xei': 12,
u'h\u0171v': 12,
u'hpu': 12,
u'la\xe2': 12,
u'i\xf3o': 12,
u'uas': 12,
u'il\u0151': 12,
u'\xfcsk': 12,
u'bpb': 12,
u'u\xeds': 12,
u'aok': 12,
u'et\xfa': 12,
u'lmk': 12,
u'\xedp\u0151': 12,
u'wca': 12,
u'jt\xed': 12,
u'jtb': 12,
u'a\xe2y': 12,
u'as\xe9': 12,
u'rh\xf3': 12,
u'dtr': 12,
u'a\xfcz': 12,
u'z#+': 12,
u'l#\u0161': 12,
u'hla': 12,
u'k\xf3l': 12,
u'arw': 12,
u'mg#': 12,
u'g\u0151h': 12,
u'lu\xed': 12,
u'foo': 12,
u'l\u0151p': 12,
u'#ld': 12,
u'bae': 12,
u'u\xfcg': 12,
u'\u0151dm': 12,
u'\u0151ds': 12,
u'dii': 12,
u'zn\u0151': 12,
u'ul\xed': 12,
u'\xf6dr': 12,
u'l\xedn': 12,
u'a\u0161e': 12,
u'uh\xed': 12,
u'\xf3df': 12,
u'koe': 12,
u'peh': 12,
u'\u0151ll': 12,
u'\u0151l\xe1': 12,
u'lf\xfa': 12,
u'zf\u0171': 12,
u'\xf6l\xe1': 12,
u'ysk': 12,
u'ilr': 12,
u'mw#': 12,
u'h#\xfc': 12,
u'\xe9ch': 12,
u'vv\xed': 12,
u'nnn': 12,
u'pmh': 12,
u'd\xe9h': 12,
u'd\xe9p': 12,
u'pr\xf6': 12,
u'ej\u0151': 12,
u'ssm': 12,
u'h\xe4f': 12,
u'p\xe1i': 12,
u'ksk': 12,
u'ynk': 12,
u'rix': 12,
u'b\xf6j': 12,
u'ta\xf6': 12,
u'#kt': 12,
u'o\xe1r': 12,
u'ebk': 12,
u'eby': 12,
u'v\xf3h': 12,
u'dn#': 11,
u'jm\xe1': 11,
u'k\xf6m': 11,
u'bn\xe9': 11,
u'\u0144#v': 11,
u'mbb': 11,
u'\xe4ss': 11,
u'osw': 11,
u'tey': 11,
u'jil': 11,
u't#q': 11,
u'\xe9j\xe0': 11,
u'z\u0171s': 11,
u'ugg': 11,
u'\xfch\xe9': 11,
u'p\xf6n': 11,
u'\u0161#d': 11,
u'\u0171zl': 11,
u'evn': 11,
u't\u0171b': 11,
u'r\xf3g': 11,
u'lzn': 11,
u'aev': 11,
u'aee': 11,
u'\xf6m\xf3': 11,
u'a#\u010d': 11,
u'vso': 11,
u'nv\xed': 11,
u'lfp': 11,
u'eu\xf6': 11,
u'em\xed': 11,
u'gn\xfc': 11,
u'k#\u0171': 11,
u'\xe0#v': 11,
u'rnj': 11,
u'kvn': 11,
u'uz\xf3': 11,
u'\xe1mg': 11,
u'g\xfak': 11,
u'mb\xe9': 11,
u'eix': 11,
u'sr\xe1': 11,
u'dz\xf3': 11,
u'li\u0107': 11,
u'pue': 11,
u'asy': 11,
u'jr\xe1': 11,
u'nc\xf6': 11,
u'lii': 11,
u'\xb0cr': 11,
u'#w\xf6': 11,
u'l\xfac': 11,
u'\xe1t\xf6': 11,
u'qf#': 11,
u'mz\xfc': 11,
u'ya\xe1': 11,
u'o\xe1l': 11,
u'sji': 11,
u'dr\u010d': 11,
u'h\u0151v': 11,
u'unm': 11,
u'ft\u0151': 11,
u'lsr': 11,
u'#cg': 11,
u'\u010dr#': 11,
u'oid': 11,
u'\xedmi': 11,
u'kmt': 11,
u'iud': 11,
u'y\xedv': 11,
u'syn': 11,
u's\xe9\xfc': 11,
u'd\xf6l': 11,
u'\u03b5\u03b4\u03c1': 11,
u'euz': 11,
u'mi\u0144': 11,
u'miq': 11,
u'ubt': 11,
u'nwi': 11,
u'#z#': 11,
u'#zb': 11,
u'tow': 11,
u'to\xe1': 11,
u'+#c': 11,
u'e\xfcz': 11,
u'k#y': 11,
u'g#x': 11,
u'\u0151\xe9#': 11,
u'\u0151\xe9t': 11,
u'tk\xf3': 11,
u'lld': 11,
u'rkj': 11,
u'\u03b1\u03c3\u03b7': 11,
u'#r\xed': 11,
u'jsk': 11,
u'cdn': 11,
u'cdv': 11,
u'm\xedl': 11,
u'\xf3i\xe9': 11,
u'g\xe1d': 11,
u'ixo': 11,
u'ixe': 11,
u'#\u010dr': 11,
u'edc': 11,
u'r\u010da': 11,
u'bdo': 11,
u'\u0107ot': 11,
u'\xfazv': 11,
u'al\xe0': 11,
u'dli': 11,
u'fkl': 11,
u'myl': 11,
u'l\xe0#': 11,
u'dsk': 11,
u'f\xfcz': 11,
u'\xe9p\xfa': 11,
u'i\u0107o': 11,
u'v\xe9i': 11,
u'jkk': 11,
u'\xf3at': 11,
u'rm\xfc': 11,
u'ydu': 11,
u'fpa': 11,
u'fpr': 11,
u'o\xfcg': 11,
u'u#z': 11,
u'n\xedl': 11,
u'dds': 11,
u'\xe1es': 11,
u'itg': 11,
u'xas': 11,
u'ct\u0151': 11,
u'npb': 11,
u'#ye': 11,
u'ckl': 11,
u'ph\u0151': 11,
u'rvu': 11,
u'bt\u0151': 11,
u'\xe9l\xf3': 11,
u'i\xf3l': 11,
u'zew': 11,
u'\u0151to': 11,
u'ktc': 11,
u'd\xf3l': 11,
u'g\u0142a': 11,
u'g\u0142o': 11,
u'v\u0151p': 11,
u'spu': 11,
u'lmm': 11,
u'lmn': 11,
u'yt\xed': 11,
u'gwi': 11,
u'#\xbac': 11,
u'b#w': 11,
u'tdi': 11,
u'asg': 11,
u'li\xe8': 11,
u'a#\u0161': 11,
u'utl': 11,
u'naw': 11,
u'j\xe0#': 11,
u'otv': 11,
u'zyt': 11,
u'zys': 11,
u'r\xe1u': 11,
u'ty\xe9': 11,
u'tyt': 11,
u'i\u0119t': 11,
u'bap': 11,
u'yt\u0171': 11,
u'diy': 11,
u'\u0142as': 11,
u'\xfcck': 11,
u'\xe1bc': 11,
u'd#y': 11,
u'av\xfa': 11,
u'abk': 11,
u'\u0142os': 11,
u'r\xe9\xfc': 11,
u'fum': 11,
u'og\u0142': 11,
u'lfk': 11,
u'zf\xe1': 11,
u'#sg': 11,
u'a\xf6b': 11,
u'ksw': 11,
u's\u0151c': 11,
u'#xa': 11,
u'ioi': 11,
u'piv': 11,
u'\u0107#e': 11,
u'\xe3ne': 11,
u'\u0151\u0151r': 11,
u'thm': 11,
u'oct': 11,
u'nug': 11,
u'm\xe3n': 11,
u'nnf': 11,
u'w\xf6g': 11,
u'an\u0163': 11,
u'y\u0151k': 11,
u'\xe9zj': 11,
u'#pb': 11,
u'r#\u010d': 11,
u'y#y': 11,
u'arq': 11,
u'pv#': 11,
u'rih': 11,
u'kb\xe9': 11,
u'#mv': 11,
u'xra': 11,
u'\xfate': 11,
u'crd': 11,
u'mf\xf3': 11,
u'#oa': 10,
u'#og': 10,
u'ir\xfa': 10,
u'irh': 10,
u'yf\xe1': 10,
u'hay': 10,
u'bno': 10,
u'az\xf6': 10,
u'jig': 10,
u'm\xf3v': 10,
u'fmo': 10,
u'on\xfc': 10,
u'a\xeds': 10,
u'\xe1al': 10,
u'heb': 10,
u'#\xf3g': 10,
u'\xe9j\xfa': 10,
u'jex': 10,
u'aai': 10,
u'zk\xed': 10,
u'zkn': 10,
u's\xf6l': 10,
u'\xf3zc': 10,
u'tr#': 10,
u'\xe8ge': 10,
u'r\xf3\xe1': 10,
u'sg\xe9': 10,
u'w#v': 10,
u'w#g': 10,
u'w#j': 10,
u'ucn': 10,
u'lg\xf6': 10,
u'ofj': 10,
u'a#\u0171': 10,
u'p\u0151t': 10,
u'nkg': 10,
u'emy': 10,
u'aiu': 10,
u'\u0151z\xfc': 10,
u't\xfab': 10,
u't\xfag': 10,
u'\xedrf': 10,
u'fbi': 10,
u'z\xf3\xe1': 10,
u'noe': 10,
u'hvr': 10,
u'\xe9fo': 10,
u'amd': 10,
u'\xed#s': 10,
u'#ao': 10,
u'uv\xf3': 10,
u'i\xe9b': 10,
u'\xf3v\xf3': 10,
u'\u03c3\u03c4\u03b9': 10,
u'c\xe9d': 10,
u'c\xe9z': 10,
u'#wz': 10,
u'j\u0171n': 10,
u'fj#': 10,
u'kej': 10,
u'\u0171je': 10,
u'\xf6bl': 10,
u'mea': 10,
u'jj\xfc': 10,
u'om\xf6': 10,
u'kuf': 10,
u'omy': 10,
u'\xfcan': 10,
u'yee': 10,
u'\u03c2##': 10,
u'fsz': 10,
u'ayn': 10,
u'y\xf6d': 10,
u'zsh': 10,
u'i\u0151r': 10,
u'vkh': 10,
u'y\xe9l': 10,
u'\xfant': 10,
u'h\xf6n': 10,
u'\xedgt': 10,
u'p\xf3v': 10,
u'goy': 10,
u'eyk': 10,
u'bke': 10,
u'pce': 10,
u'mmk': 10,
u'#\xe9#': 10,
u'\u0148ov': 10,
u'syd': 10,
u'sys': 10,
u'ldp': 10,
u'nsd': 10,
u'ddj': 10,
u'\u03c3\u03b7#': 10,
u'chc': 10,
u'd\u0103i': 10,
u'chk': 10,
u'iie': 10,
u'd\xf6v': 10,
u'\xe1df': 10,
u'tsw': 10,
u'z\u0151f': 10,
u'z\u0151c': 10,
u'\u0103ne': 10,
u'u\u0161n': 10,
u'dcn': 10,
u'ub\u0151': 10,
u'rcm': 10,
u'ubh': 10,
u'nwa': 10,
u'\u010des': 10,
u'r\xfcc': 10,
u'ro\xe1': 10,
u'll\xf6': 10,
u'ky\xf6': 10,
u'tbt': 10,
u'gtm': 10,
u'v\xe9m': 10,
u'rk\u016b': 10,
u'su\xe1': 10,
u'o\u017eu': 10,
u'js\xfa': 10,
u'jss': 10,
u'ieh': 10,
u'tg\xe1': 10,
u'\u010d#a': 10,
u'edw': 10,
u'\xf3g\xf6': 10,
u'ojo': 10,
u'hk#': 10,
u'hka': 10,
u'i\xe8g': 10,
u'yj\xe9': 10,
u'#g\u0142': 10,
u'\u010die': 10,
u'\xf3mm': 10,
u'kdi': 10,
u's#\u0163': 10,
u'ndy': 10,
u'md\xfa': 10,
u'e\u0161t': 10,
u'k\u016bn': 10,
u'#a+': 10,
u'tkr': 10,
u'ydi': 10,
u'iph': 10,
u'fp\xe1': 10,
u'f\u0171#': 10,
u'd\u0151\xfc': 10,
u'd\u0151\xe1': 10,
u'p\xe9c': 10,
u'sa\xf6': 10,
u'xm\u0171': 10,
u'\u015fes': 10,
u'\u0171\xe1l': 10,
u'gd\u0151': 10,
u'\xf6k\xed': 10,
u'zh\xed': 10,
u'k\xfcr': 10,
u'exj': 10,
u'pdo': 10,
u'ct\xe1': 10,
u'le\u0161': 10,
u'npn': 10,
u'\xdf#\xfa': 10,
u'h\u0171b': 10,
u'\u0111el': 10,
u'akz': 10,
u'uaz': 10,
u'ypa': 10,
u'kt\xf6': 10,
u'co\xfc': 10,
u'cox': 10,
u'pl\xfc': 10,
u'#\u0111e': 10,
u'\xf3\xe1t': 10,
u'spd': 10,
u'aon': 10,
u'yt\xe9': 10,
u'#q#': 10,
u'vui': 10,
u'ss\xf3': 10,
u'nm\u0171': 10,
u'\u017eu\u0161': 10,
u'rh\xfa': 10,
u'ut\u0151': 10,
u'l#\u0171': 10,
u'#hc': 10,
u'k\xf3\xf6': 10,
u'be\u0148': 10,
u'y\xfcr': 10,
u'otp': 10,
u'otw': 10,
u'jl\xed': 10,
u'l\u0151\u0151': 10,
u'\xf6gr': 10,
u'baf': 10,
u'\u0151dd': 10,
u'\u0151dr': 10,
u'k\u0151j': 10,
u'bm\xe1': 10,
u'ab\xed': 10,
u'hyn': 10,
u'ggr': 10,
u'i\xfan': 10,
u'uhi': 10,
u'\xfald': 10,
u'fuj': 10,
u'fui': 10,
u'ewc': 10,
u'bi\xe9': 10,
u'bi\xf3': 10,
u'xbr': 10,
u'afk': 10,
u'lfg': 10,
u'a\xf6n': 10,
u'pii': 10,
u'pim': 10,
u'\xfcpp': 10,
u'\u0107#m': 10,
u'e\u0148o': 10,
u'n\xfan': 10,
u'\u0161n\xed': 10,
u'\xe9ci': 10,
u'thn': 10,
u'icp': 10,
u'\u03c4\u03b9\u03c2': 10,
u'mss': 10,
u'msr': 10,
u'pm\u0171': 10,
u'ryi': 10,
u'hui': 10,
u'ko\u017e': 10,
u'guj': 10,
u'bfu': 10,
u'riy': 10,
u'e\xf6z': 10,
u'#td': 10,
u'#tn': 10,
u'y#\u0171': 10,
u'cac': 10,
u'cae': 10,
u'\u0171tu': 10,
u'\u0171kk': 10,
u'kbr': 10,
u'\u0171kr': 10,
u'owb': 10,
u'ybo': 10,
u'#\u03c3\u03c4': 10,
u'skj': 10,
u'\u03b9\u03c2#': 10,
u'\xe9ru': 9,
u'wj\xe1': 9,
u'uop': 9,
u'tzm': 9,
u'w\xe1b': 9,
u'bn#': 9,
u'\u0171r\xe1': 9,
u'j#\xed': 9,
u'\xfatp': 9,
u't\xe9\xfc': 9,
u'jir': 9,
u'm\xf3a': 9,
u'\xf3co': 9,
u'zoo': 9,
u'jtm': 9,
u't#\u0163': 9,
u'hey': 9,
u'hef': 9,
u'\u0119te': 9,
u'gfl': 9,
u'z\u0171z': 9,
u'k\u0171r': 9,
u'psa': 9,
u'j\u0151d': 9,
u'\u0171zk': 9,
u'sgp': 9,
u'ch\xe4': 9,
u'w#h': 9,
u'jaa': 9,
u'tsn': 9,
u'of\xf6': 9,
u'krs': 9,
u'inl': 9,
u'jk\xf3': 9,
u'z\u0151m': 9,
u'brl': 9,
u'mvj': 9,
u'o\xedt': 9,
u'wzn': 9,
u'udm': 9,
u'ob\xf3': 9,
u'kvp': 9,
u'ha\xe1': 9,
u'\xe0je': 9,
u'uz\xe1': 9,
u'm\u0171\xe9': 9,
u'\u0171b\xe9': 9,
u'yif': 9,
u'no\u015b': 9,
u'mri': 9,
u'\xf6lf': 9,
u'#xb': 9,
u'eif': 9,
u'\xe3es': 9,
u'\xe9fa': 9,
u'am\xf6': 9,
u'\u0159ic': 9,
u't\xf6g': 9,
u'ceo': 9,
u'ceu': 9,
u'cez': 9,
u'tf#': 9,
u'ncu': 9,
u'ncf': 9,
u'ifl': 9,
u'es\xe1': 9,
u'l\u010dy': 9,
u'eex': 9,
u'eez': 9,
u'c\xe9#': 9,
u'wim': 9,
u'\xe1ut': 9,
u'urh': 9,
u'ury': 9,
u'\xedel': 9,
u'c\xf6t': 9,
u'eab': 9,
u'eap': 9,
u'mee': 9,
u'au\u015f': 9,
u'un\xed': 9,
u'unj': 9,
u'\xf3nj': 9,
u'\u0171ny': 9,
u'hbo': 9,
u'ma\u0142': 9,
u'e#\u0161': 9,
u'or\xfc': 9,
u'c\xe1l': 9,
u'a\u0163i': 9,
u'geu': 9,
u'y\xe9g': 9,
u'nh\u0151': 9,
u'\u0142ow': 9,
u'h\xf6r': 9,
u'#by': 9,
u'd\xfaa': 9,
u'\xe4#u': 9,
u'u\u015fe': 9,
u'mms': 9,
u'\xe9ib': 9,
u'gaw': 9,
u'rg\xfc': 9,
u'\u0151\xe1t': 9,
u'\xe1d\xfc': 9,
u'pge': 9,
u'\xe9m\u0171': 9,
u'\xe9m\xe9': 9,
u'\xe9mn': 9,
u'dco': 9,
u'oac': 9,
u'ubu': 9,
u's\xede': 9,
u'\xf6rs': 9,
u'bs\xe1': 9,
u'bsi': 9,
u'+#p': 9,
u'viu': 9,
u'\u03b4\u03c1\u03af': 9,
u'\u03b9#\u03c3': 9,
u's\xe1z': 9,
u's\xe1p': 9,
u'gtp': 9,
u'iau': 9,
u'tkt': 9,
u'poe': 9,
u'\u0117ni': 9,
u'y\u0171k': 9,
u'o\u015b\u0107': 9,
u'suf': 9,
u'al\u010d': 9,
u'tgr': 9,
u'\u010d#m': 9,
u'i#x': 9,
u'edl': 9,
u'\xf3#x': 9,
u'##+': 9,
u'dl#': 9,
u's\xfcp': 9,
u'\xedjc': 9,
u'dsp': 9,
u'\xf1ig': 9,
u'\u015b\u0107#': 9,
u'ij\xf3': 9,
u'\u03af\u03b1\u03c3': 9,
u'dh\xed': 9,
u'#ax': 9,
u'vea': 9,
u'a\xefd': 9,
u'h\xf3k': 9,
u'd\u0151\xe9': 9,
u'z\xe1d': 9,
u'\u0142\xf3d': 9,
u'\u0103ia': 9,
u'\u0151i\xe9': 9,
u'\xfcni': 9,
u'n\u0163a': 9,
u'fts': 9,
u'ct\xe9': 9,
u'\u0171t\xed': 9,
u'tp\xf3': 9,
u'hgk': 9,
u'se\xe1': 9,
u'\xefd#': 9,
u'\u017eda': 9,
u'mlb': 9,
u'mlt': 9,
u'ml\xfc': 9,
u'\xf3\xe9v': 9,
u'jcr': 9,
u'ck\xf3': 9,
u'ckj': 9,
u'cku': 9,
u'la\u0163': 9,
u'\u0161ov': 9,
u'\u010dyt': 9,
u'ak\u0151': 9,
u'\xfagh': 9,
u'yp\xe9': 9,
u'ktv': 9,
u'ktp': 9,
u'ktg': 9,
u'\xf6s\xfc': 9,
u'co\xed': 9,
u'ni\u0119': 9,
u'm\u0103n': 9,
u'gbt': 9,
u'lmr': 9,
u'kxj': 9,
u'vut': 9,
u'\xe9dl': 9,
u'boi': 9,
u'#uz': 9,
u'#hm': 9,
u'awb': 9,
u'dp\xe1': 9,
u'\xf3\xfcl': 9,
u'swe': 9,
u'fhk': 9,
u'\xedcs': 9,
u'\u0171hi': 9,
u'ty\u0144': 9,
u'l\xe1p': 9,
u'tyr': 9,
u'opd': 9,
u'ul\u0117': 9,
u'\xf6d#': 9,
u's\xf3z': 9,
u'vjk': 9,
u'k\u0151e': 9,
u'k\u0151h': 9,
u'l\u0117n': 9,
u'hd#': 9,
u'r\xf6f': 9,
u'xna': 9,
u'\xfas\xfa': 9,
u't\u0151o': 9,
u'\u03c5\u03bd\u03b5': 9,
u'hya': 9,
u't\u0151\xe9': 9,
u'abf': 9,
u'abm': 9,
u'\u03c1\u03af\u03b1': 9,
u'ok\xf3': 9,
u'koj': 9,
u'\xf3d\u017a': 9,
u'l\xe9f': 9,
u'fug': 9,
u'\xfctl': 9,
u'mkn': 9,
u'mkv': 9,
u'sdp': 9,
u'h\xf6d': 9,
u'g\xf3m': 9,
u'g\xf3o': 9,
u'afj': 9,
u'f#u': 9,
u'f#w': 9,
u'f#o': 9,
u'a\xf6l': 9,
u'\u0171ks': 9,
u't\u0117m': 9,
u'\xfcki': 9,
u'ysi': 9,
u'bue': 9,
u'ra\xfa': 9,
u'ocu': 9,
u'kwa': 9,
u'rnn': 9,
u'a\xfal': 9,
u'cn\xe1': 9,
u'\u0151z\xf3': 9,
u'cnu': 9,
u'h\u0151d': 9,
u'\u0117mi': 9,
u'\u03bd\u03b5\u03b4': 9,
u'msc': 9,
u'ryj': 9,
u'y\u0151#': 9,
u'uhe': 9,
u'mm\xed': 9,
u'j\xf3p': 9,
u'iz\xfc': 9,
u'c\xf3b': 9,
u'c\xf3t': 9,
u'efn': 9,
u'\xf3dl': 9,
u'rr\xf6': 9,
u'tbu': 9,
u'rrj': 9,
u'rrt': 9,
u'\xe1vh': 9,
u'\xe1vj': 9,
u'\u0171ka': 9,
u'\xfcde': 9,
u'\u03c3\u03c5\u03bd': 9,
u'#\u03c3\u03c5': 9,
u'bbb': 9,
u'avv': 9,
u'p\xe9d': 8,
u'\xe9r\u0171': 8,
u'vgy': 8,
u'#oj': 8,
u'cub': 8,
u'tzs': 8,
u'haw': 8,
u'j\xf6r': 8,
u'mbn': 8,
u'\xfata': 8,
u'h\xe3e': 8,
u'jit': 8,
u'uos': 8,
u'ivs': 8,
u'yz\xfc': 8,
u'\xf3ot': 8,
u'g\xf6v': 8,
u'pbt': 8,
u'\u0151ot': 8,
u'aah': 8,
u'\u0151rf': 8,
u'\xfchg': 8,
u'h\xe9v': 8,
u'\xfch#': 8,
u'mj\xfc': 8,
u'w#z': 8,
u'fdi': 8,
u'jaw': 8,
u'lgb': 8,
u'\xfain': 8,
u'nv#': 8,
u'p\u0151i': 8,
u'\xf6mh': 8,
u'kr\u0161': 8,
u'k\xe9\xfc': 8,
u'\xdfun': 8,
u'r\u0161k': 8,
u'm\u0171\xfc': 8,
u'sox': 8,
u'hva': 8,
u'y\xfar': 8,
u'\xed#e': 8,
u'a++': 8,
u'h\xf3c': 8,
u'ffb': 8,
u'ceh': 8,
u'gmm': 8,
u'n\xf3n': 8,
u'lkv': 8,
u'lk#': 8,
u'c\xe9i': 8,
u'wid': 8,
u'\xedzd': 8,
u'jn\xf6': 8,
u'l\xfav': 8,
u'rs\xfa': 8,
u'ngc': 8,
u'ng=': 8,
u'keo': 8,
u'yac': 8,
u'ubk': 8,
u'hnn': 8,
u'\u0151f\xfc': 8,
u'ov\xed': 8,
u'ovv': 8,
u'b\xf3v': 8,
u'\u03b1\u03c4\u03bf': 8,
u'#uv': 8,
u'fnl': 8,
u'\xf3nn': 8,
u'uk\xf3': 8,
u'#nf': 8,
u'\xf6f\xe9': 8,
u'\xed\xf1i': 8,
u'ma\u0161': 8,
u'ay\xe1': 8,
u'oiv': 8,
u'km\xf3': 8,
u'\xedmo': 8,
u'\xfann': 8,
u'iur': 8,
u'bka': 8,
u'mmn': 8,
u'nh#': 8,
u'o#w': 8,
u'jb\u0151': 8,
u'ufa': 8,
u'#fn': 8,
u'vlo': 8,
u'pk\xfc': 8,
u'sfr': 8,
u'v\xe1\xe9': 8,
u'#\xed\xf1': 8,
u'\xe9mm': 8,
u'\xe9mr': 8,
u'gm\xfa': 8,
u'nhu': 8,
u'toa': 8,
u'\u0151#\u0171': 8,
u'\u0151#q': 8,
u'\xe9ad': 8,
u'ppm': 8,
u'ppv': 8,
u'pp\xed': 8,
u'\xf3f#': 8,
u'\u0151vo': 8,
u'gth': 8,
u'gf\xfa': 8,
u'nld': 8,
u'\xe9ep': 8,
u'y\u0171i': 8,
u'pt\xed': 8,
u'lh\xe3': 8,
u'oyd': 8,
u'#rw': 8,
u'gp\xe9': 8,
u'ie\xdf': 8,
u'fge': 8,
u'aaz': 8,
u'tgi': 8,
u'#i+': 8,
u'jlu': 8,
u'++#': 8,
u'ixa': 8,
u'i#q': 8,
u'edg': 8,
u'edy': 8,
u'\xf3g\xf3': 8,
u'ojz': 8,
u'v\xedk': 8,
u'\xe4ub': 8,
u'rp\xe9': 8,
u'vav': 8,
u'\xf8re': 8,
u'fho': 8,
u'atw': 8,
u'lp\xf6': 8,
u'fod': 8,
u'h\xf3h': 8,
u'h\xf3v': 8,
u'tkv': 8,
u'ydd': 8,
u'bl\xf6': 8,
u'blu': 8,
u'hcf': 8,
u'jc\xe9': 8,
u'rml': 8,
u'zwa': 8,
u'ddy': 8,
u'acy': 8,
u'ohs': 8,
u'rx#': 8,
u'\u0161ka': 8,
u'bh#': 8,
u'tpn': 8,
u'hg#': 8,
u'\xf3\xe9n': 8,
u'rdp': 8,
u'rdk': 8,
u'\xfakh': 8,
u'#\xfch': 8,
u'ck\xf6': 8,
u'ckk': 8,
u'ckr': 8,
u'tl#': 8,
u'hn\xe9': 8,
u'la\u0161': 8,
u'la\xe9': 8,
u'i\xf3u': 8,
u'ktm': 8,
u'x\xe9r': 8,
u'niy': 8,
u'\xb0#\xb0': 8,
u'epg': 8,
u'htr': 8,
u'aog': 8,
u'aob': 8,
u'izy': 8,
u'u\xe9#': 8,
u'p\u0151j': 8,
u'boa': 8,
u'gss': 8,
u'idv': 8,
u'z#\xb0': 8,
u'\xf3hu': 8,
u'\u0171d#': 8,
u'#hg': 8,
u'ecm': 8,
u'ecz': 8,
u'snp': 8,
u'aw\xe1': 8,
u'awo': 8,
u'mgl': 8,
u'a\xf3#': 8,
u'is\xf6': 8,
u'e\xf3v': 8,
u'e\xf3k': 8,
u'jlm': 8,
u'\xf6gh': 8,
u'kgs': 8,
u'kgr': 8,
u'#l\xfa': 8,
u'\u0171h\xf6': 8,
u'isd': 8,
u'n#<': 8,
u'is\u0142': 8,
u'ba\xf6': 8,
u'lqa': 8,
u'opv': 8,
u'wwf': 8,
u'\u0171li': 8,
u'\xfccs': 8,
u'pa\xed': 8,
u'bmu': 8,
u'pax': 8,
u'moj': 8,
u'f\xe1\xe9': 8,
u'moe': 8,
u'a\u0161\xed': 8,
u'o\u0161#': 8,
u'#d\u0103': 8,
u'cgt': 8,
u'bim': 8,
u'pep': 8,
u'usg': 8,
u'af\xfa': 8,
u'dao': 8,
u'udb': 8,
u'og\xed': 8,
u'kse': 8,
u'\xe1jn': 8,
u'\xe1jb': 8,
u'pi\u0144': 8,
u'pij': 8,
u'xfo': 8,
u'en\u0117': 8,
u'n\xfar': 8,
u'm\u0151r': 8,
u'zb#': 8,
u'nud': 8,
u'h\u0151b': 8,
u'ryk': 8,
u'r\xfad': 8,
u'e\xdfu': 8,
u'y\u0151t': 8,
u'rn\u0151': 8,
u'ln#': 8,
u'#p#': 8,
u'fad': 8,
u'yns': 8,
u'izj': 8,
u'dud': 8,
u'due': 8,
u'#tk': 8,
u'us\u0142': 8,
u'y\u0171h': 8,
u'\xf3kj': 8,
u'kbu': 8,
u'\xfcd\xf6': 8,
u'j\xe1a': 8,
u'tpt': 8,
u'\xf6cs': 8,
u'j\xfaj': 8,
u'j\xfad': 8,
u'hm\xe1': 8,
u'y\xf3z': 8,
u'y\xf3r': 8,
u'owr': 8,
u'p\xe9e': 7,
u'jm#': 7,
u'czw': 7,
u'\xedbo': 7,
u'r\u0171k': 7,
u'\xe1zy': 7,
u'yfr': 7,
u'rgt': 7,
u'\u0171rs': 7,
u'j\xf6k': 7,
u'azm': 7,
u'\xfat\xf3': 7,
u'\xe9vm': 7,
u'p\xedg': 7,
u'dji': 7,
u'm\xf3z': 7,
u'ukm': 7,
u'#ct': 7,
u'#cv': 7,
u'yzs': 7,
u'ivb': 7,
u'ivt': 7,
u't#+': 7,
u'he\xfc': 7,
u'scr': 7,
u'sc\xf3': 7,
u'wr\xe9': 7,
u'aav': 7,
u'\u0161ek': 7,
u'\u0161en': 7,
u'zkc': 7,
u'kn\u0151': 7,
u'\u0151rl': 7,
u'cr\u0151': 7,
u'\xedjo': 7,
u'a\xf1a': 7,
u'amg': 7,
u'knk': 7,
u'\u0171zd': 7,
u'srb': 7,
u'lzb': 7,
u'u\xf3t': 7,
u'w#t': 7,
u'dbl': 7,
u'aei': 7,
u'tsh': 7,
u'gb\xf6': 7,
u'lg\xfc': 7,
u'wef': 7,
u'wet': 7,
u'p\xfav': 7,
u'p\xfaa': 7,
u'+fi': 7,
u'cmp': 7,
u'mv\xf6': 7,
u'udg': 7,
u'hry': 7,
u'hru': 7,
u'\xe9bn': 7,
u'\xe0#p': 7,
u'lcr': 7,
u'\xe0##': 7,
u'h\xe1\xed': 7,
u'\xedrz': 7,
u'y#x': 7,
u'fba': 7,
u's\u0151a': 7,
u'ibn': 7,
u'soa': 7,
u'noj': 7,
u'#fm': 7,
u'yih': 7,
u'mro': 7,
u'o\xe9#': 7,
u'psr': 7,
u'y\xfav': 7,
u'\xed#\xe9': 7,
u't\xf6v': 7,
u'\xe9ml': 7,
u'tfm': 7,
u'\xf3vl': 7,
u'\xf3vt': 7,
u'm#y': 7,
u'm#\u0163': 7,
u'\u03b5\u03b9#': 7,
u'l\xfag': 7,
u'url': 7,
u'yaj': 7,
u'\xdfje': 7,
u'\xf6b\xfc': 7,
u'o\xe1z': 7,
u'c#\xed': 7,
u'aui': 7,
u'auh': 7,
u'sj\xfa': 7,
u'ovc': 7,
u'wut': 7,
u'\xedn\xfc': 7,
u'doa': 7,
u'omz': 7,
u'vpt': 7,
u'i\xe1z': 7,
u'xle': 7,
u'\xe1\xed#': 7,
u'e#y': 7,
u'vki': 7,
u'dku': 7,
u'dkr': 7,
u'oil': 7,
u'\u0142op': 7,
u'ujp': 7,
u'h\xf6t': 7,
u'\xfcmh': 7,
u'e\u015fb': 7,
u'\xfa\xe1l': 7,
u'bk\xe1': 7,
u'hf#': 7,
u'\xe1\u0161k': 7,
u'j\xf3o': 7,
u'mm\u0171': 7,
u'#\xe9i': 7,
u'\xe9ih': 7,
u'vbo': 7,
u'jbo': 7,
u'rgs': 7,
u'\u0171#\u0151': 7,
u'ld\u0159': 7,
u'\xf3fl': 7,
u'tvc': 7,
u'ddo': 7,
u'chy': 7,
u'iic': 7,
u'd\xf6f': 7,
u't#<': 7,
u'shn': 7,
u'\xe9mu': 7,
u'rcl': 7,
u'm\xfaa': 7,
u'm\xfan': 7,
u'oas': 7,
u'kuj': 7,
u'\xfaf\xed': 7,
u'ims': 7,
u'\xfcr\xf6': 7,
u'xxx': 7,
u'ro\u0161': 7,
u'mgo': 7,
u'\xe1l\u0151': 7,
u'fct': 7,
u'fcc': 7,
u'nlv': 7,
u'o\u015ft': 7,
u'pov': 7,
u'v\xe9e': 7,
u'v\xe9j': 7,
u'#\xe5s': 7,
u'tb\u0171': 7,
u'\xe9en': 7,
u'ptn': 7,
u'ptt': 7,
u'\xf1a#': 7,
u'oyo': 7,
u'rtg': 7,
u'uut': 7,
u'tgv': 7,
u'#iw': 7,
u'\xe9\xe9t': 7,
u'yl\xfc': 7,
u'\u0117je': 7,
u'tpu': 7,
u'hki': 7,
u'ed\u0171': 7,
u'j\xfca': 7,
u'#vf': 7,
u'joi': 7,
u'i\u0161o': 7,
u'#m\u0103': 7,
u'kde': 7,
u's\xfcv': 7,
u'ho\u0148': 7,
u'dsc': 7,
u'atx': 7,
u'xip': 7,
u'lpk': 7,
u'\xe9p\u0171': 7,
u'jkr': 7,
u'foz': 7,
u'\xe9\xe1j': 7,
u'yd\xf6': 7,
u'ipb': 7,
u'z\xe1p': 7,
u'ovk': 7,
u'sao': 7,
u'\xe9tg': 7,
u'gdf': 7,
u'gd\xe1': 7,
u'uig': 7,
u'ohd': 7,
u'cpe': 7,
u'\xf6ka': 7,
u'ex\xfc': 7,
u'bhi': 7,
u'\u0171ti': 7,
u'pdl': 7,
u'pde': 7,
u'pdc': 7,
u'\xf3\xe9h': 7,
u'jca': 7,
u'uem': 7,
u'uee': 7,
u'ctp': 7,
u'\xe1k\xe1': 7,
u'ckf': 7,
u'#yv': 7,
u'xeb': 7,
u'\xf3\xedz': 7,
u'hpr': 7,
u'alw': 7,
u'i\xf3\xfc': 7,
u'ntz': 7,
u'b\xe1\u0161': 7,
u'x\xe9v': 7,
u'bpk': 7,
u'bpn': 7,
u'mtn': 7,
u'ek\xf3': 7,
u'htm': 7,
u'ht\u0151': 7,
u'\xedp\xe1': 7,
u'za\xf6': 7,
u'\u015fti': 7,
u'yth': 7,
u'#\u017ed': 7,
u'ccm': 7,
u'\xf3td': 7,
u'#db': 7,
u'ef\u0171': 7,
u'mph': 7,
u'u\xe9r': 7,
u'as\xfc': 7,
u'\xe9dp': 7,
u'ofu': 7,
u'oxn': 7,
u'jp\xf3': 7,
u'l#y': 7,
u'ruo': 7,
u'na\xe9': 7,
u'rtd': 7,
u'h\xfck': 7,
u'#hb': 7,
u'yc#': 7,
u'be\u015f': 7,
u'y\xf6m': 7,
u'bo\u015f': 7,
u'ecn': 7,
u'i\u0144e': 7,
u'luj': 7,
u'sh\xf6': 7,
u'e\xf3#': 7,
u'yr\xfa': 7,
u'dmp': 7,
u'dmu': 7,
u'nef': 7,
u'ygo': 7,
u'\xf6dd': 7,
u'kg\xf6': 7,
u'#lg': 7,
u'd\xfcz': 7,
u'#\xfck': 7,
u'el\xf3': 7,
u'sj\xf8': 7,
u'\u0151d\xe1': 7,
u'\u0171\xfcz': 7,
u'jhi': 7,
u'di\xe9': 7,
u'gev': 7,
u'\u0171l#': 7,
u'kkl': 7,
u'kk\xfc': 7,
u's\xf3m': 7,
u'r\xedr': 7,
u'kk\u0151': 7,
u'k\u0151m': 7,
u'pa\u0161': 7,
u'pa\xe1': 7,
u'hde': 7,
u'fg#': 7,
u'\xfas\xe9': 7,
u'\u0151h\xed': 7,
u'hyb': 7,
u'ggt': 7,
u'okd': 7,
u'\xedkj': 7,
u'\xfale': 7,
u'\xfalu': 7,
u'#dp': 7,
u'r\xe9c': 7,
u'ik\xed': 7,
u'ewe': 7,
u'bif': 7,
u'peb': 7,
u'ufg': 7,
u'\u0151lv': 7,
u'\xe5sa': 7,
u'#\u0142\xf3': 7,
u'udw': 7,
u'#bn': 7,
u'ksi': 7,
u'\xfcka': 7,
u'io\xf6': 7,
u'nj#': 7,
u'buf': 7,
u'bug': 7,
u'mwr': 7,
u'n\xfam': 7,
u'a\u0161k': 7,
u'm\u0151g': 7,
u'zbu': 7,
u'\xf6pi': 7,
u'\xf6pk': 7,
u'a\xfaj': 7,
u'eyl': 7,
u'\xf3si': 7,
u'\xf3sd': 7,
u'\u0144er': 7,
u'f\xedt': 7,
u'ryd': 7,
u'\xf6tp': 7,
u'rvl': 7,
u'igt': 7,
u'ju\xe1': 7,
u'fax': 7,
u'mmt': 7,
u'fa\xe1': 7,
u'sbn': 7,
u'd\u0159i': 7,
u'a\xdfj': 7,
u'sk\xfd': 7,
u'\xe8#a': 7,
u'h#q': 7,
u'eb\xf3': 7,
u'mfh': 7,
u'y\xf3m': 7,
u'lvf': 7,
u'\xe9r\xe8': 6,
u'lmt': 6,
u'dn\xed': 6,
u'kfw': 6,
u'a\xe9k': 6,
u'ldz': 6,
u'irj': 6,
u'tz\xf3': 6,
u'tzn': 6,
u'\u0171rv': 6,
u'hao': 6,
u'j#\u0151': 6,
u'j\xf6m': 6,
u'soj': 6,
u'mbp': 6,
u'xk\xe9': 6,
u'xkl': 6,
u'osd': 6,
u'\u0151k\xed': 6,
u't\u0103l': 6,
u'jiz': 6,
u'onw': 6,
u'\xf3cp': 6,
u'kj\xf6': 6,
u'vkt': 6,
u'#qa': 6,
u'#c\u0103': 6,
u'fvi': 6,
u'\u0103t\u0103': 6,
u'scn': 6,
u'\xe9jt': 6,
u'df\u0151': 6,
u'hz#': 6,
u'\xe9j\u0171': 6,
u'dfr': 6,
u'dfa': 6,
u'aaj': 6,
u'ugm': 6,
u'ugl': 6,
u'ugc': 6,
u'k\u0171b': 6,
u'\xfclz': 6,
u'nrt': 6,
u'kn\xed': 6,
u'\xedjv': 6,
u'\xf3zm': 6,
u'pfa': 6,
u'pfs': 6,
u'mj\xfa': 6,
u'\xe9ng': 6,
u'w#e': 6,
u'w#\xe1': 6,
u'tsj': 6,
u'a#\u03bd': 6,
u'e\xe1i': 6,
u'e\xe1s': 6,
u'a#\xe0': 6,
u'yr\xe1': 6,
u'tn\u0151': 6,
u'cmo': 6,
u'\u0163ib': 6,
u'hrr': 6,
u'emg': 6,
u'hr\xed': 6,
u'bnp': 6,
u'ai\xfa': 6,
u'rnw': 6,
u'lck': 6,
u'zce': 6,
u'\xedrg': 6,
u'\xedrc': 6,
u'kvj': 6,
u'kvr': 6,
u'fb\u0151': 6,
u'z\xf3o': 6,
u'o\xe9l': 6,
u'hv#': 6,
u'psp': 6,
u'st\u0171': 6,
u'lof': 6,
u'\xe9\xe1v': 6,
u'a+#': 6,
u'\u0163a#': 6,
u'cew': 6,
u'ka\u0161': 6,
u'ka\xe1': 6,
u'\u0171fl': 6,
u'ym#': 6,
u'n\xf3e': 6,
u'n\xf3g': 6,
u'n\xf3v': 6,
u'\xb0cn': 6,
u'eec': 6,
u'nc\xf3': 6,
u'#\u0151e': 6,
u'\xedz\u0171': 6,
u'\xedzc': 6,
u'urf': 6,
u'ngm': 6,
u'\xf3j\xf6': 6,
u'\xe8le': 6,
u'bcn': 6,
u'hnt': 6,
u'o\u0148#': 6,
u'd\u0171e': 6,
u'ovu': 6,
u'b\xf3t': 6,
u'b\xf3b': 6,
u'fme': 6,
u'c\u0103t': 6,
u'fn\xe9': 6,
u'\xedah': 6,
u'\xe1\u010de': 6,
u'\u0171np': 6,
u'yea': 6,
u'yex': 6,
u'#nh': 6,
u'\xb0ck': 6,
u'#n\xed': 6,
u'fs#': 6,
u'fsp': 6,
u'\xf3ti': 6,
u'boy': 6,
u'zsf': 6,
u'\u0159ed': 6,
u'\u015bni': 6,
u'gex': 6,
u'gef': 6,
u'a\xedn': 6,
u'oik': 6,
u'oie': 6,
u'\xf3br': 6,
u'\xfank': 6,
u'\xfd#\xe1': 6,
u'vhu': 6,
u'vho': 6,
u'iuc': 6,
u'n\u0151f': 6,
u'nf\xfc': 6,
u'pco': 6,
u'sbr': 6,
u'\xf3w#': 6,
u'\xe9ik': 6,
u'sya': 6,
u'syr': 6,
u'uf\xe9': 6,
u'tvn': 6,
u'\u0171#w': 6,
u'#fv': 6,
u'chp': 6,
u'iiz': 6,
u'ch\xe8': 6,
u'ch\xf6': 6,
u'mii': 6,
u'k\xe4i': 6,
u'uib': 6,
u'#\xeds': 6,
u'dcv': 6,
u'kui': 6,
u'\u03c4\u03bf#': 6,
u'#zl': 6,
u'vpn': 6,
u'\xe5la': 6,
u'odm': 6,
u'imk': 6,
u'\u010dec': 6,
u'im\xfa': 6,
u'\xfcr#': 6,
u'muj': 6,
u'xxo': 6,
u'ahv': 6,
u'rs\xf6': 6,
u'lll': 6,
u'gt\xfa': 6,
u'shl': 6,
u'\xe1l\xf6': 6,
u'ia\xed': 6,
u'v\xe9a': 6,
u'#\xe5l': 6,
u'suc': 6,
u'lh\xf6': 6,
u'\xf6ra': 6,
u'cde': 6,
u'g\u0171k': 6,
u'\xe1pj': 6,
u't\u0159e': 6,
u'\xf3id': 6,
u'xpr': 6,
u'exo': 6,
u'hke': 6,
u'j\xfcr': 6,
u'cru': 6,
u'\u0151a#': 6,
u'rfj': 6,
u'd\xe9c': 6,
u'inw': 6,
u'joy': 6,
u'##\xe8': 6,
u's#\xb0': 6,
u'vaa': 6,
u's#\xe0': 6,
u'yu#': 6,
u'mly': 6,
u'txe': 6,
u'#gg': 6,
u'mdi': 6,
u'xie': 6,
u'xii': 6,
u'ztl': 6,
u'\u0151ed': 6,
u'\u0151ex': 6,
u'\xfav\xe9': 6,
u'jk\xed': 6,
u'\u0171mo': 6,
u'fpb': 6,
u'#\u03b2#': 6,
u'\xe1cb': 6,
u'lnb': 6,
u'\xf8kk': 6,
u'\xed#\u010d': 6,
u'\xedle': 6,
u'a\xf3t': 6,
u'ftb': 6,
u'exx': 6,
u'#p\xfa': 6,
u'ctv': 6,
u'mll': 6,
u'\u0151mr': 6,
u'\u0151mn': 6,
u'\xfakt': 6,
u'n\u0171v': 6,
u'rdc': 6,
u'zi\xe8': 6,
u'\u0151p\xe1': 6,
u'od\u0103': 6,
u'ck\xfc': 6,
u'bt\xf6': 6,
u'bts': 6,
u'bto': 6,
u'unu': 6,
u'\u0171\xe9r': 6,
u'\xe9l\xed': 6,
u'b\xe1g': 6,
u'\xe1r\xfc': 6,
u'coj': 6,
u'tht': 6,
u'yk\xed': 6,
u'pl\xed': 6,
u'bpm': 6,
u'bps': 6,
u'bpv': 6,
u'pln': 6,
u'mt\xed': 6,
u'ke\xfc': 6,
u'ghz': 6,
u'b\xedb': 6,
u'ytn': 6,
u'ytk': 6,
u'ccw': 6,
u'ccj': 6,
u'yoo': 6,
u'mpf': 6,
u'mpb': 6,
u'o\xebl': 6,
u'st\u0159': 6,
u'\xe9df': 6,
u'\xe9d\xed': 6,
u'wot': 6,
u'woe': 6,
u'b\xe9h': 6,
u'#u#': 6,
u'vyn': 6,
u'l\xf8k': 6,
u'kce': 6,
u'#hv': 6,
u'#\u03bd\u03b1': 6,
u'y\xf6l': 6,
u'hld': 6,
u'awr': 6,
u'shd': 6,
u'\u03b1\u03b9#': 6,
u'lua': 6,
u'a#<': 6,
u'otk': 6,
u'foh': 6,
u'#l\xfc': 6,
u'vf\xfc': 6,
u'\xf6gv': 6,
u'mci': 6,
u'akg': 6,
u'tnt': 6,
u'\xf1sk': 6,
u'ooc': 6,
u'zn\xfa': 6,
u'\u0171lh': 6,
u'kkf': 6,
u'g=e': 6,
u'\xe1bt': 6,
u'\u0103ui': 6,
u'\xe4in': 6,
u'hda': 6,
u'\xfasp': 6,
u'\xfash': 6,
u'\xf3k\xfa': 6,
u'hyp': 6,
u'f\u0151f': 6,
u'h\xe8l': 6,
u'\u015bwi': 6,
u'ik\xe4': 6,
u'z\xfav': 6,
u'z\xfaj': 6,
u'z\xfa\xe1': 6,
u'\xfctj': 6,
u'mkr': 6,
u'sdn': 6,
u'sd\xe1': 6,
u'\u0151la': 6,
u'\u0103li': 6,
u'gow': 6,
u't\xe0j': 6,
u'afh': 6,
u'afv': 6,
u'm\xfch': 6,
u'cw#': 6,
u're\u0161': 6,
u'zfu': 6,
u'\xfaha': 6,
u'kss': 6,
u'cj\xe1': 6,
u'\xe1jh': 6,
u'nfc': 6,
u'pib': 6,
u's\xfav': 6,
u'\u0107#\xe9': 6,
u'\u0107#a': 6,
u'\u0107#v': 6,
u'n\u0151\xe9': 6,
u'thj': 6,
u'ocd': 6,
u'ocz': 6,
u'nuj': 6,
u'wf#': 6,
u'sl\u0151': 6,
u'\xf6pe': 6,
u'thw': 6,
u'ti\u0151': 6,
u'uvs': 6,
u'h\u0151f': 6,
u'nnl': 6,
u'pmr': 6,
u'pma': 6,
u'msp': 6,
u'msk': 6,
u'msf': 6,
u'\u0151zd': 6,
u'sru': 6,
u'hue': 6,
u'rm\xf6': 6,
u'ssh': 6,
u'dyj': 6,
u'=en': 6,
u'\xe9zl': 6,
u'\xe9zp': 6,
u'cbt': 6,
u'cb\u0151': 6,
u'\xf3wi': 6,
u'iz\xed': 6,
u'c\xf3n': 6,
u'\xeddk': 6,
u'ar\xfc': 6,
u'bfi': 6,
u'pvo': 6,
u'hi\xe9': 6,
u'f\xfal': 6,
u'\xf6la': 6,
u'\xedd\xed': 6,
u'wna': 6,
u'wnk': 6,
u'wnt': 6,
u'\u03bd\u03b1\u03c4': 6,
u'#tt': 6,
u'cfs': 6,
u'cfp': 6,
u'\xe8#r': 6,
u'\xf3kv': 6,
u'\xf3kl': 6,
u'\u0171ki': 6,
u'kbo': 6,
u'kbb': 6,
u'h#z': 6,
u'yb#': 6,
u'#mw': 6,
u'ebf': 6,
u'rl\xfc': 6,
u'y\xf3\xe1': 6,
u'mf\xfc': 6,
u'owo': 6,
u'\u0151ge': 6,
u'lvp': 6,
u'lvc': 6,
u'\u0151g\xe1': 6,
u'wju': 5,
u'fij': 5,
u'fi\xe9': 5,
u'\xe1zj': 5,
u'#ow': 5,
u'rgj': 5,
u'rgk': 5,
u'rgl': 5,
u'tzh': 5,
u'tzj': 5,
u'\u0171rf': 5,
u'xke': 5,
u'zro': 5,
u'os\u0142': 5,
u'on\u010d': 5,
u'\u010ds#': 5,
u'ji\u0159': 5,
u'fms': 5,
u'\xf3ck': 5,
u'on\xe7': 5,
u'zou': 5,
u'e\xe9h': 5,
u'e\xe9l': 5,
u'kj\xe9': 5,
u'ns\xed': 5,
u'uoe': 5,
u'fvo': 5,
u'zh\xe9': 5,
u't#\xe0': 5,
u'dsr': 5,
u'xot': 5,
u'xod': 5,
u'xok': 5,
u'#fc': 5,
u'dfi': 5,
u'\xf3gr': 5,
u'nrc': 5,
u'\xedji': 5,
u'vog': 5,
u'h\xe9#': 5,
u'h\xe9k': 5,
u'h\xe9m': 5,
u'ij\xe9': 5,
u'ijn': 5,
u'\u0161#\u0161': 5,
u'at\u0171': 5,
u'\u0159\xed#': 5,
u'\u0171zp': 5,
u'\u0171zf': 5,
u'evl': 5,
u't\u0171i': 5,
u'lzc': 5,
u'u\xf3h': 5,
u'w#w': 5,
u'w#n': 5,
u'w#\xfa': 5,
u'jag': 5,
u'ucf': 5,
u'lgg': 5,
u'\xfaib': 5,
u'e\xe1h': 5,
u'cv\xe9': 5,
u'a#\u0142': 5,
u'vs\xe1': 5,
u'nvv': 5,
u'nvt': 5,
u'vsi': 5,
u'in\xf6': 5,
u'inx': 5,
u'+fr': 5,
u'o\xf6b': 5,
u'cmm': 5,
u'brz': 5,
u'nky': 5,
u'pj\xf6': 5,
u'hr\xf3': 5,
u'aic': 5,
u'\u03bd\u03b9#': 5,
u'lcz': 5,
u'k#<': 5,
u'\xedrd': 5,
u'foe': 5,
u'fom': 5,
u'y#\xe0': 5,
u'uzo': 5,
u'uz#': 5,
u'cah': 5,
u'z\xf3\xe9': 5,
u'l\xf2#': 5,
u'soe': 5,
u'soo': 5,
u'm\u0171j': 5,
u'd\xeat': 5,
u'mru': 5,
u'mrf': 5,
u'hvd': 5,
u'gbn': 5,
u'eii': 5,
u'\u0148#\xfa': 5,
u'psh': 5,
u'\xed#a': 5,
u'\xed#v': 5,
u'\xed#p': 5,
u'\xf6gj': 5,
u'\xf6gk': 5,
u'#aw': 5,
u'\xeatr': 5,
u'#sf': 5,
u'ncd': 5,
u'h\xfag': 5,
u'ka\xf6': 5,
u'\xe9d\u0171': 5,
u'\xe1ji': 5,
u'\u0165as': 5,
u'dv\u0171': 5,
u'i\xe9j': 5,
u'\u0151bu': 5,
u'c\xe9u': 5,
u'wib': 5,
u'\xf3\xfat': 5,
u'\u0151b\u0171': 5,
u'#wy': 5,
u'#wf': 5,
u'i\u0159\xed': 5,
u'+re': 5,
u'n\u010di': 5,
u'\u0171ja': 5,
u'#jh': 5,
u'\xf6br': 5,
u'c#w': 5,
u'\xfa\xe9r': 5,
u'hnv': 5,
u'auj': 5,
u'b\xf3z': 5,
u'doe': 5,
u'f#\xfc': 5,
u'\xf3nb': 5,
u'yef': 5,
u'ayu': 5,
u'sn#': 5,
u'bo\u0161': 5,
u'lsb': 5,
u'lsm': 5,
u'c\xe1m': 5,
u'e#x': 5,
u'b\xe6k': 5,
u'z#\xf8': 5,
u'gep': 5,
u'geh': 5,
u'aj\xfc': 5,
u'ge\xe9': 5,
u'iml': 5,
u'oip': 5,
u'h\xf6g': 5,
u'\xedm\xfc': 5,
u'\xedgn': 5,
u'\xfan\xe1': 5,
u'#bc': 5,
u'#bj': 5,
u'vh\xe1': 5,
u'fw#': 5,
u'\xe7ai': 5,
u'eyh': 5,
u'hft': 5,
u'hfc': 5,
u'j\xf3u': 5,
u'j\xf3\xed': 5,
u'mmj': 5,
u'u\xf6k': 5,
u'o#q': 5,
u'vbp': 5,
u'syb': 5,
u'ldw': 5,
u'uf\xe1': 5,
u'\u0161\u0165a': 5,
u'oeh': 5,
u'\xf3fj': 5,
u'ddr': 5,
u'vl\xf6': 5,
u'chh': 5,
u'\xe1dv': 5,
u'ts\xfa': 5,
u'bwa': 5,
u'\xfc#s': 5,
u'sf\xe1': 5,
u'vdc': 5,
u'xd\xed': 5,
u'\u015fbo': 5,
u'\xe9ms': 5,
u'wyn': 5,
u'+#e': 5,
u'#z\u012b': 5,
u'kuc': 5,
u'kua': 5,
u'q#p': 5,
u's\xedl': 5,
u'vpv': 5,
u'\xf6ru': 5,
u'#z\xfc': 5,
u'\u010dez': 5,
u'to\xe9': 5,
u'bsk': 5,
u'pkt': 5,
u'+#o': 5,
u'+#v': 5,
u'n\u0117j': 5,
u'\u0151#x': 5,
u'\xe9aj': 5,
u'#\xe1k': 5,
u'#\xe1c': 5,
u'hsa': 5,
u'ppk': 5,
u'ppt': 5,
u'ah\xf3': 5,
u'cnt': 5,
u'tbr': 5,
u'gtb': 5,
u'gtt': 5,
u'\xf6vp': 5,
u'\u0151\xe9g': 5,
u'nly': 5,
u'nl\xf6': 5,
u'\xfa#\u0151': 5,
u'rkg': 5,
u'rkl': 5,
u'd\u017a#': 5,
u'#rm': 5,
u'ups': 5,
u'\xf2#r': 5,
u'\u0171el': 5,
u'qai': 5,
u'i#\u0163': 5,
u'i#+': 5,
u'\u010d#\xe9': 5,
u'hkt': 5,
u'j\xfcz': 5,
u'ap\xfc': 5,
u'\u0161te': 5,
u'\xf1ez': 5,
u'\xfazu': 5,
u'\xf3gn': 5,
u'\xfamm': 5,
u'##y': 5,
u'rph': 5,
u'rpl': 5,
u'\xeddn': 5,
u'\xedde': 5,
u'\xedd\xe9': 5,
u'\u0171ig': 5,
u'vap': 5,
u'vob': 5,
u'hoi': 5,
u'\xf3nt': 5,
u'md\xe1': 5,
u'ucl': 5,
u'e\u0161e': 5,
u'dsa': 5,
u'f\xfcv': 5,
u'xiv': 5,
u'oqu': 5,
u'ztd': 5,
u'dh\xfa': 5,
u'prt': 5,
u'ol\xf2': 5,
u'um=': 5,
u'\xf3aj': 5,
u'\xf3au': 5,
u'khm': 5,
u'\xfcbe': 5,
u'h\xf3e': 5,
u'g\xe9z': 5,
u'ipj': 5,
u'\xf6g\u0151': 5,
u'f\u0171r': 5,
u'ip\xf3': 5,
u'\xf6tf': 5,
u'sa\xe9': 5,
u'saz': 5,
u'u#y': 5,
u'u#x': 5,
u'\xe9tc': 5,
u'ddt': 5,
u'acm': 5,
u'uie': 5,
u'oh#': 5,
u'x\xfcn': 5,
u'x\xfck': 5,
u'cpi': 5,
u'cpn': 5,
u'cpa': 5,
u'zm#': 5,
u'\xedlo': 5,
u'klm': 5,
u'ueo': 5,
u'\u0171ts': 5,
u'tpm': 5,
u'tpv': 5,
u'pdb': 5,
u'\xe9ht': 5,
u'ueb': 5,
u'uex': 5,
u'ctu': 5,
u'rdz': 5,
u'vmi': 5,
u'#yi': 5,
u'v\xf6d': 5,
u'kpf': 5,
u'kp\xe9': 5,
u'cky': 5,
u'btv': 5,
u'bti': 5,
u'btj': 5,
u'g\xfcz': 5,
u'phu': 5,
u'xel': 5,
u'mh\xf6': 5,
u'h\u0171n': 5,
u'hpt': 5,
u'gls': 5,
u'ua\xfa': 5,
u'o\u0161\xed': 5,
u'o\u0161t': 5,
u'\xf3n\xf6': 5,
u'x\xe9b': 5,
u'ni\u0161': 5,
u'coz': 5,
u'yk\xf3': 5,
u'\u03b9\u03bd\u03b9': 5,
u'vdl': 5,
u'htb': 5,
u'#\xe0s': 5,
u'aov': 5,
u'lmf': 5,
u'lmg': 5,
u'vur': 5,
u'uxt': 5,
u'uxd': 5,
u'ccu': 5,
u'ccv': 5,
u'cci': 5,
u'cck': 5,
u'ccb': 5,
u'l\xfcs': 5,
u'b#\xb0': 5,
u'nmu': 5,
u'\xf3tb': 5,
u'\xf3t\u0171': 5,
u'n\xfd#': 5,
u'as\xf6': 5,
u'#\u03b9\u03bd': 5,
u'li\xe9': 5,
u'rh\xfc': 5,
u'liq': 5,
u'lix': 5,
u'oxl': 5,
u'won': 5,
u'b\xe9j': 5,
u'p\u0151g': 5,
u'jpe': 5,
u'\u012ble': 5,
u'id\xf6': 5,
u'l#q': 5,
u'l#+': 5,
u'ru\u0161': 5,
u'yci': 5,
u'#hk': 5,
u'uwe': 5,
u'hlj': 5,
u'hlb': 5,
u'k\xf3f': 5,
u'r\u0151\xe9': 5,
u'#\xf8r': 5,
u'i\u0144a': 5,
u'shf': 5,
u'shj': 5,
u'luv': 5,
u'ot\xfc': 5,
u'zyd': 5,
u'kk\xed': 5,
u'yg\xf6': 5,
u'#l\xe4': 5,
u'yge': 5,
u'\u0171h\xe1': 5,
u'u\xe1v': 5,
u'r\xe1\u010d': 5,
u'tys': 5,
u'\xe9u#': 5,
u'mce': 5,
u'opk': 5,
u'di\u010d': 5,
u'fly': 5,
u'kkm': 5,
u'\u0171l\xfc': 5,
u's\xf3p': 5,
u'#\u0161t': 5,
u'tn\xfd': 5,
u'r\xed\xe1': 5,
u'\u0103uj': 5,
u'v\xfan': 5,
u'paw': 5,
u'r\xf6r': 5,
u'mo\xf6': 5,
u'\xfasm': 5,
u'abz': 5,
u'\xe9k\xf6': 5,
u'ryn': 5,
u'csj': 5,
u'uhf': 5,
u'\u0151s\xe1': 5,
u'\xedki': 5,
u'#ds': 5,
u'z\xfah': 5,
u'z\xfag': 5,
u'ewo': 5,
u'\xedmf': 5,
u'\xfct\xfc': 5,
u'sdb': 5,
u'lyw': 5,
u'f#\xf6': 5,
u'req': 5,
u'm\xfcz': 5,
u'\xe0s#': 5,
u'zf\xfc': 5,
u'og\xf6': 5,
u'nji': 5,
u'k#x': 5,
u'p\xf3e': 5,
u'vcn': 5,
u'\u0107#s': 5,
u'nf\xfa': 5,
u'\xe9cl': 5,
u'p#\u0151': 5,
u'lb\u0171': 5,
u'gog': 5,
u'\xe4ch': 5,
u'p#w': 5,
u'\xfad\xe1': 5,
u'\xe9b\xe9': 5,
u'\xedss': 5,
u'\xf6pt': 5,
u'rnt': 5,
u'ywo': 5,
u'wha': 5,
u'ti\xf6': 5,
u'nnr': 5,
u'\xf3sr': 5,
u'pmt': 5,
u'z\u012bl': 5,
u'\xe8se': 5,
u'huk': 5,
u'huy': 5,
u'wap': 5,
u'rmd': 5,
u'f\xf6z': 5,
u'f\xf6r': 5,
u'ko\u0161': 5,
u'sb\xfc': 5,
u'k#\xb0': 5,
u'p\xe1g': 5,
u'rvy': 5,
u'nb\xed': 5,
u'k\u0161i': 5,
u'yvs': 5,
u'\u0144as': 5,
u'r#\u0163': 5,
u'ync': 5,
u'ef\xfa': 5,
u'ef\xf3': 5,
u'c\xf3v': 5,
u'#d\xea': 5,
u'bfa': 5,
u'ri\u0144': 5,
u'ljn': 5,
u'\u0151c\xe9': 5,
u'e\xf6s': 5,
u'wne': 5,
u'\xf3k\u0151': 5,
u'\xe1vn': 5,
u'bys': 5,
u'byp': 5,
u'byb': 5,
u'vce': 5,
u'#kw': 5,
u'wso': 5,
u'yb\xe9': 5,
u'eb\xe6': 5,
u'\u0151\xf6s': 5,
u'n\xf6b': 5,
u'av\xf3': 5,
u'hmu': 5,
u'avt': 5,
u'avl': 5,
u'avf': 5,
u'avc': 5,
u'eb\u0151': 5,
u'skb': 5,
u'mf\xf6': 5,
u'y\xf3h': 5,
u'y\xf3p': 5,
u'oea': 4,
u'\u015foa': 4,
u'fuo': 4,
u'czk': 4,
u'czi': 4,
u'\u03bf\u03c2#': 4,
u'fii': 4,
u'\xe6k#': 4,
u'\xe1z\xfa': 4,
u'#o\xe1': 4,
u'a\xe9s': 4,
u'a\xe9h': 4,
u'fr\u0151': 4,
u'xv#': 4,
u'fr\xf3': 4,
u'xk\xed': 4,
u'azr': 4,
u'v\xf3\xe9': 4,
u'\xfat\xfc': 4,
u'\xfat\xf6': 4,
u'zru': 4,
u'l\u0171e': 4,
u'oev': 4,
u'wva': 4,
u't\u0103n': 4,
u'rz\u0105': 4,
u'jid': 4,
u'jib': 4,
u'uk\u0161': 4,
u'\xbac#': 4,
u'\xbaco': 4,
u'\u03c1\u03c7\u03af': 4,
u'zo\xe9': 4,
u'zoe': 4,
u'a\xedd': 4,
u'ivc': 4,
u'ivr': 4,
u'tvk': 4,
u'#\xf3n': 4,
u'scm': 4,
u'pbi': 4,
u'pb\xe9': 4,
u'b\xfa#': 4,
u'jeo': 4,
u'aag': 4,
u'gf\u0171': 4,
u'df\xfc': 4,
u'af\u0151': 4,
u'y\u0144b': 4,
u'rfl': 4,
u'zkt': 4,
u'\u0151rg': 4,
u'cr\xe9': 4,
u'vou': 4,
u'#gw': 4,
u'#gv': 4,
u'nr\xe1': 4,
u'h\xe9o': 4,
u'knd': 4,
u'\xedj\xe9': 4,
u'p\xf6s': 4,
u'p\xf6m': 4,
u's\xf6m': 4,
u'ijb': 4,
u'ci+': 4,
u'fza': 4,
u'a\xe9g': 4,
u'ci\xf2': 4,
u'\u0171z\xe1': 4,
u'r\xf3\u017c': 4,
u'xch': 4,
u'ch\xfc': 4,
u'lz\xfc': 4,
u'\u015fcu': 4,
u'dby': 4,
u'aeo': 4,
u'tsf': 4,
u'of\u0171': 4,
u'ucr': 4,
u'ucp': 4,
u'rbs': 4,
u'\xfait': 4,
u'zgu': 4,
u'ofp': 4,
u'\xedvd': 4,
u'\xedvf': 4,
u'nv\xfc': 4,
u'krb': 4,
u'p\xfan': 4,
u'p\xfad': 4,
u'\xf4ne': 4,
u'm\xfct': 4,
u'l\xf6f': 4,
u'\xe1id': 4,
u'brj': 4,
u'o\u0142o': 4,
u'cm\u0171': 4,
u'pj\xfa': 4,
u'pj\xf3': 4,
u'c\xfas': 4,
u'hr\u0151': 4,
u'rnm': 4,
u'og\xfc': 4,
u'zch': 4,
u'\xe0#\xe9': 4,
u'wam': 4,
u'wak': 4,
u'waf': 4,
u'waz': 4,
u'wax': 4,
u'kv\xed': 4,
u'kv\xf6': 4,
u'kvf': 4,
u'kvh': 4,
u'ha\xe7': 4,
u'yvk': 4,
u'ibm': 4,
u'ibs': 4,
u'\u03b6\u03b5\u03b9': 4,
u'ib\xfc': 4,
u'no\xeb': 4,
u's\u012bt': 4,
u'\u0171ba': 4,
u'g\xfar': 4,
u'pno': 4,
u'mrt': 4,
u'mrv': 4,
u'\u0105dk': 4,
u'psj': 4,
u'psn': 4,
u'ps\u012b': 4,
u'\xed#h': 4,
u'rj\xed': 4,
u'wmo': 4,
u'#sq': 4,
u'yd\xfa': 4,
u'uvr': 4,
u'\u0148#r': 4,
u'tfl': 4,
u'if\u0151': 4,
u'\xf3vv': 4,
u'm#q': 4,
u'ymn': 4,
u'm#\u015f': 4,
u'iye': 4,
u'aq#': 4,
u'i\xe9i': 4,
u'dvo': 4,
u'c\xe9\xfc': 4,
u'c\xe9s': 4,
u'rs\xfc': 4,
u'ke\xf6': 4,
u'ng+': 4,
u'\xf3j\xf3': 4,
u'o\xe1g': 4,
u'c\xf6g': 4,
u'hnh': 4,
u'e\u010da': 4,
u'dr\xf6': 4,
u'mej': 4,
u'xhu': 4,
u'b\xf3r': 4,
u'zwe': 4,
u'ukv': 4,
u'jjh': 4,
u'h\u0151i': 4,
u'gsb': 4,
u'doi': 4,
u'gsn': 4,
u'unl': 4,
u'fn\u0151': 4,
u'\u0171nk': 4,
u'yeh': 4,
u'\u03af\u03b6\u03b5': 4,
u'#nv': 4,
u'#ns': 4,
u'#nc': 4,
u'vdi': 4,
u'v#w': 4,
u'vpi': 4,
u'vph': 4,
u'hb\xe9': 4,
u'\xf3\u0142y': 4,
u'\xf3\u0142#': 4,
u'ayd': 4,
u'ayf': 4,
u'\xf6r\xfa': 4,
u'\u0151j\u0171': 4,
u'\xfcla': 4,
u'fep': 4,
u'\xfcly': 4,
u'uj\u015b': 4,
u'ge\xfc': 4,
u'imy': 4,
u'\xedga': 4,
u'kmb': 4,
u'n\xedv': 4,
u'yzu': 4,
u'gof': 4,
u'eyo': 4,
u'\xe4#\xe9': 4,
u'mmy': 4,
u'j\xf3\xfa': 4,
u'prs': 4,
u'#<#': 4,
u'y\xedk': 4,
u'syj': 4,
u'dgh': 4,
u'ldl': 4,
u'cuz': 4,
u'#uu': 4,
u'tvt': 4,
u'\xf3fn': 4,
u'tvb': 4,
u'zh\xfa': 4,
u's\xe9z': 4,
u'\xf6nu': 4,
u'd\u0103#': 4,
u'd\xf6c': 4,
u'\xe1dc': 4,
u'bi\u0161': 4,
u'mi\u015f': 4,
u'#\xf6g': 4,
u'\xfc#h': 4,
u'r\u0151u': 4,
u'uia': 4,
u'\u0171v\xe1': 4,
u'\xe9mp': 4,
u'wyb': 4,
u'gmb': 4,
u'dck': 4,
u'\u0163u#': 4,
u'ubp': 4,
u'nwr': 4,
u'\xfaf#': 4,
u'kuy': 4,
u'q#a': 4,
u'vpe': 4,
u'imv': 4,
u'\xe7i#': 4,
u'nhs': 4,
u'bs\xfa': 4,
u'rsl': 4,
u'nh\xf3': 4,
u'pkr': 4,
u'cyn': 4,
u'\xfcri': 4,
u'mue': 4,
u'muc': 4,
u'\xfcr\xf3': 4,
u'+#i': 4,
u'py#': 4,
u'p\u0171#': 4,
u'\xe9af': 4,
u'hsi': 4,
u'ppb': 4,
u'ppy': 4,
u'llz': 4,
u'\u03c0\u03c1\u03cc': 4,
u'roq': 4,
u'llf': 4,
u'icy': 4,
u'kyn': 4,
u'kyb': 4,
u'a\xfcv': 4,
u'tbv': 4,
u's\xe1m': 4,
u'gtk': 4,
u'#+o': 4,
u'#+r': 4,
u'yuf': 4,
u'm\xe1u': 4,
u'\u010das': 4,
u'\u010dan': 4,
u'ia\xf3': 4,
u'fch': 4,
u'\xf3uk': 4,
u'tb\xfc': 4,
u'v\xe9\xfc': 4,
u'r\xfae': 4,
u'\xf6be': 4,
u'\xf6bh': 4,
u'pt\xfa': 4,
u'wea': 4,
u'suj': 4,
u'rkn': 4,
u'oy\xe1': 4,
u'#rd': 4,
u'd\u01d0m': 4,
u'b\u0151b': 4,
u'jsp': 4,
u'cdj': 4,
u'iez': 4,
u'#\u03ba\u03b1': 4,
u'g\u0171t': 4,
u'uuh': 4,
u'\xf2#c': 4,
u'tg#': 4,
u'qat': 4,
u'\u0144cz': 4,
u'ahd': 4,
u'a\xe7i': 4,
u'l\xe4n': 4,
u'ix\xe9': 4,
u'ix\xfc': 4,
u'hk\xe9': 4,
u'j\xfcl': 4,
u'\u0161t\xe1': 4,
u'#mn': 4,
u'rfr': 4,
u'lt\xed': 4,
u'ouh': 4,
u'oux': 4,
u'ouw': 4,
u'ltc': 4,
u't\xf3w': 4,
u'zkl': 4,
u'#vy': 4,
u'dlu': 4,
u'#v\u0151': 4,
u'\u010dil': 4,
u'rf\xe1': 4,
u'\xeddj': 4,
u'\xeddf': 4,
u'tco': 4,
u'tcl': 4,
u'ndc': 4,
u'myr': 4,
u'\xf6c\xf6': 4,
u'bct': 4,
u'\xfcf\xe9': 4,
u'ho\u0142': 4,
u'xt\u0151': 4,
u'#gj': 4,
u'e\u0161#': 4,
u'mdg': 4,
u'mds': 4,
u'f\xfcm': 4,
u'#\u03b1\u03c1': 4,
u'lpd': 4,
u'ztp': 4,
u'\u01d0ma': 4,
u'i\u0107e': 4,
u'\xe4nd': 4,
u'\xe4ns': 4,
u'umz': 4,
u'\xf3ab': 4,
u'uyn': 4,
u'khl': 4,
u'ol\u0148': 4,
u'nx#': 4,
u'l\u0148#': 4,
u've\xfc': 4,
u'yds': 4,
u'f\u0171n': 4,
u'd\u0151u': 4,
u'd\u0151c': 4,
u'dym': 4,
u'ssf': 4,
u'vfi': 4,
u'axa': 4,
u'axo': 4,
u'axm': 4,
u'xma': 4,
u'r\xe1o': 4,
u'\xebll': 4,
u'\xe9t\xf6': 4,
u'\u015fen': 4,
u'b\xfcf': 4,
u'cp\xe1': 4,
u'cpd': 4,
u'kl#': 4,
u'a\xf3b': 4,
u'\xedl\u0171': 4,
u'itd': 4,
u'ft\xf3': 4,
u'ft\xfc': 4,
u'ftg': 4,
u'ftk': 4,
u'\u0171t\xe1': 4,
u'bh\xf6': 4,
u'exf': 4,
u'xal': 4,
u'mld': 4,
u'xa#': 4,
u'\xe9hv': 4,
u'\xf3\xe9l': 4,
u'zi\u0144': 4,
u'odc': 4,
u'np\xe9': 4,
u't\xfcm': 4,
u'vmu': 4,
u'vm#': 4,
u'kpe': 4,
u'#y\xe1': 4,
u'kp\xf3': 4,
u'qtr': 4,
u'ph\xe1': 4,
u'phr': 4,
u'xer': 4,
u'\xe9eb': 4,
u'pts': 4,
u'\u03b1#a': 4,
u'ktd': 4,
u'd\xf3g': 4,
u'd\xf3\xe1': 4,
u'th\u0171': 4,
u'\xf3pl': 4,
u'th\xf6': 4,
u'ep\xf3': 4,
u'nix': 4,
u'bpa': 4,
u'bpj': 4,
u'bpo': 4,
u'\u03b5\u03c4\u03b1': 4,
u'ykr': 4,
u'vdk': 4,
u'xys': 4,
u'mtr': 4,
u'htj': 4,
u'spy': 4,
u'ek\u0151': 4,
u'#\u015bw': 4,
u'lm\xf6': 4,
u'lm\xed': 4,
u'x#u': 4,
u'\u015f#m': 4,
u'wcz': 4,
u'wc#': 4,
u'jty': 4,
u'jtr': 4,
u'gwa': 4,
u'vus': 4,
u'#qt': 4,
u'#\u017ee': 4,
u'uxy': 4,
u'\xe1k\xed': 4,
u'nm\xfc': 4,
u'l\xfcd': 4,
u'nm\xe1': 4,
u'\u0161i\u0107': 4,
u'mpj': 4,
u'mpp': 4,
u'u\xe9b': 4,
u'\xfaik': 4,
u'shb': 4,
u'asq': 4,
u'#\u015fe': 4,
u'rh\xf4': 4,
u'\u0151va': 4,
u'r\u016bn': 4,
u'wou': 4,
u'b\xe9a': 4,
u'b\xe9p': 4,
u'\u03b7#\u03b1': 4,
u'l#\u03b1': 4,
u'id=': 4,
u'vsu': 4,
u'z#\xe5': 4,
u'z#\xe0': 4,
u'utk': 4,
u'\xf3h\xfa': 4,
u'utp': 4,
u'z#y': 4,
u'h\xfcs': 4,
u'\u0171dd': 4,
u'yce': 4,
u'i\u010do': 4,
u'#\u03c0\u03c1': 4,
u'm\xeds': 4,
u'r\u0151g': 4,
u'k\xf3m': 4,
u'k\xf3\u0142': 4,
u'aws': 4,
u'pyk': 4,
u'otg': 4,
u'zs\u0151': 4,
u'\u0171lv': 4,
u'zyf': 4,
u'zyc': 4,
u'i\u015fo': 4,
u'z\u0105d': 4,
u'dm\xf3': 4,
u'ygd': 4,
u'vf#': 4,
u'ne\u010d': 4,
u'vfl': 4,
u'vfa': 4,
u'#lr': 4,
u'd\xfcs': 4,
u'r\xe1\xe1': 4,
u'al\xe8': 4,
u'\u0107et': 4,
u'n#\u0111': 4,
u'n#x': 4,
u'tyb': 4,
u'typ': 4,
u'\u0171se': 4,
u'#\xfcc': 4,
u'ba\xf3': 4,
u'mcl': 4,
u'mck': 4,
u'\u0151dk': 4,
u'\u012bti': 4,
u'\xf4le': 4,
u'\xe9l\u0171': 4,
u'h\xf4n': 4,
u'\xf6cc': 4,
u'#\u0161\u0165': 4,
u'\u0171lj': 4,
u'\xf6db': 4,
u'iwc': 4,
u'iwa': 4,
u'\xe1bk': 4,
u'\xe1bp': 4,
u'\xe1bs': 4,
u'\u0103ub': 4,
u'bm#': 4,
u'pa\xf1': 4,
u'bma': 4,
u'y#\u0163': 4,
u'd#+': 4,
u'f\xe1d': 4,
u'ab\xfa': 4,
u'\xe9kd': 4,
u'a\u0161t': 4,
u'hyu': 4,
u'f\u0151u': 4,
u'i\xfai': 4,
u'f\u0151\xe9': 4,
u'\u0151t\xfa': 4,
u'ab\u0151': 4,
u'rye': 4,
u'ko\u0144': 4,
u'\u0151sp': 4,
u'\u0119k#': 4,
u'#d\u01d0': 4,
u'\xedkb': 4,
u'\xedks': 4,
u'\xf3dg': 4,
u'a\u015fc': 4,
u'#dy': 4,
u'r\xe9e': 4,
u'\u0142#k': 4,
u'\xe1fu': 4,
u'fuc': 4,
u'a\u0142#': 4,
u'g\xf3\u0142': 4,
u'pe=': 4,
u'mm\xfa': 4,
u'mk#': 4,
u'g\xf3e': 4,
u'bcg': 4,
u'\u0151l\xf6': 4,
u'af\xed': 4,
u'afs': 4,
u'lft': 4,
u're\u0163': 4,
u'\u0171ko': 4,
u'a\xf6c': 4,
u'\xf6l\xed': 4,
u'io\xe9': 4,
u'ysp': 4,
u'm\u011b\u0159': 4,
u'\xe1jz': 4,
u'esq': 4,
u'buv': 4,
u'mwa': 4,
u'\u0107#u': 4,
u'\u0107#t': 4,
u'hyd': 4,
u'm\u0151i': 4,
u'i\xf2#': 4,
u'lb#': 4,
u'\xeds\xed': 4,
u'oc\xe9': 4,
u'\xfade': 4,
u'\xf6pf': 4,
u'ywa': 4,
u'whe': 4,
u'cnn': 4,
u'ti\u0161': 4,
u'm\xe3o': 4,
u'\xe1n\xf6': 4,
u'thl': 4,
u'msu': 4,
u'msi': 4,
u'r\xfa\xe9': 4,
u'p\u0119k': 4,
u'f\xedr': 4,
u'd\xe9i': 4,
u'ej\xf3': 4,
u'an\u0103': 4,
u'hu\xe1': 4,
u'i\xf6t': 4,
u'dyg': 4,
u'lns': 4,
u'j\u015bc': 4,
u'\xe9zd': 4,
u'\xe9zo': 4,
u'gv#': 4,
u'h\xe4n': 4,
u'ig\xf6': 4,
u'\u015bci': 4,
u'igd': 4,
u'#p\u0119': 4,
u'\u0151pe': 4,
u'+os': 4,
u'r#q': 4,
u'nb#': 4,
u'i\u0161k': 4,
u'sbl': 4,
u'\u010d\xedk': 4,
u'j\xfaa': 4,
u'u\u0151r': 4,
u'izk': 4,
u'\u03c7\u03af\u03b6': 4,
u'efp': 4,
u'pv\xe9': 4,
u'hif': 4,
u'\u03c4\u03b1\u03b9': 4,
u'cvi': 4,
u'ar\u016b': 4,
u'\u03b1\u03c1\u03c7': 4,
u'#df': 4,
u'#dj': 4,
u'#t\u0103': 4,
u'wnn': 4,
u'grz': 4,
u'\xe1vm': 4,
u'voy': 4,
u'fef': 4,
u'tpl': 4,
u'byc': 4,
u's\xfak': 4,
u'#ks': 4,
u'j\xe1d': 4,
u'h#\xed': 4,
u'd\xe1g': 4,
u'ebm': 4,
u't+r': 4,
u'pzi': 4,
u'eb\u0171': 4,
u'e\u0163u': 4,
u'mfl': 4,
u'mfs': 4,
u'skv': 4,
u'skl': 4,
u'owc': 4,
u'\u0151go': 4,
u'owv': 4,
u'\u0142y#': 4,
u'no\xee': 3,
u't\xedf': 3,
u'uoc': 3,
u'dgk': 3,
u'r\u0171r': 3,
u'r\u0171j': 3,
u'kft': 3,
u'kfr': 3,
u'kf#': 3,
u'kf\xe1': 3,
u'a\xe9#': 3,
u'\xe1z\u0151': 3,
u'f=e': 3,
u'frd': 3,
u'frr': 3,
u'cud': 3,
u'rgd': 3,
u'\u0151\xfar': 3,
u'tzo': 3,
u'fr\xed': 3,
u'\u0171ra': 3,
u'\u0171ru': 3,
u'k\xf6h': 3,
u'j\xf6l': 3,
u'\xedpj': 3,
u'\xe4lj': 3,
u'ha\u0161': 3,
u'azy': 3,
u'f\xe2n': 3,
u'xki': 3,
u'xko': 3,
u'j#\xf3': 3,
u'osg': 3,
u'\u0151k\xf3': 3,
u'\u0151kl': 3,
u'\u0151ku': 3,
u'\xe9vp': 3,
u'\xf3\xf3r': 3,
u'czn': 3,
u'jic': 3,
u'jiv': 3,
u'jis': 3,
u'm\xf3h': 3,
u'\u03c0\u03bf\u03c7': 3,
u'\xf3ca': 3,
u'zoi': 3,
u'zow': 3,
u'\xf3c#': 3,
u'e\xe9k': 3,
u'e\xe9n': 3,
u'\u03ba\u03bf\u03b9': 3,
u'a\xedv': 3,
u'\xfcl\u0171': 3,
u'gwp': 3,
u'tv\xed': 3,
u't#\u015f': 3,
u'hej': 3,
u'he\xe1': 3,
u'scl': 3,
u'g\xf6c': 3,
u'sc\xe1': 3,
u'\u03c8\u03b7\u03c6': 3,
u'gfb': 3,
u'dfb': 3,
u'df\xf3': 3,
u'ugi': 3,
u'ojl': 3,
u'crr': 3,
u'y\u0144i': 3,
u'rfb': 3,
u'crf': 3,
u'\xfamb': 3,
u'rf\xf3': 3,
u'\u0151rp': 3,
u'y\xf1s': 3,
u'\u0151rv': 3,
u'kn#': 3,
u'psb': 3,
u'\xfchk': 3,
u'a\xf1e': 3,
u'p\xf6g': 3,
u'\u0161#e': 3,
u'ijd': 3,
u'iji': 3,
u'\xf3zp': 3,
u'evt': 3,
u'ev\xfa': 3,
u'tr\xf4': 3,
u'pfr': 3,
u'pfu': 3,
u'srh': 3,
u'xcl': 3,
u'mj#': 3,
u'r\xf3w': 3,
u'\xe1#\xf3': 3,
u'u\xf3n': 3,
u'u\xf3#': 3,
u'\xe9nc': 3,
u'w#i': 3,
u'dbb': 3,
u'dbr': 3,
u'aeb': 3,
u'aed': 3,
u'ucm': 3,
u'rby': 3,
u'uc\xed': 3,
u'uc\xe9': 3,
u'#m\xe4': 3,
u'wed': 3,
u'wev': 3,
u'wez': 3,
u'nvn': 3,
u'b#\u0171': 3,
u'\xf6mv': 3,
u'we\u0142': 3,
u'jzz': 3,
u'+f+': 3,
u'\u03ad\u03c1#': 3,
u'+fe': 3,
u'\u0163i#': 3,
u'cmt': 3,
u'nkp': 3,
u'mv#': 3,
u'hrv': 3,
u'hrn': 3,
u's\u0171k': 3,
u'aiw': 3,
u'v\u012b\u0137': 3,
u'\xe0#s': 3,
u'rnh': 3,
u'lcd': 3,
u'ob\xed': 3,
u'\xfael': 3,
u'\u0151zg': 3,
u't\xfav': 3,
u'k#q': 3,
u'\xeet#': 3,
u'k#+': 3,
u'i\u0161e': 3,
u'y#+': 3,
u'foi': 3,
u'uzl': 3,
u'uzn': 3,
u'uzb': 3,
u'\xe1m\xf3': 3,
u'ibp': 3,
u'ibv': 3,
u'\xf3rk': 3,
u'm\u0171l': 3,
u'\xf3r\xfa': 3,
u'yio': 3,
u'b\u0171#': 3,
u'yi\xe9': 3,
u'\u0171b\u0151': 3,
u'c+x': 3,
u'o\xe9t': 3,
u'j#x': 3,
u'uvo': 3,
u'ce=': 3,
u'\xf6lo': 3,
u'\xe8te': 3,
u'\xe9fe': 3,
u'sr\xf6': 3,
u'srp': 3,
u'loa': 3,
u'\xed#k': 3,
u'\xed#z': 3,
u'wma': 3,
u'p\u0159e': 3,
u'#s\u0142': 3,
u'jr\xe9': 3,
u'fft': 3,
u'ffh': 3,
u'cee': 3,
u'cey': 3,
u'\u0163at': 3,
u'ioz': 3,
u'if\xfa': 3,
u'tfg': 3,
u'\u03b1\u03c0\u03bf': 3,
u'm#<': 3,
u'\xdfna': 3,
u'gmn': 3,
u'ym\u0171': 3,
u'hja': 3,
u'#\u03bb\u03ae': 3,
u'\xb0cb': 3,
u'eef': 3,
u'\u0161uj': 3,
u'aqs': 3,
u'i\xe9\xe9': 3,
u'#\u0151n': 3,
u'#\u0151m': 3,
u'i\xe9m': 3,
u'dvr': 3,
u'lkh': 3,
u'\u0151b\xfa': 3,
u'\u0151bo': 3,
u'n\u0103s': 3,
u'c\xe9p': 3,
u'jnu': 3,
u'#wm': 3,
u'#wc': 3,
u'ur\xf6': 3,
u'l\xfar': 3,
u'urp': 3,
u'\xfa#w': 3,
u'kex': 3,
u'yaa': 3,
u'm\xf3\xe9': 3,
u'pu\u0151': 3,
u'#jl': 3,
u'\xf6bn': 3,
u'ya\xfc': 3,
u'ya\xf1': 3,
u'\u0151ug': 3,
u'xst': 3,
u'p\xf6f': 3,
u'#\xfa#': 3,
u'hng': 3,
u'dr\xfc': 3,
u'i\xedt': 3,
u'lw\xf3': 3,
u'e\xf1o': 3,
u'\u03c0\u03bc#': 3,
u'b\xf3h': 3,
u'b\xf3i': 3,
u'\u03b1\u03c4\u03ac': 3,
u'jjg': 3,
u'jj#': 3,
u'do\xe1': 3,
u'+vh': 3,
u'\u0137ef': 3,
u'f\xe9c': 3,
u'\u03c0\u03ad\u03c1': 3,
u'fn#': 3,
u'unw': 3,
u'om\u011b': 3,
u'\xedng': 3,
u'#nr': 3,
u'vde': 3,
u'eek': 3,
u'iq#': 3,
u'fse': 3,
u'vpa': 3,
u'z#\u0161': 3,
u'e#\u03b1': 3,
u'box': 3,
u'\u03b7#\u03bb': 3,
u'ayv': 3,
u'ayp': 3,
u'i\xe1d': 3,
u'bo\u017e': 3,
u'lsv': 3,
u'lss': 3,
u'e#\u0171': 3,
u'zs\u0171': 3,
u'ls\xed': 3,
u'\u0151ja': 3,
u'\xe1\xedg': 3,
u'zsj': 3,
u'\u0159er': 3,
u'#ck': 3,
u'\xe4i#': 3,
u'dkk': 3,
u'\u0142od': 3,
u'ujr': 3,
u'ujb': 3,
u'\xedmh': 3,
u'\xfcm\u0151': 3,
u'h\xf6p': 3,
u'#\xdf#': 3,
u'\u03bf\u03b9\u03bd': 3,
u'#bk': 3,
u'#bz': 3,
u'\xfcma': 3,
u'\xfcms': 3,
u'+a+': 3,
u'vh\xe9': 3,
u'iuj': 3,
u'd\xfaj': 3,
u'n\u0151p': 3,
u'eyg': 3,
u'hf\xe9': 3,
u'uyu': 3,
u'bkn': 3,
u'bku': 3,
u'hfr': 3,
u'hfl': 3,
u'pcp': 3,
u'j\xf3d': 3,
u'r\xf4l': 3,
u'c\u01ceu': 3,
u'sbt': 3,
u'sby': 3,
u'mmb': 3,
u'\u012b\u0137e': 3,
u'o#\xed': 3,
u'o#\xf3': 3,
u'c\xe9h': 3,
u'syk': 3,
u'syt': 3,
u'rgw': 3,
u'cuc': 3,
u'\u03c5\u03c0\u03ad': 3,
u'\u0142ka': 3,
u'nsl': 3,
u'nsv': 3,
u'tvs': 3,
u'tvj': 3,
u'vl#': 3,
u's\xe9c': 3,
u'\xe1d\xfa': 3,
u'dd+': 3,
u'z\u0151\xe9': 3,
u'#\xf6c': 3,
u'sf\u0171': 3,
u'\xfc#b': 3,
u'vda': 3,
u'kgu': 3,
u'\u0103n\u0103': 3,
u'u\u0161u': 3,
u'ad\xf6': 3,
u'y\xe1u': 3,
u'ad\xfc': 3,
u'ub\u010d': 3,
u'dcb': 3,
u'dcc': 3,
u'dca': 3,
u'dcu': 3,
u'dcr': 3,
u'ub\xfc': 3,
u'm\xfam': 3,
u'ub\xe9': 3,
u'oaf': 3,
u'oab': 3,
u'oai': 3,
u'+#n': 3,
u'v\u0171n': 3,
u'\xfc#\xe1': 3,
u'sct': 3,
u'q#n': 3,
u's\xeds': 3,
u'#zm': 3,
u'ku\u017a': 3,
u'+i#': 3,
u'\xe1h\xe1': 3,
u'to\u0161': 3,
u'\xe1hu': 3,
u'mn\xfc': 3,
u'eqs': 3,
u'eq#': 3,
u'bso': 3,
u'pk\u0151': 3,
u'ah\u0151': 3,
u'+#h': 3,
u'+#t': 3,
u'#\xe1j': 3,
u'hsc': 3,
u'p\u0171e': 3,
u'#\xe1#': 3,
u'ahk': 3,
u'\xedfu': 3,
u'\xfcns': 3,
u'gi\xfa': 3,
u'd\u017ea': 3,
u'tbn': 3,
u'ia\u015f': 3,
u'gtf': 3,
u'vtz': 3,
u'ia\u010d': 3,
u'm\xe1\u0161': 3,
u'#+\xb0': 3,
u'dpc': 3,
u'g#q': 3,
u'g#y': 3,
u'\xe1l\xfc': 3,
u'iay': 3,
u'fcr': 3,
u'\u0151\xe9p': 3,
u'tkk': 3,
u'gf\xf6': 3,
u's+#': 3,
u'\u0107re': 3,
u'\u01ceu#': 3,
u'n\xfcb': 3,
u'k\u0142o': 3,
u'\xe9es': 3,
u'ptf': 3,
u'al\xf6': 3,
u'rkp': 3,
u'\u03b5u#': 3,
u'oys': 3,
u'oye': 3,
u'oyc': 3,
u'd\u017ab': 3,
u'\u01cene': 3,
u'#rk': 3,
u'cdi': 3,
u'iey': 3,
u'iex': 3,
u'iea': 3,
u'#\u03ba\u03bf': 3,
u'rt+': 3,
u'rt\xe4': 3,
u'cd\xed': 3,
u'\xf3is': 3,
u'\u03c4\u03ac#': 3,
u'\u0171eb': 3,
u'ilq': 3,
u'hgo': 3,
u'\xfbte': 3,
u'c\xf3i': 3,
u'i#\u0142': 3,
u'++d': 3,
u'tpb': 3,
u'l\xe4i': 3,
u'ix\xe1': 3,
u'tps': 3,
u'v\xedg': 3,
u'\xfark': 3,
u'six': 3,
u'\u0161tu': 3,
u'\u0151ar': 3,
u'lty': 3,
u'ltf': 3,
u'ltm': 3,
u'\xfazr': 3,
u'\xf3#y': 3,
u'#vd': 3,
u's\xe1f': 3,
u'##x': 3,
u'g+#': 3,
u'jov': 3,
u'fk\xe9': 3,
u'#v\u012b': 3,
u'kd\xed': 3,
u'\xeddd': 3,
u'tcb': 3,
u'\xf3mh': 3,
u'kda': 3,
u's\xfcc': 3,
u's#\xba': 3,
u'vay': 3,
u'hnj': 3,
u'##\u03bc': 3,
u'va\xe9': 3,
u'va\xe1': 3,
u'##\u03b1': 3,
u'##\u03b2': 3,
u'##\u03c0': 3,
u'nd\u017e': 3,
u'xt+': 3,
u'voc': 3,
u'yut': 3,
u'\xe8me': 3,
u'md\xf6': 3,
u'l\xf3\xe1': 3,
u'sm\xe3': 3,
u'#\u03c5\u03c0': 3,
u'a\xf1s': 3,
u'fhr': 3,
u'xio': 3,
u'lpn': 3,
u'\xfav#': 3,
u'\xe9pd': 3,
u'\u015fi#': 3,
u'prg': 3,
u'i\u0107r': 3,
u'\xf3ls': 3,
u'dhd': 3,
u'\xeame': 3,
u'y\u0142a': 3,
u'um\xf6': 3,
u'\u017ami': 3,
u'um\u0171': 3,
u'h\xf3i': 3,
u'e\u0142#': 3,
u'yd#': 3,
u'g\xe9c': 3,
u'ydn': 3,
u'ydl': 3,
u'ydr': 3,
u'=ep': 3,
u'd\u0151\xf6': 3,
u'fpi': 3,
u'bl\xed': 3,
u'\xe1ca': 3,
u'tt\xfa': 3,
u'\u0171pr': 3,
u'bly': 3,
u'\xe2nt': 3,
u'\xe1c\xf3': 3,
u'y\u0151i': 3,
u'y\u0151e': 3,
u'xml': 3,
u'zpu': 3,
u'n\xedr': 3,
u'w\xe9h': 3,
u'b\xf3j': 3,
u'b\xf3d': 3,
u'dd\xf6': 3,
u'gd\xe9': 3,
u'jgl': 3,
u'jgo': 3,
u'ddl': 3,
u'acd': 3,
u'uiv': 3,
u'cpc': 3,
u'cpp': 3,
u'cps': 3,
u'\xf3ef': 3,
u'\xedl\xe9': 3,
u'\xedla': 3,
u'\xfcng': 3,
u'a\xf3i': 3,
u'\xfcn\xf6': 3,
u'ftf': 3,
u'hgy': 3,
u'n#\xb0': 3,
u'sew': 3,
u'ml+': 3,
u'\xf3\xe9i': 3,
u'\u0151m\xf6': 3,
u'\u03b2#a': 3,
u'zih': 3,
u'=in': 3,
u'od\xf6': 3,
u'\u03bc\u03bc#': 3,
u't\xfcv': 3,
u'\xfak\xf3': 3,
u'npv': 3,
u'npp': 3,
u'\u0151p\xe9': 3,
u'vmk': 3,
u'vme': 3,
u'ihn': 3,
u'ihf': 3,
u'vm\u0171': 3,
u'+do': 3,
u'ckt': 3,
u'btt': 3,
u'cbd': 3,
u'g\xfcm': 3,
u'eh\xfc': 3,
u'u\xf1a': 3,
u'hpn': 3,
u'eoc': 3,
u'\u0171\xe9p': 3,
u'u\xf3r': 3,
u'te\xfc': 3,
u'ptr': 3,
u'akw': 3,
u'uau': 3,
u'la\u03b2': 3,
u'\xfaga': 3,
u'ze\xed': 3,
u'om\u0171': 3,
u'\xedtl': 3,
u'\xf6sr': 3,
u'\u0171nl': 3,
u'\xf3wb': 3,
u'm\xe4c': 3,
u'p\xfcg': 3,
u'il\xe4': 3,
u'co\xfb': 3,
u'dsn': 3,
u'coi': 3,
u'ilz': 3,
u'ni\xed': 3,
u'epf': 3,
u'bpe': 3,
u'bph': 3,
u'ykn': 3,
u'ykl': 3,
u'pt\u0171': 3,
u'plc': 3,
u'm\u01cen': 3,
u'mtf': 3,
u'ht\xf3': 3,
u'dbu': 3,
u'spk': 3,
u'spn': 3,
u'spb': 3,
u'ghl': 3,
u'lm\xf3': 3,
u'gbb': 3,
u'lmv': 3,
u'lmd': 3,
u'za\xed': 3,
u'\u015f#u': 3,
u'#\u03c8\u03b7': 3,
u'z\u010ds': 3,
u'\u03ba\u03b1\u03c4': 3,
u'gwe': 3,
u'\u016bt\u0117': 3,
u'ux\xe1': 3,
u'm\xe0j': 3,
u'uxi': 3,
u'b#x': 3,
u'\xf3tc': 3,
u'w\xf3w': 3,
u'\xf3ts': 3,
u'ef\u0151': 3,
u'rbk': 3,
u'u\xe9t': 3,
u'a\u03b2#': 3,
u'ss\xe3': 3,
u'\xe1\xfct': 3,
u'lg\xed': 3,
u'stf': 3,
u'dtm': 3,
u'stc': 3,
u'stp': 3,
u'oxr': 3,
u'oxk': 3,
u'woh': 3,
u'b\xe9m': 3,
u'wo#': 3,
u'\u03bf\u03c7\u03ae': 3,
u'b\u0151#': 3,
u'p\u0151p': 3,
u'wep': 3,
u'p\u0151v': 3,
u'bop': 3,
u'vyt': 3,
u'gsg': 3,
u'l#\xdf': 3,
u'\xedvp': 3,
u'gp\xf3': 3,
u'x\xe1c': 3,
u'cgo': 3,
u'l#<': 3,
u'ruy': 3,
u'na\xed': 3,
u'l#\u0163': 3,
u'cdb': 3,
u'\u0171dv': 3,
u'vb\xf3': 3,
u'yco': 3,
u'vbi': 3,
u'#hp': 3,
u'#\u03c0\u03bc': 3,
u'hl\xe1': 3,
u'hln': 3,
u'=re': 3,
u'd+#': 3,
u'g\u0151l': 3,
u'g\u0151i': 3,
u'z\xe9i': 3,
u'dpk': 3,
u'lup': 3,
u'mg\xe9': 3,
u'uur': 3,
u'a\xdfn': 3,
u'e\xf3b': 3,
u'e\xf3j': 3,
u'zy\xf1': 3,
u'zyb': 3,
u'zya': 3,
u'\u017ca#': 3,
u'jlj': 3,
u'+t#': 3,
u'fh#': 3,
u'\u0142es': 3,
u'upj': 3,
u'upb': 3,
u'#\u03b1\u03c0': 3,
u'\xf3lf': 3,
u'#l\xf8': 3,
u'kg\xe1': 3,
u'#lc': 3,
u'#lw': 3,
u'r\xe1\xed': 3,
u'r\xe1\xfc': 3,
u'n#\u0163': 3,
u'a\u010do': 3,
u'a\u010da': 3,
u'\u0171s\xfa': 3,
u'sld': 3,
u'opb': 3,
u'wws': 3,
u'zuj': 3,
u'b\u010de': 3,
u'+xm': 3,
u'jh#': 3,
u'\u0142a#': 3,
u'ulp': 3,
u'fl\xf6': 3,
u'\u010d#e': 3,
u'r\xedl': 3,
u'ixs': 3,
u'iwo': 3,
u'k\u0151v': 3,
u'k\u0151d': 3,
u'k\u0151l': 3,
u'pa\u015f': 3,
u'tup': 3,
u'tui': 3,
u'qs#': 3,
u'v\xfar': 3,
u'd#q': 3,
u'\u0151h\u0151': 3,
u'z\xe9c': 3,
u'xn\xe9': 3,
u'\u017eak': 3,
u'ze\xe9': 3,
u'\xfasg': 3,
u'\xfasl': 3,
u'a\u0161a': 3,
u'wsi': 3,
u'abp': 3,
u'f\u0151d': 3,
u'i\xfav': 3,
u'i\xfaz': 3,
u'f\u0151\xf6': 3,
u'ryl': 3,
u'ab\u0171': 3,
u'csy': 3,
u'\u0151t\xed': 3,
u'\xedkt': 3,
u'zju': 3,
u'\xe9\xe1n': 3,
u'ko\xe1': 3,
u'rrr': 3,
u'#dk': 3,
u'\u0171kn': 3,
u'o\xfbt': 3,
u'ikw': 3,
u'l\xe9d': 3,
u'l\xe9e': 3,
u'z\xfab': 3,
u'fub': 3,
u'ewl': 3,
u'ewy': 3,
u'k\u0171k': 3,
u'ta\xfa': 3,
u'bip': 3,
u'pey': 3,
u'pev': 3,
u'\xfctv': 3,
u'few': 3,
u'sdv': 3,
u'usq': 3,
u'sdm': 3,
u'fez': 3,
u'g\xf3z': 3,
u'jzf': 3,
u'g\xf3\xe1': 3,
u'af\u0171': 3,
u'\xe9on': 3,
u'zl\xed': 3,
u'ixn': 3,
u'\xe9\xe1b': 3,
u'f#\xed': 3,
u'da\xf1': 3,
u're\u015f': 3,
u'#bv': 3,
u'ksd': 3,
u'ys\u0171': 3,
u'ks\xfc': 3,
u's\u0151\xe9': 3,
u'vrt': 3,
u'ysn': 3,
u'ysl': 3,
u'cja': 3,
u'ilp': 3,
u'ys\xed': 3,
u'ys\xfa': 3,
u'\xe1jp': 3,
u'\xe1jv': 3,
u'z\xf6z': 3,
u'n\u0171e': 3,
u'\xf3\u017ca': 3,
u'\xfd#a': 3,
u'es\u0142': 3,
u'mwn': 3,
u'\u0107#k': 3,
u'\u0107#l': 3,
u'd\xedl': 3,
u'l+v': 3,
u'+\xb0c': 3,
u'thd': 3,
u'oc+': 3,
u'nuo': 3,
u'oc\xe1': 3,
u'\xfadt': 3,
u'\xfadj': 3,
u'kwh': 3,
u'wfp': 3,
u'\xf6p#': 3,
u'\xf6pl': 3,
u'u\u017am': 3,
u'm=i': 3,
u'icj': 3,
u'icn': 3,
u'icb': 3,
u'h\u0151c': 3,
u'\xf3s\xfc': 3,
u'\xf3s\xf3': 3,
u'tiy': 3,
u'tiq': 3,
u'ms\u0171': 3,
u'\u0144ez': 3,
u'pmf': 3,
u'pmn': 3,
u'j\xe9s': 3,
u'thc': 3,
u'msy': 3,
u'msh': 3,
u'msb': 3,
u'o\xeet': 3,
u'a\u0142e': 3,
u'gb\u0171': 3,
u'pr\xe8': 3,
u'huo': 3,
u'\xe8sn': 3,
u'prk': 3,
u'prp': 3,
u'gkv': 3,
u'ej\u010d': 3,
u'rm\xe4': 3,
u'lnt': 3,
u'\u03c7\u03ae#': 3,
u'f+i': 3,
u'yvd': 3,
u'\xe9z\xe1': 3,
u'wb\xf3': 3,
u'p\xe1d': 3,
u'jum': 3,
u'rvf': 3,
u'cbi': 3,
u'rvp': 3,
u'\u010dor': 3,
u'te\xf3': 3,
u'fa\xfc': 3,
u'uz\u010d': 3,
u'y#q': 3,
u'wri': 3,
u'wki': 3,
u'iz\u0151': 3,
u'ef=': 3,
u'pv\xed': 3,
u'bfh': 3,
u'bfr': 3,
u'a\u015fi': 3,
u'hiq': 3,
u'hiw': 3,
u'tcr': 3,
u'ri\xf6': 3,
u'duo': 3,
u'p+t': 3,
u'du\xf3': 3,
u'ha\u0142': 3,
u'zz\xed': 3,
u'ljj': 3,
u'e\xf6m': 3,
u'e\xf6n': 3,
u'wnb': 3,
u'wni': 3,
u'#tc': 3,
u'rrl': 3,
u'rrv': 3,
u'\xe1vs': 3,
u'\xe1vc': 3,
u'fea': 3,
u'\xf3ks': 3,
u'us\xfc': 3,
u'ic\u01ce': 3,
u'kby': 3,
u'owk': 3,
u'byv': 3,
u'byr': 3,
u'vct': 3,
u'\xfcdn': 3,
u'wst': 3,
u'ebp': 3,
u'ebz': 3,
u't+a': 3,
u'avm': 3,
u'j\xfar': 3,
u'mfp': 3,
u'y\xf3f': 3,
u'y\xf3d': 3,
u'owm': 3,
u'sk\u0151': 3,
u'sk\u0142': 3,
u'owu': 3,
u'lvm': 3,
u'lvl': 3,
u'\u0151g\xf6': 3,
u'#\u03bc\u03bc': 3,
u'jm\u0171': 2,
u'wja': 2,
u'wjs': 2,
u'\xe9r\xfa': 2,
u'\xe9r\xf6': 2,
u'cz\u0105': 2,
u'x#z': 2,
u'=co': 2,
u'jmb': 2,
u'p\xe9\xfc': 2,
u'dn\xf3': 2,
u'r+#': 2,
u'uob': 2,
u'b\u0171b': 2,
u'nzl': 2,
u'\u0161\xedn': 2,
u'e\u0144#': 2,
u'yfl': 2,
u'irv': 2,
u'irf': 2,
u'cup': 2,
u'tz\xe1': 2,
u'cue': 2,
u'\u03ba\u03cc\u03c0': 2,
u'rgm': 2,
u'a\u0142g': 2,
u'k\xf6y': 2,
u'\u0144#p': 2,
u'mbk': 2,
u'\xe4si': 2,
u'\xf1or': 2,
u'\xf1o#': 2,
u'ly\u0151': 2,
u'az\xfa': 2,
u'zr\xe1': 2,
u'\u0151k\xfa': 2,
u'\xfatz': 2,
u'lr\xf6': 2,
u'\u0151km': 2,
u'jih': 2,
u'm\xf3j': 2,
u'\xbacr': 2,
u'uks': 2,
u'fmp': 2,
u'ukf': 2,
u'ukd': 2,
u'\xf3cz': 2,
u'qor': 2,
u'vk\xfc': 2,
u'zhu': 2,
u'kj\xf3': 2,
u't#\u03b1': 2,
u'\u03af\u03c3\u03bc': 2,
u'\u017cbi': 2,
u'#cw': 2,
u'u\u0219e': 2,
u'#\u03b4\u03b9': 2,
u'vud': 2,
u'ivl': 2,
u'#c\u0153': 2,
u'ivk': 2,
u'ez\xf6': 2,
u'#\u02dac': 2,
u'tvv': 2,
u'\xe1a#': 2,
u'\u0171vk': 2,
u'bjp': 2,
u'\xe8ch': 2,
u'heh': 2,
u't#\xba': 2,
u'g\xf6b': 2,
u'pb#': 2,
u'smh': 2,
u'pbr': 2,
u'pbo': 2,
u'xol': 2,
u'xoi': 2,
u'gsk': 2,
u'\xfap\xe1': 2,
u'\u0119to': 2,
u'\u03cc\u03c4\u03b1': 2,
u'\u0161pe': 2,
u'vlj': 2,
u'#ff': 2,
u'\xfapo': 2,
u'\xe9ju': 2,
u'wre': 2,
u'hzi': 2,
u'wrz': 2,
u'\xe9j#': 2,
u'gfn': 2,
u'#=#': 2,
u'\u013eub': 2,
u'#=r': 2,
u'\u03bfc#': 2,
u'dfu': 2,
u'dft': 2,
u'dfm': 2,
u'dfg': 2,
u'aau': 2,
u'aap': 2,
u'ojb': 2,
u'zk\xfa': 2,
u'crv': 2,
u'\u0151r\xed': 2,
u'crk': 2,
u'crn': 2,
u'\xf3gt': 2,
u'\xf3ge': 2,
u'zkh': 2,
u'zky': 2,
u'\u0151ro': 2,
u'\xfch\xfc': 2,
u'nrk': 2,
u'nrj': 2,
u'nrg': 2,
u'nrs': 2,
u'nrp': 2,
u'rf\u0171': 2,
u'vov': 2,
u'h\xe9i': 2,
u'\xedj\xfc': 2,
u'\u0161#t': 2,
u'vgr': 2,
u'ijp': 2,
u'ijr': 2,
u'\u0161#\xfa': 2,
u'd\u0103n': 2,
u'ciw': 2,
u'nm#': 2,
u'\u0159\xedm': 2,
u'lpg': 2,
u'bvt': 2,
u'\u0171zg': 2,
u'vco': 2,
u'evk': 2,
u'evd': 2,
u'trg': 2,
u'trz': 2,
u'trs': 2,
u'y\xfas': 2,
u'tdn': 2,
u'tr\xe8': 2,
u'\u017a#v': 2,
u'r\xf3\xfc': 2,
u'xcs': 2,
u'pf\xfc': 2,
u'lzj': 2,
u'lzm': 2,
u'lz\xed': 2,
u'w#u': 2,
u'w#o': 2,
u'\xe1#\u013e': 2,
u'jau': 2,
u'jaq': 2,
u'\u0161i#': 2,
u'db#': 2,
u'aec': 2,
u'aej': 2,
u'db\xe1': 2,
u'a#\u03ba': 2,
u'\u0161i\u010d': 2,
u'zg\xf6': 2,
u'cv#': 2,
u'lgd': 2,
u'rbn': 2,
u'rbh': 2,
u'\u03b3\u03b5\u03b9': 2,
u'e\xe1#': 2,
u'\xfaiv': 2,
u'\xfair': 2,
u'lg\xf3': 2,
u'p\u0151h': 2,
u'p\u0151s': 2,
u'a#\u017e': 2,
u'vse': 2,
u'\xf6mu': 2,
u'p\u0151\xe1': 2,
u'in\u015b': 2,
u'vs#': 2,
u'krv': 2,
u'krt': 2,
u'in\xfa': 2,
u'jzu': 2,
u'yrs': 2,
u'lfv': 2,
u'\u0105cy': 2,
u'lfs': 2,
u'+ft': 2,
u'+fj': 2,
u'er\u010d': 2,
u'cwi': 2,
u'o\xf6s': 2,
u'cma': 2,
u'cmu': 2,
u'\xe1is': 2,
u'l\xf6g': 2,
u'brk': 2,
u'brc': 2,
u'nkw': 2,
u'tnp': 2,
u'nk\xfa': 2,
u'\xeb#a': 2,
u'\xed\xe1n': 2,
u'\xfc\xe1n': 2,
u'hrd': 2,
u'\xb1os': 2,
u'k\xe9d': 2,
u'hr\xe1': 2,
u'k#\u03b1': 2,
u's\u0171n': 2,
u'\xe9bh': 2,
u'\xe9bt': 2,
u'#\u0159\xed': 2,
u'zc\xf3': 2,
u'\xe0#m': 2,
u'\xe0#d': 2,
u'rnl': 2,
u'rnf': 2,
u'lcb': 2,
u'lcn': 2,
u'ob\xe9': 2,
u'k#\u0111': 2,
u'f\xf3z': 2,
u'\xe0#\xe1': 2,
u'obv': 2,
u't\xfaz': 2,
u'wah': 2,
u'wac': 2,
u'k#\xe5': 2,
u'kvc': 2,
u'\xedr\xf6': 2,
u'n\u0151\xe1': 2,
u'bn\xf6': 2,
u'gu#': 2,
u'yvp': 2,
u'yvf': 2,
u'gub': 2,
u'guo': 2,
u'guz': 2,
u'gu\xea': 2,
u'uzt': 2,
u'ksm': 2,
u'ibh': 2,
u'ibc': 2,
u'bn\u0151': 2,
u'\xf3rf': 2,
u'\xf3rl': 2,
u'\u0171b\xe1': 2,
u'no\xe9': 2,
u'no\xf6': 2,
u'yii': 2,
u'y#\u03b2': 2,
u'y#\u03b5': 2,
u'mrm': 2,
u'\u011b\u0159i': 2,
u'o\xe9b': 2,
u'o\xe9v': 2,
u'\u03c3\u03bc\u03b1': 2,
u't\u0117j': 2,
u'#xo': 2,
u'\u0153ur': 2,
u'rwc': 2,
u'tf\u0171': 2,
u'gj#': 2,
u'psv': 2,
u'src': 2,
u'dzo': 2,
u'dzn': 2,
u'srr': 2,
u'\xed#\xe1': 2,
u'am\u0163': 2,
u'rjn': 2,
u'\xed#m': 2,
u'\xed#j': 2,
u'\xed#f': 2,
u'\xed##': 2,
u'v\u010d\xed': 2,
u'\xe9\xe1h': 2,
u'\xf6gg': 2,
u'\u03c0\u03c4\u03b5': 2,
u'jr\xfa': 2,
u'+ne': 2,
u'#s\xe8': 2,
u'\xe9mf': 2,
u'fff': 2,
u'\u015fan': 2,
u'cev': 2,
u'uv#': 2,
u'ifs': 2,
u'ifz': 2,
u'ifn': 2,
u'uv\xed': 2,
u'ka\u0144': 2,
u'ff\u0151': 2,
u'rw\u0119': 2,
u'h\xfar': 2,
u'tf\xfa': 2,
u'bzp': 2,
u'qba': 2,
u'bze': 2,
u'\u0171fa': 2,
u'bzi': 2,
u'ymy': 2,
u'\xfdje': 2,
u'iy#': 2,
u'm#\u010d': 2,
u'gmp': 2,
u'\xe1jg': 2,
u'm#\u0111': 2,
u'n\xf3l': 2,
u'ryg': 2,
u'i\u0146\u0161': 2,
u'hjo': 2,
u'bg\xf6': 2,
u'pu\u017c': 2,
u'bge': 2,
u'dv\xed': 2,
u'#\u0151g': 2,
u'dvv': 2,
u've\u010f': 2,
u'lkk': 2,
u'wi\u015b': 2,
u'lkp': 2,
u'tmk': 2,
u'\u0146\u0161#': 2,
u'wi\u0119': 2,
u'lk\xfa': 2,
u'n\u0103#': 2,
u'ozb': 2,
u'wia': 2,
u'wiv': 2,
u'wip': 2,
u'e+a': 2,
u'\xedz\xf6': 2,
u'+r\xf3': 2,
u'jn#': 2,
u'\xf6yk': 2,
u'jn\xe9': 2,
u'+r#': 2,
u'cyc': 2,
u'm\xeam': 2,
u'fj\xe1': 2,
u'fjo': 2,
u'tbl': 2,
u'#j\u0151': 2,
u'ng\xf6': 2,
u'qfe': 2,
u'\xf6bs': 2,
u'#jv': 2,
u'ya\xe9': 2,
u'#\u03bfc': 2,
u'#\u03bf#': 2,
u'c\xf6k': 2,
u'eah': 2,
u'eaa': 2,
u'eae': 2,
u'hn\xe1': 2,
u'bca': 2,
u'bcr': 2,
u'me\xf3': 2,
u'au\xdf': 2,
u'mef': 2,
u'mew': 2,
u'mep': 2,
u'drr': 2,
u'sju': 2,
u'ov\u0148': 2,
u'd\u0171k': 2,
u'ov\xf6': 2,
u'pr\xe4': 2,
u'\u0119s\xe1': 2,
u'ovh': 2,
u'wup': 2,
u'wuj': 2,
u'wun': 2,
u'b\xf3s': 2,
u'au\u0219': 2,
u'h\u0151r': 2,
u'gsd': 2,
u'\u03c2#\u03c3': 2,
u'm\xf6#': 2,
u'\u03b5\u03bc\u03b1': 2,
u'fn\xe1': 2,
u'm\xf6k': 2,
u'unv': 2,
u'\xf3ns': 2,
u'\xednd': 2,
u'kiw': 2,
u'q#o': 2,
u'#nm': 2,
u'\xb0ct': 2,
u'fm\xf3': 2,
u'eee': 2,
u'\u0163#b': 2,
u'na\u0161': 2,
u'fsb': 2,
u'fsj': 2,
u'fsm': 2,
u'fsw': 2,
u'vpk': 2,
u'\u03b7#\u03c4': 2,
u'\u03b7#\u03c8': 2,
u'\xf3\u0142k': 2,
u'\u03b7#\u03b4': 2,
u'ayk': 2,
u'ayg': 2,
u'ma\xf1': 2,
u'ma\xf3': 2,
u'ma\xed': 2,
u'ma\xdf': 2,
u'gvv': 2,
u'y\xf6b': 2,
u'maq': 2,
u'hb\u0151': 2,
u'e#\u017e': 2,
u'xl#': 2,
u'or\xf6': 2,
u'xlo': 2,
u'vp\xf3': 2,
u'c\xe1g': 2,
u'c\xe1d': 2,
u'\u015fje': 2,
u'xp\xf3': 2,
u'jtf': 2,
u'or\u0119': 2,
u'e#\xb0': 2,
u'aqb': 2,
u'fda': 2,
u'\u010dre': 2,
u'dkl': 2,
u'\xe4is': 2,
u'oif': 2,
u'oix': 2,
u'ujd': 2,
u'vh\u0151': 2,
u'zlu': 2,
u'\xfanr': 2,
u'h\xf6m': 2,
u'\xf3b\xf6': 2,
u'\xf3b\xe9': 2,
u'p\xf3\u0142': 2,
u'\xfcm#': 2,
u'e\u015f#': 2,
u'#bq': 2,
u'\xfd#\xe9': 2,
u'\xfcmm': 2,
u'e\u015fl': 2,
u'+ak': 2,
u'u\u0144i': 2,
u'p\xf3m': 2,
u'\xfd#t': 2,
u'\u0107cs': 2,
u'd\xfad': 2,
u'z\xfcf': 2,
u'\xe7aj': 2,
u'n\u0151a': 2,
u'k\xfdj': 2,
u'eya': 2,
u'eyv': 2,
u'm\u0151b': 2,
u'bk\xfc': 2,
u'\u0161ve': 2,
u'd=s': 2,
u'pch': 2,
u'sbb': 2,
u'j\xf3\xfc': 2,
u'\u03c1#a': 2,
u'im\u0171': 2,
u'u#\u0151': 2,
u'o#\xe0': 2,
u'u\xf6v': 2,
u'u\xf6e': 2,
u'#\xe9c': 2,
u'\xe9im': 2,
u'ns\u0171': 2,
u'rgp': 2,
u'rgc': 2,
u'\xf3f\u0151': 2,
u'rg\xed': 2,
u'ufl': 2,
u'ufn': 2,
u'oek': 2,
u'ufr': 2,
u'a\u0144c': 2,
u'nsf': 2,
u'nsy': 2,
u'\xfajm': 2,
u'ns\xf3': 2,
u'w\u0119#': 2,
u'ddf': 2,
u'e\u015bn': 2,
u'+es': 2,
u'chv': 2,
u'iih': 2,
u'\xe1dg': 2,
u'tsm': 2,
u'qu\xe9': 2,
u'ts=': 2,
u'bw#': 2,
u'mi\u0107': 2,
u'v\xfc#': 2,
u'\xfc#a': 2,
u'\xfc#k': 2,
u'\xfc#i': 2,
u'pg#': 2,
u'sf\xe2': 2,
u'\u0117f\xe9': 2,
u'sfm': 2,
u'sfh': 2,
u'k\xfan': 2,
u'ai\xe1': 2,
u'\xe9m\xf6': 2,
u'r\u0119b': 2,
u'\u0103nc': 2,
u'\u015fb\xf3': 2,
u'uix': 2,
u'v\xe1p': 2,
u'wyo': 2,
u'gmv': 2,
u'gmc': 2,
u'u\u0161a': 2,
u'dch': 2,
u'\u0119ba': 2,
u'rcp': 2,
u'm\xfar': 2,
u'rxn': 2,
u'y\xe1\u0144': 2,
u'oaj': 2,
u'+#l': 2,
u'ubc': 2,
u'v\u0171v': 2,
u'\xedug': 2,
u'mnt': 2,
u'zdn': 2,
u'wdo': 2,
u'q#m': 2,
u's\xedu': 2,
u's\xedo': 2,
u's\xedf': 2,
u'\xf6ro': 2,
u'\xfc#\xe9': 2,
u'u\u017ca': 2,
u'to\u015f': 2,
u'cl#': 2,
u'\xe1hi': 2,
u'\xe1hy': 2,
u'\xe1h#': 2,
u'nht': 2,
u'eqo': 2,
u'\u03c4\u03bf\u03c2': 2,
u'cyp': 2,
u'to\xfc': 2,
u'bsu': 2,
u'bsp': 2,
u'bsa': 2,
u'pkn': 2,
u'\xfcra': 2,
u'\xfcrb': 2,
u'\u0151#\u0161': 2,
u'tvr': 2,
u'+#g': 2,
u'+r\u0151': 2,
u'+#j': 2,
u'+#x': 2,
u'n\u0117n': 2,
u'\xe9ae': 2,
u'\xe9aa': 2,
u'hse': 2,
u'hsr': 2,
u'hss': 2,
u'\u0142go': 2,
u'\xe8qu': 2,
u'\u0151r\xfa': 2,
u'\xf6kc': 2,
u'gi\xfc': 2,
u'cns': 2,
u'ro\u017e': 2,
u'ro\u015f': 2,
u'kyl': 2,
u'\u03b9#\u03c4': 2,
u'\u03b9#\u03b1': 2,
u'tbj': 2,
u'g#\xb0': 2,
u'gts': 2,
u'vt\xed': 2,
u'yub': 2,
u'l\xf3\xf6': 2,
u'el\u017c': 2,
u'uy\xe8': 2,
u'fcv': 2,
u'fca': 2,
u'uyf': 2,
u'fcn': 2,
u'g#\u0161': 2,
u'g#\u0171': 2,
u'nlg': 2,
u'nln': 2,
u'cmn': 2,
u'gm\xf3': 2,
u'tb\xf6': 2,
u'pob': 2,
u'wrs': 2,
u'\u0117ne': 2,
u'v\xe9\xe1': 2,
u'ehb': 2,
u'n\xfca': 2,
u'n\xfcj': 2,
u'u\xeas': 2,
u'v\xe9f': 2,
u'\xe9er': 2,
u'\xe9ei': 2,
u'\xe9el': 2,
u'n\xfc\xe1': 2,
u'ptc': 2,
u'ptv': 2,
u'\u03ac#a': 2,
u'z\u0142o': 2,
u'#jt': 2,
u'lh\u0171': 2,
u'rkc': 2,
u'\xf6no': 2,
u'\xf1al': 2,
u'\xf1an': 2,
u'lh\xe9': 2,
u'\u015flu': 2,
u'\xf6rc': 2,
u'o\u017e#': 2,
u'o\u017eb': 2,
u'#rr': 2,
u'#rz': 2,
u'js\xe9': 2,
u'js\xfc': 2,
u'a\xfcn': 2,
u'\xe9#w': 2,
u'b\u0151g': 2,
u'a\u0161p': 2,
u'm\xedm': 2,
u'g\u0171b': 2,
u'\xe1pp': 2,
u'\u0171tv': 2,
u'tgp': 2,
u'rt\u0171': 2,
u'o\xe1t': 2,
u'\xf3il': 2,
u'tg\xf6': 2,
u'\u0171ex': 2,
u'qas': 2,
u'qad': 2,
u's\u0151g': 2,
u's\xf8r': 2,
u'\xfczs': 2,
u'ylu': 2,
u'\u010d#k': 2,
u'ixr': 2,
u'\u010d#u': 2,
u'i#\xe5': 2,
u'r\xe4f': 2,
u'l\xe4c': 2,
u'o\u0144s': 2,
u'l\xe4j': 2,
u'l\xe4r': 2,
u'r\u010ds': 2,
u'v\xedc': 2,
u'hkh': 2,
u'si\u0107': 2,
u'si\xe9': 2,
u'#\u010da': 2,
u'dwe': 2,
u'rfs': 2,
u'\xf3#\u0161': 2,
u'\u0151at': 2,
u't\xf3\xed': 2,
u't\xf3\xf6': 2,
u'\xf3#q': 2,
u'\xfazb': 2,
u'r\xfap': 2,
u's#\u03bf': 2,
u'y\xe1\xf1': 2,
u'dl\xe9': 2,
u'#vj': 2,
u'jo\xeb': 2,
u'##q': 2,
u'##\xba': 2,
u'dll': 2,
u'jow': 2,
u'jof': 2,
u'joj': 2,
u'#++': 2,
u'fk\xe1': 2,
u'dl\u0151': 2,
u'rp\xed': 2,
u'cr\xe8': 2,
u'fk#': 2,
u'\xedda': 2,
u'kd\xe1': 2,
u'tck': 2,
u'\xedds': 2,
u'#+e': 2,
u'\xedd\xfc': 2,
u'kdu': 2,
u's#\u017e': 2,
u's#\u015b': 2,
u'\u0123ir': 2,
u'e\xe9#': 2,
u's#\xe5': 2,
u'bch': 2,
u'mys': 2,
u'myu': 2,
u'myt': 2,
u'myi': 2,
u's#<': 2,
u'myc': 2,
u'##\u03bf': 2,
u'#m\xea': 2,
u'g\xedz': 2,
u'\u03c0\u03b1\u03bd': 2,
u'xt\xf3': 2,
u'kn\xfc': 2,
u'voa': 2,
u'#\u03c4\u03b7': 2,
u'vox': 2,
u'm\u0163#': 2,
u'e\u0161i': 2,
u'e\u0161o': 2,
u'uph': 2,
u'mdf': 2,
u'mdr': 2,
u'mdu': 2,
u'md#': 2,
u'f\xfch': 2,
u'zhk': 2,
u'xir': 2,
u'j\u0151#': 2,
u'huh': 2,
u'lps': 2,
u'iij': 2,
u'lpj': 2,
u'j\u0151v': 2,
u'j\u0151s': 2,
u'n\xe9i': 2,
u'\u0151eu': 2,
u'jk\u0151': 2,
u'\u015fia': 2,
u'zt\u0171': 2,
u'prr': 2,
u'i\u0107c': 2,
u'dhj': 2,
u'dhu': 2,
u'jkt': 2,
u'\u011b\u0161\xed': 2,
u'\u0171m\u0171': 2,
u'fov': 2,
u'kh\u0151': 2,
u'foy': 2,
u'\xf3af': 2,
u'\u0171m\xe1': 2,
u'lwe': 2,
u'qin': 2,
u'\u0171me': 2,
u'nxl': 2,
u'kgf': 2,
u'\xe9\xe1l': 2,
u'vep': 2,
u'\xf6ga': 2,
u'veo': 2,
u'h\xf3f': 2,
u've\xe1': 2,
u'g\xe9m': 2,
u'yda': 2,
u'ipg': 2,
u'ipy': 2,
u'z\xe1u': 2,
u'bl\xfc': 2,
u'\xe1co': 2,
u'sa\u0161': 2,
u'hch': 2,
u'hci': 2,
u'\xe2ne': 2,
u'bl#': 2,
u'sax': 2,
u'axe': 2,
u'axl': 2,
u'\u0119#w': 2,
u'xme': 2,
u'u#\xf3': 2,
u'\u0161k\xf3': 2,
u'\xebl#': 2,
u'\u03ae#\u03c0': 2,
u'\xe9t\xed': 2,
u'\xfarv': 2,
u'zpb': 2,
u'\u0103i#': 2,
u'\u0151il': 2,
u'\u0151ij': 2,
u'wp#': 2,
u'b\xfcg': 2,
u'gdj': 2,
u'\u0171\xe1r': 2,
u'dd\xed': 2,
u'gd#': 2,
u'ac\xf3': 2,
u'ac\xfa': 2,
u'\xe4je': 2,
u'acj': 2,
u'\u0148k\xe1': 2,
u'y\xe8r': 2,
u'ohm': 2,
u'uip': 2,
u'\xf3eu': 2,
u'\xf3ez': 2,
u'\xfcn\u0151': 2,
u'zm\xed': 2,
u'zm\xf6': 2,
u'\u03cc\u03c0\u03c4': 2,
u'\xf6kd': 2,
u'#e+': 2,
u'a\xf3n': 2,
u'\xfcn#': 2,
u'vi\xe9': 2,
u'=#o': 2,
u'\xf6k\xfa': 2,
u'kl\xfc': 2,
u'it+': 2,
u'r\xe8m': 2,
u'it\xe8': 2,
u'it\xe3': 2,
u'it\xe0': 2,
u'ftu': 2,
u'ftw': 2,
u'ftj': 2,
u'ftl': 2,
u'bh\xf3': 2,
u'\u010fal': 2,
u'e\xfcs': 2,
u'\u0171tt': 2,
u'k\xfc#': 2,
u'\u0171tj': 2,
u'k\xfcj': 2,
u'tpp': 2,
u'o\u0144c': 2,
u'se\xf1': 2,
u'se\xe9': 2,
u'e\xfcn': 2,
u'xai': 2,
u'xam': 2,
u'xac': 2,
u'mlh': 2,
u'mln': 2,
u'mlr': 2,
u'n\xe1u': 2,
u'\xe9hl': 2,
u'\xf3\xe9b': 2,
u'\xf3\xe9k': 2,
u'wbo': 2,
u'\u015fah': 2,
u'\u0151m\xf3': 2,
u'\u03b7\u03c6\u03af': 2,
u'\xe4fi': 2,
u'jcj': 2,
u'od\xfa': 2,
u'od\xfc': 2,
u'zi\u0151': 2,
u'#py': 2,
u'zi\u0107': 2,
u'rd\xe8': 2,
u'uec': 2,
u'rdw': 2,
u'\xfaki': 2,
u't\xfcc': 2,
u'npk': 2,
u'npd': 2,
u'le\u0151': 2,
u'#yp': 2,
u'\xfcje': 2,
u'\u012bks': 2,
u'a\u017ad': 2,
u'vm\xf3': 2,
u'\xfcj\xe9': 2,
u'=tr': 2,
u's\xe8t': 2,
u'ihh': 2,
u'bt\xed': 2,
u'ih\u0171': 2,
u'\xe1kp': 2,
u'\xe1kg': 2,
u'btm': 2,
u'btn': 2,
u'\xe1k\xfc': 2,
u'rvj': 2,
u'\u03b7\u03c2#': 2,
u'\xdf#j': 2,
u'\xdf#m': 2,
u'xeq': 2,
u'\xdf#n': 2,
u'mht': 2,
u'\xdf#\xe9': 2,
u't\xe3o': 2,
u'hpv': 2,
u'hpa': 2,
u'ptk': 2,
u'gl\xfc': 2,
u'\xe3os': 2,
u'\xe9l\xf6': 2,
u'la\xfc': 2,
u'la\xf6': 2,
u'#\u010di': 2,
u'uam': 2,
u'uab': 2,
u'zey': 2,
u'\xfagu': 2,
u'\xfagv': 2,
u'\u0151tr': 2,
u'b\xe1\xe9': 2,
u'ze\xf6': 2,
u'o\u0161i': 2,
u'b\xe1h': 2,
u'\xf6sp': 2,
u'nby': 2,
u'ze\u015b': 2,
u'sl\xfc': 2,
u'\u0142\u0105c': 2,
u'\u03bf##': 2,
u'yp#': 2,
u'\u03bf#e': 2,
u'suh': 2,
u'd\xf3o': 2,
u'co\xe9': 2,
u'ep\u0171': 2,
u'ilw': 2,
u'\xf3ps': 2,
u'\xf3pn': 2,
u'ni\xe9': 2,
u'il\u0103': 2,
u'\u03b9\u03bd\u03ae': 2,
u'vdt': 2,
u'bpp': 2,
u'ykb': 2,
u'ykk': 2,
u'ply': 2,
u'd\xe8c': 2,
u'c\xfcl': 2,
u'c\xfck': 2,
u'ht\xe1': 2,
u'htn': 2,
u'\xf3\xe1p': 2,
u'spv': 2,
u'sph': 2,
u'spc': 2,
u'spf': 2,
u'aoj': 2,
u'ghy': 2,
u'ghg': 2,
u'rl\xf6': 2,
u'rlb': 2,
u'lmh': 2,
u'\u0161ic': 2,
u'zay': 2,
u'\xedp\xe9': 2,
u'wco': 2,
u'\xedpa': 2,
u'kyt': 2,
u'yt\xfa': 2,
u'jt\xfa': 2,
u'gw\xe9': 2,
u'\u03b9\u03b1\u03ba': 2,
u'cc\xe9': 2,
u'uxb': 2,
u'uxl': 2,
u'\u03b5\u03c0\u03b1': 2,
u'#\u013eu': 2,
u'tld': 2,
u'ccl': 2,
u'ccd': 2,
u'l\xfcb': 2,
u'\u017a#\xe9': 2,
u'#\xbao': 2,
u'\xf3tm': 2,
u'\xf3th': 2,
u'\xf3tj': 2,
u'yox': 2,
u'yop': 2,
u'mpd': 2,
u'mpm': 2,
u'mpk': 2,
u'mpy': 2,
u'cvm': 2,
u'hh\xf3': 2,
u'u\xe9n': 2,
u'\xe8ve': 2,
u'n+n': 2,
u'l\u0103#': 2,
u'hh#': 2,
u'#\u015f\u0131': 2,
u'p\u012bk': 2,
u'li\u0151': 2,
u'pug': 2,
u'puz': 2,
u'\xe9d\xf3': 2,
u'#\u015fa': 2,
u'li\xfa': 2,
u'dtk': 2,
u'dtb': 2,
u'as\u0171': 2,
u'st\xe4': 2,
u'dt\xed': 2,
u'pu\u0142': 2,
u'z#\u03b5': 2,
u'z#\u03b1': 2,
u'oxe': 2,
u'oxc': 2,
u'xes': 2,
u'a\xfcs': 2,
u'a#\u015b': 2,
u'wod': 2,
u'jp\xe1': 2,
u'jpl': 2,
u'vyr': 2,
u'gsv': 2,
u'l#\xb0': 2,
u'p\xe4i': 2,
u'cgp': 2,
u'na\xf3': 2,
u'\xf3h\xf3': 2,
u'z#<': 2,
u'l#\u010d': 2,
u'kca': 2,
u'kcj': 2,
u'kct': 2,
u'yck': 2,
u'ycl': 2,
u'\u0171d\xfc': 2,
u'n\u015bw': 2,
u'i\u010dr': 2,
u'r\u0151z': 2,
u'r\u0151a': 2,
u'ec\xe1': 2,
u'ec\xe9': 2,
u'k\xf3w': 2,
u'k\xf3a': 2,
u'yrt': 2,
u'bex': 2,
u'e\u010fa': 2,
u'awy': 2,
u'awn': 2,
u'pyl': 2,
u'pya': 2,
u'g\u0151g': 2,
u'g\u0151f': 2,
u'g\u0151p': 2,
u'lu\xf1': 2,
u'y\xfcv': 2,
u'mgb': 2,
u'mge': 2,
u'mgp': 2,
u'shh': 2,
u'dpl': 2,
u'g\u0151\xe9': 2,
u'\u0105ck': 2,
u'e\xf3f': 2,
u'e\xf3m': 2,
u'e\xf3i': 2,
u'otd': 2,
u'sbv': 2,
u'wk#': 2,
u'jls': 2,
u'jlf': 2,
u'=do': 2,
u'jlc': 2,
u'jll': 2,
u'ie\u0142': 2,
u'ie\u0144': 2,
u'z\u0105c': 2,
u'#\u0123i': 2,
u'\u015f\u0131k': 2,
u'ne\xfc': 2,
u'ne\xf3': 2,
u'n#\u03b9': 2,
u'l\u0151\xf6': 2,
u'\xf3lk': 2,
u'\xf3lb': 2,
u'kk\xe4': 2,
u'\u017cel': 2,
u'kgi': 2,
u'ygh': 2,
u'#lp': 2,
u'\u03cc\u03b5\u03b4': 2,
u'n#\u0159': 2,
u'n#\u0123': 2,
u'isy': 2,
u'xup': 2,
u'ty\u0142': 2,
u'u\xe1z': 2,
u'n#\xe0': 2,
u'#\u03b1k': 2,
u'n#\xba': 2,
u'n#+': 2,
u'v\xf6g': 2,
u'v\xf6e': 2,
u'baw': 2,
u'baq': 2,
u'ba\xf1': 2,
u'ba\xfc': 2,
u'mcn': 2,
u'mcj': 2,
u'mch': 2,
u'ba\u0144': 2,
u'mcy': 2,
u'sly': 2,
u'slv': 2,
u'slg': 2,
u'\u0219es': 2,
u'u\xfcz': 2,
u'\u0151da': 2,
u'opg': 2,
u'wwd': 2,
u'wwb': 2,
u'wwk': 2,
u'op\xe4': 2,
u's\u0142u': 2,
u'b\u010d\xed': 2,
u't\xe8#': 2,
u's\u0142o': 2,
u'dix': 2,
u'tnk': 2,
u'di\xf1': 2,
u'fl#': 2,
u'ooh': 2,
u'jh\u0151': 2,
u'\xedoc': 2,
u'fl\xe4': 2,
u'\u017cak': 2,
u'a\u015bn': 2,
u'kks': 2,
u'g=h': 2,
u'td\xfa': 2,
u'#\u0161v': 2,
u'#\u0161k': 2,
u'\u03ae\u03b3\u03b5': 2,
u'iwe': 2,
u'k\u0151f': 2,
u'bm\u0171': 2,
u'pa\u0142': 2,
u'd#\u0171': 2,
u'pa\u017a': 2,
u'tuv': 2,
u'\u0103un': 2,
u'pa\xfc': 2,
u'v\xfas': 2,
u'qsa': 2,
u'bms': 2,
u'hdk': 2,
u's=t': 2,
u'hd\xf6': 2,
u'r\xf6b': 2,
u'f\xe1g': 2,
u'#\u03b5\u03c5': 2,
u'l\u017cb': 2,
u'bmo': 2,
u'#\u03b5\u03bc': 2,
u't\xe4t': 2,
u'wsj': 2,
u'wsn': 2,
u'wsr': 2,
u'f\u0151a': 2,
u'i\xfag': 2,
u'dew': 2,
u'\u03bc\u03b1s': 2,
u'ab\u010d': 2,
u'de\xfc': 2,
u'de\xf1': 2,
u'\u03bc\u03b1\u03c4': 2,
u'\u0151sb': 2,
u'\xedk\xfa': 2,
u'okw': 2,
u'okc': 2,
u'zj\xf6': 2,
u'\xf3dp': 2,
u'\u0142ot': 2,
u'ok\xfc': 2,
u'\xf3dm': 2,
u'ok\xed': 2,
u'h\xe8q': 2,
u'a\u015fj': 2,
u'\xf6h\xf6': 2,
u'vn#': 2,
u'ko\xfc': 2,
u'#dw': 2,
u'\xf6hl': 2,
u'u\u0142a': 2,
u'\u03bb\u03ae\u03b3': 2,
u'\u0142#z': 2,
u'l\xe9o': 2,
u'fud': 2,
u'ewj': 2,
u'ewm': 2,
u'ewr': 2,
u'mk\u0151': 2,
u'bii': 2,
u'bco': 2,
u'pef': 2,
u'bi\xe8': 2,
u'bi\xfc': 2,
u'\xf3k\xfc': 2,
u'c\u0153u': 2,
u'\u03c6\u03af\u03c3': 2,
u'sd\xf3': 2,
u'bcb': 2,
u'tkn': 2,
u'\u017eer': 2,
u'xba': 2,
u'\u0151lu': 2,
u'\xe9z\xed': 2,
u'e=p': 2,
u'e=s': 2,
u'e=a': 2,
u'e=o': 2,
u'\xe1v\xfc': 2,
u'th\xe8': 2,
u'afb': 2,
u'\u03b1kc': 2,
u'afy': 2,
u'afz': 2,
u'afp': 2,
u'e\u013en': 2,
u'jzt': 2,
u'k<#': 2,
u're\xfa': 2,
u'\u03b1\u03ba\u03cc': 2,
u'da\xfc': 2,
u'\xfah\xfa': 2,
u'#\u0142\u0105': 2,
u'lf\u0151': 2,
u'\u017aba': 2,
u'ksb': 2,
u'ksl': 2,
u'\xfck\xfc': 2,
u'\xfck\xf3': 2,
u't\u0117#': 2,
u'\xfcmt': 2,
u'\xfckc': 2,
u'=sl': 2,
u'ysa': 2,
u'ysr': 2,
u'=sr': 2,
u'cjo': 2,
u'\xfck\u0151': 2,
u'fy#': 2,
u'\xe1jj': 2,
u'\xe1jm': 2,
u'#b\xe2': 2,
u'p\xf3i': 2,
u'esw': 2,
u'tmg': 2,
u'\xf6tg': 2,
u'pi\xe8': 2,
u'pi\xe9': 2,
u'pi\xf3': 2,
u'\u0144in': 2,
u'buy': 2,
u'\u03bd\u03ae#': 2,
u'es\u0171': 2,
u'\xe1ol': 2,
u'l\xedk': 2,
u'\u0107#h': 2,
u'\u0107#n': 2,
u'\u0107#p': 2,
u'en\xeb': 2,
u'en\xf3': 2,
u'\u03c1\u03cc\u03b5': 2,
u'\u03c1\u03cc\u03c4': 2,
u'\xe9co': 2,
u'\xe9ca': 2,
u'\xe9cr': 2,
u'\xe9c#': 2,
u'goh': 2,
u'ra\u010d': 2,
u'm\u0151v': 2,
u'\xe4ck': 2,
u'thh': 2,
u'ocj': 2,
u'\u0142ub': 2,
u'ybu': 2,
u'nub': 2,
u'nua': 2,
u'k#\u017e': 2,
u'\xedso': 2,
u'\xfado': 2,
u'\xfadn': 2,
u'kwe': 2,
u'kw#': 2,
u'\xe1\u0144e': 2,
u'wfd': 2,
u'cnp': 2,
u'ob\xfc': 2,
u'cng': 2,
u'\xf3s\u0171': 2,
u'nn\xed': 2,
u'h\u0151l': 2,
u'\xf3s\xe9': 2,
u'o\u015fa': 2,
u'o\u015fi': 2,
u'\xf3sg': 2,
u'\xf3sf': 2,
u'\u0171cs': 2,
u'ur\xed': 2,
u'bq#': 2,
u'\u03bd\u03b5\u03c4': 2,
u'pmb': 2,
u'j\xe9p': 2,
u'lmp': 2,
u'\u017adz': 2,
u'j\xe9#': 2,
u'#vt': 2,
u'#vp': 2,
u'msn': 2,
u'oby': 2,
u'yj\xf6': 2,
u'i\xfa\xe9': 2,
u'waj': 2,
u'pr\xfa': 2,
u'gk\xf3': 2,
u'huf': 2,
u'hux': 2,
u'prc': 2,
u'prb': 2,
u'prh': 2,
u'prj': 2,
u'gkh': 2,
u'an+': 2,
u'ln\xf3': 2,
u'i\xf6s': 2,
u'rmv': 2,
u'dyv': 2,
u'dyd': 2,
u'y\u0151f': 2,
u'rmg': 2,
u'k#\xe0': 2,
u'yk\u0151': 2,
u'\u03b1s#': 2,
u'\u03c4\u03b5\u03c4': 2,
u'uhs': 2,
u'\xf6tc': 2,
u'vze': 2,
u'#pw': 2,
u'ig\xfc': 2,
u'jup': 2,
u'rvm': 2,
u'cbs': 2,
u'rvz': 2,
u'\u03b4\u03b9\u03b1': 2,
u'igp': 2,
u'#p\u012b': 2,
u'\xbaos': 2,
u'uwa': 2,
u'nbw': 2,
u'tew': 2,
u'fa\xf6': 2,
u'n\u0163\xe1': 2,
u'r#\xe0': 2,
u'r#\u015f': 2,
u'fki': 2,
u't\u011b\u0161': 2,
u'j\xfai': 2,
u'j\xfaz': 2,
u'izb': 2,
u'izd': 2,
u'u\xdf#': 2,
u'iz\u0171': 2,
u'efj': 2,
u'efb': 2,
u'efc': 2,
u'efg': 2,
u'pvs': 2,
u'pvd': 2,
u'ri\u0146': 2,
u'\u03c4\u03b1\u03c3': 2,
u't\xe1a': 2,
u'\u0151ci': 2,
u'b\xf6m': 2,
u'#t\u011b': 2,
u'zzl': 2,
u'\u0171in': 2,
u'#t\xe2': 2,
u'lj\u0151': 2,
u'#tp': 2,
u'#tl': 2,
u'\u0163\xe1b': 2,
u'rr\xed': 2,
u'i\u010de': 2,
u'\xeas#': 2,
u'rr\xfc': 2,
u'rrh': 2,
u'ta\u0144': 2,
u'fey': 2,
u'\xedfe': 2,
u'\xe1\xf1e': 2,
u'#\xb1o': 2,
u'vc\xe9': 2,
u'byn': 2,
u'byk': 2,
u'bya': 2,
u'yb\u0171': 2,
u'h#x': 2,
u'kb\xed': 2,
u'vca': 2,
u'#kc': 2,
u'vci': 2,
u'\xfcdj': 2,
u'nf\xf3': 2,
u'j\u010de': 2,
u'd\xe1d': 2,
u'eb\xed': 2,
u'ju#': 2,
u'\u0151\xf6l': 2,
u'\u0151\xf6r': 2,
u'ebc': 2,
u'n\xf6d': 2,
u'av\u010d': 2,
u'v\xf3p': 2,
u'pz\xfc': 2,
u't+#': 2,
u'hm#': 2,
u'bbz': 2,
u'hms': 2,
u'hmg': 2,
u's#=': 2,
u'avn': 2,
u'j\xfah': 2,
u'mfk': 2,
u'skt': 2,
u'skn': 2,
u'skh': 2,
u'si\xfa': 2,
u'\xf3rs': 2,
u'l\xe9a': 2,
u'y\xf3a': 2,
u'\xe1fr': 2,
u'n\xeb#': 2,
u'lv\xf6': 2,
})
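# Note (descriptive comment, an inference from the file path rather than the
# file itself): the mapping ending above pairs character trigrams with raw
# frequency counts ('#' apparently marks a word boundary) and serves as
# pylid's language-identification model for Hungarian.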
|
dmort27/pylid
|
pylid/langs/hu.py
|
Python
|
mit
| 343,717
|
[
"ADF",
"ASE",
"BWA",
"CDK",
"EPW",
"Elk",
"MOE"
] |
c9e1924a5a7360af8959b2f846210771f152624e4dbedfc89551dcd4b2a3facf
|
__author__ = "Dr. Lapis"
langversion = 1
langname = "Русский"
##updater
# text construct: "Version "+version+available+changelog
#example: Version 3 available, click here to download, or for changelog click here
available = " доступна, нажмите здесь, чтобы скачать"
changelog = ", или нажмите здесь, чтобы посмотреть список изменений"
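# A minimal sketch of the construction described above, assuming the caller
# holds the new version number in a hypothetical 'version' variable:
#   message = "Version " + str(version) + available + changelog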
##world gen
worldify = "из картинки"
planetoids = "Планетоиды и Терра"
arena = "Данжевая Арена"
flat = "Плоский мир"
new = "Новый мир:"
##mainmenu
#omnitool
settings = "Настройки"
report_issue = "Report Issue"
exit = "Выход"
#start
start = "Старт"
terraria = "Террария"
steamfree = "Террария без Стима"
#open
open = "Открыть"
imagefolder = "Картинки Миров"
backupfolder = "Откаты Миров"
themes = "Темы Omnitool"
#visit
visit = "Посетить"
donate = "Пожертвовать"
homepage = "Домашняя страница Omnitool"
TO = "Форум Terraria Online"
wiki = "Англоязычная Террария Вики"
##world thumbnail
label = "Мир: "
##settings menu
warning = "Для того, чтобы некоторые изменения вступили в силу, надо перезагрузить Omnitool"
none = "Нет"
tiny = "Микро" #unused
small = "Маленький"
medium = "Нормальный"
large = "Большой"
very_large = "XXL"
theme_select = "Выбор темы:"
thumbsize = "Размер миниатюры мира:"
mk_backups = "Сделать Откаты:"
world_columns = "Количество Колонок Миров:"
##world interaction menu
wa_worldactionmenu = "Действие для {}:"
wa_imageopen = "Открыть картинку"
wa_renderopen = "Отрендерить Мир"
wa_teditopen = "Открыть в TEdit"
wa_update = "Обновить Картинку"
wa_super = "Сгенерировать Супер Картинку"
##planetoids & terra
pt_start = 'Начать генерацию!'
pt_name = "Имя: "
pt_mode = "Режим: "
pt_small = "Планетоиды (маленький мир)"
pt_medium = "Планетоиды (средний мир)"
pt_large = "Планетоиды (большой мир)"
pt_square = "Планетоиды (квадратный мир)"
pt_both = "Терра & Планетоиды (большой мир)"
pt_square_terra = "Терра (квадратный мир)"
pt_start_sel = "Начальное время суток: "
pt_morning = "Утро"
pt_day = "День"
pt_night = "Ночь"
pt_bloodmoon = "Кровавая Луна"
pt_extras = "Дополнительно: "
pt_sun = "Солнце: "
pt_atlantis = "Водный Мир: "
pt_merchant = "Торговец сначала: "
pt_lloot = "Меньше вещей в сундуках: "
pt_mirror = "Зеркальный режим: "
pt_pre = "Префиксы Вещей: "
##worldify
w_start = "Начать превращение картинки в мир!"
w_cont = "Продолжить"
w_name = "Имя: "
w_rgb = "RGB"
w_hsv = "утяжелённый HSV"
w_method = "Метод: "
w_priority = "Выбрать Приоритет:"
w_hue = "Оттенок: "
w_saturation = "Насыщенность: "
w_brightness = "Яркость: "
##arena
a_start = "Начать генерацию!"
a_name = "Имя: "
a_rooms = "Кол-во комнат: "
a_sidelen = "Длина комнат: "
a_corlen = "Длина коридоров: "
a_chest = "Вероятность появления сундука в комнате: "
a_itemchest = "Вещей в каждом сундуке: "
a_light = "Освещение: "
a_chances = "Шансы генерации комнаты как: "
a_standard = "Стандартной: "
a_cross = "Перекрёсток: "
##torch
at_chances = "Шанс генерации освещения различных цветов:"
at_full = "Полный спектр"
at_blue = "Синий"
at_red = "Красный"
at_green = "Зелёный"
at_pink = "Демонический"
at_white = "Белый"
at_yellow = "Жёлтый"
at_purple = "Фиолетовый"
at_lime = "Проклятый"
##plugins
pl_start = "Запустить плагин"
pl_rec = "Выберите мир для получения"
pl_mod = "Выберите мир для изменения"
pl_trans = "Выберите два мира для трансфера"
pl_trans_source = "Источник"
pl_trans_target = "Цель"
##flatworld
fw_size = "Размер мира:"
fw_tiny = "микро"
fw_square = "квадратный"
fw_small = "маленький"
fw_medium = "средний"
fw_large = "большой"
fw_tile = "Тип блоков:"
fw_wall = "Тип стен:"
fw_surf = "Тип поверхности:"
|
Berserker66/omnitool
|
omnitool/Language/russian.py
|
Python
|
mit
| 4,942
|
[
"VisIt"
] |
b180fd96bdc70c428cc02e2dc1ec8ec6de551826584c81e49bb6870fe5954eb5
|
# -*- coding: utf-8 -*-
from kivy.lang import Builder
from kivy.properties import BoundedNumericProperty, ReferenceListProperty
from kivy.properties import OptionProperty, ListProperty
from kivy.uix.widget import Widget
from kivy.utils import get_color_from_hex
from kivymd.color_definitions import text_colors
from kivymd.theming import ThemableBehavior
Builder.load_string('''
<BackgroundColorBehavior>
canvas:
Color:
rgba: self.md_bg_color
Rectangle:
size: self.size
pos: self.pos
''')
class BackgroundColorBehavior(Widget):
r = BoundedNumericProperty(1., min=0., max=1.)
g = BoundedNumericProperty(1., min=0., max=1.)
b = BoundedNumericProperty(1., min=0., max=1.)
a = BoundedNumericProperty(0., min=0., max=1.)
md_bg_color = ReferenceListProperty(r, g, b, a)
class SpecificBackgroundColorBehavior(BackgroundColorBehavior):
background_palette = OptionProperty(
'Primary',
options=['Primary', 'Accent',
'Red', 'Pink', 'Purple', 'DeepPurple', 'Indigo', 'Blue',
'LightBlue', 'Cyan', 'Teal', 'Green', 'LightGreen',
'Lime', 'Yellow', 'Amber', 'Orange', 'DeepOrange',
'Brown', 'Grey', 'BlueGrey'])
background_hue = OptionProperty(
'500',
options=['50', '100', '200', '300', '400', '500', '600', '700',
'800', '900', 'A100', 'A200', 'A400', 'A700'])
specific_text_color = ListProperty([0, 0, 0, 0.87])
specific_secondary_text_color = ListProperty([0, 0, 0, 0.87])
def _update_specific_text_color(self, instance, value):
if hasattr(self, 'theme_cls'):
palette = {'Primary': self.theme_cls.primary_palette,
'Accent': self.theme_cls.accent_palette
}.get(self.background_palette, self.background_palette)
else:
palette = {'Primary': 'Blue',
'Accent': 'Amber'
}.get(self.background_palette, self.background_palette)
if text_colors[palette].get(self.background_hue):
color = get_color_from_hex(text_colors[palette]
[self.background_hue])
else:
# Some palettes do not have 'A100', 'A200', 'A400', 'A700'
# In that situation just default to using 100/200/400/700
hue = self.background_hue[1:]
color = get_color_from_hex(text_colors[palette][hue])
secondary_color = color[:]
# Check for black text (need to adjust opacity)
if (color[0] + color[1] + color[2]) == 0:
color[3] = 0.87
secondary_color[3] = 0.54
else:
secondary_color[3] = 0.7
self.specific_text_color = color
self.specific_secondary_text_color = secondary_color
def __init__(self, **kwargs):
super(SpecificBackgroundColorBehavior, self).__init__(**kwargs)
if hasattr(self, 'theme_cls'):
self.theme_cls.bind(primary_palette=self._update_specific_text_color)
self.theme_cls.bind(accent_palette=self._update_specific_text_color)
self.theme_cls.bind(theme_style=self._update_specific_text_color)
self.bind(background_hue=self._update_specific_text_color)
self.bind(background_palette=self._update_specific_text_color)
self._update_specific_text_color(None, None)
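# Usage sketch (hypothetical, not part of this file): mix the behavior into a
# widget to get a theme-aware background plus readable text colors, e.g.
#   class MyBar(SpecificBackgroundColorBehavior):
#       pass
#   bar = MyBar(background_palette='Primary', background_hue='500')
#   bar.specific_text_color  # a text color legible on that background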
|
cruor99/KivyMD
|
kivymd/backgroundcolorbehavior.py
|
Python
|
mit
| 3,479
|
[
"Amber"
] |
5e492ebafd9f6882d6d554debe1d0cd97e49482f8416f025e60b806105addfaa
|
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
SIZE_RANGES = {
'Y': 1 << 80,
'Z': 1 << 70,
'E': 1 << 60,
'P': 1 << 50,
'T': 1 << 40,
'G': 1 << 30,
'M': 1 << 20,
'K': 1 << 10,
'B': 1,
}
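# Illustrative use (an assumption, not shown in this excerpt): a
# human-readable size such as '2K' scales by the matching factor, e.g.
#   2 * SIZE_RANGES['K'] == 2048  # bytes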
FILE_ATTRIBUTES = {
'A': 'noatime',
'a': 'append',
'c': 'compressed',
'C': 'nocow',
'd': 'nodump',
'D': 'dirsync',
'e': 'extents',
'E': 'encrypted',
'h': 'blocksize',
'i': 'immutable',
'I': 'indexed',
'j': 'journalled',
'N': 'inline',
's': 'zero',
'S': 'synchronous',
't': 'notail',
'T': 'blockroot',
'u': 'undelete',
'X': 'compressedraw',
'Z': 'compresseddirty',
}
PASS_VARS = {
'check_mode': 'check_mode',
'debug': '_debug',
'diff': '_diff',
'keep_remote_files': '_keep_remote_files',
'module_name': '_name',
'no_log': 'no_log',
'remote_tmp': '_remote_tmp',
'selinux_special_fs': '_selinux_special_fs',
'shell_executable': '_shell',
'socket': '_socket_path',
'syslog_facility': '_syslog_facility',
'tmpdir': '_tmpdir',
'verbosity': '_verbosity',
'version': 'ansible_version',
}
PASS_BOOLS = ('no_log', 'debug', 'diff')
# Ansible modules can be written in any language.
# The functions available here can be used to do many common tasks,
# to simplify development of Python modules.
import __main__
import atexit
import locale
import os
import re
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from itertools import chain, repeat
try:
import syslog
HAS_SYSLOG = True
except ImportError:
HAS_SYSLOG = False
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
HAVE_SELINUX = False
try:
import selinux
HAVE_SELINUX = True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
try:
import json
# Detect the python-json library which is incompatible
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json and was not found!", "failed": true}')
sys.exit(1)
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
# we may have been able to import md5 but it could still not be available
try:
hashlib.md5()
except ValueError:
AVAILABLE_HASH_ALGORITHMS.pop('md5', None)  # 'algorithms' may be an immutable tuple; remove from the dict instead
except Exception:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except Exception:
pass
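# Net effect: AVAILABLE_HASH_ALGORITHMS maps algorithm names to constructors,
# so e.g. AVAILABLE_HASH_ALGORITHMS['sha1']() returns a fresh hasher whichever
# of the import paths above succeeded.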
from ansible.module_utils.common._collections_compat import (
deque,
KeysView,
Mapping, MutableMapping,
Sequence, MutableSequence,
Set, MutableSet,
)
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.file import is_executable
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.six import (
PY2,
PY3,
b,
binary_type,
integer_types,
iteritems,
string_types,
text_type,
)
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
SEQUENCETYPE = frozenset, KeysView, Sequence
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
_NUMBERTYPES = tuple(list(integer_types) + [float])
# Deprecated compat. Only kept in case another module used these names.
# Using ansible.module_utils.six is preferred.
NUMBERTYPES = _NUMBERTYPES
imap = map
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = text_type
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS = dict(
# These are things we want: settings for metadata (mode, ownership, permissions in general) on
# created files (these are used by set_fs_attributes_if_different and included in
# load_file_common_arguments)
mode=dict(type='raw'),
owner=dict(),
group=dict(),
seuser=dict(),
serole=dict(),
selevel=dict(),
setype=dict(),
attributes=dict(aliases=['attr']),
# The following are not about perms and should not be in a rewritten file_common_args
src=dict(), # Maybe dest or path would be appropriate but src is not
follow=dict(type='bool', default=False), # Maybe follow is appropriate because it determines whether to follow symlinks for permission purposes too
force=dict(type='bool'),
# not taken by the file module, but other action plugins call the file module so this ignores
# them for now. In the future, the caller should take care of removing these from the module
# arguments before calling the file module.
content=dict(no_log=True), # used by copy
backup=dict(), # Used by a few modules to create a remote backup before updating the file
remote_src=dict(), # used by assemble
regexp=dict(), # used by assemble
delimiter=dict(), # used by assemble
directory_mode=dict(), # used by copy
unsafe_writes=dict(type='bool'), # should be available to any module using atomic_move
)
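# Usage note (not shown in this excerpt): these defaults are merged into a
# module's own argument_spec when it constructs AnsibleModule with
# add_file_common_args=True.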
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Used for parsing symbolic file perms
MODE_OPERATOR_RE = re.compile(r'[+=-]')
USERS_RE = re.compile(r'[^ugo]')
PERMS_RE = re.compile(r'[^rwxXstugo]')
PERM_BITS = 0o7777 # file mode permission bits
EXEC_PERM_BITS = 0o0111 # execute permission bits
DEFAULT_PERM = 0o0666 # default file permission bits
# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
_PY3_MIN = sys.version_info[:2] >= (3, 5)
_PY2_MIN = (2, 6) <= sys.version_info[:2] < (3,)
_PY_MIN = _PY3_MIN or _PY2_MIN
if not _PY_MIN:
print(
'\n{"failed": true, '
'"msg": "Ansible requires a minimum of Python2 version 2.6 or Python3 version 3.5. Current version: %s"}' % ''.join(sys.version.splitlines())
)
sys.exit(1)
def get_platform():
''' what's the platform? example: Linux is a platform. '''
return platform.system()
def get_distribution():
''' return the distribution name '''
if platform.system() == 'Linux':
try:
supported_dists = platform._supported_dists + ('arch', 'alpine', 'devuan')
distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
if not distribution and os.path.isfile('/etc/system-release'):
distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
if 'Amazon' in distribution:
distribution = 'Amazon'
else:
distribution = 'OtherLinux'
except Exception:
# FIXME: MethodMissing, I assume?
distribution = platform.dist()[0].capitalize()
else:
distribution = None
return distribution
def get_distribution_version():
''' return the distribution version '''
if platform.system() == 'Linux':
try:
distribution_version = platform.linux_distribution()[1]
if not distribution_version and os.path.isfile('/etc/system-release'):
distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
except Exception:
# FIXME: MethodMissing, I assume?
distribution_version = platform.dist()[1]
else:
distribution_version = None
return distribution_version
def get_all_subclasses(cls):
'''
used by modules like the Hardware or Network fact classes to retrieve all subclasses of a given class.
__subclasses__ returns only direct subclasses; this one walks the whole class tree.
'''
# Retrieve direct subclasses
subclasses = cls.__subclasses__()
to_visit = list(subclasses)
# Then visit all subclasses
while to_visit:
for sc in to_visit:
# The current class is now visited, so remove it from list
to_visit.remove(sc)
# Appending all subclasses to visit and keep a reference of available class
for ssc in sc.__subclasses__():
subclasses.append(ssc)
to_visit.append(ssc)
return subclasses
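# Usage sketch (hypothetical classes, for illustration only):
#   class Hardware(object): pass
#   class LinuxHardware(Hardware): pass
#   class DebianHardware(LinuxHardware): pass
#   get_all_subclasses(Hardware)  # -> [LinuxHardware, DebianHardware]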
def load_platform_subclass(cls, *args, **kwargs):
'''
used by modules like User to have different implementations based on detected platform. See User
module for an example.
'''
this_platform = get_platform()
distribution = get_distribution()
subclass = None
# get the most specific superclass for this platform
if distribution is not None:
for sc in get_all_subclasses(cls):
if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
subclass = sc
if subclass is None:
for sc in get_all_subclasses(cls):
if sc.platform == this_platform and sc.distribution is None:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
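# Usage sketch, following the User-module pattern the docstring mentions: the
# base class routes __new__ through this helper so that instantiating it
# picks the most specific platform subclass.
#   class User(object):
#       platform = 'Generic'
#       distribution = None
#       def __new__(cls, *args, **kwargs):
#           return load_platform_subclass(User, *args, **kwargs)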
def json_dict_unicode_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, text_type):
return to_bytes(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_unicode_to_bytes, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
else:
return d
def json_dict_bytes_to_unicode(d, encoding='utf-8', errors='surrogate_or_strict'):
''' Recursively convert dict keys and values to text (unicode) str
Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, binary_type):
# Warning, can traceback
return to_text(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_bytes_to_unicode, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
else:
return d
def return_values(obj):
""" Return native stringified values from datastructures.
For use with removing sensitive values pre-jsonification."""
if isinstance(obj, (text_type, binary_type)):
if obj:
yield to_native(obj, errors='surrogate_or_strict')
return
elif isinstance(obj, SEQUENCETYPE):
for element in obj:
for subelement in return_values(element):
yield subelement
elif isinstance(obj, Mapping):
for element in obj.items():
for subelement in return_values(element[1]):
yield subelement
elif isinstance(obj, (bool, NoneType)):
# This must come before int because bools are also ints
return
elif isinstance(obj, NUMBERTYPES):
yield to_native(obj, nonstring='simplerepr')
else:
raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
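# Usage sketch: return_values() is a generator over every scalar buried in a
# nested structure (dict ordering may vary on older Pythons), e.g.
#   list(return_values({'user': 'bob', 'ports': [22, 80]}))
#   # -> ['bob', '22', '80']  (numbers come back stringified)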
def _remove_values_conditions(value, no_log_strings, deferred_removals):
"""
Helper function for :meth:`remove_values`.
:arg value: The value to check for strings that need to be stripped
:arg no_log_strings: set of strings which must be stripped out of any values
:arg deferred_removals: List which holds information about nested
containers that have to be iterated for removals. It is passed into
this function so that more entries can be added to it if value is
a container type. The format of each entry is a 2-tuple where the first
element is the ``value`` parameter and the second value is a new
container to copy the elements of ``value`` into once iterated.
:returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
1. :class:`~datetime.datetime` objects which are changed into a string representation.
2. objects which are in no_log_strings are replaced with a placeholder
so that no sensitive data is leaked.
If ``value`` is a container type, returns a new empty container.
``deferred_removals`` is added to as a side-effect of this function.
.. warning:: It is up to the caller to make sure the order in which value
is passed in is correct. For instance, higher level containers need
to be passed in before lower level containers. For example, given
``{'level1': {'level2': {'level3': [True]}}}`` first pass in the
dictionary for ``level1``, then the dict for ``level2``, and finally
the list for ``level3``.
"""
if isinstance(value, (text_type, binary_type)):
# Need native str type
native_str_value = value
if isinstance(value, text_type):
value_is_text = True
if PY2:
native_str_value = to_bytes(value, errors='surrogate_or_strict')
elif isinstance(value, binary_type):
value_is_text = False
if PY3:
native_str_value = to_text(value, errors='surrogate_or_strict')
if native_str_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
native_str_value = native_str_value.replace(omit_me, '*' * 8)
if value_is_text and isinstance(native_str_value, binary_type):
value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
elif not value_is_text and isinstance(native_str_value, text_type):
value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
else:
value = native_str_value
elif isinstance(value, Sequence):
if isinstance(value, MutableSequence):
new_value = type(value)()
else:
new_value = [] # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Set):
if isinstance(value, MutableSet):
new_value = type(value)()
else:
new_value = set() # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Mapping):
if isinstance(value, MutableMapping):
new_value = type(value)()
else:
new_value = {} # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, datetime.datetime):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
def remove_values(value, no_log_strings):
""" Remove strings in no_log_strings from value. If value is a container
type, then remove a lot more"""
deferred_removals = deque()
no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)
while deferred_removals:
old_data, new_data = deferred_removals.popleft()
if isinstance(new_data, Mapping):
for old_key, old_elem in old_data.items():
new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
new_data[old_key] = new_elem
else:
for elem in old_data:
new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
if isinstance(new_data, MutableSequence):
new_data.append(new_elem)
elif isinstance(new_data, MutableSet):
new_data.add(new_elem)
else:
raise TypeError('Unknown container type encountered when removing private values from output')
return new_value
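# Hedged example of censoring nested data; '********' and the placeholder
# string are the literals used in _remove_values_conditions above:
#
#   remove_values({'cmd': 'login -p hunter2', 'retries': 3}, ['hunter2'])
#   -> {'cmd': 'login -p ********', 'retries': 3}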
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
# user:pass@foo/whatever and http://username:pass@wherever/foo
# This code has false positives and consumes parts of logs that are
# not passwds
# begin: start of a passwd containing string
# end: end of a passwd containing string
# sep: char between user and passwd
# prev_begin: where in the overall string to start a search for
# a passwd
# sep_search_end: where in the string to end a search for the sep
data = to_native(data)
output = []
begin = len(data)
prev_begin = begin
sep = 1
while sep:
# Find the potential end of a passwd
try:
end = data.rindex('@', 0, begin)
except ValueError:
# No passwd in the rest of the data
output.insert(0, data[0:begin])
break
# Search for the beginning of a passwd
sep = None
sep_search_end = end
while not sep:
# URL-style username+password
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
# No url style in the data, check for ssh style in the
# rest of the string
begin = 0
# Search for separator
try:
sep = data.index(':', begin + 3, end)
except ValueError:
# No separator; choices:
if begin == 0:
# Searched the whole string so there's no password
# here. Return the remaining data
output.insert(0, data[0:begin])
break
# Search for a different beginning of the password field.
sep_search_end = begin
continue
if sep:
# Password was found; remove it.
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:sep + 1])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
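# Illustrative behavior (a sketch; the heuristic has known false positives
# and may differ on edge cases):
#
#   heuristic_log_sanitize('http://user:secret@host.example/path')
#   -> 'http://user:********@host.example/path'
#
# ssh-style 'user:secret@host' strings are masked the same way.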
def bytes_to_human(size, isbits=False, unit=None):
base = 'Bytes'
if isbits:
base = 'bits'
suffix = ''
for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]):
if (unit is None and size >= limit) or (unit is not None and unit.upper() == suffix[0]):
break
if limit != 1:
suffix += base[0]
else:
suffix = base
return '%.2f %s' % (size / limit, suffix)
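# Quick sketch of the formatting (SIZE_RANGES is defined earlier in this
# module; outputs shown are illustrative):
#
#   bytes_to_human(1024)              -> '1.00 KB'
#   bytes_to_human(1024, unit='B')    -> '1024.00 Bytes'
#   bytes_to_human(1024, isbits=True) -> '1.00 Kb'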
def human_to_bytes(number, default_unit=None, isbits=False):
'''
Convert a human-readable size string into bytes (ex: '2K' => 2048), or use
the separate unit argument, e.g.:
human_to_bytes('10M') <=> human_to_bytes(10, 'M')
'''
m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
if m is None:
raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
try:
num = float(m.group(1))
except ValueError:
raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))
unit = m.group(2)
if unit is None:
unit = default_unit
if unit is None:
# No unit given, returning raw number
return int(round(num))
range_key = unit[0].upper()
try:
limit = SIZE_RANGES[range_key]
except KeyError:
raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))
# default value
unit_class = 'B'
unit_class_name = 'byte'
# handling bits case
if isbits:
unit_class = 'b'
unit_class_name = 'bit'
# check unit value if more than one character (KB, MB)
if len(unit) > 1:
expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
if range_key == 'B':
expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
if unit_class_name in unit.lower():
pass
elif unit[1] != unit_class:
raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))
return int(round(num * limit))
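# Hedged conversion sketch (binary multiples, per SIZE_RANGES):
#
#   human_to_bytes('2K')                -> 2048
#   human_to_bytes(10, 'M')             -> 10485760
#   human_to_bytes('1Mb', isbits=True)  -> 1048576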
def _load_params():
''' read the module's parameters and store them globally.
This function may be needed for certain very dynamic custom modules which
want to process the parameters that are being handed to the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions) however we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if PY3:
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if PY2:
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
# This helper is used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if PY2:
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
'"failed": true}')
sys.exit(1)
def env_fallback(*args, **kwargs):
''' Load value from environment '''
for arg in args:
if arg in os.environ:
return os.environ[arg]
raise AnsibleFallbackNotFound
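# Typical use is as the fallback strategy in an argument_spec entry (a
# sketch; 'url' and the environment variable names are illustrative):
#
#   argument_spec = dict(
#       url=dict(type='str',
#                fallback=(env_fallback, ['API_URL', 'ANSIBLE_API_URL'])),
#   )
#
# _set_fallbacks() below then calls env_fallback('API_URL',
# 'ANSIBLE_API_URL') when 'url' was not supplied, and treats
# AnsibleFallbackNotFound as "leave the parameter unset".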
def _lenient_lowercase(lst):
"""Lowercase elements of a list.
If an element is not a string, pass it through untouched.
"""
lowered = []
for value in lst:
try:
lowered.append(value.lower())
except AttributeError:
lowered.append(value)
return lowered
def format_attributes(attributes):
attribute_list = []
for attr in attributes:
if attr in FILE_ATTRIBUTES:
attribute_list.append(FILE_ATTRIBUTES[attr])
return attribute_list
def get_flags_from_attributes(attributes):
flags = []
for key, attr in FILE_ATTRIBUTES.items():
if attr in attributes:
flags.append(key)
return ''.join(flags)
def _json_encode_fallback(obj):
if isinstance(obj, Set):
return list(obj)
elif isinstance(obj, datetime.datetime):
return obj.isoformat()
raise TypeError("Cannot json serialize %s" % to_native(obj))
def jsonify(data, **kwargs):
for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding, default=_json_encode_fallback, **kwargs)
# Old systems using the old simplejson module do not support the encoding keyword.
except TypeError:
try:
new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
except UnicodeDecodeError:
continue
return json.dumps(new_data, default=_json_encode_fallback, **kwargs)
except UnicodeDecodeError:
continue
raise UnicodeError('Invalid unicode encoding encountered')
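# Sketch of the fallback encoder in action (sets and datetimes are the two
# types _json_encode_fallback handles):
#
#   jsonify({'when': datetime.datetime(2018, 1, 1), 'tags': set(['a'])})
#   -> '{"when": "2018-01-01T00:00:00", "tags": ["a"]}'
#
# Key order in the output is not guaranteed unless sort_keys=True is passed.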
class AnsibleFallbackNotFound(Exception):
pass
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=None, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None):
'''
common code for quickly building an ansible module in Python
(although you can write modules in anything that can return JSON)
see library/* for examples
'''
self._name = os.path.basename(__file__) # initialize name until we can parse from options
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False
self.bypass_checks = bypass_checks
self.no_log = no_log
# Check whether code set this explicitly for deprecation purposes
if check_invalid_arguments is None:
check_invalid_arguments = True
module_set_check_invalid_arguments = False
else:
module_set_check_invalid_arguments = True
self.check_invalid_arguments = check_invalid_arguments
self.mutually_exclusive = mutually_exclusive
self.required_together = required_together
self.required_one_of = required_one_of
self.required_if = required_if
self.cleanup_files = []
self._debug = False
self._diff = False
self._socket_path = None
self._shell = None
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
self.run_command_environ_update = {}
self._warnings = []
self._deprecations = []
self._clean = {}
self.aliases = {}
self._legal_inputs = ['_ansible_%s' % k for k in PASS_VARS]
self._options_context = list()
self._tmpdir = None
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
self._load_params()
self._set_fallbacks()
# append to legal_inputs and then possibly check against them
try:
self.aliases = self._handle_aliases()
except Exception as e:
# Use exceptions here because it isn't safe to call fail_json until no_log is processed
print('\n{"failed": true, "msg": "Module alias error: %s"}' % to_native(e))
sys.exit(1)
# Save parameter values that should never be logged
self.no_log_values = set()
self._handle_no_log_values()
# check the locale as set by the current environment, and reset to
# a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
self._check_arguments(check_invalid_arguments)
# check exclusive early
if not bypass_checks:
self._check_mutually_exclusive(mutually_exclusive)
self._set_defaults(pre=True)
self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
'str': self._check_type_str,
'list': self._check_type_list,
'dict': self._check_type_dict,
'bool': self._check_type_bool,
'int': self._check_type_int,
'float': self._check_type_float,
'path': self._check_type_path,
'raw': self._check_type_raw,
'jsonarg': self._check_type_jsonarg,
'json': self._check_type_jsonarg,
'bytes': self._check_type_bytes,
'bits': self._check_type_bits,
}
if not bypass_checks:
self._check_required_arguments()
self._check_argument_types()
self._check_argument_values()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
self._check_required_if(required_if)
self._set_defaults(pre=False)
# deal with options sub-spec
self._handle_options()
if not self.no_log:
self._log_invocation()
# finally, make sure we're in a sane working dir
self._set_cwd()
# Do this at the end so that logging parameters have been set up
# This is to warn third party module authors that the functionality is going away.
# We exclude uri and zfs as they have their own deprecation warnings for users and we'll
# make sure to update their code to stop using check_invalid_arguments when 2.9 rolls around
if module_set_check_invalid_arguments and self._name not in ('uri', 'zfs'):
self.deprecate('Setting check_invalid_arguments is deprecated and will be removed.'
' Update the code for this module. In the future, AnsibleModule will'
' always check for invalid arguments.', version='2.9')
@property
def tmpdir(self):
# if _ansible_tmpdir was not set and we have a remote_tmp,
# the module needs to create it and clean it up once finished.
# otherwise we create our own module tmp dir from the system defaults
if self._tmpdir is None:
basedir = os.path.expanduser(os.path.expandvars(self._remote_tmp))
if not os.path.exists(basedir):
try:
os.makedirs(basedir, mode=0o700)
except (OSError, IOError) as e:
self.warn("Unable to use %s as temporary directory, "
"failing back to system: %s" % (basedir, to_native(e)))
basedir = None
else:
self.warn("Module remote_tmp %s did not exist and was "
"created with a mode of 0700, this may cause"
" issues when running as another user. To "
"avoid this, create the remote_tmp dir with "
"the correct permissions manually" % basedir)
basefile = "ansible-moduletmp-%s-" % time.time()
try:
tmpdir = tempfile.mkdtemp(prefix=basefile, dir=basedir)
except (OSError, IOError) as e:
self.fail_json(
msg="Failed to create remote module tmp path at dir %s "
"with prefix %s: %s" % (basedir, basefile, to_native(e))
)
if not self._keep_remote_files:
atexit.register(shutil.rmtree, tmpdir)
self._tmpdir = tmpdir
return self._tmpdir
def warn(self, warning):
if isinstance(warning, string_types):
self._warnings.append(warning)
self.log('[WARNING] %s' % warning)
else:
raise TypeError("warn requires a string not a %s" % type(warning))
def deprecate(self, msg, version=None):
if isinstance(msg, string_types):
self._deprecations.append({
'msg': msg,
'version': version
})
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
else:
raise TypeError("deprecate requires a string not a %s" % type(msg))
def load_file_common_arguments(self, params):
'''
many modules deal with files, this encapsulates common
options that the file module accepts such that it is directly
available to all modules and they can share code.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(os.path.expandvars(path))
b_path = to_bytes(path, errors='surrogate_or_strict')
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(b_path):
b_path = os.path.realpath(b_path)
path = to_native(b_path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
if secontext[i] == '_default':
secontext[i] = default_secontext[i]
attributes = params.get('attributes', None)
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext, attributes=attributes,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc, out, err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
except OSError as e:
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, path, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
st = os.lstat(b_path)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path_is_bytes = False
if isinstance(path, binary_type):
path_is_bytes = True
b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
while not os.path.ismount(b_path):
b_path = os.path.dirname(b_path)
if path_is_bytes:
return b_path
return to_text(b_path, errors='surrogate_or_strict')
def is_special_selinux_path(self, path):
"""
Returns a tuple containing (True, selinux_context) if the given path is on an
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except (IOError, OSError):
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
if self.check_file_absent_if_check_mode(path):
return True
cur_context = self.selinux_context(path)
new_context = list(cur_context)
# Iterate over the current context instead of the
# argument context, which may have selevel.
(is_special_se, sp_context) = self.is_special_selinux_path(path)
if is_special_se:
new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['secontext'] = cur_context
if 'after' not in diff:
diff['after'] = {}
diff['after']['secontext'] = new_context
try:
if self.check_mode:
return True
rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
except OSError as e:
self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
if owner is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except (IOError, OSError) as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: %s' % (to_text(e)))
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
if group is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
if mode is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path_stat = os.lstat(b_path)
if self.check_file_absent_if_check_mode(b_path):
return True
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=to_native(e))
if mode != stat.S_IMODE(mode):
# prevent mode from having extra info or being an invalid long number
path = to_text(b_path)
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = '0%03o' % prev_mode
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = '0%03o' % mode
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(b_path, mode)
else:
if not os.path.islink(b_path):
os.chmod(b_path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(b_path)
os.chmod(b_path, mode)
new_underlying_stat = os.stat(b_path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
except OSError as e:
if os.path.islink(b_path) and e.errno == errno.EPERM: # Can't set mode on symbolic links
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chmod failed', details=to_native(e),
exception=traceback.format_exc())
path_stat = os.lstat(b_path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
if attributes is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
existing = self.get_file_attributes(b_path)
attr_mod = '='
if attributes.startswith(('-', '+')):
attr_mod = attributes[0]
attributes = attributes[1:]
if existing.get('attr_flags', '') != attributes or attr_mod == '-':
attrcmd = self.get_bin_path('chattr')
if attrcmd:
attrcmd = [attrcmd, '%s%s' % (attr_mod, attributes), b_path]
changed = True
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['attributes'] = existing.get('attr_flags')
if 'after' not in diff:
diff['after'] = {}
diff['after']['attributes'] = '%s%s' % (attr_mod, attributes)
if not self.check_mode:
try:
rc, out, err = self.run_command(attrcmd)
if rc != 0 or err:
raise Exception("Error while setting attributes: %s" % (out + err))
except Exception as e:
self.fail_json(path=to_text(b_path), msg='chattr failed',
details=to_native(e), exception=traceback.format_exc())
return changed
def get_file_attributes(self, path):
output = {}
attrcmd = self.get_bin_path('lsattr', False)
if attrcmd:
attrcmd = [attrcmd, '-vd', path]
try:
rc, out, err = self.run_command(attrcmd)
if rc == 0:
res = out.split()
output['attr_flags'] = res[1].replace('-', '').strip()
output['version'] = res[0].strip()
output['attributes'] = format_attributes(output['attr_flags'])
except Exception:
pass
return output
@classmethod
def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
"""
This enables symbolic chmod string parsing as stated in the chmod man-page.
This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"
"""
new_mode = stat.S_IMODE(path_stat.st_mode)
# Now parse all symbolic modes
for mode in symbolic_mode.split(','):
# Per single mode. This always contains a '+', '-' or '='
# Split it on that
permlist = MODE_OPERATOR_RE.split(mode)
# And find all the operators
opers = MODE_OPERATOR_RE.findall(mode)
# The user(s) the mode applies to is the first element in the
# 'permlist' list; take it and remove it from the list.
# An empty user or 'a' means 'all'.
users = permlist.pop(0)
use_umask = (users == '')
if users == 'a' or users == '':
users = 'ugo'
# Check if there are illegal characters in the user list
# They can end up in 'users' because they are not split
if USERS_RE.match(users):
raise ValueError("bad symbolic permission for mode: %s" % mode)
# Now we have two lists of equal length: one contains the requested
# permissions and the other the corresponding operators.
for idx, perms in enumerate(permlist):
# Check if there are illegal characters in the permissions
if PERMS_RE.match(perms):
raise ValueError("bad symbolic permission for mode: %s" % mode)
for user in users:
mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)
return new_mode
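# Worked sketch (assuming a path_stat whose current mode is 0o640):
#
#   _symbolic_mode_to_octal(path_stat, 'u+x,g-w,o=r')
#   # 0o640 --u+x--> 0o740 --g-w--> 0o740 --o=r--> 0o744
#
# 'X' grants execute only when the target is a directory or already has an
# execute bit set, mirroring chmod(1).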
@staticmethod
def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
@staticmethod
def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
# Get the umask; if the 'user' part is empty, the effect is as if (a) were
# given, but bits that are set in the umask are not affected.
# We also need the "reversed umask" for masking
umask = os.umask(0)
os.umask(umask)
rev_umask = umask ^ PERM_BITS
# Permission bits constants documented at:
# http://docs.python.org/2/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH},
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0},
}
user_perms_to_modes = {
'u': {
'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6},
'g': {
'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & stat.S_IRWXU) >> 3,
'g': prev_mode & stat.S_IRWXG,
'o': (prev_mode & stat.S_IRWXO) << 3},
'o': {
'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
's': 0,
't': stat.S_ISVTX,
'u': (prev_mode & stat.S_IRWXU) >> 6,
'g': (prev_mode & stat.S_IRWXG) >> 3,
'o': prev_mode & stat.S_IRWXO},
}
# Insert X_perms into user_perms_to_modes
for key, value in X_perms.items():
user_perms_to_modes[key].update(value)
def or_reduce(mode, perm):
return mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff, expand
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff, expand
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff, expand
)
changed = self.set_attributes_if_different(
file_args['path'], file_args['attributes'], changed, diff, expand
)
return changed
def check_file_absent_if_check_mode(self, file_path):
return self.check_mode and not os.path.exists(file_path)
def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
in the return path with stats about the file path.
'''
path = kwargs.get('path', kwargs.get('dest', None))
if path is None:
return kwargs
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.exists(b_path):
(uid, gid) = self.user_and_group(path)
kwargs['uid'] = uid
kwargs['gid'] = gid
try:
user = pwd.getpwuid(uid)[0]
except KeyError:
user = str(uid)
try:
group = grp.getgrgid(gid)[0]
except KeyError:
group = str(gid)
kwargs['owner'] = user
kwargs['group'] = group
st = os.lstat(b_path)
kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
# secontext not yet supported
if os.path.islink(b_path):
kwargs['state'] = 'link'
elif os.path.isdir(b_path):
kwargs['state'] = 'directory'
elif os.stat(b_path).st_nlink > 1:
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
if HAVE_SELINUX and self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
else:
kwargs['state'] = 'absent'
return kwargs
def _check_locale(self):
'''
Uses the locale module to test the currently set locale
(per the LANG and LC_CTYPE environment settings)
'''
try:
# setting the locale to '' uses the default locale
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
# fallback to the 'C' locale, which may cause unicode
# issues but is preferable to simply failing because
# of an unknown locale
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
os.environ['LC_MESSAGES'] = 'C'
except Exception as e:
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
to_native(e), exception=traceback.format_exc())
def _handle_aliases(self, spec=None, param=None):
# this uses exceptions as it happens before we can safely call fail_json
aliases_results = {} # alias:canon
if param is None:
param = self.params
if spec is None:
spec = self.argument_spec
for (k, v) in spec.items():
self._legal_inputs.append(k)
aliases = v.get('aliases', None)
default = v.get('default', None)
required = v.get('required', False)
if default is not None and required:
# not alias specific but this is a good place to check this
raise Exception("internal error: required and default are mutually exclusive for %s" % k)
if aliases is None:
continue
if not isinstance(aliases, SEQUENCETYPE) or isinstance(aliases, (binary_type, text_type)):
raise Exception('internal error: aliases must be a list or tuple')
for alias in aliases:
self._legal_inputs.append(alias)
aliases_results[alias] = k
if alias in param:
param[k] = param[alias]
return aliases_results
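# Aliases are declared per option in the argument_spec; a sketch:
#
#   argument_spec = dict(
#       path=dict(type='path', aliases=['dest', 'name']),
#   )
#
# After _handle_aliases(), a value passed as 'dest' or 'name' is copied to
# params['path'], and the returned mapping is
# {'dest': 'path', 'name': 'path'}.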
def _handle_no_log_values(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
# Use the argspec to determine which args are no_log
for arg_name, arg_opts in spec.items():
if arg_opts.get('no_log', False):
# Find the value for the no_log'd param
no_log_object = param.get(arg_name, None)
if no_log_object:
self.no_log_values.update(return_values(no_log_object))
if arg_opts.get('removed_in_version') is not None and arg_name in param:
self._deprecations.append({
'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
'version': arg_opts.get('removed_in_version')
})
def _check_arguments(self, check_invalid_arguments, spec=None, param=None, legal_inputs=None):
self._syslog_facility = 'LOG_USER'
unsupported_parameters = set()
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
if legal_inputs is None:
legal_inputs = self._legal_inputs
for (k, v) in list(param.items()):
if check_invalid_arguments and k not in legal_inputs:
unsupported_parameters.add(k)
elif k.startswith('_ansible_'):
# handle setting internal properties from internal ansible vars
key = k.replace('_ansible_', '')
if key in PASS_BOOLS:
setattr(self, PASS_VARS[key], self.boolean(v))
else:
setattr(self, PASS_VARS[key], v)
# clean up internal params:
del self.params[k]
if unsupported_parameters:
msg = "Unsupported parameters for (%s) module: %s" % (self._name, ', '.join(sorted(list(unsupported_parameters))))
if self._options_context:
msg += " found in %s." % " -> ".join(self._options_context)
msg += " Supported parameters include: %s" % (', '.join(sorted(spec.keys())))
self.fail_json(msg=msg)
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check, param=None):
count = 0
if param is None:
param = self.params
for term in check:
if term in param:
count += 1
return count
def _check_mutually_exclusive(self, spec, param=None):
if spec is None:
return
for check in spec:
count = self._count_terms(check, param)
if count > 1:
msg = "parameters are mutually exclusive: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_one_of(self, spec, param=None):
if spec is None:
return
for check in spec:
count = self._count_terms(check, param)
if count == 0:
msg = "one of the following is required: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_together(self, spec, param=None):
if spec is None:
return
for check in spec:
counts = [self._count_terms([field], param) for field in check]
non_zero = [c for c in counts if c > 0]
if len(non_zero) > 0:
if 0 in counts:
msg = "parameters are required together: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_arguments(self, spec=None, param=None):
''' ensure all required arguments are present '''
missing = []
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
required = v.get('required', False)
if required and k not in param:
missing.append(k)
if len(missing) > 0:
msg = "missing required arguments: %s" % ", ".join(missing)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_if(self, spec, param=None):
''' ensure that parameters which are conditionally required are present '''
if spec is None:
return
if param is None:
param = self.params
for sp in spec:
missing = []
max_missing_count = 0
is_one_of = False
if len(sp) == 4:
key, val, requirements, is_one_of = sp
else:
key, val, requirements = sp
# If is_one_of is True, at least one requirement should be
# present; otherwise all requirements should be present.
if is_one_of:
max_missing_count = len(requirements)
term = 'any'
else:
term = 'all'
if key in param and param[key] == val:
for check in requirements:
count = self._count_terms((check,), param)
if count == 0:
missing.append(check)
if len(missing) and len(missing) >= max_missing_count:
msg = "%s is %s but %s of the following are missing: %s" % (key, val, term, ', '.join(missing))
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
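# The required_if spec is a list of 3- or 4-tuples; a hedged sketch:
#
#   required_if = [
#       ('state', 'present', ('path', 'owner')),       # all are required
#       ('state', 'absent', ('path', 'force'), True),  # any one suffices
#   ]
#
# With the fourth element set to True, failure is reported only when none
# of the listed parameters is present.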
def _check_argument_values(self, spec=None, param=None):
''' ensure all arguments have the requested values, and there are no stray arguments '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
choices = v.get('choices', None)
if choices is None:
continue
if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
if k in param:
# Allow one or more values when the param is type='list' and has choices
if isinstance(param[k], list):
diff_list = ", ".join([item for item in param[k] if item not in choices])
if diff_list:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one or more of: %s. Got no match for: %s" % (k, choices_str, diff_list)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
elif param[k] not in choices:
# PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
# the value. If we can't figure this out, module author is responsible.
lowered_choices = None
if param[k] == 'False':
lowered_choices = _lenient_lowercase(choices)
overlap = BOOLEANS_FALSE.intersection(choices)
if len(overlap) == 1:
# Extract from a set
(param[k],) = overlap
if param[k] == 'True':
if lowered_choices is None:
lowered_choices = _lenient_lowercase(choices)
overlap = BOOLEANS_TRUE.intersection(choices)
if len(overlap) == 1:
(param[k],) = overlap
if param[k] not in choices:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
else:
msg = "internal error: choices for argument %s are not iterable: %s" % (k, choices)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def safe_eval(self, value, locals=None, include_exceptions=False):
# do not allow method calls to modules
if not isinstance(value, string_types):
# already templated to a data structure, perhaps?
if include_exceptions:
return (value, None)
return value
if re.search(r'\w\.\w+\(', value):
if include_exceptions:
return (value, None)
return value
# do not allow imports
if re.search(r'import \w+', value):
if include_exceptions:
return (value, None)
return value
try:
result = literal_eval(value)
if include_exceptions:
return (result, None)
else:
return result
except Exception as e:
if include_exceptions:
return (value, e)
return value
def _check_type_str(self, value):
if isinstance(value, string_types):
return value
# Note: This could throw a unicode error if value's __str__() method
# returns non-ascii. Have to port utils.to_bytes() if that happens
return str(value)
def _check_type_list(self, value):
if isinstance(value, list):
return value
if isinstance(value, string_types):
return value.split(",")
elif isinstance(value, int) or isinstance(value, float):
return [str(value)]
raise TypeError('%s cannot be converted to a list' % type(value))
def _check_type_dict(self, value):
if isinstance(value, dict):
return value
if isinstance(value, string_types):
if value.startswith("{"):
try:
return json.loads(value)
except ValueError:
(result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
if exc is not None:
raise TypeError('unable to evaluate string as dictionary')
return result
elif '=' in value:
fields = []
field_buffer = []
in_quote = False
in_escape = False
for c in value.strip():
if in_escape:
field_buffer.append(c)
in_escape = False
elif c == '\\':
in_escape = True
elif not in_quote and c in ('\'', '"'):
in_quote = c
elif in_quote and in_quote == c:
in_quote = False
elif not in_quote and c in (',', ' '):
field = ''.join(field_buffer)
if field:
fields.append(field)
field_buffer = []
else:
field_buffer.append(c)
field = ''.join(field_buffer)
if field:
fields.append(field)
return dict(x.split("=", 1) for x in fields)
else:
raise TypeError("dictionary requested, could not parse JSON or key=value")
raise TypeError('%s cannot be converted to a dict' % type(value))
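# The key=value branch above accepts shell-ish strings; a sketch:
#
#   _check_type_dict('a=1 b="two words" c=3')
#   -> {'a': '1', 'b': 'two words', 'c': '3'}
#
# Input that starts with '{' is first tried with json.loads(), then with
# safe_eval() as a fallback.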
def _check_type_bool(self, value):
if isinstance(value, bool):
return value
if isinstance(value, string_types) or isinstance(value, int):
return self.boolean(value)
raise TypeError('%s cannot be converted to a bool' % type(value))
def _check_type_int(self, value):
if isinstance(value, int):
return value
if isinstance(value, string_types):
return int(value)
raise TypeError('%s cannot be converted to an int' % type(value))
def _check_type_float(self, value):
if isinstance(value, float):
return value
if isinstance(value, (binary_type, text_type, int)):
return float(value)
raise TypeError('%s cannot be converted to a float' % type(value))
def _check_type_path(self, value):
value = self._check_type_str(value)
return os.path.expanduser(os.path.expandvars(value))
def _check_type_jsonarg(self, value):
# Return a jsonified string. Sometimes the controller turns a json
# string into a dict/list so transform it back into json here
if isinstance(value, (text_type, binary_type)):
return value.strip()
else:
if isinstance(value, (list, tuple, dict)):
return self.jsonify(value)
raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
return value
def _check_type_bytes(self, value):
try:
return self.human_to_bytes(value)
except ValueError:
raise TypeError('%s cannot be converted to a Byte value' % type(value))
def _check_type_bits(self, value):
try:
return self.human_to_bytes(value, isbits=True)
except ValueError:
raise TypeError('%s cannot be converted to a Bit value' % type(value))
def _handle_options(self, argument_spec=None, params=None):
''' deal with options to create sub spec '''
if argument_spec is None:
argument_spec = self.argument_spec
if params is None:
params = self.params
for (k, v) in argument_spec.items():
wanted = v.get('type', None)
if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
spec = v.get('options', None)
if v.get('apply_defaults', False):
if spec is not None:
if params.get(k) is None:
params[k] = {}
else:
continue
elif spec is None or k not in params or params[k] is None:
continue
self._options_context.append(k)
if isinstance(params[k], dict):
elements = [params[k]]
else:
elements = params[k]
for param in elements:
if not isinstance(param, dict):
self.fail_json(msg="value of %s must be of type dict or list of dict" % k)
self._set_fallbacks(spec, param)
options_aliases = self._handle_aliases(spec, param)
self._handle_no_log_values(spec, param)
options_legal_inputs = list(spec.keys()) + list(options_aliases.keys())
self._check_arguments(self.check_invalid_arguments, spec, param, options_legal_inputs)
# check exclusive early
if not self.bypass_checks:
self._check_mutually_exclusive(v.get('mutually_exclusive', None), param)
self._set_defaults(pre=True, spec=spec, param=param)
if not self.bypass_checks:
self._check_required_arguments(spec, param)
self._check_argument_types(spec, param)
self._check_argument_values(spec, param)
self._check_required_together(v.get('required_together', None), param)
self._check_required_one_of(v.get('required_one_of', None), param)
self._check_required_if(v.get('required_if', None), param)
self._set_defaults(pre=False, spec=spec, param=param)
# handle multi level options (sub argspec)
self._handle_options(spec, param)
self._options_context.pop()
def _check_argument_types(self, spec=None, param=None):
''' ensure all arguments have the requested type '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
wanted = v.get('type', None)
if k not in param:
continue
value = param[k]
if value is None:
continue
if not callable(wanted):
if wanted is None:
# Mostly we want to default to str.
# For values set to None explicitly, return None instead as
# that allows a user to unset a parameter
if param[k] is None:
continue
wanted = 'str'
try:
type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
except KeyError:
self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
else:
# set the type_checker to the callable, and reset wanted to the callable's name (or type if it doesn't have one, ala MagicMock)
type_checker = wanted
wanted = getattr(wanted, '__name__', to_native(type(wanted)))
try:
param[k] = type_checker(value)
except (TypeError, ValueError) as e:
self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s: %s" %
(k, type(value), wanted, to_native(e)))
def _set_defaults(self, pre=True, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
default = v.get('default', None)
if pre is True:
# this prevents setting defaults on required items
if default is not None and k not in param:
param[k] = default
else:
# make sure things without a default still get set to None
if k not in param:
param[k] = default
def _set_fallbacks(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in param and fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
param[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, binary_type):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (binary_type, text_type)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, binary_type):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
journal_args.append((arg.upper(), str(log_args[arg])))
try:
if HAS_SYSLOG:
# If syslog_facility specified, it needs to convert
# from the facility name to the facility code, and
# set it as SYSLOG_FACILITY argument of journal.send()
facility = getattr(syslog,
self._syslog_facility,
syslog.LOG_USER) >> 3
journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
SYSLOG_FACILITY=facility,
**dict(journal_args))
else:
journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
**dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
# try to capture all passwords/passphrase named fields missed by no_log
elif PASSWORD_MATCH.search(param) and arg_opts.get('type', 'str') != 'bool' and not arg_opts.get('choices', False):
# skip boolean and enums as they are about 'password' state
log_args[param] = 'NOT_LOGGING_PASSWORD'
self.warn('Module did not set no_log for %s' % param)
else:
param_val = self.params[param]
if not isinstance(param_val, (text_type, binary_type)):
param_val = str(param_val)
elif isinstance(param_val, text_type):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK | os.R_OK):
raise Exception()
return cwd
except Exception:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [self.tmpdir, os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK | os.R_OK):
os.chdir(cwd)
return cwd
except Exception:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=None):
'''
find system executable in PATH.
Optional arguments:
- required: if executable is not found and required is true, fail_json
- opt_dirs: optional list of directories to search in addition to PATH
if found return full path; otherwise return None
'''
bin_path = None
try:
bin_path = get_bin_path(arg, required, opt_dirs)
except ValueError as e:
self.fail_json(msg=to_text(e))
return bin_path
def boolean(self, arg):
''' return a bool for the arg '''
if arg is None:
return arg
try:
return boolean(arg)
except TypeError as e:
self.fail_json(msg=to_native(e))
def jsonify(self, data):
try:
return jsonify(data)
except UnicodeError as e:
self.fail_json(msg=to_text(e))
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def _return_formatted(self, kwargs):
self.add_path_info(kwargs)
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
if 'warnings' in kwargs:
if isinstance(kwargs['warnings'], list):
for w in kwargs['warnings']:
self.warn(w)
else:
self.warn(kwargs['warnings'])
if self._warnings:
kwargs['warnings'] = self._warnings
if 'deprecations' in kwargs:
if isinstance(kwargs['deprecations'], list):
for d in kwargs['deprecations']:
if isinstance(d, SEQUENCETYPE) and len(d) == 2:
self.deprecate(d[0], version=d[1])
else:
self.deprecate(d)
else:
self.deprecate(kwargs['deprecations'])
if self._deprecations:
kwargs['deprecations'] = self._deprecations
kwargs = remove_values(kwargs, self.no_log_values)
print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
''' return from the module, without error '''
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(0)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
if 'msg' not in kwargs:
raise AssertionError("implementation error -- msg to explain the error is required")
kwargs['failed'] = True
# Add traceback if debug or high verbosity and it is missing
# NOTE: Badly named as exception, it really always has been a traceback
if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
if PY2:
# On Python 2 this is the last (stack frame) exception and as such may be unrelated to the failure
kwargs['exception'] = 'WARNING: The below traceback may *not* be related to the actual failure.\n' +\
''.join(traceback.format_tb(sys.exc_info()[2]))
else:
kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(1)
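# Condensed sketch of the canonical module shape built on this class
# (do_something is a placeholder, not a real helper; see library/* for
# real examples):
#
#   module = AnsibleModule(
#       argument_spec=dict(
#           name=dict(type='str', required=True),
#           state=dict(type='str', default='present',
#                      choices=['present', 'absent']),
#       ),
#       supports_check_mode=True,
#   )
#   changed = do_something(module.params['name'])
#   module.exit_json(changed=changed)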
def fail_on_missing_params(self, required_params=None):
''' This is for checking for required params when we can not check via argspec because we
need more information than is simply given in the argspec.
'''
if not required_params:
return
missing_params = []
for required_param in required_params:
if not self.params.get(required_param):
missing_params.append(required_param)
if missing_params:
self.fail_json(msg="missing required arguments: %s" % ', '.join(missing_params))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
if not os.path.exists(filename):
return None
if os.path.isdir(filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(os.path.realpath(filename), 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
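# A minimal usage sketch of the digest helpers above, kept in comments so the
# class body is unchanged; the file path is hypothetical:
#
# module = AnsibleModule(argument_spec=dict())
# checksum = module.sha256('/etc/hosts') # hex digest string, or None if the file is missing
# assert checksum == module.digest_from_file('/etc/hosts', 'sha256')
# # module.md5(...) raises ValueError on FIPS-140-2 systems; prefer sha1/sha256.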
def backup_local(self, fn):
'''make a date-marked backup of the specified file; returns the backup destination path, or '' if the file does not exist (calls fail_json if the copy fails)'''
backupdest = ''
if os.path.exists(fn):
# backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
try:
self.preserved_copy(fn, backupdest)
except (shutil.Error, IOError) as e:
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e)))
return backupdest
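# For illustration (comments only): a backup of '/etc/app.conf' made by
# backup_local() follows the basename.PID.timestamp~ pattern documented above,
# e.g. with a hypothetical pid and time:
# /etc/app.conf.1234.2020-01-31@12:00:00~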
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError as e:
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e)))
def preserved_copy(self, src, dest):
"""Copy a file with preserved ownership, permissions and context"""
# shutil.copy2(src, dst)
# Similar to shutil.copy(), but metadata is copied as well - in fact,
# this is just shutil.copy() followed by copystat(). This is similar
# to the Unix command cp -p.
#
# shutil.copystat(src, dst)
# Copy the permission bits, last access time, last modification time,
# and flags from src to dst. The file contents, owner, and group are
# unaffected. src and dst are path names given as strings.
shutil.copy2(src, dest)
# Set the context
if self.selinux_enabled():
context = self.selinux_context(src)
self.set_context_if_different(dest, context, False)
# chown it
try:
dest_stat = os.stat(src)
tmp_stat = os.stat(dest)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(dest, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
# Set the attributes
current_attribs = self.get_file_attributes(src)
current_attribs = current_attribs.get('attr_flags', '')
self.set_attributes_if_different(dest, current_attribs, True)
def atomic_move(self, src, dest, unsafe_writes=False):
'''atomically move src to dest, copying attributes from dest; returns true on success.
os.rename is used because it is an atomic operation; the rest of the function works around
limitations and corner cases, and preserves the selinux context if possible'''
context = None
dest_stat = None
b_src = to_bytes(src, errors='surrogate_or_strict')
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
try:
dest_stat = os.stat(b_dest)
# copy mode and ownership
os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
# try to copy flags if possible
if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
try:
os.chflags(b_src, dest_stat.st_flags)
except OSError as e:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
break
else:
raise
except OSError as e:
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(b_dest)
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(b_src, b_dest)
except (IOError, OSError) as e:
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
# only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied),
# 16 (device busy) and 26 (text file busy), the latter of which happens on vagrant synced folders and other 'exotic' non-POSIX file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
else:
# Use bytes here. In the shippable CI, this fails with
# a UnicodeError with surrogateescape'd strings for an unknown
# reason (doesn't happen in a local Ubuntu16.04 VM)
b_dest_dir = os.path.dirname(b_dest)
b_suffix = os.path.basename(b_dest)
error_msg = None
tmp_dest_name = None
try:
tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=b'.ansible_tmp',
dir=b_dest_dir, suffix=b_suffix)
except (OSError, IOError) as e:
error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
except TypeError:
# We expect that this is happening because python3.4.x and
# below can't handle byte strings in mkstemp(). Traceback
# would end in something like:
# file = _os.path.join(dir, pre + name + suf)
# TypeError: can't concat bytes to str
error_msg = ('Failed creating tmp file for atomic move. This usually happens when using Python3 less than Python3.5. '
'Please use Python2.x or Python3.5 or greater.')
finally:
if error_msg:
if unsafe_writes:
self._unsafe_writes(b_src, b_dest)
else:
self.fail_json(msg=error_msg, exception=traceback.format_exc())
if tmp_dest_name:
b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
try:
try:
# close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
os.close(tmp_dest_fd)
# leaves tmp file behind when sudo and not root
try:
shutil.move(b_src, b_tmp_dest_name)
except OSError:
# cleanup will happen by 'rm' of tmpdir
# copy2 will preserve some metadata
shutil.copy2(b_src, b_tmp_dest_name)
if self.selinux_enabled():
self.set_context_if_different(
b_tmp_dest_name, context, False)
try:
tmp_stat = os.stat(b_tmp_dest_name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
try:
os.rename(b_tmp_dest_name, b_dest)
except (shutil.Error, OSError, IOError) as e:
if unsafe_writes and e.errno == errno.EBUSY:
self._unsafe_writes(b_tmp_dest_name, b_dest)
else:
self.fail_json(msg='Unable to move %s to %s, failed final rename from %s: %s' %
(src, dest, b_tmp_dest_name, to_native(e)),
exception=traceback.format_exc())
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
finally:
self.cleanup(b_tmp_dest_name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
os.chmod(b_dest, DEFAULT_PERM & ~umask)
try:
os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
pass
if self.selinux_enabled():
# rename might not preserve context
self.set_context_if_different(dest, context, False)
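# A hedged usage sketch for atomic_move, as comments so the class body is
# untouched; paths and the unsafe_writes parameter are illustrative. A typical
# module writes to a temp file and then moves it into place so readers never
# observe a partially written file:
#
# tmp_fd, tmp_path = tempfile.mkstemp(dir=module.tmpdir)
# with os.fdopen(tmp_fd, 'wb') as f:
#     f.write(b'new contents\n')
# module.atomic_move(tmp_path, '/etc/app.conf', unsafe_writes=False)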
def _unsafe_writes(self, src, dest):
# sadly there are some situations where we cannot ensure atomicity, but only if
# the user insists and we get the appropriate error we update the file unsafely
try:
out_dest = in_src = None
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)),
exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
data = b('')
if file_descriptor in rfds:
data = os.read(file_descriptor.fileno(), 9000)
if data == b(''):
rpipes.remove(file_descriptor)
return data
def _clean_args(self, args):
if not self._clean:
# create a printable version of the command for use in reporting later,
# which strips out things like passwords from the args list
to_clean_args = args
if PY2:
if isinstance(args, text_type):
to_clean_args = to_bytes(args)
else:
if isinstance(args, binary_type):
to_clean_args = to_text(args)
if isinstance(args, (text_type, binary_type)):
to_clean_args = shlex.split(to_clean_args)
clean_args = []
is_passwd = False
for arg in (to_native(a) for a in to_clean_args):
if is_passwd:
is_passwd = False
clean_args.append('********')
continue
if PASSWD_ARG_RE.match(arg):
sep_idx = arg.find('=')
if sep_idx > -1:
clean_args.append('%s=********' % arg[:sep_idx])
continue
else:
is_passwd = True
arg = heuristic_log_sanitize(arg, self.no_log_values)
clean_args.append(arg)
self._clean = ' '.join(shlex_quote(arg) for arg in clean_args)
return self._clean
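# Worked example (comments only) of the scrubbing performed by _clean_args:
# given args like ['mysql', '--password=s3cret', '-e', 'select 1'], the logged
# command becomes "mysql --password=******** -e 'select 1'"; a bare
# '--password' flag instead masks the *following* argument.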
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict',
expand_user_and_vars=True):
'''
Execute a command, returns rc, stdout, and stderr.
:arg args: is the command to run
* If args is a list, the command will be run with shell=False.
* If args is a string and use_unsafe_shell=False, it will split args into a list and run with shell=False
* If args is a string and use_unsafe_shell=True it runs with shell=True.
:kw check_rc: Whether to call fail_json in case of a non-zero RC.
Default False
:kw close_fds: See documentation for subprocess.Popen(). Default True
:kw executable: See documentation for subprocess.Popen(). Default None
:kw data: If given, information to write to the stdin of the command
:kw binary_data: If False, append a newline to the data. Default False
:kw path_prefix: If given, additional path to find the command in.
This adds to the PATH environment variable so helper commands in
the same directory can also be found
:kw cwd: If given, working directory to run the command inside
:kw use_unsafe_shell: See `args` parameter. Default False
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kw environ_update: dictionary to *update* os.environ with
:kw umask: Umask to be used when running the command. Default None
:kw encoding: Since we return native strings, on python3 we need to
know the encoding to use to transform from bytes to text. If you
want to always get bytes back, use encoding=None. The default is
"utf-8". This does not affect transformation of strings given as
args.
:kw errors: Since we return native strings, on python3 we need to
transform stdout and stderr from bytes to text. If the bytes are
undecodable in the ``encoding`` specified, then use this error
handler to deal with them. The default is ``surrogate_or_strict``
which means that the bytes will be decoded using the
surrogateescape error handler if available (available on all
python3 versions we support) otherwise a UnicodeError traceback
will be raised. This does not affect transformations of strings
given as args.
:kw expand_user_and_vars: When ``use_unsafe_shell=False`` this argument
dictates whether ``~`` is expanded in paths and environment variables
are expanded before running the command. When ``True`` a string such as
``$SHELL`` will be expanded regardless of escaping. When ``False`` and
``use_unsafe_shell=False`` no path or variable expansion will be done.
:returns: A 3-tuple of return code (integer), stdout (native string),
and stderr (native string). On python2, stdout and stderr are both
byte strings. On python3, stdout and stderr are text strings converted
according to the encoding and errors parameters. If you want byte
strings on python3, use encoding=None to turn decoding to text off.
'''
# used by clean args later on
self._clean = None
if not isinstance(args, (list, binary_type, text_type)):
msg = "Argument 'args' to run_command must be list or string"
self.fail_json(rc=257, cmd=args, msg=msg)
shell = False
if use_unsafe_shell:
# stringify args for unsafe/direct shell usage
if isinstance(args, list):
args = " ".join([shlex_quote(x) for x in args])
# not set explicitly, check if set by controller
if executable:
args = [executable, '-c', args]
elif self._shell not in (None, '/bin/sh'):
args = [self._shell, '-c', args]
else:
shell = True
else:
# ensure args are a list
if isinstance(args, (binary_type, text_type)):
# On python2.6 and below, shlex has problems with text type
# On python3, shlex needs a text type.
if PY2:
args = to_bytes(args, errors='surrogate_or_strict')
elif PY3:
args = to_text(args, errors='surrogateescape')
args = shlex.split(args)
# expand ``~`` in paths, and all environment vars
if expand_user_and_vars:
args = [os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None]
else:
args = [x for x in args if x is not None]
prompt_re = None
if prompt_regex:
if isinstance(prompt_regex, text_type):
if PY3:
prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
elif PY2:
prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
try:
prompt_re = re.compile(prompt_regex, re.MULTILINE)
except re.error:
self.fail_json(msg="invalid prompt regular expression given to run_command")
rc = 0
msg = None
st_in = None
# Manipulate the environ we'll send to the new process
old_env_vals = {}
# We can set this from both an attribute and per call
for key, val in self.run_command_environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if environ_update:
for key, val in environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if path_prefix:
old_env_vals['PATH'] = os.environ['PATH']
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# If using test-module and explode, the remote lib path will resemble ...
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system ...
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ansiballz
if 'PYTHONPATH' in os.environ:
pypaths = os.environ['PYTHONPATH'].split(':')
pypaths = [x for x in pypaths
if not x.endswith('/ansible_modlib.zip') and
not x.endswith('/debug_dir')]
os.environ['PYTHONPATH'] = ':'.join(pypaths)
if not os.environ['PYTHONPATH']:
del os.environ['PYTHONPATH']
if data:
st_in = subprocess.PIPE
kwargs = dict(
executable=executable,
shell=shell,
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# store the pwd
prev_dir = os.getcwd()
# make sure we're in the right working directory
if cwd and os.path.isdir(cwd):
cwd = os.path.abspath(os.path.expanduser(cwd))
kwargs['cwd'] = cwd
try:
os.chdir(cwd)
except (OSError, IOError) as e:
self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, to_native(e)),
exception=traceback.format_exc())
old_umask = None
if umask:
old_umask = os.umask(umask)
try:
if self._debug:
self.log('Executing: ' + self._clean_args(args))
cmd = subprocess.Popen(args, **kwargs)
# the communication logic here is essentially taken from that
# of the _communicate() function in ssh.py
stdout = b('')
stderr = b('')
rpipes = [cmd.stdout, cmd.stderr]
if data:
if not binary_data:
data += '\n'
if isinstance(data, text_type):
data = to_bytes(data)
cmd.stdin.write(data)
cmd.stdin.close()
while True:
rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
if encoding:
stdout = to_native(stdout, encoding=encoding, errors=errors)
else:
stdout = stdout
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfds) and cmd.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually cmd.poll() is always None here if rpipes is empty
elif not rpipes and cmd.poll() is None:
cmd.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
cmd.stdout.close()
cmd.stderr.close()
rc = cmd.returncode
except (OSError, IOError) as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(e)))
self.fail_json(rc=e.errno, msg=to_native(e), cmd=self._clean_args(args))
except Exception as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(traceback.format_exc())))
self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
# Restore env settings
for key, val in old_env_vals.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
if old_umask:
os.umask(old_umask)
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=self._clean_args(args), rc=rc, stdout=stdout, stderr=stderr, msg=msg)
# reset the pwd
os.chdir(prev_dir)
if encoding is not None:
return (rc, to_native(stdout, encoding=encoding, errors=errors),
to_native(stderr, encoding=encoding, errors=errors))
return (rc, stdout, stderr)
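# A minimal sketch of calling run_command, as comments so module behavior is
# unchanged; the command and error handling are illustrative, not prescriptive:
#
# rc, out, err = module.run_command(['ls', '-l', '/tmp'], check_rc=False)
# if rc != 0:
#     module.fail_json(msg='ls failed', rc=rc, stdout=out, stderr=err)
# # passing check_rc=True makes run_command call fail_json itself on a
# # non-zero return code, with stderr sanitized for no_log values.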
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def bytes_to_human(self, size):
return bytes_to_human(size)
# for backwards compatibility
pretty_bytes = bytes_to_human
def human_to_bytes(self, number, isbits=False):
return human_to_bytes(number, isbits)
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
|
caphrim007/ansible
|
lib/ansible/module_utils/basic.py
|
Python
|
gpl-3.0
| 117,324
|
[
"VisIt"
] |
75bd1c2413fa79d6d6c5f3149aee2674a0d637844c397ed448b60ba9a6d07cce
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements Compatibility corrections for mixing runs of different
functionals.
"""
import abc
import os
import warnings
from collections import defaultdict
from typing import List, Optional, Sequence, Union
import numpy as np
from monty.design_patterns import cached_class
from monty.dev import deprecated
from monty.json import MSONable
from monty.serialization import loadfn
from uncertainties import ufloat
from pymatgen.analysis.structure_analyzer import oxide_type, sulfide_type
from pymatgen.core.periodic_table import Element
from pymatgen.entries.computed_entries import (
CompositionEnergyAdjustment,
ComputedEntry,
ComputedStructureEntry,
ConstantEnergyAdjustment,
TemperatureEnergyAdjustment,
)
from pymatgen.io.vasp.sets import MITRelaxSet, MPRelaxSet
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
MU_H2O = -2.4583 # Free energy of formation of water, eV/H2O, used by MaterialsProjectAqueousCompatibility
__author__ = "Ryan Kingsbury, Shyue Ping Ong, Anubhav Jain, Stephen Dacek, Sai Jayaraman"
__copyright__ = "Copyright 2012-2020, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "April 2020"
class CompatibilityError(Exception):
"""
Exception class for Compatibility. Raised by attempting correction
on incompatible calculation
"""
pass
class Correction(metaclass=abc.ABCMeta):
"""
A Correction class is a pre-defined scheme for correcting a computed
entry based on the type and chemistry of the structure and the
calculation parameters. All Correction classes must implement a
correct_entry method.
"""
@abc.abstractmethod
def get_correction(self, entry):
"""
Returns correction and uncertainty for a single entry.
Args:
entry: A ComputedEntry object.
Returns:
The energy correction to be applied and the uncertainty of the correction.
Raises:
CompatibilityError if entry is not compatible.
"""
return
def correct_entry(self, entry):
"""
Corrects a single entry.
Args:
entry: A ComputedEntry object.
Returns:
A processed entry.
Raises:
CompatibilityError if entry is not compatible.
"""
new_corr = self.get_correction(entry)
old_std_dev = entry.correction_uncertainty
if np.isnan(old_std_dev):
old_std_dev = 0
old_corr = ufloat(entry.correction, old_std_dev)
updated_corr = new_corr + old_corr
if updated_corr.nominal_value != 0 and updated_corr.std_dev == 0:
# if there are no error values available for the corrections applied,
# set correction uncertainty to not a number
uncertainty = np.nan
else:
uncertainty = updated_corr.std_dev
entry.energy_adjustments.append(ConstantEnergyAdjustment(updated_corr.nominal_value, uncertainty))
return entry
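# Worked example (comments) of how correct_entry combines corrections: ufloat
# arithmetic propagates independent uncertainties in quadrature, so an existing
# correction of ufloat(-0.70, 0.01) plus a new ufloat(-0.30, 0.02) yields a
# nominal value of -1.00 eV with std_dev sqrt(0.01**2 + 0.02**2) ~= 0.0224 eV;
# if the combined std_dev is exactly 0 while the value is nonzero, the
# uncertainty is stored as NaN, as handled above.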
class PotcarCorrection(Correction):
"""
Checks that POTCARs are valid within a pre-defined input set. This
ensures that calculations performed using different InputSets are not
compared against each other.
Entry.parameters must contain a "potcar_symbols" key that is a list of
all POTCARs used in the run. Again, using the example of an Fe2O3 run
using Materials Project parameters, this would look like
entry.parameters["potcar_symbols"] = ['PAW_PBE Fe_pv 06Sep2000',
'PAW_PBE O 08Apr2002'].
"""
def __init__(self, input_set, check_hash=False):
"""
Args:
input_set: InputSet object used to generate the runs (used to check
for correct potcar symbols)
check_hash (bool): If True, uses the potcar hash to check for valid
potcars. If False, uses the potcar symbol (less reliable).
Defaults to False.
Raises:
ValueError if the entry does not contain a "potcar_symbols" key.
CompatibilityError if the potcar symbols are wrong.
"""
potcar_settings = input_set.CONFIG["POTCAR"]
if isinstance(list(potcar_settings.values())[-1], dict):
if check_hash:
self.valid_potcars = {k: d["hash"] for k, d in potcar_settings.items()}
else:
self.valid_potcars = {k: d["symbol"] for k, d in potcar_settings.items()}
else:
if check_hash:
raise ValueError("Cannot check hashes of potcars, since hashes are not included in the entry.")
self.valid_potcars = potcar_settings
self.input_set = input_set
self.check_hash = check_hash
def get_correction(self, entry) -> ufloat:
"""
:param entry: A ComputedEntry/ComputedStructureEntry
:return: Correction, Uncertainty.
"""
if self.check_hash:
if entry.parameters.get("potcar_spec"):
psp_settings = {d.get("hash") for d in entry.parameters["potcar_spec"] if d}
else:
raise ValueError("Cannot check hash without potcar_spec field")
else:
if entry.parameters.get("potcar_spec"):
psp_settings = {d.get("titel").split()[1] for d in entry.parameters["potcar_spec"] if d}
else:
psp_settings = {sym.split()[1] for sym in entry.parameters["potcar_symbols"] if sym}
if {self.valid_potcars.get(str(el)) for el in entry.composition.elements} != psp_settings:
raise CompatibilityError("Incompatible potcar")
return ufloat(0.0, 0.0)
def __str__(self):
return "{} Potcar Correction".format(self.input_set.__name__)
@cached_class
class GasCorrection(Correction):
"""
Correct gas energies to obtain the right formation energies. Note that
this depends on calculations being run within the same input set.
For old MaterialsProjectCompatibility and MITCompatibility.
"""
def __init__(self, config_file):
"""
Args:
config_file: Path to the selected compatibility.yaml config file.
"""
c = loadfn(config_file)
self.name = c["Name"]
self.cpd_energies = c["Advanced"]["CompoundEnergies"]
def get_correction(self, entry) -> ufloat:
"""
:param entry: A ComputedEntry/ComputedStructureEntry
:return: Correction.
"""
comp = entry.composition
correction = ufloat(0.0, 0.0)
# set error to 0 because old MPCompatibility doesn't have errors
rform = entry.composition.reduced_formula
if rform in self.cpd_energies:
correction += self.cpd_energies[rform] * comp.num_atoms - entry.uncorrected_energy
return correction
def __str__(self):
return "{} Gas Correction".format(self.name)
@cached_class
class AnionCorrection(Correction):
"""
Correct anion energies to obtain the right formation energies. Note that
this depends on calculations being run within the same input set.
For old MaterialsProjectCompatibility and MITCompatibility.
"""
def __init__(self, config_file, correct_peroxide=True):
"""
Args:
config_file: Path to the selected compatibility.yaml config file.
correct_peroxide: Specify whether peroxide/superoxide/ozonide
corrections are to be applied or not.
"""
c = loadfn(config_file)
self.oxide_correction = c["OxideCorrections"]
self.sulfide_correction = c.get("SulfideCorrections", defaultdict(float))
self.name = c["Name"]
self.correct_peroxide = correct_peroxide
def get_correction(self, entry) -> ufloat:
"""
:param entry: A ComputedEntry/ComputedStructureEntry
:return: Correction.
"""
comp = entry.composition
if len(comp) == 1: # Skip element entry
return ufloat(0.0, 0.0)
correction = ufloat(0.0, 0.0)
# Check for sulfide corrections
if Element("S") in comp:
sf_type = "sulfide"
if entry.data.get("sulfide_type"):
sf_type = entry.data["sulfide_type"]
elif hasattr(entry, "structure"):
warnings.warn("No sulfide_type parameter present; determining the sulfide type from the structure.")
sf_type = sulfide_type(entry.structure)
# use the same correction for polysulfides and sulfides
if sf_type == "polysulfide":
sf_type = "sulfide"
if sf_type in self.sulfide_correction:
correction += self.sulfide_correction[sf_type] * comp["S"]
# Check for oxide, peroxide, superoxide, and ozonide corrections.
if Element("O") in comp:
if self.correct_peroxide:
if entry.data.get("oxide_type"):
if entry.data["oxide_type"] in self.oxide_correction:
ox_corr = self.oxide_correction[entry.data["oxide_type"]]
correction += ox_corr * comp["O"]
if entry.data["oxide_type"] == "hydroxide":
ox_corr = self.oxide_correction["oxide"]
correction += ox_corr * comp["O"]
elif hasattr(entry, "structure"):
ox_type, nbonds = oxide_type(entry.structure, 1.05, return_nbonds=True)
if ox_type in self.oxide_correction:
correction += self.oxide_correction[ox_type] * nbonds
elif ox_type == "hydroxide":
correction += self.oxide_correction["oxide"] * comp["O"]
else:
warnings.warn(
"No structure or oxide_type parameter present. Note "
"that peroxide/superoxide corrections are less "
"reliable in this case, relying only on detection of "
"special formulas, e.g., Li2O2."
)
rform = entry.composition.reduced_formula
if rform in UCorrection.common_peroxides:
correction += self.oxide_correction["peroxide"] * comp["O"]
elif rform in UCorrection.common_superoxides:
correction += self.oxide_correction["superoxide"] * comp["O"]
elif rform in UCorrection.ozonides:
correction += self.oxide_correction["ozonide"] * comp["O"]
elif Element("O") in comp.elements and len(comp.elements) > 1:
correction += self.oxide_correction["oxide"] * comp["O"]
else:
correction += self.oxide_correction["oxide"] * comp["O"]
return correction
def __str__(self):
return "{} Anion Correction".format(self.name)
@cached_class
class AqueousCorrection(Correction):
"""
This class implements aqueous phase compound corrections for elements
and H2O.
"""
def __init__(self, config_file, error_file=None):
"""
Args:
config_file: Path to the selected compatibility.yaml config file.
error_file: Path to the selected compatibilityErrors.yaml config file.
"""
c = loadfn(config_file)
self.cpd_energies = c["AqueousCompoundEnergies"]
# there will either be a CompositionCorrections OR an OxideCorrections key,
# but not both, depending on the compatibility scheme we are using.
# TODO - the two lines below are specific to MaterialsProjectCompatibility
# and MaterialsProject2020Compatibility. Could be changed to be more general
# and/or streamlined if MaterialsProjectCompatibility is retired.
self.comp_correction = c.get("CompositionCorrections", defaultdict(float))
self.oxide_correction = c.get("OxideCorrections", defaultdict(float))
self.name = c["Name"]
if error_file:
e = loadfn(error_file)
self.cpd_errors = e.get("AqueousCompoundEnergies", defaultdict(float))
else:
self.cpd_errors = defaultdict(float)
def get_correction(self, entry) -> ufloat:
"""
:param entry: A ComputedEntry/ComputedStructureEntry
:return: Correction, Uncertainty.
"""
from pymatgen.analysis.pourbaix_diagram import MU_H2O
comp = entry.composition
rform = comp.reduced_formula
cpdenergies = self.cpd_energies
correction = ufloat(0.0, 0.0)
if rform in cpdenergies:
if rform in ["H2", "H2O"]:
corr = cpdenergies[rform] * comp.num_atoms - entry.uncorrected_energy - entry.correction
err = self.cpd_errors[rform] * comp.num_atoms
correction += ufloat(corr, err)
else:
corr = cpdenergies[rform] * comp.num_atoms
err = self.cpd_errors[rform] * comp.num_atoms
correction += ufloat(corr, err)
if not rform == "H2O":
# if the composition contains water molecules (e.g. FeO.nH2O),
# correct the gibbs free energy such that the waters are assigned energy=MU_H2O
# in other words, we assume that the DFT energy of such a compound is really
# a superposition of the "real" solid DFT energy (FeO in this case) and the free
# energy of some water molecules
# e.g. that E_FeO.nH2O = E_FeO + n * g_H2O
# so, to get the most accurate gibbs free energy, we want to replace
# g_FeO.nH2O = E_FeO.nH2O + dE_Fe + (n+1) * dE_O + 2n dE_H
# with
# g_FeO = E_FeO.nH2O + dE_Fe + dE_O + n g_H2O
# where E is DFT energy, dE is an energy correction, and g is gibbs free energy
# This means we have to 1) remove energy corrections associated with H and O in water
# and then 2) remove the free energy of the water molecules
nH2O = int(min(comp["H"] / 2.0, comp["O"])) # only count whole water molecules
if nH2O > 0:
# first, remove any H or O corrections already applied to H2O in the
# formation energy so that we don't double count them
# No. of H atoms not in a water
correction -= ufloat((comp["H"] - nH2O / 2) * self.comp_correction["H"], 0.0)
# No. of O atoms not in a water
correction -= ufloat(
(comp["O"] - nH2O) * (self.comp_correction["oxide"] + self.oxide_correction["oxide"]),
0.0,
)
# next, add MU_H2O for each water molecule present
correction += ufloat(-1 * MU_H2O * nH2O, 0.0)
# correction += 0.5 * 2.46 * nH2O # this is the old way this correction was calculated
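# Worked numeric example (comments, illustrative): for FeO.2H2O the composition
# is {Fe: 1, O: 3, H: 4}, so nH2O = int(min(4 / 2.0, 3)) = 2 whole water
# molecules. The block above subtracts the H and O corrections as written, then
# adds -1 * MU_H2O * nH2O = -1 * (-2.4583) * 2 = +4.9166 eV, assigning each
# embedded water the experimental free energy of -2.4583 eV/H2O.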
return correction
def __str__(self):
return "{} Aqueous Correction".format(self.name)
@cached_class
class UCorrection(Correction):
"""
This class implements the GGA/GGA+U mixing scheme, which allows mixing of
entries. Entry.parameters must contain a "hubbards" key which is a dict
of all non-zero Hubbard U values used in the calculation. For example,
if you ran a Fe2O3 calculation with Materials Project parameters,
this would look like entry.parameters["hubbards"] = {"Fe": 5.3}
If the "hubbards" key is missing, a GGA run is assumed.
It should be noted that ComputedEntries assimilated using the
pymatgen.apps.borg package and obtained via the MaterialsProject REST
interface using the pymatgen.matproj.rest package will automatically have
these fields populated.
"""
common_peroxides = [
"Li2O2",
"Na2O2",
"K2O2",
"Cs2O2",
"Rb2O2",
"BeO2",
"MgO2",
"CaO2",
"SrO2",
"BaO2",
]
common_superoxides = ["LiO2", "NaO2", "KO2", "RbO2", "CsO2"]
ozonides = ["LiO3", "NaO3", "KO3", "NaO5"]
def __init__(self, config_file, input_set, compat_type, error_file=None):
"""
Args:
config_file: Path to the selected compatibility.yaml config file.
input_set: InputSet object (to check for the +U settings)
compat_type: Two options, GGA or Advanced. GGA means all GGA+U
entries are excluded. Advanced means mixing scheme is
implemented to make entries compatible with each other,
but entries which are supposed to be done in GGA+U will have the
equivalent GGA entries excluded. For example, Fe oxides should
have a U value under the Advanced scheme. A GGA Fe oxide run
will therefore be excluded under the scheme.
error_file: Path to the selected compatibilityErrors.yaml config file.
"""
if compat_type not in ["GGA", "Advanced"]:
raise CompatibilityError("Invalid compat_type {}".format(compat_type))
c = loadfn(config_file)
self.input_set = input_set
if compat_type == "Advanced":
self.u_settings = self.input_set.CONFIG["INCAR"]["LDAUU"]
self.u_corrections = c["Advanced"]["UCorrections"]
else:
self.u_settings = {}
self.u_corrections = {}
self.name = c["Name"]
self.compat_type = compat_type
if error_file:
e = loadfn(error_file)
self.u_errors = e["Advanced"]["UCorrections"]
else:
self.u_errors = {}
def get_correction(self, entry) -> ufloat:
"""
:param entry: A ComputedEntry/ComputedStructureEntry
:return: Correction, Uncertainty.
"""
if entry.parameters.get("run_type") not in ["GGA", "GGA+U"]:
raise CompatibilityError(
"Entry {} has invalid run type {}. Discarding.".format(entry.entry_id, entry.parameters.get("run_type"))
)
calc_u = entry.parameters.get("hubbards", None)
calc_u = defaultdict(int) if calc_u is None else calc_u
comp = entry.composition
elements = sorted([el for el in comp.elements if comp[el] > 0], key=lambda el: el.X)
most_electroneg = elements[-1].symbol
correction = ufloat(0.0, 0.0)
ucorr = self.u_corrections.get(most_electroneg, {})
usettings = self.u_settings.get(most_electroneg, {})
uerrors = self.u_errors.get(most_electroneg, defaultdict(float))
for el in comp.elements:
sym = el.symbol
# Check for bad U values
if calc_u.get(sym, 0) != usettings.get(sym, 0):
raise CompatibilityError("Invalid U value of %s on %s" % (calc_u.get(sym, 0), sym))
if sym in ucorr:
correction += ufloat(ucorr[sym], uerrors[sym]) * comp[el]
return correction
def __str__(self):
return "{} {} Correction".format(self.name, self.compat_type)
class Compatibility(MSONable, metaclass=abc.ABCMeta):
"""
Abstract Compatibility class, not intended for direct use.
Compatibility classes are used to correct the energies of an entry or a set
of entries. All Compatibility classes must implement .get_adjustments method.
"""
@abc.abstractmethod
def get_adjustments(self, entry: ComputedEntry):
"""
Get the energy adjustments for a ComputedEntry.
This method must generate a list of EnergyAdjustment objects
of the appropriate type (constant, composition-based, or temperature-based)
to be applied to the ComputedEntry, and must raise a CompatibilityError
if the entry is not compatible.
Args:
entry: A ComputedEntry object.
Returns:
[EnergyAdjustment]: A list of EnergyAdjustment to be applied to the
Entry.
Raises:
CompatibilityError if the entry is not compatible
"""
return
def process_entry(self, entry):
"""
Process a single entry with the chosen Corrections. Note
that this method will change the data of the original entry.
Args:
entry: A ComputedEntry object.
Returns:
An adjusted entry if entry is compatible, otherwise None is
returned.
"""
if self.process_entries(entry):
return self.process_entries(entry)[0]
return None
def process_entries(self, entries: Union[ComputedEntry, list], clean: bool = True):
"""
Process a sequence of entries with the chosen Compatibility scheme. Note
that this method will change the data of the original entries.
Args:
entries: ComputedEntry or [ComputedEntry]
clean: bool, whether to remove any previously-applied energy adjustments.
If True, all EnergyAdjustment are removed prior to processing the Entry.
Default is True.
Returns:
A list of adjusted entries. Entries in the original list which
are not compatible are excluded.
"""
# convert input arg to a list if not already
if isinstance(entries, ComputedEntry):
entries = [entries]
processed_entry_list = []
for entry in entries:
ignore_entry = False
# if clean is True, remove all previous adjustments from the entry
if clean:
entry.energy_adjustments = []
# get the energy adjustments
try:
adjustments = self.get_adjustments(entry)
except CompatibilityError as exc:
ignore_entry = True
print(exc)
continue
for ea in adjustments:
# Has this correction already been applied?
if (ea.name, ea.cls, ea.value) in [(ea.name, ea.cls, ea.value) for ea in entry.energy_adjustments]:
# we already applied this exact correction. Do nothing.
pass
elif (ea.name, ea.cls) in [(ea.name, ea.cls) for ea in entry.energy_adjustments]:
# we already applied a correction with the same name
# but a different value. Something is wrong.
ignore_entry = True
warnings.warn(
"Entry {} already has an energy adjustment called {}, but its "
"value differs from the value of {:.3f} calculated here. This "
"Entry will be discarded.".format(entry.entry_id, ea.name, ea.value)
)
else:
# Add the correction to the energy_adjustments list
entry.energy_adjustments.append(ea)
if not ignore_entry:
processed_entry_list.append(entry)
return processed_entry_list
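# A hedged usage sketch for process_entries (comments only; the entries list is
# assumed to come from elsewhere, e.g. a VASP parse or a database query):
#
# compat = MaterialsProject2020Compatibility()
# accepted = compat.process_entries(entries) # incompatible entries are dropped
# # each accepted entry now carries EnergyAdjustment objects; a repeated call
# # with clean=True (the default) recomputes the adjustments from scratch.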
@staticmethod
def explain(entry):
"""
Prints an explanation of the energy adjustments applied by the
Compatibility class. Inspired by the "explain" methods in many database
methodologies.
Args:
entry: A ComputedEntry.
"""
print(
"The uncorrected energy of {} is {:.3f} eV ({:.3f} eV/atom).".format(
entry.composition,
entry.uncorrected_energy,
entry.uncorrected_energy / entry.composition.num_atoms,
)
)
if len(entry.energy_adjustments) > 0:
print("The following energy adjustments have been applied to this entry:")
for e in entry.energy_adjustments:
print(
"\t\t{}: {:.3f} eV ({:.3f} eV/atom)".format(e.name, e.value, e.value / entry.composition.num_atoms)
)
elif entry.correction == 0:
print("No energy adjustments have been applied to this entry.")
print(
"The final energy after adjustments is {:.3f} eV ({:.3f} eV/atom).".format(
entry.energy, entry.energy_per_atom
)
)
class CorrectionsList(Compatibility):
"""
The CorrectionsList class combines a list of corrections to be applied to
an entry or a set of entries. Note that some of the Corrections have
interdependencies. For example, PotcarCorrection must always be used
before any other compatibility. Also, AnionCorrection("MP") must be used
with PotcarCorrection("MP") (similarly with "MIT"). Typically,
you should use the specific MaterialsProjectCompatibility and
MITCompatibility subclasses instead.
"""
def __init__(self, corrections: Sequence):
"""
Args:
corrections: List of corrections to apply.
"""
self.corrections = corrections
super().__init__()
def get_adjustments(self, entry):
"""
Get the list of energy adjustments to be applied to an entry.
"""
adjustment_list = []
corrections, uncertainties = self.get_corrections_dict(entry)
for k, v in corrections.items():
if v != 0 and uncertainties[k] == 0:
uncertainty = np.nan
else:
uncertainty = uncertainties[k]
adjustment_list.append(
ConstantEnergyAdjustment(
v,
uncertainty=uncertainty,
name=k,
cls=self.as_dict(),
)
)
return adjustment_list
def get_corrections_dict(self, entry):
"""
Returns the corrections applied to a particular entry.
Args:
entry: A ComputedEntry object.
Returns:
({correction_name: value})
"""
corrections = {}
uncertainties = {}
for c in self.corrections:
val = c.get_correction(entry)
if val != 0:
corrections[str(c)] = val.nominal_value
uncertainties[str(c)] = val.std_dev
return corrections, uncertainties
def get_explanation_dict(self, entry):
"""
Provides an explanation dict of the corrections that are being applied
for a given compatibility scheme. Inspired by the "explain" methods
in many database methodologies.
Args:
entry: A ComputedEntry.
Returns:
(dict) of the form
{"Compatibility": "string",
"Uncorrected_energy": float,
"Corrected_energy": float,
"correction_uncertainty:" float,
"Corrections": [{"Name of Correction": {
"Value": float, "Explanation": "string", "Uncertainty": float}]}
"""
centry = self.process_entry(entry)
if centry is None:
uncorrected_energy = entry.uncorrected_energy
corrected_energy = None
correction_uncertainty = None
else:
uncorrected_energy = centry.uncorrected_energy
corrected_energy = centry.energy
correction_uncertainty = centry.correction_uncertainty
d = {
"compatibility": self.__class__.__name__,
"uncorrected_energy": uncorrected_energy,
"corrected_energy": corrected_energy,
"correction_uncertainty": correction_uncertainty,
}
corrections = []
corr_dict, uncer_dict = self.get_corrections_dict(entry)
for c in self.corrections:
if corr_dict.get(str(c), 0) != 0 and uncer_dict.get(str(c), 0) == 0:
uncer = np.nan
else:
uncer = uncer_dict.get(str(c), 0)
cd = {
"name": str(c),
"description": c.__doc__.split("Args")[0].strip(),
"value": corr_dict.get(str(c), 0),
"uncertainty": uncer,
}
corrections.append(cd)
d["corrections"] = corrections
return d
def explain(self, entry):
"""
Prints an explanation of the corrections that are being applied for a
given compatibility scheme. Inspired by the "explain" methods in many
database methodologies.
Args:
entry: A ComputedEntry.
"""
d = self.get_explanation_dict(entry)
print("The uncorrected value of the energy of %s is %f eV" % (entry.composition, d["uncorrected_energy"]))
print("The following corrections / screening are applied for %s:\n" % d["compatibility"])
for c in d["corrections"]:
print("%s correction: %s\n" % (c["name"], c["description"]))
print("For the entry, this correction has the value %f eV." % c["value"])
if c["uncertainty"] != 0 or c["value"] == 0:
print("This correction has an uncertainty value of %f eV." % c["uncertainty"])
else:
print("This correction does not have uncertainty data available")
print("-" * 30)
print("The final energy after corrections is %f" % d["corrected_energy"])
class MaterialsProjectCompatibility(CorrectionsList):
"""
This class implements the GGA/GGA+U mixing scheme, which allows mixing of
entries. Note that this should only be used for VASP calculations using the
MaterialsProject parameters (see pymatgen.io.vaspio_set.MPVaspInputSet).
Using this compatibility scheme on runs with different parameters is not
valid.
"""
@deprecated(
message=(
"MaterialsProjectCompatibility will be updated with new correction classes "
"as well as new values of corrections and uncertainties in 2020"
)
)
def __init__(self, compat_type="Advanced", correct_peroxide=True, check_potcar_hash=False):
"""
Args:
compat_type: Two options, GGA or Advanced. GGA means all GGA+U
entries are excluded. Advanced means mixing scheme is
implemented to make entries compatible with each other,
but entries which are supposed to be done in GGA+U will have the
equivalent GGA entries excluded. For example, Fe oxides should
have a U value under the Advanced scheme. A GGA Fe oxide run
will therefore be excluded under the scheme.
correct_peroxide: Specify whether peroxide/superoxide/ozonide
corrections are to be applied or not.
check_potcar_hash (bool): Use potcar hash to verify potcars are correct.
"""
self.compat_type = compat_type
self.correct_peroxide = correct_peroxide
self.check_potcar_hash = check_potcar_hash
fp = os.path.join(MODULE_DIR, "MPCompatibility.yaml")
super().__init__(
[
PotcarCorrection(MPRelaxSet, check_hash=check_potcar_hash),
GasCorrection(fp),
AnionCorrection(fp, correct_peroxide=correct_peroxide),
UCorrection(fp, MPRelaxSet, compat_type),
]
)
class MaterialsProject2020Compatibility(Compatibility):
"""
This class implements the Materials Project 2020 energy correction scheme,
which incorporates uncertainty quantification and allows for mixing of GGA
and GGA+U entries (see References).
Note that this scheme should only be applied to VASP calculations that use the
Materials Project input set parameters (see pymatgen.io.vasp.sets.MPRelaxSet).
Using this compatibility scheme on calculations with different parameters is not
valid.
"""
def __init__(
self,
compat_type="Advanced",
correct_peroxide=True,
check_potcar_hash=False,
):
"""
Args:
compat_type: Two options, GGA or Advanced. GGA means all GGA+U
entries are excluded. Advanced means the GGA/GGA+U mixing scheme
of Jain et al. (see References) is implemented. In this case,
entries which are supposed to be calculated in GGA+U (i.e.,
transition metal oxides and fluorides) will have the corresponding
GGA entries excluded. For example, Fe oxides should
have a U value under the Advanced scheme. An Fe oxide run in GGA
will therefore be excluded.
To use the "Advanced" type, Entry.parameters must contain a "hubbards"
key which is a dict of all non-zero Hubbard U values used in the
calculation. For example, if you ran a Fe2O3 calculation with
Materials Project parameters, this would look like
entry.parameters["hubbards"] = {"Fe": 5.3}. If the "hubbards" key
is missing, a GGA run is assumed. Entries obtained from the
MaterialsProject database will automatically have these fields
populated.
(Default: "Advanced")
correct_peroxide: Specify whether peroxide/superoxide/ozonide
corrections are to be applied or not. If false, all oxygen-containing
compounds are assigned the 'oxide' correction. (Default: True)
check_potcar_hash (bool): Use potcar hash to verify POTCAR settings are
consistent with MPRelaxSet. If False, only the POTCAR symbols will
be used. (Default: False)
References:
Wang, A., et al. A framework for quantifying uncertainty in DFT energy corrections.
Under review.
Jain, A. et al. Formation enthalpies by mixing GGA and GGA + U calculations.
Phys. Rev. B - Condens. Matter Mater. Phys. 84, 1–10 (2011).
"""
if compat_type not in ["GGA", "Advanced"]:
raise CompatibilityError("Invalid compat_type {}".format(compat_type))
self.compat_type = compat_type
self.correct_peroxide = correct_peroxide
self.check_potcar_hash = check_potcar_hash
# load corrections and uncertainties
self.config_file = os.path.join(MODULE_DIR, "MP2020Compatibility.yaml")
c = loadfn(self.config_file)
self.name = c["Name"]
self.comp_correction = c["Corrections"].get("CompositionCorrections", defaultdict(float))
self.comp_errors = c["Uncertainties"].get("CompositionCorrections", defaultdict(float))
if self.compat_type == "Advanced":
self.u_settings = MPRelaxSet.CONFIG["INCAR"]["LDAUU"]
self.u_corrections = c["Corrections"].get("GGAUMixingCorrections", defaultdict(float))
self.u_errors = c["Uncertainties"].get("GGAUMixingCorrections", defaultdict(float))
else:
self.u_settings = {}
self.u_corrections = {}
self.u_errors = {}
def get_adjustments(self, entry: Union[ComputedEntry, ComputedStructureEntry]):
"""
Get the energy adjustments for a ComputedEntry or ComputedStructureEntry.
Energy corrections are implemented directly in this method instead of in
separate AnionCorrection, GasCorrection, or UCorrection classes which
were used in the legacy correction scheme.
Args:
entry: A ComputedEntry or ComputedStructureEntry object.
Returns:
[EnergyAdjustment]: A list of EnergyAdjustment to be applied to the
Entry.
Raises:
CompatibilityError if the entry is not compatible
"""
if entry.parameters.get("run_type") not in ["GGA", "GGA+U"]:
raise CompatibilityError(
"Entry {} has invalid run type {}. Must be GGA or GGA+U. Discarding.".format(
entry.entry_id, entry.parameters.get("run_type")
)
)
# check the POTCAR symbols
# this should return ufloat(0, 0) or raise a CompatibilityError or ValueError
pc = PotcarCorrection(MPRelaxSet, check_hash=self.check_potcar_hash)
pc.get_correction(entry)
# apply energy adjustments
adjustments: List[CompositionEnergyAdjustment] = []
comp = entry.composition
rform = comp.reduced_formula
# sorted list of elements, ordered by electronegativity
elements = sorted([el for el in comp.elements if comp[el] > 0], key=lambda el: el.X)
# Skip single elements
if len(comp) == 1:
return adjustments
# Check for sulfide corrections
if Element("S") in comp:
sf_type = "sulfide"
if entry.data.get("sulfide_type"):
sf_type = entry.data["sulfide_type"]
elif hasattr(entry, "structure"):
sf_type = sulfide_type(entry.structure)
# use the same correction for polysulfides and sulfides
if sf_type == "polysulfide":
sf_type = "sulfide"
if sf_type == "sulfide":
adjustments.append(
CompositionEnergyAdjustment(
self.comp_correction["S"],
comp["S"],
uncertainty_per_atom=self.comp_errors["S"],
name="MP2020 anion correction (S)",
cls=self.as_dict(),
)
)
# Check for oxide, peroxide, superoxide, and ozonide corrections.
if Element("O") in comp:
if self.correct_peroxide:
# determine the oxide_type
if entry.data.get("oxide_type"):
ox_type = entry.data["oxide_type"]
elif hasattr(entry, "structure"):
ox_type, nbonds = oxide_type(entry.structure, 1.05, return_nbonds=True)
else:
warnings.warn(
"No structure or oxide_type parameter present. Note "
"that peroxide/superoxide corrections are less "
"reliable in this case, relying only on detection of "
"special formulas, e.g., Li2O2."
)
common_peroxides = [
"Li2O2",
"Na2O2",
"K2O2",
"Cs2O2",
"Rb2O2",
"BeO2",
"MgO2",
"CaO2",
"SrO2",
"BaO2",
]
common_superoxides = ["LiO2", "NaO2", "KO2", "RbO2", "CsO2"]
ozonides = ["LiO3", "NaO3", "KO3", "NaO5"]
if rform in common_peroxides:
ox_type = "peroxide"
elif rform in common_superoxides:
ox_type = "superoxide"
elif rform in ozonides:
ox_type = "ozonide"
else:
ox_type = "oxide"
else:
ox_type = "oxide"
if ox_type == "hydroxide":
ox_type = "oxide"
adjustments.append(
CompositionEnergyAdjustment(
self.comp_correction[ox_type],
comp["O"],
uncertainty_per_atom=self.comp_errors[ox_type],
name="MP2020 anion correction ({})".format(ox_type),
cls=self.as_dict(),
)
)
# Check for anion corrections
for anion in ["Br", "I", "Se", "Si", "Sb", "Te", "H", "N", "F", "Cl"]:
if Element(anion) in comp and anion in self.comp_correction:
apply_correction = False
# only apply anion corrections if the element is an anion
# first check for a pre-populated oxidation states key
# the key is expected to comprise a dict corresponding to the first element output by
# Composition.oxi_state_guesses(), e.g. {'Al': 3.0, 'S': 2.0, 'O': -2.0} for 'Al2SO4'
if entry.data.get("oxidation_states"):
if entry.data["oxidation_states"].get(anion, 0) < 0:
apply_correction = True
else:
# if the oxidation_states key is not populated, only apply the correction if the anion
# is the most electronegative element
most_electroneg = elements[-1].symbol
if anion == most_electroneg:
apply_correction = True
if apply_correction:
adjustments.append(
CompositionEnergyAdjustment(
self.comp_correction[anion],
comp[anion],
uncertainty_per_atom=self.comp_errors[anion],
name="MP2020 anion correction",
cls=self.as_dict(),
)
)
# GGA / GGA+U mixing scheme corrections
calc_u = entry.parameters.get("hubbards", None)
calc_u = defaultdict(int) if calc_u is None else calc_u
most_electroneg = elements[-1].symbol
ucorr = self.u_corrections.get(most_electroneg, defaultdict(float))
usettings = self.u_settings.get(most_electroneg, defaultdict(float))
uerrors = self.u_errors.get(most_electroneg, defaultdict(float))
for el in comp.elements:
sym = el.symbol
# Check for bad U values
if calc_u.get(sym, 0) != usettings.get(sym, 0):
raise CompatibilityError("Invalid U value of {:.1f} on {}".format(calc_u.get(sym, 0), sym))
if sym in ucorr:
adjustments.append(
CompositionEnergyAdjustment(
ucorr[sym],
comp[el],
uncertainty_per_atom=uerrors[sym],
name="MP2020 GGA/GGA+U mixing correction ({})".format(sym),
cls=self.as_dict(),
)
)
return adjustments
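# For illustration (comments only): inspecting the adjustments computed above
# for a hypothetical GGA+U Fe2O3 entry with
# entry.parameters["hubbards"] = {"Fe": 5.3} would show one
# CompositionEnergyAdjustment per corrected species, e.g. an
# "MP2020 anion correction (oxide)" scaled by comp["O"] and an
# "MP2020 GGA/GGA+U mixing correction (Fe)" scaled by comp["Fe"].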
class MITCompatibility(CorrectionsList):
"""
This class implements the GGA/GGA+U mixing scheme, which allows mixing of
entries. Note that this should only be used for VASP calculations using the
MIT parameters (see pymatgen.io.vaspio_set MITVaspInputSet). Using
this compatibility scheme on runs with different parameters is not valid.
"""
def __init__(self, compat_type="Advanced", correct_peroxide=True, check_potcar_hash=False):
"""
Args:
compat_type: Two options, GGA or Advanced. GGA means all GGA+U
entries are excluded. Advanced means mixing scheme is
implemented to make entries compatible with each other,
but entries which are supposed to be done in GGA+U will have the
equivalent GGA entries excluded. For example, Fe oxides should
have a U value under the Advanced scheme. A GGA Fe oxide run
will therefore be excluded under the scheme.
correct_peroxide: Specify whether peroxide/superoxide/ozonide
corrections are to be applied or not.
check_potcar_hash (bool): Use potcar hash to verify potcars are correct.
"""
self.compat_type = compat_type
self.correct_peroxide = correct_peroxide
self.check_potcar_hash = check_potcar_hash
fp = os.path.join(MODULE_DIR, "MITCompatibility.yaml")
super().__init__(
[
PotcarCorrection(MITRelaxSet, check_hash=check_potcar_hash),
GasCorrection(fp),
AnionCorrection(fp, correct_peroxide=correct_peroxide),
UCorrection(fp, MITRelaxSet, compat_type),
]
)
class MITAqueousCompatibility(CorrectionsList):
"""
This class implements the GGA/GGA+U mixing scheme, which allows mixing of
entries. Note that this should only be used for VASP calculations using the
MIT parameters (see pymatgen.io.vaspio_set MITVaspInputSet). Using
this compatibility scheme on runs with different parameters is not valid.
"""
def __init__(self, compat_type="Advanced", correct_peroxide=True, check_potcar_hash=False):
"""
Args:
compat_type: Two options, GGA or Advanced. GGA means all GGA+U
entries are excluded. Advanced means mixing scheme is
implemented to make entries compatible with each other,
but entries which are supposed to be done in GGA+U will have the
equivalent GGA entries excluded. For example, Fe oxides should
have a U value under the Advanced scheme. A GGA Fe oxide run
will therefore be excluded under the scheme.
correct_peroxide: Specify whether peroxide/superoxide/ozonide
corrections are to be applied or not.
check_potcar_hash (bool): Use potcar hash to verify potcars are correct.
"""
self.compat_type = compat_type
self.correct_peroxide = correct_peroxide
self.check_potcar_hash = check_potcar_hash
fp = os.path.join(MODULE_DIR, "MITCompatibility.yaml")
super().__init__(
[
PotcarCorrection(MITRelaxSet, check_hash=check_potcar_hash),
GasCorrection(fp),
AnionCorrection(fp, correct_peroxide=correct_peroxide),
UCorrection(fp, MITRelaxSet, compat_type),
AqueousCorrection(fp),
]
)
class MaterialsProjectAqueousCompatibility(Compatibility):
"""
This class implements the Aqueous energy referencing scheme for constructing
Pourbaix diagrams from DFT energies, as described in Persson et al.
This scheme applies various energy adjustments to convert DFT energies into
Gibbs free energies of formation at 298 K and to guarantee that the experimental
formation free energy of H2O is reproduced. Briefly, the steps are:
1. Beginning with the DFT energy of O2, adjust the energy of H2 so that
the experimental reaction energy of -2.458 eV/H2O is reproduced.
2. Add entropy to the DFT energy of any compounds that are liquid or
gaseous at room temperature.
3. Adjust the energy of H2O for consistency with the adjusted H2 energy.
4. Adjust the DFT energies of solid hydrate compounds (compounds that
contain water, e.g. FeO.nH2O) such that the energies of the embedded
H2O molecules are equal to the experimental free energy.
The above energy adjustments are computed dynamically based on the input
Entries.
References:
K.A. Persson, B. Waldwick, P. Lazic, G. Ceder, Prediction of solid-aqueous
equilibria: Scheme to combine first-principles calculations of solids with
experimental aqueous states, Phys. Rev. B - Condens. Matter Mater. Phys.
85 (2012) 1–12. doi:10.1103/PhysRevB.85.235438.
"""
def __init__(
self,
solid_compat: Optional[Compatibility] = MaterialsProjectCompatibility,
o2_energy: Optional[float] = None,
h2o_energy: Optional[float] = None,
h2o_adjustments: Optional[float] = None,
):
"""
Initialize the MaterialsProjectAqueousCompatibility class.
Note that this class requires as inputs the ground-state DFT energies of O2 and H2O, plus the value of any
energy adjustments applied to an H2O molecule. If these parameters are not provided in __init__, they can
be automatically populated by including ComputedEntry for the ground state of O2 and H2O in a list of entries
passed to process_entries. process_entries will fail if these energies cannot be determined by either route.
Args:
solid_compat: Compatibility scheme used to pre-process solid DFT energies prior to applying aqueous
energy adjustments. May be passed as a class (e.g. MaterialsProjectCompatibility) or an instance
(e.g., MaterialsProjectCompatibility()). If None, solid DFT energies are used as-is.
Default: MaterialsProjectCompatibility
o2_energy: The ground-state DFT energy of oxygen gas, including any adjustments or corrections, in eV/atom.
If not set, this value will be determined from any O2 entries passed to process_entries.
Default: None
            h2o_energy: The ground-state DFT energy of water, including any adjustments or corrections, in eV/atom.
If not set, this value will be determined from any H2O entries passed to process_entries.
Default: None
h2o_adjustments: Total energy adjustments applied to one water molecule, in eV/atom.
If not set, this value will be determined from any H2O entries passed to process_entries.
Default: None
"""
self.solid_compat = solid_compat
if self.solid_compat:
if not isinstance(self.solid_compat, Compatibility): # check whether solid_compat has been instantiated
self.solid_compat = solid_compat()
self.o2_energy = o2_energy
self.h2o_energy = h2o_energy
self.h2o_adjustments = h2o_adjustments
if not all([self.o2_energy, self.h2o_energy, self.h2o_adjustments]):
warnings.warn(
"You did not provide the required O2 and H2O energies. {} "
"needs these energies in order to compute the appropriate energy adjustments. It will try "
"to determine the values from ComputedEntry for O2 and H2O passed to process_entries, but "
"will fail if these entries are not provided.".format(type(self).__name__)
)
# Standard state entropy of molecular-like compounds at 298K (-T delta S)
# from Kubaschewski Tables (eV/atom)
self.cpd_entropies = {
"O2": 0.316731,
"N2": 0.295729,
"F2": 0.313025,
"Cl2": 0.344373,
"Br": 0.235039,
"Hg": 0.234421,
"H2O": 0.071963, # 0.215891 eV/H2O
}
self.name = "MP Aqueous free energy adjustment"
super().__init__()
def get_adjustments(self, entry: ComputedEntry):
"""
Returns the corrections applied to a particular entry.
Args:
entry: A ComputedEntry object.
Returns:
[EnergyAdjustment]: Energy adjustments to be applied to entry.
Raises:
CompatibilityError if the required O2 and H2O energies have not been provided to
MaterialsProjectAqueousCompatibility during init or in the list of entries passed to process_entries.
"""
adjustments = []
if self.o2_energy is None or self.h2o_energy is None or self.h2o_adjustments is None:
raise CompatibilityError(
"You did not provide the required O2 and H2O energies. "
"{} needs these energies in order to compute "
"the appropriate energy adjustments. Either specify the energies as arguments "
"to {}.__init__ or run process_entries on a list that includes ComputedEntry for "
"the ground state of O2 and H2O.".format(type(self).__name__, type(self).__name__)
)
        # compute the free energies of H2 and H2O (eV/atom) to guarantee that the
        # formation free energy of H2O is equal to -2.4583 eV/H2O from experiments
        # (MU_H2O from pourbaix module)
# Free energy of H2 in eV/atom, fitted using Eq. 40 of Persson et al. PRB 2012 85(23)
# for this calculation ONLY, we need the (corrected) DFT energy of water
self.h2_energy = round(
0.5
* (
3 * (self.h2o_energy - self.cpd_entropies["H2O"]) - (self.o2_energy - self.cpd_entropies["O2"]) - MU_H2O
),
6,
)
# Free energy of H2O, fitted for consistency with the O2 and H2 energies.
self.fit_h2o_energy = round(
(2 * self.h2_energy + (self.o2_energy - self.cpd_entropies["O2"]) + MU_H2O) / 3,
6,
)
comp = entry.composition
rform = comp.reduced_formula
# pin the energy of all H2 entries to h2_energy
if rform == "H2":
adjustments.append(
ConstantEnergyAdjustment(
self.h2_energy * comp.num_atoms - entry.energy,
uncertainty=np.nan,
name="MP Aqueous H2 / H2O referencing",
cls=self.as_dict(),
description="Adjusts the H2 and H2O energy to reproduce the experimental "
"Gibbs formation free energy of H2O, based on the DFT energy "
"of Oxygen",
)
)
# pin the energy of all H2O entries to fit_h2o_energy
elif rform == "H2O":
adjustments.append(
ConstantEnergyAdjustment(
self.fit_h2o_energy * comp.num_atoms - entry.energy,
uncertainty=np.nan,
name="MP Aqueous H2 / H2O referencing",
cls=self.as_dict(),
description="Adjusts the H2 and H2O energy to reproduce the experimental "
"Gibbs formation free energy of H2O, based on the DFT energy "
"of Oxygen",
)
)
# add minus T delta S to the DFT energy (enthalpy) of compounds that are
# molecular-like at room temperature
elif rform in self.cpd_entropies and rform != "H2O":
adjustments.append(
TemperatureEnergyAdjustment(
-1 * self.cpd_entropies[rform] / 298,
298,
comp.num_atoms,
uncertainty_per_deg=np.nan,
name="Compound entropy at room temperature",
cls=self.as_dict(),
description="Adds the entropy (T delta S) to energies of compounds that "
"are gaseous or liquid at standard state",
)
)
        # TODO - detection of embedded water molecules is not very sophisticated.
        # It should be replaced with some kind of actual structure detection.
        # For any compound except water, check whether it is a hydrate (contains
        # H2O in its structure). If so, adjust the energy to remove MU_H2O eV per
        # embedded water molecule.
        # In other words, we assume that the DFT energy of such a compound is really
        # a superposition of the "real" solid DFT energy (FeO in this case) and the
        # free energy of some water molecules,
        # e.g. E_FeO.nH2O = E_FeO + n * g_H2O.
        # So, to get the most accurate Gibbs free energy, we want to replace
        # g_FeO.nH2O = E_FeO.nH2O + dE_Fe + (n+1) * dE_O + 2n * dE_H
        # with
        # g_FeO = E_FeO.nH2O + dE_Fe + dE_O + n * g_H2O,
        # where E is the DFT energy, dE is an energy correction, and g is the
        # Gibbs free energy.
        # This means we have to 1) remove the energy corrections associated with
        # H and O in water and then 2) remove the free energy of the water molecules.
if not rform == "H2O":
# count the number of whole water molecules in the composition
nH2O = int(min(comp["H"] / 2.0, comp["O"]))
if nH2O > 0:
# first, remove any H or O corrections already applied to H2O in the
# formation energy so that we don't double count them
# next, remove MU_H2O for each water molecule present
hydrate_adjustment = -1 * (self.h2o_adjustments * 3 + MU_H2O)
adjustments.append(
CompositionEnergyAdjustment(
hydrate_adjustment,
nH2O,
uncertainty_per_atom=np.nan,
name="MP Aqueous hydrate",
cls=self.as_dict(),
description="Adjust the energy of solid hydrate compounds (compounds "
"containing H2O molecules in their structure) so that the "
"free energies of embedded H2O molecules match the experimental"
" value enforced by the MP Aqueous energy referencing scheme.",
)
)
return adjustments
def process_entries(self, entries: Union[ComputedEntry, list], clean: bool = False):
"""
Process a sequence of entries with the chosen Compatibility scheme.
Args:
entries: ComputedEntry or [ComputedEntry]
clean: bool, whether to remove any previously-applied energy adjustments.
                If True, all EnergyAdjustments are removed prior to processing the Entry.
Default is False.
Returns:
A list of adjusted entries. Entries in the original list which
are not compatible are excluded.
"""
# convert input arg to a list if not already
if isinstance(entries, ComputedEntry):
entries = [entries]
# pre-process entries with the given solid compatibility class
if self.solid_compat:
entries = self.solid_compat.process_entries(entries, clean=True)
# extract the DFT energies of oxygen and water from the list of entries, if present
if not self.o2_energy:
o2_entries = [e for e in entries if e.composition.reduced_formula == "O2"]
if o2_entries:
self.o2_energy = min(e.energy_per_atom for e in o2_entries)
if not self.h2o_energy and not self.h2o_adjustments:
h2o_entries = [e for e in entries if e.composition.reduced_formula == "H2O"]
if h2o_entries:
h2o_entries = sorted(h2o_entries, key=lambda e: e.energy_per_atom)
self.h2o_energy = h2o_entries[0].energy_per_atom
self.h2o_adjustments = h2o_entries[0].correction / h2o_entries[0].composition.num_atoms
return super().process_entries(entries, clean=clean)
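# --- Hedged usage sketch (not part of the original module) ---
# The O2/H2O reference energies can either be inferred from entries in
# the processed list or passed explicitly; `o2_entry`, `h2o_entry`,
# `other_entries` and the numeric values below are illustrative only:
#
#   compat = MaterialsProjectAqueousCompatibility()
#   processed = compat.process_entries([o2_entry, h2o_entry] + other_entries)
#
# or, supplying the reference values directly so that no O2/H2O entries
# are required in the list:
#
#   compat = MaterialsProjectAqueousCompatibility(
#       o2_energy=-4.95, h2o_energy=-5.19, h2o_adjustments=-0.23)
#   processed = compat.process_entries(other_entries)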
|
davidwaroquiers/pymatgen
|
pymatgen/entries/compatibility.py
|
Python
|
mit
| 58,757
|
[
"VASP",
"pymatgen"
] |
5c39fddbce67dedd197c6e20f500b11a82d04e29a17098136984e332113a7ff2
|
from django import forms
from django.conf import settings
from django.db import connection
from django.shortcuts import render_to_response
from formtools.wizard.views import SessionWizardView
from catmaid.models import Class, ClassInstance, ClassInstanceClassInstance
from catmaid.models import Connector, Project, Relation, Treenode
SOURCE_TYPE_CHOICES = [
('file', 'Local file'),
('project', 'CATMAID project'),
]
IMPORT_TEMPLATES = {
"sourcetypeselection": "catmaid/import/annotations/setup_source.html",
"projectimport": "catmaid/import/annotations/setup.html",
"fileimport": "catmaid/import/annotations/setup.html",
"confirmation": "catmaid/import/annotations/confirmation.html",
"done": "catmaid/import/annotations/done.html",
}
class SourceTypeForm(forms.Form):
""" A form to select basic properties on the data to be
imported.
"""
source_type = forms.ChoiceField(choices=SOURCE_TYPE_CHOICES,
widget=forms.RadioSelect(), help_text="The source type defines "
"where the data to import comes from")
target_project = forms.ModelChoiceField(required=True,
help_text="The project the data will be imported into.",
queryset=Project.objects.all().exclude(pk=settings.ONTOLOGY_DUMMY_PROJECT_ID))
import_treenodes = forms.BooleanField(initial=True, required=False,
help_text="Should treenodes be imported?")
import_connectors = forms.BooleanField(initial=True, required=False,
help_text="Should connectors be imported?")
import_annotations = forms.BooleanField(initial=True, required=False,
help_text="Should neuron annotations be imported?")
import_tags = forms.BooleanField(initial=True, required=False,
help_text="Should neuron node tags be imported?")
class FileBasedImportForm(forms.Form):
pass
class ProjectBasedImportForm(forms.Form):
""" Display a list of available projects."""
projects = forms.ModelMultipleChoiceField(required=False,
widget=forms.CheckboxSelectMultiple(attrs={'class': 'autoselectable'}),
help_text="Only data from selected projects will be imported.",
queryset=Project.objects.all().exclude(pk=settings.ONTOLOGY_DUMMY_PROJECT_ID))
# TODO: check administer or super user permissions for validation
class ConfirmationForm(forms.Form):
""" Displays a summary of the data to be imported.
"""
pass
def get_source_type(wizard):
""" Test whether the project import form should be shown."""
cleaned_data = wizard.get_cleaned_data_for_step('sourcetypeselection') \
        or {'source_type': SOURCE_TYPE_CHOICES[0][0]}
return cleaned_data['source_type']
class ImportingWizard(SessionWizardView):
""" With the help of the importing wizard it is possible to import neurons
and their annotations as well as the linked skeletons and their treenodes
and tags into an existing CATMAID project. The source for this data can
either be a file or another project. Users can only be carried over if the
source is another project in the target instance. Otherwise, the importing
    user gets ownership of all model objects.
"""
form_list = [
("sourcetypeselection", SourceTypeForm),
("projectimport", ProjectBasedImportForm),
("fileimport", FileBasedImportForm),
("confirmation", ConfirmationForm),
]
# Either file or project import form will be shown
condition_dict = {
'fileimport': lambda w: get_source_type(w) == 'file',
'projectimport': lambda w: get_source_type(w) == 'project',
}
def get_context_data(self, form, **kwargs):
""" On the confirmation step, this will read in the data to import and
collect some statistics on it.
"""
context = super(ImportingWizard, self).get_context_data(form=form, **kwargs)
if self.steps.current == 'confirmation':
stats = []
# Load all wanted information from the selected projects
scd = self.get_cleaned_data_for_step('sourcetypeselection')
if scd["source_type"] == 'project':
projects = self.get_cleaned_data_for_step('projectimport')['projects']
for p in projects:
ps = {
'source': "%s (%s)" % (p.title, p.id),
'ntreenodes': 0,
'nconnectors': 0,
'nannotations': 0,
'nannotationlinks': 0,
'ntags': 0,
}
if scd['import_treenodes']:
ps['ntreenodes'] = Treenode.objects.filter(project=p).count()
if scd['import_connectors']:
ps['nconnectors'] = Connector.objects.filter(project=p).count()
if scd['import_annotations']:
                        annotation = Class.objects.filter(project=p,
                                class_name="annotation")
                        annotated_with = Relation.objects.filter(project=p,
                                relation_name="annotated_with")
                        # __in lookups are needed because annotation and
                        # annotated_with are querysets, not single objects
                        ps['nannotations'] = ClassInstance.objects.filter(
                                project=p, class_column__in=annotation).count()
                        ps['nannotationlinks'] = ClassInstanceClassInstance.objects.filter(
                                project=p, relation__in=annotated_with).count()
if scd['import_tags']:
pass
stats.append(ps)
# Update context
context.update({
'source_type': scd["source_type"],
'stats': stats,
})
return context
def get_template_names(self):
return [IMPORT_TEMPLATES[self.steps.current]]
def done(self, form_list, **kwargs):
""" All previously configured sources will now be used to import data.
"""
# Load all wanted information from the selected projects
scd = self.get_cleaned_data_for_step('sourcetypeselection')
target_project = scd['target_project']
if scd["source_type"] == 'project':
projects = self.get_cleaned_data_for_step('projectimport')['projects']
for p in projects:
                # Use keyword arguments: copy_annotations() also accepts an
                # import_connectortreenodes flag, so passing these positionally
                # would mis-assign the annotation and tag options.
                copy_annotations(p.id, target_project.id,
                        import_treenodes=scd['import_treenodes'],
                        import_connectors=scd['import_connectors'],
                        import_annotations=scd['import_annotations'],
                        import_tags=scd['import_tags'])
return render_to_response(IMPORT_TEMPLATES['done'])
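# Hedged sketch (not part of the original file): a SessionWizardView like
# ImportingWizard is typically exposed through a URLconf; the URL pattern
# below is an illustrative assumption, not CATMAID's actual routing.
#
#   from django.conf.urls import url
#   urlpatterns = [
#       url(r'^import/annotations/$', ImportingWizard.as_view()),
#   ]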
class ExportingWizard(SessionWizardView):
""" The export wizard makes it possible to export neurons and their
annotations as well as the linked skeletons and their treenodes into a JSON
representation.
"""
pass
def copy_annotations(source_pid, target_pid, import_treenodes=True,
import_connectors=True, import_connectortreenodes=True,
import_annotations=True, import_tags=True):
""" Copy annotation data (treenodes, connectors, annotations, tags) to
another (existing) project. The newly created entities will have new IDs
and are independent from the old ones.
    import_treenodes: if true, all treenodes from the source will be imported
    import_connectors: if true, all connectors from the source will be imported
    import_connectortreenodes: if true, all connectors and treenodes that are
        linked are imported, along with the links themselves
    import_annotations: if true, annotations and their links are imported
    import_tags: if true, node tags are imported (not yet implemented)
"""
# Use raw SQL to duplicate the rows, because there is no
# need to transfer the data to Django and back to Postgres
# again.
cursor = connection.cursor()
    imported_treenodes = []
    if import_treenodes:
        # Copy treenodes from source to target and remember the new IDs
        cursor.execute('''
            INSERT INTO treenode (project_id, location_x,
                location_y, location_z, editor_id, user_id,
                creation_time, edition_time, skeleton_id,
                radius, confidence, parent_id)
            SELECT %s, location_x, location_y, location_z,
                editor_id, user_id, creation_time, edition_time,
                skeleton_id, radius, confidence, parent_id
            FROM treenode tn
            WHERE tn.project_id=%s
            RETURNING id
        ''', (target_pid, source_pid))
        imported_treenodes = [row[0] for row in cursor.fetchall()]
if import_connectors:
# Copy connectors from source to target
cursor.execute('''
INSERT INTO connector (project_id, location_x,
location_y, location_z, editor_id, user_id,
creation_time, edition_time, confidence)
SELECT %s, location_x, location_y, location_z,
editor_id, user_id, creation_time, edition_time,
confidence
FROM connector cn
WHERE cn.project_id=%s
''', (target_pid, source_pid))
if import_connectortreenodes:
        # Make sure treenodes linked to connectors exist in the target,
        # in case they have not all been imported above
cursor.execute('''
INSERT INTO treenode (project_id, location_x,
location_y, location_z, editor_id, user_id,
creation_time, edition_time, skeleton_id,
radius, confidence, parent_id)
SELECT %s, location_x, location_y, location_z,
editor_id, user_id, creation_time, edition_time,
skeleton_id, radius, confidence, parent_id
FROM treenode tn
WHERE tn.project_id=%s
''', (target_pid, source_pid))
        # Link connectors to treenodes. Note: the column list below is
        # assumed from the pattern of the queries above, and the connector
        # and treenode IDs selected here still reference the source rows;
        # a complete import would remap them to the newly created rows.
        cursor.execute('''
            INSERT INTO connector_treenode (project_id, user_id,
                creation_time, edition_time, connector_id,
                treenode_id, relation_id)
            SELECT %s, user_id, creation_time, edition_time,
                connector_id, treenode_id, relation_id
            FROM connector_treenode ct
            WHERE ct.project_id=%s
        ''', (target_pid, source_pid))
if import_annotations:
try:
# Make sure the target has the 'annotation' class and the
# 'annotated_with' relation.
annotation_src = Class.objects.get(
project_id=source_pid, class_name="annotation")
annotated_with_src = Relation.objects.get(
project_id=source_pid, relation_name="annotated_with")
annotation_tgt = Class.objects.get_or_create(
project_id=target_pid, class_name="annotation", defaults={
"user": annotation_src.user,
"creation_time": annotation_src.creation_time,
"edition_time": annotation_src.edition_time,
"description": annotation_src.description,
})[0]
annotated_with_tgt = Relation.objects.get_or_create(
project_id=target_pid, relation_name="annotated_with", defaults={
"user": annotation_src.user,
"creation_time": annotated_with_src.creation_time,
"edition_time": annotated_with_src.edition_time,
"description": annotated_with_src.description,
"isreciprocal": annotated_with_src.isreciprocal,
"uri": annotated_with_src.uri,
})[0]
# Get all source annotations and import them into target
annotations_src = ClassInstance.objects.filter(
project_id=source_pid, class_column=annotation_src)
existing_target_annotations = [a.name for a in ClassInstance.objects.filter(
project_id=target_pid, class_column=annotation_tgt)]
annotations_tgt = []
for a in annotations_src:
# Ignore if there is already a target annotation like this
if a.name in existing_target_annotations:
continue
                annotations_tgt.append(ClassInstance(
                    # Create the new annotation in the *target* project
                    project_id=target_pid,
                    class_column=annotation_tgt,
name=a.name,
user=a.user,
creation_time=a.creation_time,
edition_time=a.edition_time))
ClassInstance.objects.bulk_create(annotations_tgt)
            # Import annotation links. Note: the class_instance_a/b IDs
            # selected here still reference the source project's instances;
            # a complete import would remap them to the newly created
            # target annotations.
            cursor.execute('''
                INSERT INTO class_instance_class_instance (user_id,
                    creation_time, edition_time, project_id, relation_id,
                    class_instance_a, class_instance_b)
                SELECT user_id, creation_time, edition_time, %s, %s,
                    class_instance_a, class_instance_b
                FROM class_instance_class_instance cici
                WHERE cici.project_id=%s AND cici.relation_id=%s
            ''', (target_pid, annotated_with_tgt.id, source_pid,
                  annotated_with_src.id))
        except (Class.DoesNotExist, Relation.DoesNotExist):
# No annotations need to be imported if no source annotations are
# found
pass
if import_tags:
# TreenodeClassInstance
# ConnectorClassInstance
pass
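# --- Hedged usage sketch (not part of the original module) ---
# Copying everything except tags from one project into another; the
# project IDs below are illustrative:
#
#   copy_annotations(source_pid=1, target_pid=2, import_tags=False)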
|
catsop/CATMAID
|
django/applications/catmaid/control/annotationadmin.py
|
Python
|
gpl-3.0
| 13,443
|
[
"NEURON"
] |
867e3bb29c10b49abae4bcb1936a5502c1c413ed7ecdf05ef6861ea369cc25f1
|
# -*- coding: utf-8 -*-
""" Module for converting various mesh formats."""
# Copyright (C) 2006 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Garth N. Wells (gmsh function)
# Modified by Alexander H. Jarosch (gmsh fix)
# Modified by Angelo Simone (Gmsh and Medit fix)
# Modified by Andy R. Terrel (gmsh fix and triangle function)
# Modified by Magnus Vikstrom (metis and scotch function)
# Modified by Bartosz Sawicki (diffpack function)
# Modified by Gideon Simpson (Exodus II function)
# Modified by Kent-Andre Mardal (Star-CD function)
# Modified by Nuno Lopes (fix for emc2 mesh format (medit version 0))
# Modified by Neilen Marais (add gmsh support for reading physical region)
# Modified by Evan Lezar (add support for reading gmsh physical regions on facets)
# Modified by Jan Blechta (add triangle support for marker on edges and attributes on triangles)
#
# Last changed: 2014-02-06
# NOTE: This module does not depend on (py)dolfin being installed.
# NOTE: If future additions need that please import dolfin in a try: except:
# NOTE: clause and tell the user to install dolfin if it is not installed.
from __future__ import print_function
import getopt
import sys
from instant import get_status_output
import re
import warnings
import os.path
import numpy
import six
from . import abaqus
from . import xml_writer
def format_from_suffix(suffix):
"Return format for given suffix"
if suffix == "xml":
return "xml"
elif suffix == "mesh":
return "mesh"
elif suffix == "gmsh":
return "gmsh"
elif suffix == "msh":
return "gmsh"
elif suffix == "gra":
return "metis"
elif suffix == "grf":
return "scotch"
elif suffix == "grid":
return "diffpack"
elif suffix == "inp":
return "abaqus"
elif suffix == "ncdf":
return "NetCDF"
elif suffix =="exo":
return "ExodusII"
elif suffix =="e":
return "ExodusII"
elif suffix == "vrt" or suffix == "cel":
return "StarCD"
elif suffix == "ele" or suffix == "node":
return "Triangle"
else:
_error("Sorry, unknown suffix %s." % suffix)
def mesh2xml(ifilename, ofilename):
"""Convert between .mesh and .xml, parser implemented as a
state machine:
0 = read 'Vertices'
1 = read number of vertices
2 = read next vertex
3 = read 'Triangles' or 'Tetrahedra'
4 = read number of cells
5 = read next cell
6 = done
"""
print("Converting from Medit format (.mesh) to DOLFIN XML format")
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Scan file for cell type
cell_type = None
dim = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Remove newline
line = line.strip(" \n\r").split(" ")
# Read dimension either on same line or following line
if line[0] == "Dimension":
if (len(line) == 2):
line = line[1]
else:
line = ifile.readline()
num_dims = int(line)
if num_dims == 2:
cell_type = "triangle"
dim = 2
elif num_dims == 3:
cell_type = "tetrahedron"
dim = 3
break
# Check that we got the cell type
    if cell_type is None:
_error("Unable to find cell type.")
# Step to beginning of file
ifile.seek(0)
# Write header
xml_writer.write_header_mesh(ofile, cell_type, dim)
# Current state
state = 0
# Write data
num_vertices_read = 0
num_cells_read = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Skip comments
if line[0] == '#':
continue
# Remove newline
line = line.rstrip("\n\r")
if state == 0:
if line == "Vertices" or line == " Vertices":
state += 1
elif state == 1:
num_vertices = int(line)
xml_writer.write_header_vertices(ofile, num_vertices)
state +=1
elif state == 2:
if num_dims == 2:
(x, y, tmp) = line.split()
x = float(x)
y = float(y)
z = 0.0
elif num_dims == 3:
(x, y, z, tmp) = line.split()
x = float(x)
y = float(y)
z = float(z)
xml_writer.write_vertex(ofile, num_vertices_read, x, y, z)
num_vertices_read +=1
if num_vertices == num_vertices_read:
xml_writer.write_footer_vertices(ofile)
state += 1
elif state == 3:
if (line == "Triangles" or line == " Triangles") and num_dims == 2:
state += 1
if line == "Tetrahedra" and num_dims == 3:
state += 1
elif state == 4:
num_cells = int(line)
xml_writer.write_header_cells(ofile, num_cells)
state +=1
elif state == 5:
if num_dims == 2:
(n0, n1, n2, tmp) = line.split()
n0 = int(n0) - 1
n1 = int(n1) - 1
n2 = int(n2) - 1
xml_writer.write_cell_triangle(ofile, num_cells_read, n0, n1, n2)
elif num_dims == 3:
(n0, n1, n2, n3, tmp) = line.split()
n0 = int(n0) - 1
n1 = int(n1) - 1
n2 = int(n2) - 1
n3 = int(n3) - 1
xml_writer.write_cell_tetrahedron(ofile, num_cells_read, n0, n1, n2, n3)
num_cells_read +=1
if num_cells == num_cells_read:
xml_writer.write_footer_cells(ofile)
state += 1
elif state == 6:
break
# Check that we got all data
if state == 6:
print("Conversion done")
else:
_error("Missing data, unable to convert")
# Write footer
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
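# Hedged illustration (not part of the original module): a minimal Medit
# .mesh file of the kind mesh2xml() parses, shown here as a comment. The
# trailing integer on each vertex/cell line is the reference marker read
# into `tmp` above.
#
#   MeshVersionFormatted 1
#   Dimension
#   2
#   Vertices
#   3
#   0.0 0.0 1
#   1.0 0.0 1
#   0.0 1.0 1
#   Triangles
#   1
#   1 2 3 1
#   End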
def gmsh2xml(ifilename, handler):
"""Convert between .gmsh v2.0 format (http://www.geuz.org/gmsh/) and .xml,
parser implemented as a state machine:
0 = read 'MeshFormat'
1 = read mesh format data
2 = read 'EndMeshFormat'
3 = read 'Nodes'
4 = read number of vertices
5 = read vertices
6 = read 'EndNodes'
7 = read 'Elements'
8 = read number of cells
9 = read cells
10 = done
Afterwards, extract physical region numbers if they are defined in
the mesh file as a mesh function.
"""
print("Converting from Gmsh format (.msh, .gmsh) to DOLFIN XML format")
# The dimension of the gmsh element types supported here as well as the dolfin cell types for each dimension
gmsh_dim = {15: 0, 1: 1, 2: 2, 4: 3}
cell_type_for_dim = {1: "interval", 2: "triangle", 3: "tetrahedron" }
# the gmsh element types supported for conversion
supported_gmsh_element_types = [1, 2, 4, 15]
# Open files
ifile = open(ifilename, "r")
# Scan file for cell type
cell_type = None
highest_dim = 0
line = ifile.readline()
while line:
# Remove newline
line = line.rstrip("\n\r")
# Read dimension
if line.find("$Elements") == 0:
line = ifile.readline()
num_elements = int(line)
if num_elements == 0:
_error("No elements found in gmsh file.")
line = ifile.readline()
# Now iterate through elements to find largest dimension. Gmsh
# format might include elements of lower dimensions in the element list.
# We also need to count number of elements of correct dimensions.
# Also determine which vertices are not used.
dim_count = {0: 0, 1: 0, 2: 0, 3: 0}
vertices_used_for_dim = {0: [], 1: [], 2: [], 3: []}
# Array used to store gmsh tags for 1D (type 1/line), 2D (type 2/triangular) elements and 3D (type 4/tet) elements
tags_for_dim = {0: [], 1: [], 2: [], 3: []}
while line.find("$EndElements") == -1:
element = line.split()
elem_type = int(element[1])
num_tags = int(element[2])
if elem_type in supported_gmsh_element_types:
dim = gmsh_dim[elem_type]
if highest_dim < dim:
highest_dim = dim
node_num_list = [int(node) for node in element[3 + num_tags:]]
vertices_used_for_dim[dim].extend(node_num_list)
if num_tags > 0:
tags_for_dim[dim].append(tuple(int(tag) for tag in element[3:3+num_tags]))
dim_count[dim] += 1
else:
#TODO: output a warning here. "gmsh element type %d not supported" % elem_type
pass
line = ifile.readline()
else:
# Read next line
line = ifile.readline()
# Check that we got the cell type and set num_cells_counted
if highest_dim == 0:
_error("Unable to find cells of supported type.")
num_cells_counted = dim_count[highest_dim]
vertex_set = set(vertices_used_for_dim[highest_dim])
vertices_used_for_dim[highest_dim] = None
vertex_dict = {}
for n,v in enumerate(vertex_set):
vertex_dict[v] = n
# Step to beginning of file
ifile.seek(0)
# Set mesh type
handler.set_mesh_type(cell_type_for_dim[highest_dim], highest_dim)
    # Initialise node list (gmsh does not export all vertices in order)
nodelist = {}
# Current state
state = 0
# Write data
num_vertices_read = 0
num_cells_read = 0
# Only import the dolfin objects if facet markings exist
process_facets = False
if len(tags_for_dim[highest_dim-1]) > 0:
# first construct the mesh
try:
from dolfin import MeshEditor, Mesh
except ImportError:
_error("DOLFIN must be installed to handle Gmsh boundary regions")
mesh = Mesh()
        mesh_editor = MeshEditor()
        mesh_editor.open(mesh, highest_dim, highest_dim)
process_facets = True
else:
# TODO: Output a warning or an error here
        mesh = None  # referenced below when deciding whether to process facets
while state != 10:
# Read next line
line = ifile.readline()
if not line: break
# Skip comments
if line[0] == '#':
continue
# Remove newline
line = line.rstrip("\n\r")
if state == 0:
if line == "$MeshFormat":
state = 1
elif state == 1:
(version, file_type, data_size) = line.split()
state = 2
elif state == 2:
if line == "$EndMeshFormat":
state = 3
elif state == 3:
if line == "$Nodes":
state = 4
elif state == 4:
num_vertices = len(vertex_dict)
handler.start_vertices(num_vertices)
if process_facets:
mesh_editor.init_vertices_global(num_vertices, num_vertices)
state = 5
elif state == 5:
(node_no, x, y, z) = line.split()
node_no = int(node_no)
x,y,z = [float(xx) for xx in (x,y,z)]
if node_no in vertex_dict:
node_no = vertex_dict[node_no]
else:
continue
nodelist[int(node_no)] = num_vertices_read
handler.add_vertex(num_vertices_read, [x, y, z])
if process_facets:
if highest_dim == 1:
coords = numpy.array([x])
elif highest_dim == 2:
coords = numpy.array([x, y])
elif highest_dim == 3:
coords = numpy.array([x, y, z])
mesh_editor.add_vertex(num_vertices_read, coords)
num_vertices_read +=1
if num_vertices == num_vertices_read:
handler.end_vertices()
state = 6
elif state == 6:
if line == "$EndNodes":
state = 7
elif state == 7:
if line == "$Elements":
state = 8
elif state == 8:
handler.start_cells(num_cells_counted)
if process_facets:
mesh_editor.init_cells_global(num_cells_counted, num_cells_counted)
state = 9
elif state == 9:
element = line.split()
elem_type = int(element[1])
num_tags = int(element[2])
if elem_type in supported_gmsh_element_types:
dim = gmsh_dim[elem_type]
else:
dim = 0
if dim == highest_dim:
node_num_list = [vertex_dict[int(node)] for node in element[3 + num_tags:]]
for node in node_num_list:
                    if node not in nodelist:
_error("Vertex %d of %s %d not previously defined." %
(node, cell_type_for_dim[dim], num_cells_read))
cell_nodes = [nodelist[n] for n in node_num_list]
handler.add_cell(num_cells_read, cell_nodes)
if process_facets:
cell_nodes = numpy.array([nodelist[n] for n in node_num_list], dtype=numpy.uintp)
mesh_editor.add_cell(num_cells_read, cell_nodes)
num_cells_read +=1
if num_cells_counted == num_cells_read:
handler.end_cells()
if process_facets:
mesh_editor.close()
state = 10
elif state == 10:
break
# Write mesh function based on the Physical Regions defined by
# gmsh, but only if they are not all zero. All zero physical
# regions indicate that no physical regions were defined.
if highest_dim not in [1,2,3]:
_error("Gmsh tags not supported for dimension %i. Probably a bug" % dim)
tags = tags_for_dim[highest_dim]
physical_regions = tuple(tag[0] for tag in tags)
if not all(tag == 0 for tag in physical_regions):
handler.start_meshfunction("physical_region", dim, num_cells_counted)
for i, physical_region in enumerate(physical_regions):
handler.add_entity_meshfunction(i, physical_region)
handler.end_meshfunction()
# Now process the facet markers
tags = tags_for_dim[highest_dim-1]
if (len(tags) > 0) and (mesh is not None):
physical_regions = tuple(tag[0] for tag in tags)
if not all(tag == 0 for tag in physical_regions):
mesh.init(highest_dim-1,0)
# Get the facet-node connectivity information (reshape as a row of node indices per facet)
if highest_dim==1:
# for 1d meshes the mesh topology returns the vertex to vertex map, which isn't what we want
# as facets are vertices
facets_as_nodes = numpy.array([[i] for i in range(mesh.num_facets())])
else:
                facets_as_nodes = mesh.topology()(highest_dim-1,0)().reshape(mesh.num_facets(), highest_dim)
# Build the reverse map
nodes_as_facets = {}
for facet in range(mesh.num_facets()):
nodes_as_facets[tuple(facets_as_nodes[facet,:])] = facet
            data = [0] * mesh.num_facets()
for i, physical_region in enumerate(physical_regions):
nodes = [n-1 for n in vertices_used_for_dim[highest_dim-1][highest_dim*i:(highest_dim*i+highest_dim)]]
nodes.sort()
if physical_region != 0:
try:
index = nodes_as_facets[tuple(nodes)]
data[index] = physical_region
                    except KeyError:
                        raise Exception("The facet (%d) was not found to mark: %s" % (i, nodes))
# Create and initialise the mesh function
handler.start_meshfunction("facet_region", highest_dim-1, mesh.num_facets() )
            for index, physical_region in enumerate(data):
handler.add_entity_meshfunction(index, physical_region)
handler.end_meshfunction()
# Check that we got all data
if state == 10:
print("Conversion done")
else:
_error("Missing data, unable to convert \n\ Did you use version 2.0 of the gmsh file format?")
# Close files
ifile.close()
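# Hedged illustration (not part of the original module): a minimal Gmsh
# v2 ASCII file of the kind gmsh2xml() parses (element line format:
# elm-number elm-type number-of-tags <tags> node-list; type 2 = triangle):
#
#   $MeshFormat
#   2.2 0 8
#   $EndMeshFormat
#   $Nodes
#   3
#   1 0 0 0
#   2 1 0 0
#   3 0 1 0
#   $EndNodes
#   $Elements
#   1
#   1 2 2 99 1 1 2 3
#   $EndElements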
def triangle2xml(ifilename, ofilename):
"""Convert between triangle format
(http://www.cs.cmu.edu/~quake/triangle.html) and .xml. The
given ifilename should be the prefix for the corresponding
.node, and .ele files.
"""
def get_next_line (fp):
"""Helper function for skipping comments and blank lines"""
line = fp.readline()
if line == '':
_error("Hit end of file prematurely.")
line = line.strip()
if not (line.startswith('#') or line == ''):
return line
return get_next_line(fp)
print("Converting from Triangle format {.node, .ele} to DOLFIN XML format")
# Open files
for suffix in [".node", ".ele"]:
if suffix in ifilename and ifilename[-len(suffix):] == suffix:
ifilename = ifilename.replace(suffix, "")
node_file = open(ifilename+".node", "r")
ele_file = open(ifilename+".ele", "r")
ofile = open(ofilename, "w")
try:
edge_file = open(ifilename+".edge", "r")
print("Found .edge file")
except IOError:
edge_file = None
# Read all the nodes
nodes = {}
num_nodes, dim, attr, bound = list(map(int, get_next_line(node_file).split()))
while len(nodes) < num_nodes:
node, x, y = get_next_line(node_file).split()[:3]
nodes[int(node)] = (float(x), float(y))
# Read all the triangles
tris = {}
tri_attrs = {}
num_tris, n_per_tri, attrs = list(map(int, get_next_line(ele_file).split()))
while len(tris) < num_tris:
line = get_next_line(ele_file).split()
tri, n1, n2, n3 = list(map(int, line[:4]))
# vertices are ordered according to current UFC ordering scheme -
# - may change in future!
tris[tri] = tuple(sorted((n1, n2, n3)))
tri_attrs[tri] = tuple(map(float, line[4:4+attrs]))
# Read all the boundary markers from edges
edge_markers_global = {}
edge_markers_local = []
got_negative_edge_markers = False
if edge_file is not None:
num_edges, num_edge_markers = list(map(int, get_next_line(edge_file).split()))
if num_edge_markers == 1:
while len(edge_markers_global) < num_edges:
edge, v1, v2, marker = list(map(int, get_next_line(edge_file).split()))
if marker < 0: got_negative_edge_markers = True
edge_markers_global[tuple(sorted((v1, v2)))] = marker
if got_negative_edge_markers:
print("Some edge markers are negative! dolfin will increase "\
"them by probably 2**32 when loading xml. "\
"Consider using non-negative edge markers only.")
for tri, vertices in six.iteritems(tris):
v0, v1, v2 = sorted((vertices[0:3]))
try:
edge_markers_local.append((tri, 0, \
edge_markers_global[(v1, v2)]))
edge_markers_local.append((tri, 1, \
edge_markers_global[(v0, v2)]))
edge_markers_local.append((tri, 2, \
edge_markers_global[(v0, v1)]))
                except KeyError:
                    raise Exception("meshconvert.py: The facet was not found.")
elif num_edge_markers == 0:
print("...but no markers in it. Ignoring it")
else:
print("...but %d markers specified in it. It won't be processed."\
%num_edge_markers)
# Write everything out
xml_writer.write_header_mesh(ofile, "triangle", 2)
xml_writer.write_header_vertices(ofile, num_nodes)
node_off = 0 if 0 in nodes else -1
for node, node_t in six.iteritems(nodes):
xml_writer.write_vertex(ofile, node+node_off, node_t[0], node_t[1], 0.0)
xml_writer.write_footer_vertices(ofile)
xml_writer.write_header_cells(ofile, num_tris)
tri_off = 0 if 0 in tris else -1
for tri, tri_t in six.iteritems(tris):
xml_writer.write_cell_triangle(ofile, tri+tri_off, tri_t[0] + node_off,
tri_t[1] + node_off, tri_t[2] + node_off)
xml_writer.write_footer_cells(ofile)
if len(edge_markers_local) > 0:
xml_writer.write_header_domains(ofile)
xml_writer.write_header_meshvaluecollection(ofile, \
"edge markers", 1, len(edge_markers_local), "uint")
for tri, local_edge, marker in edge_markers_local:
xml_writer.write_entity_meshvaluecollection(ofile, \
1, tri+tri_off, marker, local_edge)
xml_writer.write_footer_meshvaluecollection(ofile)
xml_writer.write_footer_domains(ofile)
xml_writer.write_footer_mesh(ofile)
for i in range(attrs):
afilename = ofilename.replace(".xml", ".attr"+str(i)+".xml")
afile = open(afilename, "w")
xml_writer.write_header_meshfunction2(afile)
xml_writer.write_header_meshvaluecollection(afile, \
"triangle attribs "+str(i), 2, num_tris, "double")
for tri, tri_a in six.iteritems(tri_attrs):
xml_writer.write_entity_meshvaluecollection(afile, \
2, tri+tri_off, tri_a[i], 0)
xml_writer.write_footer_meshvaluecollection(afile)
xml_writer.write_footer_meshfunction(afile)
print("triangle attributes from .ele file written to "+afilename)
afile.close()
# Close files
node_file.close()
ele_file.close()
if edge_file is not None:
edge_file.close()
ofile.close()
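# Hedged illustration (not part of the original module): minimal Triangle
# .node and .ele files of the kind triangle2xml() parses. The .node header
# is <#vertices> <dim> <#attributes> <#boundary markers>; the .ele header
# is <#triangles> <nodes per triangle> <#attributes>.
#
#   example.node:
#     3 2 0 0
#     1 0.0 0.0
#     2 1.0 0.0
#     3 0.0 1.0
#
#   example.ele:
#     1 3 0
#     1 1 2 3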
def xml_old2xml(ifilename, ofilename):
"Convert from old DOLFIN XML format to new."
print("Converting from old (pre DOLFIN 0.6.2) to new DOLFIN XML format...")
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Scan file for cell type (assuming there is just one)
cell_type = None
dim = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Read dimension
if "<triangle" in line:
cell_type = "triangle"
dim = 2
break
elif "<tetrahedron" in line:
cell_type = "tetrahedron"
dim = 3
break
# Step to beginning of file
ifile.seek(0)
# Read lines and make changes
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Modify line
if "xmlns" in line:
line = "<dolfin xmlns:dolfin=\"http://fenicsproject.org\">\n"
if "<mesh>" in line:
line = " <mesh celltype=\"%s\" dim=\"%d\">\n" % (cell_type, dim)
if dim == 2 and " z=\"0.0\"" in line:
line = line.replace(" z=\"0.0\"", "")
if " name=" in line:
line = line.replace(" name=", " index=")
if " name =" in line:
line = line.replace(" name =", " index=")
if "n0" in line:
line = line.replace("n0", "v0")
if "n1" in line:
line = line.replace("n1", "v1")
if "n2" in line:
line = line.replace("n2", "v2")
if "n3" in line:
line = line.replace("n3", "v3")
# Write line
ofile.write(line)
# Close files
    ifile.close()
    ofile.close()
print("Conversion done")
def metis_graph2graph_xml(ifilename, ofilename):
"Convert from Metis graph format to DOLFIN Graph XML."
print("Converting from Metis graph format to DOLFIN Graph XML.")
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Read number of vertices and edges
line = ifile.readline()
if not line:
_error("Empty file")
(num_vertices, num_edges) = line.split()
xml_writer.write_header_graph(ofile, "directed")
xml_writer.write_header_vertices(ofile, int(num_vertices))
for i in range(int(num_vertices)):
line = ifile.readline()
edges = line.split()
xml_writer.write_graph_vertex(ofile, i, len(edges))
xml_writer.write_footer_vertices(ofile)
xml_writer.write_header_edges(ofile, 2*int(num_edges))
# Step to beginning of file and skip header info
ifile.seek(0)
ifile.readline()
for i in range(int(num_vertices)):
print("vertex %g", i)
line = ifile.readline()
edges = line.split()
for e in edges:
xml_writer.write_graph_edge(ofile, i, int(e))
xml_writer.write_footer_edges(ofile)
xml_writer.write_footer_graph(ofile)
# Close files
    ifile.close()
    ofile.close()
def scotch_graph2graph_xml(ifilename, ofilename):
"Convert from Scotch graph format to DOLFIN Graph XML."
print("Converting from Scotch graph format to DOLFIN Graph XML.")
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Skip graph file version number
ifile.readline()
# Read number of vertices and edges
line = ifile.readline()
if not line:
_error("Empty file")
(num_vertices, num_edges) = line.split()
# Read start index and numeric flag
# Start index is 0 or 1 (C/Fortran)
# Numeric flag is 3 bits where bit 1 enables vertex labels
# bit 2 enables edge weights and bit 3 enables vertex weights
line = ifile.readline()
(start_index, numeric_flag) = line.split()
    # Handling not implemented
    if numeric_flag != "000":
_error("Handling of scotch vertex labels, edge- and vertex weights not implemented")
xml_writer.write_header_graph(ofile, "undirected")
xml_writer.write_header_vertices(ofile, int(num_vertices))
# Read vertices and edges, first number gives number of edges from this vertex (not used)
for i in range(int(num_vertices)):
line = ifile.readline()
edges = line.split()
xml_writer.write_graph_vertex(ofile, i, len(edges)-1)
xml_writer.write_footer_vertices(ofile)
xml_writer.write_header_edges(ofile, int(num_edges))
# Step to beginning of file and skip header info
ifile.seek(0)
ifile.readline()
ifile.readline()
ifile.readline()
for i in range(int(num_vertices)):
line = ifile.readline()
edges = line.split()
for j in range(1, len(edges)):
xml_writer.write_graph_edge(ofile, i, int(edges[j]))
xml_writer.write_footer_edges(ofile)
xml_writer.write_footer_graph(ofile)
# Close files
    ifile.close()
    ofile.close()
def diffpack2xml(ifilename, ofilename):
"Convert from Diffpack tetrahedral/triangle grid format to DOLFIN XML."
print(diffpack2xml.__doc__)
# Format strings for MeshFunction XML files
meshfunction_header = """\
<?xml version="1.0" encoding="UTF-8"?>\n
<dolfin xmlns:dolfin="http://www.fenics.org/dolfin/">
<mesh_function type="uint" dim="%d" size="%d">\n"""
meshfunction_entity = " <entity index=\"%d\" value=\"%d\"/>\n"
meshfunction_footer = " </mesh_function>\n</dolfin>"
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Read and analyze header
while 1:
line = ifile.readline()
if not line:
_error("Empty file")
if line[0] == "#":
break
if re.search(r"Number of elements", line):
num_cells = int(re.match(r".*\s(\d+).*", line).group(1))
if re.search(r"Number of nodes", line):
num_vertices = int(re.match(r".*\s(\d+).*", line).group(1))
if re.search(r"Number of space dim.", line):
num_dims = int(re.match(r".*\s(\d+).*", line).group(1))
if num_dims == 3:
xml_writer.write_header_mesh(ofile, "tetrahedron", 3)
elem_type = "ElmT4n3D"
write_cell_func = xml_writer.write_cell_tetrahedron
else:
xml_writer.write_header_mesh(ofile, "triangle", 2)
elem_type = "ElmT3n2D"
write_cell_func = xml_writer.write_cell_triangle
xml_writer.write_header_vertices(ofile, num_vertices)
# Read & write vertices and collect markers for vertices
vertex_markers = []
unique_vertex_markers = set()
for i in range(num_vertices):
line = ifile.readline()
m = re.match(r"^.*\(\s*(.*)\s*\).*\](.*)$", line)
        x = list(map(float, re.split(r"[\s,]+", m.group(1))))
xml_writer.write_vertex(ofile, i, *x)
markers = list(map(int, m.group(2).split()))
vertex_markers.append(markers)
unique_vertex_markers.update(markers)
xml_writer.write_footer_vertices(ofile)
xml_writer.write_header_cells(ofile, num_cells)
# Output unique vertex markers as individual VertexFunctions
unique_vertex_markers.difference_update([0])
for unique_marker in unique_vertex_markers:
ofile_marker = open(ofilename.replace(".xml", "") + \
"_marker_" + str(unique_marker)+".xml", "w")
xml_writer.write_header_meshfunction(ofile_marker, 0, num_vertices)
for ind, markers in enumerate(vertex_markers):
if unique_marker in markers:
xml_writer.write_entity_meshfunction(ofile_marker, ind, unique_marker)
else:
xml_writer.write_entity_meshfunction(ofile_marker, ind, 0)
xml_writer.write_footer_meshfunction(ofile_marker)
# Ignore comment lines
while 1:
line = ifile.readline()
if not line:
_error("Empty file")
if line[0] == "#":
break
# Read & write cells and collect cell and face markers
cell_markers = []
facet_markers = []
facet_to_vert = [[1,2,3], [0,2,3], [0,1,3], [0,1,2]]
vert_to_facet = facet_to_vert # The same!
cell_ind = 0
while cell_ind < num_cells:
line = ifile.readline()
v = line.split()
if not v:
continue
if v[1] != elem_type:
_error("Only tetrahedral (ElmT4n3D) and triangular (ElmT3n2D) elements are implemented.")
# Store Cell markers
cell_markers.append(int(v[2]))
# Sort vertex indices
cell_indices = sorted([int(x)-1 for x in v[3:]])
write_cell_func(ofile, cell_ind, *cell_indices)
if num_dims == 2:
cell_ind += 1
continue
# Check Facet info
process_facet = set(range(4))
for local_vert_ind, global_vert_ind in enumerate(cell_indices):
# If no marker is included for vertex skip corresponding facet
if not vertex_markers[global_vert_ind]:
process_facet.difference_update(facet_to_vert[local_vert_ind])
# Process facets
for local_facet in process_facet:
# Start with markers from first vertex
global_first_vertex = cell_indices[facet_to_vert[local_facet][0]]
marker_intersection = set(vertex_markers[global_first_vertex])
# Process the other vertices
for local_vert in facet_to_vert[local_facet][1:]:
marker_intersection.intersection_update(\
vertex_markers[cell_indices[local_vert]])
if not marker_intersection:
break
# If not break we have a marker on local_facet
else:
assert(len(marker_intersection)==1)
facet_markers.append((cell_ind, local_facet, \
marker_intersection.pop()))
# Bump cell_ind
cell_ind += 1
xml_writer.write_footer_cells(ofile)
xml_writer.write_header_domains(ofile)
# Write facet markers if any
if facet_markers:
xml_writer.write_header_meshvaluecollection(ofile, "m", 2, \
len(facet_markers), "uint")
for cell, local_facet, marker in facet_markers:
xml_writer.write_entity_meshvaluecollection(ofile, 2, cell, \
marker, local_facet)
xml_writer.write_footer_meshvaluecollection(ofile)
xml_writer.write_header_meshvaluecollection(ofile, "m", num_dims, \
len(cell_markers), "uint")
for cell, marker in enumerate(cell_markers):
xml_writer.write_entity_meshvaluecollection(ofile, num_dims, cell, \
marker)
xml_writer.write_footer_meshvaluecollection(ofile)
xml_writer.write_footer_domains(ofile)
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
class ParseError(Exception):
""" Error encountered in source file.
"""
class DataHandler(object):
""" Baseclass for handlers of mesh data.
The actual handling of mesh data encountered in the source file is
    delegated to a polymorphic object. Typically, the delegate will write the
data to XML.
@ivar _state: the state which the handler is in, one of State_*.
@ivar _cell_type: cell type in mesh. One of CellType_*.
@ivar _dim: mesh dimensions.
"""
State_Invalid, State_Init, State_Vertices, State_Cells, \
State_MeshFunction, State_MeshValueCollection = list(range(6))
CellType_Tetrahedron, CellType_Triangle, CellType_Interval = list(range(3))
def __init__(self):
self._state = self.State_Invalid
def set_mesh_type(self, cell_type, dim):
assert self._state == self.State_Invalid
self._state = self.State_Init
if cell_type == "tetrahedron":
self._cell_type = self.CellType_Tetrahedron
elif cell_type == "triangle":
self._cell_type = self.CellType_Triangle
elif cell_type == "interval":
self._cell_type = self.CellType_Interval
self._dim = dim
def start_vertices(self, num_vertices):
assert self._state == self.State_Init
self._state = self.State_Vertices
def add_vertex(self, vertex, coords):
assert self._state == self.State_Vertices
def end_vertices(self):
assert self._state == self.State_Vertices
self._state = self.State_Init
def start_cells(self, num_cells):
assert self._state == self.State_Init
self._state = self.State_Cells
def add_cell(self, cell, nodes):
assert self._state == self.State_Cells
def end_cells(self):
assert self._state == self.State_Cells
self._state = self.State_Init
def start_domains(self):
assert self._state == self.State_Init
def end_domains(self):
self._state = self.State_Init
def start_meshfunction(self, name, dim, size):
assert self._state == self.State_Init
self._state = self.State_MeshFunction
def add_entity_meshfunction(self, index, value):
assert self._state == self.State_MeshFunction
def end_meshfunction(self):
assert self._state == self.State_MeshFunction
self._state = self.State_Init
def start_mesh_value_collection(self, name, dim, size, etype):
assert self._state == self.State_Init
self._state = self.State_MeshValueCollection
def add_entity_mesh_value_collection(self, dim, index, value, local_entity=0):
assert self._state == self.State_MeshValueCollection
def end_mesh_value_collection(self):
assert self._state == self.State_MeshValueCollection
self._state = self.State_Init
def warn(self, msg):
""" Issue warning during parse.
"""
warnings.warn(msg)
def error(self, msg):
""" Raise error during parse.
This method is expected to raise ParseError.
"""
raise ParseError(msg)
def close(self):
self._state = self.State_Invalid
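# Hedged sketch (not part of the original module): a minimal custom
# handler that only counts entities, illustrating the delegation pattern
# described in the DataHandler docstring. A handler like this can be
# passed to convert() below instead of the XML writer.
class CountingHandler(DataHandler):
    """ Data handler that only counts vertices and cells. """
    def __init__(self):
        DataHandler.__init__(self)
        self.num_vertices = 0
        self.num_cells = 0
    def add_vertex(self, vertex, coords):
        DataHandler.add_vertex(self, vertex, coords)
        self.num_vertices += 1
    def add_cell(self, cell, nodes):
        DataHandler.add_cell(self, cell, nodes)
        self.num_cells += 1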
class XmlHandler(DataHandler):
""" Data handler class which writes to Dolfin XML.
"""
def __init__(self, ofilename):
DataHandler.__init__(self)
self._ofilename = ofilename
self.__ofile = open(ofilename, "w")
self.__ofile_meshfunc = None
def ofile(self):
return self.__ofile
def set_mesh_type(self, cell_type, dim):
DataHandler.set_mesh_type(self, cell_type, dim)
xml_writer.write_header_mesh(self.__ofile, cell_type, dim)
def start_vertices(self, num_vertices):
DataHandler.start_vertices(self, num_vertices)
xml_writer.write_header_vertices(self.__ofile, num_vertices)
def add_vertex(self, vertex, coords):
DataHandler.add_vertex(self, vertex, coords)
xml_writer.write_vertex(self.__ofile, vertex, *coords)
def end_vertices(self):
DataHandler.end_vertices(self)
xml_writer.write_footer_vertices(self.__ofile)
def start_cells(self, num_cells):
DataHandler.start_cells(self, num_cells)
xml_writer.write_header_cells(self.__ofile, num_cells)
def add_cell(self, cell, nodes):
DataHandler.add_cell(self, cell, nodes)
if self._cell_type == self.CellType_Tetrahedron:
func = xml_writer.write_cell_tetrahedron
elif self._cell_type == self.CellType_Triangle:
func = xml_writer.write_cell_triangle
elif self._cell_type == self.CellType_Interval:
func = xml_writer.write_cell_interval
func(self.__ofile, cell, *nodes)
def end_cells(self):
DataHandler.end_cells(self)
xml_writer.write_footer_cells(self.__ofile)
def start_meshfunction(self, name, dim, size):
DataHandler.start_meshfunction(self, name, dim, size)
fname = os.path.splitext(self.__ofile.name)[0]
self.__ofile_meshfunc = open("%s_%s.xml" % (fname, name), "w")
xml_writer.write_header_meshfunction(self.__ofile_meshfunc, dim, size)
def add_entity_meshfunction(self, index, value):
DataHandler.add_entity_meshfunction(self, index, value)
xml_writer.write_entity_meshfunction(self.__ofile_meshfunc, index, value)
def end_meshfunction(self):
DataHandler.end_meshfunction(self)
xml_writer.write_footer_meshfunction(self.__ofile_meshfunc)
self.__ofile_meshfunc.close()
self.__ofile_meshfunc = None
def start_domains(self):
#DataHandler.start_domains(self)
xml_writer.write_header_domains(self.__ofile)
def end_domains(self):
#DataHandler.end_domains(self)
xml_writer.write_footer_domains(self.__ofile)
def start_mesh_value_collection(self, name, dim, size, etype):
DataHandler.start_mesh_value_collection(self, name, dim, size, etype)
xml_writer.write_header_meshvaluecollection(self.__ofile, name, dim, size, etype)
def add_entity_mesh_value_collection(self, dim, index, value, local_entity=0):
DataHandler.add_entity_mesh_value_collection(self, dim, index, value)
xml_writer.write_entity_meshvaluecollection(self.__ofile, dim, index, value, local_entity=local_entity)
def end_mesh_value_collection(self):
DataHandler.end_mesh_value_collection(self)
xml_writer.write_footer_meshvaluecollection(self.__ofile)
def close(self):
DataHandler.close(self)
if self.__ofile.closed:
return
xml_writer.write_footer_mesh(self.__ofile)
self.__ofile.close()
if self.__ofile_meshfunc is not None:
self.__ofile_meshfunc.close()
def netcdf2xml(ifilename,ofilename):
"Convert from NetCDF format to DOLFIN XML."
print("Converting from NetCDF format (.ncdf) to DOLFIN XML format")
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
cell_type = None
dim = 0
# Scan file for dimension, number of nodes, number of elements
while 1:
line = ifile.readline()
if not line:
_error("Empty file")
if re.search(r"num_dim.*=", line):
dim = int(re.match(".*\s=\s(\d+)\s;",line).group(1))
if re.search(r"num_nodes.*=", line):
num_vertices = int(re.match(".*\s=\s(\d+)\s;",line).group(1))
if re.search(r"num_elem.*=", line):
num_cells = int(re.match(".*\s=\s(\d+)\s;",line).group(1))
if re.search(r"connect1 =",line):
break
num_dims=dim
# Set cell type
if dim == 2:
cell_type = "triangle"
if dim == 3:
cell_type = "tetrahedron"
# Check that we got the cell type
    if cell_type is None:
_error("Unable to find cell type.")
# Write header
xml_writer.write_header_mesh(ofile, cell_type, dim)
xml_writer.write_header_cells(ofile, num_cells)
num_cells_read = 0
# Read and write cells
while 1:
# Read next line
line = ifile.readline()
if not line:
break
connect=re.split("[,;]",line)
if num_dims == 2:
n0 = int(connect[0])-1
n1 = int(connect[1])-1
n2 = int(connect[2])-1
xml_writer.write_cell_triangle(ofile, num_cells_read, n0, n1, n2)
elif num_dims == 3:
n0 = int(connect[0])-1
n1 = int(connect[1])-1
n2 = int(connect[2])-1
n3 = int(connect[3])-1
xml_writer.write_cell_tetrahedron(ofile, num_cells_read, n0, n1, n2, n3)
num_cells_read +=1
if num_cells == num_cells_read:
xml_writer.write_footer_cells(ofile)
xml_writer.write_header_vertices(ofile, num_vertices)
break
num_vertices_read = 0
coords = [[],[],[]]
coord = -1
while 1:
line = ifile.readline()
if not line:
_error("Missing data")
if re.search(r"coord =",line):
break
# Read vertices
while 1:
line = ifile.readline()
if not line:
break
if re.search(r"\A\s\s\S+,",line):
coord+=1
print("Found x_"+str(coord)+" coordinates")
coords[coord] += line.split()
if re.search(r";",line):
break
# Write vertices
for i in range(num_vertices):
if num_dims == 2:
x = float(re.split(",",coords[0].pop(0))[0])
y = float(re.split(",",coords[1].pop(0))[0])
z = 0
if num_dims == 3:
x = float(re.split(",",coords[0].pop(0))[0])
y = float(re.split(",",coords[1].pop(0))[0])
z = float(re.split(",",coords[2].pop(0))[0])
xml_writer.write_vertex(ofile, i, x, y, z)
# Write footer
xml_writer.write_footer_vertices(ofile)
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
def exodus2xml(ifilename,ofilename):
"Convert from Exodus II format to DOLFIN XML."
print("Converting from Exodus II format to NetCDF format")
name = ifilename.split(".")[0]
netcdffilename = name +".ncdf"
status, output = get_status_output('ncdump '+ifilename + ' > '+netcdffilename)
if status != 0:
raise IOError("Something wrong while executing ncdump. Is ncdump "\
"installed on the system?")
netcdf2xml(netcdffilename, ofilename)
def _error(message):
"Write an error message"
for line in message.split("\n"):
print("*** %s" % line)
sys.exit(2)
def convert2xml(ifilename, ofilename, iformat=None):
""" Convert a file to the DOLFIN XML format.
"""
convert(ifilename, XmlHandler(ofilename), iformat=iformat)
def convert(ifilename, handler, iformat=None):
""" Convert a file using a provided data handler.
Note that handler.close is called when this function finishes.
@param ifilename: Name of input file.
@param handler: The data handler (instance of L{DataHandler}).
@param iformat: Format of input file.
"""
if iformat is None:
iformat = format_from_suffix(os.path.splitext(ifilename)[1][1:])
# XXX: Backwards-compat
if hasattr(handler, "_ofilename"):
ofilename = handler._ofilename
# Choose conversion
if iformat == "mesh":
# Convert from mesh to xml format
mesh2xml(ifilename, ofilename)
elif iformat == "gmsh":
# Convert from gmsh to xml format
gmsh2xml(ifilename, handler)
elif iformat == "Triangle":
# Convert from Triangle to xml format
triangle2xml(ifilename, ofilename)
elif iformat == "xml-old":
# Convert from old to new xml format
xml_old2xml(ifilename, ofilename)
elif iformat == "metis":
# Convert from metis graph to dolfin graph xml format
metis_graph2graph_xml(ifilename, ofilename)
elif iformat == "scotch":
# Convert from scotch graph to dolfin graph xml format
scotch_graph2graph_xml(ifilename, ofilename)
elif iformat == "diffpack":
# Convert from Diffpack tetrahedral grid format to xml format
diffpack2xml(ifilename, ofilename)
elif iformat == "abaqus":
# Convert from abaqus to xml format
abaqus.convert(ifilename, handler)
elif iformat == "NetCDF":
# Convert from NetCDF generated from ExodusII format to xml format
netcdf2xml(ifilename, ofilename)
elif iformat =="ExodusII":
# Convert from ExodusII format to xml format via NetCDF
exodus2xml(ifilename, ofilename)
elif iformat == "StarCD":
# Convert from Star-CD tetrahedral grid format to xml format
starcd2xml(ifilename, ofilename)
else:
_error("Sorry, cannot convert between %s and DOLFIN xml file formats." % iformat)
    # XXX: handler.close messes things up for input formats other than abaqus or gmsh
if iformat in ("abaqus", "gmsh"):
handler.close()
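# Hedged usage sketch (not part of the original module): converting a
# Gmsh mesh to DOLFIN XML; the file names are illustrative.
#
#   convert2xml("mesh.msh", "mesh.xml")
#
# A custom DataHandler can be used instead of the XML writer, e.g. to
# collect statistics while parsing a Gmsh file:
#
#   convert("mesh.msh", CountingHandler())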
def starcd2xml(ifilename, ofilename):
"Convert from Star-CD tetrahedral grid format to DOLFIN XML."
print(starcd2xml.__doc__)
if not os.path.isfile(ifilename[:-3] + "vrt") or not os.path.isfile(ifilename[:-3] + "cel"):
print("StarCD format requires one .vrt file and one .cel file")
sys.exit(2)
# open output file
ofile = open(ofilename, "w")
# Open file, the vertices are in a .vrt file
ifile = open(ifilename[:-3] + "vrt", "r")
    xml_writer.write_header_mesh(ofile, "tetrahedron", 3)
# Read & write vertices
    # first, read all lines (we need to sweep through the file twice)
lines = ifile.readlines()
# second, find the number of vertices
num_vertices = -1
counter = 0
    # nodenr_map is needed because StarCD supports node numbering like 1, 2, 4 (i.e. 3 is missing)
nodenr_map = {}
for line in lines:
nodenr = int(line[0:15])
nodenr_map[nodenr] = counter
counter += 1
num_vertices = counter
# third, run over all vertices
xml_writer.write_header_vertices(ofile, num_vertices)
for line in lines:
nodenr = int(line[0:15])
vertex0 = float(line[15:31])
vertex1 = float(line[31:47])
vertex2 = float(line[47:63])
        xml_writer.write_vertex(ofile, nodenr_map[nodenr], vertex0, vertex1, vertex2)
xml_writer.write_footer_vertices(ofile)
# Open file, the cells are in a .cel file
ifile = open(ifilename[:-3] + "cel", "r")
# Read & write cells
    # first, read all lines (need to sweep two times through the file)
lines = ifile.readlines()
# second, find the number of cells
num_cells = -1
counter = 0
for line in lines:
l = [int(a) for a in line.split()]
cellnr, node0, node1, node2, node3, node4, node5, node6, node7, tmp1, tmp2 = l
if node4 > 0:
if node2 == node3 and node4 == node5 and node5 == node6 and node6 == node7: # these nodes should be equal
counter += 1
else:
print("The file does contain cells that are not tetraheders. The cell number is ", cellnr, " the line read was ", line)
else:
# triangles on the surface
# print "The file does contain cells that are not tetraheders node4==0. The cell number is ", cellnr, " the line read was ", line
#sys.exit(2)
pass
num_cells = counter
# third, run over all cells
xml_writer.write_header_cells(ofile, num_cells)
counter = 0
for line in lines:
l = [int(a) for a in line.split()]
cellnr, node0, node1, node2, node3, node4, node5, node6, node7, tmp1, tmp2 = l
if (node4 > 0):
if node2 == node3 and node4 == node5 and node5 == node6 and node6 == node7: # these nodes should be equal
xml_writer.write_cell_tetrahedron(ofile, counter, nodenr_map[node0], nodenr_map[node1], nodenr_map[node2], nodenr_map[node4])
counter += 1
xml_writer.write_footer_cells(ofile)
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
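# Note on the renumbering above (illustrative): StarCD node ids may have gaps,
# e.g. ids 1, 2, 4 yield nodenr_map == {1: 0, 2: 1, 4: 2}, so the XML writer
# always receives consecutive 0-based vertex indices.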
|
FEniCS/dolfin
|
site-packages/dolfin_utils/meshconvert/meshconvert.py
|
Python
|
lgpl-3.0
| 50,178
|
[
"NetCDF"
] |
c1a64de7d069d4078f28e0d5238873f138a0f8cf411f66d9cb0fc9a388f485db
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from peacock.utils.FileCache import FileCache
from .JsonData import JsonData
from .BlockInfo import BlockInfo
from .ParameterInfo import ParameterInfo
class ExecutableInfo(object):
"""
    Holds the JSON data of an executable.
"""
SETTINGS_KEY = "ExecutableInfo"
SETTINGS_KEY_TEST_OBJS = "ExecutableWithTestObjectsInfo"
CACHE_VERSION = 4
def __init__(self, **kwds):
super(ExecutableInfo, self).__init__(**kwds)
self.json_data = None
self.path = None
self.path_map = {}
self.type_to_block_map = {}
def setPath(self, new_path, use_test_objects=False):
"""
        Set the executable path.
        Will try to generate the JSON data of the executable, reusing a
        cached copy when the executable has not changed.
"""
if not new_path:
return
setting_key = self.SETTINGS_KEY
extra_args = []
if use_test_objects:
setting_key = self.SETTINGS_KEY_TEST_OBJS
extra_args = ["--allow-test-objects"]
fc = FileCache(setting_key, new_path, self.CACHE_VERSION)
if fc.path == self.path:
# If we are setting the path again, we need to make sure the executable itself hasn't changed
if not fc.dirty:
return
self.json_data = None
self.path = None
use_cache = os.environ.get("PEACOCK_DISABLE_EXE_CACHE", "0") != "1"
if use_cache:
obj = fc.read()
if obj:
self.fromPickle(obj)
self.path = fc.path
return
json_data = JsonData(fc.path, extra_args)
if json_data.app_path:
self.json_data = json_data
self.path = fc.path
self._createPathMap()
fc.add(self.toPickle())
def valid(self):
"""
Check if this is a valid object.
Returns:
bool: Whether the executable has valid json
"""
        return self.path is not None and self.json_data is not None
@staticmethod
def clearCache():
FileCache.clearAll(ExecutableInfo.SETTINGS_KEY)
FileCache.clearAll(ExecutableInfo.SETTINGS_KEY_TEST_OBJS)
def toPickle(self):
return {"json_data": self.json_data.toPickle(),
"path_map": self.path_map,
"path": self.path,
"type_to_block_map": self.type_to_block_map,
}
def fromPickle(self, data):
self.json_data = JsonData()
self.json_data.fromPickle(data["json_data"])
self.path_map = data["path_map"]
self.path = data["path"]
self.type_to_block_map = data["type_to_block_map"]
def _createBasicInfo(self, parent, jdata, is_hard):
full_name = os.path.join(parent.path, jdata["name"])
info = BlockInfo(parent, full_name, is_hard, jdata.get("description", ""))
return info
def getDict(self, jdata, key):
d = jdata.get(key, {})
if not d:
d = {}
return d
def _getCommonParameters(self, block):
actions = block.get("actions", {})
all_params = {}
for name, data in actions.items():
all_params.update(self.getDict(data, "parameters"))
return all_params
def _processChild(self, parent, jdata, is_hard):
info = self._createBasicInfo(parent, jdata, is_hard)
for name, child in self.getDict(jdata, "subblocks").items():
child["name"] = name
child_info = self._processChild(info, child, True & is_hard)
info.addChildBlock(child_info)
self.path_map[child_info.path] = child_info
for name, child in self.getDict(jdata, "types").items():
child["name"] = name
child_info = self._processChild(info, child, False)
info.types[name] = child_info
if "star" in jdata:
jdata["star"]["name"] = "*"
star_node = self._processChild(info, jdata["star"], False)
info.setStarInfo(star_node)
for name, child in self.getDict(jdata, "subblock_types").items():
child["name"] = name
child_info = self._processChild(info, child, False)
info.types[name] = child_info
common_params = self._getCommonParameters(jdata)
for name, param in common_params.items():
param_info = ParameterInfo(info, name)
param_info.setFromData(param)
info.addParameter(param_info)
for name, param in self.getDict(jdata, "parameters").items():
param_info = ParameterInfo(info, name)
param_info.setFromData(param)
info.addParameter(param_info)
for t in jdata.get("associated_types", []):
self.type_to_block_map.setdefault(t, []).append(parent.path)
return info
def readFromFiles(self, json_file):
json_data = JsonData()
json_data.readFromFile(json_file)
self.path = "From Files"
self.json_data = json_data
self._createPathMap()
def _createPathMap(self):
self.path_map = {}
self.root_info = BlockInfo(None, "/", False, "root node")
for name, block in self.json_data.json_data["blocks"].items():
block["name"] = name
block_info = self._processChild(self.root_info, block, True)
self.root_info.addChildBlock(block_info)
self.path_map[block_info.path] = block_info
self.path_map["/"] = self.root_info
def _dumpNode(self, output, entry, level, prefix=' ', only_hard=False):
if not only_hard or entry.hard:
hard = "hard"
if not entry.hard:
hard = "not hard"
star = "star"
if not entry.star:
star = "not star"
output.write("%s%s: %s: %s\n" % (prefix*level, entry.path, hard, star))
for c in entry.children_list:
self._dumpNode(output, entry.children[c], level+1, prefix, only_hard=only_hard)
def dumpDefaultTree(self, hard_only=False):
output = StringIO()
for c in sorted(self.path_map.keys()):
if c == "/":
continue
self._dumpNode(output, self.path_map[c], 0, only_hard=hard_only)
return output.getvalue()
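    # Illustrative output line of dumpDefaultTree(), given _dumpNode's
    # "%s%s: %s: %s" format (the block name here is hypothetical):
    #   /Kernels: hard: not star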
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
print("Usage: <path_to_exe>")
exit(1)
exe_info = ExecutableInfo()
exe_info.clearCache()
exe_info.setPath(sys.argv[1])
print("Keys: %s" % (sorted(exe_info.path_map.keys())))
print(exe_info.type_to_block_map)
|
nuclear-wizard/moose
|
python/peacock/Input/ExecutableInfo.py
|
Python
|
lgpl-2.1
| 7,023
|
[
"MOOSE"
] |
f14f852c91f6a72de093a39580849ffd730a0ec59bf02c6868c0acd3c73834b3
|
# $HeadURL$
"""
DIRAC Times module
Support for basic Date and Time operations
based on system datetime module.
  It provides a common interface to UTC timestamps,
  and converters to string types and back.
The following datetime classes are used in the returned objects:
- dateTime = datetime.datetime
- date = datetime.date
- time = datetime.timedelta
Useful timedelta constant are also provided to
define time intervals.
  Notice: datetime.timedelta objects allow multiplication and division by integer,
  but not by float. Thus:
  - DIRAC.Times.second * 1.5 is not allowed
  - DIRAC.Times.second * 3 / 2 is allowed
  A timeInterval class provides a method to check
  if a given datetime is in the defined interval.
"""
__RCSID__ = "$Id$"
import time as nativetime
import datetime
from types import StringTypes
# Some useful constants for time operations
microsecond = datetime.timedelta( microseconds = 1 )
second = datetime.timedelta( seconds = 1 )
minute = datetime.timedelta( minutes = 1 )
hour = datetime.timedelta( hours = 1 )
day = datetime.timedelta( days = 1 )
week = datetime.timedelta( days = 7 )
dt = datetime.datetime( 2000, 1, 1 )
def dateTime():
"""
Return current UTC datetime, as datetime.datetime object
"""
return dt.utcnow()
def date( myDateTime = None ):
"""
Return current UTC date, as datetime.date object
  if a _dateTimeType is passed as argument, its associated date is returned
"""
if type( myDateTime ) == _dateTimeType:
return myDateTime.date()
return dateTime().date()
def time( myDateTime = None ):
"""
Return current UTC time, as datetime.time object
  if a _dateTimeType is passed as argument, its associated time is returned
"""
if not type( myDateTime ) == _dateTimeType:
myDateTime = dateTime()
return myDateTime - datetime.datetime( myDateTime.year, myDateTime.month, myDateTime.day )
def toEpoch( dateTimeObject = None ):
"""
Get seconds since epoch
"""
if not dateTimeObject:
dateTimeObject = dateTime()
return nativetime.mktime( dateTimeObject.timetuple() )
def fromEpoch( epoch ):
"""
Get datetime object from epoch
"""
return dt.fromtimestamp( epoch )
def to2K( dateTimeObject = None ):
"""
  Get seconds, with microsecond precision, since 2K
"""
if not dateTimeObject:
dateTimeObject = dateTime()
delta = dateTimeObject - dt
return delta.days * 86400 + delta.seconds + delta.microseconds / 1000000.
def from2K( seconds2K = None ):
"""
Get date from seconds since 2K
"""
if not seconds2K:
seconds2K = to2K( dt )
return dt + int( seconds2K ) * second + int( seconds2K % 1 * 1000000 ) * microsecond
def toString( myDate = None ):
"""
Convert to String
if argument type is neither _dateTimeType, _dateType, nor _timeType
the current dateTime converted to String is returned instead
  Notice: datetime.timedelta are converted to strings using the format:
    [day] days [hour]:[min]:[sec]:[microsec]
  where hour, min, sec, microsec are always positive integers,
  and day carries the sign.
  To keep internal consistency we are using:
    [hour]:[min]:[sec]:[microsec]
  where min, sec, microsec are always positive integers and hour carries the
  sign.
"""
if type( myDate ) == _dateTimeType :
return str( myDate )
elif type( myDate ) == _dateType :
return str( myDate )
elif type( myDate ) == _timeType :
return '%02d:%02d:%02d.%06d' % ( myDate.days * 24 + myDate.seconds / 3600,
myDate.seconds % 3600 / 60,
myDate.seconds % 60,
myDate.microseconds )
else:
return toString( dateTime() )
def fromString( myDate = None ):
"""
Convert date/time/datetime String back to appropriated objects
  The format of the string is assumed to be that returned by the toString method.
See notice on toString method
On Error, return None
"""
  if type( myDate ) in StringTypes:
if myDate.find( ' ' ) > 0:
dateTimeTuple = myDate.split( ' ' )
dateTuple = dateTimeTuple[0].split( '-' )
      try:
        return ( datetime.datetime( year = int( dateTuple[0] ),
                                    month = int( dateTuple[1] ),
                                    day = int( dateTuple[2] ) ) +
                 fromString( dateTimeTuple[1] ) )
      except Exception:
        # On error, return None (see docstring)
        return None
return None
elif myDate.find( ':' ) > 0:
timeTuple = myDate.replace( '.', ':' ).split( ':' )
try:
if len( timeTuple ) == 4:
return datetime.timedelta( hours = int( timeTuple[0] ),
minutes = int( timeTuple[1] ),
seconds = int( timeTuple[2] ),
microseconds = int( timeTuple[3] ) )
elif len( timeTuple ) == 3:
return datetime.timedelta( hours = int( timeTuple[0] ),
minutes = int( timeTuple[1] ),
seconds = int( timeTuple[2] ),
microseconds = 0 )
else:
return None
      except Exception:
return None
elif myDate.find( '-' ) > 0:
dateTuple = myDate.split( '-' )
try:
return datetime.date( int( dateTuple[0] ), int( dateTuple[1] ), int( dateTuple[2] ) )
      except Exception:
return None
return None
class timeInterval:
"""
Simple class to define a timeInterval object able to check if a given
dateTime is inside
"""
def __init__( self, initialDateTime, intervalTimeDelta ):
"""
    Initialization method; it requires the initial dateTime and the
    timedelta that define the limits.
    The upper limit is not included, thus the interval is [begin, end).
    If not properly initialized, an error flag is set and subsequent calls
    to any method will return None.
"""
if ( type( initialDateTime ) != _dateTimeType or
type( intervalTimeDelta ) != _timeType ):
self.__error = True
return None
self.__error = False
if intervalTimeDelta.days < 0:
self.__startDateTime = initialDateTime + intervalTimeDelta
self.__endDateTime = initialDateTime
else:
self.__startDateTime = initialDateTime
self.__endDateTime = initialDateTime + intervalTimeDelta
def includes( self, myDateTime ):
"""
"""
if self.__error :
return None
if type( myDateTime ) != _dateTimeType :
return None
if myDateTime < self.__startDateTime :
return False
if myDateTime >= self.__endDateTime :
return False
return True
_dateTimeType = type( dateTime() )
_dateType = type( date() )
_timeType = type( time() )
_allTimeTypes = ( _dateTimeType, _timeType )
_allDateTypes = ( _dateTimeType, _dateType )
_allTypes = ( _dateTimeType, _dateType, _timeType )
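# Usage sketch (illustrative; the exact strings depend on the current time):
#   now = dateTime()
#   s = toString( now )            # e.g. '2016-05-04 12:00:00.123456'
#   fromString( s ) == now         # True when microseconds are present
#   window = timeInterval( now, hour )
#   window.includes( now )         # True; the upper bound is excluded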
|
Sbalbp/DIRAC
|
Core/Utilities/Time.py
|
Python
|
gpl-3.0
| 7,370
|
[
"DIRAC"
] |
85b8234d039ef8f11eed988325269e2d794bfb6ac8dc14cbb216d22c6ff6410f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Get z axis position of spherical markers from 3D image stacks (tiff z-stack)
import beadPos.py and call z = beadPos.getz(x,y,img,n=None,optimize=False) to get z position
at the given x and y pixel coordinate or call x,y,z = beadPos.getz(x,y,img,n=None,optimize=True)
to get an optimized bead position (optimization of x, y and z)
# @Title : beadPos
# @Project : 3DCTv2
# @Description : Get bead z axis position from 3D image stacks (tiff z-stack)
# @Author : Jan Arnold
# @Email : jan.arnold (at) coraxx.net
# @Copyright : Copyright (C) 2016 Jan Arnold
# @License : GPLv3 (see LICENSE file)
# @Credits : endolith https://gist.github.com/endolith/255291 for parabolic fitting function
# 2D Gaussian fit from http://scipy.github.io/old-wiki/pages/Cookbook/FittingData
# @Maintainer : Jan Arnold
# @Date : 2015/12
# @Version : 3DCT 2.3.0 module rev. 2
# @Status : stable
# @Usage : import beadPos.py and call z = beadPos.getz(x,y,img,n=None,optimize=False) to get z position
# at the given x and y pixel coordinate or call x,y,z = beadPos.getz(x,y,img,n=None,optimize=True)
# to get an optimized bead position (optimization of x, y and z)
# @Notes : stable, but problems with low SNR <- needs revisiting
# @Python_version : 2.7.11
"""
# ======================================================================================================================
import time
import math
import numpy as np
from scipy.optimize import curve_fit, leastsq
import matplotlib.pyplot as plt
import tifffile as tf
import parabolic
try:
    import clrmsg
    import TDCT_debug
    debug = TDCT_debug.debug
except Exception:
    # Fall back gracefully when the debug helpers are unavailable
    clrmsg = None
    debug = False
repeat = 0
def getzPoly(x,y,img,n=None,optimize=False):
"""x and y are coordinates
img is the path to the z-stack tiff file or a numpy.ndarray from tifffile.py imread function
n is the number of points around the max value that are used in the polyfit
    leave n as None to use the maximum number of points
If optimize is set to True, the algorithm will try to optimize the x,y,z position
!! if optimize is True, 3 values are returned: x,y,z"""
if not isinstance(img, str) and not isinstance(img, np.ndarray):
if clrmsg and debug is True: print clrmsg.ERROR
raise TypeError('I can only handle an image path as string or an image volume as numpy.ndarray imported from tifffile.py')
elif isinstance(img, str):
img = tf.imread(img)
data_z = img[:,y,x]
if n is None:
n = getn(data_z)
data_z_xp_poly, data_z_yp_poly = parabolic.parabolic_polyfit(data_z, np.argmax(data_z), n)
if math.isnan(data_z_xp_poly):
if clrmsg and debug is True: print clrmsg.ERROR
print TypeError('Failed: Probably due to low SNR')
if optimize is True:
return x,y,'failed'
else:
return 'failed'
if debug is True:
f, ax = plt.subplots()
ax.plot(range(0,len(data_z)), data_z, color='blue')
ax.plot(data_z_xp_poly, data_z_yp_poly, 'o', color='black')
ax.set_title("mid: "+str(data_z_xp_poly))
plt.draw()
plt.pause(1)
plt.close()
if optimize is True:
x_opt_vals, y_opt_vals, z_opt_vals = optimize_z(x,y,data_z_xp_poly,img,n=None)
return x_opt_vals[-1], y_opt_vals[-1], z_opt_vals[-1]
else:
return data_z_xp_poly
def getzGauss(x,y,img,parent=None,optimize=False,threshold=None,threshVal=0.6,cutout=15):
"""x and y are coordinates
img is the path to the z-stack tiff file or a numpy.ndarray from tifffile.py imread function
optimize == True kicks off the 2D Gaussian fit and this function will return x,y,z
threshold == True filters the image where it cuts off at max - min * threshVal (threshVal between 0.1 and 1)
cutout specifies the FOV for the 2D Gaussian fit"""
if not isinstance(img, str) and not isinstance(img, np.ndarray):
if clrmsg and debug is True: print clrmsg.ERROR
raise TypeError('I can only handle an image path as string or an image volume as numpy.ndarray imported from tifffile.py')
elif isinstance(img, str):
img = tf.imread(img)
x = np.round(x).astype(int)
y = np.round(y).astype(int)
data_z = img[:,y,x]
data = np.array([np.arange(len(data_z)), data_z])
poptZ, pcov = gaussfit(data,parent)
if optimize is False:
return poptZ[1]
else:
repeats = 5
if clrmsg and debug is True: print clrmsg.DEBUG + '2D Gaussian xy optimization running %.f at z = %.f' % (repeats,round(poptZ[1]))
for repeat in range(repeats):
data = np.copy(img[
round(poptZ[1]),
y-cutout:y+cutout,
x-cutout:x+cutout])
if threshold is not None:
threshold = data < data.max()-(data.max()-data.min())*threshVal
data[threshold] = 0
poptXY = fitgaussian(data,parent)
if poptXY is None:
return x, y, poptZ[1]
(height, xopt, yopt, width_x, width_y) = poptXY
## x and y are switched when applying the offset
x = x-cutout+yopt
y = y-cutout+xopt
data_z = img[:,y,x]
data = np.array([np.arange(len(data_z)), data_z])
poptZ, pcov = gaussfit(data,parent,hold=True)
if parent: parent.refreshUI()
time.sleep(0.01)
return x, y, poptZ[1]
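# Illustrative call (the file name is hypothetical):
#   x, y, z = getzGauss(120.0, 87.5, 'stack.tif', optimize=True,
#                       threshold=True, threshVal=0.6, cutout=15)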
def optimize_z(x,y,z,image,n=None):
"""Optimize z for poly fit"""
if type(image) == str:
img = tf.imread(image)
elif type(image) == np.ndarray:
img = image
data_z = img[:,y,x]
if n is None:
n = getn(data_z)
x_opt_vals, y_opt_vals, z_opt_vals = [], [], []
x_opt,y_opt,z_opt = x,y,z
for i in range(5):
try:
print x_opt,y_opt,z_opt
x_opt,y_opt,z_opt = int(round(x_opt)),int(round(y_opt)),int(round(z_opt))
x_opt, y_opt = optimize_xy(x_opt,y_opt,z_opt,img,nx=None,ny=None)
data_z = img[:,round(y_opt),round(x_opt)]
except Exception as e:
if clrmsg and debug is True: print clrmsg.ERROR
print IndexError("Optimization failed, possibly due to low signal or low SNR. "+str(e))
return [x],[y],['failed']
n = getn(data_z)
z_opt, data_z_yp_poly = parabolic.parabolic_polyfit(data_z, np.argmax(data_z), n)
x_opt_vals.append(x_opt)
y_opt_vals.append(y_opt)
z_opt_vals.append(z_opt)
return x_opt_vals, y_opt_vals, z_opt_vals
def getn(data):
"""this function is used to determine the maximum amount of data points for the polyfit function
data is a numpy array of values"""
if len(data)-np.argmax(data) <= np.argmax(data):
n = 2*(len(data)-np.argmax(data))-1
else:
n = 2*np.argmax(data)
return n
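# Illustrative: for data of length 50 with argmax at index 30,
# len(data) - argmax = 20 <= 30, so n = 2*20 - 1 = 39 points are used,
# i.e. the largest window that stays symmetric around the maximum.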
def optimize_xy(x,y,z,image,nx=None,ny=None):
"""x and y are coordinates, z is the layer in the z-stack tiff file
image can be either the path to the z-stack tiff file or the np.array data of itself
n is the number of points around the max value that are used in the polyfit
leave n to use the maximum amount of points"""
get_nx, get_ny = False, False
if type(image) == str:
img = tf.imread(image)
elif type(image) == np.ndarray:
img = image
## amount of data points around coordinate
samplewidth = 10
data_x = img[z,y,x-samplewidth:x+samplewidth]
data_y = img[z,y-samplewidth:y+samplewidth,x]
if debug is True: f, axarr = plt.subplots(2, sharex=True)
if nx is None:
get_nx = True
if ny is None:
get_ny = True
## optimize x
xmaxvals = np.array([], dtype=np.int32)
for offset in range(10):
data_x = img[z,y-offset,x-samplewidth:x+samplewidth]
if data_x.max() < data_x.mean()*1.1:
# print "breaking at ",offset
# print data_x.max(), data_x.mean(), data_x.mean()*1.1
break
if get_nx is True:
nx = getn(data_x)
data_x_xp_poly, data_x_yp_poly = parabolic.parabolic_polyfit(data_x, np.argmax(data_x), nx)
xmaxvals = np.append(xmaxvals,[data_x_xp_poly])
c = np.random.rand(3,1)
if debug is True:
axarr[0].plot(range(0,len(data_x)), data_x, color=c)
axarr[0].plot(data_x_xp_poly, data_x_yp_poly, 'o', color=c)
for offset in range(10):
data_x = img[z,y+offset,x-samplewidth:x+samplewidth]
if data_x.max() < data_x.mean()*1.1:
# print "breaking at ",offset
# print data_x.max(), data_x.mean(), data_x.mean()*1.1
break
if get_nx is True:
nx = getn(data_x)
data_x_xp_poly, data_x_yp_poly = parabolic.parabolic_polyfit(data_x, np.argmax(data_x), nx)
xmaxvals = np.append(xmaxvals,[data_x_xp_poly])
c = np.random.rand(3,1)
if debug is True:
axarr[0].plot(range(0,len(data_x)), data_x, color=c)
axarr[0].plot(data_x_xp_poly, data_x_yp_poly, 'o', color=c)
if debug is True: axarr[0].set_title("mid-mean: "+str(xmaxvals.mean()))
## optimize y
ymaxvals = np.array([], dtype=np.int32)
for offset in range(10):
data_y = img[z,y-samplewidth:y+samplewidth,x-offset]
if data_y.max() < data_y.mean()*1.1:
# print "breaking at ",offset
# print data_y.max(), data_y.mean(), data_y.mean()*1.1
break
if get_ny is True:
ny = getn(data_y)
data_y_xp_poly, data_y_yp_poly = parabolic.parabolic_polyfit(data_y, np.argmax(data_y), ny)
ymaxvals = np.append(ymaxvals,[data_y_xp_poly])
c = np.random.rand(3,1)
if debug is True:
axarr[1].plot(range(0,len(data_y)), data_y, color=c)
axarr[1].plot(data_y_xp_poly, data_y_yp_poly, 'o', color=c)
for offset in range(10):
data_y = img[z,y-samplewidth:y+samplewidth,x+offset]
if data_y.max() < data_y.mean()*1.1:
# print "breaking at ",offset
# print data_y.max(), data_y.mean(), data_y.mean()*1.1
break
if get_ny is True:
ny = getn(data_y)
data_y_xp_poly, data_y_yp_poly = parabolic.parabolic_polyfit(data_y, np.argmax(data_y), ny)
ymaxvals = np.append(ymaxvals,[data_y_xp_poly])
c = np.random.rand(3,1)
if debug is True:
axarr[1].plot(range(0,len(data_y)), data_y, color=c)
axarr[1].plot(data_y_xp_poly, data_y_yp_poly, 'o', color=c)
if debug is True: axarr[1].set_title("mid-mean: "+str(ymaxvals.mean()))
if debug is True:
plt.draw()
plt.pause(0.5)
plt.close()
## calculate offset into coordinates
x_opt = x+xmaxvals.mean()-samplewidth
y_opt = y+ymaxvals.mean()-samplewidth
return x_opt, y_opt
## Gaussian 1D fit
def gauss(x, *p):
# A "magnitude"
# mu "offset on x axis"
# sigma "width"
A, mu, sigma = p
return A*np.exp(-(x-mu)**2/(2.*sigma**2))
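# Illustrative: gauss(mu, A, mu, sigma) == A, i.e. the curve peaks at x == mu
# and falls off with standard deviation sigma.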
def gaussfit(data,parent=None,hold=False):
## Fitting gaussian to data
data[1] = data[1]-data[1].min()
p0 = [data[1].max(), data[1].argmax(), 1]
popt, pcov = curve_fit(gauss, data[0], data[1], p0=p0)
if parent is not None:
## Draw graphs in GUI
x = []
y = []
for i in np.arange(len(data[0])):
x.append(i)
y.append(gauss(i,*popt))
if hold is False:
parent.widget_matplotlib.setupScatterCanvas(width=4,height=4,dpi=52,toolbar=False)
parent.widget_matplotlib.xyPlot(data[0], data[1], label='z data',clear=True)
parent.widget_matplotlib.xyPlot(x, y, label='gaussian fit',clear=False)
## DEBUG
if clrmsg and debug is True:
from scipy.stats import ks_2samp
## Get std from the diagonal of the covariance matrix
std_height, std_mean, std_sigma = np.sqrt(np.diag(pcov))
print clrmsg.DEBUG + '='*15, 'GAUSS FIT', '='*25
print clrmsg.DEBUG + 'Amplitude :', popt[0]
print clrmsg.DEBUG + 'Location :', popt[1]
## http://mathworld.wolfram.com/GaussianFunction.html -> sigma * 2 * sqrt(2 * ln(2))
print clrmsg.DEBUG + 'FWHM :', popt[2] * 2 * math.sqrt(2 * math.log(2,math.e))
print clrmsg.DEBUG + 'Std. Amplitude :', std_height
print clrmsg.DEBUG + 'Std. Location :', std_mean
print clrmsg.DEBUG + 'Std. FWHM :', std_sigma * 2 * math.sqrt(2 * math.log(2,math.e))
print clrmsg.DEBUG + 'Mean dy :', np.absolute(y-data[1]).mean()
print clrmsg.DEBUG + str(ks_2samp(y, data[1]))
return popt, pcov
## Gaussian 2D fit from http://scipy.github.io/old-wiki/pages/Cookbook/FittingData
def gaussian(height, center_x, center_y, width_x, width_y):
"""Returns a Gaussian function with the given parameters"""
width_x = float(width_x)
width_y = float(width_y)
return lambda x,y: height*np.exp(
-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)
def moments(data):
"""Returns (height, x, y, width_x, width_y)
the Gaussian parameters of a 2D distribution by calculating its
moments"""
total = data.sum()
X, Y = np.indices(data.shape)
x = (X*data).sum()/total
y = (Y*data).sum()/total
col = data[:, int(y)]
width_x = np.sqrt(abs((np.arange(col.size)-y)**2*col).sum()/col.sum())
row = data[int(x), :]
width_y = np.sqrt(abs((np.arange(row.size)-x)**2*row).sum()/row.sum())
height = data.max()
return height, x, y, width_x, width_y
def fitgaussian(data,parent=None):
"""Returns (height, x, y, width_x, width_y)
the Gaussian parameters of a 2D distribution found by a fit"""
def errorfunction(p):
return np.ravel(gaussian(*p)(*np.indices(data.shape)) - data)
params = moments(data)
p, success = leastsq(errorfunction, params)
    if np.isnan(p).any():
        if parent is not None:
            parent.widget_matplotlib.matshowPlot(
                mat=data,contour=np.ones(data.shape),labelContour="XY optimization failed\n" +
                "Try reducing the\nmarker size (equates to\nFOV for gaussian fit)")
        return None
if parent is not None:
## Draw graphs in GUI
fit = gaussian(*p)
contour = fit(*np.indices(data.shape))
(height, x, y, width_x, width_y) = p
labelContour = (
" x : %.1f\n"
" y : %.1f\n"
"width_x : %.1f\n"
"width_y : %.1f") % (x, y, width_x, width_y)
parent.widget_matplotlib.matshowPlot(mat=data,contour=contour,labelContour=labelContour)
return p
# def test1Dgauss(data=None):
# if not data:
# data = np.random.normal(loc=5., size=10000)
# hist, bin_edges = np.histogram(data, density=True)
# bin_centres = (bin_edges[:-1] + bin_edges[1:])/2
# data = np.array([bin_centres, hist])
# # data = np.array([[0,1,2,3,4,5,6,7,8,9],[10,12,11,15,25,18,13,9,11,10]])
# popt, pcov = gaussfit(data)
# x = []
# y = []
# for i in np.arange(len(data[0])):
# x.append(i)
# y.append(gauss(i,*popt))
# plt.clf()
# plt.plot(data[0], data[1], label='Test data')
# plt.plot(x, y, label='Gaussian fit')
# new_bin_centers = np.linspace(bin_centres[0], bin_centres[-1], 200)
# new_hist_fit = gauss(new_bin_centers, *popt)
# plt.plot(new_bin_centers, new_hist_fit,label='Interpolated')
# plt.legend()
# plt.show()
# if clrmsg and debug is True:
# from scipy.stats import ks_2samp
# print clrmsg.DEBUG + ('Mean dy : %.6f' % np.absolute(y-data[1]).mean())
# print clrmsg.DEBUG + str(ks_2samp(y, data[1]))
# def test2Dgauss(data=None):
# from pylab import *
# if data is None:
# # Create the Gaussian data
# Xin, Yin = mgrid[0:201, 0:201]
# data = gaussian(3, 100, 100, 20, 40)(Xin, Yin) + np.random.random(Xin.shape)
# # data = data-data.min()
# print data.min(), data.max()
# threshold = data < data.max()-(data.max()-data.min())*0.6
# data[threshold] = 0
# matshow(data, cmap=cm.gist_earth_r)
# params = fitgaussian(data)
# fit = gaussian(*params)
# contour(fit(*indices(data.shape)), cmap=cm.copper)
# ax = gca()
# (height, x, y, width_x, width_y) = params
# text(0.85, 0.05, """
# x : %.1f
# y : %.1f
# width_x : %.1f
# width_y : %.1f""" % (x, y, width_x, width_y),
# fontsize=12, horizontalalignment='right',
# verticalalignment='bottom', transform=ax.transAxes)
# show()
# img = tf.imread('/Users/jan/Desktop/dot2.tif')
# print img.shape
# test2Dgauss(img)
|
Splo0sh/3DCT
|
tdct/beadPos.py
|
Python
|
gpl-3.0
| 15,029
|
[
"Gaussian"
] |
78170c164443fb05e637930d3b56d354b0c64ae14bb7e95c83866f28fbea2240
|
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import os
import moose
print("[INFO ] using moose from %s" % moose.__file__)
import numpy as np
sdir_ = os.path.dirname(os.path.realpath(__file__))
def runAndSavePlots( name ):
runtime = 20.0
moose.reinit()
moose.start( runtime )
pa = moose.Neutral( '/model/graphs/' + name )
for x in moose.wildcardFind( '/model/#graphs/conc#/#' ):
if ( x.tick != -1 ):
tabname = '/model/graphs/' + name + '/' + x.name + '.' + name
y = moose.Table( tabname )
y.vector = x.vector
y.tick = -1
# Takes args ee, gsl, or gssa
def switchSolvers( solver ):
if ( moose.exists( 'model/kinetics/stoich' ) ):
moose.delete( '/model/kinetics/stoich' )
moose.delete( '/model/kinetics/ksolve' )
compt = moose.element( '/model/kinetics' )
if ( solver == 'gsl' ):
ksolve = moose.Ksolve( '/model/kinetics/ksolve' )
if ( solver == 'gssa' ):
ksolve = moose.Gsolve( '/model/kinetics/ksolve' )
if ( solver != 'ee' ):
stoich = moose.Stoich( '/model/kinetics/stoich' )
stoich.compartment = compt
stoich.ksolve = ksolve
stoich.path = "/model/kinetics/##"
def test_switch_solvers():
"""
At zero order, you can select the solver you want to use within the
function moose.loadModel( filename, modelpath, solver ).
Having loaded in the model, you can change the solver to use on it.
This example illustrates how to assign and change solvers for a
kinetic model. This process is necessary in two situations:
* If we want to change the numerical method employed, for example,
from deterministic to stochastic.
* If we are already using a solver, and we have changed the reaction
network by adding or removing molecules or reactions.
Note that we do not have to change the solvers if the volume or
reaction rates change.
In this example the model is loaded in with a gsl solver. The
sequence of solver calculations is:
#. gsl
#. ee
#. gsl
#. gssa
#. gsl
If you're removing the solvers, you just delete the stoichiometry
object and the associated ksolve/gsolve. Should there be diffusion
    (a dsolve) then you should delete that too. If you're
building the solvers up again, then you must do the following
steps in order:
#. build up the ksolve/gsolve and stoich (any order)
#. Assign stoich.ksolve
#. Assign stoich.path.
See the Reaction-diffusion section should you want to do diffusion
as well.
"""
solver = "gsl" # Pick any of gsl, gssa, ee..
mfile = os.path.join(sdir_, '../data/kkit_objects_example.g')
modelId = moose.loadModel( mfile, 'model', solver )
# Increase volume so that the stochastic solver gssa
# gives an interesting output
compt = moose.element( '/model/kinetics' )
compt.volume = 1e-19
runAndSavePlots( 'gsl' )
#########################################################
switchSolvers( 'ee' )
runAndSavePlots( 'ee' )
#########################################################
switchSolvers( 'gsl' )
runAndSavePlots( 'gsl2' )
#########################################################
switchSolvers( 'gssa' )
runAndSavePlots( 'gssa' )
#########################################################
switchSolvers( 'gsl' )
runAndSavePlots( 'gsl3' )
#########################################################
# Display all plots.
plotdt = moose.element( '/clock' ).tickDt[18]
conc = []
for x in moose.wildcardFind( '/model/#graphs/conc#/#' ):
conc.append(x.vector)
conc = np.array(conc)
assert conc.mean() > 0.0
data = []
for x in moose.wildcardFind( '/model/graphs/gsl/#' ):
data.append(x.vector)
gsl = np.array(data)
assert abs(conc - gsl).sum() < 0.25
data=[]
for x in moose.wildcardFind( '/model/graphs/ee/#' ):
data.append(x.vector)
ee = np.array(data)
assert abs(conc-ee).sum() < 0.2
data=[]
for x in moose.wildcardFind( '/model/graphs/gsl2/#' ):
data.append(x.vector)
gsl2 = np.array(data)
assert abs(conc-gsl2).sum() == 0.0 # these are the same.
data=[]
for x in moose.wildcardFind( '/model/graphs/gssa/#' ):
data.append(x.vector)
gssa = np.array(data)
assert abs(conc - gssa).sum() < 0.15, (conc - gssa).sum()
assert gssa.shape == conc.shape == gsl.shape == ee.shape
print('all done')
def main():
test_switch_solvers()
if __name__ == '__main__':
main()
|
dilawar/moose-core
|
tests/core/test_switch_solvers.py
|
Python
|
gpl-3.0
| 5,055
|
[
"MOOSE"
] |
295d6d3337c7eb5dab1b5352013643537bf9ea9d41eee6f283278c0cc618c9d9
|
'''
Created on Jul 19, 2011
@author: sean
'''
from __future__ import print_function
from ...asttools import Visitor
import sys
import _ast
from warnings import warn
if sys.version_info.major < 3:
from StringIO import StringIO
else:
from io import StringIO
class Indentor(object):
def __init__(self, printer, indent=' '):
self.printer = printer
self.indent = indent
def __enter__(self):
self.printer._indent = self.printer._indent + self.indent
def __exit__(self, *args):
indent = self.printer._indent[:-len(self.indent)]
self.printer._indent = indent
clsname = lambda node: type(node).__name__
def depth(node):
return len(flatten(node))
def flatten(node):
result = []
if isinstance(node, _ast.AST):
for value in ast_values(node):
result.extend(flatten(value))
elif isinstance(node, (list, tuple)):
for child in node:
result.extend(flatten(child))
else:
result.append(node)
return result
def ast_keys(node):
return node._fields
def ast_values(node):
return [getattr(node, field, None) for field in node._fields]
def ast_items(node):
return [(field, getattr(node, field, None)) for field in node._fields]
class ASTPrinter(Visitor):
def __init__(self, indent=' ', level=0, newline='\n'):
self.out = StringIO()
self._indent = ''
self.one_indent = indent
self.level = level
self.newline = newline
def dump(self, file=sys.stdout):
self.out.seek(0)
print(self.out.read(), file=file)
def dumps(self):
self.out.seek(0)
return self.out.read()
def print(self, text, noindent=False, **kwargs):
# if noindent:
# prf = ''
# else:
# prf = self._indent
new_text = text.format(**kwargs)
# print(prf, new_text, file=self.out, sep='', end='')
print(new_text, file=self.out, sep='', end='')
def indent(self, level):
ident = self.one_indent * level
return Indentor(self, ident)
def visitDefault(self, node):
nodename = '%s(' % clsname(node)
self.print(nodename, noindent=True)
undefined = [attr for attr in node._fields if not hasattr(node, attr)]
if undefined:
warn('ast node %r does not have required field(s) %r ' % (clsname(node), undefined,), stacklevel=2)
undefined = [attr for attr in node._attributes if not hasattr(node, attr)]
if undefined:
            warn('ast node %r does not have required attribute(s) %r ' % (clsname(node), undefined,), stacklevel=2)
children = sorted([(attr, getattr(node, attr)) for attr in node._fields if hasattr(node, attr)])
with self.indent(len(nodename)):
i = 0
while children:
attr, child = children.pop(0)
if isinstance(child, (list, tuple)):
text = '{attr}=['.format(attr=attr)
self.print(text)
with self.indent(len(text)):
for j, inner_child in enumerate(child):
if isinstance(inner_child, _ast.AST):
self.visit(inner_child)
else:
self.print(repr(inner_child))
if j < (len(child) - 1):
self.print(", {nl}{idnt}", nl=self.newline, idnt=self._indent)
self.print(']')
else:
text = '{attr}='.format(attr=attr)
self.print(text)
with self.indent(len(text)):
if isinstance(child, _ast.AST):
self.visit(child)
else:
self.print(repr(child))
if children:
self.print(", {nl}{idnt}", nl=self.newline, idnt=self._indent)
i += 1
self.print(")")
def dump_ast(ast, indent=' ', newline='\n'):
'''
Returns a string representing the ast.
:param ast: the ast to print.
:param indent: how far to indent a newline.
:param newline: The newline character.
'''
visitor = ASTPrinter(indent=indent, level=0, newline=newline)
visitor.visit(ast)
return visitor.dumps()
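# Illustrative (assuming the stdlib ast module is available):
#   import ast
#   print(dump_ast(ast.parse('x = 1')))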
def print_ast(ast, indent=' ', initlevel=0, newline='\n', file=sys.stdout):
'''
Pretty print an ast node.
:param ast: the ast to print.
:param indent: how far to indent a newline.
:param initlevel: starting indent level
:param newline: The newline character.
:param file: file object to print to
To print a short ast you may want to use::
node = ast.parse(source)
print_ast(node, indent='', newline='')
'''
visitor = ASTPrinter(indent=indent, level=initlevel, newline=newline)
visitor.visit(ast)
visitor.dump(file=file)
|
jasonyaw/SFrame
|
oss_src/unity/python/sframe/meta/asttools/visitors/print_visitor.py
|
Python
|
bsd-3-clause
| 5,046
|
[
"VisIt"
] |
433f13db91178e2a6f49808d68b96347cca12ce07747eea086db8c1ed9657c34
|
import m5
from m5.objects import *
alpha_suffix = '_base.amd64-m64-gcc43-nn'
#400.perlbench
perlbench = LiveProcess()
perlbench.executable = 'perlbench' + alpha_suffix
perlbench.cmd = [perlbench.executable] + ['-I./lib', 'checkspam.pl', '2500',
'5', '25', '11', '150', '1', '1', '1', '1']
#401.bzip2
bzip2 = LiveProcess()
bzip2.executable = 'bzip2' + alpha_suffix
bzip2.cmd = [bzip2.executable] + ['input.source', '280']
#403.gcc
gcc = LiveProcess()
gcc.executable = 'gcc' + alpha_suffix
gcc.cmd = [gcc.executable] + ['166.i', '-o', '166.s']
#410.bwaves
bwaves = LiveProcess()
bwaves.executable = 'bwaves' + alpha_suffix
bwaves.cmd = [bwaves.executable]
#416.gamess
gamess = LiveProcess()
gamess.executable = 'gamess' + alpha_suffix
gamess.cmd = [gamess.executable]
gamess.input = 'cytosine.2.config'
#429.mcf
mcf = LiveProcess()
mcf.executable = 'mcf' + alpha_suffix
mcf.cmd = [mcf.executable] + ['inp.in']
#433.milc
milc = LiveProcess()
milc.executable = 'milc' + alpha_suffix
milc.cmd = [milc.executable]
milc.input = 'su3imp.in'
#434.zeusmp
zeusmp = LiveProcess()
zeusmp.executable = 'zeusmp' + alpha_suffix
zeusmp.cmd = [zeusmp.executable]
#435.gromacs
gromacs = LiveProcess()
gromacs.executable = 'gromacs' + alpha_suffix
gromacs.cmd = [gromacs.executable] + ['-silent', '-deffnm', 'gromacs',
'-nice', '0']
#436.cactusADM
cactusADM = LiveProcess()
cactusADM.executable = 'cactusADM' + alpha_suffix
cactusADM.cmd = [cactusADM.executable] + ['benchADM.par']
#437.leslie3d
leslie3d = LiveProcess()
leslie3d.executable = 'leslie3d' + alpha_suffix
leslie3d.cmd = [leslie3d.executable]
leslie3d.input = 'leslie3d.in'
#444.namd
namd = LiveProcess()
namd.executable = 'namd' + alpha_suffix
namd.cmd = [namd.executable] + ['--input', 'namd.input', '--output',
'namd.out', '--iterations', '38']
#445.gobmk
gobmk = LiveProcess()
gobmk.executable = 'gobmk' + alpha_suffix
gobmk.cmd = [gobmk.executable] + ['--quiet', '--mode', 'gtp']
gobmk.input = '13x13.tst'
#447.dealII
dealII=LiveProcess()
dealII.executable = 'dealII' + alpha_suffix
# TEST CMDS
dealII.cmd = [dealII.executable]+['23']
# REF CMDS
#dealII.output = out_dir + 'dealII.out'
#450.soplex
soplex = LiveProcess()
soplex.executable = 'soplex' + alpha_suffix
soplex.cmd = [soplex.executable] + ['-m45000', 'pds-50.mps']
#453.povray
povray = LiveProcess()
povray.executable = 'povray' + alpha_suffix
povray.cmd = [povray.executable] + ['SPEC-benchmark-ref.ini']
#454.calculix
calculix = LiveProcess()
calculix.executable = 'calculix' + alpha_suffix
calculix.cmd = [calculix.executable] + ['-i', 'hyperviscoplastic']
#456.hmmer
hmmer = LiveProcess()
hmmer.executable = 'hmmer' + alpha_suffix
hmmer.cmd = [hmmer.executable] + ['nph3.hmm', 'swiss41']
#458.sjeng
sjeng = LiveProcess()
sjeng.executable = 'sjeng' + alpha_suffix
sjeng.cmd = [sjeng.executable] + ['ref.txt']
#459.GemsFDTD
GemsFDTD = LiveProcess()
GemsFDTD.executable = 'GemsFDTD' + alpha_suffix
GemsFDTD.cmd = [GemsFDTD.executable] + ['yee.dat']
#462.libquantum
libquantum = LiveProcess()
libquantum.executable = 'libquantum' + alpha_suffix
libquantum.cmd = [libquantum.executable] + ['1397', '8']
#464.h264ref
h264ref = LiveProcess()
h264ref.executable = 'h264ref' + alpha_suffix
h264ref.cmd = [h264ref.executable] + ['-d', 'foreman_ref_encoder_baseline.cfg']
#465.tonto
tonto = LiveProcess()
tonto.executable = 'tonto' + alpha_suffix
tonto.cmd = [tonto.executable]
#470.lbm
lbm = LiveProcess()
lbm.executable = 'lbm' + alpha_suffix
lbm.cmd = [lbm.executable] + ['300', 'reference.dat', '0', '0',
'100_100_130_ldc.of']
#471.omnetpp
omnetpp=LiveProcess()
omnetpp.executable = 'omnetpp' + alpha_suffix
omnetpp.cmd = [omnetpp.executable] + ['omnetpp.ini']
#473.astar
astar=LiveProcess()
astar.executable = 'astar' + alpha_suffix
astar.cmd = [astar.executable] + ['rivers.cfg']
#481.wrf
wrf=LiveProcess()
wrf.executable = 'wrf' + alpha_suffix
wrf.cmd = [wrf.executable]
#482.sphinx3
sphinx3=LiveProcess()
sphinx3.executable = 'sphinx_livepretend' + alpha_suffix
sphinx3.cmd = [sphinx3.executable] + ['ctlfile', '.', 'args.an4']
#483.xalancbmk ##--NOT WORKING--##
xalancbmk=LiveProcess()
xalancbmk.executable = 'Xalan' + alpha_suffix
# TEST CMDS
xalancbmk.cmd = [xalancbmk.executable] + ['-v','t5.xml','xalanc.xsl']
# REF CMDS
#xalancbmk.output = out_dir + 'xalancbmk.out'
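# Illustrative attachment of one of these processes in an SE-mode config
# (the 'system' object is hypothetical here, not defined in this file):
#   system.cpu.workload = sjeng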
|
nilmini20s/gem5-2016-08-13
|
configs/example/spec06_benchmarks.py
|
Python
|
bsd-3-clause
| 4,359
|
[
"GAMESS",
"Gromacs",
"NAMD"
] |
46c1a483098191944469fd2251663feb2e02928bbdcc26d5583eb6b081bc605d
|
#!/usr/bin/env python
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Uoti Urpala and Matt Chisholm
from __future__ import division
from BitTorrent.platform import install_translation
install_translation()
import sys
import itertools
import math
import os
import threading
import datetime
import random
import atexit
assert sys.version_info >= (2, 3), _("Install Python %s or greater") % '2.3'
from BitTorrent import BTFailure, INFO, WARNING, ERROR, CRITICAL, status_dict, app_name
from BitTorrent import configfile
from BitTorrent.defaultargs import get_defaults
from BitTorrent.IPC import ipc_interface
from BitTorrent.prefs import Preferences
from BitTorrent.platform import doc_root, btspawn, path_wrap, os_version, is_frozen_exe, get_startup_dir, create_shortcut, remove_shortcut
from BitTorrent import zurllib
defaults = get_defaults('bittorrent')
defaults.extend((('donated' , '', ''), # the version that the user last donated for
('notified', '', ''), # the version that the user was last notified of
))
ui_options = [
'max_upload_rate' ,
'minport' ,
'maxport' ,
'next_torrent_time' ,
'next_torrent_ratio' ,
'last_torrent_ratio' ,
'seed_forever' ,
'seed_last_forever' ,
'ask_for_save' ,
'save_in' ,
'open_from' ,
'ip' ,
'start_torrent_behavior',
'upnp' ,
]
if os.name == 'nt':
ui_options.extend( [
'launch_on_startup' ,
'minimize_to_tray' ,
])
advanced_ui_options_index = len(ui_options)
ui_options.extend([
'min_uploads' ,
'max_uploads' ,
'max_initiate' ,
'max_incomplete' ,
'max_allow_in' ,
'max_files_open' ,
'forwarded_port' ,
'display_interval',
'donated' ,
'notified' ,
])
if is_frozen_exe:
ui_options.append('progressbar_hack')
defproghack = 0
if os_version == 'XP':
# turn on progress bar hack by default for Win XP
defproghack = 1
defaults.extend((('progressbar_hack' , defproghack, ''),))
NAG_FREQUENCY = 3
PORT_RANGE = 5
defconfig = dict([(name, value) for (name, value, doc) in defaults])
del name, value, doc
def btgui_exit(ipc):
ipc.stop()
class global_logger(object):
def __init__(self, logger = None):
self.logger = logger
def __call__(self, severity, msg):
if self.logger:
self.logger(severity, msg)
else:
sys.stderr.write("%s: %s\n" % (status_dict[severity], msg))
# if it's application global, why do we pass a reference to it everywhere?
global_log_func = global_logger()
if __name__ == '__main__':
zurllib.add_unsafe_thread()
try:
config, args = configfile.parse_configuration_and_args(defaults,
'bittorrent', sys.argv[1:], 0, None)
except BTFailure, e:
print str(e)
sys.exit(1)
config = Preferences().initWithDict(config)
advanced_ui = config['advanced']
newtorrents = args
for opt in ('responsefile', 'url'):
if config[opt]:
print '"--%s"' % opt, _("deprecated, do not use")
newtorrents.append(config[opt])
ipc = ipc_interface(config, global_log_func)
# this could be on the ipc object
ipc_master = True
try:
ipc.create()
except BTFailure:
ipc_master = False
try:
ipc.send_command('no-op')
except BTFailure:
global_log_func(ERROR, _("Failed to communicate with another %s process "
"but one seems to be running.") +
_(" Closing all %s windows may fix the problem.")
% (app_name, app_name))
sys.exit(1)
# make sure we clean up the ipc when we close
atexit.register(btgui_exit, ipc)
# it's not obvious, but 'newtorrents' is carried on to the gui
# __main__ if we're the IPC master
if not ipc_master:
if newtorrents:
# Not sure if anything really useful could be done if
# these send_command calls fail
for name in newtorrents:
ipc.send_command('start_torrent', name, config['save_as'])
sys.exit(0)
try:
ipc.send_command('show_error', _("%s already running")%app_name)
except BTFailure:
global_log_func(ERROR, _("Failed to communicate with another %s process.") +
_(" Closing all %s windows may fix the problem.")
% app_name)
sys.exit(1)
import gtk
import pango
import gobject
import webbrowser
assert gtk.pygtk_version >= (2, 6), _("PyGTK %s or newer required") % '2.6'
from BitTorrent import HELP_URL, DONATE_URL, SEARCH_URL, version, branch
from BitTorrent import TorrentQueue
from BitTorrent import LaunchPath
from BitTorrent import Desktop
from BitTorrent import ClientIdentifier
from BitTorrent import NewVersion
from BitTorrent.parseargs import makeHelp
from BitTorrent.TorrentQueue import RUNNING, RUN_QUEUED, QUEUED, KNOWN, ASKING_LOCATION
from BitTorrent.TrayIcon import TrayIcon
from BitTorrent.StatusLight import GtkStatusLight as StatusLight
from BitTorrent.GUI import *
main_torrent_dnd_tip = _("drag to reorder")
torrent_menu_tip = _("right-click for menu")
torrent_tip_format = '%s:\n %s\n %s'
rate_label = ': %s'
speed_classes = {
( 4, 5):_("dialup" ),
( 6, 14):_("DSL/cable 128k up"),
( 15, 29):_("DSL/cable 256k up"),
( 30, 91):_("DSL 768k up" ),
( 92, 137):_("T1" ),
( 138, 182):_("T1/E1" ),
( 183, 249):_("E1" ),
( 250, 5446):_("T3" ),
(5447,18871):_("OC3" ),
}
def find_dir(path):
if os.path.isdir(path):
return path
directory, garbage = os.path.split(path)
while directory:
if os.access(directory, os.F_OK) and os.access(directory, os.W_OK):
return directory
directory, garbage = os.path.split(directory)
if garbage == '':
break
return None
def smart_dir(path):
path = find_dir(path)
if path is None:
path = Desktop.desktop
return path
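# Illustrative: find_dir('/tmp/missing/sub/file') walks up until it hits an
# existing, writable directory ('/tmp' on most systems); smart_dir falls back
# to the desktop when nothing along the path is usable.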
class MenuItem(gtk.MenuItem):
def __init__(self, label, accel_group=None, func=None):
gtk.MenuItem.__init__(self, label)
if func is not None:
self.connect("activate", func)
else:
self.set_sensitive(False)
if accel_group is not None:
label = label.decode('utf-8')
accel_index = label.find('_')
if -1 < accel_index < len(label) - 1:
accel_char = long(ord(label[accel_index+1]))
accel_key = gtk.gdk.unicode_to_keyval(accel_char)
if accel_key != accel_char | 0x01000000:
self.add_accelerator("activate", accel_group, accel_key,
gtk.gdk.CONTROL_MASK, gtk.ACCEL_VISIBLE)
self.show()
def build_menu(menu_items, accel_group=None):
menu = gtk.Menu()
for label,func in menu_items:
if label == '----':
s = gtk.SeparatorMenuItem()
s.show()
menu.add(s)
else:
item = MenuItem(label, accel_group=accel_group, func=func)
item.show()
menu.add(item)
return menu
class Validator(gtk.Entry):
valid_chars = '1234567890'
minimum = None
maximum = None
cast = int
def __init__(self, option_name, config, setfunc):
gtk.Entry.__init__(self)
self.option_name = option_name
self.config = config
self.setfunc = setfunc
self.set_text(str(config[option_name]))
self.set_size_request(self.width,-1)
self.connect('insert-text', self.text_inserted)
self.connect('focus-out-event', self.focus_out)
def get_value(self):
value = None
try:
value = self.cast(self.get_text())
except ValueError:
pass
return value
def set_value(self, value):
self.set_text(str(value))
self.setfunc(self.option_name, value)
def focus_out(self, entry, widget):
value = self.get_value()
if value is None:
return
if (self.minimum is not None) and (value < self.minimum):
value = self.minimum
if (self.maximum is not None) and (value > self.maximum):
value = self.maximum
self.set_value(value)
def text_inserted(self, entry, input, position, user_data):
for i in input:
if (self.valid_chars is not None) and (i not in self.valid_chars):
self.emit_stop_by_name('insert-text')
return True
return False
class IPValidator(Validator):
valid_chars = '1234567890.'
width = 128
cast = str
class PortValidator(Validator):
width = 64
minimum = 1024
maximum = 65535
def add_end(self, end_name):
self.end_option_name = end_name
def set_value(self, value):
self.set_text(str(value))
self.setfunc(self.option_name, value)
self.setfunc(self.end_option_name, value+PORT_RANGE)
class PercentValidator(Validator):
width = 48
minimum = 0
class MinutesValidator(Validator):
width = 48
minimum = 1
class EnterUrlDialog(MessageDialog):
flags = gtk.DIALOG_DESTROY_WITH_PARENT
def __init__(self, parent):
self.entry = gtk.Entry()
self.entry.show()
self.main = parent
MessageDialog.__init__(self, parent.mainwindow,
_("Enter torrent URL"),
_("Enter the URL of a torrent file to open:"),
type=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_OK_CANCEL,
yesfunc=lambda *args: parent.open_url(self.entry.get_text()),
default=gtk.RESPONSE_OK
)
hbox = gtk.HBox()
hbox.pack_start(self.entry, padding=SPACING)
hbox.show()
self.entry.set_activates_default(True)
self.entry.set_flags(gtk.CAN_FOCUS)
self.vbox.pack_start(hbox)
self.entry.grab_focus()
def close(self, *args):
self.destroy()
def destroy(self):
MessageDialog.destroy(self)
self.main.window_closed('enterurl')
class RateSliderBox(gtk.VBox):
base = 10
multiplier = 4
max_exponent = 3.3
def __init__(self, config, torrentqueue):
gtk.VBox.__init__(self, homogeneous=False)
self.config = config
self.torrentqueue = torrentqueue
if self.config['max_upload_rate'] < self.slider_to_rate(0):
self.config['max_upload_rate'] = self.slider_to_rate(0)
        # Same table as the module-level speed_classes above.
        self.speed_classes = speed_classes
biggest_size = 0
for v in self.speed_classes.values():
width = gtk.Label(v).size_request()[0]
if width > biggest_size:
biggest_size = width
self.rate_slider_label_box = gtk.HBox(spacing=SPACING,
homogeneous=True)
self.rate_slider_label = gtk.Label(_("Maximum upload rate:"))
self.rate_slider_label.set_ellipsize(pango.ELLIPSIZE_START)
self.rate_slider_label.set_alignment(1, 0.5)
self.rate_slider_label_box.pack_start(self.rate_slider_label,
expand=True, fill=True)
self.rate_slider_value = gtk.Label(
self.value_to_label(self.config['max_upload_rate']))
self.rate_slider_value.set_alignment(0, 0.5)
self.rate_slider_value.set_size_request(biggest_size, -1)
self.rate_slider_label_box.pack_start(self.rate_slider_value,
expand=True, fill=True)
self.rate_slider_adj = gtk.Adjustment(
self.rate_to_slider(self.config['max_upload_rate']), 0,
self.max_exponent, 0.01, 0.1)
self.rate_slider = gtk.HScale(self.rate_slider_adj)
self.rate_slider.set_draw_value(False)
self.rate_slider_adj.connect('value_changed', self.set_max_upload_rate)
self.pack_start(self.rate_slider , expand=False, fill=False)
self.pack_start(self.rate_slider_label_box , expand=False, fill=False)
if False: # this shows the legend for the slider
self.rate_slider_legend = gtk.HBox(homogeneous=True)
for i in range(int(self.max_exponent+1)):
label = gtk.Label(str(self.slider_to_rate(i)))
alabel = halign(label, i/self.max_exponent)
self.rate_slider_legend.pack_start(alabel,
expand=True, fill=True)
self.pack_start(self.rate_slider_legend, expand=False, fill=False)
def start(self):
self.set_max_upload_rate(self.rate_slider_adj)
def rate_to_slider(self, value):
return math.log(value/self.multiplier, self.base)
def slider_to_rate(self, value):
return int(round(self.base**value * self.multiplier))
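    # Illustrative round trip: slider 0.0 -> 10**0 * 4 = 4 KB/s and
    # slider 3.3 -> round(10**3.3 * 4) ~= 7981 KB/s, so the logarithmic
    # track spans dialup through OC3.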
def value_to_label(self, value):
conn_type = ''
for key, conn in self.speed_classes.items():
min_v, max_v = key
if min_v <= value <= max_v:
conn_type = ' (%s)'%conn
break
label = str(Rate(value*1024)) + conn_type
return label
def set_max_upload_rate(self, adj):
option = 'max_upload_rate'
value = self.slider_to_rate(adj.get_value())
self.config[option] = value
self.torrentqueue.set_config(option, value)
self.rate_slider_value.set_text(self.value_to_label(int(value)))
class StopStartButton(gtk.Button):
stop_tip = _("Temporarily stop all running torrents")
start_tip = _("Resume downloading")
def __init__(self, main):
gtk.Button.__init__(self)
self.main = main
self.connect('clicked', self.toggle)
self.stop_image = gtk.Image()
self.stop_image.set_from_stock('bt-pause', gtk.ICON_SIZE_BUTTON)
self.stop_image.show()
self.start_image = gtk.Image()
self.start_image.set_from_stock('bt-play', gtk.ICON_SIZE_BUTTON)
self.start_image.show()
def toggle(self, widget):
self.set_paused(not self.main.config['pause'])
def set_paused(self, paused):
image = self.get_child()
if paused:
if image == self.stop_image:
self.remove(self.stop_image)
if image != self.start_image:
self.add(self.start_image)
self.main.tooltips.set_tip(self, self.start_tip)
self.main.stop_queue()
else:
if image == self.start_image:
self.remove(self.start_image)
if image != self.stop_image:
self.add(self.stop_image)
self.main.tooltips.set_tip(self, self.stop_tip )
self.main.restart_queue()
class VersionWindow(Window):
def __init__(self, main, newversion, download_url):
Window.__init__(self)
self.set_title(_("New %s version available")%app_name)
self.set_border_width(SPACING)
self.set_resizable(False)
self.main = main
self.newversion = newversion
self.download_url = download_url
self.connect('destroy', lambda w: self.main.window_closed('version'))
self.vbox = gtk.VBox(spacing=SPACING)
self.hbox = gtk.HBox(spacing=SPACING)
self.image = gtk.Image()
self.image.set_from_stock(gtk.STOCK_DIALOG_INFO, gtk.ICON_SIZE_DIALOG)
self.hbox.pack_start(self.image)
self.label = gtk.Label()
self.label.set_markup(
(_("A newer version of %s is available.\n") % app_name) +
(_("You are using %s, and the new version is %s.\n") % (version, newversion)) +
(_("You can always get the latest version from \n%s") % self.download_url)
)
self.label.set_selectable(True)
self.hbox.pack_start(self.label)
self.vbox.pack_start(self.hbox)
self.bbox = gtk.HBox(spacing=SPACING)
self.closebutton = gtk.Button(_("Download _later"))
self.closebutton.connect('clicked', self.close)
self.newversionbutton = gtk.Button(_("Download _now"))
self.newversionbutton.connect('clicked', self.get_newversion)
self.bbox.pack_end(self.newversionbutton, expand=False, fill=False)
self.bbox.pack_end(self.closebutton , expand=False, fill=False)
self.checkbox = gtk.CheckButton(_("_Remind me later"))
self.checkbox.set_active(True)
self.checkbox.connect('toggled', self.remind_toggle)
self.bbox.pack_start(self.checkbox, expand=False, fill=False)
self.vbox.pack_start(self.bbox)
self.add(self.vbox)
self.show_all()
def remind_toggle(self, widget):
v = self.checkbox.get_active()
notified = ''
if v:
notified = ''
else:
notified = self.newversion
self.main.set_config('notified', str(notified))
def close(self, widget):
self.destroy()
def get_newversion(self, widget):
if self.main.updater.can_install():
if self.main.updater.torrentfile is None:
self.main.visit_url(self.download_url)
else:
self.main.start_auto_update()
else:
self.main.visit_url(self.download_url)
self.destroy()
class AboutWindow(object):
def __init__(self, main, donatefunc):
self.win = Window()
self.win.set_title(_("About %s")%app_name)
self.win.set_size_request(300,400)
self.win.set_border_width(SPACING)
self.win.set_resizable(False)
self.win.connect('destroy', lambda w: main.window_closed('about'))
self.scroll = gtk.ScrolledWindow()
self.scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
self.scroll.set_shadow_type(gtk.SHADOW_IN)
self.outervbox = gtk.VBox()
self.outervbox.pack_start(get_logo(96), expand=False, fill=False)
version_str = version
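        # by this project's convention, an odd minor version digit
        # (third character of the version string) marks a beta build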
if int(version_str[2]) % 2:
version_str = version_str + ' ' + _("Beta")
self.outervbox.pack_start(gtk.Label(_("Version %s")%version_str), expand=False, fill=False)
if branch is not None:
blabel = gtk.Label('cdv client dir: %s' % branch)
self.outervbox.pack_start(blabel, expand=False, fill=False)
self.vbox = gtk.VBox()
self.vbox.set_size_request(250, -1)
for i, fn in enumerate(('credits', 'credits-l10n')):
if i != 0:
self.vbox.pack_start(gtk.HSeparator(), padding=SPACING,
expand=False, fill=False)
filename = os.path.join(doc_root, fn+'.txt')
l = ''
if not os.access(filename, os.F_OK|os.R_OK):
l = _("Couldn't open %s") % filename
else:
credits_f = file(filename)
l = credits_f.read()
credits_f.close()
if os.name == 'nt':
# gtk ignores blank lines on win98
l = l.replace('\n\n', '\n\t\n')
label = gtk.Label(l.strip())
label.set_line_wrap(True)
label.set_selectable(True)
label.set_justify(gtk.JUSTIFY_CENTER)
label.set_size_request(250,-1)
self.vbox.pack_start(label, expand=False, fill=False)
self.scroll.add_with_viewport(self.vbox)
self.outervbox.pack_start(self.scroll, padding=SPACING)
self.donatebutton = gtk.Button(_("Donate"))
self.donatebutton.connect('clicked', donatefunc)
self.donatebuttonbox = gtk.HButtonBox()
self.donatebuttonbox.pack_start(self.donatebutton,
expand=False, fill=False)
self.outervbox.pack_end(self.donatebuttonbox, expand=False, fill=False)
self.win.add(self.outervbox)
self.win.show_all()
def close(self, widget):
self.win.destroy()
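# LogWindow shows the shared LogBuffer in a read-only TextView with
# save and clear controls; saving also appends a "log saved" marker
# line to the written file.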
class LogWindow(object):
def __init__(self, main, logbuffer, config):
self.config = config
self.main = main
self.win = Window()
self.win.set_title(_("%s Activity Log")%app_name)
self.win.set_default_size(600, 200)
self.win.set_border_width(SPACING)
self.buffer = logbuffer
self.text = gtk.TextView(self.buffer)
self.text.set_editable(False)
self.text.set_cursor_visible(False)
self.text.set_wrap_mode(gtk.WRAP_WORD)
self.scroll = gtk.ScrolledWindow()
self.scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
self.scroll.set_shadow_type(gtk.SHADOW_IN)
self.scroll.add(self.text)
self.vbox = gtk.VBox(spacing=SPACING)
self.vbox.pack_start(self.scroll)
self.buttonbox = gtk.HButtonBox()
self.buttonbox.set_spacing(SPACING)
self.closebutton = gtk.Button(stock='gtk-close')
self.closebutton.connect('clicked', self.close)
self.savebutton = gtk.Button(stock='gtk-save')
self.savebutton.connect('clicked', self.save_log_file_selection)
self.clearbutton = gtk.Button(stock='gtk-clear')
self.clearbutton.connect('clicked', self.clear_log)
self.buttonbox.pack_start(self.savebutton)
self.buttonbox.pack_start(self.closebutton)
self.hbox2 = gtk.HBox(homogeneous=False)
self.hbox2.pack_end(self.buttonbox, expand=False, fill=False)
bb = gtk.HButtonBox()
bb.pack_start(self.clearbutton)
self.hbox2.pack_start(bb, expand=False, fill=True)
self.vbox.pack_end(self.hbox2, expand=False, fill=True)
self.win.add(self.vbox)
self.win.connect("destroy", lambda w: self.main.window_closed('log'))
self.scroll_to_end()
self.win.show_all()
def scroll_to_end(self):
mark = self.buffer.create_mark(None, self.buffer.get_end_iter())
self.text.scroll_mark_onscreen(mark)
def save_log_file_selection(self, *args):
name = 'bittorrent.log'
path = smart_dir(self.config['save_in'])
fullname = os.path.join(path, name)
self.main.open_window('savefile',
title=_("Save log in:"),
fullname=fullname,
got_location_func=self.save_log,
no_location_func=lambda: self.main.window_closed('savefile'))
def save_log(self, saveas):
self.main.window_closed('savefile')
f = file(saveas, 'w')
f.write(self.buffer.get_text(self.buffer.get_start_iter(),
self.buffer.get_end_iter()))
save_message = self.buffer.log_text(_("log saved"), None)
f.write(save_message)
f.close()
def clear_log(self, *args):
self.buffer.clear_log()
def close(self, widget):
self.win.destroy()
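# LogBuffer renders log lines with per-severity colored TextTags; the
# tag names registered below must match the values in status_dict
# (defined elsewhere in this module), which maps severity levels like
# CRITICAL to tag names.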
class LogBuffer(gtk.TextBuffer):
def __init__(self):
gtk.TextBuffer.__init__(self)
tt = self.get_tag_table()
size_tag = gtk.TextTag('small')
size_tag.set_property('size-points', 10)
tt.add(size_tag)
info_tag = gtk.TextTag('info')
info_tag.set_property('foreground', '#00a040')
tt.add(info_tag)
warning_tag = gtk.TextTag('warning')
warning_tag.set_property('foreground', '#a09000')
tt.add(warning_tag)
error_tag = gtk.TextTag('error')
error_tag.set_property('foreground', '#b00000')
tt.add(error_tag)
critical_tag = gtk.TextTag('critical')
critical_tag.set_property('foreground', '#b00000')
critical_tag.set_property('weight', pango.WEIGHT_BOLD)
tt.add(critical_tag)
def log_text(self, text, severity=CRITICAL):
now_str = datetime.datetime.strftime(datetime.datetime.now(),
'[%Y-%m-%d %H:%M:%S] ')
self.insert_with_tags_by_name(self.get_end_iter(), now_str, 'small')
if severity is not None:
self.insert_with_tags_by_name(self.get_end_iter(), '%s\n'%text,
'small', status_dict[severity])
else:
self.insert_with_tags_by_name(self.get_end_iter(),
' -- %s -- \n'%text, 'small')
return now_str+text+'\n'
def clear_log(self):
self.set_text('')
self.log_text(_("log cleared"), None)
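# CheckButton is a config-backed toggle: flipping it inverts the named
# option in main.config, pushes the new value through main.setfunc and,
# if given, runs an extra callback (the Settings window uses this to
# grey out dependent fields).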
class CheckButton(gtk.CheckButton):
def __init__(self, label, main, option_name, initial_value,
extra_callback=None):
gtk.CheckButton.__init__(self, label)
self.main = main
self.option_name = option_name
self.option_type = type(initial_value)
self.set_active(bool(initial_value))
self.extra_callback = extra_callback
self.connect('toggled', self.callback)
def callback(self, *args):
self.main.config[self.option_name] = \
self.option_type(not self.main.config[self.option_name])
self.main.setfunc(self.option_name, self.main.config[self.option_name])
if self.extra_callback is not None:
self.extra_callback()
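# Usage sketch (hypothetical option name and callback, for illustration
# only; any boolean-typed config option works the same way):
#   cb = CheckButton(_("Enable foo"), main, 'foo_enabled',
#                    main.config['foo_enabled'],
#                    extra_callback=refresh_foo)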
class SettingsWindow(object):
def __init__(self, main, config, setfunc):
self.main = main
self.setfunc = setfunc
self.config = config
self.win = Window()
self.win.connect("destroy", lambda w: main.window_closed('settings'))
self.win.set_title(_("%s Settings")%app_name)
self.win.set_border_width(SPACING)
self.notebook = gtk.Notebook()
self.vbox = gtk.VBox(spacing=SPACING)
self.vbox.pack_start(self.notebook, expand=False, fill=False)
# General tab
if os.name == 'nt':
self.cb_box = gtk.VBox(spacing=SPACING)
self.cb_box.set_border_width(SPACING)
self.notebook.append_page(self.cb_box, gtk.Label(_("General")))
self.startup_checkbutton = CheckButton(
_("Launch BitTorrent when Windows starts"), self,
'launch_on_startup', self.config['launch_on_startup'])
self.cb_box.pack_start(self.startup_checkbutton, expand=False, fill=False)
self.startup_checkbutton.connect('toggled', self.launch_on_startup)
self.minimize_checkbutton = CheckButton(
_("Minimize to system tray"), self,
'minimize_to_tray', self.config['minimize_to_tray'])
self.cb_box.pack_start(self.minimize_checkbutton, expand=False, fill=False)
# allow the user to set the progress bar text to all black
self.progressbar_hack = CheckButton(
_("Progress bar text is always black\n(requires restart)"),
self, 'progressbar_hack', self.config['progressbar_hack'])
self.cb_box.pack_start(self.progressbar_hack, expand=False, fill=False)
# end General tab
# Saving tab
self.saving_box = gtk.VBox(spacing=SPACING)
self.saving_box.set_border_width(SPACING)
self.notebook.append_page(self.saving_box, gtk.Label(_("Saving")))
self.dl_frame = gtk.Frame(_("Save new downloads in:"))
self.saving_box.pack_start(self.dl_frame, expand=False, fill=False)
self.dl_box = gtk.VBox(spacing=SPACING)
self.dl_box.set_border_width(SPACING)
self.dl_frame.add(self.dl_box)
self.save_in_box = gtk.HBox(spacing=SPACING)
self.dl_save_in = gtk.Entry()
self.dl_save_in.set_editable(False)
self.set_save_in(self.config['save_in'])
self.save_in_box.pack_start(self.dl_save_in, expand=True, fill=True)
self.dl_save_in_button = gtk.Button(_("Change..."))
self.dl_save_in_button.connect('clicked', self.get_save_in)
self.save_in_box.pack_start(self.dl_save_in_button, expand=False, fill=False)
self.dl_box.pack_start(self.save_in_box, expand=False, fill=False)
self.dl_ask_checkbutton = CheckButton(
_("Ask where to save each new download"), self,
'ask_for_save', self.config['ask_for_save'])
self.dl_box.pack_start(self.dl_ask_checkbutton, expand=False, fill=False)
# end Saving tab
# Downloading tab
self.downloading_box = gtk.VBox(spacing=SPACING)
self.downloading_box.set_border_width(SPACING)
self.notebook.append_page(self.downloading_box, gtk.Label(_("Downloading")))
self.dnd_frame = gtk.Frame(_("When starting a new torrent:"))
self.dnd_box = gtk.VBox(spacing=SPACING, homogeneous=True)
self.dnd_box.set_border_width(SPACING)
self.dnd_states = ['replace','add','ask']
self.dnd_original_state = self.config['start_torrent_behavior']
self.always_replace_radio = gtk.RadioButton(
group=None,
label=_("_Stop another running torrent to make room"))
self.dnd_box.pack_start(self.always_replace_radio)
self.always_replace_radio.state_name = self.dnd_states[0]
self.always_add_radio = gtk.RadioButton(
group=self.always_replace_radio,
label=_("_Don't stop other running torrents"))
self.dnd_box.pack_start(self.always_add_radio)
self.always_add_radio.state_name = self.dnd_states[1]
self.always_ask_radio = gtk.RadioButton(
group=self.always_replace_radio,
label=_("_Ask each time")
)
self.dnd_box.pack_start(self.always_ask_radio)
self.always_ask_radio.state_name = self.dnd_states[2]
self.dnd_group = self.always_replace_radio.get_group()
for r in self.dnd_group:
r.connect('toggled', self.start_torrent_behavior_changed)
self.set_start_torrent_behavior(self.config['start_torrent_behavior'])
self.dnd_frame.add(self.dnd_box)
self.downloading_box.pack_start(self.dnd_frame, expand=False, fill=False)
# Seeding tab
self.seeding_box = gtk.VBox(spacing=SPACING)
self.seeding_box.set_border_width(SPACING)
self.notebook.append_page(self.seeding_box, gtk.Label(_("Seeding")))
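        # colon_split separates a translated frame title from the rest
        # of the string on either an ASCII or a full-width (CJK) colon,
        # so translations that use U+FF1A still lay out correctly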
def colon_split(framestr):
COLONS = (':', u'\uff1a')
for colon in COLONS:
if colon in framestr:
return framestr.split(colon)
return '', framestr
nt_framestr = _("Seed completed torrents: until share ratio reaches [_] percent, or for [_] minutes, whichever comes first.")
nt_title, nt_rem = colon_split(nt_framestr)
nt_msg1, nt_msg2, nt_msg4 = nt_rem.split('[_]')
nt_msg3 = ''
if ',' in nt_msg2:
nt_msg2, nt_msg3 = nt_msg2.split(',')
nt_msg2 += ','
self.next_torrent_frame = gtk.Frame(nt_title+':')
self.next_torrent_box = gtk.VBox(spacing=SPACING, homogeneous=True)
self.next_torrent_box.set_border_width(SPACING)
self.next_torrent_frame.add(self.next_torrent_box)
self.next_torrent_ratio_box = gtk.HBox()
self.next_torrent_ratio_box.pack_start(gtk.Label(nt_msg1),
fill=False, expand=False)
self.next_torrent_ratio_field = PercentValidator('next_torrent_ratio',
self.config, self.setfunc)
self.next_torrent_ratio_box.pack_start(self.next_torrent_ratio_field,
fill=False, expand=False)
self.next_torrent_ratio_box.pack_start(gtk.Label(nt_msg2),
fill=False, expand=False)
self.next_torrent_box.pack_start(self.next_torrent_ratio_box)
self.next_torrent_time_box = gtk.HBox()
self.next_torrent_time_box.pack_start(gtk.Label(nt_msg3),
fill=False, expand=False)
self.next_torrent_time_field = MinutesValidator('next_torrent_time',
self.config, self.setfunc)
self.next_torrent_time_box.pack_start(self.next_torrent_time_field,
fill=False, expand=False)
self.next_torrent_time_box.pack_start(gtk.Label(nt_msg4),
fill=False, expand=False)
self.next_torrent_box.pack_start(self.next_torrent_time_box)
def seed_forever_extra():
for field in (self.next_torrent_ratio_field,
self.next_torrent_time_field):
field.set_sensitive(not self.config['seed_forever'])
seed_forever_extra()
self.seed_forever = CheckButton( _("Seed indefinitely"), self,
'seed_forever',
self.config['seed_forever'],
seed_forever_extra)
self.next_torrent_box.pack_start(self.seed_forever)
# end next torrent seed behavior
# begin last torrent seed behavior
lt_framestr = _("Seed last completed torrent: until share ratio reaches [_] percent.")
lt_title, lt_rem = colon_split(lt_framestr)
lt_msg1, lt_msg2 = lt_rem.split('[_]')
self.seeding_box.pack_start(self.next_torrent_frame, expand=False, fill=False)
self.last_torrent_frame = gtk.Frame(lt_title+':')
self.last_torrent_vbox = gtk.VBox(spacing=SPACING)
self.last_torrent_vbox.set_border_width(SPACING)
self.last_torrent_box = gtk.HBox()
self.last_torrent_box.pack_start(gtk.Label(lt_msg1),
expand=False, fill=False)
self.last_torrent_ratio_field = PercentValidator('last_torrent_ratio',
self.config, self.setfunc)
self.last_torrent_box.pack_start(self.last_torrent_ratio_field,
fill=False, expand=False)
self.last_torrent_box.pack_start(gtk.Label(lt_msg2),
fill=False, expand=False)
self.last_torrent_vbox.pack_start(self.last_torrent_box)
def seed_last_forever_extra():
self.last_torrent_ratio_field.set_sensitive(
not self.config['seed_last_forever'])
seed_last_forever_extra()
self.seed_last_forever = CheckButton(_("Seed indefinitely"), self,
'seed_last_forever',
self.config['seed_last_forever'],
seed_last_forever_extra)
self.last_torrent_vbox.pack_start(self.seed_last_forever)
self.last_torrent_frame.add(self.last_torrent_vbox)
self.seeding_box.pack_start(self.last_torrent_frame, expand=False, fill=False)
# Network tab
self.network_box = gtk.VBox(spacing=SPACING)
self.network_box.set_border_width(SPACING)
self.notebook.append_page(self.network_box, gtk.Label(_("Network")))
self.port_range_frame = gtk.Frame(_("Look for available port:"))
self.port_range_box = gtk.VBox(spacing=SPACING)
self.port_range_box.set_border_width(SPACING)
self.port_range = gtk.HBox()
self.port_range.pack_start(gtk.Label(_("starting at port: ")),
expand=False, fill=False)
self.minport_field = PortValidator('minport', self.config, self.setfunc)
self.minport_field.add_end('maxport')
self.port_range.pack_start(self.minport_field, expand=False, fill=False)
self.minport_field.settingswindow = self
self.port_range.pack_start(gtk.Label(' (1024-65535)'),
expand=False, fill=False)
self.port_range_box.pack_start(self.port_range,
expand=False, fill=False)
self.upnp = CheckButton(_("Enable automatic port mapping")+' (_UPnP)',
self, 'upnp', self.config['upnp'], None)
self.port_range_box.pack_start(self.upnp,
expand=False, fill=False)
self.port_range_frame.add(self.port_range_box)
self.network_box.pack_start(self.port_range_frame, expand=False, fill=False)
self.ip_frame = gtk.Frame(_("IP to report to the tracker:"))
self.ip_box = gtk.VBox()
self.ip_box.set_border_width(SPACING)
self.ip_field = IPValidator('ip', self.config, self.setfunc)
self.ip_box.pack_start(self.ip_field, expand=False, fill=False)
label = gtk.Label(_("(Has no effect unless you are on the\nsame local network as the tracker)"))
label.set_line_wrap(True)
self.ip_box.pack_start(lalign(label), expand=False, fill=False)
self.ip_frame.add(self.ip_box)
self.network_box.pack_start(self.ip_frame, expand=False, fill=False)
# end Network tab
# Language tab
self.languagechooser = LanguageChooser()
self.notebook.append_page(self.languagechooser, gtk.Label("Language"))
# end Language tab
# Advanced tab
if advanced_ui:
self.advanced_box = gtk.VBox(spacing=SPACING)
self.advanced_box.set_border_width(SPACING)
hint = gtk.Label(_("WARNING: Changing these settings can\nprevent %s from functioning correctly.")%app_name)
self.advanced_box.pack_start(lalign(hint), expand=False, fill=False)
self.store = gtk.ListStore(*[gobject.TYPE_STRING] * 2)
for option in ui_options[advanced_ui_options_index:]:
self.store.append((option, str(self.config[option])))
self.treeview = gtk.TreeView(self.store)
r = gtk.CellRendererText()
column = gtk.TreeViewColumn(_("Option"), r, text=0)
self.treeview.append_column(column)
r = gtk.CellRendererText()
r.set_property('editable', True)
r.connect('edited', self.store_value_edited)
column = gtk.TreeViewColumn(_("Value"), r, text=1)
self.treeview.append_column(column)
self.advanced_frame = gtk.Frame()
self.advanced_frame.set_shadow_type(gtk.SHADOW_IN)
self.advanced_frame.add(self.treeview)
self.advanced_box.pack_start(self.advanced_frame, expand=False, fill=False)
self.notebook.append_page(self.advanced_box, gtk.Label(_("Advanced")))
self.win.add(self.vbox)
self.win.show_all()
def get_save_in(self, widget=None):
self.file_selection = self.main.open_window('choosefolder',
title=_("Choose default download directory"),
fullname=self.config['save_in'],
got_location_func=self.set_save_in,
no_location_func=lambda: self.main.window_closed('choosefolder'))
def set_save_in(self, save_location):
self.main.window_closed('choosefolder')
if os.path.isdir(save_location):
if save_location[-1] != os.sep:
save_location += os.sep
self.config['save_in'] = save_location
save_in = path_wrap(self.config['save_in'])
self.dl_save_in.set_text(save_in)
self.setfunc('save_in', self.config['save_in'])
def launch_on_startup(self, *args):
dst = os.path.join(get_startup_dir(), app_name)
if self.config['launch_on_startup']:
src = os.path.abspath(sys.argv[0])
create_shortcut(src, dst, "--start_minimized")
else:
try:
remove_shortcut(dst)
except Exception, e:
self.main.global_error(WARNING, _("Failed to remove shortcut: %s") % str(e))
def set_start_torrent_behavior(self, state_name):
if state_name in self.dnd_states:
for r in self.dnd_group:
if r.state_name == state_name:
r.set_active(True)
else:
r.set_active(False)
else:
self.always_replace_radio.set_active(True)
def start_torrent_behavior_changed(self, radiobutton):
if radiobutton.get_active():
self.setfunc('start_torrent_behavior', radiobutton.state_name)
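    # store_value_edited validates an edited Advanced-tab cell against
    # the option's type in defconfig before applying it; values that
    # fail to parse are silently ignored (the ValueError branch below)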
def store_value_edited(self, cell, row, new_text):
it = self.store.get_iter_from_string(row)
option = ui_options[int(row)+advanced_ui_options_index]
t = type(defconfig[option])
try:
if t is type(None) or t is str:
value = new_text
elif t is int or t is long:
value = int(new_text)
elif t is float:
value = float(new_text)
elif t is bool:
                    value = new_text == 'True'
else:
raise TypeError, str(t)
except ValueError:
return
self.setfunc(option, value)
self.store.set(it, 1, str(value))
def close(self, widget):
self.win.destroy()
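# FileListWindow lists a batch torrent's files with per-file progress.
# SET_PRIORITIES gates an unfinished per-file priority UI (see the
# commented-out methods and the BUG note in adjustfile below).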
class FileListWindow(object):
SET_PRIORITIES = False
def __init__(self, metainfo, closefunc):
self.metainfo = metainfo
self.setfunc = None
self.allocfunc = None
self.win = Window()
self.win.set_title(_('Files in "%s"') % self.metainfo.name)
self.win.connect("destroy", closefunc)
self.tooltips = gtk.Tooltips()
self.filepath_to_iter = {}
self.box1 = gtk.VBox()
size_request = (0,0)
if self.SET_PRIORITIES:
self.toolbar = gtk.Toolbar()
for label, tip, stockicon, method, arg in (
(_("Never" ), _("Never download" ), gtk.STOCK_DELETE, self.dosomething, -1,),
(_("Normal"), _("Download normally"), gtk.STOCK_NEW , self.dosomething, 0,),
(_("First" ), _("Download first" ),'bt-finished' , self.dosomething, +1,),):
self.make_tool_item(label, tip, stockicon, method, arg)
size_request = (-1,54)
self.box1.pack_start(self.toolbar, False)
self.sw = gtk.ScrolledWindow()
self.sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.box1.pack_start(self.sw)
self.win.add(self.box1)
columns = [_("Filename"),_("Length"),_('%')]
pre_size_list = ['M'*30, '6666 MB', '100.0', 'Download','black']
if self.SET_PRIORITIES:
columns.append(_("Download"))
num_columns = len(pre_size_list)
self.store = gtk.TreeStore(*[gobject.TYPE_STRING] * num_columns)
self.store.append(None, pre_size_list)
self.treeview = gtk.TreeView(self.store)
self.treeview.set_enable_search(True)
self.treeview.set_search_column(0)
cs = []
for i, name in enumerate(columns):
r = gtk.CellRendererText()
r.set_property('xalign', (0, 1, 1, 1)[i])
if i == 0:
column = gtk.TreeViewColumn(name, r, text = i, foreground = len(pre_size_list)-1)
else:
column = gtk.TreeViewColumn(name, r, text = i)
column.set_resizable(True)
self.treeview.append_column(column)
cs.append(column)
self.sw.add(self.treeview)
self.treeview.set_headers_visible(False)
self.treeview.columns_autosize()
self.box1.show_all()
self.treeview.realize()
for column in cs:
column.set_fixed_width(max(5,column.get_width()))
column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
self.treeview.set_headers_visible(True)
self.store.clear()
if self.SET_PRIORITIES:
self.treeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
else:
self.treeview.get_selection().set_mode(gtk.SELECTION_NONE)
self.piecelen = self.metainfo.piece_length
self.lengths = self.metainfo.sizes
        self.initialize_file_priorities()  # per-file priority UI is disabled; see the commented-out variants below
for name, size, priority in itertools.izip(self.metainfo.orig_files,
self.metainfo.sizes, self.priorities):
parent_name, local_name = os.path.split(name)
parent_iter = self.recursive_add(parent_name)
row = [local_name, Size(size), '?','', 'black']
it = self.store.append(parent_iter, row)
self.filepath_to_iter[name] = it
self.treeview.expand_all()
tvsr = self.treeview.size_request()
vertical_padding = 18
size_request = [max(size_request[0],tvsr[0]),
(size_request[1] + tvsr[1] ) + vertical_padding]
maximum_height = 300
if size_request[1] > maximum_height - SCROLLBAR_WIDTH:
size_request[1] = maximum_height
size_request[0] = size_request[0] + SCROLLBAR_WIDTH
self.win.set_default_size(*size_request)
self.win.show_all()
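    # recursive_add creates the directory rows of the TreeStore on
    # demand, memoizing each path in filepath_to_iter so every file in
    # a batch torrent is attached to the correct parent iter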
def recursive_add(self, fullpath):
if fullpath == '':
return None
elif self.filepath_to_iter.has_key(fullpath):
return self.filepath_to_iter[fullpath]
else:
parent_path, local_path = os.path.split(fullpath)
parent_iter = self.recursive_add(parent_path)
it = self.store.append(parent_iter,
(local_path,) +
('',) * (self.store.get_n_columns()-2) +
('black',))
self.filepath_to_iter[fullpath] = it
return it
def make_tool_item(self, label, tip, stockicon, method, arg):
icon = gtk.Image()
icon.set_from_stock(stockicon, gtk.ICON_SIZE_SMALL_TOOLBAR)
item = gtk.ToolButton(icon_widget=icon, label=label)
item.set_homogeneous(True)
item.set_tooltip(self.tooltips, tip)
if arg is not None:
item.connect('clicked', method, arg)
else:
item.connect('clicked', method)
self.toolbar.insert(item, 0)
def initialize_file_priorities(self):
self.priorities = []
for length in self.lengths:
self.priorities.append(0)
## Uoti wrote these methods. I have no idea what this code is supposed to do.
## --matt
## def set_priorities(self, widget):
## r = []
## piece = 0
## pos = 0
## curprio = prevprio = 1000
## for priority, length in itertools.izip(self.priorities, self.lengths):
## pos += length
## curprio = min(priority, curprio)
## while pos >= (piece + 1) * self.piecelen:
## if curprio != prevprio:
## r.extend((piece, curprio))
## prevprio = curprio
## if curprio == priority:
## piece = pos // self.piecelen
## else:
## piece += 1
## if pos == piece * self.piecelen:
## curprio = 1000
## else:
## curprio = priority
## if curprio != prevprio:
## r.extend((piece, curprio))
## self.setfunc(r)
## it = self.store.get_iter_first()
## for i in xrange(len(self.priorities)):
## self.store.set_value(it, 5, "black")
## it = self.store.iter_next(it)
## self.origpriorities = list(self.priorities)
##
## def initialize_file_priorities(self, piecepriorities):
## self.priorities = []
## piecepriorities = piecepriorities + [999999999]
## it = iter(piecepriorities)
## assert it.next() == 0
## pos = piece = curprio = 0
## for length in self.lengths:
## pos += length
## priority = curprio
## while pos >= piece * self.piecelen:
## curprio = it.next()
## if pos > piece * self.piecelen:
## priority = max(priority, curprio)
## piece = it.next()
## self.priorities.append(priority)
## self.origpriorities = list(self.priorities)
def dosomething(self, widget, dowhat):
self.treeview.get_selection().selected_foreach(self.adjustfile, dowhat)
def adjustfile(self, treemodel, path, it, dowhat):
length = treemodel.get(it, 1)[0]
if length == '':
child = treemodel.iter_children(it)
while True:
if child is None:
return
elif not treemodel.is_ancestor(it, child):
return
else:
self.adjustfile(treemodel, path, child, dowhat)
child = treemodel.iter_next(child)
else:
# BUG: need to set file priorities in backend here
if dowhat == -1:
text, color = _("never"), 'darkgrey'
elif dowhat == 1:
text, color = _("first"), 'darkgreen'
else:
text, color = '', 'black'
treemodel.set_value(it, 3, text )
treemodel.set_value(it, 4, color)
def update(self, left, allocated):
for name, left, total, alloc in itertools.izip(
self.metainfo.orig_files, left, self.lengths, allocated):
it = self.filepath_to_iter[name]
            if total == 0:
                p = 1
            else:
                # float() guards against integer truncation in case true
                # division is not in effect at module level
                p = float(total - left) / total
            self.store.set_value(it, 2, "%.1f" % (int(p * 1000) / 10.0))
def close(self):
self.win.destroy()
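# PeerListWindow shows the live peer table; in advanced mode, extra
# columns (peer id, interested/choked/snubbed flags) are spliced into
# the header, placeholder and type lists at matching offsets.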
class PeerListWindow(object):
def __init__(self, torrent_name, closefunc):
self.win = Window()
self.win.connect("destroy", closefunc)
self.win.set_title( _('Peers for "%s"')%torrent_name)
self.sw = gtk.ScrolledWindow()
self.sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
self.sw.set_shadow_type(gtk.SHADOW_IN)
self.win.add(self.sw)
column_header = [_("IP address"), _("Client"), _("Connection"), _("KB/s down"), _("KB/s up"), _("MB downloaded"), _("MB uploaded"), _("% complete"), _("KB/s est. peer download")]
pre_size_list = ['666.666.666.666', 'TorrentStorm 1.3', 'bad peer', 66666, 66666, '1666.66', '1666.66', '100.0', 6666]
numeric_cols = [3,4,5,6,7,8]
store_types = [gobject.TYPE_STRING]*3 + [gobject.TYPE_INT]*2 + [gobject.TYPE_STRING]*3 + [gobject.TYPE_INT]
if advanced_ui:
column_header[2:2] = [_("Peer ID")]
pre_size_list[2:2] = ['-AZ2104-']
store_types[2:2] = [gobject.TYPE_STRING]
column_header[5:5] = [_("Interested"),_("Choked"),_("Snubbed")]
pre_size_list[5:5] = ['*','*','*']
store_types[5:5] = [gobject.TYPE_STRING]*3
column_header[9:9] = [_("Interested"),_("Choked"),_("Optimistic upload")]
pre_size_list[9:9] = ['*','*','*']
store_types[9:9] = [gobject.TYPE_STRING]*3
numeric_cols = [4,8,12,13,14,15]
num_columns = len(column_header)
self.store = gtk.ListStore(*store_types)
self.store.append(pre_size_list)
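        # makesortfunc adapts a plain two-argument comparison to the
        # gtk TreeSortable sort-func signature; None cells (from the
        # pre-size placeholder row) compare as equal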
def makesortfunc(sort_func):
def sortfunc(treemodel, iter1, iter2, column):
a_str = treemodel.get_value(iter1, column)
b_str = treemodel.get_value(iter2, column)
if a_str is not None and b_str is not None:
return sort_func(a_str,b_str)
else:
return 0
return sortfunc
def ip_sort(a_str,b_str):
for a,b in zip(a_str.split('.'), b_str.split('.')):
if a == b:
continue
if len(a) == len(b):
return cmp(a,b)
return cmp(int(a), int(b))
return 0
def float_sort(a_str,b_str):
a,b = 0,0
try: a = float(a_str)
except ValueError: pass
try: b = float(b_str)
except ValueError: pass
return cmp(a,b)
self.store.set_sort_func(0, makesortfunc(ip_sort), 0)
for i in range(2,5):
self.store.set_sort_func(num_columns-i, makesortfunc(float_sort), num_columns-i)
self.treeview = gtk.TreeView(self.store)
cs = []
for i, name in enumerate(column_header):
r = gtk.CellRendererText()
if i in numeric_cols:
r.set_property('xalign', 1)
column = gtk.TreeViewColumn(name, r, text = i)
column.set_resizable(True)
column.set_min_width(5)
column.set_sort_column_id(i)
self.treeview.append_column(column)
cs.append(column)
self.treeview.set_rules_hint(True)
self.sw.add(self.treeview)
self.treeview.set_headers_visible(False)
self.treeview.columns_autosize()
self.sw.show_all()
self.treeview.realize()
for column in cs:
column.set_fixed_width(column.get_width())
column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
self.treeview.set_headers_visible(True)
self.store.clear()
self.treeview.get_selection().set_mode(gtk.SELECTION_NONE)
width = self.treeview.size_request()[0]
self.win.set_default_size(width+SCROLLBAR_WIDTH, 300)
self.win.show_all()
self.prev = []
def update(self, peers, bad_peers):
fields = []
def p_bool(value): return value and '*' or ''
for peer in peers:
field = []
field.append(peer['ip'])
client, version = ClientIdentifier.identify_client(peer['id'])
field.append(client + ' ' + version)
if advanced_ui:
field.append(zurllib.quote(peer['id']))
field.append(peer['initiation'] == 'R' and _("remote") or _("local"))
dl = peer['download']
ul = peer['upload']
for l in (dl, ul):
rate = l[1]
if rate > 100:
field.append(int(round(rate/(2**10))))
else:
field.append(0)
if advanced_ui:
field.append(p_bool(l[2]))
field.append(p_bool(l[3]))
if len(l) > 4:
field.append(p_bool(l[4]))
else:
field.append(p_bool(peer['is_optimistic_unchoke']))
field.append('%.2f'%round(dl[0] / 2**20, 2))
field.append('%.2f'%round(ul[0] / 2**20, 2))
field.append('%.1f'%round(int(peer['completed']*1000)/10, 1))
field.append(int(peer['speed']//(2**10)))
fields.append(field)
for (ip, (is_banned, stats)) in bad_peers.iteritems():
field = []
field.append(ip)
client, version = ClientIdentifier.identify_client(stats.peerid)
field.append(client + ' ' + version)
if advanced_ui:
field.append(zurllib.quote(stats.peerid))
field.append(_("bad peer"))
# the sortable peer list won't take strings in these fields
field.append(0)
if advanced_ui:
field.extend([0] * 7) # upRate, * fields
else:
field.extend([0] * 1) # upRate
field.append(_("%d ok") % stats.numgood)
field.append(_("%d bad") % len(stats.bad))
if is_banned: # completion
field.append(_("banned"))
else:
field.append(_("ok"))
field.append(0) # peer dl rate
fields.append(field)
if self.store.get_sort_column_id() < 0:
# ListStore is unsorted, it might be faster to set only modified fields
it = self.store.get_iter_first()
for old, new in itertools.izip(self.prev, fields):
if old != new:
for i, value in enumerate(new):
if value != old[i]:
self.store.set_value(it, i, value)
it = self.store.iter_next(it)
for i in range(len(fields), len(self.prev)):
self.store.remove(it)
for i in range(len(self.prev), len(fields)):
self.store.append(fields[i])
self.prev = fields
else:
            # ListStore is sorted, so just reset all fields
self.store.clear()
for field in fields:
self.store.append(field)
def close(self):
self.win.destroy()
class TorrentInfoWindow(object):
def __init__(self, torrent_box, closefunc):
self.win = Window()
self.torrent_box = torrent_box
name = self.torrent_box.metainfo.name
self.win.set_title(_('Info for "%s"')%name)
self.win.set_size_request(-1,-1)
self.win.set_border_width(SPACING)
self.win.set_resizable(False)
self.win.connect('destroy', closefunc)
self.vbox = gtk.VBox(spacing=SPACING)
self.table = gtk.Table(rows=4, columns=3, homogeneous=False)
self.table.set_row_spacings(SPACING)
self.table.set_col_spacings(SPACING)
y = 0
def add_item(key, val, y):
self.table.attach(ralign(gtk.Label(key)), 0, 1, y, y+1)
v = gtk.Label(val)
v.set_selectable(True)
self.table.attach(lalign(v), 1, 2, y, y+1)
add_item(_("Torrent name:"), name, y)
y+=1
announce = ''
if self.torrent_box.metainfo.is_trackerless:
announce = _("(trackerless torrent)")
else:
announce = self.torrent_box.metainfo.announce
add_item(_("Announce url:"), announce, y)
y+=1
size = Size(self.torrent_box.metainfo.total_bytes)
num_files = _(", in one file")
if self.torrent_box.is_batch:
num_files = _(", in %d files") % len(self.torrent_box.metainfo.sizes)
add_item(_("Total size:"), str(size)+num_files, y)
y+=1
if advanced_ui:
pl = self.torrent_box.metainfo.piece_length
count, lastlen = divmod(size, pl)
sizedetail = '%d x %d + %d = %d' % (count, pl, lastlen, int(size))
add_item(_("Pieces:"), sizedetail, y)
y+=1
add_item(_("Info hash:"), self.torrent_box.infohash.encode('hex'), y)
y+=1
path = self.torrent_box.dlpath
filename = ''
if path is None:
path = ''
else:
if not self.torrent_box.is_batch:
path,filename = os.path.split(self.torrent_box.dlpath)
if path[-1] != os.sep:
path += os.sep
path = path_wrap(path)
add_item(_("Save in:"), path, y)
y+=1
if not self.torrent_box.is_batch:
add_item(_("File name:"), path_wrap(filename), y)
y+=1
self.vbox.pack_start(self.table)
if self.torrent_box.metainfo.comment not in (None, ''):
commentbuffer = gtk.TextBuffer()
commentbuffer.set_text(self.torrent_box.metainfo.comment)
commenttext = gtk.TextView(commentbuffer)
commenttext.set_editable(False)
commenttext.set_cursor_visible(False)
commenttext.set_wrap_mode(gtk.WRAP_WORD)
commentscroll = gtk.ScrolledWindow()
commentscroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
commentscroll.set_shadow_type(gtk.SHADOW_IN)
commentscroll.add(commenttext)
self.vbox.pack_start(commentscroll)
self.vbox.pack_start(gtk.HSeparator(), expand=False, fill=False)
self.hbox = gtk.HBox(spacing=SPACING)
lbbox = gtk.HButtonBox()
rbbox = gtk.HButtonBox()
lbbox.set_spacing(SPACING)
if LaunchPath.can_launch_files:
opendirbutton = IconButton(_("_Open directory"), stock=gtk.STOCK_OPEN)
opendirbutton.connect('clicked', self.torrent_box.open_dir)
lbbox.pack_start(opendirbutton, expand=False, fill=False)
opendirbutton.set_sensitive(self.torrent_box.can_open_dir())
filelistbutton = IconButton(_("Show _file list"), stock='gtk-index')
if self.torrent_box.is_batch:
filelistbutton.connect('clicked', self.torrent_box.open_filelist)
else:
filelistbutton.set_sensitive(False)
lbbox.pack_start(filelistbutton, expand=False, fill=False)
closebutton = gtk.Button(stock='gtk-close')
closebutton.connect('clicked', lambda w: self.close())
rbbox.pack_end(closebutton, expand=False, fill=False)
self.hbox.pack_start(lbbox, expand=False, fill=False)
self.hbox.pack_end( rbbox, expand=False, fill=False)
self.vbox.pack_end(self.hbox, expand=False, fill=False)
self.win.add(self.vbox)
self.win.show_all()
def close(self):
self.win.destroy()
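# TorrentBox is the base widget for a single torrent in the queue
# display; the subclasses below (KnownTorrentBox, QueuedTorrentBox,
# PausedTorrentBox, RunningTorrentBox) differ mainly in icon, tooltip,
# drag-and-drop behavior and context menu.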
class TorrentBox(gtk.EventBox):
torrent_tip_format = '%s:\n %s\n %s'
def __init__(self, infohash, metainfo, dlpath, completion, main):
gtk.EventBox.__init__(self)
self.infohash = infohash
self.metainfo = metainfo
self.completion = completion
self.main = main
self.main_torrent_dnd_tip = _("drag to reorder")
self.torrent_menu_tip = _("right-click for menu")
self.set_save_location(dlpath)
self.uptotal = self.main.torrents[self.infohash].uptotal
self.downtotal = self.main.torrents[self.infohash].downtotal
if self.downtotal > 0:
            self.up_down_ratio = float(self.uptotal) / self.metainfo.total_bytes
else:
self.up_down_ratio = None
self.infowindow = None
self.filelistwindow = None
self.is_batch = metainfo.is_batch
self.menu = None
self.menu_handler = None
self.vbox = gtk.VBox(homogeneous=False, spacing=SPACING)
self.label = gtk.Label()
self.set_name()
self.vbox.pack_start(lalign(self.label), expand=False, fill=False)
self.hbox = gtk.HBox(homogeneous=False, spacing=SPACING)
self.icon = gtk.Image()
self.icon.set_size_request(-1, 29)
self.iconbox = gtk.VBox()
self.iconevbox = gtk.EventBox()
self.iconevbox.add(self.icon)
self.iconbox.pack_start(self.iconevbox, expand=False, fill=False)
self.hbox.pack_start(self.iconbox, expand=False, fill=False)
self.vbox.pack_start(self.hbox)
self.infobox = gtk.VBox(homogeneous=False)
self.progressbarbox = gtk.HBox(homogeneous=False, spacing=SPACING)
self.progressbar = gtk.ProgressBar()
self.reset_progressbar_color()
if self.completion is not None:
self.progressbar.set_fraction(self.completion)
if self.completion >= 1:
done_label = self.make_done_label()
self.progressbar.set_text(done_label)
else:
self.progressbar.set_text('%.1f%%'%(self.completion*100))
else:
self.progressbar.set_text('?')
self.progressbarbox.pack_start(self.progressbar,
expand=True, fill=True)
self.buttonevbox = gtk.EventBox()
self.buttonbox = gtk.HBox(homogeneous=True, spacing=SPACING)
self.infobutton = gtk.Button()
self.infoimage = gtk.Image()
self.infoimage.set_from_stock('bt-info', gtk.ICON_SIZE_BUTTON)
self.infobutton.add(self.infoimage)
self.infobutton.connect('clicked', self.open_info)
self.main.tooltips.set_tip(self.infobutton,
_("Torrent info"))
self.buttonbox.pack_start(self.infobutton, expand=True)
self.cancelbutton = gtk.Button()
self.cancelimage = gtk.Image()
if self.completion is not None and self.completion >= 1:
self.cancelimage.set_from_stock('bt-remove', gtk.ICON_SIZE_BUTTON)
self.main.tooltips.set_tip(self.cancelbutton,
_("Remove torrent"))
else:
self.cancelimage.set_from_stock('bt-abort', gtk.ICON_SIZE_BUTTON)
self.main.tooltips.set_tip(self.cancelbutton,
_("Abort torrent"))
self.cancelbutton.add(self.cancelimage)
# not using 'clicked' because we want to check for CTRL key
self.cancelbutton.connect('button-release-event', self.confirm_remove)
self.buttonbox.pack_start(self.cancelbutton, expand=True, fill=False)
self.buttonevbox.add(self.buttonbox)
vbuttonbox = gtk.VBox(homogeneous=False)
vbuttonbox.pack_start(self.buttonevbox, expand=False, fill=False)
self.hbox.pack_end(vbuttonbox, expand=False, fill=False)
self.infobox.pack_start(self.progressbarbox, expand=False, fill=False)
self.hbox.pack_start(self.infobox, expand=True, fill=True)
self.add( self.vbox )
self.drag_source_set(gtk.gdk.BUTTON1_MASK,
TARGET_ALL,
gtk.gdk.ACTION_MOVE|gtk.gdk.ACTION_COPY)
self.connect('drag_data_get', self.drag_data_get)
self.connect('drag_begin' , self.drag_begin )
self.connect('drag_end' , self.drag_end )
self.cursor_handler_id = self.connect('enter_notify_event', self.change_cursors)
def set_save_location(self, dlpath):
self.dlpath = dlpath
updater_infohash = self.main.updater.infohash
if updater_infohash == self.infohash:
my_installer_dir = os.path.split(self.dlpath)[0]
if self.main.updater.installer_dir != my_installer_dir:
self.main.updater.set_installer_dir(my_installer_dir)
def reset_progressbar_color(self):
# Hack around broken GTK-Wimp theme:
# make progress bar text always black
# see task #694
if is_frozen_exe and self.main.config['progressbar_hack']:
style = self.progressbar.get_style().copy()
black = style.black
self.progressbar.modify_fg(gtk.STATE_PRELIGHT, black)
def change_cursors(self, *args):
        # BUG workaround: this lives in a one-shot enter_notify handler
        # (disconnected below) because the gdk window attributes are
        # None until after show_all() has been called
self.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND2))
self.buttonevbox.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.LEFT_PTR))
self.disconnect(self.cursor_handler_id)
def drag_data_get(self, widget, context, selection, targetType, eventTime):
selection.set(selection.target, 8, self.infohash)
def drag_begin(self, *args):
pass
def drag_end(self, *args):
self.main.drag_end()
def make_done_label(self, statistics=None):
s = ''
if statistics and statistics['timeEst'] is not None:
s = _(", will seed for %s") % Duration(statistics['timeEst'])
elif statistics:
s = _(", will seed indefinitely.")
if self.up_down_ratio is not None:
done_label = _("Done, share ratio: %d%%") % \
(self.up_down_ratio*100) + s
elif statistics is not None:
done_label = _("Done, %s uploaded") % \
Size(statistics['upTotal']) + s
else:
done_label = _("Done")
return done_label
def set_name(self):
self.label.set_text(self.metainfo.name)
self.label.set_ellipsize(pango.ELLIPSIZE_END)
def make_menu(self, extra_menu_items=[]):
if self.menu_handler:
self.disconnect(self.menu_handler)
## Basic Info
menu_items = [ MenuItem(_("Torrent _info" ), func=self.open_info), ]
open_dir_func = None
if LaunchPath.can_launch_files and self.can_open_dir():
open_dir_func = self.open_dir
menu_items.append( MenuItem(_("_Open directory" ), func=open_dir_func) )
filelistfunc = None
if self.is_batch:
filelistfunc = self.open_filelist
menu_items.append(MenuItem(_("_File list"), func=filelistfunc))
if self.torrent_state == RUNNING:
menu_items.append(MenuItem(_("_Peer list"), func=self.open_peerlist))
## end Basic Info
menu_items.append(gtk.SeparatorMenuItem())
## Settings
# change save location
change_save_location_func = None
if self.torrent_state != RUNNING and self.completion <= 0:
change_save_location_func = self.change_save_location
menu_items.append(MenuItem(_("_Change location"),
func=change_save_location_func))
# seed forever item
self.seed_forever_item = gtk.CheckMenuItem(_("_Seed indefinitely"))
self.reset_seed_forever()
def sft(widget, *args):
active = widget.get_active()
infohash = self.infohash
for option in ('seed_forever', 'seed_last_forever'):
                self.main.torrentqueue.set_config(option, active, infohash)
self.seed_forever_item.connect('toggled', sft)
menu_items.append(self.seed_forever_item)
## end Settings
menu_items.append(gtk.SeparatorMenuItem())
## Queue state dependent items
if self.torrent_state == KNOWN:
menu_items.append( MenuItem(_("Re_start"), func=self.move_to_end ))
elif self.torrent_state == QUEUED:
#Here's where we'll put the "Start hash check" menu item
menu_items.append(MenuItem(_("Download _now"), func=self.start))
elif self.torrent_state in (RUNNING, RUN_QUEUED):
# no items for here
pass
## Completion dependent items
if self.completion is not None and self.completion >= 1:
if self.torrent_state != KNOWN:
menu_items.append(MenuItem(_("_Finish"), func=self.finish))
menu_items.append( MenuItem(_("_Remove" ), func=self.confirm_remove))
else:
if self.torrent_state in (RUNNING, RUN_QUEUED):
menu_items.append(MenuItem(_("Download _later"), func=self.move_to_end))
else:
#Here's where we'll put the "Seed _later" menu item
pass
menu_items.append(MenuItem(_("_Abort" ), func=self.confirm_remove))
## build the menu
self.menu = gtk.Menu()
for i in menu_items:
i.show()
self.menu.add(i)
self.menu_handler = self.connect_object("event", self.show_menu, self.menu)
def reset_seed_forever(self):
sfb = False
d = self.main.torrents[self.infohash].config.getDict()
if d.has_key('seed_forever'):
sfb = d['seed_forever']
self.seed_forever_item.set_active(bool(sfb))
def change_save_location(self, widget=None):
self.main.change_save_location(self.infohash)
def open_info(self, widget=None):
if self.infowindow is None:
self.infowindow = TorrentInfoWindow(self, self.infoclosed)
def infoclosed(self, widget=None):
self.infowindow = None
def close_info(self):
if self.infowindow is not None:
self.infowindow.close()
def open_filelist(self, widget):
if not self.is_batch:
return
if self.filelistwindow is None:
self.filelistwindow = FileListWindow(self.metainfo,
self.filelistclosed)
self.main.torrentqueue.check_completion(self.infohash, True)
def filelistclosed(self, widget):
self.filelistwindow = None
def close_filelist(self):
if self.filelistwindow is not None:
self.filelistwindow.close()
def close_child_windows(self):
self.close_info()
self.close_filelist()
def destroy(self):
if self.menu is not None:
self.menu.destroy()
self.menu = None
gtk.EventBox.destroy(self)
def show_menu(self, widget, event):
if event.type == gtk.gdk.BUTTON_PRESS and event.button == 3:
widget.popup(None, None, None, event.button, event.time)
return True
return False
def _short_path(self, dlpath):
path_length = 40
sep = '...'
ret = os.path.split(dlpath)[0]
if len(ret) > path_length+len(sep):
return ret[:int(path_length/2)]+sep+ret[-int(path_length/2):]
else:
return ret
def get_path_to_open(self):
path = self.dlpath
if not self.is_batch:
path = os.path.split(self.dlpath)[0]
return path
def can_open_dir(self):
return os.access(self.get_path_to_open(), os.F_OK|os.R_OK)
def open_dir(self, widget):
LaunchPath.launchdir(self.get_path_to_open())
def confirm_remove(self, widget, event=None):
if event is not None and event.get_state() & gtk.gdk.CONTROL_MASK:
self.remove()
else:
message = _('Are you sure you want to remove "%s"?') % self.metainfo.name
if self.completion >= 1:
if self.up_down_ratio is not None:
message = _("Your share ratio for this torrent is %d%%. ")%(self.up_down_ratio*100) + message
else:
message = _("You have uploaded %s to this torrent. ")%(Size(self.uptotal)) + message
d = MessageDialog(self.main.mainwindow,
_("Remove this torrent?"),
message,
type=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_OK_CANCEL,
yesfunc=self.remove,
default=gtk.RESPONSE_OK,
)
def remove(self):
self.main.torrentqueue.remove_torrent(self.infohash)
class KnownTorrentBox(TorrentBox):
torrent_state = KNOWN
def __init__(self, infohash, metainfo, dlpath, completion, main):
TorrentBox.__init__(self, infohash, metainfo, dlpath, completion, main)
status_tip = ''
if completion >= 1:
self.icon.set_from_stock('bt-finished', gtk.ICON_SIZE_LARGE_TOOLBAR)
status_tip = _("Finished")
known_torrent_dnd_tip = _("drag into list to seed")
else:
self.icon.set_from_stock('bt-broken', gtk.ICON_SIZE_LARGE_TOOLBAR)
status_tip = _("Failed")
known_torrent_dnd_tip = _("drag into list to resume")
self.main.tooltips.set_tip(self.iconevbox,
self.torrent_tip_format % (status_tip,
known_torrent_dnd_tip,
self.torrent_menu_tip))
self.make_menu()
self.show_all()
def move_to_end(self, widget):
self.main.change_torrent_state(self.infohash, QUEUED)
class DroppableTorrentBox(TorrentBox):
def __init__(self, infohash, metainfo, dlpath, completion, main):
TorrentBox.__init__(self, infohash, metainfo, dlpath, completion, main)
self.drag_dest_set(gtk.DEST_DEFAULT_DROP,
TARGET_ALL,
gtk.gdk.ACTION_MOVE|gtk.gdk.ACTION_COPY)
self.connect('drag_data_received', self.drag_data_received)
self.connect('drag_motion', self.drag_motion)
self.index = None
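    # dropping another torrent's infohash on the top half of this box
    # inserts it before this torrent, on the bottom half after it; any
    # other drop type is handed off as a dropped .torrent file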
def drag_data_received(self, widget, context, x, y, selection, targetType, time):
if targetType == BT_TARGET_TYPE:
half_height = self.size_request()[1] // 2
where = cmp(y, half_height)
if where == 0: where = 1
self.parent.put_infohash_at_child(selection.data, self, where)
else:
self.main.accept_dropped_file(widget, context, x, y, selection, targetType, time)
def drag_motion(self, widget, context, x, y, time):
self.get_current_index()
half_height = self.size_request()[1] // 2
if y < half_height:
self.parent.highlight_before_index(self.index)
else:
self.parent.highlight_after_index(self.index)
return False
def drag_end(self, *args):
self.parent.highlight_child()
TorrentBox.drag_end(self, *args)
def get_current_index(self):
self.index = self.parent.get_index_from_child(self)
class QueuedTorrentBox(DroppableTorrentBox):
icon_name = 'bt-queued'
torrent_state = QUEUED
def __init__(self, infohash, metainfo, dlpath, completion, main):
DroppableTorrentBox.__init__(self, infohash, metainfo, dlpath, completion, main)
self.state_name = _("Waiting")
self.main.tooltips.set_tip(self.iconevbox,
self.torrent_tip_format % (self.state_name,
self.main_torrent_dnd_tip,
self.torrent_menu_tip))
self.icon.set_from_stock(self.icon_name, gtk.ICON_SIZE_LARGE_TOOLBAR)
self.make_menu()
self.show_all()
def start(self, widget):
self.main.runbox.put_infohash_last(self.infohash)
def finish(self, widget):
self.main.change_torrent_state(self.infohash, KNOWN)
class PausedTorrentBox(DroppableTorrentBox):
icon_name = 'bt-paused'
torrent_state = RUN_QUEUED
def __init__(self, infohash, metainfo, dlpath, completion, main):
DroppableTorrentBox.__init__(self, infohash, metainfo, dlpath, completion, main)
self.state_name = _("Paused")
self.main.tooltips.set_tip(self.iconevbox,
self.torrent_tip_format % (self.state_name,
self.main_torrent_dnd_tip,
self.torrent_menu_tip))
self.icon.set_from_stock(self.icon_name, gtk.ICON_SIZE_LARGE_TOOLBAR)
self.make_menu()
self.show_all()
def move_to_end(self, widget):
self.main.change_torrent_state(self.infohash, QUEUED)
def finish(self, widget):
self.main.change_torrent_state(self.infohash, KNOWN)
def update_status(self, statistics):
# in case the TorrentQueue thread calls widget.update_status()
# before the GUI has changed the torrent widget to a
# RunningTorrentBox
pass
class RunningTorrentBox(PausedTorrentBox):
torrent_state = RUNNING
def __init__(self, infohash, metainfo, dlpath, completion, main):
DroppableTorrentBox.__init__(self, infohash, metainfo, dlpath, completion, main)
self.main.tooltips.set_tip(self.iconevbox,
self.torrent_tip_format % (_("Running"),
self.main_torrent_dnd_tip,
self.torrent_menu_tip))
self.seed = False
self.peerlistwindow = None
self.update_peer_list_flag = 0
self.icon.set_from_stock('bt-running', gtk.ICON_SIZE_LARGE_TOOLBAR)
self.rate_label_box = gtk.HBox(homogeneous=True)
self.up_rate = gtk.Label()
self.down_rate = gtk.Label()
self.rate_label_box.pack_start(lalign(self.up_rate ),
expand=True, fill=True)
self.rate_label_box.pack_start(lalign(self.down_rate),
expand=True, fill=True)
self.infobox.pack_start(self.rate_label_box)
if advanced_ui:
self.extrabox = gtk.VBox(homogeneous=False)
#self.extrabox = self.vbox
self.up_curr = FancyLabel(_("Current up: %s" ), 0)
self.down_curr = FancyLabel(_("Current down: %s"), 0)
self.curr_box = gtk.HBox(homogeneous=True)
self.curr_box.pack_start(lalign(self.up_curr ), expand=True, fill=True)
self.curr_box.pack_start(lalign(self.down_curr), expand=True, fill=True)
self.extrabox.pack_start(self.curr_box)
self.up_prev = FancyLabel(_("Previous up: %s" ), 0)
self.down_prev = FancyLabel(_("Previous down: %s"), 0)
self.prev_box = gtk.HBox(homogeneous=True)
self.prev_box.pack_start(lalign(self.up_prev ), expand=True, fill=True)
self.prev_box.pack_start(lalign(self.down_prev), expand=True, fill=True)
self.extrabox.pack_start(self.prev_box)
self.share_ratio = FancyLabel(_("Share ratio: %0.02f%%"), 0)
self.extrabox.pack_start(lalign(self.share_ratio))
self.peer_info = FancyLabel(_("%s peers, %s seeds. Totals from "
"tracker: %s"), 0, 0, 'NA')
self.extrabox.pack_start(lalign(self.peer_info))
self.dist_copies = FancyLabel(_("Distributed copies: %d; Next: %s"), 0, '')
self.extrabox.pack_start(lalign(self.dist_copies))
self.piece_info = FancyLabel(_("Pieces: %d total, %d complete, "
"%d partial, %d active (%d empty)"), *(0,)*5)
self.extrabox.pack_start(lalign(self.piece_info))
self.bad_info = FancyLabel(_("%d bad pieces + %s in discarded requests"), 0, 0)
self.extrabox.pack_start(lalign(self.bad_info))
# extra info
pl = self.metainfo.piece_length
tl = self.metainfo.total_bytes
count, lastlen = divmod(tl, pl)
self.piece_count = count + (lastlen > 0)
self.infobox.pack_end(self.extrabox, expand=False, fill=False)
self.make_menu()
self.show_all()
def change_to_completed(self):
self.completion = 1.0
self.cancelimage.set_from_stock('bt-remove', gtk.ICON_SIZE_BUTTON)
self.main.tooltips.set_tip(self.cancelbutton,
_("Remove torrent"))
updater_infohash = self.main.updater.infohash
if updater_infohash == self.infohash:
self.main.updater.start_install()
self.make_menu()
def close_child_windows(self):
TorrentBox.close_child_windows(self)
self.close_peerlist()
def open_filelist(self, widget):
if not self.is_batch:
return
if self.filelistwindow is None:
self.filelistwindow = FileListWindow(self.metainfo,
self.filelistclosed)
self.main.make_statusrequest()
def open_peerlist(self, widget):
if self.peerlistwindow is None:
self.peerlistwindow = PeerListWindow(self.metainfo.name,
self.peerlistclosed)
self.main.make_statusrequest()
def peerlistclosed(self, widget):
self.peerlistwindow = None
self.update_peer_list_flag = 0
def close_peerlist(self):
if self.peerlistwindow is not None:
self.peerlistwindow.close()
rate_label = ': %s'
eta_label = '?'
done_label = _("Done")
progress_bar_label = _("%.1f%% done, %s remaining")
down_rate_label = _("Download rate")
up_rate_label = _("Upload rate" )
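    # update_status is driven by the periodic status request; it
    # refreshes the progress bar and rate labels, plus the advanced-ui
    # statistics, and throttles peer-list updates to one in four calls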
def update_status(self, statistics):
fractionDone = statistics.get('fractionDone')
activity = statistics.get('activity')
self.main.set_title(torrentName=self.metainfo.name,
fractionDone=fractionDone)
dt = self.downtotal
if statistics.has_key('downTotal'):
dt += statistics['downTotal']
ut = self.uptotal
if statistics.has_key('upTotal'):
ut += statistics['upTotal']
if dt > 0:
            self.up_down_ratio = float(ut) / self.metainfo.total_bytes
done_label = self.done_label
eta_label = self.eta_label
if 'numPeers' in statistics:
eta = statistics.get('timeEst')
if eta is not None:
eta_label = Duration(eta)
if fractionDone == 1:
done_label = self.make_done_label(statistics)
if fractionDone == 1:
self.progressbar.set_fraction(1)
self.progressbar.set_text(done_label)
self.reset_seed_forever()
if not self.completion >= 1:
self.change_to_completed()
else:
self.progressbar.set_fraction(fractionDone)
progress_bar_label = self.progress_bar_label % \
(int(fractionDone*1000)/10, eta_label)
self.progressbar.set_text(progress_bar_label)
if 'numPeers' not in statistics:
return
self.down_rate.set_text(self.down_rate_label+self.rate_label %
Rate(statistics['downRate']))
self.up_rate.set_text (self.up_rate_label+self.rate_label %
Rate(statistics['upRate']))
if advanced_ui:
if self.up_down_ratio is not None:
self.share_ratio.set_value(self.up_down_ratio*100)
num_seeds = statistics['numSeeds']
if self.seed:
num_seeds = statistics['numOldSeeds'] = 0 # !@# XXX
if statistics['trackerPeers'] is not None:
totals = '%d/%d' % (statistics['trackerPeers'],
statistics['trackerSeeds'])
else:
totals = _("NA")
self.peer_info.set_value(statistics['numPeers'], num_seeds, totals)
self.up_curr.set_value(str(Size(statistics['upTotal'])))
self.down_curr.set_value(str(Size(statistics['downTotal'])))
self.up_prev.set_value(str(Size(self.uptotal)))
self.down_prev.set_value(str(Size(self.downtotal)))
# refresh extra info
self.piece_info.set_value(self.piece_count,
statistics['storage_numcomplete'],
statistics['storage_dirty'],
statistics['storage_active'],
statistics['storage_new'] )
            self.dist_copies.set_value(
                statistics['numCopies'],
                ', '.join(["%d:%.1f%%" % (a, int(b*1000)/10)
                           for a, b in zip(itertools.count(int(statistics['numCopies']+1)),
                                           statistics['numCopyList'])]))
self.bad_info.set_value(statistics['storage_numflunked'], Size(statistics['discarded']))
if self.peerlistwindow is not None:
if self.update_peer_list_flag == 0:
spew = statistics.get('spew')
if spew is not None:
self.peerlistwindow.update(spew, statistics['bad_peers'])
self.update_peer_list_flag = (self.update_peer_list_flag + 1) % 4
if self.filelistwindow is not None:
if 'files_left' in statistics:
self.filelistwindow.update(statistics['files_left'],
statistics['files_allocated'])
class DroppableHSeparator(PaddedHSeparator):
def __init__(self, box, spacing=SPACING):
PaddedHSeparator.__init__(self, spacing)
self.box = box
self.main = box.main
self.drag_dest_set(gtk.DEST_DEFAULT_DROP,
TARGET_ALL,
gtk.gdk.ACTION_MOVE|gtk.gdk.ACTION_COPY)
self.connect('drag_data_received', self.drag_data_received)
self.connect('drag_motion' , self.drag_motion )
def drag_highlight(self):
self.sep.drag_highlight()
self.main.add_unhighlight_handle()
def drag_unhighlight(self):
self.sep.drag_unhighlight()
def drag_data_received(self, widget, context, x, y, selection, targetType, time):
if targetType == BT_TARGET_TYPE:
self.box.drop_on_separator(self, selection.data)
else:
self.main.accept_dropped_file(widget, context, x, y, selection, targetType, time)
def drag_motion(self, wid, context, x, y, time):
self.drag_highlight()
return False
class DroppableBox(HSeparatedBox):
def __init__(self, main, spacing=0):
HSeparatedBox.__init__(self, spacing=spacing)
self.main = main
self.drag_dest_set(gtk.DEST_DEFAULT_DROP,
TARGET_ALL,
gtk.gdk.ACTION_MOVE|gtk.gdk.ACTION_COPY)
self.connect('drag_data_received', self.drag_data_received)
self.connect('drag_motion', self.drag_motion)
def drag_motion(self, widget, context, x, y, time):
return False
def drag_data_received(self, widget, context, x, y, selection, targetType, time):
pass
class KnownBox(DroppableBox):
def __init__(self, main, spacing=0):
DroppableBox.__init__(self, main, spacing=spacing)
self.drag_dest_set(gtk.DEST_DEFAULT_DROP,
TARGET_ALL,
gtk.gdk.ACTION_MOVE|gtk.gdk.ACTION_COPY)
def pack_start(self, widget, *args, **kwargs):
old_len = len(self.get_children())
DroppableBox.pack_start(self, widget, *args, **kwargs)
if old_len <= 0:
self.main.maximize_known_pane()
self.main.knownscroll.scroll_to_bottom()
def remove(self, widget):
DroppableBox.remove(self, widget)
new_len = len(self.get_children())
if new_len == 0:
self.main.maximize_known_pane()
def drag_data_received(self, widget, context, x, y, selection, targetType, time):
if targetType == BT_TARGET_TYPE:
infohash = selection.data
self.main.finish(infohash)
else:
self.main.accept_dropped_file(widget, context, x, y, selection, targetType, time)
def drag_motion(self, widget, context, x, y, time):
self.main.drag_highlight(widget=self)
def drag_highlight(self):
self.main.knownscroll.drag_highlight()
self.main.add_unhighlight_handle()
def drag_unhighlight(self):
self.main.knownscroll.drag_unhighlight()
class RunningAndQueueBox(gtk.VBox):
def __init__(self, main, **kwargs):
gtk.VBox.__init__(self, **kwargs)
self.main = main
def drop_on_separator(self, sep, infohash):
self.main.change_torrent_state(infohash, QUEUED, 0)
def highlight_between(self):
self.drag_highlight()
def drag_highlight(self):
self.get_children()[1].drag_highlight()
def drag_unhighlight(self):
self.get_children()[1].drag_unhighlight()
class SpacerBox(DroppableBox):
def drag_data_received(self, widget, context, x, y, selection, targetType, time):
if targetType == BT_TARGET_TYPE:
infohash = selection.data
self.main.queuebox.put_infohash_last(infohash)
else:
self.main.accept_dropped_file(widget, context, x, y, selection, targetType, time)
return True
BEFORE = -1
AFTER = 1
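# ReorderableBox is a DroppableBox whose children can be rearranged by
# drag and drop; BEFORE/AFTER above select which side of a reference
# child a dropped infohash lands on. Children alternate with separator
# widgets in the container, hence the index*2 arithmetic below.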
class ReorderableBox(DroppableBox):
def new_separator(self):
return DroppableHSeparator(self)
def __init__(self, main):
DroppableBox.__init__(self, main)
self.main = main
self.drag_dest_set(gtk.DEST_DEFAULT_DROP,
TARGET_ALL,
gtk.gdk.ACTION_MOVE|gtk.gdk.ACTION_COPY)
self.connect('drag_data_received', self.drag_data_received)
self.connect('drag_motion' , self.drag_motion)
def drag_data_received(self, widget, context, x, y, selection, targetType, time):
if targetType == BT_TARGET_TYPE:
half_height = self.size_request()[1] // 2
if y < half_height:
self.put_infohash_first(selection.data)
else:
self.put_infohash_last(selection.data)
else:
self.main.accept_dropped_file(widget, context, x, y, selection, targetType, time)
return True
def drag_motion(self, widget, context, x, y, time):
return False
def drag_highlight(self):
final = self.get_children()[-1]
final.drag_highlight()
self.main.add_unhighlight_handle()
def drag_unhighlight(self):
self.highlight_child(index=None)
self.parent.drag_unhighlight()
def highlight_before_index(self, index):
self.drag_unhighlight()
children = self._get_children()
if index > 0:
children[index*2 - 1].drag_highlight()
else:
self.highlight_at_top()
def highlight_after_index(self, index):
self.drag_unhighlight()
children = self._get_children()
if index*2 < len(children)-1:
children[index*2 + 1].drag_highlight()
else:
self.highlight_at_bottom()
def highlight_child(self, index=None):
for i, child in enumerate(self._get_children()):
if index is not None and i == index*2:
child.drag_highlight()
else:
child.drag_unhighlight()
def drop_on_separator(self, sep, infohash):
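        # Editor's note: _get_children() (from HSeparatedBox) is assumed to
        # alternate content widgets with separators, (child0, sep0, child1,
        # sep1, ...), so the widget just above a separator sits at index i-1.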
children = self._get_children()
for i, child in enumerate(children):
if child == sep:
reference_child = children[i-1]
self.put_infohash_at_child(infohash, reference_child, AFTER)
break
def get_queue(self):
queue = []
c = self.get_children()
for t in c:
queue.append(t.infohash)
return queue
def put_infohash_first(self, infohash):
self.highlight_child()
children = self.get_children()
if len(children) > 1 and infohash == children[0].infohash:
return
self.put_infohash_at_index(infohash, 0)
def put_infohash_last(self, infohash):
self.highlight_child()
children = self.get_children()
end = len(children)
if len(children) > 1 and infohash == children[end-1].infohash:
return
self.put_infohash_at_index(infohash, end)
def put_infohash_at_child(self, infohash, reference_child, where):
self.highlight_child()
if infohash == reference_child.infohash:
return
target_index = self.get_index_from_child(reference_child)
if where == AFTER:
target_index += 1
self.put_infohash_at_index(infohash, target_index)
def get_index_from_child(self, child):
c = self.get_children()
ret = -1
try:
ret = c.index(child)
except ValueError:
pass
return ret
def highlight_at_top(self):
raise NotImplementedError
def highlight_at_bottom(self):
raise NotImplementedError
def put_infohash_at_index(self, infohash, end):
raise NotImplementedError
class RunningBox(ReorderableBox):
def put_infohash_at_index(self, infohash, target_index):
#print 'RunningBox.put_infohash_at_index', infohash.encode('hex')[:8], target_index
l = self.get_queue()
replaced = None
if l:
replaced = l[-1]
self.main.confirm_replace_running_torrent(infohash, replaced,
target_index)
def highlight_at_top(self):
pass
# BUG: Don't know how I will indicate in the UI that the top of the list is highlighted
def highlight_at_bottom(self):
self.parent.highlight_between()
class QueuedBox(ReorderableBox):
def put_infohash_at_index(self, infohash, target_index):
#print 'want to put', infohash.encode('hex'), 'at', target_index
self.main.change_torrent_state(infohash, QUEUED, target_index)
def highlight_at_top(self):
self.parent.highlight_between()
def highlight_at_bottom(self):
pass
# BUG: Don't know how I will indicate in the UI that the bottom of the list is highlighted
class Struct(object):
pass
class SearchField(gtk.Entry):
def __init__(self, default_text, visit_url_func):
gtk.Entry.__init__(self)
self.default_text = default_text
self.visit_url_func = visit_url_func
self.set_text(self.default_text)
self.set_size_request(150, -1)
# default gtk Entry dnd processing is broken on linux!
# - default Motion handling causes asyncs
# - there's no way to filter the default text dnd
# see the parent window for a very painful work-around
self.drag_dest_unset()
self.connect('key-press-event', self.check_for_enter)
self.connect('button-press-event', self.begin_edit)
self.search_completion = gtk.EntryCompletion()
self.search_completion.set_text_column(0)
self.search_store = gtk.ListStore(gobject.TYPE_STRING)
self.search_completion.set_model(self.search_store)
self.set_completion(self.search_completion)
self.reset_text()
self.timeout_id = None
def begin_edit(self, *args):
if self.get_text() == self.default_text:
self.set_text('')
def check_for_enter(self, widget, event):
if event.keyval in (gtk.keysyms.Return, gtk.keysyms.KP_Enter):
self.search()
def reset_text(self):
self.set_text(self.default_text)
def search(self, *args):
search_term = self.get_text()
if search_term and search_term != self.default_text:
self.search_store.append([search_term])
search_url = SEARCH_URL % {'query' :zurllib.quote(search_term),
'client':'M-%s'%version.replace('.','-')}
self.timeout_id = gobject.timeout_add(2000, self.resensitize)
self.set_sensitive(False)
self.visit_url_func(search_url, callback=self.resensitize)
else:
self.reset_text()
self.select_region(0, -1)
self.grab_focus()
def resensitize(self):
self.set_sensitive(True)
self.reset_text()
if self.timeout_id is not None:
gobject.source_remove(self.timeout_id)
self.timeout_id = None
class DownloadInfoFrame(object):
def __init__(self, config, torrentqueue):
self.config = config
if self.config['save_in'] == '':
self.config['save_in'] = smart_dir('')
self.torrentqueue = torrentqueue
self.torrents = {}
self.running_torrents = {}
self.lists = {}
self.update_handle = None
self.unhighlight_handle = None
self.custom_size = False
self.child_windows = {}
self.postponed_save_windows = []
self.helpwindow = None
self.errordialog = None
self.mainwindow = Window(gtk.WINDOW_TOPLEVEL)
#tray icon
self.trayicon = TrayIcon(not self.config['start_minimized'],
toggle_func=self.toggle_shown,
quit_func=self.quit)
self.traythread = threading.Thread(target=self.trayicon.enable,
args=())
self.traythread.setDaemon(True)
if os.name == "nt":
# gtk has no way to check this?
self.iconized = False
self.mainwindow.connect('window-state-event', self.window_event)
if self.config['start_minimized']:
self.mainwindow.iconify()
gtk.threads_enter()
self.mainwindow.set_border_width(0)
self.set_seen_remote_connections(False)
self.set_seen_connections(False)
self.mainwindow.drag_dest_set(gtk.DEST_DEFAULT_ALL,
TARGET_EXTERNAL,
gtk.gdk.ACTION_MOVE|gtk.gdk.ACTION_COPY)
self.mainwindow.connect('drag_leave' , self.drag_leave )
self.mainwindow.connect('drag_data_received', self.accept_dropped_file)
self.mainwindow.set_size_request(WINDOW_WIDTH, -1)
self.mainwindow.connect('destroy', self.cancel)
self.mainwindow.connect('size-allocate', self.size_was_allocated)
self.accel_group = gtk.AccelGroup()
self.mainwindow.add_accel_group(self.accel_group)
#self.accel_group.connect(ord('W'), gtk.gdk.CONTROL_MASK, gtk.ACCEL_LOCKED,
# lambda *args: self.mainwindow.destroy())
self.tooltips = gtk.Tooltips()
self.logbuffer = LogBuffer()
self.log_text(_("%s started")%app_name, severity=None)
self.box1 = gtk.VBox(homogeneous=False, spacing=0)
self.box2 = gtk.VBox(homogeneous=False, spacing=0)
self.box2.set_border_width(SPACING)
self.menubar = gtk.MenuBar()
self.box1.pack_start(self.menubar, expand=False, fill=False)
self.ssbutton = StopStartButton(self)
# keystrokes used: A D F H L N O P Q S U X (E)
quit_menu_label = _("_Quit")
if os.name == 'nt':
quit_menu_label = _("E_xit")
file_menu_items = ((_("_Open torrent file"), self.select_torrent_to_open),
(_("Open torrent _URL"), self.enter_url_to_open),
(_("Make _new torrent" ), self.make_new_torrent),
('----' , None),
(_("_Pause/Play"), self.ssbutton.toggle),
('----' , None),
(quit_menu_label , lambda w: self.mainwindow.destroy()),
)
view_menu_items = ((_("Show/Hide _finished torrents"), self.toggle_known),
# BUG: if you reorder this menu, see def set_custom_size() first
(_("_Resize window to fit"), lambda w: self.resize_to_fit()),
('----' , None),
(_("_Log") , lambda w: self.open_window('log')),
# 'View log of all download activity',
#('----' , None),
(_("_Settings") , lambda w: self.open_window('settings')),
#'Change download behavior and network settings',
)
help_menu_items = ((_("_Help") , self.open_help),
#(_("_Help Window") , lambda w: self.open_window('help')),
(_("_About") , lambda w: self.open_window('about')),
(_("_Donate") , lambda w: self.donate()),
#(_("Rais_e") , lambda w: self.raiseerror()),
)
self.filemenu = gtk.MenuItem(_("_File"))
self.filemenu.set_submenu(build_menu(file_menu_items, self.accel_group))
self.filemenu.show()
self.viewmenu = gtk.MenuItem(_("_View"))
self.viewmenu.set_submenu(build_menu(view_menu_items, self.accel_group))
self.viewmenu.show()
self.helpmenu = gtk.MenuItem(_("_Help"))
self.helpmenu.set_submenu(build_menu(help_menu_items, self.accel_group))
self.helpmenu.show()
if os.name != 'nt':
self.helpmenu.set_right_justified(True)
self.menubar.append(self.filemenu)
self.menubar.append(self.viewmenu)
self.menubar.append(self.helpmenu)
self.menubar.show()
self.header = gtk.HBox(homogeneous=False)
self.box1.pack_start(self.box2, expand=False, fill=False)
# control box: rate slider, start-stop button, search widget, status light
self.controlbox = gtk.HBox(homogeneous=False)
controlbox_padding = SPACING//2
# stop-start button
self.controlbox.pack_start(malign(self.ssbutton),
expand=False, fill=False)
# rate slider
self.rate_slider_box = RateSliderBox(self.config, self.torrentqueue)
self.controlbox.pack_start(self.rate_slider_box,
expand=True, fill=True,
padding=controlbox_padding)
self.controlbox.pack_start(gtk.VSeparator(), expand=False, fill=False,
padding=controlbox_padding)
# search box
self.search_field = SearchField(_("Search for torrents"), self.visit_url)
sfa = gtk.Alignment(xalign=0, yalign=0.5, xscale=1, yscale=0)
sfa.add(self.search_field)
self.controlbox.pack_start(sfa,
expand=False, fill=False, padding=controlbox_padding)
# separator
self.controlbox.pack_start(gtk.VSeparator(), expand=False, fill=False,
padding=controlbox_padding)
# status light
self.status_light = StatusLight(self)
self.controlbox.pack_start(malign(self.status_light),
expand=False, fill=False)
self.box2.pack_start(self.controlbox,
expand=False, fill=False, padding=0)
# end control box
self.paned = gtk.VPaned()
self.knownscroll = ScrolledWindow()
self.knownscroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
self.knownscroll.set_shadow_type(gtk.SHADOW_NONE)
self.knownscroll.set_size_request(-1, SPACING)
self.knownbox = KnownBox(self)
self.knownbox.set_border_width(SPACING)
self.knownscroll.add_with_viewport(self.knownbox)
self.paned.pack1(self.knownscroll, resize=False, shrink=True)
self.mainscroll = AutoScrollingWindow()
self.mainscroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
self.mainscroll.set_shadow_type(gtk.SHADOW_NONE)
self.mainscroll.set_size_request(-1, SPACING)
self.scrollbox = RunningAndQueueBox(self, homogeneous=False)
self.scrollbox.set_border_width(SPACING)
self.runbox = RunningBox(self)
self.scrollbox.pack_start(self.runbox, expand=False, fill=False)
self.scrollbox.pack_start(DroppableHSeparator(self.scrollbox), expand=False, fill=False)
self.queuebox = QueuedBox(self)
self.scrollbox.pack_start(self.queuebox, expand=False, fill=False)
self.scrollbox.pack_start(SpacerBox(self), expand=True, fill=True)
self.mainscroll.add_with_viewport(self.scrollbox)
self.paned.pack2(self.mainscroll, resize=True, shrink=False)
self.box1.pack_start(self.paned)
self.box1.show_all()
self.mainwindow.add(self.box1)
self.set_title()
self.set_size()
self.mainwindow.show()
self.paned.set_position(0)
self.search_field.grab_focus()
self.updater = NewVersion.Updater(
gtk_wrap,
self.new_version,
self.torrentqueue.start_new_torrent,
self.confirm_install_new_version ,
self.global_error ,
self.config['new_version'] ,
self.config['current_version'] )
self.nag()
gtk.threads_leave()
def window_event(self, widget, event, *args):
if event.changed_mask == gtk.gdk.WINDOW_STATE_ICONIFIED:
if self.config['minimize_to_tray']:
if self.iconized == False:
self.mainwindow.hide()
self.trayicon.set_toggle_state(self.iconized)
self.iconized = not self.iconized
def drag_leave(self, *args):
self.drag_end()
def make_new_torrent(self, widget=None):
btspawn(self.torrentqueue, 'maketorrent')
def accept_dropped_file(self, widget, context, x, y, selection,
targetType, time):
if targetType == EXTERNAL_FILE_TYPE:
d = selection.data.strip()
file_uris = d.split('\r\n')
for file_uri in file_uris:
# this catches non-url entries, I've seen "\x00" at the end of lists
if file_uri.find(':/') != -1:
file_name = zurllib.url2pathname(file_uri)
file_name = file_name[7:]
if os.name == 'nt':
file_name = file_name.strip('\\')
self.open_torrent( file_name )
elif targetType == EXTERNAL_STRING_TYPE:
data = selection.data.strip()
# size must be > 0,0 for the intersection code to register it
drop_rect = gtk.gdk.Rectangle(x, y, 1, 1)
if ((self.search_field.intersect(drop_rect) is not None) and
(not data.lower().endswith(".torrent"))):
client_point = self.mainwindow.translate_coordinates(self.search_field, x, y)
layout_offset = self.search_field.get_layout_offsets()
point = []
# subtract (not add) the offset, because we're hit-testing the layout, not the widget
point.append(client_point[0] - layout_offset[0])
point.append(client_point[1] - layout_offset[1])
# ha ha ha. pango is so ridiculous
point[0] *= pango.SCALE
point[1] *= pango.SCALE
layout = self.search_field.get_layout()
position = layout.xy_to_index(*point)
self.search_field.insert_text(data, position[0])
else:
self.open_url(data)
def drag_highlight(self, widget=None):
widgets = (self.knownbox, self.runbox, self.queuebox)
for w in widgets:
if w != widget:
w.drag_unhighlight()
for w in widgets:
if w == widget:
w.drag_highlight()
self.add_unhighlight_handle()
def drag_end(self):
self.drag_highlight(widget=None)
self.mainscroll.stop_scrolling()
def set_title(self, torrentName=None, fractionDone=None):
title = app_name
trunc = '...'
sep = ': '
if self.config['pause']:
title += sep+_("(stopped)")
elif len(self.running_torrents) == 1 and torrentName and \
fractionDone is not None:
maxlen = WINDOW_TITLE_LENGTH - len(app_name) - len(trunc) - len(sep)
if len(torrentName) > maxlen:
torrentName = torrentName[:maxlen] + trunc
title = '%s%s%0.1f%%%s%s'% (app_name,
sep,
(int(fractionDone*1000)/10),
sep,
torrentName)
elif len(self.running_torrents) > 1:
title += sep+_("(multiple)")
if self.mainwindow.get_title() != title:
self.mainwindow.set_title(title)
if self.trayicon.get_tooltip() != title:
self.trayicon.set_tooltip(title)
def _guess_size(self):
paned_height = self.scrollbox.size_request()[1]
if hasattr(self.paned, 'style_get_property'):
paned_height += self.paned.style_get_property('handle-size')
else:
paned_height += 5
paned_height += self.paned.get_position()
paned_height += 4 # fudge factor, probably from scrolled window beveling ?
paned_height = max(paned_height, MIN_MULTI_PANE_HEIGHT)
new_height = self.menubar.size_request()[1] + \
self.box2.size_request()[1] + \
paned_height
new_height = min(new_height, MAX_WINDOW_HEIGHT)
new_width = max(self.scrollbox.size_request()[0] + SCROLLBAR_WIDTH, WINDOW_WIDTH)
return new_width, new_height
def set_size(self):
if not self.custom_size:
self.mainwindow.resize(*self._guess_size())
def size_was_allocated(self, *args):
current_size = self.mainwindow.get_size()
target_size = self._guess_size()
if current_size == target_size:
self.set_custom_size(False)
else:
self.set_custom_size(True)
def resize_to_fit(self):
self.set_custom_size(False)
self.set_size()
def set_custom_size(self, val):
self.custom_size = val
# BUG this is a hack:
self.viewmenu.get_submenu().get_children()[1].set_sensitive(val)
# BUG need to add handler on resize event to keep track of
# old_position when pane is hidden manually
def split_pane(self):
pos = self.paned.get_position()
if pos > 0:
self.paned.old_position = pos
self.paned.set_position(0)
else:
if hasattr(self.paned, 'old_position'):
self.paned.set_position(self.paned.old_position)
else:
self.maximize_known_pane()
def maximize_known_pane(self):
self.set_pane_position(self.knownbox.size_request()[1])
def set_pane_position(self, pane_position):
pane_position = min(MAX_WINDOW_HEIGHT//2, pane_position)
self.paned.set_position(pane_position)
def toggle_known(self, widget=None):
self.split_pane()
def open_window(self, window_name, *args, **kwargs):
if os.name == 'nt':
self.mainwindow.present()
savewidget = SaveFileSelection
if window_name == 'savedir':
savewidget = CreateFolderSelection
window_name = 'savefile'
if self.child_windows.has_key(window_name):
if window_name == 'savefile':
kwargs['show'] = False
self.postponed_save_windows.append(savewidget(self, **kwargs))
return
if window_name == 'log' :
self.child_windows[window_name] = LogWindow(self, self.logbuffer, self.config)
elif window_name == 'about' :
self.child_windows[window_name] = AboutWindow(self, lambda w: self.donate())
elif window_name == 'help' :
self.child_windows[window_name] = HelpWindow(self, makeHelp('bittorrent', defaults))
elif window_name == 'settings':
self.child_windows[window_name] = SettingsWindow(self, self.config, self.set_config)
elif window_name == 'version' :
self.child_windows[window_name] = VersionWindow(self, *args)
elif window_name == 'openfile':
self.child_windows[window_name] = OpenFileSelection(self, **kwargs)
elif window_name == 'savefile':
self.child_windows[window_name] = savewidget(self, **kwargs)
elif window_name == 'choosefolder':
self.child_windows[window_name] = ChooseFolderSelection(self, **kwargs)
elif window_name == 'enterurl':
self.child_windows[window_name] = EnterUrlDialog(self, **kwargs)
return self.child_windows[window_name]
def window_closed(self, window_name):
if self.child_windows.has_key(window_name):
del self.child_windows[window_name]
if window_name == 'savefile' and self.postponed_save_windows:
newwin = self.postponed_save_windows.pop(-1)
newwin.show()
self.child_windows['savefile'] = newwin
def close_window(self, window_name):
self.child_windows[window_name].close(None)
def new_version(self, newversion, download_url):
if not self.config['notified'] or \
newversion != NewVersion.Version.from_str(self.config['notified']):
if not self.torrents.has_key(self.updater.infohash):
self.open_window('version', newversion, download_url)
else:
dlpath = os.path.split(self.torrents[self.updater.infohash].dlpath)[0]
self.updater.set_installer_dir(dlpath)
self.updater.start_install()
def check_version(self):
self.updater.check()
def start_auto_update(self):
if not self.torrents.has_key(self.updater.infohash):
self.updater.download()
else:
self.global_error(INFO, _("Already downloading %s installer") % self.updater.version)
def confirm_install_new_version(self):
MessageDialog(self.mainwindow,
_("Install new %s now?")%app_name,
_("Do you want to quit %s and install the new version, "
"%s, now?")%(app_name,self.updater.version),
type=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_YES_NO,
yesfunc=self.install_new_version,
nofunc=None,
default=gtk.RESPONSE_YES
)
def install_new_version(self):
self.updater.launch_installer(self.torrentqueue)
self.cancel()
def open_help(self,widget):
if self.helpwindow is None:
msg = (_("%s help is at \n%s\nWould you like to go there now?")%
(app_name, HELP_URL))
self.helpwindow = MessageDialog(self.mainwindow,
_("Visit help web page?"),
msg,
type=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_OK_CANCEL,
yesfunc=self.visit_help,
nofunc =self.help_closed,
default=gtk.RESPONSE_OK
)
def visit_help(self):
self.visit_url(HELP_URL)
self.help_closed()
def close_help(self):
self.helpwindow.close()
def help_closed(self, widget=None):
self.helpwindow = None
def set_config(self, option, value):
self.config[option] = value
if option == 'display_interval':
self.init_updates()
self.torrentqueue.set_config(option, value)
def confirm_remove_finished_torrents(self,widget):
count = 0
for infohash, t in self.torrents.iteritems():
if t.state == KNOWN and t.completion >= 1:
count += 1
if count:
if self.paned.get_position() == 0:
self.toggle_known()
msg = ''
if count == 1:
msg = _("There is one finished torrent in the list. ") + \
_("Do you want to remove it?")
else:
msg = _("There are %d finished torrents in the list. ") % count +\
_("Do you want to remove all of them?")
MessageDialog(self.mainwindow,
_("Remove all finished torrents?"),
msg,
type=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_OK_CANCEL,
yesfunc=self.remove_finished_torrents,
default=gtk.RESPONSE_OK)
else:
MessageDialog(self.mainwindow,
_("No finished torrents"),
_("There are no finished torrents to remove."),
type=gtk.MESSAGE_INFO,
default=gtk.RESPONSE_OK)
def remove_finished_torrents(self):
for infohash, t in self.torrents.iteritems():
if t.state == KNOWN and t.completion >= 1:
self.torrentqueue.remove_torrent(infohash)
if self.paned.get_position() > 0:
self.toggle_known()
def cancel(self, widget=None):
for window_name in self.child_windows.keys():
self.close_window(window_name)
if self.errordialog is not None:
self.errordialog.destroy()
self.errors_closed()
for t in self.torrents.itervalues():
if t.widget is not None:
t.widget.close_child_windows()
self.torrentqueue.set_done()
gtk.main_quit()
# Currently called if the user started bittorrent from a terminal
# and presses ctrl-c there, or if the user quits BitTorrent from
# the tray icon (on windows)
def quit(self):
self.mainwindow.destroy()
def make_statusrequest(self):
if self.config['pause']:
return True
for infohash, t in self.running_torrents.iteritems():
self.torrentqueue.request_status(infohash, t.widget.peerlistwindow
is not None, t.widget.filelistwindow is not None)
if not len(self.running_torrents):
self.status_light.send_message('empty')
return True
def enter_url_to_open(self, widget):
self.open_window('enterurl')
def open_url(self, url):
self.torrentqueue.start_new_torrent_by_name(url)
def select_torrent_to_open(self, widget):
open_location = self.config['open_from']
if not open_location:
open_location = self.config['save_in']
path = smart_dir(open_location)
self.open_window('openfile',
title=_("Open torrent:"),
fullname=path,
got_location_func=self.open_torrent,
no_location_func=lambda: self.window_closed('openfile'))
def open_torrent(self, name):
self.window_closed('openfile')
open_location = os.path.split(name)[0]
if open_location[-1] != os.sep:
open_location += os.sep
self.set_config('open_from', open_location)
self.torrentqueue.start_new_torrent_by_name(name)
def change_save_location(self, infohash):
def no_location():
self.window_closed('savefile')
t = self.torrents[infohash]
metainfo = t.metainfo
selector = self.open_window(metainfo.is_batch and 'savedir' or \
'savefile',
title=_("Change save location for ") + metainfo.name,
fullname=t.dlpath,
got_location_func = \
lambda fn: self.got_changed_location(infohash, fn),
no_location_func=no_location)
def got_changed_location(self, infohash, fullpath):
self.window_closed('savefile')
self.torrentqueue.set_save_location(infohash, fullpath)
def save_location(self, infohash, metainfo):
name = metainfo.name_fs
if self.config['save_as'] and \
os.access(os.path.split(self.config['save_as'])[0], os.W_OK):
path = self.config['save_as']
self.got_location(infohash, path, store_in_config=False)
self.config['save_as'] = ''
return
path = smart_dir(self.config['save_in'])
fullname = os.path.join(path, name)
if not self.config['ask_for_save']:
if os.access(fullname, os.F_OK):
message = MessageDialog(self.mainwindow,
_("File exists!"),
_('"%s" already exists. '
"Do you want to choose a different file name?") % path_wrap(name),
buttons=gtk.BUTTONS_YES_NO,
nofunc= lambda : self.got_location(infohash, fullname),
yesfunc=lambda : self.get_save_location(infohash, metainfo, fullname),
default=gtk.RESPONSE_NO)
else:
self.got_location(infohash, fullname)
else:
self.get_save_location(infohash, metainfo, fullname)
def get_save_location(self, infohash, metainfo, fullname):
def no_location():
self.window_closed('savefile')
self.torrentqueue.remove_torrent(infohash)
selector = self.open_window(metainfo.is_batch and 'savedir' or \
'savefile',
title=_("Save location for ") + metainfo.name,
fullname=fullname,
got_location_func = lambda fn: \
self.got_location(infohash, fn),
no_location_func=no_location)
self.torrents[infohash].widget = selector
def got_location(self, infohash, fullpath, store_in_config=True):
self.window_closed('savefile')
self.torrents[infohash].widget = None
save_in = os.path.split(fullpath)[0]
metainfo = self.torrents[infohash].metainfo
if metainfo.is_batch:
bottom_dirs, top_dir_name = os.path.split(save_in)
if metainfo.name_fs == top_dir_name:
message = MessageDialog(self.mainwindow, _("Directory exists!"),
_('"%s" already exists.'\
" Do you intend to create an identical,"\
" duplicate directory inside the existing"\
" directory?")%path_wrap(save_in),
buttons=gtk.BUTTONS_YES_NO,
nofunc =lambda : self.got_location(infohash, save_in ),
yesfunc=lambda : self._got_location(infohash, save_in, fullpath, store_in_config=store_in_config),
default=gtk.RESPONSE_NO,
)
return
self._got_location(infohash, save_in, fullpath, store_in_config=store_in_config)
def _got_location(self, infohash, save_in, fullpath, store_in_config=True):
if store_in_config:
if save_in[-1] != os.sep:
save_in += os.sep
self.set_config('save_in', save_in)
self.torrents[infohash].dlpath = fullpath
self.torrentqueue.set_save_location(infohash, fullpath)
def add_unhighlight_handle(self):
if self.unhighlight_handle is not None:
gobject.source_remove(self.unhighlight_handle)
self.unhighlight_handle = gobject.timeout_add(2000,
self.unhighlight_after_a_while,
priority=gobject.PRIORITY_LOW)
def unhighlight_after_a_while(self):
self.drag_highlight()
gobject.source_remove(self.unhighlight_handle)
self.unhighlight_handle = None
return False
def init_updates(self):
if self.update_handle is not None:
gobject.source_remove(self.update_handle)
self.update_handle = gobject.timeout_add(
int(self.config['display_interval'] * 1000),
self.make_statusrequest)
def remove_torrent_widget(self, infohash):
t = self.torrents[infohash]
self.lists[t.state].remove(infohash)
if t.state == RUNNING:
del self.running_torrents[infohash]
self.set_title()
if t.state == ASKING_LOCATION:
if t.widget is not None:
t.widget.destroy()
return
if t.state in (KNOWN, RUNNING, QUEUED):
t.widget.close_child_windows()
if t.state == RUNNING:
self.runbox.remove(t.widget)
elif t.state == QUEUED:
self.queuebox.remove(t.widget)
elif t.state == KNOWN:
self.knownbox.remove(t.widget)
t.widget.destroy()
self.set_size()
def create_torrent_widget(self, infohash, queuepos=None):
t = self.torrents[infohash]
l = self.lists.setdefault(t.state, [])
if queuepos is None:
l.append(infohash)
else:
l.insert(queuepos, infohash)
if t.state == ASKING_LOCATION:
self.save_location(infohash, t.metainfo)
self.nag()
return
elif t.state == RUNNING:
self.running_torrents[infohash] = t
if not self.config['pause']:
t.widget = RunningTorrentBox(infohash, t.metainfo, t.dlpath,
t.completion, self)
else:
t.widget = PausedTorrentBox(infohash, t.metainfo, t.dlpath,
t.completion, self)
box = self.runbox
elif t.state == QUEUED:
t.widget = QueuedTorrentBox(infohash, t.metainfo, t.dlpath,
t.completion, self)
box = self.queuebox
elif t.state == KNOWN:
t.widget = KnownTorrentBox(infohash, t.metainfo, t.dlpath,
t.completion, self)
box = self.knownbox
box.pack_start(t.widget, expand=False, fill=False)
if queuepos is not None:
box.reorder_child(t.widget, queuepos)
self.set_size()
def log_text(self, text, severity=ERROR):
self.logbuffer.log_text(text, severity)
if self.child_windows.has_key('log'):
self.child_windows['log'].scroll_to_end()
def _error(self, severity, err_str):
err_str = err_str.decode('utf-8', 'replace').encode('utf-8')
err_str = err_str.strip()
if severity >= ERROR:
self.error_modal(err_str)
self.log_text(err_str, severity)
def error(self, infohash, severity, text):
if self.torrents.has_key(infohash):
name = self.torrents[infohash].metainfo.name
err_str = '"%s" : %s'%(name,text)
self._error(severity, err_str)
else:
ihex = infohash.encode('hex')
err_str = '"%s" : %s'%(ihex,text)
self._error(severity, err_str)
self._error(WARNING, 'Previous error raised for invalid infohash: "%s"' % ihex)
def global_error(self, severity, text):
err_str = _("(global message) : %s")%text
self._error(severity, err_str)
def error_modal(self, text):
if self.child_windows.has_key('log'):
return
title = _("%s Error") % app_name
if self.errordialog is not None:
if not self.errordialog.multi:
self.errordialog.destroy()
self.errordialog = MessageDialog(self.mainwindow, title,
_("Multiple errors have occurred. "
"Click OK to view the error log."),
buttons=gtk.BUTTONS_OK_CANCEL,
yesfunc=self.multiple_errors_yes,
nofunc=self.errors_closed,
default=gtk.RESPONSE_OK
)
self.errordialog.multi = True
else:
# already showing the multi error dialog, so do nothing
pass
else:
self.errordialog = MessageDialog(self.mainwindow, title, text,
yesfunc=self.errors_closed,
default=gtk.RESPONSE_OK)
self.errordialog.multi = False
def multiple_errors_yes(self):
self.errors_closed()
self.open_window('log')
def errors_closed(self):
self.errordialog = None
def open_log(self):
self.open_window('log')
def stop_queue(self):
self.set_config('pause', True)
self.set_title()
self.status_light.send_message('stop')
self.set_seen_remote_connections(False)
self.set_seen_connections(False)
q = list(self.runbox.get_queue())
for infohash in q:
t = self.torrents[infohash]
self.remove_torrent_widget(infohash)
self.create_torrent_widget(infohash)
def restart_queue(self):
self.set_config('pause', False)
q = list(self.runbox.get_queue())
for infohash in q:
t = self.torrents[infohash]
self.remove_torrent_widget(infohash)
self.create_torrent_widget(infohash)
self.start_status_light()
def start_status_light(self):
if len(self.running_torrents):
self.status_light.send_message('start')
else:
self.status_light.send_message('empty')
def update_status(self, torrent, statistics):
if self.config['pause']:
self.status_light.send_message('start')
return
if self.seen_remote_connections:
self.status_light.send_message('seen_remote_peers')
elif self.seen_connections:
self.status_light.send_message('seen_peers')
else:
self.start_status_light()
self.running_torrents[torrent].widget.update_status(statistics)
if statistics.get('numPeers'):
self.set_seen_connections(seen=True)
if (not self.seen_remote_connections and
statistics.get('ever_got_incoming')):
self.set_seen_remote_connections(seen=True)
if self.updater is not None:
updater_infohash = self.updater.infohash
if self.torrents.has_key(updater_infohash):
updater_torrent = self.torrents[updater_infohash]
if updater_torrent.state == QUEUED:
self.change_torrent_state(updater_infohash, RUNNING,
index=0, replaced=0,
force_running=True)
def set_seen_remote_connections(self, seen=False):
if seen:
self.status_light.send_message('seen_remote_peers')
self.seen_remote_connections = seen
def set_seen_connections(self, seen=False):
if seen:
self.status_light.send_message('seen_peers')
self.seen_connections = seen
def new_displayed_torrent(self, infohash, metainfo, dlpath, state, config,
completion=None, uptotal=0, downtotal=0):
t = Struct()
t.metainfo = metainfo
t.dlpath = dlpath
t.state = state
t.config = config
t.completion = completion
t.uptotal = uptotal
t.downtotal = downtotal
t.widget = None
self.torrents[infohash] = t
self.create_torrent_widget(infohash)
def torrent_state_changed(self, infohash, dlpath, state, completion,
uptotal, downtotal, queuepos=None):
t = self.torrents[infohash]
self.remove_torrent_widget(infohash)
t.dlpath = dlpath
t.state = state
t.completion = completion
t.uptotal = uptotal
t.downtotal = downtotal
self.create_torrent_widget(infohash, queuepos)
def reorder_torrent(self, infohash, queuepos):
self.remove_torrent_widget(infohash)
self.create_torrent_widget(infohash, queuepos)
def update_completion(self, infohash, completion, files_left=None,
files_allocated=None):
t = self.torrents[infohash]
if files_left is not None and t.widget.filelistwindow is not None:
t.widget.filelistwindow.update(files_left, files_allocated)
def removed_torrent(self, infohash):
self.remove_torrent_widget(infohash)
del self.torrents[infohash]
def change_torrent_state(self, infohash, newstate, index=None,
replaced=None, force_running=False):
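        # pred/succ are the infohashes adjacent to the requested queue slot,
        # presumably so the queue backend can splice the torrent between them.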
t = self.torrents[infohash]
pred = succ = None
if index is not None:
l = self.lists.setdefault(newstate, [])
if index > 0:
pred = l[index - 1]
if index < len(l):
succ = l[index]
self.torrentqueue.change_torrent_state(infohash, t.state, newstate,
pred, succ, replaced, force_running)
def finish(self, infohash):
t = self.torrents[infohash]
if t is None or t.state == KNOWN:
return
self.change_torrent_state(infohash, KNOWN)
def confirm_replace_running_torrent(self, infohash, replaced, index):
replace_func = lambda *args: self.change_torrent_state(infohash,
RUNNING, index, replaced)
add_func = lambda *args: self.change_torrent_state(infohash,
RUNNING, index, force_running=True)
moved_torrent = self.torrents[infohash]
if moved_torrent.state == RUNNING:
self.change_torrent_state(infohash, RUNNING, index)
return
if self.config['start_torrent_behavior'] == 'replace':
replace_func()
return
elif self.config['start_torrent_behavior'] == 'add':
add_func()
return
moved_torrent_name = moved_torrent.metainfo.name
confirm = MessageDialog(self.mainwindow,
_("Stop running torrent?"),
_('You are about to start "%s". Do you want to stop another running torrent as well?')%(moved_torrent_name),
type=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_YES_NO,
yesfunc=replace_func,
nofunc=add_func,
default=gtk.RESPONSE_YES)
def nag(self):
if ((self.config['donated'] != version) and
#(random.random() * NAG_FREQUENCY) < 1) and
False):
title = _("Have you donated?")
message = _("Welcome to the new version of %s. Have you donated?")%app_name
self.nagwindow = MessageDialog(self.mainwindow,
title,
message,
type=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_YES_NO,
yesfunc=self.nag_yes, nofunc=self.nag_no,
default=gtk.RESPONSE_NO)
def nag_no(self):
self.donate()
def nag_yes(self):
self.set_config('donated', version)
MessageDialog(self.mainwindow,
_("Thanks!"),
_("Thanks for donating! To donate again, "
'select "Donate" from the "Help" menu.'),
type=gtk.MESSAGE_INFO,
default=gtk.RESPONSE_OK
)
def donate(self):
self.visit_url(DONATE_URL)
def visit_url(self, url, callback=None):
t = threading.Thread(target=self._visit_url,
args=(url,callback))
t.setDaemon(True)
t.start()
def _visit_url(self, url, callback=None):
webbrowser.open(url)
if callback:
gtk_wrap(callback)
def toggle_shown(self):
if self.config['minimize_to_tray']:
if self.mainwindow.get_property('visible'):
self.mainwindow.hide()
else:
self.mainwindow.show_all()
else:
if not self.iconized:
self.mainwindow.iconify()
else:
self.mainwindow.deiconify()
def raiseerror(self, *args):
raise ValueError('test traceback behavior')
# This class provides a thin layer around the loop so that the main window
# doesn't have to run it. It protects against exceptions in mainwindow creation
# preventing the loop from starting (and causing "The grey screen of BT").
class MainLoop:
def __init__(self):
self.mainwindow = None
self.started = 0
gtk.threads_init()
def set_mainwindow(self, mainwindow):
self.mainwindow = mainwindow
def run(self):
self.mainwindow.traythread.start()
gtk.threads_enter()
if self.mainwindow:
self.mainwindow.ssbutton.set_paused(self.mainwindow.config['pause'])
self.mainwindow.rate_slider_box.start()
self.mainwindow.init_updates()
try:
#the main loop has been started
self.started = 1
gtk.main()
except KeyboardInterrupt:
gtk.threads_leave()
if self.mainwindow:
self.mainwindow.torrentqueue.set_done()
raise
gtk.threads_leave()
def quit(self):
if self.mainwindow:
self.mainwindow.quit()
def btgui_exit_gtk(mainloop):
# if the main loop has never run, we have to run it to flush blocking threads
# if it has run, running it a second time will cause duplicate-destruction problems
if not mainloop.started:
# queue up a command to close the gui
gobject.idle_add(lock_wrap, mainloop.quit)
# run the main loop so we process all queued commands, then quit
mainloop.run()
if __name__ == '__main__':
mainloop = MainLoop()
# make sure we start the gtk loop once before we close
atexit.register(btgui_exit_gtk, mainloop)
torrentqueue = TorrentQueue.TorrentQueue(config, ui_options, ipc)
d = DownloadInfoFrame(config,TorrentQueue.ThreadWrappedQueue(torrentqueue))
mainloop.set_mainwindow(d)
global_log_func.logger = d.global_error
startflag = threading.Event()
dlthread = threading.Thread(target = torrentqueue.run,
args = (d, gtk_wrap, startflag))
dlthread.setDaemon(False)
dlthread.start()
startflag.wait()
# the wait may have been terminated because of an error
if torrentqueue.initialized == -1:
raise BTFailure(_("Could not start the TorrentQueue, see above for errors."))
torrentqueue.rawserver.install_sigint_handler()
for name in newtorrents:
d.torrentqueue.start_new_torrent_by_name(name)
try:
mainloop.run()
except KeyboardInterrupt:
# the gtk main loop is closed in MainLoop
sys.exit(1)
d.trayicon.disable()
|
joshovi/fossology
|
src/nomos/agent_tests/testdata/NomosTestfiles/Bittorrent/bittorrent.py
|
Python
|
gpl-2.0
| 145,532
|
[
"VisIt"
] |
999a8ec3ca580dca27e2ef9c9d9350cd3d42a877e4b89a5dd0d99c6d7a6551ea
|
template = """<div class="w3-card-2 w3-quarter w3-margin cast-card bloody">
<header class="w3-container">
<h3>{name}</h3>
</header>
<div class="w3-container">
<p> {role}</p>
</div>
</div>
"""
cast_positions = """Margo
Mingus
Samantha
Parker
Carla
Luna"""
cast_names = """Brenda Wooley
Aldo B.
Nikki Green
Michael Curry
Natalie Kelly
Courtney Land"""
print """<div class="w3-container" > <h3 class="w3-wide">Cast</h3>"""
cast_pairs = zip(cast_names.split("\n"), cast_positions.split("\n"))
for name, position in cast_pairs:
print(template.format(name=name, role=position))
print "</div>"
crew_positions = """Director
Producer
Executive Producer
Writer/Screenplay
1st AD
Director of Photography
1st AC
2nd AC
Gaffer
Key Grip
Sound Op
Boom Op
Key MUA
BTS Photography
PA 1
PA 2
PA 3
Editor
Composer 1
Composer 2
Composer 3
Craft Services"""
crew_names = """Brian "Sham" Green
Aldo Billingslea
Wm. Derek Grasty
Wm. Derek Grasty
Jay Raja
Gavin Murray
Ferguson Rogan
Stephen Bartlett
Ryan Nelson
Kat.
Adam Drakewolf
Steven Lagosh
Whittany Robinson
Nathan Adams
Lee Harold
Skylar Adams
Thomas Ariniello
Brian Green
Peder B Helland
Vidal Garcia
Derek Thomas
Grasty Family"""
print """<div class="w3-container" > <h3 class="w3-wide">Crew </h3>"""
crew_pairs = zip(crew_positions.split("\n"), crew_names.split("\n"))
for position, name in crew_pairs:
print(template.format(name=name, role=position))
print "</div>"
|
asteroidb612/night-skies
|
cast.py
|
Python
|
mit
| 1,438
|
[
"Brian"
] |
54ae7cd7638a7fb42709e2c45db313b4ef6ea721861640e2dd4b24526af00c1e
|
# Copyright (c) 2014, Andre Severo Pereira Gomes
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the {organization} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import math
import sys
from electronic_state import *
class fscc_results:
"""
    fscc_results fetches the relevant information from a DIRAC Fock-space CC
    output.
"""
def __init__ (self) :
self.states = [ ]
def process_output (self,outfile):
#
# what we want to read
#
version_str = "Release DIRAC([0-9]+)"
start_str = "\s+Solving equations for sector ([0-9]+)"
state_str = "\s+Irrep\s+(-?[A-Za-z0-9]+)\s+State\s+(\d+)\s+([-.0-9]+)\s+([-.0-9]+)"
det_s11_r_str = "^\s+(-?\d+\.\d+)\s+(-?[0-9a-zA-Z]+) #\s+(\d+) \(\s*(-?\d*\.\d+)\) ->\s+(-?[0-9a-zA-Z]+) #\s+(\d+) \(\s*(-?\d*\.\d+)\)"
det_s11_c_str = "^\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?[0-9a-zA-Z]+) #\s+(\d+) \(\s*(-?\d*\.\d+)\) ->\s+(-?[0-9a-zA-Z]+) #\s+(\d+) \(\s*(-?\d*\.\d+)\)"
det_s01_s10_r_str = "^\s+(-?\d+\.\d+)\s+\|\s+(-?[0-9a-zA-Z]+) #\s+(\d+) \(\s*(-?\d*\.\d+)\)"
det_s01_s10_c_str = "^\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+\|\s+(-?[0-9a-zA-Z]+) #\s+(\d+) \(\s*(-?\d*\.\d+)\)"
det_s02_s20_r_str = "^\s+(-?\d+\.\d+)\s+\|\s+(-?[0-9a-zA-Z]+) #\s+(\d+) \(\s*(-?\d*\.\d+)\)\, \s+(-?[-0-9a-zA-Z]+) #\s+(\d+) \(\s*(-?\d*\.\d+)\)"
det_s02_s20_c_str = "^\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+\|\s+(-?[-0-9a-zA-Z]+) #\s+(\d+) \(\s*(-?\d*\.\d+)\)\, \s+(-?[-0-9a-zA-Z]+) #\s+(\d+) \(\s*(-?\d*\.\d+)\)"
version_re = re.compile (r''+ version_str +'', re.IGNORECASE)
start_re = re.compile (r''+ start_str +'', re.IGNORECASE)
state_re = re.compile (r''+ state_str +'', re.IGNORECASE)
det_s11_r_re = re.compile (r''+ det_s11_r_str +'', re.IGNORECASE)
det_s11_c_re = re.compile (r''+ det_s11_c_str +'', re.IGNORECASE)
det_s01_s10_r_re = re.compile (r''+ det_s01_s10_r_str +'', re.IGNORECASE)
det_s01_s10_c_re = re.compile (r''+ det_s01_s10_c_str +'', re.IGNORECASE)
det_s02_s20_r_re = re.compile (r''+ det_s02_s20_r_str +'', re.IGNORECASE)
det_s02_s20_c_re = re.compile (r''+ det_s02_s20_c_str +'', re.IGNORECASE)
#
# now we get to work
#
program_version = 12
print "reading fscc output from file: ",outfile,"\n"
f = file(outfile,'r')
lines = f.readlines()
read_state = False
for i, l in enumerate(lines) :
if version_re.match (l) :
                program_version = int(version_re.match(l).group(1))
print "Dirac version recognized as ",program_version
if start_re.match (l) :
current_sector = start_re.match(l).group(1)
read_state = True
if read_state :
ca = 0.0
cb = 0.0
e_h = 0
e_p = 0
s_h = ""
s_p = ""
if state_re.match (l) :
label = state_re.match(l).group(1)
index = int(state_re.match(l).group(2))
if program_version <= 11 :
energy = float(state_re.match(l).group(4))
rel_en = float(state_re.match(l).group(3))
else :
energy = float(state_re.match(l).group(3))
rel_en = float(state_re.match(l).group(4))
state = electronic_state()
state.set_sector(current_sector)
state.set_label(label)
state.set_index(index)
state.set_energy(energy)
state.set_relative_energy(rel_en)
self.states.append(state)
elif det_s11_r_re.match (l) :
ca = float(det_s11_r_re.match(l).group(1))
cb = 0.0
i_h = float(det_s11_r_re.match(l).group(3))
i_p = float(det_s11_r_re.match(l).group(6))
e_h = float(det_s11_r_re.match(l).group(4))
e_p = float(det_s11_r_re.match(l).group(7))
s_h = det_s11_r_re.match(l).group(2)
s_p = det_s11_r_re.match(l).group(5)
state.add_determinant(ca,cb,i_h,e_h,i_p,e_p,s_h,s_p)
elif det_s11_c_re.match (l) :
ca = float(det_s11_c_re.match(l).group(1))
cb = float(det_s11_c_re.match(l).group(2))
i_h = float(det_s11_c_re.match(l).group(4))
i_p = float(det_s11_c_re.match(l).group(7))
e_h = float(det_s11_c_re.match(l).group(5))
e_p = float(det_s11_c_re.match(l).group(8))
s_h = det_s11_c_re.match(l).group(3)
s_p = det_s11_c_re.match(l).group(6)
state.add_determinant(ca,cb,i_h,e_h,i_p,e_p,s_h,s_p)
elif det_s02_s20_r_re.match (l) :
ca = float(det_s02_s20_r_re.match(l).group(1))
cb = 0.0
i_h = float(det_s02_s20_r_re.match(l).group(3))
i_p = float(det_s02_s20_r_re.match(l).group(6))
e_h = float(det_s02_s20_r_re.match(l).group(4))
e_p = float(det_s02_s20_r_re.match(l).group(7))
s_h = det_s02_s20_r_re.match(l).group(2)
s_p = det_s02_s20_r_re.match(l).group(5)
state.add_determinant(ca,cb,i_h,e_h,i_p,e_p,s_h,s_p)
elif det_s02_s20_c_re.match (l) :
ca = float(det_s02_s20_c_re.match(l).group(1))
cb = float(det_s02_s20_c_re.match(l).group(2))
i_h = float(det_s02_s20_c_re.match(l).group(4))
i_p = float(det_s02_s20_c_re.match(l).group(7))
e_h = float(det_s02_s20_c_re.match(l).group(5))
e_p = float(det_s02_s20_c_re.match(l).group(8))
s_h = det_s02_s20_c_re.match(l).group(3)
s_p = det_s02_s20_c_re.match(l).group(6)
state.add_determinant(ca,cb,i_h,e_h,i_p,e_p,s_h,s_p)
elif det_s01_s10_r_re.match (l) :
ca = float(det_s01_s10_r_re.match(l).group(1))
cb = 0.0
i = float(det_s01_s10_r_re.match(l).group(3))
e = float(det_s01_s10_r_re.match(l).group(4))
s = det_s01_s10_r_re.match(l).group(2)
if current_sector == "01" :
i_p = i
e_p = e
s_p = s
i_h = 0
e_h = 0.0
s_h = ""
elif current_sector == "10" :
i_p = 0
e_p = 0.0
s_p = ""
i_h = i
e_h = e
s_h = s
state.add_determinant(ca,cb,i_h,e_h,i_p,e_p,s_h,s_p)
elif det_s01_s10_c_re.match (l) :
ca = float(det_s01_s10_c_re.match(l).group(1))
cb = float(det_s01_s10_c_re.match(l).group(2))
i = float(det_s01_s10_c_re.match(l).group(4))
e = float(det_s01_s10_c_re.match(l).group(5))
s = det_s01_s10_c_re.match(l).group(3)
if current_sector == "01" :
i_p = i
e_p = e
s_p = s
i_h = 0
e_h = 0.0
s_h = ""
elif current_sector == "10" :
i_p = 0
e_p = 0.0
s_p = ""
i_h = i
e_h = e
s_h = s
state.add_determinant(ca,cb,i_h,e_h,i_p,e_p,s_h,s_p)
f.close()
# end
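# Usage sketch (editor's note; the file name is illustrative and the
# electronic_state API is assumed from the setters used above):
#   >>> r = fscc_results()
#   >>> r.process_output("dirac_fscc.out")
#   >>> print len(r.states), "states read"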
class tdrsp_results:
"""
    tdrsp_results fetches the relevant information from a DIRAC time-dependent
    response (TDDFT/TDHF) output.
"""
def __init__ (self) :
self.states = [ ]
def process_output (self,outfile):
#
# what we want to read
#
start_str = "\s+Analysis of response solution vectors"
tdrsp_symmetry = "^.*solution vectors : PP EXCITATION([0-9a-zA-Z]+)\s+Irrep:\s*([0-9a-zA-Z]+)"
tdrsp_energy = "^ Freq.:\s+([0-9.-]+)\s+Norm:\s+([0-9.eEdD+-]+)\s+Residual norm:\s+([0-9.eEdD+-]+)"
tdrsp_composition = "^\s+(\d+)\(i\:([0-9a-zA-Z]+)\)\s+--->\s+(\d+)\(v\:([0-9a-zA-Z]+)\)\s+([0-9.eEdD+-]+)"
start_re = re.compile (r''+ start_str +'', re.IGNORECASE)
tdrsp_sym_re = re.compile (r''+ tdrsp_symmetry +'', re.IGNORECASE)
tdrsp_ener_re = re.compile (r''+ tdrsp_energy +'', re.IGNORECASE)
tdrsp_compo_re = re.compile (r''+ tdrsp_composition +'', re.IGNORECASE)
#
# now we get to work
#
# for consistency, we define sector here, and since it is a single excitation we put it as 11=1h1p
current_sector = "11"
print "reading time-dependend calculation (tddft,tdhf) output from file: ",outfile,"\n"
f = file(outfile,'r')
lines = f.readlines()
read_state = False
index = 0
label = "a"
label_prev = "a"
for i, l in enumerate(lines) :
if start_re.match (l) :
read_state = True
if read_state :
ca = 0.0
cb = 0.0
e_h = 0
e_p = 0
s_h = ""
s_p = ""
if tdrsp_sym_re.match(l) :
label = tdrsp_sym_re.match(l).group(2)
elif tdrsp_ener_re.match(l) :
rel_en = float(tdrsp_ener_re.match(l).group(1))
norm = float(tdrsp_ener_re.match(l).group(2))
if label != label_prev :
index = 0
label_prev = label
index = index + 1
state = electronic_state()
state.set_sector(current_sector)
state.set_label(label)
state.set_index(index)
state.set_energy(rel_en)
self.states.append(state)
elif tdrsp_compo_re.match (l) :
# symm_h = tdrsp_compo_re.match(l).group(2)
# symm_p = tdrsp_compo_re.match(l).group(4)
weight = float(tdrsp_compo_re.match(l).group(5))
ca = weight*math.sqrt(2)
cb = 0.0
i_h = int(tdrsp_compo_re.match(l).group(1))
i_p = int(tdrsp_compo_re.match(l).group(3))
e_h = 0.0
e_p = 0.0
state.add_determinant(ca,cb,i_h,e_h,i_p,e_p,s_h,s_p)
f.close()
# end
|
aspgomes/dirac-tools
|
table-maker/src/reader.py
|
Python
|
bsd-3-clause
| 12,591
|
[
"DIRAC"
] |
644b919b5ef836c62d312fae9dc8b7ec1936dd49a2b63274c8725d7a29b459f8
|
import ambgmin_ as GMIN
import pele.potentials.gminpotential as gminpot
import numpy as np
import pele.basinhopping as bh
from pele.optimize import _quench as quench
from pele.takestep import displace
# export PYTHONPATH=/home/ss2029/svn/GMIN/bin:$PWD/../..
GMIN.initialize()
pot = gminpot.GMINPotental(GMIN)
coords = pot.getCoords()
step = displace.RandomDisplacement(stepsize=0.7)
opt = bh.BasinHopping(coords, pot, takeStep=step, quench=quench.lbfgs_py)
opt.quenchParameters['tol'] = 1e-4
opt.run(3)
# some visualization
try:
import pele.utils.pymolwrapper as pym
pym.start()
pym.draw_spheres(opt.coords, "A", 1)
except Exception:
print "Could not draw using pymol, skipping this step"
from pele.utils.xyz import write_xyz
write_xyz(open("final.xyz", "w"), opt.coords)
|
smcantab/pele
|
playground/amber/metenk/amber.py
|
Python
|
gpl-3.0
| 788
|
[
"PyMOL"
] |
7d728c87bc7aeaf8316a322dc2c0b443c7bebfcad123e1ff39fe001e6383aa4e
|
################################################################################
#
# Author: Christopher Helmes
# Date: August 2015
#
# Copyright (C) 2015 Christopher Helmes
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tmLQCD. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
#
# Function: Functions for linear and quadratic interpolation
#
# For informations on input parameters see the description of the function.
#
################################################################################
from scipy.optimize import leastsq
import scipy.stats
import math
import numpy as np
import analyze_fcts as af
import chiral_fits as chf
__all__=["ipol_lin","ipol_quad","eval_lin","eval_quad","err_prop_gauss",
"eval_chi_pt_cont","sum_error_sym","sum_error_asym"]
def ipol_lin(y_boot,x):
""" Interpolate bootstrapsamples of data linearly
This function calculates a linear interpolation from 2 x values and
bootstrapsamples of 2 yvalues y = c0*x+c1
Args:
y_boot: the bootstrapsamples of the data points to interpolate. Need
shape[1] = 2
x: the x-values to use not bootstrapped with shape[0] = 2
Returns:
The interpolation coefficients c for all bootstrapsamples
"""
# Use a bootstrapsamplewise linear, newtonian interpolation
b_m = np.divide((y_boot[:,1]-y_boot[:,0]),(x[1]-x[0]))
b_b = y_boot[:,0]-np.multiply(b_m,x[0])
interpol = np.zeros_like(y_boot)
interpol[:,0], interpol[:,1] = b_m, b_b
return interpol
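# Worked example (editor's sketch): two bootstrap samples interpolated
# between x = 0 and x = 2; columns of the result are (slope, intercept).
#   >>> ipol_lin(np.array([[1.0, 3.0], [1.1, 2.9]]), np.array([0.0, 2.0]))
#   array([[ 1. ,  1. ],
#          [ 0.9,  1.1]])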
def ipol_quad(y_boot, x):
""" Interpolate bootstrapsamples of data quadratically
This function calculates a quadratic interpolation from 3 x values and
bootstrapsamples of 3 yvalues like y = c0*x**2 + c1*x + c2
Args:
y_boot: the bootstrapsamples of the data points to interpolate. Need
shape[1] = 3
x: the x-values to use not bootstrapped with shape[0] = 3
Returns:
The interpolation coefficients c for all bootstrapsamples
"""
# Use a bootstrapsamplewise quadratic interpolation
# result coefficients
interpol = np.zeros_like(y_boot)
# loop over bootstrapsamples
for _b in range(y_boot.shape[0]):
# the known function values
y = y_boot[_b,:]
m = np.zeros((y.shape[0],y.shape[0]))
mu_sq = np.square(x)
# Setting the coefficient matrix m with the x values
#TODO: Have to automate setting somehow
m[:,0] = np.square(x)
m[:,1] = np.asarray(x)
m[:,2] = np.ones_like(x)
# Solve the matrix wise problem with linalg
coeff = np.linalg.solve(m,y)
        if not np.allclose(np.dot(m, coeff), y):
            print("solve failed in sample %d" % _b)
        else:
            interpol[_b, :] = coeff
return interpol
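# Editor's note: the 3x3 system solved above is a Vandermonde system in x;
# np.linalg.solve recovers (c0, c1, c2) exactly whenever the three x values
# are distinct.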
def eval_lin(lin_coeff, x):
""" Evaluates bootstrapsamples of coefficients at bootstraps of x for y =
m*x+b
Args:
lin_coeff: (nb_samples,coeff) NumPy array
x: Bootstrapsamples of xvalues
Returns:
eval_boot: The bootstrapsamples of y values
"""
eval_boot = np.multiply(lin_coeff[:,0],x)+lin_coeff[:,1]
return eval_boot
def eval_quad(quad_coeff, x):
""" Evaluates bootstrapsamples of coefficients at bootstraps of x for y =
c0*x^2 + c1*x + c2
Args:
quad_coeff: (nb_samples,coeff) NumPy array
x: Bootstrapsamples of xvalues
Returns:
eval_boot: The bootstrapsamples of y values
"""
eval_boot = np.multiply(quad_coeff[:,0],np.square(x))+ np.multiply(quad_coeff[:,1],x)+quad_coeff[:,2]
return eval_boot
def eval_chi_pt_cont(p,mpi):
""" Continuum chiral perturbation formula for KK I = 1 scattering
This function calculates the product MK*akk for a given set of input
parameters. This is the continuum extrapolation formula for chi-pt
    Args:
        p: tuple of the fit parameters (lkk, Bms); lkk is the counterterm
           involving the Gasser-Leutwyler coefficients
        mpi: The pion mass
    Note:
        The kaon decay constant fk and the renormalization scale ren are set
        inside the function; the kaon and eta masses do not enter explicitly.
Returns:
mk*akk: The product of scattering length and Kaon mass at one set of
parameters
"""
lkk, Bms = p
# try fit with physical values (MeV)
fk = 160
ren = fk
#ren = 130.7
#convert mpi to phys
_mpi = chf.lat_to_phys(mpi)
# Overall prefactor
pre_out = (2.*Bms - _mpi**2)/(16*math.pi*fk**2)
# inner prefactor
pre_in = (2.*Bms + _mpi**2)/(32*math.pi**2*fk**2)
# 3 coefficients to the logarithms
coeff = np.array([2, 1./(2.*Bms/_mpi**2-1.), 20./9.*(Bms-_mpi**2)/(2.*Bms-_mpi**2)])
# 3 logarithms
log = np.log(np.array([(_mpi**2+2.*Bms)/ren**2,_mpi**2/ren**2,(_mpi**2+4.*Bms)/(3.*ren**2)]))
# sum_i coeff[i]*log[i]
prod = np.multiply(coeff,log)
# decorated counterterm
count = 14./9. + 32.*(4*math.pi)**2*lkk
brac_in = prod[0] - prod[1] + prod[2] - count
brac_out = 1. + pre_in*brac_in
mk_akk = (pre_out*brac_out)
return mk_akk
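# Example call (editor's sketch; the lkk and Bms values below are placeholders,
# not physical fit results, and mpi is assumed to be in the lattice units
# expected by chf.lat_to_phys):
#   >>> mk_akk = eval_chi_pt_cont((1.0e-3, 0.5), 0.069)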
def err_prop_gauss(_a,_b,oper='div'):
""" Evaluates gaussian propagated error without correlation for different
operations
Args:
a,b: numpy arrays of the values of interest
da,db: numpy arrays of the corresponding errors
oper: flag to determine derived value (default: a/b)
Returns:
err_der: a numpy array of the derived errors
"""
a,b = _a[:,0],_b[:,0]
da,db = _a[:,1], _b[:,1]
if oper == 'div':
sq_1 = np.square(np.divide(da,b))
tmp_prod = np.multiply(a,db)
sq_2 = np.square(np.divide(tmp_prod,np.square(b)))
err_der = np.sqrt(np.add(sq_1,sq_2))
else:
print("Not able to determine error")
err_der = 0
return err_der
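# Worked example (editor's sketch): a = 2 +/- 0.2 divided by b = 4 +/- 0.4
# gives sqrt((0.2/4)**2 + (2*0.4/4**2)**2) = sqrt(0.0025 + 0.0025) ~ 0.0707:
#   >>> err_prop_gauss(np.array([[2.0, 0.2]]), np.array([[4.0, 0.4]]))
#   array([ 0.07071068])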
def sum_error_sym(meas):
"""gets a n _mpi 3 numpy array holding a value, a statistical and a systematic
uncertainty to be added in quadrature
returns a n _mpi 2 array holding the value and the combined uncertainty for each
row
"""
print meas.shape[0]
val_err = np.zeros((meas.shape[0],2))
val_err[:,0] = meas[:,0]
val_err[:,1] = np.sqrt(np.add(np.square(meas[:,1]),np.square(meas[:,2])))
return val_err
def sum_error_asym(meas):
"""gets a n _mpi 4 numpy array holding a value, a statistical and two systematic
uncertainties to be added in quadrature
returns a n _mpi 2 array holding the value and the combined uncertainty for each
row
"""
print meas.shape[0]
val_err = np.zeros((meas.shape[0],2))
val_err[:,0] = meas[:,0]
sys_err_sum =np.add( np.square(meas[:,2]), np.square(meas[:,3]) )
val_err[:,1] = np.sqrt(np.add(np.square(meas[:,1]),sys_err_sum))
return val_err
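# Quadrature example (editor's note): a row [value, stat, sys_up, sys_down]
# of [1.0, 0.3, 0.4, 0.0] combines to an uncertainty of
# sqrt(0.3**2 + 0.4**2 + 0.0**2) = 0.5.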
|
chjost/analysis-code
|
analysis/interpol.py
|
Python
|
gpl-3.0
| 7,303
|
[
"Gaussian"
] |
b6f6c6199fba5bb606d49827f9d526ddfa121ed8052a451966630cddd5ce834f
|
from twisted.internet import reactor
from twisted.python import log
from octopus.sequence.shortcuts import *
from octopus.experiment import Experiment
s = wait(2)
e = Experiment(s)
reactor.callLater(0.5, e.pause)
reactor.callLater(1, e.resume)
reactor.callLater(1.5, e.stop)
reactor.callLater(0, e.run)
s1 = wait(2)
e1 = Experiment(s1)
reactor.callLater(2.5, s1.abort)
reactor.callLater(2, e1.run)
s2 = wait(2)
e2 = Experiment(s2)
reactor.callLater(3.5, s2.cancel)
reactor.callLater(3, e2.run)
reactor.callLater(4, reactor.stop)
reactor.run()
|
rasata/octopus
|
examples/test_abort.py
|
Python
|
mit
| 547
|
[
"Octopus"
] |
1c94b9db0ff1831946b15850840c5913375a54dcad3d076bc2c2254d8b8f2c33
|
# Copyright 2008, 2009 CAMd
# (see accompanying license files for details).
"""Definition of the Atoms class.
This module defines the central object in the ASE package: the Atoms
object.
"""
import warnings
from math import cos, sin
import numpy as np
import ase.units as units
from ase.atom import Atom
from ase.data import atomic_numbers, chemical_symbols, atomic_masses
from ase.utils import basestring
from ase.utils.geometry import wrap_positions, find_mic
class Atoms(object):
"""Atoms object.
The Atoms object can represent an isolated molecule, or a
periodically repeated structure. It has a unit cell and
there may be periodic boundary conditions along any of the three
unit cell axes.
Information about the atoms (atomic numbers and position) is
stored in ndarrays. Optionally, there can be information about
tags, momenta, masses, magnetic moments and charges.
In order to calculate energies, forces and stresses, a calculator
    object has to be attached to the atoms object.
Parameters:
symbols: str (formula) or list of str
Can be a string formula, a list of symbols or a list of
Atom objects. Examples: 'H2O', 'COPt12', ['H', 'H', 'O'],
[Atom('Ne', (x, y, z)), ...].
positions: list of xyz-positions
Atomic positions. Anything that can be converted to an
ndarray of shape (n, 3) will do: [(x1,y1,z1), (x2,y2,z2),
...].
scaled_positions: list of scaled-positions
Like positions, but given in units of the unit cell.
Can not be set at the same time as positions.
numbers: list of int
Atomic numbers (use only one of symbols/numbers).
tags: list of int
Special purpose tags.
momenta: list of xyz-momenta
Momenta for all atoms.
masses: list of float
Atomic masses in atomic units.
magmoms: list of float or list of xyz-values
Magnetic moments. Can be either a single value for each atom
for collinear calculations or three numbers for each atom for
non-collinear calculations.
charges: list of float
Atomic charges.
cell: 3x3 matrix
Unit cell vectors. Can also be given as just three
numbers for orthorhombic cells. Default value: [1, 1, 1].
celldisp: Vector
        Unit cell displacement vector. To visualize a displaced cell
        around the center of mass of a system of atoms. Default value:
        (0, 0, 0).
pbc: one or three bool
Periodic boundary conditions flags. Examples: True,
False, 0, 1, (1, 1, 0), (True, False, False). Default
value: False.
constraint: constraint object(s)
Used for applying one or more constraints during structure
optimization.
calculator: calculator object
Used to attach a calculator for calculating energies and atomic
forces.
info: dict of key-value pairs
Dictionary of key-value pairs with additional information
about the system. The following keys may be used by ase:
- spacegroup: Spacegroup instance
- unit_cell: 'conventional' | 'primitive' | int | 3 ints
- adsorbate_info:
    Items in the info attribute survive copy and slicing and can
    be stored to and retrieved from trajectory files given that the
key is a string, the value is picklable and, if the value is a
user-defined object, its base class is importable. One should
not make any assumptions about the existence of keys.
Examples:
These three are equivalent:
>>> d = 1.104 # N2 bondlength
>>> a = Atoms('N2', [(0, 0, 0), (0, 0, d)])
>>> a = Atoms(numbers=[7, 7], positions=[(0, 0, 0), (0, 0, d)])
    >>> a = Atoms([Atom('N', (0, 0, 0)), Atom('N', (0, 0, d))])
FCC gold:
>>> a = 4.05 # Gold lattice constant
>>> b = a / 2
>>> fcc = Atoms('Au',
... cell=[(0, b, b), (b, 0, b), (b, b, 0)],
... pbc=True)
Hydrogen wire:
>>> d = 0.9 # H-H distance
>>> L = 7.0
>>> h = Atoms('H', positions=[(0, L / 2, L / 2)],
... cell=(d, L, L),
... pbc=(1, 0, 0))
"""
def __init__(self, symbols=None,
positions=None, numbers=None,
tags=None, momenta=None, masses=None,
magmoms=None, charges=None,
scaled_positions=None,
cell=None, pbc=None, celldisp=None,
constraint=None,
calculator=None,
info=None):
atoms = None
if hasattr(symbols, 'get_positions'):
atoms = symbols
symbols = None
elif (isinstance(symbols, (list, tuple)) and
len(symbols) > 0 and isinstance(symbols[0], Atom)):
# Get data from a list or tuple of Atom objects:
data = [[atom.get_raw(name) for atom in symbols]
for name in
['position', 'number', 'tag', 'momentum',
'mass', 'magmom', 'charge']]
atoms = self.__class__(None, *data)
symbols = None
if atoms is not None:
# Get data from another Atoms object:
if scaled_positions is not None:
raise NotImplementedError
if symbols is None and numbers is None:
numbers = atoms.get_atomic_numbers()
if positions is None:
positions = atoms.get_positions()
if tags is None and atoms.has('tags'):
tags = atoms.get_tags()
if momenta is None and atoms.has('momenta'):
momenta = atoms.get_momenta()
if magmoms is None and atoms.has('magmoms'):
magmoms = atoms.get_initial_magnetic_moments()
if masses is None and atoms.has('masses'):
masses = atoms.get_masses()
if charges is None and atoms.has('charges'):
charges = atoms.get_initial_charges()
if cell is None:
cell = atoms.get_cell()
if celldisp is None:
celldisp = atoms.get_celldisp()
if pbc is None:
pbc = atoms.get_pbc()
if constraint is None:
constraint = [c.copy() for c in atoms.constraints]
if calculator is None:
calculator = atoms.get_calculator()
self.arrays = {}
if symbols is None:
if numbers is None:
if positions is not None:
natoms = len(positions)
elif scaled_positions is not None:
natoms = len(scaled_positions)
else:
natoms = 0
numbers = np.zeros(natoms, int)
self.new_array('numbers', numbers, int)
else:
if numbers is not None:
raise ValueError(
'Use only one of "symbols" and "numbers".')
else:
self.new_array('numbers', symbols2numbers(symbols), int)
if cell is None:
cell = np.eye(3)
self.set_cell(cell)
if celldisp is None:
celldisp = np.zeros(shape=(3, 1))
self.set_celldisp(celldisp)
if positions is None:
if scaled_positions is None:
positions = np.zeros((len(self.arrays['numbers']), 3))
else:
positions = np.dot(scaled_positions, self._cell)
else:
if scaled_positions is not None:
raise RuntimeError('Both scaled and cartesian positions set!')
self.new_array('positions', positions, float, (3,))
self.set_constraint(constraint)
self.set_tags(default(tags, 0))
self.set_momenta(default(momenta, (0.0, 0.0, 0.0)))
self.set_masses(default(masses, None))
self.set_initial_magnetic_moments(default(magmoms, 0.0))
self.set_initial_charges(default(charges, 0.0))
if pbc is None:
pbc = False
self.set_pbc(pbc)
if info is None:
self.info = {}
else:
self.info = dict(info)
self.adsorbate_info = {}
self.set_calculator(calculator)
def set_calculator(self, calc=None):
"""Attach calculator object."""
if hasattr(calc, '_SetListOfAtoms'):
from ase.old import OldASECalculatorWrapper
calc = OldASECalculatorWrapper(calc, self)
if hasattr(calc, 'set_atoms'):
calc.set_atoms(self)
self._calc = calc
def get_calculator(self):
"""Get currently attached calculator object."""
return self._calc
def _del_calculator(self):
self._calc = None
calc = property(get_calculator, set_calculator, _del_calculator,
doc='Calculator object.')
def set_constraint(self, constraint=None):
"""Apply one or more constrains.
The *constraint* argument must be one constraint object or a
list of constraint objects."""
if constraint is None:
self._constraints = []
else:
if isinstance(constraint, (list, tuple)):
self._constraints = constraint
else:
self._constraints = [constraint]
def _get_constraints(self):
return self._constraints
def _del_constraints(self):
self._constraints = []
constraints = property(_get_constraints, set_constraint, _del_constraints,
'Constraints of the atoms.')
def set_cell(self, cell, scale_atoms=False, fix=None):
"""Set unit cell vectors.
Parameters:
cell :
Unit cell. A 3x3 matrix (the three unit cell vectors) or
just three numbers for an orthorhombic cell.
scale_atoms : bool
Fix atomic positions or move atoms with the unit cell?
Default behavior is to *not* move the atoms (scale_atoms=False).
Examples:
Two equivalent ways to define an orthorhombic cell:
>>> a.set_cell([a, b, c])
>>> a.set_cell([(a, 0, 0), (0, b, 0), (0, 0, c)])
FCC unit cell:
>>> a.set_cell([(0, b, b), (b, 0, b), (b, b, 0)])
"""
if fix is not None:
raise TypeError('Please use scale_atoms=%s' % (not fix))
cell = np.array(cell, float)
if cell.shape == (3,):
cell = np.diag(cell)
elif cell.shape != (3, 3):
raise ValueError('Cell must be length 3 sequence or '
'3x3 matrix!')
if scale_atoms:
M = np.linalg.solve(self._cell, cell)
self.arrays['positions'][:] = np.dot(self.arrays['positions'], M)
self._cell = cell
def set_celldisp(self, celldisp):
"""Set the unit cell displacement vectors."""
celldisp = np.array(celldisp, float)
self._celldisp = celldisp
def get_celldisp(self):
"""Get the unit cell displacement vectors."""
return self._celldisp.copy()
def get_cell(self):
"""Get the three unit cell vectors as a 3x3 ndarray."""
return self._cell.copy()
def get_reciprocal_cell(self):
"""Get the three reciprocal lattice vectors as a 3x3 ndarray.
Note that the commonly used factor of 2 pi for Fourier
transforms is not included here."""
rec_unit_cell = np.linalg.inv(self.get_cell()).transpose()
return rec_unit_cell
def set_pbc(self, pbc):
"""Set periodic boundary condition flags."""
if isinstance(pbc, int):
pbc = (pbc,) * 3
self._pbc = np.array(pbc, bool)
def get_pbc(self):
"""Get periodic boundary condition flags."""
return self._pbc.copy()
def new_array(self, name, a, dtype=None, shape=None):
"""Add new array.
If *shape* is not *None*, the shape of *a* will be checked."""
if dtype is not None:
a = np.array(a, dtype)
if len(a) == 0 and shape is not None:
a.shape = (-1,) + shape
else:
a = a.copy()
if name in self.arrays:
raise RuntimeError
for b in self.arrays.values():
if len(a) != len(b):
raise ValueError('Array has wrong length: %d != %d.' %
(len(a), len(b)))
break
if shape is not None and a.shape[1:] != shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, (a.shape[0:1] + shape)))
self.arrays[name] = a
def get_array(self, name, copy=True):
"""Get an array.
Returns a copy unless the optional argument copy is false.
"""
if copy:
return self.arrays[name].copy()
else:
return self.arrays[name]
def set_array(self, name, a, dtype=None, shape=None):
"""Update array.
If *shape* is not *None*, the shape of *a* will be checked.
If *a* is *None*, then the array is deleted."""
b = self.arrays.get(name)
if b is None:
if a is not None:
self.new_array(name, a, dtype, shape)
else:
if a is None:
del self.arrays[name]
else:
a = np.asarray(a)
if a.shape != b.shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, b.shape))
b[:] = a
def has(self, name):
"""Check for existence of array.
name must be one of: 'tags', 'momenta', 'masses', 'magmoms',
'charges'."""
return name in self.arrays
def set_atomic_numbers(self, numbers):
"""Set atomic numbers."""
self.set_array('numbers', numbers, int, ())
def get_atomic_numbers(self):
"""Get integer array of atomic numbers."""
return self.arrays['numbers'].copy()
def get_chemical_symbols(self):
"""Get list of chemical symbol strings."""
return [chemical_symbols[Z] for Z in self.arrays['numbers']]
def set_chemical_symbols(self, symbols):
"""Set chemical symbols."""
self.set_array('numbers', symbols2numbers(symbols), int, ())
def get_chemical_formula(self, mode='hill'):
"""Get the chemial formula as a string based on the chemical symbols.
Parameters:
mode: str
There are three different modes available:
            'all': The list of chemical symbols is contracted to a string,
e.g. ['C', 'H', 'H', 'H', 'O', 'H'] becomes 'CHHHOH'.
'reduce': The same as 'all' where repeated elements are contracted
to a single symbol and a number, e.g. 'CHHHOCHHH' is reduced to
'CH3OCH3'.
            'hill': The list of chemical symbols is contracted to a string
            following the Hill notation (alphabetical order with C and H
            first), e.g. 'CHHHOCHHH' is reduced to 'C2H6O' and 'SOOHOHO' to
            'H2O4S'. This is the default.
"""
if len(self) == 0:
return ''
if mode == 'reduce':
numbers = self.get_atomic_numbers()
n = len(numbers)
changes = np.concatenate(([0], np.arange(1, n)[numbers[1:] !=
numbers[:-1]]))
symbols = [chemical_symbols[e] for e in numbers[changes]]
counts = np.append(changes[1:], n) - changes
elif mode == 'hill':
numbers = self.get_atomic_numbers()
elements = np.unique(numbers)
symbols = np.array([chemical_symbols[e] for e in elements])
counts = np.array([(numbers == e).sum() for e in elements])
ind = symbols.argsort()
symbols = symbols[ind]
counts = counts[ind]
if 'H' in symbols:
i = np.arange(len(symbols))[symbols == 'H']
symbols = np.insert(np.delete(symbols, i), 0, symbols[i])
counts = np.insert(np.delete(counts, i), 0, counts[i])
if 'C' in symbols:
i = np.arange(len(symbols))[symbols == 'C']
symbols = np.insert(np.delete(symbols, i), 0, symbols[i])
counts = np.insert(np.delete(counts, i), 0, counts[i])
elif mode == 'all':
numbers = self.get_atomic_numbers()
symbols = [chemical_symbols[n] for n in numbers]
counts = [1] * len(numbers)
else:
raise ValueError("Use mode = 'all', 'reduce' or 'hill'.")
formula = ''
for s, c in zip(symbols, counts):
formula += s
if c > 1:
formula += str(c)
return formula
def set_tags(self, tags):
"""Set tags for all atoms. If only one tag is supplied, it is
applied to all atoms."""
if isinstance(tags, int):
tags = [tags] * len(self)
self.set_array('tags', tags, int, ())
def get_tags(self):
"""Get integer array of tags."""
if 'tags' in self.arrays:
return self.arrays['tags'].copy()
else:
return np.zeros(len(self), int)
def set_momenta(self, momenta):
"""Set momenta."""
if len(self.constraints) > 0 and momenta is not None:
momenta = np.array(momenta) # modify a copy
for constraint in self.constraints:
if hasattr(constraint, 'adjust_momenta'):
constraint.adjust_momenta(self, momenta)
self.set_array('momenta', momenta, float, (3,))
def set_velocities(self, velocities):
"""Set the momenta by specifying the velocities."""
self.set_momenta(self.get_masses()[:, np.newaxis] * velocities)
def get_momenta(self):
"""Get array of momenta."""
if 'momenta' in self.arrays:
return self.arrays['momenta'].copy()
else:
return np.zeros((len(self), 3))
def set_masses(self, masses='defaults'):
"""Set atomic masses.
The array masses should contain a list of masses. In case
the masses argument is not given or for those elements of the
masses list that are None, standard values are set."""
if masses == 'defaults':
masses = atomic_masses[self.arrays['numbers']]
elif isinstance(masses, (list, tuple)):
newmasses = []
for m, Z in zip(masses, self.arrays['numbers']):
if m is None:
newmasses.append(atomic_masses[Z])
else:
newmasses.append(m)
masses = newmasses
self.set_array('masses', masses, float, ())
def get_masses(self):
"""Get array of masses."""
if 'masses' in self.arrays:
return self.arrays['masses'].copy()
else:
return atomic_masses[self.arrays['numbers']]
def set_initial_magnetic_moments(self, magmoms=None):
"""Set the initial magnetic moments.
Use either one or three numbers for every atom (collinear
or non-collinear spins)."""
if magmoms is None:
self.set_array('magmoms', None)
else:
magmoms = np.asarray(magmoms)
self.set_array('magmoms', magmoms, float, magmoms.shape[1:])
def get_initial_magnetic_moments(self):
"""Get array of initial magnetic moments."""
if 'magmoms' in self.arrays:
return self.arrays['magmoms'].copy()
else:
return np.zeros(len(self))
def get_magnetic_moments(self):
"""Get calculated local magnetic moments."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_magnetic_moments(self)
def get_magnetic_moment(self):
"""Get calculated total magnetic moment."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_magnetic_moment(self)
def set_initial_charges(self, charges=None):
"""Set the initial charges."""
if charges is None:
self.set_array('charges', None)
else:
self.set_array('charges', charges, float, ())
def get_initial_charges(self):
"""Get array of initial charges."""
if 'charges' in self.arrays:
return self.arrays['charges'].copy()
else:
return np.zeros(len(self))
def get_charges(self):
"""Get calculated charges."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
try:
return self._calc.get_charges(self)
except AttributeError:
raise NotImplementedError
def set_positions(self, newpositions):
"""Set positions, honoring any constraints."""
if self.constraints:
newpositions = np.array(newpositions, float)
for constraint in self.constraints:
constraint.adjust_positions(self, newpositions)
self.set_array('positions', newpositions, shape=(3,))
def get_positions(self, wrap=False):
"""Get array of positions. If wrap==True, wraps atoms back
into unit cell.
"""
if wrap:
scaled = self.get_scaled_positions()
return np.dot(scaled, self._cell)
else:
return self.arrays['positions'].copy()
def get_calculation_done(self):
"""Let the calculator calculate its thing,
using the current input.
"""
if self.calc is None:
raise RuntimeError('Atoms object has no calculator.')
self.calc.initialize(self)
self.calc.calculate(self)
def get_potential_energy(self, force_consistent=False,
apply_constraint=True):
"""Calculate potential energy.
        Ask the attached calculator to calculate the potential energy and
        apply constraints. Use *apply_constraint=False* to get the raw
        energy.
When supported by the calculator, either the energy extrapolated
to zero Kelvin or the energy consistent with the forces (the free
energy) can be returned.
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
if force_consistent:
energy = self._calc.get_potential_energy(
self, force_consistent=force_consistent)
else:
energy = self._calc.get_potential_energy(self)
if apply_constraint:
constraints = [c for c in self.constraints
if hasattr(c, 'adjust_potential_energy')]
for constraint in constraints:
energy += constraint.adjust_potential_energy(self, energy)
return energy
def get_potential_energies(self):
"""Calculate the potential energies of all the atoms.
Only available with calculators supporting per-atom energies
(e.g. classical potentials).
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_potential_energies(self)
def get_kinetic_energy(self):
"""Get the kinetic energy."""
momenta = self.arrays.get('momenta')
if momenta is None:
return 0.0
return 0.5 * np.vdot(momenta, self.get_velocities())
def get_velocities(self):
"""Get array of velocities."""
momenta = self.arrays.get('momenta')
if momenta is None:
return None
m = self.arrays.get('masses')
if m is None:
m = atomic_masses[self.arrays['numbers']]
return momenta / m.reshape(-1, 1)
def get_total_energy(self):
"""Get the total energy - potential plus kinetic energy."""
return self.get_potential_energy() + self.get_kinetic_energy()
def get_forces(self, apply_constraint=True):
"""Calculate atomic forces.
Ask the attached calculator to calculate the forces and apply
constraints. Use *apply_constraint=False* to get the raw
forces."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
forces = self._calc.get_forces(self)
if apply_constraint:
for constraint in self.constraints:
constraint.adjust_forces(self, forces)
return forces
def get_stress(self, voigt=True):
"""Calculate stress tensor.
Returns an array of the six independent components of the
symmetric stress tensor, in the traditional Voigt order
(xx, yy, zz, yz, xz, xy) or as a 3x3 matrix. Default is Voigt
order.
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
stress = self._calc.get_stress(self)
shape = stress.shape
if shape == (3, 3):
warnings.warn('Converting 3x3 stress tensor from %s ' %
self._calc.__class__.__name__ +
'calculator to the required Voigt form.')
stress = np.array([stress[0, 0], stress[1, 1], stress[2, 2],
stress[1, 2], stress[0, 2], stress[0, 1]])
else:
assert shape == (6,)
if voigt:
return stress
else:
xx, yy, zz, yz, xz, xy = stress
return np.array([(xx, xy, xz),
(xy, yy, yz),
(xz, yz, zz)])
def get_stresses(self):
"""Calculate the stress-tensor of all the atoms.
Only available with calculators supporting per-atom energies and
stresses (e.g. classical potentials). Even for such calculators
there is a certain arbitrariness in defining per-atom stresses.
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_stresses(self)
def get_dipole_moment(self):
"""Calculate the electric dipole moment for the atoms object.
Only available for calculators which has a get_dipole_moment()
method."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_dipole_moment(self)
def copy(self):
"""Return a copy."""
import copy
atoms = self.__class__(cell=self._cell, pbc=self._pbc, info=self.info)
atoms.arrays = {}
for name, a in self.arrays.items():
atoms.arrays[name] = a.copy()
atoms.constraints = copy.deepcopy(self.constraints)
atoms.adsorbate_info = copy.deepcopy(self.adsorbate_info)
return atoms
def __len__(self):
return len(self.arrays['positions'])
def get_number_of_atoms(self):
"""Returns the number of atoms.
Equivalent to len(atoms) in the standard ASE Atoms class.
"""
return len(self)
def __repr__(self):
num = self.get_atomic_numbers()
N = len(num)
if N == 0:
symbols = ''
elif N <= 60:
symbols = self.get_chemical_formula('reduce')
else:
symbols = self.get_chemical_formula('hill')
s = "%s(symbols='%s', " % (self.__class__.__name__, symbols)
for name in self.arrays:
if name == 'numbers':
continue
s += '%s=..., ' % name
if (self._cell - np.diag(self._cell.diagonal())).any():
s += 'cell=%s, ' % self._cell.tolist()
else:
s += 'cell=%s, ' % self._cell.diagonal().tolist()
s += 'pbc=%s, ' % self._pbc.tolist()
if len(self.constraints) == 1:
s += 'constraint=%s, ' % repr(self.constraints[0])
if len(self.constraints) > 1:
s += 'constraint=%s, ' % repr(self.constraints)
if self._calc is not None:
s += 'calculator=%s(...), ' % self._calc.__class__.__name__
return s[:-2] + ')'
def __add__(self, other):
atoms = self.copy()
atoms += other
return atoms
def extend(self, other):
"""Extend atoms object by appending atoms from *other*."""
if isinstance(other, Atom):
other = self.__class__([other])
n1 = len(self)
n2 = len(other)
for name, a1 in self.arrays.items():
a = np.zeros((n1 + n2,) + a1.shape[1:], a1.dtype)
a[:n1] = a1
if name == 'masses':
a2 = other.get_masses()
else:
a2 = other.arrays.get(name)
if a2 is not None:
a[n1:] = a2
self.arrays[name] = a
for name, a2 in other.arrays.items():
if name in self.arrays:
continue
a = np.empty((n1 + n2,) + a2.shape[1:], a2.dtype)
a[n1:] = a2
if name == 'masses':
a[:n1] = self.get_masses()[:n1]
else:
a[:n1] = 0
self.set_array(name, a)
return self
__iadd__ = extend
def append(self, atom):
"""Append atom to end."""
self.extend(self.__class__([atom]))
def __getitem__(self, i):
"""Return a subset of the atoms.
i -- scalar integer, list of integers, or slice object
describing which atoms to return.
If i is a scalar, return an Atom object. If i is a list or a
slice, return an Atoms object with the same cell, pbc, and
other associated info as the original Atoms object. The
indices of the constraints will be shuffled so that they match
the indexing in the subset returned.
"""
if isinstance(i, int):
natoms = len(self)
if i < -natoms or i >= natoms:
raise IndexError('Index out of range.')
return Atom(atoms=self, index=i)
import copy
from ase.constraints import FixConstraint
atoms = self.__class__(cell=self._cell, pbc=self._pbc, info=self.info)
# TODO: Do we need to shuffle indices in adsorbate_info too?
atoms.adsorbate_info = self.adsorbate_info
atoms.arrays = {}
for name, a in self.arrays.items():
atoms.arrays[name] = a[i].copy()
# Constraints need to be deepcopied, since we need to shuffle
# the indices
atoms.constraints = copy.deepcopy(self.constraints)
condel = []
for con in atoms.constraints:
if isinstance(con, FixConstraint):
try:
con.index_shuffle(i)
except IndexError:
condel.append(con)
for con in condel:
atoms.constraints.remove(con)
return atoms
def __delitem__(self, i):
from ase.constraints import FixAtoms
check_constraint = np.array([isinstance(c, FixAtoms)
for c in self._constraints])
if (len(self._constraints) > 0 and (not check_constraint.all() or
isinstance(i, list))):
raise RuntimeError('Remove constraint using set_constraint() '
'before deleting atoms.')
mask = np.ones(len(self), bool)
mask[i] = False
for name, a in self.arrays.items():
self.arrays[name] = a[mask]
if len(self._constraints) > 0:
for n in range(len(self._constraints)):
self._constraints[n].delete_atom(range(len(mask))[i])
def pop(self, i=-1):
"""Remove and return atom at index *i* (default last)."""
atom = self[i]
atom.cut_reference_to_atoms()
del self[i]
return atom
def __imul__(self, m):
"""In-place repeat of atoms."""
if isinstance(m, int):
m = (m, m, m)
M = np.product(m)
n = len(self)
for name, a in self.arrays.items():
self.arrays[name] = np.tile(a, (M,) + (1,) * (len(a.shape) - 1))
positions = self.arrays['positions']
i0 = 0
for m0 in range(m[0]):
for m1 in range(m[1]):
for m2 in range(m[2]):
i1 = i0 + n
positions[i0:i1] += np.dot((m0, m1, m2), self._cell)
i0 = i1
if self.constraints is not None:
self.constraints = [c.repeat(m, n) for c in self.constraints]
self._cell = np.array([m[c] * self._cell[c] for c in range(3)])
return self
def repeat(self, rep):
"""Create new repeated atoms object.
The *rep* argument should be a sequence of three positive
integers like *(2,3,1)* or a single integer (*r*) equivalent
to *(r,r,r)*."""
atoms = self.copy()
atoms *= rep
return atoms
__mul__ = repeat
def translate(self, displacement):
"""Translate atomic positions.
The displacement argument can be a float an xyz vector or an
nx3 array (where n is the number of atoms)."""
self.arrays['positions'] += np.array(displacement)
def center(self, vacuum=None, axis=(0, 1, 2)):
"""Center atoms in unit cell.
Centers the atoms in the unit cell, so there is the same
amount of vacuum on all sides.
vacuum: float (default: None)
If specified adjust the amount of vacuum when centering.
If vacuum=10.0 there will thus be 10 Angstrom of vacuum
on each side.
axis: int or sequence of ints
Axis or axes to act on. Default: Act on all axes.
"""
# Find the orientations of the faces of the unit cell
c = self.get_cell()
dirs = np.zeros_like(c)
for i in range(3):
dirs[i] = np.cross(c[i - 1], c[i - 2])
dirs[i] /= np.sqrt(np.dot(dirs[i], dirs[i])) # normalize
if np.dot(dirs[i], c[i]) < 0.0:
dirs[i] *= -1
# Now, decide how much each basis vector should be made longer
if isinstance(axis, int):
axes = (axis,)
else:
axes = axis
p = self.arrays['positions']
longer = np.zeros(3)
shift = np.zeros(3)
for i in axes:
p0 = np.dot(p, dirs[i]).min()
p1 = np.dot(p, dirs[i]).max()
height = np.dot(c[i], dirs[i])
if vacuum is not None:
lng = (p1 - p0 + 2 * vacuum) - height
else:
lng = 0.0 # Do not change unit cell size!
top = lng + height - p1
shf = 0.5 * (top - p0)
cosphi = np.dot(c[i], dirs[i]) / np.sqrt(np.dot(c[i], c[i]))
longer[i] = lng / cosphi
shift[i] = shf / cosphi
# Now, do it!
translation = np.zeros(3)
for i in axes:
nowlen = np.sqrt(np.dot(c[i], c[i]))
self._cell[i] *= 1 + longer[i] / nowlen
translation += shift[i] * c[i] / nowlen
self.arrays['positions'] += translation
def get_center_of_mass(self, scaled=False):
"""Get the center of mass.
If scaled=True the center of mass in scaled coordinates
is returned."""
m = self.get_masses()
com = np.dot(m, self.arrays['positions']) / m.sum()
if scaled:
return np.linalg.solve(self._cell.T, com)
else:
return com
def get_moments_of_inertia(self, vectors=False):
"""Get the moments of inertia along the principal axes.
The three principal moments of inertia are computed from the
eigenvalues of the symmetric inertial tensor. Periodic boundary
conditions are ignored. Units of the moments of inertia are
amu*angstrom**2.
"""
com = self.get_center_of_mass()
positions = self.get_positions()
positions -= com # translate center of mass to origin
masses = self.get_masses()
# Initialize elements of the inertial tensor
I11 = I22 = I33 = I12 = I13 = I23 = 0.0
for i in range(len(self)):
x, y, z = positions[i]
m = masses[i]
I11 += m * (y ** 2 + z ** 2)
I22 += m * (x ** 2 + z ** 2)
I33 += m * (x ** 2 + y ** 2)
I12 += -m * x * y
I13 += -m * x * z
I23 += -m * y * z
I = np.array([[I11, I12, I13],
[I12, I22, I23],
[I13, I23, I33]])
evals, evecs = np.linalg.eigh(I)
if vectors:
return evals, evecs.transpose()
else:
return evals
def get_angular_momentum(self):
"""Get total angular momentum with respect to the center of mass."""
com = self.get_center_of_mass()
positions = self.get_positions()
positions -= com # translate center of mass to origin
return np.cross(positions, self.get_momenta()).sum(0)
def rotate(self, v, a=None, center=(0, 0, 0), rotate_cell=False):
"""Rotate atoms based on a vector and an angle, or two vectors.
Parameters:
v:
Vector to rotate the atoms around. Vectors can be given as
strings: 'x', '-x', 'y', ... .
a = None:
            Angle that the atoms are rotated around the vector 'v'. If an angle
is not specified, the length of 'v' is used as the angle
(default). The angle can also be a vector and then 'v' is rotated
into 'a'.
center = (0, 0, 0):
The center is kept fixed under the rotation. Use 'COM' to fix
the center of mass, 'COP' to fix the center of positions or
'COU' to fix the center of cell.
rotate_cell = False:
If true the cell is also rotated.
Examples:
Rotate 90 degrees around the z-axis, so that the x-axis is
rotated into the y-axis:
>>> a = pi / 2
>>> atoms.rotate('z', a)
>>> atoms.rotate((0, 0, 1), a)
>>> atoms.rotate('-z', -a)
>>> atoms.rotate((0, 0, a))
>>> atoms.rotate('x', 'y')
"""
norm = np.linalg.norm
v = string2vector(v)
if a is None:
a = norm(v)
if isinstance(a, (float, int)):
v /= norm(v)
c = cos(a)
s = sin(a)
else:
v2 = string2vector(a)
v /= norm(v)
v2 /= norm(v2)
c = np.dot(v, v2)
v = np.cross(v, v2)
s = norm(v)
            # In case *v* and *a* are parallel, np.cross(v, v2) vanishes
# and can't be used as a rotation axis. However, in this
# case any rotation axis perpendicular to v2 will do.
eps = 1e-7
if s < eps:
v = np.cross((0, 0, 1), v2)
if norm(v) < eps:
v = np.cross((1, 0, 0), v2)
assert norm(v) >= eps
elif s > 0:
v /= s
if isinstance(center, str):
if center.lower() == 'com':
center = self.get_center_of_mass()
elif center.lower() == 'cop':
center = self.get_positions().mean(axis=0)
elif center.lower() == 'cou':
center = self.get_cell().sum(axis=0) / 2
else:
raise ValueError('Cannot interpret center')
else:
center = np.array(center)
p = self.arrays['positions'] - center
self.arrays['positions'][:] = (c * p -
np.cross(p, s * v) +
np.outer(np.dot(p, v), (1.0 - c) * v) +
center)
if rotate_cell:
rotcell = self.get_cell()
rotcell[:] = (c * rotcell -
np.cross(rotcell, s * v) +
np.outer(np.dot(rotcell, v), (1.0 - c) * v))
self.set_cell(rotcell)
def rotate_euler(self, center=(0, 0, 0), phi=0.0, theta=0.0, psi=0.0):
"""Rotate atoms via Euler angles.
        See e.g. http://mathworld.wolfram.com/EulerAngles.html for an explanation.
Parameters:
center :
The point to rotate about. A sequence of length 3 with the
coordinates, or 'COM' to select the center of mass, 'COP' to
select center of positions or 'COU' to select center of cell.
phi :
The 1st rotation angle around the z axis.
theta :
Rotation around the x axis.
psi :
2nd rotation around the z axis.
"""
if isinstance(center, str):
if center.lower() == 'com':
center = self.get_center_of_mass()
elif center.lower() == 'cop':
center = self.get_positions().mean(axis=0)
elif center.lower() == 'cou':
center = self.get_cell().sum(axis=0) / 2
else:
raise ValueError('Cannot interpret center')
else:
center = np.array(center)
# First move the molecule to the origin In contrast to MATLAB,
# numpy broadcasts the smaller array to the larger row-wise,
# so there is no need to play with the Kronecker product.
rcoords = self.positions - center
# First Euler rotation about z in matrix form
D = np.array(((cos(phi), sin(phi), 0.),
(-sin(phi), cos(phi), 0.),
(0., 0., 1.)))
# Second Euler rotation about x:
C = np.array(((1., 0., 0.),
(0., cos(theta), sin(theta)),
(0., -sin(theta), cos(theta))))
# Third Euler rotation, 2nd rotation about z:
B = np.array(((cos(psi), sin(psi), 0.),
(-sin(psi), cos(psi), 0.),
(0., 0., 1.)))
# Total Euler rotation
A = np.dot(B, np.dot(C, D))
# Do the rotation
rcoords = np.dot(A, np.transpose(rcoords))
# Move back to the rotation point
self.positions = np.transpose(rcoords) + center
def get_dihedral(self, list):
"""Calculate dihedral angle.
Calculate dihedral angle between the vectors list[0]->list[1]
and list[2]->list[3], where list contains the atomic indexes
in question.
"""
# vector 0->1, 1->2, 2->3 and their normalized cross products:
a = self.positions[list[1]] - self.positions[list[0]]
b = self.positions[list[2]] - self.positions[list[1]]
c = self.positions[list[3]] - self.positions[list[2]]
bxa = np.cross(b, a)
bxa /= np.linalg.norm(bxa)
cxb = np.cross(c, b)
cxb /= np.linalg.norm(cxb)
angle = np.vdot(bxa, cxb)
# check for numerical trouble due to finite precision:
if angle < -1:
angle = -1
if angle > 1:
angle = 1
angle = np.arccos(angle)
if np.vdot(bxa, c) > 0:
angle = 2 * np.pi - angle
return angle
def _masked_rotate(self, center, axis, diff, mask):
# do rotation of subgroup by copying it to temporary atoms object
# and then rotating that
#
# recursive object definition might not be the most elegant thing,
# more generally useful might be a rotation function with a mask?
group = self.__class__()
for i in range(len(self)):
if mask[i]:
group += self[i]
group.translate(-center)
group.rotate(axis, diff)
group.translate(center)
# set positions in original atoms object
j = 0
for i in range(len(self)):
if mask[i]:
self.positions[i] = group[j].position
j += 1
def set_dihedral(self, list, angle, mask=None, indices=None):
"""Set the dihedral angle between vectors list[0]->list[1] and
list[2]->list[3] by changing the atom indexed by list[3]
if mask is not None, all the atoms described in mask
(read: the entire subgroup) are moved. Alternatively to the mask,
the indices of the atoms to be rotated can be supplied.
example: the following defines a very crude
ethane-like molecule and twists one half of it by 30 degrees.
>>> atoms = Atoms('HHCCHH', [[-1, 1, 0], [-1, -1, 0], [0, 0, 0],
[1, 0, 0], [2, 1, 0], [2, -1, 0]])
>>> atoms.set_dihedral([1,2,3,4],7*pi/6,mask=[0,0,0,1,1,1])
"""
# if not provided, set mask to the last atom in the
# dihedral description
if mask is None and indices is None:
mask = np.zeros(len(self))
mask[list[3]] = 1
elif indices:
mask = [index in indices for index in range(len(self))]
        # compute the necessary change in the dihedral angle from its current value
current = self.get_dihedral(list)
diff = angle - current
axis = self.positions[list[2]] - self.positions[list[1]]
center = self.positions[list[2]]
self._masked_rotate(center, axis, diff, mask)
def rotate_dihedral(self, list, angle, mask=None):
"""Rotate dihedral angle.
Complementing the two routines above: rotate a group by a
predefined dihedral angle, starting from its current
configuration
"""
start = self.get_dihedral(list)
self.set_dihedral(list, angle + start, mask)
def get_angle(self, list):
"""Get angle formed by three atoms.
calculate angle between the vectors list[1]->list[0] and
list[1]->list[2], where list contains the atomic indexes in
question."""
# normalized vector 1->0, 1->2:
v10 = self.positions[list[0]] - self.positions[list[1]]
v12 = self.positions[list[2]] - self.positions[list[1]]
v10 /= np.linalg.norm(v10)
v12 /= np.linalg.norm(v12)
angle = np.vdot(v10, v12)
angle = np.arccos(angle)
return angle
def set_angle(self, list, angle, mask=None):
"""Set angle formed by three atoms.
Sets the angle between vectors list[1]->list[0] and
list[1]->list[2].
Same usage as in set_dihedral."""
# If not provided, set mask to the last atom in the angle description
if mask is None:
mask = np.zeros(len(self))
mask[list[2]] = 1
        # Compute the necessary change in angle from the current value
current = self.get_angle(list)
diff = angle - current
# Do rotation of subgroup by copying it to temporary atoms object and
# then rotating that
v10 = self.positions[list[0]] - self.positions[list[1]]
v12 = self.positions[list[2]] - self.positions[list[1]]
v10 /= np.linalg.norm(v10)
v12 /= np.linalg.norm(v12)
axis = np.cross(v10, v12)
center = self.positions[list[1]]
self._masked_rotate(center, axis, diff, mask)
def rattle(self, stdev=0.001, seed=42):
"""Randomly displace atoms.
This method adds random displacements to the atomic positions,
taking a possible constraint into account. The random numbers are
drawn from a normal distribution of standard deviation stdev.
For a parallel calculation, it is important to use the same
seed on all processors! """
rs = np.random.RandomState(seed)
positions = self.arrays['positions']
self.set_positions(positions +
rs.normal(scale=stdev, size=positions.shape))
def get_distance(self, a0, a1, mic=False, vector=False):
"""Return distance between two atoms.
Use mic=True to use the Minimum Image Convention.
vector=True gives the distance vector (from a0 to a1).
"""
R = self.arrays['positions']
D = np.array([R[a1] - R[a0]])
if mic:
D, D_len = find_mic(D, self._cell, self._pbc)
else:
D_len = np.array([np.sqrt((D**2).sum())])
if vector:
return D[0]
return D_len[0]
def get_distances(self, a, indices, mic=False, vector=False):
"""Return distances of atom No.i with a list of atoms.
Use mic=True to use the Minimum Image Convention.
vector=True gives the distance vector (from a to self[indices]).
"""
R = self.arrays['positions']
D = R[indices] - R[a]
if mic:
D, D_len = find_mic(D, self._cell, self._pbc)
else:
D_len = np.sqrt((D**2).sum(1))
if vector:
return D
return D_len
def get_all_distances(self, mic=False):
"""Return distances of all of the atoms with all of the atoms.
Use mic=True to use the Minimum Image Convention.
"""
L = len(self)
R = self.arrays['positions']
D = []
for i in range(L - 1):
D.append(R[i + 1:] - R[i])
D = np.concatenate(D)
if mic:
D, D_len = find_mic(D, self._cell, self._pbc)
else:
D_len = np.sqrt((D**2).sum(1))
results = np.zeros((L, L), dtype=float)
start = 0
for i in range(L - 1):
results[i, i + 1:] = D_len[start:start + L - i - 1]
start += L - i - 1
return results + results.T
def set_distance(self, a0, a1, distance, fix=0.5, mic=False):
"""Set the distance between two atoms.
Set the distance between atoms *a0* and *a1* to *distance*.
By default, the center of the two atoms will be fixed. Use
*fix=0* to fix the first atom, *fix=1* to fix the second
atom and *fix=0.5* (default) to fix the center of the bond."""
R = self.arrays['positions']
D = np.array([R[a1] - R[a0]])
if mic:
D, D_len = find_mic(D, self._cell, self._pbc)
else:
D_len = np.array([np.sqrt((D**2).sum())])
x = 1.0 - distance / D_len[0]
R[a0] += (x * fix) * D[0]
R[a1] -= (x * (1.0 - fix)) * D[0]
def get_scaled_positions(self, wrap=True):
"""Get positions relative to unit cell.
If wrap is True, atoms outside the unit cell will be wrapped into
the cell in those directions with periodic boundary conditions
so that the scaled coordinates are between zero and one."""
fractional = np.linalg.solve(self.cell.T, self.positions.T).T
if wrap:
for i, periodic in enumerate(self.pbc):
if periodic:
# Yes, we need to do it twice.
# See the scaled_positions.py test.
fractional[:, i] %= 1.0
fractional[:, i] %= 1.0
return fractional
def set_scaled_positions(self, scaled):
"""Set positions relative to unit cell."""
self.arrays['positions'][:] = np.dot(scaled, self._cell)
def wrap(self, center=(0.5, 0.5, 0.5), pbc=None, eps=1e-7):
"""Wrap positions to unit cell.
Parameters:
center: three float
            The positions, in fractional coordinates, that the new positions
            will be nearest possible to.
pbc: one or 3 bool
For each axis in the unit cell decides whether the positions
will be moved along this axis. By default, the boundary
conditions of the Atoms object will be used.
eps: float
            Small number to prevent slightly negative coordinates from being
            wrapped.
See also the :func:`ase.utils.geometry.wrap_positions` function.
Example:
>>> a = Atoms('H',
... [[-0.1, 1.01, -0.5]],
... cell=[[1, 0, 0], [0, 1, 0], [0, 0, 4]],
... pbc=[1, 1, 0])
>>> a.wrap()
>>> a.positions
array([[ 0.9 , 0.01, -0.5 ]])
"""
if pbc is None:
pbc = self.pbc
self.positions[:] = wrap_positions(self.positions, self.cell,
pbc, center, eps)
def get_temperature(self):
"""Get the temperature in Kelvin."""
ekin = self.get_kinetic_energy() / len(self)
return ekin / (1.5 * units.kB)
def __eq__(self, other):
"""Check for identity of two atoms objects.
Identity means: same positions, atomic numbers, unit cell and
periodic boundary conditions."""
try:
a = self.arrays
b = other.arrays
return (len(self) == len(other) and
(a['positions'] == b['positions']).all() and
(a['numbers'] == b['numbers']).all() and
(self._cell == other.cell).all() and
(self._pbc == other.pbc).all())
except AttributeError:
return NotImplemented
def __ne__(self, other):
"""Check if two atoms objects are not equal.
Any differences in positions, atomic numbers, unit cell or
        periodic boundary conditions make atoms objects not equal.
"""
eq = self.__eq__(other)
if eq is NotImplemented:
return eq
else:
return not eq
__hash__ = None
def get_volume(self):
"""Get volume of unit cell."""
return abs(np.linalg.det(self._cell))
def _get_positions(self):
"""Return reference to positions-array for in-place manipulations."""
return self.arrays['positions']
def _set_positions(self, pos):
"""Set positions directly, bypassing constraints."""
self.arrays['positions'][:] = pos
positions = property(_get_positions, _set_positions,
doc='Attribute for direct ' +
'manipulation of the positions.')
def _get_atomic_numbers(self):
"""Return reference to atomic numbers for in-place
manipulations."""
return self.arrays['numbers']
numbers = property(_get_atomic_numbers, set_atomic_numbers,
doc='Attribute for direct ' +
'manipulation of the atomic numbers.')
def _get_cell(self):
"""Return reference to unit cell for in-place manipulations."""
return self._cell
cell = property(_get_cell, set_cell, doc='Attribute for direct ' +
'manipulation of the unit cell.')
def _get_pbc(self):
"""Return reference to pbc-flags for in-place manipulations."""
return self._pbc
pbc = property(_get_pbc, set_pbc,
doc='Attribute for direct manipulation ' +
'of the periodic boundary condition flags.')
def write(self, filename, format=None, **kwargs):
"""Write atoms object to a file.
see ase.io.write for formats.
kwargs are passed to ase.io.write.
"""
from ase.io import write
write(filename, self, format, **kwargs)
def edit(self):
"""Modify atoms interactively through ase-gui viewer.
Conflicts leading to undesirable behaviour might arise
when matplotlib has been pre-imported with certain
incompatible backends and while trying to use the
plot feature inside the interactive ag. To circumvent,
please set matplotlib.use('gtk') before calling this
method.
"""
from ase.gui.images import Images
from ase.gui.gui import GUI
images = Images([self])
gui = GUI(images)
gui.run()
# use atoms returned from gui:
# (1) delete all currently available atoms
self.set_constraint()
for z in range(len(self)):
self.pop()
edited_atoms = gui.images.get_atoms(0)
# (2) extract atoms from edit session
self.extend(edited_atoms)
self.set_constraint(edited_atoms._get_constraints())
self.set_cell(edited_atoms.get_cell())
self.set_initial_magnetic_moments(edited_atoms.get_magnetic_moments())
self.set_tags(edited_atoms.get_tags())
return
def string2symbols(s):
"""Convert string to list of chemical symbols."""
n = len(s)
if n == 0:
return []
c = s[0]
if c.isdigit():
i = 1
while i < n and s[i].isdigit():
i += 1
return int(s[:i]) * string2symbols(s[i:])
if c == '(':
p = 0
for i, c in enumerate(s):
if c == '(':
p += 1
elif c == ')':
p -= 1
if p == 0:
break
j = i + 1
while j < n and s[j].isdigit():
j += 1
if j > i + 1:
m = int(s[i + 1:j])
else:
m = 1
return m * string2symbols(s[1:i]) + string2symbols(s[j:])
if c.isupper():
i = 1
if 1 < n and s[1].islower():
i += 1
j = i
while j < n and s[j].isdigit():
j += 1
if j > i:
m = int(s[i:j])
else:
m = 1
return m * [s[:i]] + string2symbols(s[j:])
else:
raise ValueError
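# Illustrative examples of the parser above (added for clarity, not in the
# original source):
#   string2symbols('H2O')      -> ['H', 'H', 'O']
#   string2symbols('(CH3)2CO') -> ['C','H','H','H','C','H','H','H','C','O']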
def symbols2numbers(symbols):
if isinstance(symbols, str):
symbols = string2symbols(symbols)
numbers = []
for s in symbols:
if isinstance(s, basestring):
numbers.append(atomic_numbers[s])
else:
numbers.append(s)
return numbers
def string2vector(v):
if isinstance(v, str):
if v[0] == '-':
return -string2vector(v[1:])
w = np.zeros(3)
w['xyz'.index(v)] = 1.0
return w
return np.array(v, float)
def default(data, dflt):
"""Helper function for setting default values."""
if data is None:
return None
elif isinstance(data, (list, tuple)):
newdata = []
allnone = True
for x in data:
if x is None:
newdata.append(dflt)
else:
newdata.append(x)
allnone = False
if allnone:
return None
return newdata
else:
return data
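if __name__ == '__main__':
    # Minimal usage sketch (added for illustration, not part of the original
    # module): build a small molecule and query a few of the derived
    # properties defined above.
    d = 1.104  # N2 bond length in Angstrom, as in the class docstring
    n2 = Atoms('N2', positions=[(0, 0, 0), (0, 0, d)])
    print(n2.get_chemical_formula())   # -> 'N2'
    print(n2.get_center_of_mass())     # midpoint of the bond
    print(n2.get_distance(0, 1))       # -> 1.104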
| suttond/MODOI | ase/atoms.py | Python | lgpl-3.0 | 58,731 | ["ASE"] | bd85d4cdfb883ead8f79a17ad9174419161eae81ef95c763ebd5f119e562bd14 |
'''
Created on Jun 6, 2013
@author: dusenberrymw
'''
import random
class Network(object):
"""A class for the overall network"""
def __init__(self):
"""Constructor"""
self.layers = []
def forward(self, input_vector):
"""
Given an input vector, forward propagate it through the network,
setting the input_vectors for all neurons, and return the output
vector of the network
"""
for layer in self.layers:
input_vector = layer.forward(input_vector)
output_vector = input_vector
return output_vector
def backward(self, downstream_gradient_vector):
"""
Given a gradient vector, calculate the gradients (error) for all
network parameters (weights/thresholds), store in each neuron,
and return the gradient vector for the inputs to the network
"""
for layer in reversed(self.layers):
downstream_gradient_vector = layer.backward(downstream_gradient_vector)
upstream_gradient_vector = downstream_gradient_vector
return upstream_gradient_vector
def cost(self, target_output_vector, reg_lambda=0):
"""
Cost J ("error") of the network, which is the summation of the cost
vector of the output layer, given the last input vector forward
propagated through the network
reg_lambda is a regularization coefficient that is multiplied by the
sum of each theta^2 in the entire network. Regularization serves
to push the theta values (weights) towards 0, thus limiting the
chance of overfitting.
"""
        thetas_squared = 0
for layer in self.layers:
for neuron in layer.neurons:
thetas_squared += sum([w**2 for w in neuron.weights])
cost = (sum(self.layers[-1].cost(target_output_vector))
+ reg_lambda/2 * thetas_squared)
return cost
def cost_gradient(self, target_output_vector):
"""
Cost gradient vector (partial derivatives) of the network, which is
the cost gradient vector of the output layer, given the last input
vector forward propagated through the network
"""
cost_gradient_vector = self.layers[-1].cost_gradient(target_output_vector)
return cost_gradient_vector
def update_parameters(self, learning_rate, batch_size, reg_lambda=0):
"""
Adjust the weights and threshold down the gradient to reduce error
"""
for layer in self.layers:
layer.update_parameters(learning_rate, batch_size, reg_lambda)
def reset_gradients(self):
"""
Set all parameter gradients to 0
"""
for layer in self.layers:
layer.reset_gradients()
class Layer(object):
"""A class for layers in the network"""
def __init__(self):
"""Constructor"""
self.neurons = []
def forward(self, input_vector):
"""
Given an input vector [from previous layer], compute the output vector
of this layer of neurons in a forward pass
"""
output_vector = [n.forward(input_vector) for n in self.neurons]
return output_vector
def backward(self, downstream_gradient_vector):
"""
Given an error gradient vector from the downstream layer, calculate
the gradients (error) for this layer
"""
gradient_vectors = [neuron.backward(downstream_gradient)
for neuron, downstream_gradient
in zip(self.neurons, downstream_gradient_vector)]
# now reduce the vectors into one vector using element-wise summation,
# since each neuron in this layer shares the same input sources
upstream_gradient_vector = gradient_vectors.pop()
for gradient_vector in gradient_vectors:
for i in range(len(gradient_vector)):
upstream_gradient_vector[i] += gradient_vector[i]
return upstream_gradient_vector
def cost(self, target_output_vector):
"""
Cost ("error") vector of this layer, given the last input vector
forward propagated through the layer
"""
cost_vector = [neuron.cost(target_output)
for neuron, target_output
in zip(self.neurons, target_output_vector)]
return cost_vector
def cost_gradient(self, target_output_vector):
"""
Cost gradient vector (partial derivatives) of this layer, given the
last input vector forward propagated through the layer
"""
cost_gradient_vector = [neuron.cost_gradient(target_output)
for neuron, target_output
in zip(self.neurons, target_output_vector)]
return cost_gradient_vector
def update_parameters(self, learning_rate, batch_size, reg_lambda=0):
"""
Adjust the weights and threshold down the gradient to reduce error
"""
for neuron in self.neurons:
neuron.update_parameters(learning_rate, batch_size, reg_lambda)
def reset_gradients(self):
"""
Set all parameter gradients to 0
"""
for neuron in self.neurons:
neuron.reset_gradients()
class Neuron(object):
"""A class for neurons in the network"""
def __init__(self, num_inputs, activation_function):
"""Constructor"""
self.input_vector = [0]*num_inputs # inputs coming from prev neurons
self.output = 0.0 # the activation of this neuron
self.activation_function = activation_function
# need a weight for each input to the neuron
self.weights = [random.uniform(-0.9,0.9) for _ in range(num_inputs)]
self.threshold = random.uniform(-0.9,0.9)
# gradients
self.weight_gradients = [0]*num_inputs
self.threshold_gradient = 0
def forward(self, input_vector):
"""
Given an input vector from previous layer neurons, compute the output
of the neuron in a forward pass
Explanation:
self.output = f(z), where z = w1x1 + w2x2 + ... + wnxn + thresh
and f() is the activation function
"""
# keep track of what inputs were sent to this neuron
self.input_vector = input_vector
# multiply each input with the associated weight for that connection,
# then add the threshold value
net_input = (sum([x*y for x, y in zip(input_vector, self.weights)])
+ self.threshold)
# finally, use the activation function to compute the output
self.output = self.activation_function.activate(net_input)
return self.output
def backward(self, downstream_gradient):
"""
Given an error gradient from the downstream layer, calculate the
gradients (error) for each of the parameters (weights & threshold)
and add to the existing corresponding gradients (for batch
purposes).
Explanation:
self.output = f(z), where z = w1x1 + w2x2 + ... + wnxn + thresh
Therefore, the derivative of the output wrt weight i is:
doutput/dwi = f'(z) * dz/dwi, where dz/dwi = xi
The gradient ("downstream error") is passed in and multiplied by
the derivative wrt to a weight to compute the gradient for
that weight.
"""
chain_gradient = (downstream_gradient *
self.activation_function.derivative(self.output))
for i in range(len(self.weights)):
self.weight_gradients[i] += chain_gradient * self.input_vector[i]
        self.threshold_gradient += chain_gradient  # * 1, since threshold 'input' = 1
input_gradients = [0]*len(self.input_vector)
for i in range(len(self.input_vector)):
input_gradients[i] = chain_gradient * self.weights[i]
return input_gradients
def cost(self, target_output):
"""
Cost ("error") of this neuron, given the last input vector forward
propagated through the neuron
"""
cost = self.activation_function.cost(self.output, target_output)
return cost
def cost_gradient(self, target_output):
"""
Cost gradient (partial derivative of cost) of this neuron, given the
last input vector forward propagated through the neuron
This will basically determine how much the hypothesis (output of the
neuron) contributed to the cost ("error") of the neuron
"""
cost_gradient = self.activation_function.cost_derivative(self.output,
target_output)
return cost_gradient
def update_parameters(self, learning_rate, batch_size, reg_lambda=0):
"""
Update each neuron's weights and threshold value by subtracting the
average gradient multiplied by the learning rate, alpha. Subtract
because the gradient will give direction of cost increase, and
we want to move in the opposite direction (gradient descent) in
order to lower the overall error (minimize the cost function, J).
Also, use a lambda regularization term to penalize the weights
further, which will push the weights towards 0.
"""
for i in range(len(self.weights)):
self.weights[i] -= (learning_rate/batch_size *
(self.weight_gradients[i] +
reg_lambda*self.weights[i]))
self.threshold -= learning_rate/batch_size * (self.threshold_gradient)
def reset_gradients(self):
"""
Set all parameter gradients to 0
"""
self.weight_gradients = [0]*len(self.weight_gradients)
self.threshold_gradient = 0
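if __name__ == '__main__':
    # Minimal usage sketch (added for illustration, not part of the original
    # module). Pine's real activation classes live elsewhere; the Sigmoid
    # below is a hypothetical stand-in implementing the interface a Neuron
    # expects (activate, derivative, cost, cost_derivative).
    import math

    class Sigmoid(object):
        def activate(self, z):
            return 1.0 / (1.0 + math.exp(-z))
        def derivative(self, output):
            # derivative of the sigmoid expressed in terms of its output f(z)
            return output * (1.0 - output)
        def cost(self, output, target):
            return 0.5 * (output - target) ** 2
        def cost_derivative(self, output, target):
            return output - target

    net = Network()
    hidden, out = Layer(), Layer()
    hidden.neurons = [Neuron(2, Sigmoid()) for _ in range(3)]
    out.neurons = [Neuron(3, Sigmoid())]
    net.layers = [hidden, out]
    print(net.forward([0.5, -0.2]))  # one forward pass through the network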
| dusenberrymw/Pine | pine/network.py | Python | mit | 10,126 | ["NEURON"] | 102f0aff476e600d950d958cf5a4cf78745466ece70df2afec1c21ea85b79629 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2018-08-04 20:09:38
# @Last modified by: Brian Cherinka
# @Last modified time: 2018-11-22 13:11:86
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import re
import warnings
import datetime
from collections import Counter, defaultdict
from functools import wraps
from operator import eq, ge, gt, le, lt, ne
import numpy as np
import six
from marvin import config, log
from marvin.api.api import Interaction
from marvin.core import marvin_pickle
from marvin.core.exceptions import MarvinError, MarvinUserWarning
from marvin.tools.results import Results, remote_mode_only
from marvin.utils.general import temp_setattr, getKeywordArgs
from marvin.utils.datamodel.query import datamodel
from marvin.utils.datamodel.query.base import query_params
if config.db:
from marvin import marvindb
from marvin.utils.general.structs import string_folding_wrapper
from sqlalchemy import bindparam, func, and_
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import aliased
from sqlalchemy.sql.expression import desc
from sqlalchemy_boolean_search import (BooleanSearchException, parse_boolean_search)
try:
import cPickle as pickle
except ImportError:
import pickle
__all__ = ['Query', 'doQuery']
opdict = {'<=': le, '>=': ge, '>': gt, '<': lt, '!=': ne, '=': eq, '==': eq}
def doQuery(**kwargs):
"""Convenience function for building a Query and retrieving the Results.
Parameters:
N/A:
See the :class:`~marvin.tools.query.Query` class for a list
of inputs.
Returns:
query, results:
A tuple containing the built
:class:`~marvin.tools.query.Query` instance, and the
:class:`~marvin.tools.results.Results` instance.
"""
# pop some pagination keywords
start = kwargs.pop('start', None)
end = kwargs.pop('end', None)
query_type = kwargs.pop('query_type', None)
# get Query keyword arguments and check input kwargs
qwargs = getKeywordArgs(Query)
good_kwargs = {k: v for k, v in kwargs.items() if k in qwargs}
# run the query
q = Query(**good_kwargs)
try:
res = q.run(start=start, end=end, query_type=query_type)
except TypeError as e:
warnings.warn('Cannot run, query object is None: {0}.'.format(e), MarvinUserWarning)
res = None
return q, res
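# Illustrative usage (a sketch, not from the original source; assumes a
# working Marvin configuration and that 'nsa.z' is a valid query parameter):
#   q, res = doQuery(search_filter='nsa.z < 0.1', limit=10)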
def update_config(f):
"""Decorator that updates query object with new config drpver and dapver versions."""
@wraps(f)
def wrapper(self, *args, **kwargs):
if self.query and self.data_origin == 'db':
self.query = self.query.params({'drpver': self._drpver, 'dapver': self._dapver})
return f(self, *args, **kwargs)
return wrapper
def tree():
return defaultdict(tree)
class Query(object):
''' A class to perform queries on the MaNGA dataset.
    This class is the main way of performing a query. A query works by
    minimally specifying a string filter condition in a natural language
    SQL format, as well as a list of desired parameters to return.
    Query will use a local database if it finds one. Otherwise a remote
    query uses the API to run a query on the Utah server and return the
    results.
    The Query returns a list of tupled parameters and passes them into the
    Marvin Results object. The parameters are a combination of user-defined
    return parameters, parameters used in the filter condition, and a set of
    pre-defined default parameters. The object plateifu or mangaid is always
    returned by default. For queries involving DAP properties, the bintype,
    template, and spaxel x and y are also returned by default.
Parameters:
search_filter (str):
A (natural language) string containing the filter conditions
in the query.
return_params (list):
A list of string parameter names desired to be returned in the query
return_type ({'cube', 'spaxel', 'maps', 'rss', and 'modelcube'}):
The requested Marvin Tool object that the results are converted into.
targets (list):
A list of manga_target flags to filter on
quality (list):
A list of quality flags to filter on
mode ({'local', 'remote', 'auto'}):
            The load mode to use. See :doc:`Mode decision tree</mode_decision>`.
return_all (bool):
If True, attempts to return the entire set of results. Default is False.
default_params (list):
Optionally specify additional parameters as defaults
sort (str):
The parameter name to sort the query on
order ({'asc', 'desc'}):
The sort order. Can be either ascending or descending.
limit (int):
            The limit on the number of returned results
count_threshold (int):
The threshold number to begin paginating results. Default is 1000.
nexus (str):
The name of the database table to use as the nexus point for building
the join table tree. Can only be set in local mode.
caching (bool):
If True, turns on the dogpile memcache caching of results. Default is True.
verbose (bool):
If True, turns on verbosity.
'''
def __init__(self, search_filter=None, return_params=None, return_type=None, targets=None,
quality=None, mode=None, return_all=False, default_params=None, nexus='cube',
sort='mangaid', order='asc', caching=True, limit=100, count_threshold=1000,
verbose=False, release=None):
# basic parameters
self.release = release or config.release
self._drpver, self._dapver = config.lookUpVersions(release=self.release)
self.mode = mode if mode is not None else config.mode
self.data_origin = None
# main parameters
self.search_filter = search_filter
self.return_params = return_params
self.return_type = return_type
self.default_params = default_params
self.targets = targets
self.quality = quality
self.filter_params = {}
self.params = []
self._nexus = nexus
self._results = None
self.datamodel = datamodel[self.release]
# optional parameters
self.return_all = return_all
self.sort = sort
self.order = order
self._caching = caching
self.count_threshold = count_threshold
self.limit = limit
self.verbose = verbose
# add db specific parameters
if config.db:
self._marvinform = self.datamodel._marvinform
self.session = marvindb.session
self._modelgraph = marvindb.modelgraph
# timings
self._run_time = None
self._final_time = None
# define the Query MMA
self._set_mma()
# initialize a query
if self.data_origin == 'file':
raise MarvinError('Cannot currently query a file')
elif self.data_origin == 'db':
self._init_local_query()
elif self.data_origin == 'api':
self._init_remote_query()
def __repr__(self):
return ('Marvin Query(filter={0}, mode={1}, data_origin={2})'
.format(repr(self.search_filter), repr(self.mode), repr(self.data_origin)))
@property
def nexus(self):
return self._nexus
@nexus.setter
def nexus(self, value):
if not self.data_origin == 'db':
raise MarvinError('Can only set nexus point with a local db origin')
self._nexus = value
def _set_mma(self):
''' Sets up the Query MMA system '''
if self.mode == 'local':
self._do_local()
if self.mode == 'remote':
self._do_remote()
if self.mode == 'auto':
try:
self._do_local()
except MarvinError as e:
log.debug('local mode failed. Trying remote now.')
self._do_remote()
# Sanity check to make sure data_origin has been properly set.
assert self.data_origin in ['file', 'db', 'api'], 'data_origin is not properly set.'
def _do_local(self):
''' Sets up to perform queries locally. '''
if not config.db:
warnings.warn('No local database found. Cannot perform queries.', MarvinUserWarning)
raise MarvinError('No local database found. Query cannot be run in local mode')
else:
self.mode = 'local'
self.data_origin = 'db'
def _do_remote(self):
''' Sets up to perform queries remotely. '''
if not config.urlmap:
raise MarvinError('No URL Map found. Cannot make remote query calls!')
else:
self.mode = 'remote'
self.data_origin = 'api'
def _init_local_query(self):
''' Initialize a local database query '''
# set default parameters
self._set_defaultparams()
# get user-defined input parameters
self._set_return_params()
# setup the search filter
self._set_filter()
# build the query
self._build_query()
def _init_remote_query(self):
''' Initialize a remote API query '''
# set up the parameters
returns = ','.join(self.return_params) if self.return_params else None
defaults = ','.join(self.default_params) if self.default_params else None
targets = ','.join(self.targets) if self.targets else None
quality = ','.join(self.quality) if self.quality else None
search = self.search_filter if self.search_filter else ''
self._remote_params = {'searchfilter': search,
'returnparams': returns,
'returntype': self.return_type,
'defaults': defaults,
'targets': targets,
'quality': quality,
'release': self.release,
'sort': self.sort,
'order': self.order,
'limit': self.limit,
'return_all': self.return_all,
'caching': self._caching}
def run(self, start=None, end=None, query_type=None):
''' Runs a Query
Runs a query either locally or remotely.
Parameters:
start (int):
A starting index when slicing the query
end (int):
An ending index when slicing the query
query_type (str):
The type of SQLAlchemy query to submit. Can be "raw", "core", or "orm"
Returns:
An instance of the :class:`~marvin.tools.query.results.Results`
class containing the results of your Query.
Example:
>>> # filter of "NSA redshift less than 0.1 and stellar mass > 1.e10"
>>> searchfilter = 'nsa.z < 0.1 and nsa.elpetro_mass > 1.e10'
>>> returnparams = ['cube.ra', 'cube.dec']
>>> q = Query(search_filter=searchfilter, return_params=returnparams)
>>> results = q.run()
'''
if self.data_origin == 'api':
results = self._run_remote(start=start, end=end, query_type=query_type)
elif self.data_origin == 'db':
results = self._run_local(start=start, end=end, query_type=query_type)
return results
def _run_remote(self, start=None, end=None, query_type=None):
''' Run a remote Query
Runs a query remotely. Creates a dictionary of all input parameters and
performs the appropriate API call. On return, converts the JSON results
into a Marvin Results object.
Parameters:
start (int):
A starting index when slicing the query
end (int):
An ending index when slicing the query
query_type (str):
The type of SQLAlchemy query to submit. Can be "raw", "core", or "orm"
Returns:
An instance of the :class:`~marvin.tools.query.results.Results`
class containing the results of your Query.
'''
if self.return_all:
warnings.warn('Warning: Attempting to return all results. This may take a while or crash.')
# Get the query route
url = config.urlmap['api']['querycubes']['url']
# Update the remote params
self._remote_params.update({'start': start, 'end': end, 'query_type': query_type})
# set the start time of query
starttime = datetime.datetime.now()
# Request the query
try:
ii = Interaction(route=url, params=self._remote_params, stream=True, datastream=self.return_all)
except Exception as e:
raise MarvinError('API Query call failed: {0}'.format(e))
else:
# retrieve and set some parameters
remotes = self._get_remote_parameters(ii)
remotes.update({'start': start, 'end': end})
# do results stuff here
if self.return_all:
msg = 'Returning all {0} results'.format(remotes['totalcount'])
else:
msg = 'Only returning the first {0} results.'.format(remotes['count'])
if self.verbose:
print('Results contain a total of {0}. {1}'.format(remotes['totalcount'], msg))
# get Marvin Results
final = Results(**remotes)
# get the final time
posttime = datetime.datetime.now()
self._final_time = (posttime - starttime)
return final
def _get_remote_parameters(self, interaction):
''' Retrieve or set parameters needed
Parameters:
interaction (object):
The Marvin Interaction response object
Returns:
A dict of parameters needed to pass into Marvin Results
'''
results = interaction.results
response_time = interaction.response_time
assert isinstance(results, dict), 'Interaction results must be a dictionary'
# get some parameters
data = interaction.getData()
params = results.get('params', None)
query = results.get('query', self.search_filter)
count = results.get('count', None)
chunk = results.get('chunk', self.limit)
totalcount = results.get('totalcount', None)
runtime = results.get('runtime', None)
# set some parameters when only data is available
if len(results) == 1 and 'data' in results:
# check first data row
row = data[0]
isstr = any([isinstance(i, six.string_types) for i in row])
data = data[1:] if isstr else data
# compute additional info
params = row if isstr else []
totalcount = len(data)
count = totalcount
chunk = self.limit
runtime = response_time
self.query = query
self.params = params
remotes = dict(response_time=response_time, params=params, query=query, results=data,
totalcount=totalcount, count=count, runtime=runtime, chunk=int(chunk),
mode=self.mode, queryobj=self)
return remotes
def _run_local(self, start=None, end=None, query_type=None):
''' Run a local database Query
Parameters:
start (int):
A starting index when slicing the query
end (int):
An ending index when slicing the query
query_type (str):
The type of SQLAlchemy query to submit. Can be "raw", "core", or "orm"
Returns:
An instance of the :class:`~marvin.tools.query.results.Results`
class containing the results of your Query.
'''
# Check for adding a sort
self._sort_query()
# Check to add the cache
if self._caching:
from marvin.core.caching_query import FromCache
self.query = self.query.options(FromCache("default")).\
options(*marvindb.cache_bits)
# turn on streaming of results
self.query = self.query.execution_options(stream_results=True)
# set the start time of query
starttime = datetime.datetime.now()
# check for query and get count
totalcount = self._get_query_count()
# slice the query
query = self._slice_query(start=start, end=end, totalcount=totalcount)
# run the query and get the results
results = self._get_results(query, query_type=query_type, totalcount=totalcount)
# get the runtime
endtime = datetime.datetime.now()
self._run_time = (endtime - starttime)
# convert to Marvin Results
final = Results(results=results, query=query, count=self._count, mode=self.mode,
returntype=self.return_type, queryobj=self, totalcount=totalcount,
chunk=self.limit, runtime=self._run_time, start=self._start, end=self._end)
# get the final time
posttime = datetime.datetime.now()
self._final_time = (posttime - starttime)
return final
def _sort_query(self):
''' Sort the SQLA query object by a given parameter '''
if not isinstance(self.sort, type(None)):
# check any shortcut names
self.sort = self._marvinform._param_form_lookup.get_real_name(self.sort)
# set the sort variable ModelClass parameter
if '.' in self.sort:
param = self.datamodel.parameters[str(self.sort)].full
else:
param = self.datamodel.parameters.get_full_from_remote(self.sort)
sortparam = self._marvinform._param_form_lookup.mapToColumn(param)
# check if sort param actually in the parameter list
if sortparam.class_ not in self._modellist:
return
# If order is specified, then do the sort
if self.order:
assert self.order in ['asc', 'desc'], 'Sort order parameter must be either "asc" or "desc"'
# Check if order by already applied
if 'ORDER' in str(self.query.statement):
self.query = self.query.order_by(None)
# Do the sorting
if 'desc' in self.order:
self.query = self.query.order_by(desc(sortparam))
else:
self.query = self.query.order_by(sortparam)
def _get_query_count(self):
''' Get the SQL query count of rows
First checks the query history table to look up if this query has
already been run and a count produced.
Returns:
The total count of rows for the query
'''
totalcount = None
if marvindb.isdbconnected:
qm = self._check_history(check_only=True)
totalcount = qm.count if qm else None
# run count if it doesn't exist
if totalcount is None:
totalcount = self.query.count()
return totalcount
def _check_history(self, check_only=None, totalcount=None):
''' Check the query against the query history schema
Looks up the current query in the query table of
the history schema and if found, returns the SQLA object
Parameters:
check_only (bool):
If True, only checks the history schema but does not write to it
totalcount (int):
The total count of rows to add, when adding a new query to the table
Returns:
The SQLAlchemy row from the query table of the history schema
'''
sqlcol = self._marvinform._param_form_lookup.mapToColumn('sql')
stringfilter = self.search_filter.strip().replace(' ', '') if self.search_filter else ''
rawsql = self.show().strip()
returns = ','.join(self.return_params) if self.return_params else ''
qm = self.session.query(sqlcol.class_).\
filter(sqlcol == rawsql, sqlcol.class_.release == self.release).one_or_none()
if check_only:
return qm
with self.session.begin():
if not qm:
qm = sqlcol.class_(searchfilter=stringfilter, n_run=1, release=self.release,
count=totalcount, sql=rawsql, return_params=returns)
self.session.add(qm)
else:
qm.n_run += 1
return qm
def _slice_query(self, start=None, end=None, totalcount=None):
''' Slice the SQLA query object
Parameters:
start (int):
A starting index when slicing the query
end (int):
An ending index when slicing the query
totalcount (int):
The total count of rows of the query
Returns:
A new SQLA query object that has been sliced
'''
# get the new count if start and end exist
if start and end:
count = (end - start)
else:
count = totalcount
# # run the query
# res = self.query.slice(start, end).all()
# count = len(res)
# self.totalcount = count if not self.totalcount else self.totalcount
# check history
if marvindb.isdbconnected:
__ = self._check_history(totalcount=totalcount)
if count > self.count_threshold and self.return_all is False:
# res = res[0:self.limit]
start = 0
end = self.limit
count = (end - start)
warnings.warn('Results contain more than {0} entries. '
'Only returning first {1}'.format(self.count_threshold, self.limit), MarvinUserWarning)
elif self.return_all is True:
warnings.warn('Warning: Attempting to return all results. This may take a long time or crash.', MarvinUserWarning)
start = None
end = None
elif start and end:
warnings.warn('Getting subset of data {0} to {1}'.format(start, end), MarvinUserWarning)
# slice the query
query = self.query.slice(start, end)
# set updated start, end, count, and total
self._start = start
self._end = end
self._count = count
self._total = totalcount
return query
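# Example (sketch): slicing is driven by the start/end passed to Query.run, e.g.
#     res = q.run(start=0, end=10)
# which returns only the first 10 rows and records the slice in _start/_end.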
def _get_results(self, query, query_type=None, totalcount=None):
''' Get the raw results of the query
Runs the SQLAlchemy query. query_type will determine how the query is run.
"raw" means the query is run using the psycopg2 cursor object. "core" means
the query is run using the SQLA connection object. The "raw" and "core" methods
will submit the raw sql string and retrieve the results in chunks using `fetchall`.
orm" means the query is running using the SQLA query object. This uses yield_per
to generate the results in chunk. It also folds similar strings together.
Parameters:
query (object):
The current SQLA query object
query_type (str):
The type of SQLAlchemy query to submit. Can be "raw", "core", or "orm". Default is "raw".
totalcount (int):
The total count of rows of the query
Returns:
A list of tupled results
'''
if query_type:
assert query_type in ['raw', 'core', 'orm'], 'Query Type can only be raw, core, or orm.'
else:
query_type = 'raw'
# run the query
if query_type == 'raw':
# use the db api cursor
sql = str(self._get_sql(query))
conn = marvindb.db.engine.raw_connection()
cursor = conn.cursor('query_cursor')
cursor.execute(sql)
res = self._fetch_data(cursor)
conn.close()
elif query_type == 'core':
# use the core connection
sql = str(self._get_sql(query))
with marvindb.db.engine.connect() as conn:
results = conn.execution_options(stream_results=True).execute(sql)
res = self._fetch_data(results)
elif query_type == 'orm':
# use the orm query
yield_num = int(10 ** np.floor(np.log10(max(totalcount, 1))))  # guard against log10(0)
results = string_folding_wrapper(query.yield_per(yield_num), keys=self.params)
res = list(results)
return res
def _fetch_data(self, obj, n_rows=100000):
''' Fetch query results using fetchall or fetchmany
Parameters:
obj (object):
SQLAlchemy connection object or Pyscopg2 cursor object
n_rows (int):
The number of rows to fetch at a time
Returns:
A list of results from a query
'''
res = []
if not self.return_all:
res = obj.fetchall()
else:
while True:
rows = obj.fetchmany(n_rows)
if rows:
res.extend(rows)
else:
break
return res
@staticmethod
def _get_sql(query):
''' Get the sql for a given query
Parameters:
query (object):
An SQLAlchemy Query object
Returns:
A raw sql string
'''
return query.statement.compile(dialect=postgresql.dialect(), compile_kwargs={'literal_binds': True})
def show(self, prop='query'):
''' Prints the query to the console
Displays the query to the console with parameter variables plugged in.
Works fully only in local mode; in remote mode, only the search filter is returned.
Input prop can be one of query, joins, or filter.
Allowed Values for Prop:
- query: displays the entire query (default if nothing specified)
- joins: displays the tables that have been joined in the query
- filter: displays only the filter used on the query
Parameters:
prop (str):
The type of info to print. Can be 'query', 'joins', or 'filter'.
Returns:
The SQL string
'''
assert prop in [None, 'query', 'joins', 'filter'], 'Input must be query, joins, or filter'
if self.data_origin == 'db':
if not prop or prop == 'query':
sql = self._get_sql(self.query)
elif prop == 'joins':
sql = self._joins
elif prop == 'filter':
if hasattr(self.query, 'whereclause'):
sql = self.query.whereclause.compile(dialect=postgresql.dialect(), compile_kwargs={'literal_binds': True})
else:
sql = 'cannot extract filter from where clause'
else:
sql = self.__getattribute__(prop)
return str(sql)
elif self.data_origin == 'api':
sql = self.search_filter
return sql
@classmethod
def get_available_params(cls, paramdisplay='best', release=None):
''' Retrieve the available parameters to query on
Retrieves a list of the available query parameters. Can either
retrieve a list of all the parameters or only the vetted parameters.
Parameters:
paramdisplay (str {all|best}):
String indicating to grab either all or just the vetted parameters.
Default is to only return 'best', i.e. vetted parameters
Returns:
A list of all of the available queryable parameters
'''
assert paramdisplay in ['all', 'best'], 'paramdisplay can only be either "all" or "best"!'
release = release or config.release
if paramdisplay == 'all':
if release not in datamodel:
raise MarvinError('release {0} not found in query datamodel'.format(release))
qparams = datamodel[release].groups.list_params('full')
elif paramdisplay == 'best':
qparams = query_params
return qparams
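# Usage sketch: list the vetted ("best") parameters for the current release
#     params = Query.get_available_params('best')
# or every queryable parameter for an explicit release (release name illustrative):
#     params = Query.get_available_params('all', release='MPL-7')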
@remote_mode_only
def save(self, path=None, overwrite=False):
''' Save the query as a pickle object
Parameters:
path (str):
Filepath and name of the pickled object
overwrite (bool):
Set this to overwrite an existing pickled file
Returns:
path (str):
The filepath and name of the pickled object
'''
sf = self.search_filter.replace(' ', '') if self.search_filter else 'anon'
# set the path
if not path:
path = os.path.expanduser('~/marvin_query_{0}.mpf'.format(sf))
# check for file extension
if not os.path.splitext(path)[1]:
path = os.path.join(path + '.mpf')
path = os.path.realpath(path)
if os.path.isdir(path):
raise MarvinError('path must be a full route, including the filename.')
if os.path.exists(path) and not overwrite:
warnings.warn('file already exists. Not overwriting.', MarvinUserWarning)
return
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
# set bad pickled attributes to None
attrs = ['session', 'datamodel', '_marvinform', '_modelgraph']
# pickle the query
try:
with temp_setattr(self, attrs, None):
pickle.dump(self, open(path, 'wb'), protocol=-1)
except Exception as ee:
if os.path.exists(path):
os.remove(path)
raise MarvinError('Error found while pickling: {0}'.format(str(ee)))
return path
@classmethod
def restore(cls, path, delete=False):
''' Restore a pickled object
Parameters:
path (str):
The filename and path to the pickled object
delete (bool):
Turn this on to delete the pickled file upon restore
Returns:
Query (instance):
The instantiated Marvin Query class
'''
obj = marvin_pickle.restore(path, delete=delete)
obj._modelgraph = marvindb.modelgraph
obj.session = marvindb.session
obj.datamodel = datamodel[obj.release]
obj.marvinform = obj.datamodel._marvinform
return obj
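# Round-trip sketch (remote-mode queries only, per the @remote_mode_only guard):
#     path = q.save('~/myquery.mpf', overwrite=True)
#     q2 = Query.restore(path, delete=True)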
def update_return_params(self, params):
''' Update the list of return parameters '''
#
# This section describes the methods that run for local database queries
#
def _set_defaultparams(self):
''' Set the default parameters
Loads any default parameters set. Will also include appropriate
defaults when specifying an object return type
Default Parameters for Objects:
- Cubes/RSS - plateifu/mangaid
- Maps/ModelCube - plateifu/mangaid, bintype, template
- Spaxel - plateifu/mangaid, x, y, bintype, template
'''
if self.return_type:
assert self.return_type in ['cube', 'spaxel', 'maps', 'rss',
'modelcube'], ('Query return_type must be either cube, spaxel, '
'maps, modelcube, rss')
# set some initial defaults
assert isinstance(self.default_params, (list, type(None))), 'default_params must be a list'
defaults = self.default_params or (['cube.mangaid', 'cube.plateifu'] if self.nexus == 'cube' else [])
extras = []
if self.return_type == 'cube':
extras = ['cube.mangaid', 'cube.plateifu']
elif self.return_type == 'spaxel':
pass
elif self.return_type == 'modelcube':
extras = ['bintype.name', 'template.name']
elif self.return_type == 'rss':
pass
elif self.return_type == 'maps':
extras = ['bintype.name', 'template.name']
defaults.extend([e for e in extras if e not in defaults])
self.default_params = defaults
# add the defaults to the main set of parameters
self.params.extend(self.default_params)
def _set_return_params(self):
''' Set the return parameters '''
# set the initial returns list
returns = self.return_params or []
returns = [returns] if not isinstance(returns, list) else returns
# look up shortcut names for the return parameters
full_returnparams = [self._marvinform._param_form_lookup._nameShortcuts[rp]
if rp in self._marvinform._param_form_lookup._nameShortcuts else rp
for rp in returns]
self.return_params = full_returnparams
# remove any return parameters that aren't already in the list of params
use_only = self._filter_duplicates(full_returnparams)
# add the return parameters to the main set of parameters
self.params.extend(use_only)
def _set_filter(self):
''' Set up the search filter '''
# do nothing if nothing
if not self.search_filter:
return
# check and parse the SQL string
self._parse_sql_string()
def _parse_sql_string(self):
''' Parse the SQL string '''
# if params is a string, then parse and filter
if not isinstance(self.search_filter, six.string_types):
raise MarvinError('Input parameters must be a natural language string!')
else:
self._check_shortcuts_in_filter()
try:
parsed = parse_boolean_search(self.search_filter)
except BooleanSearchException as e:
raise MarvinError('Your boolean expression contained a syntax error: {0}'.format(e))
# update the parameters dictionary
self._check_parsed(parsed)
self.filter_params.update(parsed.params)
# remove keys that are already in the list of params
filterkeys = self._filter_duplicates(parsed.uniqueparams)
self.params.extend(filterkeys)
def _check_shortcuts_in_filter(self):
''' Check for shortcuts in string filter and replace them '''
# find all named parameters in the filter
keys = re.findall(r'(?<!\d)[a-z\._]+\d*', self.search_filter)
# remove the boolean operators
keys = [i for i in keys if i not in ['and', 'or', 'not']]
# look up real names of all keys
real_names = {k: self._marvinform._param_form_lookup.get_real_name(k) for k in keys}
# replace the shortcut names with real ones
sf = self.search_filter
for key, value in real_names.items():
if key in sf:
sf = sf.replace(key, value)
self.search_filter = sf
def _check_parsed(self, parsed):
''' Check the boolean parsed object
check for function conditions vs normal. This should be moved
into SQLalchemy Boolean Search
'''
# Triggers for only one filter and it is a function condition
if hasattr(parsed, 'fxn_name'):
parsed.functions = [parsed]
self._parsed = parsed
def _filter_duplicates(self, columns, use_params='all'):
''' Filter out parameter duplicates
Parameters:
columns (list):
A list of parameter names to check for duplicates in
use_params (str):
Indicates which parameter set to check the input columns against.
Choices are "all" (default), "default", "return", "or filter".
"all" uses self.params
Returns:
A list of new parameter names to add without existing duplicates
'''
assert use_params in ['all', 'default', 'return', 'filter'], (
'Can only be "all", "default", "return", or "filter"')
# get the params attribute
if use_params == 'all':
param_name = 'params'
else:
param_name = '{0}_params'.format(use_params)
params = self.__getattribute__(param_name)
# perform the check
use_only = []
for col in columns:
# get the shortcut name
shortcol = self._marvinform._param_form_lookup.get_shortcut_name(col)
# if neither the column nor its short name is already in the list of params, add it
if col not in params and shortcol not in params:
# don't use the real column name for spaxelprop
key = shortcol if 'spaxelprop' in col else col
use_only.append(key)
return use_only
def _check_for(self, parameters, schema=None, tables=None, only=None):
''' Check if a schema or set of table names is in the provided parameters
Checks a list of parameters to see if any schema or table names are present
Parameters:
parameters (list):
List of string parameters names to use in check
schema (str):
Schema name to check for in parameter list
tables (list):
List of table names to check for in parameter list
only (bool):
If True, checks if all parameters match the schema/table name conditions
Returns:
True if any/all of the parameters match the schema or tables
'''
# function to check if all or any parameters meet conditions
fxn = all if only else any
fparams = self._marvinform._param_form_lookup.mapToColumn(parameters)
fparams = [fparams] if not isinstance(fparams, list) else fparams
if schema and not tables:
inschema = [schema in c.class_.__table__.schema for c in fparams]
return True if fxn(inschema) else False
if tables:
schema_cond = schema if schema else ''
tables = [tables] if not isinstance(tables, list) else tables
# convert to full names
tables = [self._marvinform._param_form_lookup._tableShortcuts.get(t, t) for t in tables]
# get the parameter table names
param_tables = [c.class_.__table__.name for c in fparams
if schema_cond in c.class_.__table__.schema]
if only:
diff = set(param_tables) ^ set(tables)
return diff == set()
else:
intables = sum([[t in c for c in param_tables] for t in tables], [])
return True if fxn(intables) else False
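# Example (sketch): _build_query below uses this check as
#     self._check_for(self.params, schema='dapdb', tables=['dapall'], only=True)
# i.e. it tests whether *all* parameters resolve to the listed dapdb tables.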
def _build_query(self):
''' Build the query '''
# create SQLA query parameters
self._set_query_parameters()
# create base SQLA object
self._create_base_query()
# join tables
self._join_tables()
# add condition
self._add_condition()
# add PipelineInfo
self._add_pipeline()
# check if the query filter is functional
self._run_functional_queries()
# check if the query parameters are against the DAP
if self._check_for(self.params, schema='dapdb'):
# Checks if the only table queried from dapdb is dapall. In that
# case we allow the query.
# checking for dapall, bintype, template since we have default parameters now
all_dapall = self._check_for(self.params, schema='dapdb',
tables=['dapall', 'bintype', 'template'], only=True)
if not all_dapall and not config._allow_DAP_queries:
raise NotImplementedError(
'DAP spaxel queries are disabled in this version. '
'We plan to reintroduce this feature in the future.')
self._check_dapall_query()
def _set_query_parameters(self):
''' Creates a list of database ModelClasses from a list of parameter names '''
# adjust the default parameters for any necessary DAP
self._add_default_params(['spaxelprop.x', 'spaxelprop.y', 'bintype.name', 'template.name'],
tables=['spaxelprop', 'modelspaxel'])
# adjust the default parameters for any necessary DAP
self._add_default_params(['bintype.name', 'template.name'], tables=['dapall'])
# adjust the default parameters for any necessary DAP
self._add_default_params(['obsinfo.expnum', 'obsinfo.mgdpos'], tables=['obsinfo'])
# final check to remove duplicates
parset = set()
psadd = parset.add
self.params = [item for item in self.params if not (item in parset or psadd(item))]
# create the list of parameter attributes
queryparams = self._marvinform._param_form_lookup.mapToColumn(self.params)
if not isinstance(queryparams, list):
queryparams = [queryparams]
# create a de-duplicated list of parameters, maintaining the column order
seen = set()
self._query_params = [item for item in queryparams if not (item in seen or seen.add(item))]
self._query_params_order = [q.key for q in self._query_params]
def _add_default_params(self, columns, tables=None):
''' Add new default parameters into the query
Parameters:
columns (list):
A list of column names to add
tables (list):
A list of table names to check exists within the query
'''
# adjust the default parameters for any necessary columns
if self._check_for(self.params, tables=tables):
# remove any columns that are also defaults
use_only = [c for c in columns if c not in self.default_params]
self.default_params.extend(use_only)
self.params.extend(use_only)
def _create_base_query(self):
''' Create the base query session object. Passes in a list of parameters defined in
returnparams, filterparams, and defaultparams
'''
labeledqps = [qp.label(self.params[i]) for i, qp in enumerate(self._query_params)]
self.query = self.session.query(*labeledqps)
@update_config
def _join_tables(self):
''' Build the join statement from the input parameters '''
# from marvin import marvindb
ifu = marvindb.datadb.IFUDesign
self._joins = []
# build list of SQLA models for the joins from the parameters
self._modellist = []
for param in self._query_params:
# add the proper parameter Model
if param.class_ not in self._modellist:
self._modellist.append(param.class_)
# if plateifu is a parameter, make sure we add the IFUDesign Model
if 'plateifu' in str(param) and ifu not in self._modellist:
self._modellist.append(ifu)
# if there are no additional join tables, return
if len(set(self._modellist)) == 1:
return
# Look up the nexus point. If nexus is still none, takes the most common table from
# the list of query parameters. Default nexus is cube.
nexus = self._marvinform.look_up_table(self.nexus)
if not nexus:
nexus = Counter(self._modellist).most_common(1)[0][0]
# Gets the list of joins from ModelGraph. Uses Cube as nexus, so that
# the order of the joins is the correct one.
joinmodellist = self._modelgraph.getJoins(self._modellist, format_out='models', nexus=nexus)
# Add the tables from the join list into the query
for model in joinmodellist:
name = '{0}.{1}'.format(model.__table__.schema, model.__tablename__)
if not self._table_in_query(name):
self._joins.append(model.__tablename__)
if 'template' not in model.__tablename__:
self.query = self.query.join(model)
else:
# assume template_kin only now, TODO deal with template_pop later
self.query = self.query.join(model, marvindb.dapdb.Structure.template_kin)
def _table_in_query(self, name):
''' Checks if a given SQL table is already in the SQL query '''
# do the check
try:
isin = name in str(self.query._from_obj[0])
except IndexError as e:
isin = False
except AttributeError as e:
if isinstance(self.query, six.string_types):
isin = name in self.query
else:
isin = False
return isin
def _add_condition(self):
''' Loop over all input forms and add a filter condition based on the input parameter form data. '''
# validate the forms
self._validate_forms()
# build the actual filter
self._build_filter()
# add the filter to the query
if self.search_filter and not isinstance(self.filter, type(None)):
self.query = self.query.filter(self.filter)
# check for targets and quality flags to add in the filter
self._check_targets()
self._check_quality()
def _validate_forms(self):
''' Validate all the data in the forms '''
errors = []
forms = self._set_forms()
isgood = [form.validate() for form in forms.values()]
if not all(isgood):
inds = np.where(np.invert(isgood))[0]
for index in inds:
errors.append(list(forms.values())[index].errors)
raise MarvinError('Parameters failed to validate: {0}'.format(errors))
def _set_forms(self):
''' Set the appropriate WTForms in myforms and set the parameters '''
forms = defaultdict(str)
paramtree = tree()
for key in self.filter_params.keys():
forms[key] = self._marvinform.callInstance(self._marvinform._param_form_lookup[key], params=self.filter_params)
paramtree[forms[key].Meta.model.__name__][key]
return forms
def _build_filter(self):
''' Builds a filter condition to load into sqlalchemy filter. '''
# do nothing if nothing
if not self.search_filter:
return
try:
self.filter = self._parsed.filter(self._modellist)
except BooleanSearchException as e:
raise MarvinError('Your boolean expression could not be mapped to a model: {0}'.format(e))
def _check_targets(self):
''' Check for any target flags to add into the filter
Checks for input manga_target flag labels in the Query.targets attribute
and adds them to the Query filter condition. All target labels are joined with
a Boolean "or", then joined to the existing filter through a Boolean "and".
To perform a bitwise not on a quality flag, specify the flag and value explicitly
in the search_filter. E.g. "cube.manga_target1 & ~1024"
Available target options are:
primary - selects targets in both the PRIMARY_V1_2_0 and COLOR_ENHANCED_V1_2_0 samples
color-enhanced - selects targets in the COLOR_ENHANCED_V1_2_0 sample
secondary - selects targets in the SECONDARY_V1_2_0 sample
ancillary - selects any targets in the ANCILLARY sample
stellar library - selects targets from the stellar library
flux standards - selects flux standard stars
any ancillary catalog name, e.g. 'MWA', or 'DWARF'
Example
>>> # perform query selecting primary and secondary targets
>>> q = Query(targets=['primary', 'secondary'])
>>>
>>> # select ancillary targets from Milky Way Analogs and Dwarfs
>>> q = Query(targets=['MWA', 'DWARF'])
>>>
>>> # select all targets that are not a part of the PRIMARY_v1_2_0 sample
>>> q = Query(search_filter='cube.manga_target1 & ~1024')
>>>
'''
if not self.targets:
return
targets = [self.targets] if not isinstance(self.targets, list) else self.targets
targets = [t.upper() for t in targets]
# check for ancillary targets
ancillaries = [a.upper() for a in self.datamodel.bitmasks['MANGA_TARGET3'].schema.label]
anc_labels = list(set(targets) & set(ancillaries))
targets = list(set(targets) - set(ancillaries))
# build the string filter
target_filter = ''
for target in targets:
target_filter = self._create_target_filter(target, target_filter=target_filter)
# add in any ancillary targets
if anc_labels:
target_filter = self._create_target_filter('ancillary', target_filter=target_filter, ancillaries=anc_labels)
# parse the filter and add to the main
self._add_filter(target_filter)
def _create_target_filter(self, target, target_filter='', ancillaries=None):
''' Create a string target bitwise filter
Creates a string manga_target filter bitwise filter. If passed an existing
target_filter string, it will append the new filter as an "or" boolean condition.
Example filter syntax is 'cube.manga_target1 & 5120'
Parameters:
target (str):
The name of the target sample to load
target_filter (str):
The current existing target filter string
ancillaries (list):
A list of ancillary target labels
Returns:
A string filter condition
'''
target = target.lower()
defaults = ['primary', 'color-enhanced', 'secondary', 'ancillary', 'stellar library', 'flux standards']
#options = defaults + ancillaries
assert target in defaults, 'target list can only contain one of {0}'.format(defaults)
if 'primary' == target:
name = 'manga_target1'
value = self.datamodel.bitmasks['MANGA_TARGET1'].labels_to_value(['PRIMARY_v1_2_0', 'COLOR_ENHANCED_v1_2_0'])
elif 'color-enhanced' == target:
name = 'manga_target1'
value = self.datamodel.bitmasks['MANGA_TARGET1'].labels_to_value(['COLOR_ENHANCED_v1_2_0'])
elif 'secondary' == target:
name = 'manga_target1'
value = self.datamodel.bitmasks['MANGA_TARGET1'].labels_to_value(['SECONDARY_v1_2_0'])
elif 'ancillary' == target:
name = 'manga_target3'
value = self.datamodel.bitmasks['MANGA_TARGET3'].labels_to_value(ancillaries) if ancillaries else 0
elif 'stellar library' == target:
name = 'manga_target2'
value = sum([1<<i for i in (self.datamodel.bitmasks['MANGA_TARGET2'].schema.bit[2:17])])
elif 'flux standards' == target:
name = 'manga_target2'
bits = [20, 22, 23, 25, 26, 27]
value = sum([1<<i for i in bits])
# add the column to the query
self._add_columns('cube.{0}'.format(name))
base = ' or ' if target_filter else ''
op = '>' if value == 0 else '&'
target_filter += '{0}cube.{1} {2} {3}'.format(base, name, op, value)
return target_filter
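# Example (sketch): successive calls chain conditions with "or", producing a
# string filter such as 'cube.manga_target1 & 5120 or cube.manga_target3 > 0'
# that _check_targets then hands to _add_filter.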
def _check_quality(self):
''' Check for any quality flags to add into the filter
Checks for input DRP or DAP quality flag labels in the Query.quality attribute
and adds them to the Query filter condition. All quality labels are joined with
a Boolean "or", then joined to the existing filter through a Boolean "and".
To perform a bitwise not on a quality flag, specify the flag and value explicitly
in the search_filter. E.g. "cube.quality & ~64"
Example
>>> # perform query selecting galaxies with a bad flux calibration
>>> q = Query(quality=['BADFLUX'])
>>>
>>> # select galaxies that do not have bad flux calibration
>>> q = Query(search_filter='cube.quality & ~256')
>>>
'''
if not self.quality:
return
# format the quality flags
quality = [self.quality] if not isinstance(self.quality, list) else self.quality
quality = [t.upper() for t in quality]
# get all the available flags
flags = sum([list(i.schema.label) for i in self.datamodel.bitmasks.values()
if 'QUAL' in i.name or 'MASK' in i.name], [])
assert set(quality).issubset(set(flags)), 'quality flag must be one of {0}'.format(flags)
# get any individual label sets
drplabels = self._get_labelset(quality, name='MANGA_DRP3QUAL')
daplabels = self._get_labelset(quality, name='MANGA_DAPQUAL')
dapspeclabels = self._get_labelset(quality, name='MANGA_DAPSPECMASK')
dappixlabels = self._get_labelset(quality, name='MANGA_DAPPIXMASK')
# build the quality filter
quality_filter = ''
if drplabels:
quality_filter = self._create_quality_filter(drplabels, flag='DRP3QUAL', quality_filter=quality_filter)
if daplabels:
quality_filter = self._create_quality_filter(daplabels, flag='DAPQUAL', quality_filter=quality_filter)
# parse the filter and add to the main
if quality_filter:
spaxelprop = marvindb.dapdb.__getattribute__('Clean{0}'.format(self.datamodel.dap_datamodel.property_table))
models = [marvindb.datadb.Cube, marvindb.dapdb.File, spaxelprop]
self._add_filter(quality_filter, modellist=models)
def _get_labelset(self, flags, name=None):
''' Return matching labels in the set of flags
Selects out those labels that are in the set of labels from the
named flag.
Parameters:
flags (list):
A list of the input flag labels
name (str):
The name of the flag set to match against
Returns:
The list of labels that are in the named flag set
'''
# return empty list if the bitmask set not available
if name not in self.datamodel.bitmasks:
return []
labels = [q.upper() for q in self.datamodel.bitmasks[name].schema.label]
flags = [f.upper() for f in flags]
inset = list(set(flags) & set(labels))
return inset
def _create_quality_filter(self, labels, flag='DRP3QUAL', quality_filter=''):
''' Create a string quality bitwise filter
Creates a string quality filter bitwise filter. If passed an existing
quality_filter string, it will append the new filter as an "or" boolean condition.
Example filter syntax is 'cube.quality & 64'
Parameters:
labels (list):
A list of string quality flag labels
flag (str):
The short hand name of the flag, i.e. MANGA_XXXX
quality_filter (str):
An existing string quality filter condition
Returns:
A string quality filter condition
'''
# get the value given the labels
name = 'MANGA_{0}'.format(flag.upper())
value = self.datamodel.bitmasks[name].labels_to_value(labels)
# add the column to the query
colname = 'cube.quality' if 'DRP' in flag else 'file.quality'
self._add_columns(colname)
base = ' or ' if quality_filter else ''
quality_filter += '{0}{1} & {2}'.format(base, colname, value)
return quality_filter
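# Example (sketch): for labels=['BADFLUX'] with flag='DRP3QUAL' this yields a
# string like 'cube.quality & 256' (the exact value comes from the bitmask
# schema), prefixed with ' or ' when an earlier condition already exists.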
def _add_filter(self, strfilter, modellist=None):
''' Parse and add a string filter into the query
Pass a string filter condition into the boolean parser and
explicitly add it to the query filter
Parameters:
strfilter (str):
The filter condition to parse
modellist (list):
A list of models needed for the filter to
identify the correct parameters
'''
modellist = modellist if modellist else marvindb.datadb
# parse the filter and add to the main
parsed = parse_boolean_search(strfilter)
f = parsed.filter(modellist)
self.query = self.query.filter(and_(f))
def _add_columns(self, columns):
''' Add columns to the query
Parameters:
columns (list):
A list of string column names to add to the query
'''
# get new columns not already added
columns = [columns] if not isinstance(columns, list) else columns
new_columns = list(set(columns) - set(self.params))
if new_columns:
colattrs = self._marvinform._param_form_lookup.mapToColumn(new_columns)
colattrs = [colattrs] if not isinstance(colattrs, list) else colattrs
self.query = self.query.add_columns(*colattrs)
self.params.extend(new_columns)
def _add_pipeline(self):
''' Adds the DRP and DAP Pipeline Info into the Query '''
self._drp_alias = aliased(marvindb.datadb.PipelineInfo, name='drpalias')
self._dap_alias = aliased(marvindb.datadb.PipelineInfo, name='dapalias')
drppipe = self._get_pipe_info('drp')
dappipe = self._get_pipe_info('dap')
# Add DRP pipeline version
if drppipe:
self.query = self.query.join(self._drp_alias, marvindb.datadb.Cube.pipelineInfo).\
filter(self._drp_alias.pk == drppipe.pk)
# Add DAP pipeline version
if dappipe:
self.query = self.query.join(self._dap_alias, marvindb.dapdb.File.pipelineinfo).\
filter(self._dap_alias.pk == dappipe.pk)
def _get_pipe_info(self, pipename):
''' Retrieve the pipeline Info for a given pipeline version name '''
assert pipename.lower() in ['drp', 'dap'], 'Pipeline Name must either be DRP or DAP'
# bindparam values
bindname = 'drpver' if pipename.lower() == 'drp' else 'dapver'
bindvalue = self._drpver if pipename.lower() == 'drp' else self._dapver
# class names
if pipename.lower() == 'drp':
inclasses = self._table_in_query('cube') or 'cube' in str(self.query.statement.compile())
elif pipename.lower() == 'dap':
inclasses = self._table_in_query('file') or 'file' in str(self.query.statement.compile())
# set alias
pipealias = self._drp_alias if pipename.lower() == 'drp' else self._dap_alias
# get the pipeinfo
if inclasses:
pipeinfo = marvindb.session.query(pipealias).\
join(marvindb.datadb.PipelineName, marvindb.datadb.PipelineVersion).\
filter(marvindb.datadb.PipelineName.label == pipename.upper(),
marvindb.datadb.PipelineVersion.version == bindparam(bindname, bindvalue)).one()
else:
pipeinfo = None
return pipeinfo
def _group_by(self, params=None):
''' Group the query by a set of parameters
Parameters:
params (list):
A list of string parameter names to group the query by
Returns:
A new SQLA Query object
'''
if not params:
params = [d for d in self.default_params if 'spaxelprop' not in d]
#newdefaults = self._marvinform._param_form_lookup.mapToColumn(params)
newdefaults = [d for d in self._query_params if str(d).lower() in params]
self.default_params = params
self.params = params
newq = self.query.from_self(*newdefaults).group_by(*newdefaults)
return newq
def _check_query(self, name):
''' Check if string is inside the query statement '''
qstate = str(self.query.statement.compile(compile_kwargs={'literal_binds': True}))
return name in qstate
def _update_filter_params(self, param):
''' Update the input parameters '''
# decode bytes first, then map any '*' wildcards to SQL '%' wildcards
param = {key: val.decode('UTF-8').replace('*', '%') for key, val in param.items()
if key in self.filter_params}
self.filter_params.update(param)
def _already_in_filter(self, names):
''' Checks if the parameter name already added into the filter '''
infilter = None
if names:
if not isinstance(self.query, type(None)):
if not isinstance(self.query.whereclause, type(None)):
wc = str(self.query.whereclause.compile(dialect=postgresql.dialect(),
compile_kwargs={'literal_binds': True}))
infilter = any([name in wc for name in names])
return infilter
#
# Methods specific to functional queries
#
def _run_functional_queries(self):
''' Checks for functional filter conditions and runs them '''
if not self.search_filter:
return
# check for additional modifier criteria
if self._parsed.functions:
# loop over all functions
for fxn in self._parsed.functions:
# look up the function name in the marvinform dictionary
try:
methodname = self._marvinform._param_fxn_lookup[fxn.fxn_name]
except KeyError as e:
raise MarvinError('Could not set function: {0}'.format(e))
else:
# run the method
methodcall = self.__getattribute__(methodname)
methodcall(fxn)
def _get_good_spaxels(self):
''' Subquery - Counts the number of good spaxels
Counts the number of good spaxels with binid != -1
Uses spaxelprop.binid_pk != 9999 since this is known and set.
Removes the need to join to the binid table
Returns:
bincount (subquery):
An SQLalchemy subquery to be joined into the main query object
'''
spaxelname = self._spaxelclass.__name__
bincount = self.session.query(self._spaxelclass.file_pk.label('binfile'),
func.count(self._spaxelclass.pk).label('goodcount'))
# optionally add the filter if the table is SpaxelProp
if 'CleanSpaxelProp' not in spaxelname:
bincount = bincount.filter(self._spaxelclass.binid != -1)
# group the results by file_pk
bincount = bincount.group_by(self._spaxelclass.file_pk).subquery('bingood', with_labels=True)
return bincount
def _get_count_of(self, expression):
''' Subquery - Counts spaxels satisfying an expression
Counts the number of spaxels of a given
parameter above a certain value.
Parameters:
expression (str):
The filter expression to parse
Returns:
valcount (subquery):
An SQLalchemy subquery to be joined into the main query object
Example:
>>> expression = 'spaxelprop.emline_gflux_ha_6564 >= 25'
'''
# parse the expression into name, operator, value
param, ops, value = self._parse_expression(expression)
# look up the InstrumentedAttribute, Operator, and convert Value
attribute = self._marvinform._param_form_lookup.mapToColumn(param)
op = opdict[ops]
value = float(value)
# Build the subquery
valcount = self.session.query(self._spaxelclass.file_pk.label('valfile'),
(func.count(self._spaxelclass.pk)).label('valcount')).\
filter(op(attribute, value)).\
group_by(self._spaxelclass.file_pk).subquery('goodhacount', with_labels=True)
return valcount
def _get_percent(self, fxn, **kwargs):
''' Query - Computes count comparisons
Retrieves the number of objects that satisfy a given expression
in x% of good spaxels. Expression is of the form
Parameter Operator Value. This function is mapped to
the "npergood" filter name.
Syntax: fxn_name(expression) operator value
Parameters:
fxn (str):
The function condition used in the query filter
Example:
>>> fxn = 'npergood(spaxelprop.emline_gflux_ha_6564 > 25) >= 20'
>>> Syntax: npergood() - function name
>>> npergood(expression) operator value
>>>
>>> Select objects that have Ha flux > 25 in more than
>>> 20% of their (good) spaxels.
'''
# get the appropriate SpaxelProp ModelClass
self._spaxelclass = self._marvinform._param_form_lookup['spaxelprop.file'].Meta.model
# parse the function into name, condition, operator, and value
name, condition, ops, value = self._parse_fxn(fxn)
percent = float(value) / 100.
op = opdict[ops]
# Retrieve the necessary subqueries
bincount = self._get_good_spaxels()
valcount = self._get_count_of(condition)
# Join to the main query
self.query = self.query.join(bincount, bincount.c.binfile == self._spaxelclass.file_pk).\
join(valcount, valcount.c.valfile == self._spaxelclass.file_pk).\
filter(op(valcount.c.valcount, percent * bincount.c.goodcount))
# Group the results by main default datadb parameters, so as not to include all spaxels
newdefs = [d for d in self.default_params if 'spaxelprop' not in d]
self.query = self._group_by(params=newdefs)
def _parse_fxn(self, fxn):
''' Parse a fxn condition '''
return fxn.fxn_name, fxn.condition, fxn.operator, fxn.value
def _parse_expression(self, expr):
''' Parse an expression '''
return expr.fullname, expr.op, expr.value
def _check_dapall_query(self):
''' Checks if the query is on the DAPall table, and regroups the parameters by plateifu '''
isdapall = self._check_query('dapall')
# if isdapall:
# self.query = self._group_by()
def _radial_query(self, fxn, **kwargs):
''' Runs a radial cone search around an RA, Dec
Performs a radial cone search around an RA, Dec point
within some specified radial distance in units of degrees.
Syntax: radial(ra, dec, radius)
Parameters:
fxn (str):
The function condition used in the query filter
Example:
>>> fxn = 'radial(232.5447, 48.6902, 1)'
>>> Syntax: radial() - function name
>>> radial(ra, dec, radius)
>>>
>>> Select objects that are within 1 degree of
>>> RA, Dec = (232.5447, 48.6902)
'''
# extract the RA, Dec and search radius
ra, dec = map(float, (fxn.coords))
radius = float(fxn.value)
# add RA, Dec as returned columns
self._add_columns(['cube.ra', 'cube.dec'])
# Join to the main query
cone_filter = func.q3c_radial_query(marvindb.datadb.Cube.ra, marvindb.datadb.Cube.dec, ra, dec, radius)
self.query = self.query.filter(cone_filter)
|
sdss/marvin
|
python/marvin/tools/query.py
|
Python
|
bsd-3-clause
| 67,552
|
[
"Brian"
] |
80b46f41bfa03cecbcbbd4d951ed987aa8209e104bab5e490ded88e35a4191bc
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
box = vtk.vtkBox()
box.SetXMin(0,2,4)
box.SetXMax(2,4,6)
sample = vtk.vtkSampleFunction()
sample.SetSampleDimensions(30,30,30)
sample.SetImplicitFunction(box)
sample.SetModelBounds(0,1.5,1,5,2,8)
sample.ComputeNormalsOn()
contours = vtk.vtkContourFilter()
contours.SetInputConnection(sample.GetOutputPort())
contours.GenerateValues(5,-0.5,1.5)
w = vtk.vtkPolyDataWriter()
w.SetInputConnection(contours.GetOutputPort())
w.SetFileName("junk.vtk")
# w.Write()
contMapper = vtk.vtkPolyDataMapper()
contMapper.SetInputConnection(contours.GetOutputPort())
contMapper.SetScalarRange(-0.5,1.5)
contActor = vtk.vtkActor()
contActor.SetMapper(contMapper)
# We'll put a simple outline around the data.
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(sample.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0,0,0)
# The usual rendering stuff.
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
renWin.SetSize(500,500)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.SetBackground(1,1,1)
ren1.AddActor(contActor)
ren1.AddActor(outlineActor)
camera = vtk.vtkCamera()
camera.SetClippingRange(6.31875,20.689)
camera.SetFocalPoint(0.75,3,5)
camera.SetPosition(9.07114,-4.10065,-1.38712)
camera.SetViewAngle(30)
camera.SetViewUp(-0.580577,-0.802756,0.13606)
ren1.SetActiveCamera(camera)
iren.Initialize()
# --- end of script --
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Filters/Modeling/Testing/Python/TestBoxFunction.py
|
Python
|
bsd-3-clause
| 1,761
|
[
"VTK"
] |
903714c590757a88a5eefa643ab110839f4ce58879a195c94834975ba97b1af9
|
#wonLottery = True
#bigWin = True
#if wonLottery and bigWin :
# print("you can retire! ")
#team = input("enter your favourite hockey team; ").upper()
#sport = input("enter your favourite sport: ").upper()
#if sport == "football" and team == "barcelona":
# print("putu amo")
#elif team == "madrid" or team =="atletico":
# print("fck")
#else:
# print("putu looser")
#if month == "Sep" or month =="Apr" \
# or month == "Jun" or month == "Nov":
# print("There are 30 days in this month")
#if favMovies == "Star Wars" \
# and favBook == "Lord of the Rings" \
# and favEvent == "ComicCon":
# print("You and I should hang out")
#country = input("Where are you from ?").upper()
#pet = input("Which is your fabourite animal? ").upper()
#if country == "CATALONIA" and pet == "CAT" \
# or pet == "DOG":
# print("Is Barcelona your fabourite team?")
#if country == "CANADA" and \
# pet == "MOOSE" or pet == "BEAVER" :
# print("Do you play hockey too?")
team = input("Enter your favourite hockey team: ").upper()
sport = input("Enter your favourite sport: ").upper()
#If the sport is hockey and the team is senators or leafs, display the cup message
if sport == "HOCKEY" and (team == "SENATORS" or team == "LEAFS"):
print ("Good luck getting the cup this year")
# Simplified example
sportIsHockey = False
if sport == 'HOCKEY':
sportIsHockey = True
teamIsCorrect = False
if team == 'SENATORS' or team == 'LEAFS':
teamIsCorrect = True
if sportIsHockey and teamIsCorrect:
print ("Good luck getting the cup this year")
|
aesquis/Introduccion-a-la-programacion-con-Python
|
07-Decisiones-complejas-con-codigo/examplesAndOr.py
|
Python
|
gpl-2.0
| 1,576
|
[
"MOOSE"
] |
e65d5fdd040809cfcc815ec3789f68e7e57d9f36451a7f7fa7df576942452ba5
|
import sys
tests = [
("testExecs/itertest.exe", "", {}),
("testExecs/MolOpsTest.exe", "", {}),
("testExecs/testCanon.exe", "C1OCCC1 C1CCOC1", {}),
("testExecs/testPickler.exe", "", {}),
("testExecs/test1.exe", "", {}),
("testExecs/testChirality.exe", "", {}),
("python", "test_list.py", {'dir': 'Depictor'}),
("python", "test_list.py", {'dir': 'FileParsers'}),
("python", "test_list.py", {'dir': 'SmilesParse'}),
("python", "test_list.py", {'dir': 'Substruct'}),
("python", "test_list.py", {'dir': 'Subgraphs'}),
("python", "test_list.py", {'dir': 'FragCatalog'}),
("python", "test_list.py", {'dir': 'Fingerprints'}),
("python", "test_list.py", {'dir': 'MolTransforms'}),
("python", "test_list.py", {'dir': 'Wrap'}),
("python", "test_list.py", {'dir': 'Depictor/Wrap'}),
("python", "test_list.py", {'dir': 'FragCatalog/Wrap'}),
("python", "test_list.py", {'dir': 'PartialCharges/Wrap'}),
("python", "test_list.py", {'dir': 'ForceFieldHelpers'}),
("python", "test_list.py", {'dir': 'DistGeomHelpers'}),
("python", "test_list.py", {'dir': 'Descriptors'}),
("python", "test_list.py", {'dir': 'Descriptors/Wrap'}),
("python", "test_list.py", {'dir': 'MolChemicalFeatures'}),
("python", "test_list.py", {'dir': 'MolAlign'}),
("python", "test_list.py", {'dir': 'ShapeHelpers'}),
("python", "test_list.py", {'dir': 'ChemTransforms'}),
("python", "test_list.py", {'dir': 'MolCatalog'}),
("python", "test_list.py", {'dir': 'MolCatalog/Wrap'}),
("python", "test_list.py", {'dir': 'ChemReactions'}),
("python", "test_list.py", {'dir': 'SLNParse'}),
("python", "test_list.py", {'dir': 'SLNParse/Wrap'}),
]
if sys.platform != 'win32':
tests.extend([
("testExecs/cptest.exe", "", {}),
("testExecs/querytest.exe", "", {}),
])
longTests = []
if __name__ == '__main__':
import sys
from rdkit import TestRunner
failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
sys.exit(len(failed))
|
jandom/rdkit
|
Code/GraphMol/test_list.py
|
Python
|
bsd-3-clause
| 1,960
|
[
"RDKit"
] |
f0748cf3952d991a17edf1a05b0999b8037d5eb6e2014c19c69834dbed3d22ed
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAgilp(RPackage):
"""Agilent expression array processing package."""
homepage = "http://bioconductor.org/packages/agilp/"
git = "https://git.bioconductor.org/packages/agilp.git"
version('3.8.0', commit='c772a802af1b4c0741f2edd78053a0425160ea53')
depends_on('r@3.4.0:3.4.9', when='@3.8.0')
|
krafczyk/spack
|
var/spack/repos/builtin/packages/r-agilp/package.py
|
Python
|
lgpl-2.1
| 1,580
|
[
"Bioconductor"
] |
58f2468bb4afa8222de4568a6c6790545cf810cf9fd7aa52691ca4853f27754d
|
# -*- coding: utf-8 -*-
"""
Write EC and SMC Meshes in legacy VTK format as .vtk.
"""
import os
import sys
# Run in current directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Relative import path for the DumpMeshToLegacyFormat script.
importPath = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../util'))
if not importPath in sys.path:
sys.path.insert(1, importPath)
del importPath
import DumpMeshToLegacyFormat
# This is for the c2000 mesh.
DumpMeshToLegacyFormat.numQuadsPerRing = 50
DumpMeshToLegacyFormat.meshSet = [
"quadMeshFullc2000.vtp",
"quadMeshFullECc2000.vtp",
"quadMeshFullSMCc2000.vtp"
]
def main():
DumpMeshToLegacyFormat.writeLegacyVTK()
if __name__ == '__main__':
print "Starting", os.path.basename(__file__)
main()
print "Exiting", os.path.basename(__file__)
else:
print __file__, "is to be run as main script."
|
BlueFern/DBiharMesher
|
meshes/c2000f/Dump2000MeshLegacyFormat.py
|
Python
|
gpl-2.0
| 890
|
[
"VTK"
] |
037c4c3b1cdf0887656fad0a134242712604563a7ecf2dea47f6eb215e4a9d38
|
#from distutils.core import setup
from setuptools import find_packages, setup
from PePr import __version__
setup(name="PePr",
version=__version__, # change the version info in the PePr.__init__ file
author="Yanxiao Zhang",
author_email="troublezhang@gmail.com",
url="https://github.com/shawnzhangyx/PePr/",
license="GNU GPL v3",
description="Peak-calling and Prioritization pipeline for replicated ChIP-Seq data",
long_description="Peak-calling and Prioritization pipeline for replicated ChIP-Seq data",
platforms = ['any'],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Programming Language :: Python",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)"
],
packages=find_packages(),
package_data={"PePr": ['data/*.bed']},
install_requires=[
'numpy>=1.6.0',
'scipy>=0.14.0',
'pysam',
'sharedmem',
],
entry_points={
'console_scripts': [
'PePr=PePr.PePr:argless_main',
'PePr-preprocess=PePr.PePr:pre_processing_module',
'PePr-postprocess=PePr.post_processing.post_process_PePr:post_processing_module',
]
}
)
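# After installation (e.g. `pip install .`), the entry_points above expose three
# console commands (PePr, PePr-preprocess, PePr-postprocess) that dispatch into
# the listed module functions.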
|
shawnzhangyx/PePr
|
setup.py
|
Python
|
gpl-3.0
| 1,435
|
[
"pysam"
] |
3ac51d1f7673e9d9e59df3e6356edd23ce6be1268e6cf80373cc412c7070b185
|
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from numpy.random import normal, multivariate_normal
import scipy.stats
from filterpy.kalman import MerweScaledSigmaPoints, unscented_transform
from filterpy.stats import multivariate_gaussian
def plot_nonlinear_func(data, f, out_lim=None, num_bins=300):
ys = f(data)
x0 = np.mean(data)
in_std = np.std(data)
y = f(x0)
std = np.std(ys)
in_lims = [x0 - in_std*3, x0 + in_std*3]
if out_lim is None:
out_lim = [y - std*3, y + std*3]
# plot output
h = np.histogram(ys, num_bins, density=False)
plt.subplot(221)
plt.plot(h[1][1:], h[0], lw=2, alpha=0.8)
if out_lim is not None:
plt.xlim(out_lim[0], out_lim[1])
plt.gca().yaxis.set_ticklabels([])
plt.title('Output')
plt.axvline(np.mean(ys), ls='--', lw=2)
plt.axvline(f(x0), lw=1)
norm = scipy.stats.norm(y, in_std)
'''min_x = norm.ppf(0.001)
max_x = norm.ppf(0.999)
xs = np.arange(min_x, max_x, (max_x - min_x) / 1000)
pdf = norm.pdf(xs)
plt.plot(pdf * max(h[0])/max(pdf), xs, lw=1, color='k')
print(max(norm.pdf(xs)))'''
# plot transfer function
plt.subplot(2, 2, 3)
x = np.arange(in_lims[0], in_lims[1], 0.1)
y = f(x)
plt.plot (x, y, 'k')
isct = f(x0)
plt.plot([x0, x0, in_lims[1]], [out_lim[1], isct, isct], color='r', lw=1)
plt.xlim(in_lims)
plt.ylim(out_lim)
#plt.axis('equal')
plt.title('f(x)')
# plot input
h = np.histogram(data, num_bins, density=True)
plt.subplot(2,2,4)
plt.plot(h[0], h[1][1:], lw=2)
#plt.ylim(in_lims)
plt.gca().xaxis.set_ticklabels([])
plt.title('Input')
plt.tight_layout()
plt.show()
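def _demo_plot_nonlinear_func():
    # A minimal usage sketch (not part of the original module): push Gaussian
    # samples through a nonlinearity and visualize how the output density is
    # distorted relative to the input.
    data = normal(loc=0., scale=1., size=50000)
    plot_nonlinear_func(data, lambda x: np.cos(3*(x/2 + 0.7))*np.sin(0.7*x) - 1.6*x)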
def plot_ekf_vs_mc():
def fx(x):
return x**3
def dfx(x):
return 3*x**2
mean = 1
var = .1
std = math.sqrt(var)
data = normal(loc=mean, scale=std, size=50000)
d_t = fx(data)
mean_ekf = fx(mean)
slope = dfx(mean)
std_ekf = abs(slope*std)
norm = scipy.stats.norm(mean_ekf, std_ekf)
xs = np.linspace(-3, 5, 200)
plt.plot(xs, norm.pdf(xs), lw=2, ls='--', color='b')
try:
plt.hist(d_t, bins=200, density=True, histtype='step', lw=2, color='g')
except:
# older versions of matplotlib don't have the density keyword
plt.hist(d_t, bins=200, normed=True, histtype='step', lw=2, color='g')
actual_mean = d_t.mean()
plt.axvline(actual_mean, lw=2, color='g', label='Monte Carlo')
plt.axvline(mean_ekf, lw=2, ls='--', color='b', label='EKF')
plt.legend()
plt.show()
print('actual mean={:.2f}, std={:.2f}'.format(d_t.mean(), d_t.std()))
print('EKF mean={:.2f}, std={:.2f}'.format(mean_ekf, std_ekf))
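def _linearized_moments(fx, dfx, mean, std):
    # Hedged helper (not in the original file): the first-order, EKF-style
    # moment propagation used above, in isolation. A Gaussian N(mean, std^2)
    # pushed through fx is approximated by N(fx(mean), (|dfx(mean)|*std)^2).
    return fx(mean), abs(dfx(mean)) * std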
def plot_ukf_vs_mc(alpha=0.001, beta=3., kappa=1.):
def fx(x):
return x**3
def dfx(x):
return 3*x**2
mean = 1
var = .1
std = math.sqrt(var)
data = normal(loc=mean, scale=std, size=50000)
d_t = fx(data)
points = MerweScaledSigmaPoints(1, alpha, beta, kappa)
Wm, Wc = points.Wm, points.Wc
sigmas = points.sigma_points(mean, var)
sigmas_f = np.zeros((3, 1))
for i in range(3):
sigmas_f[i] = fx(sigmas[i, 0])
### pass through unscented transform
ukf_mean, ukf_cov = unscented_transform(sigmas_f, Wm, Wc)
ukf_mean = ukf_mean[0]
ukf_std = math.sqrt(ukf_cov[0])
norm = scipy.stats.norm(ukf_mean, ukf_std)
xs = np.linspace(-3, 5, 200)
plt.plot(xs, norm.pdf(xs), ls='--', lw=2, color='b')
try:
plt.hist(d_t, bins=200, density=True, histtype='step', lw=2, color='g')
except:
# older versions of matplotlib don't have the density keyword
plt.hist(d_t, bins=200, normed=True, histtype='step', lw=2, color='g')
actual_mean = d_t.mean()
plt.axvline(actual_mean, lw=2, color='g', label='Monte Carlo')
plt.axvline(ukf_mean, lw=2, ls='--', color='b', label='UKF')
plt.legend()
plt.show()
print('actual mean={:.2f}, std={:.2f}'.format(d_t.mean(), d_t.std()))
print('UKF mean={:.2f}, std={:.2f}'.format(ukf_mean, ukf_std))
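# Note on the sigma-point count used above: for an n-dimensional state the
# Merwe scaled set has 2*n + 1 points, hence the (3, 1) sigmas_f array for
# n = 1. Larger alpha spreads the points further from the mean; the tiny
# default alpha=0.001 keeps them tightly clustered around it.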
def test_plot():
import math
from numpy.random import normal
from scipy import stats
global data
def f(x):
return 2*x + 1
mean = 2
var = 3
std = math.sqrt(var)
data = normal(loc=2, scale=std, size=50000)
d2 = f(data)
n = scipy.stats.norm(mean, std)
kde1 = stats.gaussian_kde(data, bw_method='silverman')
kde2 = stats.gaussian_kde(d2, bw_method='silverman')
xs = np.linspace(-10, 10, num=200)
#plt.plot(data)
plt.plot(xs, kde1(xs))
plt.plot(xs, kde2(xs))
plt.plot(xs, n.pdf(xs), color='k')
num_bins=100
h = np.histogram(data, num_bins, density=True)
plt.plot(h[1][1:], h[0], lw=4)
h = np.histogram(d2, num_bins, density=True)
plt.plot(h[1][1:], h[0], lw=4)
def plot_bivariate_colormap(xs, ys):
xs = np.asarray(xs)
ys = np.asarray(ys)
xmin = xs.min()
xmax = xs.max()
ymin = ys.min()
ymax = ys.max()
values = np.vstack([xs, ys])
kernel = scipy.stats.gaussian_kde(values)
X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([X.ravel(), Y.ravel()])
Z = np.reshape(kernel.evaluate(positions).T, X.shape)
plt.gca().imshow(np.rot90(Z), cmap=plt.cm.Greys,
extent=[xmin, xmax, ymin, ymax])
def plot_monte_carlo_mean(xs, ys, f, mean_fx, label, plot_colormap=True):
fxs, fys = f(xs, ys)
computed_mean_x = np.average(fxs)
computed_mean_y = np.average(fys)
ax = plt.subplot(121)
ax.grid(b=False)
plot_bivariate_colormap(xs, ys)
plt.scatter(xs, ys, marker='.', alpha=0.02, color='k')
ax.set_xlim(-20, 20)
ax.set_ylim(-20, 20)
ax = plt.subplot(122)
ax.grid(b=False)
plt.scatter(fxs, fys, marker='.', alpha=0.02, color='k')
plt.scatter(mean_fx[0], mean_fx[1],
marker='v', s=300, c='r', label=label)
plt.scatter(computed_mean_x, computed_mean_y,
marker='*',s=120, c='b', label='Computed Mean')
plot_bivariate_colormap(fxs, fys)
ax.set_xlim([-100, 100])
ax.set_ylim([-10, 200])
plt.legend(loc='best', scatterpoints=1)
print ('Difference in mean x={:.3f}, y={:.3f}'.format(
computed_mean_x-mean_fx[0], computed_mean_y-mean_fx[1]))
def plot_cov_ellipse_colormap(cov=[[1,1],[1,1]]):
side = np.linspace(-3, 3, 200)
X,Y = np.meshgrid(side,side)
pos = np.empty(X.shape + (2,))
    pos[:, :, 0] = X
    pos[:, :, 1] = Y
plt.axes(xticks=[], yticks=[], frameon=True)
rv = scipy.stats.multivariate_normal((0,0), cov)
plt.gca().grid(b=False)
plt.gca().imshow(rv.pdf(pos), cmap=plt.cm.Greys, origin='lower')
plt.show()
def plot_gaussians(xs, ps, x_range, y_range, N):
""" given a list of 2d states (x,y) and 2x2 covariance matrices, produce
a surface plot showing all of the gaussians"""
xs = np.asarray(xs)
x = np.linspace (x_range[0], x_range[1], N)
y = np.linspace (y_range[0], y_range[1], N)
xx, yy = np.meshgrid(x, y)
zv = np.zeros((N, N))
for mean, cov in zip(xs, ps):
zs = np.array([multivariate_gaussian(np.array([i ,j]), mean, cov)
for i, j in zip(np.ravel(xx), np.ravel(yy))])
zv += zs.reshape(xx.shape)
ax = plt.figure().add_subplot(111, projection='3d')
ax.plot_surface(xx, yy, zv, rstride=1, cstride=1, lw=.5, edgecolors='#191919',
antialiased=True, shade=True, cmap=cm.autumn)
ax.view_init(elev=40., azim=230)
if __name__ == "__main__":
#plot_cov_ellipse_colormap(cov=[[2, 1.2], [1.2, 2]])
'''
from numpy.random import normal
import numpy as np
plot_ukf_vs_mc()'''
'''x0 = (1, 1)
data = normal(loc=x0[0], scale=x0[1], size=500000)
def g(x):
return x*x
return (np.cos(3*(x/2+0.7)))*np.sin(0.7*x)-1.6*x
return -2*x
#plot_transfer_func (data, g, lims=(-3,3), num_bins=100)
plot_nonlinear_func (data, g, gaussian=x0,
num_bins=100)
'''
Ps = np.array([[[ 2.85841814, 0.71772898],
[ 0.71772898, 0.93786824]],
[[ 3.28939458, 0.52634978],
[ 0.52634978, 0.13435503]],
[[ 2.40532661, 0.29692055],
[ 0.29692055, 0.07671416]],
[[ 2.23084082, 0.27823192],
[ 0.27823192, 0.07488681]]])
Ms = np.array([[ 0.68040795, 0.17084572],
[ 8.46201389, 1.15070342],
[ 13.7992229 , 0.96022707],
[ 19.95838208, 0.87524265]])
    plot_gaussians(Ms, Ps, (-5,25), (-5, 5), 75)
|
zaqwes8811/micro-apps
|
self_driving/deps/Kalman_and_Bayesian_Filters_in_Python_master/kf_book/nonlinear_plots.py
|
Python
|
mit
| 9,116
|
[
"Gaussian"
] |
194acb67e58d35fb38f0e200be949e9ae083e6ee39a6517f5d3a94e52c8175b5
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import sys
from ast import Import, ImportFrom, NodeVisitor, parse
from collections import defaultdict
from os.path import dirname, sep
from typing import Dict, List, Optional, Tuple
sys.path.append(os.path.join(dirname(__file__), os.pardir))
from setup import PROVIDERS_REQUIREMENTS
AIRFLOW_PROVIDERS_FILE_PREFIX = f"airflow{sep}providers{sep}"
AIRFLOW_TESTS_PROVIDERS_FILE_PREFIX = f"tests{sep}providers{sep}"
AIRFLOW_PROVIDERS_IMPORT_PREFIX = "airflow.providers."
# List of information messages generated
infos: List[str] = []
# List of warnings generated
warnings: List[str] = []
# list of errors generated
errors: List[str] = []
# store dependencies
dependencies: Dict[str, List[str]] = defaultdict(list)
def find_provider(provider_elements: List[str]) -> Optional[str]:
"""
Finds provider name from the list of elements provided. It looks the providers up
in PROVIDERS_REQUIREMENTS dict taken from the setup.py.
:param provider_elements: array of elements of the path (split)
:return: provider name or None if no provider could be found
"""
provider = ""
separator = ""
provider_keys = PROVIDERS_REQUIREMENTS.keys()
for element in provider_elements:
provider = provider + separator + element
if provider in provider_keys:
return provider
separator = "."
return None
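# Worked example for find_provider (hypothetical key set): if
# PROVIDERS_REQUIREMENTS contained the key 'apache.hive', then
# find_provider(['apache', 'hive', 'hooks']) would try 'apache', then
# 'apache.hive', match the second candidate and return it without ever
# considering 'apache.hive.hooks'.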
def get_provider_from_file_name(file_name: str) -> Optional[str]:
"""
Retrieves provider name from file name
:param file_name: name of the file
:return: provider name or None if no provider could be found
"""
if (
AIRFLOW_PROVIDERS_FILE_PREFIX not in file_name
and AIRFLOW_TESTS_PROVIDERS_FILE_PREFIX not in file_name
):
        # We should only check files that belong to a provider package
errors.append(f"Wrong file not in the providers package = {file_name}")
return None
suffix = get_file_suffix(file_name)
assert suffix
split_path = suffix.split(sep)[2:]
provider = find_provider(split_path)
if not provider and file_name.endswith("__init__.py"):
infos.append(f"Skipped file = {file_name}")
elif not provider:
warnings.append(f"Provider not found for path = {file_name}")
return provider
def get_file_suffix(file_name) -> Optional[str]:
if AIRFLOW_PROVIDERS_FILE_PREFIX in file_name:
return file_name[file_name.find(AIRFLOW_PROVIDERS_FILE_PREFIX) :]
if AIRFLOW_TESTS_PROVIDERS_FILE_PREFIX in file_name:
return file_name[file_name.find(AIRFLOW_TESTS_PROVIDERS_FILE_PREFIX) :]
return None
def get_provider_from_import(import_name: str) -> Optional[str]:
"""
    Retrieves provider name from an import name
:param import_name: name of the import
:return: provider name or None if no provider could be found
"""
if AIRFLOW_PROVIDERS_IMPORT_PREFIX not in import_name:
        # skip silently - we expect non-provider imports
return None
suffix = import_name[import_name.find(AIRFLOW_PROVIDERS_IMPORT_PREFIX) :]
split_import = suffix.split(".")[2:]
provider = find_provider(split_import)
if not provider:
warnings.append(f"Provider not found for import = {import_name}")
return provider
class ImportFinder(NodeVisitor):
"""
AST visitor that collects all imported names in its imports
"""
def __init__(self, filename: str) -> None:
self.imports: List[str] = []
self.filename = filename
        self.handled_import_exception: List[str] = []
self.tried_imports: List[str] = []
def process_import(self, import_name: str) -> None:
self.imports.append(import_name)
def get_import_name_from_import_from(self, node: ImportFrom) -> List[str]:
"""
Retrieves import name from the "from" import.
:param node: ImportFrom name
:return: import name
"""
import_names: List[str] = []
for alias in node.names:
name = alias.name
fullname = f'{node.module}.{name}' if node.module else name
import_names.append(fullname)
return import_names
def visit_Import(self, node: Import):
for alias in node.names:
self.process_import(alias.name)
def visit_ImportFrom(self, node: ImportFrom):
if node.module == '__future__':
return
for fullname in self.get_import_name_from_import_from(node):
self.process_import(fullname)
def get_imports_from_file(file_name: str) -> List[str]:
"""
Retrieves imports from file.
:param file_name: name of the file
:return: list of import names
"""
try:
with open(file_name, encoding="utf-8") as f:
root = parse(f.read(), file_name)
except Exception:
print(f"Error when opening file {file_name}", file=sys.stderr)
raise
visitor = ImportFinder(file_name)
visitor.visit(root)
return visitor.imports
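def _demo_import_finder():
    # A small self-contained sketch (not in the original script): run the
    # ImportFinder visitor over an in-memory snippet instead of a file.
    source = "import json\nfrom os.path import sep\n"
    visitor = ImportFinder("<memory>")
    visitor.visit(parse(source))
    return visitor.imports  # -> ['json', 'os.path.sep']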
def check_if_different_provider_used(file_name: str) -> None:
file_provider = get_provider_from_file_name(file_name)
if not file_provider:
return
imports = get_imports_from_file(file_name)
for import_name in imports:
import_provider = get_provider_from_import(import_name)
if import_provider and file_provider != import_provider:
dependencies[file_provider].append(import_provider)
def parse_arguments() -> Tuple[List[str], str, str]:
import argparse
parser = argparse.ArgumentParser(
description='Checks if dependencies between packages are handled correctly.'
)
parser.add_argument(
"-f", "--provider-dependencies-file", help="Stores dependencies between providers in the file(.json)"
)
parser.add_argument(
"-d", "--documentation-file", help="Updates package documentation in the file specified (.rst)"
)
parser.add_argument('files', nargs='*')
args = parser.parse_args()
if len(args.files) < 1:
parser.print_usage()
print()
sys.exit(2)
return args.files, args.provider_dependencies_file, args.documentation_file
PREFIX = " "
HEADER = """
========================== ===========================
Package Extras
========================== ===========================
"""
FOOTER = """========================== ===========================
"""
def insert_documentation(deps_dict: Dict[str, List[str]], res: List[str]) -> None:
res += HEADER.splitlines(keepends=True)
for package, deps in deps_dict.items():
deps_str = ",".join(deps)
res.append(f"{package:27}{deps_str}\n")
res += FOOTER.splitlines(keepends=True)
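# Example (hypothetical dependencies): with deps_dict = {'amazon': ['apache.hive']}
# the function appends the RST table header, a row with 'amazon' left-justified
# in a 27-character column followed by 'apache.hive', and the closing rule,
# ready to be spliced between the START/END markers handled in __main__ below.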
if __name__ == '__main__':
print()
files, provider_dependencies_file_name, documentation_file_name = parse_arguments()
num_files = 0
for file in files:
check_if_different_provider_used(file)
num_files += 1
print(f"Verified {num_files} files.")
if infos:
print("\nInformation messages:\n")
for info in infos:
print(PREFIX + info)
print(f"Total: {len(infos)} information messages.")
if warnings:
print("\nWarnings!\n")
for warning in warnings:
print(PREFIX + warning)
print(f"Total: {len(warnings)} warnings.")
if errors:
print("\nErrors!\n")
for error in errors:
print(PREFIX + error)
print(f"Total: {len(errors)} errors.")
unique_sorted_dependencies: Dict[str, List[str]] = {}
for key in sorted(dependencies.keys()):
unique_sorted_dependencies[key] = sorted(set(dependencies[key]))
if provider_dependencies_file_name:
with open(provider_dependencies_file_name, "w") as providers_file:
json.dump(unique_sorted_dependencies, providers_file, indent=2)
providers_file.write("\n")
print()
print(f"Written provider dependencies to the file {provider_dependencies_file_name}")
print()
if documentation_file_name:
with open(documentation_file_name, encoding="utf-8") as documentation_file:
text = documentation_file.readlines()
replacing = False
result: List[str] = []
for line in text:
if line.startswith(" .. START PACKAGE DEPENDENCIES HERE"):
replacing = True
result.append(line)
insert_documentation(unique_sorted_dependencies, result)
if line.startswith(" .. END PACKAGE DEPENDENCIES HERE"):
replacing = False
if not replacing:
result.append(line)
with open(documentation_file_name, "w", encoding="utf-8") as documentation_file:
documentation_file.write("".join(result))
print()
print(f"Written package extras to the file {documentation_file_name}")
print()
if errors:
print()
print("ERROR! Errors found during verification. Exiting!")
print()
sys.exit(1)
print()
print("Verification complete! Success!")
print()
|
Acehaidrey/incubator-airflow
|
tests/build_provider_packages_dependencies.py
|
Python
|
apache-2.0
| 9,855
|
[
"VisIt"
] |
25ada2cfb74a3d2a098dcbefdbfe028c60e804ce262ca66b8283b51c96ffc3b1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ldap
from nose.plugins.attrib import attr
from nose.tools import assert_true, assert_equal, assert_false
import desktop.conf
from desktop.lib.test_utils import grant_access
from desktop.lib.django_test_util import make_logged_in_client
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from useradmin.models import LdapGroup, UserProfile
from useradmin.models import get_profile
from hadoop import pseudo_hdfs4
from views import sync_ldap_users, sync_ldap_groups, import_ldap_users, import_ldap_groups, \
add_ldap_users, add_ldap_groups, sync_ldap_users_groups
import ldap_access
from tests import LdapTestConnection, reset_all_groups, reset_all_users
def get_nonsense_config():
return {'nonsense': {
'users': {},
'groups': {}
}}
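# The dict above mirrors the shape of the LDAP_SERVERS config: one entry per
# server name, each carrying 'users' and 'groups' sub-dicts. The tests below
# install it only to force a config reload; lookups still go through the
# cached LdapTestConnection.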
def test_useradmin_ldap_user_group_membership_sync():
settings.MIDDLEWARE_CLASSES.append('useradmin.middleware.LdapSynchronizationMiddleware')
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Make sure LDAP groups exist or they won't sync
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
reset = []
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
# Import curly who is part of TestUsers and Test Administrators
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'curly', sync_groups=False, import_by_dn=False)
# Set a password so that we can login
user = User.objects.get(username='curly')
user.set_password('test')
user.save()
# Should have 0 groups
assert_equal(0, user.groups.all().count())
    # Make an authenticated request as curly so that the sync middleware gets called.
c = make_logged_in_client('curly', 'test', is_superuser=False)
grant_access("curly", "test", "useradmin")
response = c.get('/useradmin/users')
# Refresh user groups
user = User.objects.get(username='curly')
# Should have 3 groups now. 2 from LDAP and 1 from 'grant_access' call.
assert_equal(3, user.groups.all().count(), user.groups.all())
# Now remove a group and try again.
old_group = ldap_access.CACHED_LDAP_CONN._instance.users['curly']['groups'].pop()
    # Make an authenticated request as curly so that the sync middleware gets called.
response = c.get('/useradmin/users')
# Refresh user groups
user = User.objects.get(username='curly')
    # The assertion below still expects 3 groups; the removed LDAP group is not dropped by this sync pass.
assert_equal(3, user.groups.all().count(), user.groups.all())
finally:
settings.MIDDLEWARE_CLASSES.remove('useradmin.middleware.LdapSynchronizationMiddleware')
for finish in reset:
finish()
def test_useradmin_ldap_suboordinate_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test old subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("suboordinate"))
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 3)
assert_equal(Group.objects.all().count(), 2)
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 2)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='moe').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 3)
assert_equal(User.objects.get(username='moe').groups.all().count(), 1)
# Import all members of TestUsers and members of subgroups
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 4)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_nested_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test old subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("nested"))
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 3)
assert_equal(Group.objects.all().count(), 2)
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 2)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
assert_equal(test_users.user_set.all().count(), 3)
ldap_access.CACHED_LDAP_CONN.remove_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='moe').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 3)
assert_equal(User.objects.get(username='moe').groups.all().count(), 1)
# Import all members of TestUsers and not members of suboordinate groups (even though specified)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Nested group import
# First without recursive import, then with.
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedGroups', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
nested_groups = Group.objects.get(name='NestedGroups')
nested_group = Group.objects.get(name='NestedGroup')
assert_true(LdapGroup.objects.filter(group=nested_groups).exists())
assert_true(LdapGroup.objects.filter(group=nested_group).exists())
assert_equal(nested_groups.user_set.all().count(), 0, nested_groups.user_set.all())
assert_equal(nested_group.user_set.all().count(), 0, nested_group.user_set.all())
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedGroups', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
nested_groups = Group.objects.get(name='NestedGroups')
nested_group = Group.objects.get(name='NestedGroup')
assert_true(LdapGroup.objects.filter(group=nested_groups).exists())
assert_true(LdapGroup.objects.filter(group=nested_group).exists())
assert_equal(nested_groups.user_set.all().count(), 0, nested_groups.user_set.all())
assert_equal(nested_group.user_set.all().count(), 1, nested_group.user_set.all())
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_suboordinate_posix_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test old subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("suboordinate"))
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 2, User.objects.all())
assert_equal(Group.objects.all().count(), 2, Group.objects.all())
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 1)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 1)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 1)
# Import all members of PosixGroup and members of subgroups
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_nested_posix_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test nested groups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("nested"))
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 2, User.objects.all())
assert_equal(Group.objects.all().count(), 2, Group.objects.all())
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 1)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 1)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 1)
# Import all members of PosixGroup and members of subgroups (there should be no subgroups)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Import all members of NestedPosixGroups and members of subgroups
reset_all_users()
reset_all_groups()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedPosixGroups', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='NestedPosixGroups')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_user_integration():
done = []
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
done.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Try importing a user
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'lårry', sync_groups=False, import_by_dn=False)
larry = User.objects.get(username='lårry')
assert_true(larry.first_name == 'Larry')
assert_true(larry.last_name == 'Stooge')
assert_true(larry.email == 'larry@stooges.com')
assert_true(get_profile(larry).creation_method == str(UserProfile.CreationMethod.EXTERNAL))
# Should be a noop
sync_ldap_users(ldap_access.CACHED_LDAP_CONN)
sync_ldap_groups(ldap_access.CACHED_LDAP_CONN)
assert_equal(User.objects.all().count(), 1)
assert_equal(Group.objects.all().count(), 0)
# Make sure that if a Hue user already exists with a naming collision, we
# won't overwrite any of that user's information.
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'otherguy', sync_groups=False, import_by_dn=False)
hue_user = User.objects.get(username='otherguy')
assert_equal(get_profile(hue_user).creation_method, str(UserProfile.CreationMethod.HUE))
assert_equal(hue_user.first_name, 'Different')
# Make sure LDAP groups exist or they won't sync
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
# Try importing a user and sync groups
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'curly', sync_groups=True, import_by_dn=False)
curly = User.objects.get(username='curly')
assert_equal(curly.first_name, 'Curly')
assert_equal(curly.last_name, 'Stooge')
assert_equal(curly.email, 'curly@stooges.com')
assert_equal(get_profile(curly).creation_method, str(UserProfile.CreationMethod.EXTERNAL))
assert_equal(2, curly.groups.all().count(), curly.groups.all())
reset_all_users()
reset_all_groups()
# Test import case sensitivity
done.append(desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True))
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Lårry', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Lårry').exists())
assert_true(User.objects.filter(username='lårry').exists())
# Test lower case
User.objects.filter(username__iexact='Rock').delete()
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
done.append(desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.set_for_testing(True))
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
User.objects.filter(username='Rock').delete()
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
finally:
for finish in done:
finish()
def test_add_ldap_users():
done = []
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
done.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
URL = reverse(add_ldap_users)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client('test', is_superuser=True)
assert_true(c.get(URL))
response = c.post(URL, dict(server='nonsense', username_pattern='moe', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
response = c.post(URL, dict(server='nonsense', username_pattern='bad_name', password1='test', password2='test'))
assert_true('Could not' in response.context['form'].errors['username_pattern'][0], response)
# Test wild card
response = c.post(URL, dict(server='nonsense', username_pattern='*rr*', password1='test', password2='test'))
assert_true('/useradmin/users' in response['Location'], response)
# Test ignore case
done.append(desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True))
User.objects.filter(username='moe').delete()
assert_false(User.objects.filter(username='Moe').exists())
assert_false(User.objects.filter(username='moe').exists())
response = c.post(URL, dict(server='nonsense', username_pattern='Moe', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
assert_false(User.objects.filter(username='Moe').exists())
assert_true(User.objects.filter(username='moe').exists())
# Test lower case
done.append(desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.set_for_testing(True))
User.objects.filter(username__iexact='Rock').delete()
assert_false(User.objects.filter(username='Rock').exists())
assert_false(User.objects.filter(username='rock').exists())
response = c.post(URL, dict(server='nonsense', username_pattern='rock', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
# Test regular with spaces (should fail)
response = c.post(URL, dict(server='nonsense', username_pattern='user with space', password1='test', password2='test'))
assert_true("Username must not contain whitespaces and ':'" in response.context['form'].errors['username_pattern'][0], response)
# Test dn with spaces in username and dn (should fail)
response = c.post(URL, dict(server='nonsense', username_pattern='uid=user with space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True))
assert_true("There was a problem with some of the LDAP information" in response.content, response)
assert_true("Username must not contain whitespaces" in response.content, response)
# Test dn with spaces in dn, but not username (should succeed)
response = c.post(URL, dict(server='nonsense', username_pattern='uid=user without space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True))
assert_true(User.objects.filter(username='spaceless').exists())
finally:
for finish in done:
finish()
def test_add_ldap_groups():
URL = reverse(add_ldap_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client(username='test', is_superuser=True)
reset = []
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
assert_true(c.get(URL))
response = c.post(URL, dict(server='nonsense', groupname_pattern='TestUsers'))
assert_true('Location' in response, response)
assert_true('/useradmin/groups' in response['Location'])
# Test with space
response = c.post(URL, dict(server='nonsense', groupname_pattern='Test Administrators'))
assert_true('Location' in response, response)
assert_true('/useradmin/groups' in response['Location'], response)
response = c.post(URL, dict(server='nonsense', groupname_pattern='toolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongname'))
assert_true('Ensure this value has at most 256 characters' in response.context['form'].errors['groupname_pattern'][0], response)
# Test wild card
response = c.post(URL, dict(server='nonsense', groupname_pattern='*r*'))
assert_true('/useradmin/groups' in response['Location'], response)
finally:
for finish in reset:
finish()
def test_sync_ldap_users_groups():
URL = reverse(sync_ldap_users_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client('test', is_superuser=True)
reset = []
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
assert_true(c.get(URL))
assert_true(c.post(URL))
finally:
for finish in reset:
finish()
def test_ldap_exception_handling():
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
class LdapTestConnectionError(LdapTestConnection):
def find_users(self, user, find_by_dn=False):
raise ldap.LDAPError('No such object')
ldap_access.CACHED_LDAP_CONN = LdapTestConnectionError()
c = make_logged_in_client('test', is_superuser=True)
reset = []
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
response = c.post(reverse(add_ldap_users), dict(server='nonsense', username_pattern='moe', password1='test', password2='test'), follow=True)
assert_true('There was an error when communicating with LDAP' in response.content, response)
finally:
for finish in reset:
finish()
@attr('requires_hadoop')
def test_ensure_home_directory_add_ldap_users():
URL = reverse(add_ldap_users)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
cluster = pseudo_hdfs4.shared_cluster()
c = make_logged_in_client(cluster.superuser, is_superuser=True)
cluster.fs.setuser(cluster.superuser)
reset = []
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
assert_true(c.get(URL))
response = c.post(URL, dict(server='nonsense', username_pattern='moe', password1='test', password2='test'))
assert_true('/useradmin/users' in response['Location'])
assert_false(cluster.fs.exists('/user/moe'))
# Try same thing with home directory creation.
response = c.post(URL, dict(server='nonsense', username_pattern='curly', password1='test', password2='test', ensure_home_directory=True))
assert_true('/useradmin/users' in response['Location'])
assert_true(cluster.fs.exists('/user/curly'))
response = c.post(URL, dict(server='nonsense', username_pattern='bad_name', password1='test', password2='test'))
assert_true('Could not' in response.context['form'].errors['username_pattern'][0])
assert_false(cluster.fs.exists('/user/bad_name'))
# See if moe, who did not ask for his home directory, has a home directory.
assert_false(cluster.fs.exists('/user/moe'))
# Try wild card now
response = c.post(URL, dict(server='nonsense', username_pattern='*rr*', password1='test', password2='test', ensure_home_directory=True))
assert_true('/useradmin/users' in response['Location'])
assert_true(cluster.fs.exists('/user/curly'))
assert_true(cluster.fs.exists(u'/user/lårry'))
assert_false(cluster.fs.exists('/user/otherguy'))
finally:
# Clean up
for finish in reset:
finish()
if cluster.fs.exists('/user/curly'):
cluster.fs.rmtree('/user/curly')
if cluster.fs.exists(u'/user/lårry'):
cluster.fs.rmtree(u'/user/lårry')
if cluster.fs.exists('/user/otherguy'):
cluster.fs.rmtree('/user/otherguy')
@attr('requires_hadoop')
def test_ensure_home_directory_sync_ldap_users_groups():
URL = reverse(sync_ldap_users_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
cluster = pseudo_hdfs4.shared_cluster()
c = make_logged_in_client(cluster.superuser, is_superuser=True)
cluster.fs.setuser(cluster.superuser)
reset = []
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
c.post(reverse(add_ldap_users), dict(server='nonsense', username_pattern='curly', password1='test', password2='test'))
assert_false(cluster.fs.exists('/user/curly'))
assert_true(c.post(URL, dict(server='nonsense', ensure_home_directory=True)))
assert_true(cluster.fs.exists('/user/curly'))
finally:
for finish in reset:
finish()
|
vmanoria/bluemix-hue-filebrowser
|
hue-3.8.1-bluemix/apps/useradmin/src/useradmin/test_ldap.py
|
Python
|
gpl-2.0
| 35,547
|
[
"MOE"
] |
c33ab627701585b45c63c0b4d6504205e60044299ce6b2f4782b28d5d5f5f0b4
|
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Differentially private tree aggregation factory."""
import math
import tensorflow_privacy as tfp
from tensorflow_federated.python.aggregators import differential_privacy
from tensorflow_federated.python.aggregators import secure
from tensorflow_federated.python.aggregators import sum_factory
from tensorflow_federated.python.analytics.hierarchical_histogram import clipping_factory
from tensorflow_federated.python.analytics.hierarchical_histogram import modular_clipping_factory
# Supported no-noise mechanisms.
NO_NOISE_MECHANISMS = ['no-noise']
# Supported central DP mechanisms.
CENTRAL_DP_MECHANISMS = [
'central-gaussian', # Central Gaussian mechanism.
]
# Supported distributed DP mechanisms.
DISTRIBUTED_DP_MECHANISMS = [
'distributed-discrete-gaussian', # Distributed discrete Gaussian mechanism.
]
DP_MECHANISMS = CENTRAL_DP_MECHANISMS + DISTRIBUTED_DP_MECHANISMS + NO_NOISE_MECHANISMS
def create_hierarchical_histogram_aggregation_factory(
num_bins: int,
arity: int = 2,
clip_mechanism: str = 'sub-sampling',
max_records_per_user: int = 10,
dp_mechanism: str = 'no-noise',
noise_multiplier: float = 0.0,
expected_clients_per_round: int = 10,
bits: int = 22):
"""Creates hierarchical histogram aggregation factory.
Hierarchical histogram factory is constructed by composing 3 aggregation
factories.
(1) The inner-most factory is `SumFactory`.
(2) The middle factory is `DifferentiallyPrivateFactory` whose inner query is
`TreeRangeSumQuery`. This factory 1) takes in a clipped histogram,
constructs the hierarchical histogram and checks the norm bound of the
hierarchical histogram at clients, 2) adds noise either at clients or at
server according to `dp_mechanism`.
(3) The outer-most factory is `HistogramClippingSumFactory` which clips the
input histogram to bound each user's contribution.
Args:
num_bins: An `int` representing the input histogram size.
arity: An `int` representing the branching factor of the tree. Defaults to
2.
clip_mechanism: A `str` representing the clipping mechanism. Currently
supported mechanisms are
- 'sub-sampling': (Default) Uniformly sample up to `max_records_per_user`
records without replacement from the client dataset.
- 'distinct': Uniquify client dataset and uniformly sample up to
`max_records_per_user` records without replacement from it.
max_records_per_user: An `int` representing the maximum of records each user
can include in their local histogram. Defaults to 10.
dp_mechanism: A `str` representing the differentially private mechanism to
use. Currently supported mechanisms are
- 'no-noise': (Default) Tree aggregation mechanism without noise.
- 'central-gaussian': Tree aggregation with central Gaussian mechanism.
- 'distributed-discrete-gaussian': Tree aggregation mechanism with
distributed discrete Gaussian mechanism in "The Distributed Discrete
Gaussian Mechanism for Federated Learning with Secure Aggregation. Peter
Kairouz, Ziyu Liu, Thomas Steinke".
noise_multiplier: A `float` specifying the noise multiplier (central noise
stddev / L2 clip norm) for model updates. Only needed when `dp_mechanism`
is not 'no-noise'. Defaults to 0.0.
expected_clients_per_round: An `int` specifying the lower bound of the
expected number of clients. Only needed when `dp_mechanism` is
      'distributed-discrete-gaussian'. Defaults to 10.
bits: A positive integer specifying the communication bit-width B (where
2**B will be the field size for SecAgg operations). Only needed when
`dp_mechanism` is 'distributed-discrete-gaussian'. Please read the below
precautions carefully and set `bits` accordingly. Otherwise, unexpected
overflow or accuracy degradation might happen.
(1) Should be in the inclusive range [1, 22] to avoid overflow inside
secure aggregation;
(2) Should be at least as large as
`log2(4 * sqrt(expected_clients_per_round)* noise_multiplier *
l2_norm_bound + expected_clients_per_round * max_records_per_user) + 1`
to avoid accuracy degradation caused by frequent modular clipping;
(3) If the number of clients exceed `expected_clients_per_round`, overflow
might happen.
Returns:
`tff.aggregators.UnweightedAggregationFactory`.
Raises:
TypeError: If arguments have the wrong type(s).
ValueError: If arguments have invalid value(s).
"""
_check_positive(num_bins, 'num_bins')
_check_greater_equal(arity, 2, 'arity')
_check_membership(clip_mechanism, clipping_factory.CLIP_MECHANISMS,
'clip_mechanism')
_check_positive(max_records_per_user, 'max_records_per_user')
_check_membership(dp_mechanism, DP_MECHANISMS, 'dp_mechanism')
_check_non_negative(noise_multiplier, 'noise_multiplier')
_check_positive(expected_clients_per_round, 'expected_clients_per_round')
_check_in_range(bits, 'bits', 1, 22)
# Converts `max_records_per_user` to the corresponding norm bound according to
# the chosen `clip_mechanism` and `dp_mechanism`.
if dp_mechanism in ['central-gaussian', 'distributed-discrete-gaussian']:
if clip_mechanism == 'sub-sampling':
l2_norm_bound = max_records_per_user * math.sqrt(
_tree_depth(num_bins, arity))
elif clip_mechanism == 'distinct':
# The following code block converts `max_records_per_user` to L2 norm
# bound of the hierarchical histogram layer by layer. For the bottom
# layer with only 0s and at most `max_records_per_user` 1s, the L2 norm
# bound is `sqrt(max_records_per_user)`. For the second layer from bottom,
# the worst case is only 0s and `max_records_per_user/2` 2s. And so on
# until the root node. Another natural L2 norm bound on each layer is
# `max_records_per_user` so we take the minimum between the two bounds.
square_l2_norm_bound = 0.
square_layer_l2_norm_bound = max_records_per_user
for _ in range(_tree_depth(num_bins, arity)):
square_l2_norm_bound += min(max_records_per_user**2,
square_layer_l2_norm_bound)
square_layer_l2_norm_bound *= arity
l2_norm_bound = math.sqrt(square_l2_norm_bound)
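# Worked example (illustrative numbers, not from the original source): with
# num_bins=4, arity=2 (tree depth 3) and max_records_per_user=4, the layer
# bounds sum to min(16, 4) + min(16, 8) + min(16, 16) = 28, so
# l2_norm_bound = sqrt(28) ~= 5.29.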
# Build nested aggregation factory from innermost to outermost.
# 1. Sum factory. The most inner factory that sums the preprocessed records.
# (1) If `dp_mechanism` is in `CENTRAL_DP_MECHANISMS` or
# `NO_NOISE_MECHANISMS`, should be `SumFactory`.
if dp_mechanism in CENTRAL_DP_MECHANISMS + NO_NOISE_MECHANISMS:
nested_factory = sum_factory.SumFactory()
# (2) If `dp_mechanism` is in `DISTRIBUTED_DP_MECHANISMS`, should be
# `SecureSumFactory`. To preserve DP and avoid overflow, we have 4 modular
# clips from nesting two modular clip aggregators:
# #1. outer-client: clips to [-2**(bits-1), 2**(bits-1))
# Bounds the client values.
# #2. inner-client: clips to [0, 2**bits)
# Similar to applying a two's complement to the values such that
# frequent values (post-rotation) are now near 0 (representing small
positives) and 2**bits (small negatives). 0 also always maps to 0, and
# we do not require another explicit value range shift from
# [-2**(bits-1), 2**(bits-1)] to [0, 2**bits] to make sure that values
# are compatible with SecAgg's mod m = 2**bits. This can be reverted at
# #4.
# #3. inner-server: clips to [0, 2**bits)
# Ensures the aggregated value range does not grow by
# `log2(expected_clients_per_round)`.
# NOTE: If underlying SecAgg is implemented using the new
# `tff.federated_secure_modular_sum()` operator with the same
# modular clipping range, then this would correspond to a no-op.
# #4. outer-server: clips to [-2**(bits-1), 2**(bits-1))
# Keeps aggregated values centered near 0 out of the logical SecAgg
# black box for outer aggregators.
elif dp_mechanism in DISTRIBUTED_DP_MECHANISMS:
# TODO(b/196312838): Please add scaling to the distributed case once we have
# a stable guideline for setting scaling factor to improve performance and
# avoid overflow. The below test is to make sure that modular clipping
# happens with small probability so the accuracy of the result won't be
# harmed. However, if the number of clients exceeds
# `expected_clients_per_round`, overflow still might happen. It is the
# caller's responsibility to carefully choose `bits` according to system
# details to avoid overflow or performance degradation.
if bits < math.log2(4 * math.sqrt(expected_clients_per_round) *
noise_multiplier * l2_norm_bound +
expected_clients_per_round * max_records_per_user) + 1:
raise ValueError(f'The selected bit-width ({bits}) is too small for the '
f'given parameters (expected_clients_per_round = '
f'{expected_clients_per_round}, max_records_per_user = '
f'{max_records_per_user}, noise_multiplier = '
f'{noise_multiplier}) and will harm the accuracy of the '
f'result. Please decrease the '
f'`expected_clients_per_round` / `max_records_per_user` '
f'/ `noise_multiplier`, or increase `bits`.')
nested_factory = secure.SecureSumFactory(
upper_bound_threshold=2**bits - 1, lower_bound_threshold=0)
nested_factory = modular_clipping_factory.ModularClippingSumFactory(
clip_range_lower=0,
clip_range_upper=2**bits,
inner_agg_factory=nested_factory)
nested_factory = modular_clipping_factory.ModularClippingSumFactory(
clip_range_lower=-2**(bits - 1),
clip_range_upper=2**(bits - 1),
inner_agg_factory=nested_factory)
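# Illustrative instantiation (assumed values, not from the original source):
# with bits=8, the inner SecureSumFactory sums field values in [0, 255], the
# inner modular clip wraps sums into [0, 256), and the outer modular clip
# re-centers the result into [-128, 128).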
# 2. DP operations.
# Constructs `DifferentiallyPrivateFactory` according to the chosen
# `dp_mechanism`.
if dp_mechanism == 'central-gaussian':
query = tfp.TreeRangeSumQuery.build_central_gaussian_query(
l2_norm_bound, noise_multiplier * l2_norm_bound, arity)
# If the inner `DifferentiallyPrivateFactory` uses `GaussianSumQuery`, then
# the record is cast to `tf.float32` before feeding to the DP factory.
cast_to_float = True
elif dp_mechanism == 'distributed-discrete-gaussian':
query = tfp.TreeRangeSumQuery.build_distributed_discrete_gaussian_query(
l2_norm_bound, noise_multiplier * l2_norm_bound /
math.sqrt(expected_clients_per_round), arity)
# If the inner `DifferentiallyPrivateFactory` uses
# `DistributedDiscreteGaussianQuery`, then the record is kept as `tf.int32`
# before feeding to the DP factory.
cast_to_float = False
elif dp_mechanism == 'no-noise':
inner_query = tfp.NoPrivacySumQuery()
query = tfp.TreeRangeSumQuery(arity=arity, inner_query=inner_query)
# If the inner `DifferentiallyPrivateFactory` uses `NoPrivacyQuery`, then
# the record is kept as `tf.int32` before feeding to the DP factory.
cast_to_float = False
else:
raise ValueError(f'Unexpected dp_mechanism: {dp_mechanism!r}.')
nested_factory = differential_privacy.DifferentiallyPrivateFactory(
query, nested_factory)
# 3. Clip as specified by `clip_mechanism`.
nested_factory = clipping_factory.HistogramClippingSumFactory(
clip_mechanism=clip_mechanism,
max_records_per_user=max_records_per_user,
inner_agg_factory=nested_factory,
cast_to_float=cast_to_float)
return nested_factory
def _check_greater_equal(value, threshold, label):
if value < threshold:
raise ValueError(f'`{label}` must be at least {threshold}, got {value}.')
def _check_positive(value, label):
if value <= 0:
raise ValueError(f'{label} must be positive. Found {value}.')
def _check_non_negative(value, label):
if value < 0:
raise ValueError(f'{label} must be non-negative. Found {value}.')
def _check_membership(value, valid_set, label):
if value not in valid_set:
raise ValueError(f'`{label}` must be one of {valid_set}. '
f'Found {value}.')
def _check_in_range(value, label, left, right):
"""Checks that a scalar value is in specified range."""
if not (left <= value <= right):
raise ValueError(f'{label} should be within [{left}, {right}]. '
f'Found {value}.')
def _tree_depth(num_leaves: int, arity: int):
"""Returns the depth of the tree given the number of leaf nodes and arity."""
return math.ceil(math.log(num_leaves) / math.log(arity)) + 1
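# Worked example (illustrative): _tree_depth(num_leaves=8, arity=2) returns
# ceil(log(8) / log(2)) + 1 = 4, matching the 4 levels of a complete binary
# tree over 8 leaves (8 -> 4 -> 2 -> 1 nodes).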
|
tensorflow/federated
|
tensorflow_federated/python/analytics/hierarchical_histogram/hierarchical_histogram_factory.py
|
Python
|
apache-2.0
| 13,326
|
[
"Gaussian"
] |
83b37f8430da559e26489d96b131d4a891eba41ec8c80da9d14e40c51f5af6ca
|
from __future__ import (absolute_import, division, print_function)
import math
from mantid.kernel import *
from mantid.api import *
import mantid.simpleapi as mantid
class EnggFitPeaks(PythonAlgorithm):
EXPECTED_DIM_TYPE = 'Time-of-flight'
PEAK_TYPE = 'BackToBackExponential'
# Max limit on the estimated error of a center for it to be accepted as a good fit
# (in percentage of the center value)
CENTER_ERROR_LIMIT = 10
_expected_peaks_are_in_tof = True
def category(self):
return "Diffraction\\Engineering;Diffraction\\Fitting"
def seeAlso(self):
return [ "EnggFitDIFCFromPeaks","GSASIIRefineFitPeaks","Fit" ]
def name(self):
return "EnggFitPeaks"
def summary(self):
return ("The algorithm fits an expected diffraction pattern to a spectrum from a workspace "
"by fitting one peak at a time (single peak fits).")
def PyInit(self):
self.declareProperty(MatrixWorkspaceProperty("InputWorkspace", "", Direction.Input),
"Workspace to fit peaks in. The X units must be time of flight (TOF).")
self.declareProperty("WorkspaceIndex", 0,
"Index of the spectra to fit peaks in")
self.declareProperty(FloatArrayProperty("ExpectedPeaks", (self._get_default_peaks())),
"A list of peak centre values to be translated into TOF (if required) to find expected "
"peaks.")
self.declareProperty(FileProperty(name="ExpectedPeaksFromFile", defaultValue="",
action=FileAction.OptionalLoad, extensions=[".csv"]),
"Load from file a list of peak centre values to be translated into TOF (if required) to "
"find expected peaks. This takes precedence over 'ExpectedPeaks' if both "
"options are given.")
peaks_grp = 'Peaks to fit'
self.setPropertyGroup('ExpectedPeaks', peaks_grp)
self.setPropertyGroup('ExpectedPeaksFromFile', peaks_grp)
self.declareProperty('OutFittedPeaksTable', '', direction=Direction.Input,
doc='Name for a table workspace with the parameters of the peaks found and '
'fitted. If not given, the table workspace is not created.')
self.declareProperty(ITableWorkspaceProperty("FittedPeaks", "", Direction.Output),
doc="Information on fitted peaks. The table contains, for every peak fitted "
"the expected peak value (in d-spacing), and the parameters fitted. The expected "
"values are given in the column labelled 'dSpacing'. When fitting "
"back-to-back exponential functions, the 'X0' column has the fitted peak center.")
def PyExec(self):
import EnggUtils
# Get peaks in dSpacing from file
expected_peaks = EnggUtils.read_in_expected_peaks(self.getPropertyValue("ExpectedPeaksFromFile"),
self.getProperty('ExpectedPeaks').value)
if len(expected_peaks) < 1:
raise ValueError("Cannot run this algorithm without any input expected peaks")
# Get expected peaks in TOF for the detector
in_wks = self.getProperty("InputWorkspace").value
dim_type = in_wks.getXDimension().name
if self.EXPECTED_DIM_TYPE != dim_type:
raise ValueError("This algorithm expects a workspace with %s X dimension, but "
"the X dimension of the input workspace is: '%s'" % (self.EXPECTED_DIM_TYPE, dim_type))
wks_index = self.getProperty("WorkspaceIndex").value
if self._any_expected_peaks_in_ws_range(in_wks, expected_peaks):
expected_peaks_tof = sorted(expected_peaks)
else:
expected_peaks_tof = sorted(self._expected_peaks_in_tof(expected_peaks, in_wks, wks_index))
self._expected_peaks_are_in_tof = False
if not self._any_expected_peaks_in_ws_range(in_wks, expected_peaks_tof):
raise ValueError("Expected peak centres lie outside the limits of the workspace x axis")
found_peaks = self._peaks_from_find_peaks(in_wks, expected_peaks_tof, wks_index)
if found_peaks.rowCount() < len(expected_peaks_tof):
txt = "Peaks effectively found: " + str(found_peaks)[1:-1]
self.log().warning("Some peaks from the list of expected peaks were not found by the algorithm "
"FindPeaks which this algorithm uses to check that the data has the the "
"expected peaks. " + txt)
peaks_table_name = self.getPropertyValue("OutFittedPeaksTable")
fitted_peaks = self._fit_all_peaks(in_wks, wks_index,
(found_peaks, expected_peaks), peaks_table_name)
# mandatory output
self.setProperty('FittedPeaks', fitted_peaks)
def _any_expected_peaks_in_ws_range(self, input_ws, expected_peaks):
x_axis = input_ws.readX(0)
x_min = min(x_axis)
x_max = max(x_axis)
for peak_centre in expected_peaks:
if self._expected_peak_in_ws_range(x_min, x_max, peak_centre):
return True
return False
def _expected_peak_in_ws_range(self, ws_x_min, ws_x_max, expected_peak_centre):
return ws_x_min <= expected_peak_centre <= ws_x_max
def _get_default_peaks(self):
"""
Gets default peaks for Engg algorithms. Values from CeO2
"""
import EnggUtils
return EnggUtils.default_ceria_expected_peaks()
def _estimate_start_end_fitting_range(self, center, width):
"""
Try to predict a fit window for the peak (using magic numbers). The heuristic
+-COEF_LEFT/RIGHT sometimes produces ranges that are too narrow and contain too few
samples (one or a handful) for the fitting to run correctly. A minimum is enforced.
@Returns :: a tuple with the range (start and end values) for fitting a peak.
"""
# Magic numbers, approx. representing the shape/proportions of a B2BExponential peak
COEF_LEFT = 2
COEF_RIGHT = 3
# Current approach: don't force a minimum width. If the width initial guess is too
# narrow we might miss some peaks.
# To prevent that, the minimum could be set to for example the arbitrary '175' which
# seemed to have good effects overall, but that can lead to fitting the wrong
# (neighbor) peaks.
MIN_RANGE_WIDTH = 1
startx = center - (width * COEF_LEFT)
endx = center + (width * COEF_RIGHT)
x_diff = endx-startx
if x_diff < MIN_RANGE_WIDTH:
inc = (MIN_RANGE_WIDTH - x_diff) / 5
endx = endx + 3*inc
startx = startx - 2*inc
return startx, endx
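# Worked example (illustrative numbers): with center=1000 and width=100 (ToF
# units), startx = 1000 - 2 * 100 = 800 and endx = 1000 + 3 * 100 = 1300;
# the 500-unit range exceeds MIN_RANGE_WIDTH, so no widening is applied.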
def _fit_all_peaks(self, in_wks, wks_index, peaks, peaks_table_name):
"""
This method is the core of EnggFitPeaks. It tries to fit as many peaks as there are in the list of
expected peaks passed to the algorithm. This is a single peak fitting, in the sense that peaks
are fitted separately, one at a time.
The parameters from the (Gaussian) peaks fitted by FindPeaks elsewhere (before calling this method)
are used as initial guesses.
@param in_wks :: input workspace with spectra for fitting
@param wks_index :: workspace index of the spectrum where the given peaks should be fitted
@param peaks :: tuple made of two lists: found_peaks (peaks found by FindPeaks or similar
algorithm), and expected_peaks_dsp (expected peaks given as input to this algorithm
(in dSpacing units)
@param peaks_table_name :: name of an (output) table with peaks parameters. If empty, the table is anonymous
@returns a table with parameters for every fitted peak.
"""
if self._expected_peaks_are_in_tof:
peaks = (peaks[0], self._expected_peaks_in_d(peaks[1], in_wks))
found_peaks = peaks[0]
fitted_peaks = self._create_fitted_peaks_table(peaks_table_name)
prog = Progress(self, start=0, end=1, nreports=found_peaks.rowCount())
for i in range(found_peaks.rowCount()):
prog.report('Fitting peak number ' + str(i+1))
row = found_peaks.row(i)
# Peak parameters estimated by FindPeaks
initial_params = (row['centre'], row['width'], row['height'])
# Oh oh, this actually happens sometimes for some spectra of the system test dataset
# and it should be clarified when the role of FindPeaks etc. is fixed (trac ticket #10907)
width = initial_params[1]
if width <= 0.:
failure_msg = ("Cannot fit a peak with these initial parameters from FindPeaks, center: %s "
", width: %s, height: %s" % (initial_params[0], width, initial_params[1]))
self.log().notice('For workspace index ' + str(wks_index) + ', a peak that is in the list of '
'expected peaks and was found by FindPeaks has not been fitted correctly. '
'It will be ignored. ' + "Expected, dSpacing: {0}. Details: {1}".
format(peaks[1][i], failure_msg))
continue
try:
param_table, chi_over_dof = self._fit_single_peak(peaks[1][i], initial_params, in_wks, wks_index)
except RuntimeError:
self.log().warning("Problem found when trying to fit a peak centered at {0} (dSpacing), "
"for which the initial guess from FindPeaks is at {1} (ToF). Single "
"peak fitting failed. Skipping this peak."
.format(peaks[1][i], initial_params[0]))
continue
fitted_params = {}
fitted_params['dSpacing'] = peaks[1][i]
fitted_params['Chi'] = chi_over_dof
self._add_parameters_to_map(fitted_params, param_table)
if self._peak_is_acceptable(fitted_params, in_wks, wks_index):
fitted_peaks.addRow(fitted_params)
else:
self.log().notice("Discarding peak found with wrong center and/or excessive or suspicious "
"error estimate in the center estimate: {0} (ToF) ({1}, dSpacing), "
"with error: {2}, for dSpacing {3}".
format(fitted_params['X0'], peaks[1][i],
fitted_params['X0_Err'], fitted_params['dSpacing']))
# Check if we were able to really fit any peak
if 0 == fitted_peaks.rowCount():
failure_msg = ("Could find " + str(len(found_peaks)) + " peaks using the algorithm FindPeaks but " +
"then it was not possible to fit any peak starting from these peaks found and using '" +
self.PEAK_TYPE + "' as peak function.")
self.log().warning('Could not fit any peak. Please check the list of expected peaks, as it does not '
'seem to be appropriate for the workspace given. More details: ' +
failure_msg)
raise RuntimeError('Could not fit any peak. Failed to fit peaks with peak type ' +
self.PEAK_TYPE + ' even though FindPeaks found ' + str(found_peaks.rowCount()) +
' peaks in principle. See the logs for further details.')
self.log().information("Fitted {0} peaks in total.".format(fitted_peaks.rowCount()))
return fitted_peaks
def _fit_single_peak(self, expected_center, initial_params, wks, wks_index):
"""
Fits one peak, given an initial guess of parameters (center, width, height).
@param expected_center :: expected peak position
@param initial_params :: tuple with initial guess of the peak center, width and height
@param wks :: workspace with data (spectra) to fit
@param wks_index :: index of the spectrum to fit
@return parameters from Fit, and the goodness of fit estimation from Fit (as Chi^2/DoF)
"""
center, width, height = initial_params
# Sigma value of the peak, assuming Gaussian shape
sigma = width / (2 * math.sqrt(2 * math.log(2)))
# Approximate peak intensity, assuming Gaussian shape
intensity = height * sigma * math.sqrt(2 * math.pi)
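# Note (added for clarity): for a Gaussian, FWHM = 2 * sqrt(2 * ln 2) * sigma
# (about 2.3548 * sigma), and the total area under the curve is
# height * sigma * sqrt(2 * pi); the two conversions above are exactly these
# identities solved for sigma and the area.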
peak = FunctionFactory.createFunction(self.PEAK_TYPE)
peak.setParameter('X0', center)
peak.setParameter('S', sigma)
peak.setParameter('I', intensity)
# Fit using predicted window and a proper function with approximated initial values
fit_alg = self.createChildAlgorithm('Fit')
fit_function = 'name=LinearBackground;{0}'.format(peak)
fit_alg.setProperty('Function', fit_function)
fit_alg.setProperty('InputWorkspace', wks)
fit_alg.setProperty('WorkspaceIndex', wks_index)
fit_alg.setProperty('CreateOutput', True)
(startx, endx) = self._estimate_start_end_fitting_range(center, width)
fit_alg.setProperty('StartX', startx)
fit_alg.setProperty('EndX', endx)
self.log().debug("Fitting for peak expected in (d-spacing): {0}, Fitting peak function: "
"{1}, with startx: {2}, endx: {3}".
format(expected_center, fit_function, startx, endx))
fit_alg.execute()
param_table = fit_alg.getProperty('OutputParameters').value
chi_over_dof = fit_alg.getProperty('OutputChi2overDoF').value
return param_table, chi_over_dof
def _peaks_from_find_peaks(self, in_wks, expected_peaks_tof, wks_index):
"""
Use the algorithm FindPeaks to check that the expected peaks are there.
@param in_wks data workspace
@param expected_peaks_tof vector/list of expected peak values
@param wks_index workspace index
@return list of peaks found by FindPeaks. If there are no issues, the length
of this list should be the same as the number of expected peaks received.
"""
# Find approximate peak positions, assuming Gaussian shapes
find_peaks_alg = self.createChildAlgorithm('FindPeaks')
find_peaks_alg.setProperty('InputWorkspace', in_wks)
find_peaks_alg.setProperty('PeakPositions', expected_peaks_tof)
find_peaks_alg.setProperty('PeakFunction', 'Gaussian')
find_peaks_alg.setProperty('WorkspaceIndex', wks_index)
find_peaks_alg.execute()
found_peaks = find_peaks_alg.getProperty('PeaksList').value
return found_peaks
def _expected_peaks_in_d(self, expected_peaks, input_ws):
run = input_ws.getRun()
if run.hasProperty("difc"):
difc = run.getLogData("difc").value
return self._gsas_convert_to_d(expected_peaks, run, difc)
return self._convert_peaks_to_d_using_convert_units(expected_peaks, input_ws)
def _gsas_convert_to_d(self, expected_peaks, run, difc):
tzero = run.getLogData("tzero").value if run.hasProperty("tzero") else 0
difa = run.getLogData("difa").value if run.hasProperty("difa") else 0
return [self._gsas_convert_single_peak_to_d(peak, difa, difc, tzero) for peak in expected_peaks]
def _gsas_convert_single_peak_to_d(self, peak_tof, difa, difc, tzero):
if difa < 0:
return (-difc / (2 * difa)) - math.sqrt(peak_tof / difa + math.pow(difc / (2 * difa), 2) - tzero / difa)
if difa > 0:
return (-difc / (2 * difa)) + math.sqrt(peak_tof / difa + math.pow(difc / (2 * difa), 2) - tzero / difa)
return (peak_tof - tzero) / difc
def _convert_peaks_to_d_using_convert_units(self, expected_peaks, input_ws):
y_values = [1] * (len(expected_peaks) - 1)
ws_tof = mantid.CreateWorkspace(UnitX="TOF", DataX=expected_peaks, DataY=y_values, ParentWorkspace=input_ws)
ws_d = mantid.ConvertUnits(InputWorkspace=ws_tof, Target="dSpacing")
return ws_d.readX(0)
def _expected_peaks_in_tof(self, expected_peaks, in_wks, wks_index):
"""
Converts expected peak dSpacing values to TOF values for the
detector. Implemented by using the Mantid algorithm ConvertUnits. A
simple user script to do what this function does would be
as follows:
import mantid.simpleapi as sapi
yVals = [1] * (len(expected_peaks) - 1)
ws_from = sapi.CreateWorkspace(UnitX='dSpacing', DataX=expected_peaks, DataY=yVals,
ParentWorkspace=in_wks)
target_units = 'TOF'
wsTo = sapi.ConvertUnits(InputWorkspace=ws_from, Target=target_units)
peaks_ToF = wsTo.dataX(0)
values = [peaks_ToF[i] for i in range(0,len(peaks_ToF))]
@param expected_peaks :: vector of expected peaks, in dSpacing units
@param in_wks :: input workspace with the relevant instrument/geometry
@param wks_index workspace index
Returns:
a vector of ToF values converted from the input (dSpacing) vector.
"""
# This and the next exception, below, still need revisiting:
# https://github.com/mantidproject/mantid/issues/12930
run = in_wks.getRun()
if 1 == in_wks.getNumberHistograms() and run.hasProperty('difc'):
difc = run.getLogData('difc').value
if run.hasProperty('tzero'):
tzero = run.getLogData('tzero').value
else:
tzero = 0
# If the log difc is present, then use these GSAS calibration parameters from the logs
return [(epd * difc + tzero) for epd in expected_peaks]
# When receiving a (for example) focused workspace we still do not know how
# to properly deal with it. CreateWorkspace won't copy the instrument sample
# and source even if given the option ParentWorkspace. Resort to old style
# hard-coded calculation.
# The present behavior of 'ConvertUnits' is to show an information log:
# "Unable to calculate sample-detector distance for 1 spectra. Masking spectrum"
# and silently produce a wrong output workspace. That might need revisiting.
if 1 == in_wks.getNumberHistograms():
return self._do_approx_hard_coded_convert_units_to_ToF(expected_peaks, in_wks, wks_index)
# Create workspace just to convert dSpacing -> ToF, yVals are irrelevant
# this used to be calculated with:
# lambda d: 252.816 * 2 * (50 + detL2) * math.sin(detTwoTheta / 2.0) * d
# which is approximately what ConvertUnits will do
# remember the -1, we must produce a histogram data workspace, which is what
# for example EnggCalibrate expects
y_vals = [1] * (len(expected_peaks) - 1)
# Do like: ws_from = sapi.CreateWorkspace(UnitX='dSpacing', DataX=expected_peaks, DataY=yVals,
# ParentWorkspace=self.getProperty("InputWorkspace").value)
create_alg = self.createChildAlgorithm("CreateWorkspace")
create_alg.setProperty("UnitX", 'dSpacing')
create_alg.setProperty("DataX", expected_peaks)
create_alg.setProperty("DataY", y_vals)
create_alg.setProperty("ParentWorkspace", in_wks)
create_alg.execute()
ws_from = create_alg.getProperty("OutputWorkspace").value
# finally convert units, like: sapi.ConvertUnits(InputWorkspace=ws_from, Target=target_units)
conv_alg = self.createChildAlgorithm("ConvertUnits")
conv_alg.setProperty("InputWorkspace", ws_from)
target_units = 'TOF'
conv_alg.setProperty("Target", target_units)
# note: this implicitly uses default property "EMode" value 'Elastic'
good_exec = conv_alg.execute()
if not good_exec:
raise RuntimeError("Conversion of units went wrong. Failed to run ConvertUnits for {0} "
"peaks. Details: {1}".format(len(expected_peaks), expected_peaks))
output_ws = conv_alg.getProperty('OutputWorkspace').value
peaks_tof = output_ws.readX(0)
if len(peaks_tof) != len(expected_peaks):
raise RuntimeError("Conversion of units went wrong. Converted {0} peaks from the "
"original list of {1} peaks. The instrument definition might be "
"incomplete for the original workspace / file.".
format(len(peaks_tof), len(expected_peaks)))
tof_values = [peaks_tof[i] for i in range(0, len(peaks_tof))]
# catch potential failures because of geometry issues, etc.
if tof_values == expected_peaks:
vals = self._do_approx_hard_coded_convert_units_to_ToF(expected_peaks, in_wks, wks_index)
return vals
return tof_values
def _do_approx_hard_coded_convert_units_to_ToF(self, dsp_values, ws, wks_index):
"""
Converts from dSpacing to Time-of-flight, for one spectrum/detector. This method
is here for exceptional cases that presently need clarification / further work,
here and elsewhere in Mantid, and should ideally be removed in favor of the more
general method that uses the algorithm ConvertUnits.
@param dsp_values to convert from dSpacing
@param ws workspace with the appropriate instrument / geometry definition
@param wks_index index of the spectrum
Returns:
input values converted from dSpacing to ToF
"""
det = ws.getDetector(wks_index)
# Current detector parameters
detL2 = det.getDistance(ws.getInstrument().getSample())
detTwoTheta = ws.detectorTwoTheta(det)
# hard coded equation to convert dSpacing -> TOF for the single detector
# Values (in principle, expected peak positions) in TOF for the detector
tof_values = [252.816 * 2 * (50 + detL2) * math.sin(detTwoTheta / 2.0) * ep for ep in dsp_values]
return tof_values
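# Worked example (illustrative numbers): for detL2 = 1.5 m and detTwoTheta =
# pi/2, a peak at d = 1 Angstrom maps to roughly
# 252.816 * 2 * 51.5 * sin(pi/4) ~= 1.84e4 in ToF units.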
def _create_fitted_peaks_table(self, tbl_name):
"""
Creates a table to hold peak fitting results.
@param tbl_name :: name of the table workspace (can be empty)
"""
if not tbl_name:
alg = self.createChildAlgorithm('CreateEmptyTableWorkspace')
alg.execute()
table = alg.getProperty('OutputWorkspace').value
else:
import mantid.simpleapi as sapi
table = sapi.CreateEmptyTableWorkspace(OutputWorkspace=tbl_name)
table.addColumn('double', 'dSpacing')
for param in ['A0', 'A1', 'X0', 'A', 'B', 'S', 'I']:
table.addColumn('double', param)
table.addColumn('double', param + '_Err')
table.addColumn('double', 'Chi')
return table
def _peak_is_acceptable(self, fitted_params, wks, wks_index):
"""
Decide whether a peak fitted looks acceptable, based on the values fitted for the
parameters of the peak and other metrics from Fit (Chi^2).
It applies for example a simple rule: if the peak center is
negative, it is obviously a fit failure. This is sometimes not so straightforward
from the error estimates and Chi^2 values returned from Fit, as there seems to be
a small percentage of cases with numerical issues (nan, zeros, etc.).
@param fitted_params :: parameters fitted from Fit algorithm
@param wks :: input workspace where a spectrum was fitted
@param wks_index :: workspace index of the spectrum that was fitted
Returns:
True if the peak function parameters and error estimates look acceptable
so the peak should be used.
"""
spec_x_axis = wks.readX(wks_index)
center = self._find_peak_center_in_params(fitted_params)
intensity = self._find_peak_intensity_in_params(fitted_params)
return (spec_x_axis.min() <= center <= spec_x_axis.max() and
intensity > 0 and
fitted_params['Chi'] < 10 and self._b2bexp_is_acceptable(fitted_params))
def _find_peak_center_in_params(self, fitted_params):
"""
Retrieve the fitted peak center/position from the set of parameters fitted.
Returns:
The peak center from the fitted parameters
"""
if 'BackToBackExponential' == self.PEAK_TYPE:
return fitted_params['X0']
else:
raise ValueError('Inconsistency found. I do not know how to deal with centers of peaks '
'of types other than {0}'.format(self.PEAK_TYPE))
def _find_peak_intensity_in_params(self, fitted_params):
"""
Retrieve the fitted peak intensity/height/amplitude from the set of parameters fitted.
Returns:
The peak intensity from the fitted parameters
"""
if 'BackToBackExponential' == self.PEAK_TYPE:
return fitted_params['I']
else:
raise ValueError('Inconsistency found. I do not know how to deal with intensities of '
'peaks of types other than {0}'.format(self.PEAK_TYPE))
def _b2bexp_is_acceptable(self, fitted_params):
"""
Checks specific to Back2BackExponential peak functions.
@param fitted_params :: parameters fitted, where it is assumed that the
standard Back2BackExponential parameter names are used
Returns:
True if the Bk2BkExponential parameters and error estimates look acceptable
so the peak should be used.
"""
# Ban: negative centers, negative left (A) and right (B) exponential coefficient,
# and Gaussian spread (S).
# Also ban strange error estimates (nan, all zero error)
# And make sure that the error on the center (X0) is not too big in relative terms
return (fitted_params['X0'] > 0
and fitted_params['A'] > 0 and fitted_params['B'] > 0 and fitted_params['S'] > 0
and not math.isnan(fitted_params['X0_Err'])
and not math.isnan(fitted_params['A_Err'])
and not math.isnan(fitted_params['B_Err'])
and fitted_params['X0_Err'] < (fitted_params['X0'] * self.CENTER_ERROR_LIMIT / 100.0)
and
(0 != fitted_params['X0_Err'] and 0 != fitted_params['A_Err'] and
0 != fitted_params['B_Err'] and 0 != fitted_params['S_Err'] and
0 != fitted_params['I_Err'])
)
def _add_parameters_to_map(self, param_map, param_table):
"""
Takes parameters from a table that contains output parameters from a Fit run, and adds
them as name:value and name_Err:error_value pairs to the map.
@param param_map :: map where to add the fitting parameters
@param param_table :: table with parameters from a Fit algorithm run
"""
for i in range(param_table.rowCount() - 1): # Skip the last (fit goodness) row
row = param_table.row(i)
# Get local func. param name. E.g., not f1.A0, but just A0
name = (row['Name'].rpartition('.'))[2]
param_map[name] = row['Value']
param_map[name + '_Err'] = row['Error']
AlgorithmFactory.subscribe(EnggFitPeaks)
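# Minimal usage sketch (illustrative; the workspace `ws` and the peak list
# are assumptions, not taken from this file). Once subscribed, the algorithm
# is callable through mantid.simpleapi like any other algorithm:
#
# from mantid.simpleapi import EnggFitPeaks
# fitted = EnggFitPeaks(InputWorkspace=ws, WorkspaceIndex=0,
# ExpectedPeaks=[1.10, 1.35, 1.91],
# FittedPeaks='engg_fitted_peaks')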
|
ScreamingUdder/mantid
|
Framework/PythonInterface/plugins/algorithms/EnggFitPeaks.py
|
Python
|
gpl-3.0
| 27,897
|
[
"Gaussian"
] |
b7a43afe1452872c5a19e319c83f8dc82372846d4312f9fc722a7f166368bd8e
|
"""Security service, manages roles and permissions."""
from __future__ import annotations
from functools import wraps
from itertools import chain
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Dict,
FrozenSet,
List,
Optional,
Set,
Type,
Union,
)
import sqlalchemy as sa
from flask import g
from flask_login import current_user
from sqlalchemy import sql
from sqlalchemy.orm import Session, object_session, subqueryload
from sqlalchemy.sql.selectable import Exists
from abilian.core.entities import Entity
from abilian.core.extensions import db
from abilian.core.extensions.login import AnonymousUser
from abilian.core.models import Model
from abilian.core.models.subjects import Group, Principal, User
from abilian.core.util import unwrap
from abilian.services import Service, ServiceState
from abilian.services.security.models import (
CREATE,
DELETE,
MANAGE,
PERMISSIONS_ATTR,
READ,
WRITE,
Admin,
)
from abilian.services.security.models import Anonymous as AnonymousRole
from abilian.services.security.models import (
Authenticated,
Creator,
FolderishModel,
InheritSecurity,
Manager,
Owner,
Permission,
PermissionAssignment,
Reader,
Role,
RoleAssignment,
SecurityAudit,
Writer,
)
if TYPE_CHECKING:
from abilian.app import Application
#: list of legacy supported permissions when not using :class:`Permission`
#: instance
PERMISSIONS = frozenset(["read", "write", "manage"])
__all__ = [
"security",
"SecurityError",
"SecurityService",
"InheritSecurity",
"SecurityAudit",
]
#: default security matrix
DEFAULT_PERMISSION_ROLE: dict[Permission, frozenset[Role]] = {}
prm = DEFAULT_PERMISSION_ROLE
prm[MANAGE] = frozenset({Admin, Manager})
prm[WRITE] = frozenset({Admin, Manager, Writer})
prm[CREATE] = frozenset({Admin, Manager, Writer})
prm[DELETE] = frozenset({Admin, Manager, Writer})
prm[READ] = frozenset({Admin, Manager, Writer, Reader})
del prm
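# Illustrative lookup (added note): DEFAULT_PERMISSION_ROLE[READ] is
# frozenset({Admin, Manager, Writer, Reader}), i.e. any of these roles grants
# READ when no explicit PermissionAssignment overrides the defaults.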
class SecurityError(Exception):
pass
class SecurityServiceState(ServiceState):
use_cache = True
#: True if security has changed
needs_db_flush = False
def require_flush(fun: Callable) -> Callable:
"""Decorator for methods that need to query security.
It ensures all security related operations are flushed to DB, but
avoids unneeded flushes.
"""
@wraps(fun)
def ensure_flushed(service: SecurityService, *args: Any, **kwargs: Any) -> Any:
if service.app_state.needs_db_flush:
session = db.session()
if not session._flushing and any(
isinstance(m, (RoleAssignment, SecurityAudit))
for models in (session.new, session.dirty, session.deleted)
for m in models
):
session.flush()
service.app_state.needs_db_flush = False
return fun(service, *args, **kwargs)
return ensure_flushed
def query_pa_no_flush(
session: Session, permission: Permission, role: Role, obj: Model | None
):
"""Query for a :class:`PermissionAssignment` using `session` without any
`flush()`.
It works by looking in session `new`, `dirty` and `deleted`, and issuing a
query with no autoflush.
.. note::
This function is used by `add_permission` and `delete_permission` to allow
to add/remove the same assignment twice without issuing any flush. Since
:class:`Entity` creates its initial permissions during
:sqlalchemy:`sqlalchemy.orm.events.SessionEvents.after_attach`, it might be
problematic to issue a flush when entity is not yet ready to be flushed
(missing required attributes for example).
"""
to_visit = [session.deleted, session.dirty, session.new]
with session.no_autoflush:
# no_autoflush is required to visit PERMISSIONS_ATTR without emitting a
# flush()
if obj:
to_visit.append(getattr(obj, PERMISSIONS_ATTR))
permissions = (
p for p in chain(*to_visit) if isinstance(p, PermissionAssignment)
)
for instance in permissions:
if (
instance.permission == permission
and instance.role == role
and instance.object == obj
):
return instance
# Last chance: perform a filtered query. If obj is not None,
# getattr(obj, PERMISSIONS_ATTR) sometimes has objects that are neither
# present in the session nor returned by this query (maybe in a parent
# session transaction's `new`?).
if obj is not None and obj.id is None:
obj = None
return (
session.query(PermissionAssignment)
.filter(
PermissionAssignment.permission == permission,
PermissionAssignment.role == role,
PermissionAssignment.object == obj,
)
.first()
)
class SecurityService(Service):
name = "security"
AppStateClass = SecurityServiceState
def init_app(self, app: Application):
super().init_app(app)
state = app.extensions[self.name]
state.use_cache = True
def _needs_flush(self):
"""Mark next security queries needs DB flush to have up to date
information."""
self.app_state.needs_db_flush = True
def clear(self):
pass
def _current_user_manager(self, session: Session = None) -> User:
"""Return the current user, or SYSTEM user."""
if session is None:
session = db.session()
try:
user = g.user
except Exception:
return session.query(User).get(0)
if sa.orm.object_session(user) is not session:
# this can happen when called from a celery task during development
# (with CELERY_ALWAYS_EAGER=True): the task SA session is not
# app.db.session, and we should not attach this object to
# the other session, because it can make weird, hard-to-debug
# errors related to session.identity_map.
return session.query(User).get(user.id)
else:
return user
# security log
@require_flush
def entries_for(self, obj, limit=20):
assert isinstance(obj, Entity)
return (
SecurityAudit.query.filter(SecurityAudit.object == obj)
.order_by(SecurityAudit.happened_at.desc())
.limit(limit)
)
# inheritance
def set_inherit_security(self, obj: FolderishModel, inherit_security: bool):
assert isinstance(obj, InheritSecurity)
assert isinstance(obj, Entity)
obj.inherit_security = inherit_security
session = object_session(obj) if obj is not None else db.session
session.add(obj)
manager = self._current_user_manager(session=session)
op = (
SecurityAudit.SET_INHERIT
if inherit_security
else SecurityAudit.UNSET_INHERIT
)
audit = SecurityAudit(
manager=manager,
op=op,
object=obj,
object_id=obj.id,
object_type=obj.entity_type,
object_name=obj.name,
)
session.add(audit)
self._needs_flush()
#
# Roles-related API.
#
@require_flush
def get_roles(
self,
principal: AnonymousUser | Group | User,
object: Model | None = None,
no_group_roles: bool = False,
) -> list[Role]:
"""Get all the roles attached to given `principal`, on a given
`object`.
:param principal: a :class:`User` or :class:`Group`
:param object: an :class:`Entity`
:param no_group_roles: If `True`, return only direct roles, not roles
acquired through group membership.
"""
assert principal
if hasattr(principal, "is_anonymous") and principal.is_anonymous:
return [AnonymousRole]
query = db.session.query(RoleAssignment.role)
if isinstance(principal, Group):
filter_principal = RoleAssignment.group == principal
else:
filter_principal = RoleAssignment.user == principal
if not no_group_roles:
groups = [g.id for g in principal.groups]
if groups:
filter_principal |= RoleAssignment.group_id.in_(groups)
query = query.filter(filter_principal)
if object is not None:
assert isinstance(object, Entity)
query = query.filter(RoleAssignment.object == object)
roles = {i[0] for i in query.all()}
if object is not None:
for attr, role in (("creator", Creator), ("owner", Owner)):
if getattr(object, attr) == principal:
roles.add(role)
return list(roles)
@require_flush
def get_principals(
self,
role: Role,
anonymous: bool = True,
users: bool = True,
groups: bool = True,
object: Model | None = None,
as_list: bool = True,
) -> Collection[Principal]:
"""Return all users which are assigned given role."""
if not isinstance(role, Role):
role = Role(role)
assert role
assert users or groups
query = RoleAssignment.query.filter_by(role=role)
if not anonymous:
query = query.filter(RoleAssignment.anonymous == False)
if not users:
query = query.filter(RoleAssignment.user == None)
elif not groups:
query = query.filter(RoleAssignment.group == None)
query = query.filter(RoleAssignment.object == object)
principals = {(ra.user or ra.group) for ra in query.all()}
if object is not None and role in (Creator, Owner):
p = object.creator if role == Creator else object.owner
if p:
principals.add(p)
if not as_list:
return principals
return list(principals)
@require_flush
def _all_roles(self, principal: Principal) -> dict[str | None, set[Role]]:
query = (
db.session.query(RoleAssignment.object_id, RoleAssignment.role)
.outerjoin(Entity)
.add_columns(Entity._entity_type)
)
if isinstance(principal, User):
filter_cond = RoleAssignment.user == principal
if principal.groups:
group_ids = (g.id for g in principal.groups)
filter_cond |= RoleAssignment.group_id.in_(group_ids)
query = query.filter(filter_cond)
else:
query = query.filter(RoleAssignment.group == principal)
results = query.all()
all_roles = {}
for object_id, role, object_type in results:
if object_id is None:
object_key = None
else:
object_key = f"{object_type}:{object_id}"
all_roles.setdefault(object_key, set()).add(role)
return all_roles
def _role_cache(self, principal: Principal) -> dict[str | None, set[Role]]:
if not self._has_role_cache(principal):
# FIXME: should call _fill_role_cache?
principal.__roles_cache__ = {}
return principal.__roles_cache__
def _has_role_cache(self, principal: Principal) -> bool:
return hasattr(principal, "__roles_cache__")
def _set_role_cache(self, principal: Principal, cache: dict[str | None, set[Role]]):
principal.__roles_cache__ = cache
def _fill_role_cache(
self, principal: Principal, overwrite: bool = False
) -> dict[str | None, set[Role]]:
"""Fill role cache for `principal` (User or Group), in order to avoid
too many queries when checking role access with 'has_role'.
Return role_cache of `principal`
"""
if not self.app_state.use_cache:
return {}
if not self._has_role_cache(principal) or overwrite:
self._set_role_cache(principal, self._all_roles(principal))
return self._role_cache(principal)
@require_flush
def _fill_role_cache_batch(
self, principals: Collection[Principal], overwrite: bool = False
):
"""Fill role cache for `principals` (Users and/or Groups), in order to
avoid too many queries when checking role access with 'has_role'."""
if not self.app_state.use_cache:
return
query = db.session.query(RoleAssignment)
users = {u for u in principals if isinstance(u, User)}
groups = {g for g in principals if isinstance(g, Group)}
groups |= {g for u in users for g in u.groups}
if not overwrite:
users = {u for u in users if not self._has_role_cache(u)}
groups = {g for g in groups if not self._has_role_cache(g)}
if not (users or groups):
return
# ensure principals processed here will have role cache. Thus users or
# groups without any role will have an empty role cache, to avoid
# unneeded individual DB query when calling self._fill_role_cache(p).
for p in chain(users, groups):
self._set_role_cache(p, {})
filter_cond = []
if users:
filter_cond.append(RoleAssignment.user_id.in_(u.id for u in users))
if groups:
filter_cond.append(RoleAssignment.group_id.in_(g.id for g in groups))
query = query.filter(sql.or_(*filter_cond))
ra_users: dict[User, dict[str, set[Role]]] = {}
ra_groups: dict[Group, dict[str, set[Role]]] = {}
for ra in query.all():
if ra.user:
all_roles = ra_users.setdefault(ra.user, {})
else:
all_roles = ra_groups.setdefault(ra.group, {})
object_key = (
f"{ra.object.entity_type}:{ra.object_id:d}"
if ra.object is not None
else None
)
all_roles.setdefault(object_key, set()).add(ra.role)
for group, all_roles in ra_groups.items():
self._set_role_cache(group, all_roles)
for user, all_roles in ra_users.items():
for gr in user.groups:
group_roles = self._fill_role_cache(gr)
for object_key, roles in group_roles.items():
obj_roles = all_roles.setdefault(object_key, set())
obj_roles |= roles
self._set_role_cache(user, all_roles)
def _clear_role_cache(self, principal: Principal):
if hasattr(principal, "__roles_cache__"):
del principal.__roles_cache__
if isinstance(principal, Group):
for u in principal.members:
if hasattr(u, "__roles_cache__"):
del u.__roles_cache__
def has_role(
self,
principal: Principal | Role | None,
role: Collection[Role] | Role | str,
object: Model | None = None,
) -> bool:
"""True if `principal` has `role` (either globally, if `object` is
None, or on the specific `object`).
:param:role: can be a list or tuple of strings or a :class:`Role`
instance
`object` can be an :class:`Entity`, a string, or `None`.
Note: we're using a cache for efficiency here. TODO: check that we're not
over-caching.
Note2: caching could also be moved upfront to when the user is loaded.
"""
if not principal:
return False
principal = unwrap(principal)
if not self.running:
return True
if isinstance(role, (Role, str)):
role = (role,)
# admin & manager always have role
valid_roles = frozenset((Admin, Manager) + tuple(role))
if AnonymousRole in valid_roles:
# everybody has the role 'Anonymous'
return True
if (
Authenticated in valid_roles
and isinstance(principal, User)
and not principal.is_anonymous
):
return True
if principal is AnonymousRole or (
hasattr(principal, "is_anonymous") and principal.is_anonymous
):
# anonymous user, and anonymous role isn't in valid_roles
return False
# root always have any role
if isinstance(principal, User) and principal.id == 0:
return True
if object:
assert isinstance(object, Entity)
object_key = f"{object.object_type}:{str(object.id)}"
if Creator in role:
if object.creator == principal:
return True
if Owner in role:
if object.owner == principal:
return True
else:
object_key = None
all_roles = (
self._fill_role_cache(principal)
if self.app_state.use_cache
else self._all_roles(principal)
)
roles = set()
roles |= all_roles.get(None, set())
roles |= all_roles.get(object_key, set())
return len(valid_roles & roles) > 0
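# Usage sketch for has_role (illustrative; `user` and `doc` are assumed
# objects, not defined in this file):
# security.has_role(user, Admin) # global role check
# security.has_role(user, (Reader, Writer), object=doc) # role on an entity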
def grant_role(
self, principal: Principal, role: Role | str, obj: Model | None = None
):
"""Grant `role` to `user` (either globally, if `obj` is None, or on the
specific `obj`)."""
assert principal
principal = unwrap(principal)
session = object_session(obj) if obj is not None else db.session
manager = self._current_user_manager(session=session)
args = {
"role": role,
"object": obj,
"anonymous": False,
"user": None,
"group": None,
}
if principal is AnonymousRole or (
hasattr(principal, "is_anonymous") and principal.is_anonymous
):
args["anonymous"] = True
elif isinstance(principal, User):
args["user"] = principal
else:
args["group"] = principal
query = session.query(RoleAssignment)
if query.filter_by(**args).limit(1).count():
# role already granted, nothing to do
return
# same as above but in current, not yet flushed objects in session. We
# cannot call flush() in grant_role() since this method may be called a
# great number of times in the same transaction, and sqlalchemy limits
# to 100 flushes before triggering a warning
for ra in (
o
for models in (session.new, session.dirty)
for o in models
if isinstance(o, RoleAssignment)
):
if all(getattr(ra, attr) == val for attr, val in args.items()):
return
ra = RoleAssignment(**args)
session.add(ra)
audit = SecurityAudit(manager=manager, op=SecurityAudit.GRANT, **args)
if obj is not None:
audit.object_id = obj.id
audit.object_type = obj.entity_type
object_name = ""
for attr_name in ("name", "path", "__path_before_delete"):
if hasattr(obj, attr_name):
object_name = getattr(obj, attr_name)
audit.object_name = object_name
session.add(audit)
self._needs_flush()
if hasattr(principal, "__roles_cache__"):
del principal.__roles_cache__
def ungrant_role(
self,
principal: Principal,
role: Role | str,
object: Model | None = None,
):
"""Ungrant `role` to `user` (either globally, if `object` is None, or
on the specific `object`)."""
assert principal
principal = unwrap(principal)
session = object_session(object) if object is not None else db.session
manager = self._current_user_manager(session=session)
args = {
"role": role,
"object": object,
"anonymous": False,
"user": None,
"group": None,
}
query = session.query(RoleAssignment)
query = query.filter(
RoleAssignment.role == role, RoleAssignment.object == object
)
if principal is AnonymousRole or (
hasattr(principal, "is_anonymous") and principal.is_anonymous
):
args["anonymous"] = True
query = query.filter(
RoleAssignment.anonymous == True,
RoleAssignment.user == None,
RoleAssignment.group == None,
)
elif isinstance(principal, User):
args["user"] = principal
query = query.filter(RoleAssignment.user == principal)
else:
args["group"] = principal
query = query.filter(RoleAssignment.group == principal)
ra = query.one()
session.delete(ra)
audit = SecurityAudit(manager=manager, op=SecurityAudit.REVOKE, **args)
session.add(audit)
self._needs_flush()
self._clear_role_cache(principal)
@require_flush
def get_role_assignements(self, obj: Model) -> list:
session = object_session(obj) if obj is not None else db.session
if not session:
session = db.session()
query = session.query(RoleAssignment)
query = query.filter(RoleAssignment.object == obj).options(
subqueryload("user.groups")
)
role_assignments = query.all()
results = []
for ra in role_assignments:
if ra.anonymous:
principal = AnonymousRole
elif ra.user:
principal = ra.user
else:
principal = ra.group
results.append((principal, ra.role))
return results
#
# Permission API, currently hardcoded
#
def has_permission(
self,
user: User,
permission: Permission | str,
obj: Model | None = None,
inherit: bool = False,
roles: None | Role | str | list[Role | str] = None,
) -> bool:
"""
:param obj: target object to check permissions.
:param inherit: check with permission inheritance. By default, check only
local roles.
:param roles: additional valid role or iterable of roles having
`permission`.
"""
if not isinstance(permission, Permission):
assert permission in PERMISSIONS
permission = Permission(permission)
user = unwrap(user)
if not self.running:
return True
session = None
if obj is not None:
session = object_session(obj)
if session is None:
session = db.session()
# root always have any permission
if isinstance(user, User) and user.id == 0:
return True
# valid roles
# 1: from database
pa_filter = PermissionAssignment.object == None
if obj is not None and obj.id is not None:
pa_filter |= PermissionAssignment.object == obj
pa_filter &= PermissionAssignment.permission == permission
valid_roles = session.query(PermissionAssignment.role).filter(pa_filter)
valid_roles = {res[0] for res in valid_roles.yield_per(1000)}
# complete with defaults
valid_roles |= {Admin} # always have all permissions
valid_roles |= DEFAULT_PERMISSION_ROLE.get(permission, set())
# FIXME: obj.__class__ could define default permisssion matrix too
if roles is not None:
if isinstance(roles, (Role, str)):
roles = (roles,)
for r in roles:
valid_roles.add(Role(r))
# FIXME: query permission_role: global and on object
if AnonymousRole in valid_roles:
return True
if Authenticated in valid_roles and not user.is_anonymous:
return True
# first test global roles, then object local roles
checked_objs = [None, obj]
if inherit and obj is not None:
while obj.inherit_security and obj.parent is not None:
obj = obj.parent
checked_objs.append(obj)
principals = [user] + list(user.groups)
self._fill_role_cache_batch(principals)
return any(
self.has_role(principal, valid_roles, item)
for principal in principals
for item in checked_objs
)
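# Usage sketch (illustrative; `user` and `doc` are assumed objects):
# security.has_permission(user, READ, obj=doc, inherit=True) walks up
# doc.parent while inherit_security is set, testing global roles first and
# then the local roles of each visited object.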
def query_entity_with_permission(
self,
permission: Permission,
user: User | None = None,
Model: type[Model] = Entity,
) -> Exists:
"""Filter a query on an :class:`Entity` or on of its subclasses.
Usage::
read_q = security.query_entity_with_permission(READ, Model=MyModel)
MyModel.query.filter(read_q)
It should always be placed before any `.join()` happens in the query; else
sqlalchemy might join to the "wrong" entity table when joining to other
:class:`Entity`.
:param user: user to filter for. Default: `current_user`.
:param permission: required :class:`Permission`
:param Model: An :class:`Entity` based class. Useful when there is more than
one Entity based object in query, or if an alias should be used.
:returns: a `sqlalchemy.sql.exists()` expression.
"""
assert isinstance(permission, Permission)
assert issubclass(Model, Entity)
RA = sa.orm.aliased(RoleAssignment)
PA = sa.orm.aliased(PermissionAssignment)
# id column from entity table. Model.id would refer to 'model' table.
# this allows the DB to use indexes / foreign key constraints.
id_column = sa.inspect(Model).primary_key[0]
creator = Model.creator
owner = Model.owner
if not self.running:
return sa.sql.exists([1])
if user is None:
user = unwrap(current_user)
# build role CTE
principal_filter = RA.anonymous == True
if not user.is_anonymous:
principal_filter |= RA.user == user
if user.groups:
principal_filter |= RA.group_id.in_([g.id for g in user.groups])
RA = sa.sql.select([RA], principal_filter).cte()
permission_exists = sa.sql.exists([1]).where(
sa.sql.and_(
PA.permission == permission,
PA.object_id == id_column,
(RA.c.role == PA.role) | (PA.role == AnonymousRole),
(RA.c.object_id == PA.object_id) | (RA.c.object_id == None),
)
)
# is_admin: self-explanatory. It searches for a local or global admin
# role, but PermissionAssignment is not involved, thus it can match on
# entities that don't have *any permission assignment*, whereas previous
# expressions cannot.
is_admin = sa.sql.exists([1]).where(
sa.sql.and_(
RA.c.role == Admin,
(RA.c.object_id == id_column) | (RA.c.object_id == None),
principal_filter,
)
)
filter_expr = permission_exists | is_admin
if user and not user.is_anonymous:
is_owner_or_creator = sa.sql.exists([1]).where(
sa.sql.and_(
PA.permission == permission,
PA.object_id == id_column,
sa.sql.or_(
(PA.role == Owner) & (owner == user),
(PA.role == Creator) & (creator == user),
),
)
)
filter_expr |= is_owner_or_creator
return filter_expr
def get_permissions_assignments(
self, obj: Model | None = None, permission: Permission | None = None
) -> dict[Permission, set[Role]]:
"""
:param permission: return only roles having this permission
:returns: a dict whose keys are `permissions` and whose values are iterables of `roles`.
"""
session = None
if obj is not None:
assert isinstance(obj, Entity)
session = object_session(obj)
if obj.id is None:
obj = None
if session is None:
session = db.session()
pa = session.query(
PermissionAssignment.permission, PermissionAssignment.role
).filter(PermissionAssignment.object == obj)
if permission:
pa = pa.filter(PermissionAssignment.permission == permission)
results: dict[Permission, set[Role]] = {}
for permission, role in pa.yield_per(1000):
results.setdefault(permission, set()).add(role)
return results
def add_permission(
self, permission: Permission, role: Role, obj: Model | None = None
):
session = None
if obj is not None:
session = object_session(obj)
if session is None:
session = db.session()
pa = query_pa_no_flush(session, permission, role, obj)
if not pa:
pa = PermissionAssignment(permission=permission, role=role, object=obj)
# do it in any case: it could have been found in session.deleted
session.add(pa)
def delete_permission(
self, permission: Permission, role: Role, obj: Model | None = None
):
session = None
if obj is not None:
session = object_session(obj)
if session is None:
session = db.session()
pa = query_pa_no_flush(session, permission, role, obj)
if pa:
session.delete(pa)
if obj:
# this seems to be required with sqlalchemy > 0.9
session.expire(obj, [PERMISSIONS_ATTR])
def filter_with_permission(
self,
user: User,
permission: Permission | str,
obj_list: list[Model],
inherit=False,
):
user = unwrap(user)
return [
obj
for obj in obj_list
if self.has_permission(user, permission, obj, inherit)
]
# Instantiate the service
security = SecurityService()
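# Minimal wiring sketch (illustrative, assuming an initialized Abilian app,
# a User `user` and an Entity `doc`):
# security.grant_role(user, Reader, obj=doc)
# security.add_permission(READ, Reader, obj=doc)
# assert security.has_permission(user, READ, obj=doc)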
|
abilian/abilian-core
|
src/abilian/services/security/service.py
|
Python
|
lgpl-2.1
| 30,275
|
[
"VisIt"
] |
b4ba3c1adba584ba0faec697ef3425f8bbb165c1426c4dd19e5a9bfbd764e49b
|
import neuroml as N
from owmeta_core.data import DataUser
from .neuron import Neuron
class NeuroML(DataUser):
@classmethod
def generate(cls, o, t=2):
"""
Get a NeuroML object that represents the given object. The ``t`` parameter
determines what content is included in the NeuroML object:
:param o: The object to generate neuroml from
:param t: The kind of content that should be included in the document
- 0=full morphology+biophysics
- 1=cell body only+biophysics
- 2=full morphology only
:returns: A NeuroML object that represents the given object.
:rtype: NeuroMLDocument
"""
if isinstance(o, Neuron):
# read in the morphology data
d = N.NeuroMLDocument(id=o.name())
c = N.Cell(id=o.name())
c.morphology = o.morphology()
d.cells.append(c)
return d
else:
raise "Not a valid object for conversion to neuroml"
@classmethod
def write(cls, o, n):
"""
Write the given neuroml document object out to a file
:param o: The NeuroMLDocument to write
:param n: The name of the file to write to
"""
N.writers.NeuroMLWriter.write(o, n)
@classmethod
def validate(cls, o):
pass
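# Usage sketch (illustrative; building a populated Neuron with owmeta is
# assumed to happen elsewhere):
# doc = NeuroML.generate(neuron, t=2) # full morphology only
# NeuroML.write(doc, 'neuron.nml')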
|
openworm/PyOpenWorm
|
owmeta/my_neuroml.py
|
Python
|
mit
| 1,367
|
[
"NEURON"
] |
c484c8c1d31784dab516e21844452b9a38d1faf3cff663de735f5330d7d45cc4
|
"""
This is only meant to add docs to objects defined in C-extension modules.
The purpose is to allow easier editing of the docstrings without
requiring a re-compile.
NOTE: Many of the methods of ndarray have corresponding functions.
If you update these docstrings, please keep also the ones in
core/fromnumeric.py, core/defmatrix.py up-to-date.
"""
from __future__ import division, absolute_import, print_function
from numpy.lib import add_newdoc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in row-major, C-style order (the last
index varying the fastest). The iterator can also be indexed using
basic slicing or advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator cannot be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<type 'numpy.flatiter'>
>>> for item in fl:
... print(item)
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> fl.next()
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> fl.next()
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* "buffered" enables buffering when required.
* "c_index" causes a C-order index to be tracked.
* "f_index" causes a Fortran-order index to be tracked.
* "multi_index" causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* "common_dtype" causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* "copy_if_overlap" causes the iterator to determine if read
operands have overlap with write operands, and make temporary
copies as necessary to avoid overlap. False positives (needless
copying) are possible in some cases.
* "delay_bufalloc" delays allocation of the buffers until
a reset() call is made. Allows "allocate" operands to
be initialized before their values are copied into the buffers.
* "external_loop" causes the `values` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* "grow_inner" allows the `value` array sizes to be made
larger than the buffer size when both "buffered" and
"external_loop" is used.
* "ranged" allows the iterator to be restricted to a sub-range
of the iterindex values.
* "refs_ok" enables iteration of reference types, such as
object arrays.
* "reduce_ok" enables iteration of "readwrite" operands
which are broadcasted, also known as reduction operands.
* "zerosize_ok" allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
"readonly", "readwrite", or "writeonly" must be specified.
* "readonly" indicates the operand will only be read from.
* "readwrite" indicates the operand will be read from and written to.
* "writeonly" indicates the operand will only be written to.
* "no_broadcast" prevents the operand from being broadcasted.
* "contig" forces the operand data to be contiguous.
* "aligned" forces the operand data to be aligned.
* "nbo" forces the operand data to be in native byte order.
* "copy" allows a temporary read-only copy if required.
* "updateifcopy" allows a temporary read-write copy if required.
* "allocate" causes the array to be allocated if it is None
in the `op` parameter.
* "no_subtype" prevents an "allocate" operand from using a subtype.
* "arraymask" indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
* 'writemasked' indicates that only elements where the chosen
'arraymask' operand is True will be written to.
* "overlap_assume_elementwise" can be used to mark operands that are
accessed only in the iterator order, to allow less conservative
copying when "copy_if_overlap" is present.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of "allocate" operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
If provided, is a list of ints or None for each operands.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as "newaxis".
itershape : tuple of ints, optional
The desired shape of the iterator. This allows "allocate" operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
buffersize : int, optional
When buffering is enabled, controls the size of the temporary
buffers. Set to 0 for the default value.
Attributes
----------
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
If True, the iterator was created with the "delay_bufalloc" flag,
and no reset() function was called on it yet.
has_index : bool
If True, the iterator was created with either the "c_index" or
the "f_index" flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
If True, the iterator was created with the "multi_index" flag,
and the property `multi_index` can be used to retrieve it.
index
When the "c_index" or "f_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
and `has_index` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
iterindex : int
An index which matches the order of iteration.
itersize : int
Size of the iterator.
itviews
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern.
multi_index
When the "multi_index" flag was used, this property
        provides access to the index. Raises a ValueError if accessed
        and `has_multi_index` is False.
ndim : int
The iterator's dimension.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
The array(s) to be iterated over.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value
Value of `operands` at current iteration. Normally, this is a
tuple of array scalars, but if the flag "external_loop" is used,
it is a tuple of one dimensional arrays.
Notes
-----
`nditer` supersedes `flatiter`. The iterator implementation behind
`nditer` is also exposed by the NumPy C API.
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
if you need the iterator's coordinates or index, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
Python iterator protocol::
def iter_add_py(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
for (a, b, c) in it:
addop(a, b, out=c)
return it.operands[2]
Here is the same function, but following the C-style pattern::
def iter_add(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
while not it.finished:
addop(it[0], it[1], out=it[2])
it.iternext()
return it.operands[2]
Here is an example outer product function::
def outer_it(x, y, out=None):
mulop = np.multiply
it = np.nditer([x, y, out], ['external_loop'],
[['readonly'], ['readonly'], ['writeonly', 'allocate']],
op_axes=[range(x.ndim)+[-1]*y.ndim,
[-1]*x.ndim+range(y.ndim),
None])
for (a, b, c) in it:
mulop(a, b, out=c)
return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
>>> outer_it(a,b)
array([[1, 2, 3],
[2, 4, 6]])
Here is an example function which operates like a "lambda" ufunc::
def luf(lamdaexpr, *args, **kwargs):
"luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)"
nargs = len(args)
op = (kwargs.get('out',None),) + args
it = np.nditer(op, ['buffered','external_loop'],
[['writeonly','allocate','no_broadcast']] +
[['readonly','nbo','aligned']]*nargs,
order=kwargs.get('order','K'),
casting=kwargs.get('casting','safe'),
buffersize=kwargs.get('buffersize',0))
while not it.finished:
it[0] = lamdaexpr(*it[1:])
it.iternext()
return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
""")
# nditer methods
add_newdoc('numpy.core', 'nditer', ('copy',
"""
copy()
Get a copy of the iterator in its current state.
Examples
--------
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
>>> it.next()
(array(0), array(1))
>>> it2 = it.copy()
>>> it2.next()
(array(1), array(2))
"""))
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
Print the current state of the `nditer` instance and debug info to stdout.
"""))
add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
"""
enable_external_loop()
When the "external_loop" was not used during construction, but
is desired, this modifies the iterator to behave as if the flag
was specified.
"""))
add_newdoc('numpy.core', 'nditer', ('iternext',
"""
iternext()
Check whether iterations are left, and perform a single internal iteration
    without returning the result. Used in the C-style do-while pattern.
    For an example, see `nditer`.
Returns
-------
iternext : bool
Whether or not there are iterations left.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_axis',
"""
remove_axis(i)
Removes axis `i` from the iterator. Requires that the flag "multi_index"
be enabled.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
"""
remove_multi_index()
When the "multi_index" flag was specified, this removes it, allowing
the internal iteration structure to be optimized further.
"""))
add_newdoc('numpy.core', 'nditer', ('reset',
"""
reset()
Reset the iterator to its initial state.
"""))
###############################################################################
#
# broadcast
#
###############################################################################
add_newdoc('numpy.core', 'broadcast',
"""
Produce an object that mimics broadcasting.
Parameters
----------
in1, in2, ... : array_like
Input parameters.
Returns
-------
b : broadcast object
Broadcast the input parameters against one another, and
return an object that encapsulates the result.
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
See Also
--------
broadcast_arrays
broadcast_to
Examples
--------
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
array([[ 5., 6., 7.],
[ 6., 7., 8.],
[ 7., 8., 9.]])
Compare against built-in broadcasting:
>>> x + y
array([[5, 6, 7],
[6, 7, 8],
[7, 8, 9]])
""")
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""
current index in broadcasted result
Examples
--------
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (1, 5), (1, 6))
>>> b.index
3
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""
tuple of iterators along ``self``'s "components."
Returns a tuple of `numpy.flatiter` objects, one for each "component"
of ``self``.
See Also
--------
numpy.flatiter
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
>>> row.next(), col.next()
(1, 4)
"""))
add_newdoc('numpy.core', 'broadcast', ('ndim',
"""
Number of dimensions of broadcasted result. Alias for `nd`.
.. versionadded:: 1.12.0
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.ndim
2
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
Number of dimensions of broadcasted result. For code intended for NumPy
1.12.0 and later the more consistent `ndim` is preferred.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.nd
2
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""
Number of iterators possessed by the broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.numiter
2
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""
Shape of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.shape
(3, 3)
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""
Total size of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.size
9
"""))
add_newdoc('numpy.core', 'broadcast', ('reset',
"""
reset()
Reset the broadcasted result's iterator(s).
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.array([1, 2, 3])
    >>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (2, 4), (3, 4))
>>> b.index
3
>>> b.reset()
>>> b.index
0
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray', 'array',
"""
array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then the type will
be determined as the minimum type required to hold the objects in the
sequence. This argument can only be used to 'upcast' the array. For
downcasting, use the .astype(t) method.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy will
only be made if __array__ returns a copy, if obj is a nested sequence,
or if a copy is needed to satisfy any of the other requirements
(`dtype`, `order`, etc.).
order : {'K', 'A', 'C', 'F'}, optional
Specify the memory layout of the array. If object is not an array, the
newly created array will be in C order (row major) unless 'F' is
specified, in which case it will be in Fortran order (column major).
If object is an array the following holds.
===== ========= ===================================================
order no copy copy=True
===== ========= ===================================================
'K' unchanged F & C order preserved, otherwise most similar order
'A' unchanged F order if input is F and not C, otherwise C order
'C' C order C order
'F' F order F order
===== ========= ===================================================
When ``copy=False`` and a copy is made for other reasons, the result is
the same as if ``copy=True``, with some exceptions for `A`, see the
Notes section. The default order is 'K'.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty, empty_like, zeros, zeros_like, ones, ones_like, full, full_like
Notes
-----
When order is 'A' and `object` is an array in neither 'C' nor 'F' order,
and a copy is forced by a change in dtype, then the order of the result is
not necessarily 'C' as expected. This is likely a bug.
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
""")
add_newdoc('numpy.core.multiarray', 'empty',
"""
empty(shape, dtype=float, order='C')
Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array
dtype : data-type, optional
Desired output data-type.
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data of the given shape, dtype, and
order. Object arrays will be initialized to None.
See Also
--------
empty_like, zeros, ones
Notes
-----
`empty`, unlike `zeros`, does not set the array values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #random
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #random
""")
add_newdoc('numpy.core.multiarray', 'empty_like',
"""
empty_like(a, dtype=None, order='K', subok=True)
Return a new array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of the
returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], #random
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
""")
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
Return a new scalar array of the given type initialized with obj.
This function is meant mainly for pickle support. `dtype` must be a
valid data-type descriptor. If `dtype` corresponds to an object
descriptor, then `obj` can be any object, otherwise `obj` must be a
string. If `obj` is not given, it will be interpreted as None for object
type and as zeros for all other types.
""")
add_newdoc('numpy.core.multiarray', 'zeros',
"""
zeros(shape, dtype=float, order='C')
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=np.int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> np.zeros(s)
array([[ 0., 0.],
[ 0., 0.]])
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
""")
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
fromstring(string, dtype=float, count=-1, sep='')
A new 1-D array initialized from raw binary or text data in a string.
Parameters
----------
string : str
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
the data must be in exactly this format.
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
length of the data.
sep : str, optional
If not provided or, equivalently, the empty string, the data will
be interpreted as binary data; otherwise, as ASCII text with
decimal numbers. Also in this latter case, this argument is
interpreted as the string separating numbers in the data; extra
whitespace between elements is also ignored.
Returns
-------
arr : ndarray
The constructed array.
Raises
------
ValueError
If the string is not the correct size to satisfy the requested
`dtype` and `count`.
See Also
--------
frombuffer, fromfile, fromiter
Examples
--------
>>> np.fromstring('\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
>>> np.fromstring('1 2', dtype=int, sep=' ')
array([1, 2])
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
>>> np.fromstring('\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
""")
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iterable, dtype, count=-1)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
iterable : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
Returns
-------
out : ndarray
The output array.
Notes
-----
Specify `count` to improve performance. It allows ``fromiter`` to
pre-allocate the output array, instead of resizing it on demand.
Examples
--------
>>> iterable = (x*x for x in range(5))
>>> np.fromiter(iterable, np.float)
array([ 0., 1., 4., 9., 16.])
""")
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
fromfile(file, dtype=float, count=-1, sep='')
Construct an array from data in a text or binary file.
A highly efficient way of reading binary data with a known data-type,
as well as parsing simply formatted text files. Data written using the
`tofile` method can be read using this function.
Parameters
----------
file : file or str
Open file object or filename.
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
sep : str
Separator between items if file is a text file.
Empty ("") separator means the file should be treated as binary.
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
whitespace.
See also
--------
load, save
ndarray.tofile
loadtxt : More flexible way of loading data from a text file.
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
    data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
Examples
--------
Construct an ndarray:
>>> dt = np.dtype([('time', [('min', int), ('sec', int)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
Save the raw data to disk:
>>> import os
>>> fname = os.tmpnam()
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
""")
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
frombuffer(buffer, dtype=float, count=-1, offset=0)
Interpret a buffer as a 1-dimensional array.
Parameters
----------
buffer : buffer_like
An object that exposes the buffer interface.
dtype : data-type, optional
Data-type of the returned array; default: float.
count : int, optional
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset (in bytes); default: 0.
Notes
-----
If the buffer has data that is not in machine byte-order, this should
be specified as part of the data-type, e.g.::
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
>>> np.frombuffer(buf, dtype=dt)
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
Examples
--------
>>> s = 'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
array(['w', 'o', 'r', 'l', 'd'],
dtype='|S1')
""")
add_newdoc('numpy.core.multiarray', 'concatenate',
"""
concatenate((a1, a2, ...), axis=0)
Join a sequence of arrays along an existing axis.
Parameters
----------
a1, a2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
ma.concatenate : Concatenate function that preserves input masks.
array_split : Split an array into multiple sub-arrays of equal or
near-equal size.
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise)
vsplit : Split array into multiple sub-arrays vertically (row wise)
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
stack : Stack a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise)
vstack : Stack arrays in sequence vertically (row wise)
dstack : Stack arrays in sequence depth wise (along third dimension)
Notes
-----
When one or more of the arrays to be concatenated is a MaskedArray,
this function will return a MaskedArray object instead of an ndarray,
but the input masks are *not* preserved. In cases where a MaskedArray
is expected as input, use the ma.concatenate function from the masked
array module instead.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.concatenate((a, b.T), axis=1)
array([[1, 2, 5],
[3, 4, 6]])
This function will not preserve masking of MaskedArray inputs.
>>> a = np.ma.arange(3)
>>> a[1] = np.ma.masked
>>> b = np.arange(2, 5)
>>> a
masked_array(data = [0 -- 2],
mask = [False True False],
fill_value = 999999)
>>> b
array([2, 3, 4])
>>> np.concatenate([a, b])
masked_array(data = [0 1 2 2 3 4],
mask = False,
fill_value = 999999)
>>> np.ma.concatenate([a, b])
masked_array(data = [0 -- 2 2 3 4],
mask = [False True False False False False],
fill_value = 999999)
""")
add_newdoc('numpy.core', 'inner',
"""
inner(a, b)
Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : array_like
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimension of `a` and `b` has different size.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14, 38, 62],
[ 86, 110, 134]])
An example where `b` is a scalar:
>>> np.inner(np.eye(2), 7)
array([[ 7., 0.],
[ 0., 7.]])
""")
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray', 'correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray', 'arange',
"""
arange([start,] stop[, step,], dtype=None)
Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range <http://docs.python.org/lib/built-in-funcs.html>`_ function,
but returns an ndarray rather than a list.
When using a non-integer step, such as 0.1, the results will often not
be consistent. It is better to use ``linspace`` for these cases.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified, `start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
See Also
--------
linspace : Evenly spaced numbers with careful handling of endpoints.
ogrid: Arrays of evenly spaced numbers in N-dimensions.
mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
>>> np.arange(3)
array([0, 1, 2])
>>> np.arange(3.0)
array([ 0., 1., 2.])
>>> np.arange(3,7)
array([3, 4, 5, 6])
>>> np.arange(3,7,2)
array([3, 5])
""")
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NDARRAY_VERSION number.
""")
add_newdoc('numpy.core.multiarray', '_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
add_newdoc('numpy.core.multiarray', 'set_string_function',
"""
set_string_function(f, repr=1)
Internal method to set a function to be used when pretty printing arrays.
""")
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
"""
set_numeric_ops(op1=func1, op2=func2, ...)
Set numerical operators for array objects.
Parameters
----------
op1, op2, ... : callable
Each ``op = func`` pair describes an operator to be replaced.
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
addition by modulus 5 addition.
Returns
-------
saved_ops : list of callables
A list of all operators, stored before making replacements.
Notes
-----
.. WARNING::
Use with care! Incorrect usage may lead to memory errors.
A function replacing an operator cannot make use of that operator.
For example, when replacing add, you may not use ``+``. Instead,
directly call ufuncs.
Examples
--------
>>> def add_mod5(x, y):
... return np.add(x, y) % 5
...
>>> old_funcs = np.set_numeric_ops(add=add_mod5)
>>> x = np.arange(12).reshape((3, 4))
>>> x + x
array([[0, 2, 4, 1],
[3, 0, 2, 4],
[1, 3, 0, 2]])
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
""")
add_newdoc('numpy.core.multiarray', 'where',
"""
where(condition, [x, y])
Return elements, either from `x` or `y`, depending on `condition`.
If only `condition` is given, return ``condition.nonzero()``.
Parameters
----------
condition : array_like, bool
When True, yield `x`, otherwise yield `y`.
x, y : array_like, optional
Values from which to choose. `x` and `y` need to have the same
shape as `condition`.
Returns
-------
out : ndarray or tuple of ndarrays
If both `x` and `y` are specified, the output array contains
elements of `x` where `condition` is True, and elements from
`y` elsewhere.
If only `condition` is given, return the tuple
``condition.nonzero()``, the indices where `condition` is True.
See Also
--------
nonzero, choose
Notes
-----
If `x` and `y` are given and input arrays are 1-D, `where` is
equivalent to::
[xv if c else yv for (c,xv,yv) in zip(condition,x,y)]
Examples
--------
>>> np.where([[True, False], [True, True]],
... [[1, 2], [3, 4]],
... [[9, 8], [7, 6]])
array([[1, 8],
[3, 4]])
>>> np.where([[0, 1], [1, 0]])
(array([0, 1]), array([1, 0]))
>>> x = np.arange(9.).reshape(3, 3)
>>> np.where( x > 5 )
(array([2, 2, 2]), array([0, 1, 2]))
>>> x[np.where( x > 3.0 )] # Note: result is 1D.
array([ 4., 5., 6., 7., 8.])
>>> np.where(x < 5, x, -1) # Note: broadcasting.
array([[ 0., 1., 2.],
[ 3., 4., -1.],
[-1., -1., -1.]])
Find the indices of elements of `x` that are in `goodvalues`.
>>> goodvalues = [3, 4, 7]
>>> ix = np.in1d(x.ravel(), goodvalues).reshape(x.shape)
>>> ix
array([[False, False, False],
[ True, True, False],
[False, True, False]], dtype=bool)
>>> np.where(ix)
(array([1, 1, 2]), array([0, 1, 1]))
""")
add_newdoc('numpy.core.multiarray', 'lexsort',
"""
lexsort(keys, axis=-1)
Perform an indirect sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
can be converted to arrays of the same shape. If a 2D array is provided
    for the keys argument, its rows are interpreted as the sorting keys and
sorting is according to the last row, second last row etc.
Parameters
----------
keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> print(ind)
[2 0 4 6 5 3 1]
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1])
""")
add_newdoc('numpy.core.multiarray', 'can_cast',
"""
can_cast(from, totype, casting = 'safe')
Returns True if cast between data types can occur according to the
casting rule. If from is a scalar or array scalar, also returns
True if the scalar value can be cast without overflow or truncation
to an integer.
Parameters
----------
from : dtype, dtype specifier, scalar, or array
Data type, scalar, or array to cast from.
totype : dtype or dtype specifier
Data type to cast to.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
out : bool
True if cast can occur according to the casting rule.
Notes
-----
Starting in NumPy 1.9, can_cast function now returns False in 'safe'
casting mode for integer/float dtype and string dtype if the string dtype
length is not long enough to store the max integer/float value converted
to a string. Previously can_cast in 'safe' mode returned True for
integer/float dtype and a string dtype of any length.
See also
--------
dtype, result_type
Examples
--------
Basic examples
>>> np.can_cast(np.int32, np.int64)
True
>>> np.can_cast(np.float64, np.complex)
True
>>> np.can_cast(np.complex, np.float)
False
>>> np.can_cast('i8', 'f8')
True
>>> np.can_cast('i8', 'f4')
False
>>> np.can_cast('i4', 'S4')
False
Casting scalars
>>> np.can_cast(100, 'i1')
True
>>> np.can_cast(150, 'i1')
False
>>> np.can_cast(150, 'u1')
True
>>> np.can_cast(3.5e100, np.float32)
False
>>> np.can_cast(1000.0, np.float32)
True
Array scalar checks the value, array does not
>>> np.can_cast(np.array(1000.0), np.float32)
True
>>> np.can_cast(np.array([1000.0]), np.float32)
False
Using the casting rules
>>> np.can_cast('i8', 'i8', 'no')
True
>>> np.can_cast('<i8', '>i8', 'no')
False
>>> np.can_cast('<i8', '>i8', 'equiv')
True
>>> np.can_cast('<i4', '>i8', 'equiv')
False
>>> np.can_cast('<i4', '>i8', 'safe')
True
>>> np.can_cast('<i8', '>i4', 'safe')
False
>>> np.can_cast('<i8', '>i4', 'same_kind')
True
>>> np.can_cast('<i8', '>u4', 'same_kind')
False
>>> np.can_cast('<i8', '>u4', 'unsafe')
True
""")
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
Returns the data type with the smallest size and smallest scalar
kind to which both ``type1`` and ``type2`` may be safely cast.
The returned data type is always in native byte order.
This function is symmetric and associative.
Parameters
----------
type1 : dtype or dtype specifier
First data type.
type2 : dtype or dtype specifier
Second data type.
Returns
-------
out : dtype
The promoted data type.
Notes
-----
.. versionadded:: 1.6.0
Starting in NumPy 1.9, promote_types function now returns a valid string
length when given an integer or float dtype as one argument and a string
dtype as another argument. Previously it always returned the input string
dtype, even if it wasn't long enough to store the max integer/float value
converted to a string.
See Also
--------
result_type, dtype, can_cast
Examples
--------
>>> np.promote_types('f4', 'f8')
dtype('float64')
>>> np.promote_types('i8', 'f4')
dtype('float64')
>>> np.promote_types('>i8', '<c8')
dtype('complex128')
>>> np.promote_types('i4', 'S8')
dtype('S11')
""")
add_newdoc('numpy.core.multiarray', 'min_scalar_type',
"""
min_scalar_type(a)
For scalar ``a``, returns the data type with the smallest size
and smallest scalar kind which can hold its value. For non-scalar
array ``a``, returns the vector's dtype unmodified.
Floating point values are not demoted to integers,
and complex values are not demoted to floats.
Parameters
----------
a : scalar or array_like
The value whose minimal data type is to be found.
Returns
-------
out : dtype
The minimal data type.
Notes
-----
.. versionadded:: 1.6.0
See Also
--------
result_type, promote_types, dtype, can_cast
Examples
--------
>>> np.min_scalar_type(10)
dtype('uint8')
>>> np.min_scalar_type(-260)
dtype('int16')
>>> np.min_scalar_type(3.1)
dtype('float16')
>>> np.min_scalar_type(1e50)
dtype('float64')
>>> np.min_scalar_type(np.arange(4,dtype='f8'))
dtype('float64')
""")
add_newdoc('numpy.core.multiarray', 'result_type',
"""
result_type(*arrays_and_dtypes)
Returns the type that results from applying the NumPy
type promotion rules to the arguments.
Type promotion in NumPy works similarly to the rules in languages
like C++, with some slight differences. When both scalars and
arrays are used, the array's type takes precedence and the actual value
of the scalar is taken into account.
For example, calculating 3*a, where a is an array of 32-bit floats,
intuitively should result in a 32-bit float output. If the 3 is a
32-bit integer, the NumPy rules indicate it can't convert losslessly
into a 32-bit float, so a 64-bit float should be the result type.
By examining the value of the constant, '3', we see that it fits in
an 8-bit integer, which can be cast losslessly into the 32-bit float.
Parameters
----------
arrays_and_dtypes : list of arrays and dtypes
The operands of some operation whose result type is needed.
Returns
-------
out : dtype
The result type.
See also
--------
dtype, promote_types, min_scalar_type, can_cast
Notes
-----
.. versionadded:: 1.6.0
The specific algorithm used is as follows.
Categories are determined by first checking which of boolean,
integer (int/uint), or floating point (float/complex) the maximum
kind of all the arrays and the scalars are.
If there are only scalars or the maximum category of the scalars
is higher than the maximum category of the arrays,
the data types are combined with :func:`promote_types`
to produce the return value.
Otherwise, `min_scalar_type` is called on each array, and
the resulting data types are all combined with :func:`promote_types`
to produce the return value.
The set of int values is not a subset of the uint values for types
with the same number of bits, something not reflected in
:func:`min_scalar_type`, but handled as a special case in `result_type`.
Examples
--------
>>> np.result_type(3, np.arange(7, dtype='i1'))
dtype('int8')
>>> np.result_type('i4', 'c8')
dtype('complex128')
>>> np.result_type(3.0, -2)
dtype('float64')
""")
add_newdoc('numpy.core.multiarray', 'newbuffer',
"""
newbuffer(size)
Return a new uninitialized buffer object.
Parameters
----------
size : int
Size in bytes of returned buffer object.
Returns
-------
newbuffer : buffer object
Returned, uninitialized buffer object of `size` bytes.
""")
add_newdoc('numpy.core.multiarray', 'getbuffer',
"""
getbuffer(obj [,offset[, size]])
Create a buffer object from the given object referencing a slice of
length size starting at offset.
Default is the entire buffer. A read-write buffer is attempted followed
by a read-only buffer.
Parameters
----------
obj : object
offset : int, optional
size : int, optional
Returns
-------
buffer_obj : buffer
Examples
--------
>>> buf = np.getbuffer(np.ones(5), 1, 3)
>>> len(buf)
3
>>> buf[0]
'\\x00'
>>> buf
<read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
""")
add_newdoc('numpy.core', 'dot',
"""
dot(a, b, out=None)
Dot product of two arrays.
For 2-D arrays it is equivalent to matrix multiplication, and for 1-D
arrays to inner product of vectors (without complex conjugation). For
N dimensions it is a sum product over the last axis of `a` and
the second-to-last of `b`::
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
matmul : '@' operator as method with out parameter.
Examples
--------
>>> np.dot(3, 4)
12
Neither argument is complex-conjugated:
>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
[2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128
""")
add_newdoc('numpy.core', 'matmul',
"""
matmul(a, b, out=None)
Matrix product of two arrays.
The behavior depends on the arguments in the following way.
- If both arguments are 2-D they are multiplied like conventional
matrices.
- If either argument is N-D, N > 2, it is treated as a stack of
matrices residing in the last two indexes and broadcast accordingly.
- If the first argument is 1-D, it is promoted to a matrix by
prepending a 1 to its dimensions. After matrix multiplication
the prepended 1 is removed.
- If the second argument is 1-D, it is promoted to a matrix by
appending a 1 to its dimensions. After matrix multiplication
the appended 1 is removed.
Multiplication by a scalar is not allowed, use ``*`` instead. Note that
multiplying a stack of matrices with a vector will result in a stack of
vectors, but matmul will not recognize it as such.
``matmul`` differs from ``dot`` in two important ways.
- Multiplication by scalars is not allowed.
- Stacks of matrices are broadcast together as if the matrices
were elements.
.. warning::
This function is preliminary and included in NumPy 1.10.0 for testing
and documentation. Its semantics will not change, but the number and
order of the optional arguments will.
.. versionadded:: 1.10.0
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
1-D arrays then a scalar is returned; otherwise an array is
returned. If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
If scalar value is passed.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
dot : alternative matrix product with different broadcasting rules.
Notes
-----
The matmul function implements the semantics of the `@` operator introduced
in Python 3.5 following PEP465.
Examples
--------
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.matmul(a, b)
array([[4, 1],
[2, 2]])
    For 2-D mixed with 1-D, the result is the usual matrix-vector product.
>>> a = [[1, 0], [0, 1]]
>>> b = [1, 2]
>>> np.matmul(a, b)
array([1, 2])
>>> np.matmul(b, a)
array([1, 2])
Broadcasting is conventional for stacks of arrays
>>> a = np.arange(2*2*4).reshape((2,2,4))
>>> b = np.arange(2*2*4).reshape((2,4,2))
>>> np.matmul(a,b).shape
(2, 2, 2)
>>> np.matmul(a,b)[0,1,1]
98
>>> sum(a[0,1,:] * b[0,:,1])
98
Vector, vector returns the scalar inner product, but neither argument
is complex-conjugated:
>>> np.matmul([2j, 3j], [2j, 3j])
(-13+0j)
Scalar multiplication raises an error.
>>> np.matmul([1,2], 3)
Traceback (most recent call last):
...
ValueError: Scalar operands are not allowed, use '*' instead
""")
add_newdoc('numpy.core', 'c_einsum',
"""
c_einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe')
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional
array operations can be represented in a simple fashion. This function
provides a way to compute such summations. The best way to understand this
function is to try the examples below, which show how many common NumPy
functions can be implemented as calls to `einsum`.
This is the core C function.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
        'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum, dot, inner, outer, tensordot
Notes
-----
.. versionadded:: 1.6.0
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Repeated subscripts labels in one operand take the diagonal. For example,
``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``.
Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to ``np.inner(a,b)``. If a label appears only once,
it is not summed, so ``np.einsum('i', a)`` produces a view of ``a``
with no changes.
The order of labels in the output is by default alphabetical. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose.
The output can be controlled by specifying output subscript labels
as well. This specifies the label order, and allows summing to
be disallowed or forced when desired. The call ``np.einsum('i->', a)``
is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)``
is like ``np.diag(a)``. The difference is that `einsum` does not
allow broadcasting by default.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, you can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
An alternative way to provide the subscripts and operands is as
``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples
below have corresponding `einsum` calls with the two parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as ``np.swapaxes(a, 0, 2)`` and
``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> c.T
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum('i...->...', a)
array([50, 55, 60, 65, 70])
>>> np.einsum(a, [0,Ellipsis], [Ellipsis])
array([50, 55, 60, 65, 70])
>>> np.sum(a, axis=0)
array([50, 55, 60, 65, 70])
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> # since version 1.10.0
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
""")
add_newdoc('numpy.core', 'vdot',
"""
vdot(a, b)
Return the dot product of two vectors.
The vdot(`a`, `b`) function handles complex numbers differently than
dot(`a`, `b`). If the first argument is complex the complex conjugate
of the first argument is used for the calculation of the dot product.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : array_like
If `a` is complex the complex conjugate is taken before calculation
of the dot product.
b : array_like
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`. Can be an int, float, or
complex depending on the types of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
>>> a = np.array([1+2j,3+4j])
>>> b = np.array([5+6j,7+8j])
>>> np.vdot(a, b)
(70-8j)
>>> np.vdot(b, a)
(70+8j)
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
30
>>> np.vdot(b, a)
30
>>> 1*4 + 4*1 + 5*2 + 6*2
30
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (see `ndarray.flat` for
assignment examples).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[ -1.13698227e+002, 4.25087011e-303],
[ 2.88528414e-306, 3.27025015e-309]]) #random
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
"""Allow the array to be interpreted as a ctypes object by returning the
data-memory location as an integer.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
Examples
--------
The base of an array that owns its memory is None:
>>> x = np.array([1,2,3,4])
>>> x.base is None
True
Slicing creates a view, whose memory is shared with x:
>>> y = x[2:]
>>> y.base is x
True
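A view of a view reports the original owner as its base, a small
sketch of the parenthetical note above:
>>> z = y[1:]
>>> z.base is x
True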
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""
An object to simplify the interaction of the array with the ctypes
module.
This attribute creates an object that makes it easier to use arrays
when calling shared libraries with the ctypes module. The returned
object has, among others, data, shape, and strides attributes (see
Notes below) which themselves return ctypes objects that can be used
as arguments to a shared library.
Parameters
----------
None
Returns
-------
c : Python object
Possessing attributes data, shape, strides, etc.
See Also
--------
numpy.ctypeslib
Notes
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
* data: A pointer to the memory area of the array as a Python integer.
This memory area may contain data that is not aligned, or not in correct
byte-order. The memory area may not even be writeable. The array
flags and data-type of this array should be respected when passing this
attribute to arbitrary C-code to avoid trouble that can include Python
crashing. User beware! The value of this attribute is exactly the same
as ``self.__array_interface__['data'][0]``.
* shape (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the C-integer corresponding to dtype('p') on this
platform. This base-type could be c_int, c_long, or c_longlong
depending on the platform. The c_intp type is defined accordingly in
numpy.ctypeslib. The ctypes array contains the shape of the underlying
array.
* strides (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the same as for the shape attribute. This ctypes array
contains the strides information from the underlying array. This strides
information is important for showing how many bytes must be jumped to
get to the next element in the array.
* data_as(obj): Return the data pointer cast to a particular c-types object.
For example, calling self._as_parameter_ is equivalent to
self.data_as(ctypes.c_void_p). Perhaps you want to use the data as a
pointer to a ctypes array of floating-point data:
self.data_as(ctypes.POINTER(ctypes.c_double)).
* shape_as(obj): Return the shape tuple as an array of some other c-types
type. For example: self.shape_as(ctypes.c_short).
* strides_as(obj): Return the strides tuple as an array of some other
c-types type. For example: self.strides_as(ctypes.c_longlong).
Be careful using the ctypes attribute - especially on temporary
arrays or arrays constructed on the fly. For example, calling
``(a+b).ctypes.data_as(ctypes.c_void_p)`` returns a pointer to memory
that is invalid because the array created as (a+b) is deallocated
before the next Python statement. You can avoid this problem using
either ``c=a+b`` or ``ct=(a+b).ctypes``. In the latter case, ct will
hold a reference to the array until ct is deleted or re-assigned.
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
the object will still have the ``_as_parameter_`` attribute, which will
return an integer equal to the ``data`` attribute.
Examples
--------
>>> import ctypes
>>> x
array([[0, 1],
[2, 3]])
>>> x.ctypes.data
30439712
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
<ctypes.LP_c_long object at 0x01F01300>
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
c_long(0)
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
c_longlong(4294967296L)
>>> x.ctypes.shape
<numpy.core._internal.c_long_Array_2 object at 0x01FFD580>
>>> x.ctypes.shape_as(ctypes.c_long)
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides_as(ctypes.c_longlong)
<numpy.core._internal.c_longlong_Array_2 object at 0x01F01300>
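As a cautionary sketch of the temporary-array pitfall described in the
Notes (values assumed for illustration):
>>> a = np.array([1.0, 2.0])
>>> b = np.array([3.0, 4.0])
>>> ct = (a + b).ctypes  # ct keeps the temporary array alive
>>> ct.data_as(ctypes.POINTER(ctypes.c_double)).contents
c_double(4.0)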
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Python buffer object pointing to the start of the array's data."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
Parameters
----------
None
Returns
-------
d : numpy dtype object
See Also
--------
numpy.dtype
Examples
--------
>>> x
array([[0, 1],
[2, 3]])
>>> x.dtype
dtype('int32')
>>> type(x.dtype)
<type 'numpy.dtype'>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""
The imaginary part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.imag
array([ 0. , 0.70710678])
>>> x.imag.dtype
dtype('float64')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""
Length of one array element in bytes.
Examples
--------
>>> x = np.array([1,2,3], dtype=np.float64)
>>> x.itemsize
8
>>> x = np.array([1,2,3], dtype=np.complex128)
>>> x.itemsize
16
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""
Information about the memory layout of the array.
Attributes
----------
C_CONTIGUOUS (C)
The data is in a single, C-style contiguous segment.
F_CONTIGUOUS (F)
The data is in a single, Fortran-style contiguous segment.
OWNDATA (O)
The array owns the memory it uses or borrows it from another object.
WRITEABLE (W)
The data area can be written to. Setting this to False locks
the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
from its base array at creation time, but a view of a writeable
array may be subsequently locked while the base array remains writeable.
(The opposite is not true, in that a view of a locked array may not
be made writeable. However, currently, locking a base object does not
lock any views that already reference it, so under that circumstance it
is possible to alter the contents of a locked array via a previously
created writeable view onto it.) Attempting to change a non-writeable
array raises a ValueError exception.
ALIGNED (A)
The data and all elements are aligned appropriately for the hardware.
UPDATEIFCOPY (U)
This array is a copy of some other array. When this array is
deallocated, the base array will be updated with the contents of
this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
BEHAVED (B)
ALIGNED and WRITEABLE.
CARRAY (CA)
BEHAVED and C_CONTIGUOUS.
FARRAY (FA)
BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
Notes
-----
The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
Only the UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be changed by
the user, via direct assignment to the attribute or dictionary entry,
or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- UPDATEIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
Arrays can be both C-style and Fortran-style contiguous simultaneously.
This is clear for 1-dimensional arrays, but can also be true for higher
dimensional arrays.
Even for contiguous arrays a stride for a given dimension
``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
or the array has no elements.
It does *not* generally hold that ``self.strides[-1] == self.itemsize``
for C-style contiguous arrays or that ``self.strides[0] == self.itemsize``
for Fortran-style contiguous arrays.
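Examples
--------
A brief sketch of the two access styles and of locking an array
(the exact exception message may vary between versions):
>>> a = np.arange(3)
>>> a.flags['C_CONTIGUOUS']
True
>>> a.flags.writeable = False
>>> a[0] = 1
Traceback (most recent call last):
...
ValueError: assignment destination is read-only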
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""
A 1-D iterator over the array.
This is a `numpy.flatiter` instance, which acts similarly to, but is not
a subclass of, Python's built-in iterator object.
See Also
--------
flatten : Return a copy of the array collapsed into one dimension.
flatiter
Examples
--------
>>> x = np.arange(1, 7).reshape(2, 3)
>>> x
array([[1, 2, 3],
[4, 5, 6]])
>>> x.flat[3]
4
>>> x.T
array([[1, 4],
[2, 5],
[3, 6]])
>>> x.T.flat[3]
5
>>> type(x.flat)
<type 'numpy.flatiter'>
An assignment example:
>>> x.flat = 3; x
array([[3, 3, 3],
[3, 3, 3]])
>>> x.flat[[1,4]] = 1; x
array([[3, 1, 3],
[3, 1, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""
Total bytes consumed by the elements of the array.
Notes
-----
Does not include memory consumed by non-element attributes of the
array object.
Examples
--------
>>> x = np.zeros((3,5,2), dtype=np.complex128)
>>> x.nbytes
480
>>> np.prod(x.shape) * x.itemsize
480
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""
Number of array dimensions.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> x.ndim
1
>>> y = np.zeros((2, 3, 4))
>>> y.ndim
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""
The real part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.real
array([ 1. , 0.70710678])
>>> x.real.dtype
dtype('float64')
See Also
--------
numpy.real : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""
Tuple of array dimensions.
Notes
-----
May be used to "reshape" the array, as long as this would not
require a change in the total number of elements.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> y.shape = (3, 8)
>>> y
array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> y.shape = (3, 6)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: total size of new array must be unchanged
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""
Number of elements in the array.
Equivalent to ``np.prod(a.shape)``, i.e., the product of the array's
dimensions.
Examples
--------
>>> x = np.zeros((3, 5, 2), dtype=np.complex128)
>>> x.size
30
>>> np.prod(x.shape)
30
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""
Tuple of bytes to step in each dimension when traversing an array.
The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
is::
offset = sum(np.array(i) * a.strides)
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
x = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]], dtype=np.int32)
This array is stored in memory as 40 bytes, one after the other
(known as a contiguous block of memory). The strides of an array tell
us how many bytes we have to skip in memory to move to the next position
along a certain axis. For example, we have to skip 4 bytes (1 value) to
move to the next column, but 20 bytes (5 values) to get to the same
position in the next row. As such, the strides for the array `x` will be
``(20, 4)``.
See Also
--------
numpy.lib.stride_tricks.as_strided
Examples
--------
>>> y = np.reshape(np.arange(2*3*4), (2,3,4))
>>> y
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y.strides
(48, 16, 4)
>>> y[1,1,1]
17
>>> offset=sum(y.strides * np.array((1,1,1)))
>>> offset/y.itemsize
17
>>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
>>> x.strides
(32, 4, 224, 1344)
>>> i = np.array([3,5,2,2])
>>> offset = sum(i * x.strides)
>>> x[3,5,2,2]
813
>>> offset / x.itemsize
813
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
Same as self.transpose(), except that self is returned if
self.ndim < 2.
Examples
--------
>>> x = np.array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T
array([[ 1., 3.],
[ 2., 4.]])
>>> x = np.array([1.,2.,3.,4.])
>>> x
array([ 1., 2., 3., 4.])
>>> x.T
array([ 1., 2., 3., 4.])
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as ndarray object a.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__([order])
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A'}, optional
If order is 'C' (False) then the result is contiguous (default).
If order is 'Fortran' (True) then the result has fortran order.
If order is 'Any' (None) then the result has fortran order
only if the array already is in fortran order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__() -> Deep copy of array.
Used if copy.deepcopy is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(version, shape, dtype, isfortran, rawdata)
For unpickling.
Parameters
----------
version : int, optional
Pickle version. If omitted, defaults to 0.
shape : tuple
dtype : data-type
isfortran : bool
rawdata : string or list
a binary string with the data (or a list if 'a' is an object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
a.all(axis=None, out=None, keepdims=False)
Returns True if all elements evaluate to True.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.all : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
a.any(axis=None, out=None, keepdims=False)
Returns True if any of the elements of `a` evaluate to True.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.any : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
"""
a.argmax(axis=None, out=None)
Return indices of the maximum values along the given axis.
Refer to `numpy.argmax` for full documentation.
See Also
--------
numpy.argmax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
Return indices of the minimum values along the given axis of `a`.
Refer to `numpy.argmin` for detailed documentation.
See Also
--------
numpy.argmin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
a.argsort(axis=-1, kind='quicksort', order=None)
Returns the indices that would sort this array.
Refer to `numpy.argsort` for full documentation.
See Also
--------
numpy.argsort : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
"""
a.argpartition(kth, axis=-1, kind='introselect', order=None)
Returns the indices that would partition this array.
Refer to `numpy.argpartition` for full documentation.
.. versionadded:: 1.8.0
See Also
--------
numpy.argpartition : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array, with dtype, order
given by `dtype`, `order`.
Notes
-----
Starting in NumPy 1.9, the `astype` method raises an error in 'safe'
casting mode if the string dtype being cast to is not long enough to
hold the maximum value of the integer/float array being cast.
Previously the cast was allowed even if the result was truncated.
Raises
------
ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> x = np.array([1, 2, 2.5])
>>> x
array([ 1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
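A hedged sketch of the `casting` argument (the exact error text may
differ between versions):
>>> x.astype(int, casting='safe')
Traceback (most recent call last):
...
TypeError: Cannot cast array from dtype('float64') to dtype('int64') according to the rule 'safe'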
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
a.byteswap(inplace=False)
Swap the bytes of the array elements.
Toggle between little-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
Parameters
----------
inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> map(hex, A)
['0x1', '0x100', '0x2233']
>>> A.byteswap(True)
array([ 256, 1, 13090], dtype=int16)
>>> map(hex, A)
['0x100', '0x1', '0x3322']
Arrays of strings are not swapped
>>> A = np.array(['ceg', 'fac'])
>>> A.byteswap()
array(['ceg', 'fac'],
dtype='|S3')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
"""
a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to `numpy.choose` for full documentation.
See Also
--------
numpy.choose : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
a.clip(min=None, max=None, out=None)
Return an array whose values are limited to ``[min, max]``.
One of max or min must be given.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""
a.compress(condition, axis=None, out=None)
Return selected slices of this array along given axis.
Refer to `numpy.compress` for full documentation.
See Also
--------
numpy.compress : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""
a.conj()
Complex-conjugate all elements.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""
a.conjugate()
Return the complex conjugate, element-wise.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""
a.copy(order='C')
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.)
See also
--------
numpy.copy
numpy.copyto
Examples
--------
>>> x = np.array([[1,2,3],[4,5,6]], order='F')
>>> y = x.copy()
>>> x.fill(0)
>>> x
array([[0, 0, 0],
[0, 0, 0]])
>>> y
array([[1, 2, 3],
[4, 5, 6]])
>>> y.flags['C_CONTIGUOUS']
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""
a.cumprod(axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis.
Refer to `numpy.cumprod` for full documentation.
See Also
--------
numpy.cumprod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""
a.cumsum(axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis.
Refer to `numpy.cumsum` for full documentation.
See Also
--------
numpy.cumsum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""
a.diagonal(offset=0, axis1=0, axis2=1)
Return specified diagonals. In NumPy 1.9 the returned array is a
read-only view instead of a copy as in previous NumPy versions. In
a future version the read-only restriction will be removed.
Refer to :func:`numpy.diagonal` for full documentation.
See Also
--------
numpy.diagonal : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
"""
a.dot(b, out=None)
Dot product of two arrays.
Refer to `numpy.dot` for full documentation.
See Also
--------
numpy.dot : equivalent function
Examples
--------
>>> a = np.eye(2)
>>> b = np.ones((2, 2)) * 2
>>> a.dot(b)
array([[ 2., 2.],
[ 2., 2.]])
This array method can be conveniently chained:
>>> a.dot(b).dot(b)
array([[ 8., 8.],
[ 8., 8.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file)
Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load.
Parameters
----------
file : str
A string naming the dump file.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""
a.dumps()
Returns the pickle of the array as a string.
pickle.loads or numpy.loads will convert the string back to an array.
Parameters
----------
None
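Examples
--------
A minimal round-trip sketch:
>>> import pickle
>>> a = np.array([1, 2])
>>> pickle.loads(a.dumps())
array([1, 2])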
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""
a.fill(value)
Fill the array with a scalar value.
Parameters
----------
value : scalar
All elements of `a` will be assigned this value.
Examples
--------
>>> a = np.array([1, 2])
>>> a.fill(0)
>>> a
array([0, 0])
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
array([ 1., 1.])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""
a.flatten(order='C')
Return a copy of the array collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order.
'F' means to flatten in column-major (Fortran-
style) order. 'A' means to flatten in column-major
order if `a` is Fortran *contiguous* in memory,
row-major order otherwise. 'K' means to flatten
`a` in the order the elements occur in memory.
The default is 'C'.
Returns
-------
y : ndarray
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = np.array([[1,2], [3,4]])
>>> a.flatten()
array([1, 2, 3, 4])
>>> a.flatten('F')
array([1, 3, 2, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""
a.getfield(dtype, offset=0)
Returns a field of the given array as a certain type.
A field is a view of the array data with a given data-type. The values in
the view are determined by the given type and the offset into the current
array in bytes. The offset needs to be such that the view dtype fits in the
array dtype; for example an array of dtype complex128 has 16-byte elements.
If taking a view with a 32-bit integer (4 bytes), the offset needs to be
between 0 and 12 bytes.
Parameters
----------
dtype : str or dtype
The data type of the view. The dtype size of the view can not be larger
than that of the array itself.
offset : int
Number of bytes to skip before beginning the element view.
Examples
--------
>>> x = np.diag([1.+1.j]*2)
>>> x[1, 1] = 2 + 4.j
>>> x
array([[ 1.+1.j, 0.+0.j],
[ 0.+0.j, 2.+4.j]])
>>> x.getfield(np.float64)
array([[ 1., 0.],
[ 0., 2.]])
By choosing an offset of 8 bytes we can select the complex part of the
array for our view:
>>> x.getfield(np.float64, offset=8)
array([[ 1., 0.],
[ 0., 4.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""
a.item(*args)
Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
\\*args : Arguments (variable number and type)
* none: in this case, the method only works for arrays
with one element (`a.size == 1`), which element is
copied into a standard Python scalar object and returned.
* int_type: this argument is interpreted as a flat index into
the array, specifying which element to copy and return.
* tuple of int_types: functions as does a single int_type argument,
except that the argument is interpreted as an nd-index into the
array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable
Python scalar
Notes
-----
When the data type of `a` is longdouble or clongdouble, item() returns
a scalar array object because there is no available Python scalar that
would not lose information. Void arrays return a buffer object for item(),
unless fields are defined, in which case a tuple is returned.
`item` is very similar to a[args], except, instead of an array scalar,
a standard Python scalar is returned. This can be useful for speeding up
access to elements of the array and doing arithmetic on elements of the
array using Python's optimized math.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.item(3)
2
>>> x.item(7)
5
>>> x.item((0, 1))
1
>>> x.item((2, 2))
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
"""
a.itemset(*args)
Insert scalar into an array (scalar is cast to array's dtype, if possible)
There must be at least 1 argument; the last argument is interpreted
as *item*. Then ``a.itemset(*args)`` is equivalent to, but faster
than, ``a[args] = item``. The item should be a scalar value and `args`
must select a single item in the array `a`.
Parameters
----------
\\*args : Arguments
If one argument: a scalar, only used in case `a` is of size 1.
If two arguments: the last argument is the value to be set
and must be a scalar, the first argument specifies a single array
element location. It is either an int or a tuple.
Notes
-----
Compared to indexing syntax, `itemset` provides some speed increase
for placing a scalar into a particular location in an `ndarray`,
if you must do this. However, generally this is discouraged:
among other problems, it complicates the appearance of the code.
Also, when using `itemset` (and `item`) inside a loop, be sure
to assign the methods to a local variable to avoid the attribute
look-up at each loop iteration.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
array([[3, 1, 7],
[2, 0, 3],
[8, 5, 9]])
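As noted above, binding the method to a local name avoids the repeated
attribute look-up inside loops (a small sketch):
>>> it = x.itemset
>>> for i in range(3):
...     it((i, i), 0)
>>> x
array([[0, 1, 7],
[2, 0, 3],
[8, 5, 0]])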
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
a.max(axis=None, out=None)
Return the maximum along a given axis.
Refer to `numpy.amax` for full documentation.
See Also
--------
numpy.amax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
a.mean(axis=None, dtype=None, out=None, keepdims=False)
Returns the average of the array elements along given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
a.min(axis=None, out=None, keepdims=False)
Return the minimum along a given axis.
Refer to `numpy.amin` for full documentation.
See Also
--------
numpy.amin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'shares_memory',
"""
shares_memory(a, b, max_work=None)
Determine if two arrays share memory
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem (maximum number
of candidate solutions to consider). The following special
values are recognized:
max_work=MAY_SHARE_EXACT (default)
The problem is solved exactly. In this case, the function returns
True only if there is an element shared between the arrays.
max_work=MAY_SHARE_BOUNDS
Only the memory bounds of a and b are checked.
Raises
------
numpy.TooHardError
Exceeded max_work.
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
>>> np.shares_memory(np.array([1,2]), np.array([5,8,9]))
False
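An overlapping view, by contrast, does share memory:
>>> x = np.arange(10)
>>> np.shares_memory(x, x[::2])
True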
""")
add_newdoc('numpy.core.multiarray', 'may_share_memory',
"""
may_share_memory(a, b, max_work=None)
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem. See
`shares_memory` for details. Default for ``may_share_memory``
is to do a bounds check.
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
Return the array with the same data viewed with a different byte order.
Equivalent to::
arr.view(arr.dtype.newbyteorder(new_order))
Changes are also made in all fields and sub-arrays of the array data
type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_arr : array
New array object with the dtype reflecting given change to the
byte order.
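Examples
--------
A small sketch, assuming a little-endian platform:
>>> A = np.array([1, 256], dtype=np.int16)
>>> A.newbyteorder()
array([256,   1], dtype=int16)
>>> A.newbyteorder().dtype
dtype('>i2')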
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""
a.nonzero()
Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
a.prod(axis=None, dtype=None, out=None, keepdims=False)
Return the product of the array elements over the given axis
Refer to `numpy.prod` for full documentation.
See Also
--------
numpy.prod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""
a.ptp(axis=None, out=None)
Peak to peak (maximum - minimum) value along a given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""
a.put(indices, values, mode='raise')
Set ``a.flat[n] = values[n]`` for all `n` in indices.
Refer to `numpy.put` for full documentation.
See Also
--------
numpy.put : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'copyto',
"""
copyto(dst, src, casting='same_kind', where=None)
Copies values from one array to another, broadcasting as necessary.
Raises a TypeError if the `casting` rule is violated. If `where` is
provided, it selects which elements to copy.
.. versionadded:: 1.7.0
Parameters
----------
dst : ndarray
The array into which values are copied.
src : array_like
The array from which values are copied.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when copying.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `dst`, and selects elements to copy from `src` to `dst`
wherever it contains the value True.
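Examples
--------
A minimal sketch of `where`-controlled copying:
>>> dst = np.zeros(4)
>>> np.copyto(dst, np.arange(4.), where=np.array([True, False, True, False]))
>>> dst
array([ 0.,  0.,  2.,  0.])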
""")
add_newdoc('numpy.core.multiarray', 'putmask',
"""
putmask(a, mask, values)
Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
Parameters
----------
a : array_like
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
place, put, take, copyto
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
a.ravel([order])
Return a flattened array.
Refer to `numpy.ravel` for full documentation.
See Also
--------
numpy.ravel : equivalent function
ndarray.flat : a flat iterator on the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""
a.repeat(repeats, axis=None)
Repeat elements of an array.
Refer to `numpy.repeat` for full documentation.
See Also
--------
numpy.repeat : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""
a.reshape(shape, order='C')
Returns an array containing the same data with a new shape.
Refer to `numpy.reshape` for full documentation.
See Also
--------
numpy.reshape : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""
a.resize(new_shape, refcheck=True)
Change shape and size of array in-place.
Parameters
----------
new_shape : tuple of ints, or `n` ints
Shape of resized array.
refcheck : bool, optional
If False, reference count will not be checked. Default is True.
Returns
-------
None
Raises
------
ValueError
If `a` does not own its own data or references or views to it exist,
and the data memory must be changed.
PyPy only: will always raise if the data memory must be changed, since
there is no reliable way to determine if references or views to it
exist.
SystemError
If the `order` keyword argument is specified. This behaviour is a
bug in NumPy.
See Also
--------
resize : Return a new array with the specified shape.
Notes
-----
This reallocates space for the data area if necessary.
Only contiguous arrays (data elements consecutive in memory) can be
resized.
The purpose of the reference count check is to make sure you
do not use this array as a buffer for another Python object and then
reallocate the memory. However, reference counts can increase in
other ways so if you are sure that you have not shared the memory
for this array with another Python object, then you may safely set
`refcheck` to False.
Examples
--------
Shrinking an array: array is flattened (in the order that the data are
stored in memory), resized, and reshaped:
>>> a = np.array([[0, 1], [2, 3]], order='C')
>>> a.resize((2, 1))
>>> a
array([[0],
[1]])
>>> a = np.array([[0, 1], [2, 3]], order='F')
>>> a.resize((2, 1))
>>> a
array([[0],
[2]])
Enlarging an array: as above, but missing entries are filled with zeros:
>>> b = np.array([[0, 1], [2, 3]])
>>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
>>> b
array([[0, 1, 2],
[3, 0, 0]])
Referencing an array prevents resizing...
>>> c = a
>>> a.resize((1, 1))
Traceback (most recent call last):
...
ValueError: cannot resize an array that has been referenced ...
Unless `refcheck` is False:
>>> a.resize((1, 1), refcheck=False)
>>> a
array([[0]])
>>> c
array([[0]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""
a.round(decimals=0, out=None)
Return `a` with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""
a.searchsorted(v, side='left', sorter=None)
Find indices where elements of v should be inserted in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""
a.setfield(val, dtype, offset=0)
Put a value into a specified place in a field defined by a data-type.
Place `val` into `a`'s field defined by `dtype` and beginning `offset`
bytes into the field.
Parameters
----------
val : object
Value to be placed in field.
dtype : dtype object
Data-type of the field in which to place `val`.
offset : int, optional
The number of bytes into the field at which to place `val`.
Returns
-------
None
See Also
--------
getfield
Examples
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
[3, 3, 3]])
>>> x
array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323],
[ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323],
[ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
Set array flags WRITEABLE, ALIGNED, and UPDATEIFCOPY, respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
The UPDATEIFCOPY flag can never be set to True. The flag WRITEABLE
can only be set to True if the array owns its own memory, or the
ultimate owner of the memory exposes a writeable buffer interface,
or is a string. (The exception for string is made so that unpickling
can be done without copying memory.)
Parameters
----------
write : bool, optional
Describes whether or not `a` can be written to.
align : bool, optional
Describes whether or not `a` is aligned properly for its type.
uic : bool, optional
Describes whether or not `a` is a copy of another "base" array.
Notes
-----
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 6 Boolean flags
in use, only three of which can be changed by the user:
UPDATEIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
UPDATEIFCOPY (U) this array is a copy of some other array (referenced
by .base). When this array is deallocated, the base array will be
updated with the contents of this array.
All flags can be accessed using their first (upper case) letter as well
as the full name.
Examples
--------
>>> y
array([[3, 1, 7],
[2, 0, 0],
[8, 5, 9]])
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : False
ALIGNED : False
UPDATEIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: cannot set UPDATEIFCOPY flag to True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
a.sort(axis=-1, kind='quicksort', order=None)
Sort an array, in-place.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.sort : Return a sorted copy of an array.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in sorted array.
partition: Partial sort.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
array([('c', 1), ('a', 2)],
dtype=[('x', '|S1'), ('y', '<i4')])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
"""
a.partition(kth, axis=-1, kind='introselect', order=None)
Rearranges the elements in the array in such a way that value of the
element in kth position is in the position it would be in a sorted array.
All elements smaller than the kth element are moved before this element and
all equal or greater are moved behind it. The ordering of the elements in
the two partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
kth : int or sequence of ints
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
The ordering of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all elements
indexed by kth of them into their sorted position at once.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.partition : Return a partitioned copy of an array.
argpartition : Indirect partition.
sort : Full sort.
Notes
-----
See ``np.partition`` for notes on the different algorithms.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a
array([2, 1, 3, 4])
>>> a.partition((1, 3))
>>> a
array([1, 2, 3, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze(axis=None)
Remove single-dimensional entries from the shape of `a`.
Refer to `numpy.squeeze` for full documentation.
See Also
--------
numpy.squeeze : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
a.sum(axis=None, dtype=None, out=None, keepdims=False)
Return the sum of the array elements over the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""
a.swapaxes(axis1, axis2)
Return a view of the array with `axis1` and `axis2` interchanged.
Refer to `numpy.swapaxes` for full documentation.
See Also
--------
numpy.swapaxes : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""
a.take(indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of `a` at the given indices.
Refer to `numpy.take` for full documentation.
See Also
--------
numpy.take : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""
a.tofile(fid, sep="", format="%s")
Write array to a file as text or binary (default).
Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().
Parameters
----------
fid : file or str
An open file object, or a string containing a filename.
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
``file.write(a.tobytes())``.
format : str
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a
good choice for files intended to archive data or transport data between
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
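Examples
--------
A minimal binary round-trip sketch (the path is illustrative):
>>> import os, tempfile
>>> fname = os.path.join(tempfile.mkdtemp(), 'x.bin')
>>> a = np.arange(4, dtype=np.int32)
>>> a.tofile(fname)
>>> np.fromfile(fname, dtype=np.int32)
array([0, 1, 2, 3], dtype=int32)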
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
Return the array as a (possibly nested) list.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible Python type.
Parameters
----------
none
Returns
-------
y : list
The possibly nested list of array elements.
Notes
-----
The array may be recreated, ``a = np.array(a.tolist())``.
Examples
--------
>>> a = np.array([1, 2])
>>> a.tolist()
[1, 2]
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
"""))
tobytesdoc = """
a.{name}(order='C')
Construct Python bytes containing the raw data bytes in the array.
Constructs Python bytes showing a copy of the raw contents of
data memory. The bytes object can be produced in either 'C' or 'Fortran'
order, or in 'Any' order (the default is 'C' order). 'Any' order means C-order
unless the F_CONTIGUOUS flag in the array is set, in which case it
means 'Fortran' order.
{deprecated}
Parameters
----------
order : {{'C', 'F', None}}, optional
Order of the data for multidimensional arrays:
C, Fortran, or the same as for the original array.
Returns
-------
s : bytes
Python bytes exhibiting a copy of `a`'s raw data.
Examples
--------
>>> x = np.array([[0, 1], [2, 3]])
>>> x.tobytes()
b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
>>> x.tobytes('C') == x.tobytes()
True
>>> x.tobytes('F')
b'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
"""
add_newdoc('numpy.core.multiarray', 'ndarray',
('tostring', tobytesdoc.format(name='tostring',
deprecated=
'This function is a compatibility '
'alias for tobytes. Despite its '
'name it returns bytes not '
'strings.')))
add_newdoc('numpy.core.multiarray', 'ndarray',
('tobytes', tobytesdoc.format(name='tobytes',
deprecated='.. versionadded:: 1.9.0')))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array.
Refer to `numpy.trace` for full documentation.
See Also
--------
numpy.trace : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""
a.transpose(*axes)
Returns a view of the array with axes transposed.
For a 1-D array, this has no effect. (To change between column and
row vectors, first cast the 1-D array into a matrix object.)
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
ndarray.T : Array property returning the array transposed.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the variance of the array elements, along given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
a.view(dtype=None, type=None)
New view of array with the same data.
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16. The
default, None, results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, the
default None results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
>>> print(type(y))
<class 'numpy.matrixlib.defmatrix.matrix'>
Creating a view on a structured array so it can be used in calculations:
>>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
>>> xv = x.view(dtype=np.int8).reshape(-1,2)
>>> xv
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
array([ 2., 3.])
Making changes to the view changes the underlying array:
>>> xv[0,1] = 20
>>> print(x)
[(1, 20) (3, 4)]
Using a view to convert an array to a recarray:
>>> z = x.view(np.recarray)
>>> z.a
array([1, 3], dtype=int8)
Views share data:
>>> x[0] = (9, 10)
>>> z[0]
(9, 10)
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
>>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
>>> y = x[:, 0:2]
>>> y
array([[1, 2],
[4, 5]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: new type not compatible with array.
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
array([[(1, 2)],
[(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
"""))
##############################################################################
#
# umath functions
#
##############################################################################
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
frompyfunc(func, nin, nout)
Takes an arbitrary Python function and returns a NumPy ufunc.
Can be used, for example, to add broadcasting to a built-in Python
function (see Examples section).
Parameters
----------
func : Python function object
An arbitrary Python function.
nin : int
The number of input arguments.
nout : int
The number of objects returned by `func`.
Returns
-------
out : ufunc
Returns a NumPy universal function (``ufunc``) object.
See Also
--------
vectorize : evaluates pyfunc over input arrays using broadcasting rules of numpy
Notes
-----
The returned ufunc always returns PyObject arrays.
Examples
--------
Use frompyfunc to add broadcasting to the Python function ``oct``:
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
array(['0o12', '0o36', '0o144'], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
array(['0o12', '0o36', '0o144'],
dtype='<U5')
""")
add_newdoc('numpy.core.umath', 'geterrobj',
"""
geterrobj()
Return the current object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `geterrobj` is used internally by the other
functions that get and set error handling behavior (`geterr`, `seterr`,
`geterrcall`, `seterrcall`).
Returns
-------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
seterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrobj() # first get the defaults
[10000, 0, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
[20000, 2, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
... invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
""")
add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj)
Set the object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `seterrobj` is used internally by the other
functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
geterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
[10000, 0, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
>>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
'14'
>>> np.geterr()
{'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.geterrcall() is err_handler
True
""")
##############################################################################
#
# compiled_base functions
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'digitize',
"""
digitize(x, bins, right=False)
Return the indices of the bins to which each value in input array belongs.
Each index ``i`` returned is such that ``bins[i-1] <= x < bins[i]`` if
`bins` is monotonically increasing, or ``bins[i-1] > x >= bins[i]`` if
`bins` is monotonically decreasing. If values in `x` are beyond the
bounds of `bins`, 0 or ``len(bins)`` is returned as appropriate. If right
is True, then the right bin is closed so that the index ``i`` is such
that ``bins[i-1] < x <= bins[i]`` or ``bins[i-1] >= x > bins[i]`` if `bins`
is monotonically increasing or decreasing, respectively.
Parameters
----------
x : array_like
Input array to be binned. Prior to NumPy 1.10.0, this array had to
be 1-dimensional, but can now have any shape.
bins : array_like
Array of bins. It has to be 1-dimensional and monotonic.
right : bool, optional
Indicating whether the intervals include the right or the left bin
edge. Default behavior is (right==False) indicating that the interval
does not include the right edge. The left bin end is open in this
case, i.e., bins[i-1] <= x < bins[i] is the default behavior for
monotonically increasing bins.
Returns
-------
out : ndarray of ints
Output array of indices, of same shape as `x`.
Raises
------
ValueError
If `bins` is not monotonic.
TypeError
If the type of the input is complex.
See Also
--------
bincount, histogram, unique, searchsorted
Notes
-----
If values in `x` are such that they fall outside the bin range,
attempting to index `bins` with the indices that `digitize` returns
will result in an IndexError.
.. versionadded:: 1.10.0
`np.digitize` is implemented in terms of `np.searchsorted`. This means
that a binary search is used to bin the values, which scales much better
for a larger number of bins than the previous linear search. It also removes
the requirement for the input array to be 1-dimensional.
Examples
--------
>>> x = np.array([0.2, 6.4, 3.0, 1.6])
>>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
>>> inds = np.digitize(x, bins)
>>> inds
array([1, 4, 3, 2])
>>> for n in range(x.size):
... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]])
...
0.0 <= 0.2 < 1.0
4.0 <= 6.4 < 10.0
2.5 <= 3.0 < 4.0
1.0 <= 1.6 < 2.5
>>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
>>> bins = np.array([0, 5, 10, 15, 20])
>>> np.digitize(x,bins,right=True)
array([1, 2, 3, 4, 4])
>>> np.digitize(x,bins,right=False)
array([1, 3, 3, 4, 5])
""")
add_newdoc('numpy.core.multiarray', 'bincount',
"""
bincount(x, weights=None, minlength=None)
Count number of occurrences of each value in array of non-negative ints.
The number of bins (of size 1) is one larger than the largest value in
`x`. If `minlength` is specified, there will be at least this number
of bins in the output array (though it will be longer if necessary,
depending on the contents of `x`).
Each bin gives the number of occurrences of its index value in `x`.
If `weights` is specified the input array is weighted by it, i.e. if a
value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
of ``out[n] += 1``.
Parameters
----------
x : array_like, 1 dimension, nonnegative ints
Input array.
weights : array_like, optional
Weights, array of the same shape as `x`.
minlength : int, optional
A minimum number of bins for the output array.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray of ints
The result of binning the input array.
The length of `out` is equal to ``np.amax(x)+1``.
Raises
------
ValueError
If the input is not 1-dimensional, or contains elements with negative
values, or if `minlength` is non-positive.
TypeError
If the type of the input is float or complex.
See Also
--------
histogram, digitize, unique
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
The input array needs to be of integer dtype, otherwise a
TypeError is raised:
>>> np.bincount(np.arange(5, dtype=np.float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
A possible use of ``bincount`` is to perform sums over
variable-size chunks of an array, using the ``weights`` keyword.
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
""")
add_newdoc('numpy.core.multiarray', 'ravel_multi_index',
"""
ravel_multi_index(multi_index, dims, mode='raise', order='C')
Converts a tuple of index arrays into an array of flat
indices, applying boundary modes to the multi-index.
Parameters
----------
multi_index : tuple of array_like
A tuple of integer arrays, one array for each dimension.
dims : tuple of ints
The shape of array into which the indices from ``multi_index`` apply.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices are handled. Can specify
either one mode or a tuple of modes, one mode per index.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
In 'clip' mode, a negative index which would normally
wrap will clip to 0 instead.
order : {'C', 'F'}, optional
Determines whether the multi-index should be viewed as
indexing in row-major (C-style) or column-major
(Fortran-style) order.
Returns
-------
raveled_indices : ndarray
An array of indices into the flattened version of an array
of dimensions ``dims``.
See Also
--------
unravel_index
Notes
-----
.. versionadded:: 1.6.0
Examples
--------
>>> arr = np.array([[3,6,6],[4,5,1]])
>>> np.ravel_multi_index(arr, (7,6))
array([22, 41, 37])
>>> np.ravel_multi_index(arr, (7,6), order='F')
array([31, 41, 13])
>>> np.ravel_multi_index(arr, (4,6), mode='clip')
array([22, 23, 19])
>>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
array([12, 13, 13])
>>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
1621
""")
add_newdoc('numpy.core.multiarray', 'unravel_index',
"""
unravel_index(indices, dims, order='C')
Converts a flat index or array of flat indices into a tuple
of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened
version of an array of dimensions ``dims``. Before version 1.6.0,
this function accepted just one index value.
dims : tuple of ints
The shape of the array to use for unraveling ``indices``.
order : {'C', 'F'}, optional
Determines whether the indices should be viewed as indexing in
row-major (C-style) or column-major (Fortran-style) order.
.. versionadded:: 1.6.0
Returns
-------
unraveled_coords : tuple of ndarray
Each array in the tuple has the same shape as the ``indices``
array.
See Also
--------
ravel_multi_index
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index([31, 41, 13], (7,6), order='F')
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
""")
add_newdoc('numpy.core.multiarray', 'add_docstring',
"""
add_docstring(obj, docstring)
Add a docstring to a built-in obj if possible.
If the obj already has a docstring, a RuntimeError is raised.
If this routine does not know how to add a docstring to the object,
a TypeError is raised.
""")
add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
"""
add_ufunc_docstring(ufunc, new_docstring)
Replace the docstring for a ufunc with new_docstring.
This method will only work if the current docstring for
the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
Parameters
----------
ufunc : numpy.ufunc
A ufunc whose current doc is NULL.
new_docstring : string
The new docstring for the ufunc.
Notes
-----
This method allocates memory for new_docstring on
the heap. Technically this creates a memory leak, since this
memory will not be reclaimed until the end of the program
even if the ufunc itself is removed. However this will only
be a problem if the user is repeatedly creating ufuncs with
no documentation, adding documentation via add_newdoc_ufunc,
and then throwing away the ufunc.
""")
add_newdoc('numpy.core.multiarray', 'packbits',
"""
packbits(myarray, axis=None)
Packs the elements of a binary-valued array into bits in a uint8 array.
The result is padded to full bytes by inserting zero bits at the end.
Parameters
----------
myarray : array_like
An array of integers or booleans whose elements should be packed to
bits.
axis : int, optional
The dimension over which bit-packing is done.
``None`` implies packing the flattened array.
Returns
-------
packed : ndarray
Array of type uint8 whose elements represent bits corresponding to the
logical (0 or nonzero) value of the input elements. The shape of
`packed` has the same number of dimensions as the input (unless `axis`
is None, in which case the output is 1-D).
See Also
--------
unpackbits: Unpacks elements of a uint8 array into a binary-valued output
array.
Examples
--------
>>> a = np.array([[[1,0,1],
... [0,1,0]],
... [[1,1,0],
... [0,0,1]]])
>>> b = np.packbits(a, axis=-1)
>>> b
array([[[160],[64]],[[192],[32]]], dtype=uint8)
Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
and 32 = 0010 0000.
""")
add_newdoc('numpy.core.multiarray', 'unpackbits',
"""
unpackbits(myarray, axis=None)
Unpacks elements of a uint8 array into a binary-valued output array.
Each element of `myarray` represents a bit-field that should be unpacked
into a binary-valued output array. The shape of the output array is either
1-D (if `axis` is None) or the same shape as the input array with unpacking
done along the axis specified.
Parameters
----------
myarray : ndarray, uint8 type
Input array.
axis : int, optional
Unpacks along this axis.
Returns
-------
unpacked : ndarray, uint8 type
The elements are binary-valued (0 or 1).
See Also
--------
packbits : Packs the elements of a binary-valued array into bits in a uint8
array.
Examples
--------
>>> a = np.array([[2], [7], [23]], dtype=np.uint8)
>>> a
array([[ 2],
[ 7],
[23]], dtype=uint8)
>>> b = np.unpackbits(a, axis=1)
>>> b
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
""")
##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################
##############################################################################
#
# ufunc object
#
##############################################################################
add_newdoc('numpy.core', 'ufunc',
"""
Functions that operate element by element on whole arrays.
To see the documentation for a specific ufunc, use np.info(). For
example, np.info(np.sin). Because ufuncs are written in C
(for speed) and linked into Python with NumPy's ufunc facility,
Python's help() function finds this page whenever help() is called
on a ufunc.
A detailed explanation of ufuncs can be found in the "ufuncs.rst"
file in the NumPy reference guide.
Unary ufuncs:
=============
op(X, out=None)
Apply op to X elementwise
Parameters
----------
X : array_like
Input array.
out : array_like
An array to store the output. Must be the same shape as `X`.
Returns
-------
r : array_like
`r` will have the same shape as `X`; if out is provided, `r`
will be equal to out.
Binary ufuncs:
==============
op(X, Y, out=None)
Apply `op` to `X` and `Y` elementwise. May "broadcast" to make
the shapes of `X` and `Y` congruent.
The broadcasting rules are:
* Dimensions of length 1 may be prepended to either array.
* Arrays may be repeated along dimensions of length 1.
Parameters
----------
X : array_like
First input array.
Y : array_like
Second input array.
out : array_like
An array to store the output. Must be the same shape as the
output would have.
Returns
-------
r : array_like
The return value; if out is provided, `r` will be equal to out.
""")
##############################################################################
#
# ufunc attributes
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('identity',
"""
The identity value.
Data attribute containing the identity element for the ufunc, if it has one.
If it does not, the attribute value is None.
Examples
--------
>>> np.add.identity
0
>>> np.multiply.identity
1
>>> np.power.identity
1
>>> print(np.exp.identity)
None
"""))
add_newdoc('numpy.core', 'ufunc', ('nargs',
"""
The number of arguments.
Data attribute containing the number of arguments the ufunc takes, including
optional ones.
Notes
-----
Typically this value will be one more than what you might expect because all
ufuncs take the optional "out" argument.
Examples
--------
>>> np.add.nargs
3
>>> np.multiply.nargs
3
>>> np.power.nargs
3
>>> np.exp.nargs
2
"""))
add_newdoc('numpy.core', 'ufunc', ('nin',
"""
The number of inputs.
Data attribute containing the number of arguments the ufunc treats as input.
Examples
--------
>>> np.add.nin
2
>>> np.multiply.nin
2
>>> np.power.nin
2
>>> np.exp.nin
1
"""))
add_newdoc('numpy.core', 'ufunc', ('nout',
"""
The number of outputs.
Data attribute containing the number of arguments the ufunc treats as output.
Notes
-----
Since all ufuncs can take output arguments, this will always be (at least) 1.
Examples
--------
>>> np.add.nout
1
>>> np.multiply.nout
1
>>> np.power.nout
1
>>> np.exp.nout
1
"""))
add_newdoc('numpy.core', 'ufunc', ('ntypes',
"""
The number of types.
The number of numerical NumPy types - of which there are 18 total - on which
the ufunc can operate.
See Also
--------
numpy.ufunc.types
Examples
--------
>>> np.add.ntypes
18
>>> np.multiply.ntypes
18
>>> np.power.ntypes
17
>>> np.exp.ntypes
7
>>> np.remainder.ntypes
14
"""))
add_newdoc('numpy.core', 'ufunc', ('types',
"""
Returns a list with types grouped input->output.
Data attribute listing the data-type "Domain-Range" groupings the ufunc can
deliver. The data-types are given using the character codes.
See Also
--------
numpy.ufunc.ntypes
Examples
--------
>>> np.add.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.multiply.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.power.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
'OO->O']
>>> np.exp.types
['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
>>> np.remainder.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
"""))
##############################################################################
#
# ufunc methods
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
reduce(a, axis=0, dtype=None, out=None, keepdims=False)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
:math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
r = op.identity # op = ufunc
for i in range(len(A)):
r = op(r, A[i])
return r
For example, add.reduce() is equivalent to sum().
Parameters
----------
a : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
The default (`axis` = 0) is to perform a reduction over the first
dimension of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is `None`, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
For operations which are either not commutative or not associative,
doing a reduction over multiple axes is not well-defined. The
ufuncs do not currently raise an exception in this case, but will
likely do so in the future.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided, a
freshly-allocated array is returned.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
.. versionadded:: 1.7.0
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
Examples
--------
>>> np.multiply.reduce([2,3,5])
30
A multi-dimensional array example:
>>> X = np.arange(8).reshape((2,2,2))
>>> X
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.add.reduce(X, 0)
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X) # confirm: default axis value is 0
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X, 1)
array([[ 2, 4],
[10, 12]])
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
accumulate(array, axis=0, dtype=None, out=None, keepdims=None)
Accumulate the result of applying the operator to all elements.
For a one-dimensional array, accumulate produces results equivalent to::
r = np.empty(len(A))
t = op.identity # op = the ufunc being applied to A's elements
for i in range(len(A)):
t = op(t, A[i])
r[i] = t
return r
For example, add.accumulate() is equivalent to np.cumsum().
For a multi-dimensional array, accumulate is applied along only one
axis (axis zero by default; see Examples below) so repeated use is
necessary if one wants to accumulate over multiple axes.
Parameters
----------
array : array_like
The array to act on.
axis : int, optional
The axis along which to apply the accumulation; default is zero.
dtype : data-type code, optional
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if such is provided, or the
data-type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided a
freshly-allocated array is returned.
keepdims : bool
Has no effect. Deprecated, and will be removed in future.
Returns
-------
r : ndarray
The accumulated values. If `out` was supplied, `r` is a reference to
`out`.
Examples
--------
1-D array examples:
>>> np.add.accumulate([2, 3, 5])
array([ 2, 5, 10])
>>> np.multiply.accumulate([2, 3, 5])
array([ 2, 6, 30])
2-D array examples:
>>> I = np.eye(2)
>>> I
array([[ 1., 0.],
[ 0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
array([[ 1., 0.],
[ 1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
array([[ 1., 0.],
[ 1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
array([[ 1., 1.],
[ 0., 1.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
reduceat(a, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are three exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
``indices[i+1] = a.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
simply ``a[indices[i]]``.
* if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised.
The shape of the output depends on the size of `indices`, and may be
larger than `a` (this happens if ``len(indices) > a.shape[axis]``).
Parameters
----------
a : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
reduce.
axis : int, optional
The axis along which to apply the reduceat.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided a
freshly-allocated array is returned.
Returns
-------
r : ndarray
The reduced values. If `out` was supplied, `r` is a reference to
`out`.
Notes
-----
A descriptive example:
If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as
``ufunc.reduceat(a, indices)[::2]`` where `indices` is
``range(len(a) - 1)`` with a zero placed
in every other element:
``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``.
Don't be fooled by this attribute's name: `reduceat(a)` is not
necessarily smaller than `a`.
Examples
--------
To take the running sum of four successive values:
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
array([ 6, 10, 14, 18])
A 2-D example:
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
::
# reduce such that the result has the following five rows:
# [row1 + row2 + row3]
# [row4]
# [row2]
# [row3]
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
array([[ 12., 15., 18., 21.],
[ 12., 13., 14., 15.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 24., 28., 32., 36.]])
::
# reduce such that result has the following two columns:
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
array([[ 0., 3.],
[ 120., 7.],
[ 720., 11.],
[ 2184., 15.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('outer',
"""
outer(A, B, **kwargs)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
``op.outer(A, B)`` is an array of dimension M + N such that:
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
For `A` and `B` one-dimensional, this is equivalent to::
r = empty((len(A), len(B)))
for i in range(len(A)):
for j in range(len(B)):
r[i,j] = op(A[i], B[j]) # op = ufunc in question
Parameters
----------
A : array_like
First array
B : array_like
Second array
kwargs : any
Arguments to pass on to the ufunc. Typically `dtype` or `out`.
Returns
-------
r : ndarray
Output array
See Also
--------
numpy.outer
Examples
--------
>>> np.multiply.outer([1, 2, 3], [4, 5, 6])
array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]])
A multi-dimensional example:
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> A.shape
(2, 3)
>>> B = np.array([[1, 2, 3, 4]])
>>> B.shape
(1, 4)
>>> C = np.multiply.outer(A, B)
>>> C.shape; C
(2, 3, 1, 4)
array([[[[ 1, 2, 3, 4]],
[[ 2, 4, 6, 8]],
[[ 3, 6, 9, 12]]],
[[[ 4, 8, 12, 16]],
[[ 5, 10, 15, 20]],
[[ 6, 12, 18, 24]]]])
"""))
add_newdoc('numpy.core', 'ufunc', ('at',
"""
at(a, indices, b=None)
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
`a[indices] += b`, except that results are accumulated for elements that
are indexed more than once. For example, `a[[0,0]] += 1` will only
increment the first element once because of buffering, whereas
`add.at(a, [0,0], 1)` will increment the first element twice.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
The array to perform in place operation on.
indices : array_like or tuple
Array like index object or slice object for indexing into first
operand. If first operand has multiple dimensions, indices can be a
tuple of array like index objects or slice objects.
b : array_like
Second operand for ufuncs requiring two operands. Operand must be
broadcastable over first operand after indexing or slicing.
Examples
--------
Set items 0 and 1 to their negative values:
>>> a = np.array([1, 2, 3, 4])
>>> np.negative.at(a, [0, 1])
>>> print(a)
[-1 -2  3  4]
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> np.add.at(a, [0, 1, 2, 2], 1)
>>> print(a)
[2 3 5 4]
Add items 0 and 1 in first array to second array,
and store results in first array:
>>> a = np.array([1, 2, 3, 4])
>>> b = np.array([1, 2])
>>> np.add.at(a, [0, 1], b)
>>> print(a)
[2 4 3 4]
"""))
##############################################################################
#
# Documentation for dtype attributes and methods
#
##############################################################################
##############################################################################
#
# dtype object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype',
"""
dtype(obj, align=False, copy=False)
Create a data type object.
A numpy array is homogeneous, and contains elements described by a
dtype object. A dtype object can be constructed from different
combinations of fundamental numeric types.
Parameters
----------
obj
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
or a comma-separated string. If a struct dtype is being created,
this also sets a sticky alignment flag ``isalignedstruct``.
copy : bool, optional
Make a new copy of the data-type object. If ``False``, the result
may just be a reference to a built-in data-type object.
See also
--------
result_type
Examples
--------
Using array-scalar type:
>>> np.dtype(np.int16)
dtype('int16')
Structured type, one field name 'f1', containing int16:
>>> np.dtype([('f1', np.int16)])
dtype([('f1', '<i2')])
Structured type, one field named 'f1', in itself containing a structured
type with one field:
>>> np.dtype([('f1', [('f1', np.int16)])])
dtype([('f1', [('f1', '<i2')])])
Structured type, two fields: the first field contains an unsigned int, the
second an int32:
>>> np.dtype([('f1', np.uint), ('f2', np.int32)])
dtype([('f1', '<u4'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
dtype([('a', '<f8'), ('b', '|S10')])
Using comma-separated field formats. The shape is (2,3):
>>> np.dtype("i4, (2,3)f8")
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
>>> np.dtype([('hello',(np.int,3)),('world',np.void,10)])
dtype([('hello', '<i4', 3), ('world', '|V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
dtype([('gender', '|S1'), ('age', '|u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
dtype([('surname', '|S25'), ('age', '|u1')])
""")
##############################################################################
#
# dtype attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
"""
The required alignment (bytes) of this data-type according to the compiler.
More information is available in the C-API section of the manual.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""
A character indicating the byte-order of this data-type object.
One of:
=== ==============
'=' native
'<' little-endian
'>' big-endian
'|' not applicable
=== ==============
All built-in data-type objects have byteorder either '=' or '|'.
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.byteorder
'='
>>> # endian is not relevant for 8 bit numbers
>>> np.dtype('i1').byteorder
'|'
>>> # or ASCII strings
>>> np.dtype('S2').byteorder
'|'
>>> # Even if specific code is given, and it is native
>>> # '=' is the byteorder
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> dt = np.dtype(native_code + 'i2')
>>> dt.byteorder
'='
>>> # Swapped code shows up as itself
>>> dt = np.dtype(swapped_code + 'i2')
>>> dt.byteorder == swapped_code
True
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
"""A unique character code for each of the 21 different built-in types."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
PEP3118 interface description of the data-type.
The format is that required by the 'descr' key in the
PEP3118 `__array_interface__` attribute.
Warning: This attribute exists specifically for PEP3118 compliance, and
is not a datatype description compatible with `np.dtype`.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
"""
Dictionary of named fields defined for this data type, or ``None``.
The dictionary is indexed by keys that are the names of the fields.
Each entry in the dictionary is a tuple fully describing the field::
(dtype, offset[, title])
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
of the tuple can be passed directly as arguments to the ``ndarray.getfield``
and ``ndarray.setfield`` methods.
See Also
--------
ndarray.getfield, ndarray.setfield
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print(dt.fields)
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
"""
Bit-flags describing how this data type is to be interpreted.
Bit-masks are in `numpy.core.multiarray` as the constants
`ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
`NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
of these flags is in C-API documentation; they are largely useful
for user-defined data-types.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
"""
Boolean indicating whether this dtype contains any reference-counted
objects in any fields or sub-dtypes.
Recall that what is actually in the ndarray memory representing
the Python object is the memory address of that object (a pointer).
Special handling may be required, and this attribute is useful for
distinguishing data types that may contain arbitrary Python objects
and data-types that won't.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
"""
Integer indicating how this dtype relates to the built-in dtypes.
Read-only.
= ========================================================================
0 if this is a structured array type, with fields
1 if this is a dtype compiled into numpy (such as ints, floats etc)
2 if the dtype is for a user-defined numpy type
A user-defined type uses the numpy C-API machinery to extend
numpy to handle a new array type. See
:ref:`user.user-defined-data-types` in the NumPy manual.
= ========================================================================
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.isbuiltin
1
>>> dt = np.dtype('f8')
>>> dt.isbuiltin
1
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.isbuiltin
0
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
"""
Boolean indicating whether the byte order of this dtype is native
to the platform.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
"""
Boolean indicating whether the dtype is a struct which maintains
field alignment. This flag is sticky, so when combining multiple
structs together, it is preserved and produces new dtypes which
are also aligned.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
"""
The element size of this data-type object.
For 18 of the 21 types this number is fixed by the data-type.
For the flexible data-types, this number can be anything.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
A character code (one of 'biufcmMOSUV') identifying the general kind of data.
= ======================
b boolean
i signed integer
u unsigned integer
f floating-point
c complex floating-point
m timedelta
M datetime
O object
S (byte-)string
U Unicode
V void
= ======================
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
Un-sized flexible data-type objects do not have this attribute.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
"""
Ordered list of field names, or ``None`` if there are no fields.
The names are ordered according to increasing byte offset. This can be
used, for example, to walk through all of the named fields in offset order.
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('num',
"""
A unique number for each of the 21 different built-in types.
These are roughly ordered from least-to-most precision.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
"""
Shape tuple of the sub-array if this data type describes a sub-array,
and ``()`` otherwise.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
"""The array-protocol typestring of this data-type object."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
"""
Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
None otherwise.
The *shape* is the fixed shape of the sub-array described by this
data type, and *item_dtype* the data type of the array.
If a field whose dtype object has this attribute is retrieved,
then the extra dimensions implied by *shape* are tacked on to
the end of the retrieved array.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
"""The type object used to instantiate a scalar of this data-type."""))
##############################################################################
#
# dtype methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new dtype with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. The default value ('S') results in swapping the current
byte order. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The code does a case-insensitive check on the first letter of
`new_order` for these alternatives. For example, any of '>'
or 'B' or 'b' or 'brian' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New dtype object with the given change to the byte order.
Notes
-----
Changes are also made in all fields and sub-arrays of the data type.
Examples
--------
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code+'i2')
>>> swapped_dt = np.dtype(swapped_code+'i2')
>>> native_dt.newbyteorder('S') == swapped_dt
True
>>> native_dt.newbyteorder() == swapped_dt
True
>>> native_dt == swapped_dt.newbyteorder('S')
True
>>> native_dt == swapped_dt.newbyteorder('=')
True
>>> native_dt == swapped_dt.newbyteorder('N')
True
>>> native_dt == native_dt.newbyteorder('|')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('<')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('L')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('>')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('B')
True
"""))
##############################################################################
#
# Datetime-related Methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'busdaycalendar',
"""
busdaycalendar(weekmask='1111100', holidays=None)
A business day calendar object that efficiently stores information
defining valid days for the busday family of functions.
The default valid days are Monday through Friday ("business days").
A busdaycalendar object can be specified with any set of weekly
valid days, plus an optional list of "holiday" dates that will always be invalid.
Once a busdaycalendar object is created, the weekmask and holidays
cannot be modified.
.. versionadded:: 1.7.0
Parameters
----------
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates, no matter which
weekday they fall upon. Holiday dates may be specified in any
order, and NaT (not-a-time) dates are ignored. This list is
saved in a normalized form that is suited for fast calculations
of valid days.
Returns
-------
out : busdaycalendar
A business day calendar object containing the specified
weekmask and holidays values.
See Also
--------
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Attributes
----------
Note: once a busdaycalendar object is created, you cannot modify the
weekmask or holidays. The attributes return copies of internal data.
weekmask : (copy) seven-element array of bool
holidays : (copy) sorted array of datetime64[D]
Examples
--------
>>> # Some important days in July
... bdd = np.busdaycalendar(
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
>>> # Default is Monday to Friday weekdays
... bdd.weekmask
array([ True, True, True, True, True, False, False], dtype='bool')
>>> # Any holidays already on the weekend are removed
... bdd.holidays
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
""")
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
"""A copy of the seven-element boolean mask indicating valid days."""))
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
add_newdoc('numpy.core.multiarray', 'is_busday',
"""
is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
Calculates which of the given dates are valid days, and which are not.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of bool, optional
If provided, this array is filled with the result.
Returns
-------
out : array of bool
An array with the same shape as ``dates``, containing True for
each valid day, and False for each invalid day.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # The weekdays are Friday, Saturday, and Monday
... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
array([False, False, True], dtype='bool')
""")
add_newdoc('numpy.core.multiarray', 'busday_offset',
"""
busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
First adjusts the date to fall on a valid day according to
the ``roll`` rule, then applies offsets to the given dates
counted in valid days.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
offsets : array_like of int
The array of offsets, which is broadcast with ``dates``.
roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
How to treat dates that do not fall on a valid day. The default
is 'raise'.
* 'raise' means to raise an exception for an invalid day.
* 'nat' means to return a NaT (not-a-time) for an invalid day.
* 'forward' and 'following' mean to take the first valid day
later in time.
* 'backward' and 'preceding' mean to take the first valid day
earlier in time.
* 'modifiedfollowing' means to take the first valid day
later in time unless it is across a Month boundary, in which
case to take the first valid day earlier in time.
* 'modifiedpreceding' means to take the first valid day
earlier in time unless it is across a Month boundary, in which
case to take the first valid day later in time.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of datetime64[D], optional
If provided, this array is filled with the result.
Returns
-------
out : array of datetime64[D]
An array with a shape from broadcasting ``dates`` and ``offsets``
together, containing the dates with offsets applied.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # First business day in October 2011 (not accounting for holidays)
... np.busday_offset('2011-10', 0, roll='forward')
numpy.datetime64('2011-10-03','D')
>>> # Last business day in February 2012 (not accounting for holidays)
... np.busday_offset('2012-03', -1, roll='forward')
numpy.datetime64('2012-02-29','D')
>>> # Third Wednesday in January 2011
... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
numpy.datetime64('2011-01-19','D')
>>> # 2012 Mother's Day in Canada and the U.S.
... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
numpy.datetime64('2012-05-13','D')
>>> # First business day on or after a date
... np.busday_offset('2011-03-20', 0, roll='forward')
numpy.datetime64('2011-03-21','D')
>>> np.busday_offset('2011-03-22', 0, roll='forward')
numpy.datetime64('2011-03-22','D')
>>> # First business day after a date
... np.busday_offset('2011-03-20', 1, roll='backward')
numpy.datetime64('2011-03-21','D')
>>> np.busday_offset('2011-03-22', 1, roll='backward')
numpy.datetime64('2011-03-23','D')
""")
add_newdoc('numpy.core.multiarray', 'busday_count',
"""
busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
Counts the number of valid days between `begindates` and
`enddates`, not including the day of `enddates`.
If ``enddates`` specifies a date value that is earlier than the
corresponding ``begindates`` date value, the count will be negative.
.. versionadded:: 1.7.0
Parameters
----------
begindates : array_like of datetime64[D]
The array of the first dates for counting.
enddates : array_like of datetime64[D]
The array of the end dates for counting, which are excluded
from the count themselves.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of int, optional
If provided, this array is filled with the result.
Returns
-------
out : array of int
An array with a shape from broadcasting ``begindates`` and ``enddates``
together, containing the number of valid days between
the begin and end dates.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
Examples
--------
>>> # Number of weekdays in January 2011
... np.busday_count('2011-01', '2011-02')
21
>>> # Number of weekdays in 2011
... np.busday_count('2011', '2012')
260
>>> # Number of Saturdays in 2011
... np.busday_count('2011', '2012', weekmask='Sat')
53
""")
add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
"""
normalize_axis_index(axis, ndim)
Normalizes an axis index, `axis`, such that it is a valid positive index
into the shape of an array with `ndim` dimensions. Raises an AxisError
with an appropriate message if this is not possible.
Used internally by all axis-checking logic.
.. versionadded:: 1.13.0
Parameters
----------
axis : int
The un-normalized index of the axis. Can be negative.
ndim : int
The number of dimensions of the array that `axis` should be normalized
against.
Returns
-------
normalized_axis : int
The normalized axis index, such that `0 <= normalized_axis < ndim`
Raises
------
AxisError
If the axis index is invalid, when `-ndim <= axis < ndim` is false.
Examples
--------
>>> normalize_axis_index(0, ndim=3)
0
>>> normalize_axis_index(1, ndim=3)
1
>>> normalize_axis_index(-1, ndim=3)
2
>>> normalize_axis_index(3, ndim=3)
Traceback (most recent call last):
...
AxisError: axis 3 is out of bounds for array of dimension 3
""")
##############################################################################
#
# nd_grid instances
#
##############################################################################
add_newdoc('numpy.lib.index_tricks', 'mgrid',
"""
`nd_grid` instance which returns a dense multi-dimensional "meshgrid".
An instance of `numpy.lib.index_tricks.nd_grid` which returns a dense
(or fleshed out) mesh-grid when indexed, so that each returned argument
has the same shape. The dimensions and number of the output arrays are
equal to the number of indexing dimensions. If the step length is not a
complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
Returns
-------
mesh-grid `ndarrays` all of the same dimensions
See Also
--------
numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
ogrid : like mgrid but returns open (not fleshed out) mesh grids
r_ : array concatenator
Examples
--------
>>> np.mgrid[0:5,0:5]
array([[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]],
[[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]])
>>> np.mgrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
""")
add_newdoc('numpy.lib.index_tricks', 'ogrid',
"""
`nd_grid` instance which returns an open multi-dimensional "meshgrid".
An instance of `numpy.lib.index_tricks.nd_grid` which returns an open
(i.e. not fleshed out) mesh-grid when indexed, so that only one dimension
of each returned array is greater than 1. The dimension and number of the
output arrays are equal to the number of indexing dimensions. If the step
length is not a complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
Returns
-------
mesh-grid `ndarrays` with only one dimension :math:`\\neq 1`
See Also
--------
np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
r_ : array concatenator
Examples
--------
>>> from numpy import ogrid
>>> ogrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
>>> ogrid[0:5,0:5]
[array([[0],
[1],
[2],
[3],
[4]]), array([[0, 1, 2, 3, 4]])]
""")
##############################################################################
#
# Documentation for `generic` attributes and methods
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'generic',
"""
Base class for numpy scalar types.
Class from which most (all?) numpy scalar types are derived. For
consistency, exposes the same API as `ndarray`, despite many
consequent attributes being either "get-only," or completely irrelevant.
This is the class from which it is strongly suggested users should derive
custom scalar types.
""")
# Attributes
add_newdoc('numpy.core.numerictypes', 'generic', ('T',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('base',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
    provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('data',
"""Pointer to start of data."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
"""Get array data-descriptor."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
"""The integer value of flags."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
"""A 1-D view of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
"""The imaginary part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
"""The length of one element in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
"""The length of the scalar in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
"""The number of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('real',
"""The real part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
"""Tuple of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('size',
"""The number of elements in the gentype."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
"""Tuple of bytes steps in each dimension."""))
# Methods
add_newdoc('numpy.core.numerictypes', 'generic', ('all',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('any',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmax',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmin',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argsort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('astype',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('choose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('clip',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('compress',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('copy',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dump',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dumps',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('fill',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flatten',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('getfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('item',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemset',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('max',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('mean',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('min',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new `dtype` with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
The `new_order` code can be any from the following:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
Parameters
----------
new_order : str, optional
Byte order to force; a value from the byte order specifications
above. The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New `dtype` object with the given change to the byte order.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('prod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ptp',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('put',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ravel',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('repeat',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('reshape',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('resize',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('round',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setflags',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('std',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('take',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tofile',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tolist',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tostring',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('trace',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('transpose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('var',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('view',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
##############################################################################
#
# Documentation for other scalar classes
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'bool_',
"""NumPy's Boolean type. Character code: ``?``. Alias: bool8""")
add_newdoc('numpy.core.numerictypes', 'complex64',
"""
Complex number type composed of two 32 bit floats. Character code: 'F'.
""")
add_newdoc('numpy.core.numerictypes', 'complex128',
"""
Complex number type composed of two 64 bit floats. Character code: 'D'.
Python complex compatible.
""")
add_newdoc('numpy.core.numerictypes', 'complex256',
"""
Complex number type composed of two 128-bit floats. Character code: 'G'.
""")
add_newdoc('numpy.core.numerictypes', 'float32',
"""
32-bit floating-point number. Character code 'f'. C float compatible.
""")
add_newdoc('numpy.core.numerictypes', 'float64',
"""
64-bit floating-point number. Character code 'd'. Python float compatible.
""")
add_newdoc('numpy.core.numerictypes', 'float96',
"""
""")
add_newdoc('numpy.core.numerictypes', 'float128',
"""
128-bit floating-point number. Character code: 'g'. C long float
compatible.
""")
add_newdoc('numpy.core.numerictypes', 'int8',
"""8-bit integer. Character code ``b``. C char compatible.""")
add_newdoc('numpy.core.numerictypes', 'int16',
"""16-bit integer. Character code ``h``. C short compatible.""")
add_newdoc('numpy.core.numerictypes', 'int32',
"""32-bit integer. Character code 'i'. C int compatible.""")
add_newdoc('numpy.core.numerictypes', 'int64',
"""64-bit integer. Character code 'l'. Python int compatible.""")
add_newdoc('numpy.core.numerictypes', 'object_',
"""Any Python object. Character code: 'O'.""")
|
bertrand-l/numpy
|
numpy/add_newdocs.py
|
Python
|
bsd-3-clause
| 226,717
|
[
"Brian"
] |
47c72371d87d86f8e7a086bfbed168b68b25f787b4f5230e54e0527913e95dea
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
import numpy as np
def mean_and_std(a, axis=None, weights=None, with_mean=True, with_std=True,
ddof=0):
"""Compute the weighted average and standard deviation along the
specified axis.
Parameters
----------
a : array_like
Calculate average and standard deviation of these values.
axis : int, optional
Axis along which the statistics are computed. The default is
to compute them on the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each
value in `a` contributes to the average according to its
associated weight. The weights array can either be 1-D (in
which case its length must be the size of `a` along the given
axis) or of the same shape as `a`. If `weights=None`, then all
data in `a` are assumed to have a weight equal to one.
with_mean : bool, optional, defaults to True
Compute average if True.
with_std : bool, optional, defaults to True
Compute standard deviation if True.
ddof : int, optional, defaults to 0
        Delta degrees of freedom: the variance is calculated by
        dividing by `n - ddof` (where `n` is the number of
        elements). By default it computes the maximum likelihood
estimator.
Returns
-------
average, std
Return the average and standard deviation along the specified
        axis. If either was not requested, `None` is returned instead.
"""
if not (with_mean or with_std):
raise ValueError("Either the mean or standard deviation need to be"
" computed.")
a = np.asarray(a)
if weights is None:
avg = a.mean(axis=axis) if with_mean else None
std = a.std(axis=axis, ddof=ddof) if with_std else None
else:
avg = np.average(a, axis=axis, weights=weights)
if with_std:
if axis is None:
variance = np.average((a - avg)**2, weights=weights)
else:
# Make sure that the subtraction to compute variance works for
# multidimensional arrays
a_rolled = np.rollaxis(a, axis)
# Numpy doesn't have a weighted std implementation, but this is
# stable and fast
variance = np.average((a_rolled - avg)**2, axis=0,
weights=weights)
if ddof != 0: # Don't waste time if variance doesn't need scaling
if axis is None:
variance *= a.size / (a.size - ddof)
else:
variance *= a.shape[axis] / (a.shape[axis] - ddof)
std = np.sqrt(variance)
else:
std = None
avg = avg if with_mean else None
return avg, std
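# A minimal, hedged usage sketch for ``mean_and_std`` (values chosen purely
# for illustration): with weights [3, 1] the point 1.0 counts three times as
# much as 4.0, so the weighted average is (3*1 + 1*4) / 4 = 1.75 and the
# ddof=0 weighted variance is (3*0.75**2 + 1*2.25**2) / 4 = 1.6875.
#
#     >>> avg, std = mean_and_std([1.0, 4.0], weights=[3.0, 1.0])
#     >>> avg, round(std, 6)
#     (1.75, 1.299038)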
def scale(a, weights=None, with_mean=True, with_std=True, ddof=0, copy=True):
"""Scale array by columns to have weighted average 0 and standard
deviation 1.
Parameters
----------
a : array_like
2D array whose columns are standardized according to the
weights.
weights : array_like, optional
Array of weights associated with the columns of `a`. By
default, the scaling is unweighted.
with_mean : bool, optional, defaults to True
Center columns to have 0 weighted mean.
with_std : bool, optional, defaults to True
Scale columns to have unit weighted std.
ddof : int, optional, defaults to 0
If with_std is True, variance is calculated by dividing by `n
- ddof` (where `n` is the number of elements). By default it
        computes the maximum likelihood estimator.
copy : bool, optional, defaults to True
Whether to perform the standardization in place, or return a
new copy of `a`.
Returns
-------
2D ndarray
Scaled array.
Notes
-----
Wherever std equals 0, it is replaced by 1 in order to avoid
division by zero.
"""
if copy:
a = a.copy()
avg, std = mean_and_std(a, axis=0, weights=weights, with_mean=with_mean,
with_std=with_std, ddof=ddof)
if with_mean:
a -= avg
if with_std:
std[std == 0] = 1.0
a /= std
return a
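# A hedged sketch of ``scale`` on a tiny 2D array: each column is centered to
# zero weighted mean and divided by its ddof=0 standard deviation; a column
# with zero spread gets std replaced by 1, so it is centered but not divided.
#
#     >>> a = np.array([[1.0, 10.0],
#     ...               [3.0, 10.0]])
#     >>> scale(a)
#     array([[-1.,  0.],
#            [ 1.,  0.]])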
def svd_rank(M_shape, S, tol=None):
"""Matrix rank of `M` given its singular values `S`.
See `np.linalg.matrix_rank` for a rationale on the tolerance
(we're not using that function because it doesn't let us reuse a
precomputed SVD)."""
if tol is None:
tol = S.max() * max(M_shape) * np.finfo(S.dtype).eps
return np.sum(S > tol)
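# Hedged illustration of ``svd_rank`` reusing a precomputed SVD (the whole
# point of not calling ``np.linalg.matrix_rank`` directly): a matrix whose
# second row is a multiple of the first has exactly one singular value above
# the tolerance.
#
#     >>> M = np.array([[1.0, 2.0], [2.0, 4.0]])
#     >>> S = np.linalg.svd(M, compute_uv=False)
#     >>> int(svd_rank(M.shape, S))
#     1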
def corr(x, y=None):
"""Computes correlation between columns of `x`, or `x` and `y`.
Correlation is covariance of (columnwise) standardized matrices,
so each matrix is first centered and scaled to have variance one,
and then their covariance is computed.
Parameters
----------
x : 2D array_like
Matrix of shape (n, p). Correlation between its columns will
be computed.
y : 2D array_like, optional
Matrix of shape (n, q). If provided, the correlation is
computed between the columns of `x` and the columns of
`y`. Else, it's computed between the columns of `x`.
Returns
-------
correlation
Matrix of computed correlations. Has shape (p, p) if `y` is
not provided, else has shape (p, q).
"""
x = np.asarray(x)
if y is not None:
y = np.asarray(y)
if y.shape[0] != x.shape[0]:
raise ValueError("Both matrices must have the same number of rows")
x, y = scale(x), scale(y)
else:
x = scale(x)
y = x
# Notice that scaling was performed with ddof=0 (dividing by n,
# the default), so now we need to remove it by also using ddof=0
# (dividing by n)
return x.T.dot(y) / x.shape[0]
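# Hedged sketch for ``corr``: perfectly anticorrelated columns give -1 off
# the diagonal, since both columns standardize to the same vector up to sign.
#
#     >>> x = np.array([[1.0, -1.0],
#     ...               [2.0, -2.0],
#     ...               [3.0, -3.0]])
#     >>> corr(x)
#     array([[ 1., -1.],
#            [-1.,  1.]])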
|
Jorge-C/bipy
|
skbio/maths/stats/ordination/utils.py
|
Python
|
bsd-3-clause
| 6,312
|
[
"scikit-bio"
] |
7ba1f2b67a1aafb3f3ee68c085e8a98edfe3a3b9101b333e9257e9153ae00e61
|
#!/usr/bin/python
from datetime import date,datetime
import sqlalchemy
import json
import pprint
import uuid
import logging
import os
import utils
from config import ComicStreamerConfig
from folders import AppFolders
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import deferred
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Float, String, DateTime, BigInteger, Text, LargeBinary, Table, ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy import create_engine, func
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.associationproxy import _AssociationList
from sqlalchemy.orm.properties import \
ColumnProperty,\
CompositeProperty,\
RelationshipProperty
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
from sqlalchemy.types import String
from sqlalchemy.dialects import mysql
mysql_active = ComicStreamerConfig()['database']['engine'].lower() == "mysql"
SCHEMA_VERSION=6
Base = declarative_base()
Session = sessionmaker()
def resultSetToJson(rset, listname="aaData", total=None):
return json.dumps(resultSetToDict(rset, listname, total), cls=alchemy_encoder(), check_circular=False)
def resultSetToDict(rset, listname="aaData", total=None):
l = []
for r in rset:
l.append(r)
results_dict = {}
results_dict[listname] = l
results_dict['page_count'] = len(l)
if total is None:
results_dict['total_count'] = len(l)
else:
results_dict['total_count'] = total
return results_dict
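# A hedged illustration of the dict shape built above (a plain Python list
# stands in for a real SQLAlchemy result set):
#
#     >>> d = resultSetToDict([1, 2, 3], listname="rows", total=10)
#     >>> d["page_count"], d["total_count"]
#     (3, 10)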
def alchemy_encoder():
_visited_objs = []
class AlchemyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj,_AssociationList):
# Convert association list into python list
return list(obj)
if isinstance(obj.__class__, DeclarativeMeta):
# don't re-visit self
if obj in _visited_objs:
return None
_visited_objs.append(obj)
# an SQLAlchemy class
fields = {}
for field in [x for x in dir(obj) if not x.startswith('_')
and x != 'metadata'
and not x.endswith('_raw')
and x != "persons"
and x != "roles"
and x != "issue_num"
and x != "file"
and x != "folder"
and x != "thumbnail"
]:
value = obj.__getattribute__(field)
if (isinstance(value, date)):
value = str(value)
if value is not None:
fields[field] = value
else:
fields[field] = ""
# a json-encodable dict
return fields
return json.JSONEncoder.default(self, obj)
return AlchemyEncoder
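# Hedged usage note: the factory above returns an encoder *class* (not an
# instance) with a fresh _visited_objs list baked in, which is what breaks
# relationship cycles on a per-dump basis. It is used exactly as in
# resultSetToJson above, e.g. (with a hypothetical `rows` result set):
#
#     json.dumps(rows, cls=alchemy_encoder(), check_circular=False)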
# Junction table
comics_characters_table = Table('comics_characters', Base.metadata,
Column('comic_id', Integer, ForeignKey('comics.id')),
Column('character_id', Integer, ForeignKey('characters.id'))
)
# Junction table
comics_teams_table = Table('comics_teams', Base.metadata,
Column('comic_id', Integer, ForeignKey('comics.id')),
Column('team_id', Integer, ForeignKey('teams.id'))
)
# Junction table
comics_locations_table = Table('comics_locations', Base.metadata,
Column('comic_id', Integer, ForeignKey('comics.id')),
Column('location_id', Integer, ForeignKey('locations.id'))
)
# Junction table
comics_storyarcs_table = Table('comics_storyarcs', Base.metadata,
Column('comic_id', Integer, ForeignKey('comics.id')),
Column('storyarc_id', Integer, ForeignKey('storyarcs.id'))
)
# Junction table
comics_alternateseries_table = Table('comics_alternateseries', Base.metadata,
Column('comic_id', Integer, ForeignKey('comics.id')),
Column('alternateseries_id', Integer, ForeignKey('alternateseries.id'))
)
# Junction table
comics_generictags_table = Table('comics_generictags', Base.metadata,
Column('comic_id', Integer, ForeignKey('comics.id')),
Column('generictags_id', Integer, ForeignKey('generictags.id'))
)
# Junction table
comics_genres_table = Table('comics_genres', Base.metadata,
Column('comic_id', Integer, ForeignKey('comics.id')),
Column('genre_id', Integer, ForeignKey('genres.id'))
)
# Junction table
comics_blacklist_table = Table('comics_blacklist', Base.metadata,
Column('comic_id', Integer, ForeignKey('comics.id')),
Column('blacklist_id', Integer, ForeignKey('blacklist.id')),
Column('page', Integer, primary_key=True),
Column('ts',DateTime, default=datetime.utcnow)
)
"""
# Junction table
readinglists_comics_table = Table('readinglists_comics', Base.metadata,
Column('comic_id', Integer, ForeignKey('comics.id')),
Column('readinglist_id', Integer, ForeignKey('readinglists.id'))
)
"""
class CreditComparator(RelationshipProperty.Comparator):
def __eq__(self, other):
return self.person() == other
class MyComparator(ColumnProperty.Comparator):
def __eq__(self, other):
#return func.lower(self.__clause_element__()) == func.lower(other)
#print "-----------ATB------", type(self.__clause_element__()), type(other)
# for the children objects, make all equal comparisons be likes
return self.__clause_element__().ilike(func.lower(unicode(other)))
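# Hedged sketch of what MyComparator changes: once installed as the
# comparator_factory of a name column (see the model classes below), an
# ordinary equality filter compiles to a case-insensitive ILIKE rather than
# a strict "=", so a hypothetical query such as
#
#     session.query(Person).filter(Person.name == u"alan moore")
#
# also matches rows stored as "Alan Moore".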
class Comic(Base):
__tablename__ = 'comics'
__table_args__ = {'sqlite_autoincrement': True, 'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
global mysql_active
if mysql_active:
path = Column(String(1000), unique=True)
fingerprint = Column(String(128))
folder = Column(String(1000))
file = Column(String(1000))
series = Column(String(1000))
issue = Column(String(100))
comments = Column(Text)
publisher = Column(String(256))
title = Column(String(1000))
imprint = Column(String(1000))
weblink = Column(String(1000))
hash = Column(String(1000))
language = Column(String(100))
comicbookvine = Column(String(64))
#thumbnail = Column(LargeBinary(1024*1024*10*10))
thumbnail = deferred(Column(LargeBinary(1024*1024*3)))
alternateIssue = Column(String(1000))
alternateseries_raw = relationship('AlternateSeries', secondary=comics_alternateseries_table,
cascade="save-update,delete") #, backref='comics')
credits_raw = relationship('Credit', #secondary=credits_,
cascade="all, delete", )#, backref='comics')
characters_raw = relationship('Character', secondary=comics_characters_table,
cascade="save-update,delete")#, backref='comics')
teams_raw = relationship('Team', secondary=comics_teams_table,
cascade="save-update,delete") #)#, backref='comics')
locations_raw = relationship('Location', secondary=comics_locations_table,
cascade="save-update,delete") #, backref='comics')
storyarcs_raw = relationship('StoryArc', secondary=comics_storyarcs_table,
cascade="save-update,delete") #, backref='comics')
generictags_raw = relationship('GenericTag', secondary=comics_generictags_table,
cascade="save-update,delete") #, backref='comics')
genres_raw = relationship('Genre', secondary=comics_genres_table,
cascade="save-update,delete") #, backref='comics')
blacklist_raw = relationship('Blacklist', secondary=comics_blacklist_table,
cascade="save-update,delete") #, backref='comics')
else:
path = Column(String, unique=True)
fingerprint = Column(String)
folder = Column(String)
file = Column(String)
series = Column(String)
issue = Column(String)
comments = Column(Text)
publisher = Column(String)
title = Column(String)
imprint = Column(String)
weblink = Column(String)
hash = Column(String)
language = Column(String)
thumbnail = deferred(Column(LargeBinary))
alternateIssue = Column(String)
comicbookvine = Column(String)
alternateseries_raw = relationship('AlternateSeries', secondary=comics_alternateseries_table,
cascade="save-update,delete") #, backref='comics')
credits_raw = relationship('Credit', #secondary=credits_,
cascade="all, delete", )#, backref='comics')
characters_raw = relationship('Character', secondary=comics_characters_table,
cascade="save-update,delete")#, backref='comics')
teams_raw = relationship('Team', secondary=comics_teams_table,
cascade="save-update,delete") #)#, backref='comics')
locations_raw = relationship('Location', secondary=comics_locations_table,
cascade="save-update,delete") #, backref='comics')
storyarcs_raw = relationship('StoryArc', secondary=comics_storyarcs_table,
cascade="save-update,delete") #, backref='comics')
generictags_raw = relationship('GenericTag', secondary=comics_generictags_table,
cascade="save-update,delete") #, backref='comics')
genres_raw = relationship('Genre', secondary=comics_genres_table,
cascade="save-update,delete") #, backref='comics')
blacklist_raw = relationship('Blacklist', secondary=comics_blacklist_table,
cascade="save-update,delete") #, backref='comics')
persons_raw = relationship("Person",
secondary="join(Credit, Person, Credit.person_id == Person.id)",
primaryjoin="and_(Comic.id == Credit.comic_id)",
#passive_updates=False,
viewonly=True
)
roles_raw = relationship("Role",
secondary="join(Credit, Role, Credit.role_id == Role.id)",
primaryjoin="and_(Comic.id == Credit.comic_id)",
#passive_updates=False,
viewonly=True
)
filesize = Column(BigInteger)
id = Column(Integer, primary_key=True)
issue_num = Column(Float)
date = Column(DateTime) # will be a composite of month,year,day for sorting/filtering
day = Column(Integer)
month = Column(Integer)
year = Column(Integer)
volume = Column(Integer)
page_count = Column(Integer)
deleted_ts = Column(DateTime)
lastread_ts = Column(DateTime)
lastread_page = Column(Integer)
alternateNumber = Column(Float)
#hash = Column(String)
added_ts = Column(DateTime, default=datetime.utcnow) # when the comic was added to the DB
mod_ts = Column(DateTime) # the last modified date of the file
"""
# chanhef to all instead of save-update
alternateseries_raw = relationship('AlternateSeries', secondary=comics_alternateseries_table, cascade="save-update,delete", backref='comics')
credits_raw = relationship('Credit',secondary=credits,cascade="save-update, delete", backref='comics')
characters_raw = relationship('Character', secondary=comics_characters_table,cascade="save-update ,delete", backref='comics')
teams_raw = relationship('Team', secondary=comics_teams_table,cascade="save-update ,delete", backref='comics')
locations_raw = relationship('Location', secondary=comics_locations_table,cascade="save-update ,delete", backref='comics')
storyarcs_raw = relationship('StoryArc', secondary=comics_storyarcs_table,cascade="save-update ,delete", backref='comics')
generictags_raw = relationship('GenericTag', secondary=comics_generictags_table,cascade="save-update, delete", backref='comics')
genres_raw = relationship('Genre', secondary=comics_genres_table,cascade="save-update, delete", backref='comics')
"""
#credits = association_proxy('credits_raw', 'person_role_dict')
alternateseries = association_proxy('alternateseries_raw', 'name')
characters = association_proxy('characters_raw', 'name')
teams = association_proxy('teams_raw', 'name')
locations = association_proxy('locations_raw', 'name')
storyarcs = association_proxy('storyarcs_raw', 'name')
generictags = association_proxy('generictags_raw', 'name')
persons = association_proxy('persons_raw', 'name')
roles = association_proxy('roles_raw', 'name')
genres = association_proxy('genres_raw', 'name')
blacklist = association_proxy('blacklist_raw', 'hash')
#blacklist = relationship("Blacklist", cascade="save-update,delete") #uselist=False,
def __repr__(self):
out = u"<Comic(id={0}, path={1},\n series={2}, issue={3}, year={4} pages={5}\n{6}".format(
self.id, self.folder+self.file,self.series,self.issue,self.year,self.page_count,self.characters)
return out
@property
def credits(self):
"""Merge credits together into a dict with role name as key, and lists of persons"""
out_dict = {}
# iterate over the list of credits mini dicts:
for c in self.credits_raw:
if c.role and c.person:
                if c.role.name not in out_dict:
out_dict[c.role.name] = []
out_dict[c.role.name].append(c.person.name)
return out_dict
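# Hedged illustration of the merged shape the ``credits`` property returns
# (names are invented for the example):
#
#     {"Writer": ["Alan Moore"], "Artist": ["Dave Gibbons", "John Higgins"]}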
class Credit(Base):
__tablename__ = 'credits'
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
#__table_args__ = {'extend_existing': True}
comic_id = Column(Integer, ForeignKey('comics.id'), primary_key=True)
role_id = Column(Integer, ForeignKey('roles.id'), primary_key=True)
person_id = Column(Integer, ForeignKey('persons.id'), primary_key=True)
#bidirectional attribute/collection of "comic"/"credits"
#comic = relationship(Comic,
# backref=backref("credits_backref_raw"),
# #cascade="all, delete-orphan")
# )
"""
person = relationship("Person", cascade="all, delete") #, backref='credits')
role = relationship("Role" , cascade="all, delete") #, backref='credits')
"""
person = relationship("Person",passive_deletes=True) # cascade="save-update, delete")
role = relationship("Role" ,passive_deletes=True) # cascade="save-update, delete")
def __init__(self, person=None, role=None):
self.person = person
self.role = role
#@property
#def person_role_tuple(self):
# return (self.person.name, self.role.name)
#@property
#def person_role_dict(self):
# return { self.role.name : [self.person.name] }
#def __repr__(self):
# return u"<Credit(person={0},role={1})>".format(self.person_role_tuple[1], self.person_role_tuple[0])
class Role(Base):
__tablename__ = "roles"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
id = Column(Integer, primary_key=True)
global mysql_active
if mysql_active:
name = ColumnProperty(
Column('name', String(1000), unique = True),
comparator_factory=MyComparator)
else:
name = ColumnProperty(
Column('name', String, unique = True),
comparator_factory=MyComparator)
class Person(Base):
__tablename__ = "persons"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
id = Column(Integer, primary_key=True)
global mysql_active
if mysql_active:
name = ColumnProperty(
Column('name', String(1000), unique = True),
comparator_factory=MyComparator)
else:
name = ColumnProperty(
Column('name', String, unique = True),
comparator_factory=MyComparator)
class Character(Base):
__tablename__ = "characters"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
id = Column(Integer, primary_key=True)
global mysql_active
if mysql_active:
name = ColumnProperty(
Column('name', String(1000), unique = True),
comparator_factory=MyComparator)
else:
name = ColumnProperty(
Column('name', String, unique = True),
comparator_factory=MyComparator)
def __repr__(self):
out = u"<Character(id={0},name='{1}')>".format(self.id, self.name)
return out
class Team(Base):
__tablename__ = "teams"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
id = Column(Integer, primary_key=True)
global mysql_active
if mysql_active:
name = ColumnProperty(
Column('name', String(1000), unique = True),
comparator_factory=MyComparator)
else:
name = ColumnProperty(
Column('name', String, unique = True),
comparator_factory=MyComparator)
class Location(Base):
__tablename__ = "locations"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
id = Column(Integer, primary_key=True)
global mysql_active
if mysql_active:
name = ColumnProperty(
Column('name', String(1000), unique = True),
comparator_factory=MyComparator)
else:
name = ColumnProperty(
Column('name', String, unique = True),
comparator_factory=MyComparator)
class StoryArc(Base):
__tablename__ = "storyarcs"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
id = Column(Integer, primary_key=True)
global mysql_active
if mysql_active:
name = ColumnProperty(
Column('name', String(1000), unique = True),
comparator_factory=MyComparator)
else:
name = ColumnProperty(
Column('name', String, unique = True),
comparator_factory=MyComparator)
class AlternateSeries(Base):
__tablename__ = "alternateseries"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
id = Column(Integer, primary_key=True)
global mysql_active
if mysql_active:
name = ColumnProperty(
Column('name', String(1000), unique = True),
comparator_factory=MyComparator)
else:
name = ColumnProperty(
Column('name', String, unique = True),
comparator_factory=MyComparator)
class GenericTag(Base):
__tablename__ = "generictags"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
id = Column(Integer, primary_key=True)
global mysql_active
if mysql_active:
name = ColumnProperty(
Column('name', String(1000), unique = True),
comparator_factory=MyComparator)
else:
name = ColumnProperty(
Column('name', String, unique = True),
comparator_factory=MyComparator)
class Genre(Base):
__tablename__ = "genres"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
id = Column(Integer, primary_key=True)
global mysql_active
if mysql_active:
name = ColumnProperty(
Column('name', String(1000), unique = True),
comparator_factory=MyComparator)
else:
name = ColumnProperty(
Column('name', String, unique = True),
comparator_factory=MyComparator)
class DeletedComic(Base):
__tablename__ = "deletedcomics"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
id = Column(Integer, primary_key=True)
comic_id = Column(Integer)
ts = Column(DateTime, default=datetime.utcnow)
def __unicode__(self):
out = u"DeletedComic: {0}:{1}".format(self.id, self.comic_id)
return out
class Blacklist(Base):
__tablename__ = "blacklist"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
id = Column(Integer, primary_key=True)
if mysql_active:
hash = Column(String(72))
else:
hash = Column(String)
#user_id = Column(Integer, ForeignKey('users.id'), primary_key=True)
# it's the file size of the page detect more file size...
detect = Column(BigInteger)
ts = Column(DateTime, default=datetime.utcnow)
class Favorite(Base):
__tablename__ = "favorites"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
#id = Column(Integer, primary_key=True)
comic_id = Column(Integer, ForeignKey('comics.id'), primary_key=True)
page = Column(Integer, primary_key=True)
ts = Column(DateTime, default=datetime.utcnow)
#user_id = Column(Integer, ForeignKey('users.id'), primary_key=True)
"""
class User(Base):
__tablename__ = "users"
__table_args__ = {'sqlite_autoincrement': True,'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
id = Column(Integer, primary_key=True)
if mysql_active:
name = Column(String(256))
password_digest = Column(String(16))
else:
name = Column(String)
password_digest = Column(String)
class Bookmark(Base):
__tablename__ = "bookmarks"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
#id = Column(Integer, primary_key=True)
comic_id = Column(Integer, ForeignKey('comics.id'), primary_key=True)
# user_id = Column(Integer, ForeignKey('users.id'), primary_key=True)
page = Column(Integer)
updated = Column(DateTime)
class Read(Base):
__tablename__ = "read"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
# id = Column(Integer, primary_key=True)
comic_id = Column(Integer, ForeignKey('comics.id'), primary_key=True)
# user_id = Column(Integer, ForeignKey('users.id'), primary_key=True)
updated = Column(DateTime)
#comics = relationship('Comic', secondary=readinglists_comics_table,
#cascade="delete", #, backref='comics')
# )
class ReadingList(Base):
__tablename__ = "readinglists"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
# id = Column(Integer, primary_key=True)
comic_id = Column(Integer, ForeignKey('comics.id'), primary_key=True)
user_id = Column(Integer, ForeignKey('users.id'), primary_key=True)
if mysql_active:
name = Column(String(256))
else:
name = Column(String)
updated = Column(DateTime)
#comics = relationship('Comic', secondary=readinglists_comics_table,
#cascade="delete", #, backref='comics')
# )
"""
class SchemaInfo(Base):
__tablename__ = "schemainfo"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
id = Column(Integer, primary_key=True)
schema_version = Column(Integer)
class DatabaseInfo(Base):
__tablename__ = "dbid"
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
id = Column(Integer, primary_key=True)
global mysql_active
if mysql_active:
uuid = Column(String(1000))
else:
uuid = Column(String)
created = Column(DateTime, default=datetime.utcnow)
last_updated = Column(DateTime)
def __str__(self):
out = u"{0}".format(self.uuid)
return out
class SchemaVersionException(Exception):
pass
class DataManager():
def __init__(self, config):
global mysql_active
self.config = config
self.init()
def stop(self):
if mysql_active:
logging.debug("Database: MySQL: Stopped")
else:
logging.debug("Database: SQLite: Stopped")
def init(self):
global mysql_active
if mysql_active:
try:
logging.debug("Database: MySQL: Started")
self.engine = create_engine("mysql://"+self.config['database.mysql']['username']+":"+utils.decode(self.config['general']['install_id'],self.config['database.mysql']['password'])+"@"+self.config['database.mysql']['host']+":"+str(self.config['database.mysql']['port'])+"/"+self.config['database.mysql']['database']+"?charset=utf8", pool_recycle=3600, echo=False)
logging.info("Database: MySQL: " + self.config['database.mysql']['database'] + " (" + self.config['database.mysql']['host'] + ":" + str(self.config['database.mysql']['port']) + ")")
except Exception, e:
mysql_active = False
logging.error("Database: MySQL: Failed (" + self.config['database.mysql']['database'] + " [" + self.config['database.mysql']['host'] + ":" + str(self.config['database.mysql']['port']) + "])")
logging.error("Database: MySQL: Failed ("+ str(e) +")")
logging.warning("Database: Switching to SQLite Engine")
if not mysql_active:
logging.debug("Database: SQLite: Started")
db = self.config['database.sqlite']['database']
if db == "": db = "comicstreamer"
db += u".sqlite"
self.dbfile = self.config['database.sqlite']['location']
if self.dbfile == "":
self.dbfile = os.path.join(AppFolders.appData(), db)
else:
if os.path.isdir(self.dbfile):
self.dbfile = os.path.join(self.dbfile, db)
else:
logging.error("Database: SQLite: Database Location Unavailable (" + self.dbfile + ")")
logging.warning("Database: Switching to SQLite Engine Default Database Location")
self.dbfile = os.path.join(AppFolders.appData(), db)
try:
self.engine = create_engine(u'sqlite:///'+ self.dbfile, echo=False)
logging.info("Database: SQLite: (" + self.dbfile + ")")
except:
logging.error("Database: SQLite Failed (" + self.dbfile + ")")
logging.warning("Database: Switching to SQLite Engine Default Database Location")
self.dbfile = os.path.join(AppFolders.appData(), "comicstreamer.sqlite")
try:
self.engine = create_engine('sqlite:///'+ self.dbfile, echo=False)
logging.info("Database: SQLite: (" + self.dbfile + ")")
except:
logging.error("Database: SQLite: Failed (" + self.dbfile + ")")
if mysql_active:
session_factory = sessionmaker(bind=self.engine, expire_on_commit=True, autoflush=True, autocommit=False) #, autoflush=False, autocommit=True, expire_on_commit=True) #,autocommit=True)
else:
session_factory = sessionmaker(bind=self.engine, expire_on_commit=True) #, autocommit=True) #, autoflush=False, autocommit=True, expire_on_commit=True) #,autocommit=True)
self.Session = scoped_session(session_factory)
def delete(self):
logging.info("Database: Wiping Database")
if mysql_active:
# "HERE fix
print "Bug this does not work :-/"
Base.metadata.drop_all(self.engine)
self.engine.dispose()
self.init()
else:
if os.path.exists( self.dbfile ):
os.unlink( self.dbfile )
def create(self):
# if we don't have a UUID for this DB, add it.
if mysql_active:
Base.metadata.create_all(self.engine)
session = self.Session()
else:
try:
Base.metadata.create_all(self.engine)
except:
logging.error("Database: There was an error loaded the database (maybe file corrupted)")
self.engine = create_engine('sqlite:///'+ self.dbfile, echo=False)
session = self.Session()
results = session.query(SchemaInfo).first()
if results is None:
#SQLADD
schemainfo = SchemaInfo()
schemainfo.schema_version = SCHEMA_VERSION
session.add(schemainfo)
logging.debug("Database: Setting scheme version".format(schemainfo.schema_version))
session.commit()
else:
if results.schema_version != SCHEMA_VERSION:
raise SchemaVersionException
results = session.query(DatabaseInfo).first()
if results is None:
dbinfo = DatabaseInfo()
dbinfo.uuid = unicode(uuid.uuid4().hex)
dbinfo.last_updated = datetime.utcnow()
session.add(dbinfo)
session.commit()
logging.debug("Database: Added new uuid".format(dbinfo.uuid))
"""
# Eventually, there will be multi-user support, but for now,
# just have a single user entry
results = session.query(User).first()
if results is None:
user = User()
user.name = ""
user.password_digest = utils.getDigest("")
session.add(user)
session.commit()
"""
session.close()
if __name__ == "__main__":
    dm = DataManager(ComicStreamerConfig())  # DataManager requires a config object
dm.create()
|
Tristan79/ComicStreamer
|
comicstreamerlib/database.py
|
Python
|
apache-2.0
| 30,152
|
[
"VisIt"
] |
ce3e824e34a838e72e92a3c05eecf82e2031ff6b38535af33b5c0373c0e27f91
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Monte Carlo Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.bayesflow.python.ops import monte_carlo_impl as monte_carlo_lib
from tensorflow.contrib.bayesflow.python.ops.monte_carlo_impl import _get_samples
from tensorflow.contrib.distributions.python.ops import mvn_diag as mvn_diag_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
layers = layers_lib
mc = monte_carlo_lib
class ExpectationImportanceSampleTest(test.TestCase):
def test_normal_integral_mean_and_var_correctly_estimated(self):
n = int(1e6)
with self.test_session():
mu_p = constant_op.constant([-1.0, 1.0], dtype=dtypes.float64)
mu_q = constant_op.constant([0.0, 0.0], dtype=dtypes.float64)
sigma_p = constant_op.constant([0.5, 0.5], dtype=dtypes.float64)
sigma_q = constant_op.constant([1.0, 1.0], dtype=dtypes.float64)
p = normal_lib.Normal(loc=mu_p, scale=sigma_p)
q = normal_lib.Normal(loc=mu_q, scale=sigma_q)
# Compute E_p[X].
e_x = mc.expectation_importance_sampler(
f=lambda x: x, log_p=p.log_prob, sampling_dist_q=q, n=n, seed=42)
# Compute E_p[X^2].
e_x2 = mc.expectation_importance_sampler(
f=math_ops.square, log_p=p.log_prob, sampling_dist_q=q, n=n, seed=42)
stddev = math_ops.sqrt(e_x2 - math_ops.square(e_x))
      # Relative tolerance (rtol) chosen 2 times as large as the minimum needed to
# pass.
# Convergence of mean is +- 0.003 if n = 100M
# Convergence of stddev is +- 0.00001 if n = 100M
self.assertEqual(p.batch_shape, e_x.get_shape())
self.assertAllClose(p.mean().eval(), e_x.eval(), rtol=0.01)
self.assertAllClose(p.stddev().eval(), stddev.eval(), rtol=0.02)
def test_multivariate_normal_prob_positive_product_of_components(self):
    # Test that importance sampling can correctly estimate the probability
    # that the product of components of a MultivariateNormal is > 0.
n = 1000
with self.test_session():
p = mvn_diag_lib.MultivariateNormalDiag(
loc=[0.], scale_diag=[1.0, 1.0])
q = mvn_diag_lib.MultivariateNormalDiag(
loc=[0.5], scale_diag=[3., 3.])
# Compute E_p[X_1 * X_2 > 0], with X_i the ith component of X ~ p(x).
# Should equal 1/2 because p is a spherical Gaussian centered at (0, 0).
def indicator(x):
x1_times_x2 = math_ops.reduce_prod(x, reduction_indices=[-1])
return 0.5 * (math_ops.sign(x1_times_x2) + 1.0)
prob = mc.expectation_importance_sampler(
f=indicator, log_p=p.log_prob, sampling_dist_q=q, n=n, seed=42)
      # Relative tolerance (rtol) chosen 2 times as large as the minimum needed to
# pass.
# Convergence is +- 0.004 if n = 100k.
self.assertEqual(p.batch_shape, prob.get_shape())
self.assertAllClose(0.5, prob.eval(), rtol=0.05)
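# A hedged aside on the identity these tests exercise: importance sampling
# rewrites an expectation under p as a weighted expectation under q,
#
#     E_p[f(X)] = E_q[f(X) * p(X) / q(X)],
#
# so samples drawn from the broader q still give an unbiased estimate of the
# quantity under p, with variance governed by the weight ratio p/q.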
class ExpectationImportanceSampleLogspaceTest(test.TestCase):
def test_normal_distribution_second_moment_estimated_correctly(self):
# Test the importance sampled estimate against an analytical result.
n = int(1e6)
with self.test_session():
mu_p = constant_op.constant([0.0, 0.0], dtype=dtypes.float64)
mu_q = constant_op.constant([-1.0, 1.0], dtype=dtypes.float64)
sigma_p = constant_op.constant([1.0, 2 / 3.], dtype=dtypes.float64)
sigma_q = constant_op.constant([1.0, 1.0], dtype=dtypes.float64)
p = normal_lib.Normal(loc=mu_p, scale=sigma_p)
q = normal_lib.Normal(loc=mu_q, scale=sigma_q)
# Compute E_p[X^2].
# Should equal [1, (2/3)^2]
log_e_x2 = mc.expectation_importance_sampler_logspace(
log_f=lambda x: math_ops.log(math_ops.square(x)),
log_p=p.log_prob,
sampling_dist_q=q,
n=n,
seed=42)
e_x2 = math_ops.exp(log_e_x2)
      # Relative tolerance (rtol) chosen 2 times as large as the minimum
      # needed to pass.
self.assertEqual(p.batch_shape, e_x2.get_shape())
self.assertAllClose([1., (2 / 3.)**2], e_x2.eval(), rtol=0.02)
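# Working in log space avoids underflow when f(x) * p(x) / q(x) spans many
# orders of magnitude: the estimator reduces to a log-mean-exp over
# log f(x) + log p(x) - log q(x). A hedged NumPy equivalent (illustrative
# only; not used by the tests):
def _np_logspace_importance_sample(log_f, log_p, log_q, z):
  """Numerically stable estimate of log E_p[f(X)] from samples z ~ q."""
  log_terms = log_f(z) + log_p(z) - log_q(z)
  m = log_terms.max()
  # log-mean-exp: subtract the max, exponentiate, average, then undo.
  return m + np.log(np.mean(np.exp(log_terms - m)))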
class ExpectationTest(test.TestCase):
def test_mc_estimate_of_normal_mean_and_variance_is_correct_vs_analytic(self):
random_seed.set_random_seed(0)
n = 20000
with self.test_session():
p = normal_lib.Normal(loc=[1.0, -1.0], scale=[0.3, 0.5])
# Compute E_p[X] and E_p[X^2].
z = p.sample(n, seed=42)
e_x = mc.expectation(lambda x: x, p, z=z, seed=42)
e_x2 = mc.expectation(math_ops.square, p, z=z, seed=0)
var = e_x2 - math_ops.square(e_x)
self.assertEqual(p.batch_shape, e_x.get_shape())
self.assertEqual(p.batch_shape, e_x2.get_shape())
      # Relative tolerance (rtol) chosen 2 times as large as the minimum
      # needed to pass.
self.assertAllClose(p.mean().eval(), e_x.eval(), rtol=0.01)
self.assertAllClose(p.variance().eval(), var.eval(), rtol=0.02)
class GetSamplesTest(test.TestCase):
"""Test the private method 'get_samples'."""
def test_raises_if_both_z_and_n_are_none(self):
with self.test_session():
dist = normal_lib.Normal(loc=0., scale=1.)
z = None
n = None
seed = None
with self.assertRaisesRegexp(ValueError, 'exactly one'):
_get_samples(dist, z, n, seed)
def test_raises_if_both_z_and_n_are_not_none(self):
with self.test_session():
dist = normal_lib.Normal(loc=0., scale=1.)
z = dist.sample(seed=42)
n = 1
seed = None
with self.assertRaisesRegexp(ValueError, 'exactly one'):
_get_samples(dist, z, n, seed)
def test_returns_n_samples_if_n_provided(self):
with self.test_session():
dist = normal_lib.Normal(loc=0., scale=1.)
z = None
n = 10
seed = None
z = _get_samples(dist, z, n, seed)
self.assertEqual((10,), z.get_shape())
def test_returns_z_if_z_provided(self):
with self.test_session():
dist = normal_lib.Normal(loc=0., scale=1.)
z = dist.sample(10, seed=42)
n = None
seed = None
z = _get_samples(dist, z, n, seed)
self.assertEqual((10,), z.get_shape())
class Expectationv2Test(test.TestCase):
def test_works_correctly(self):
with self.test_session() as sess:
x = constant_op.constant([-1e6, -100, -10, -1, 1, 10, 100, 1e6])
p = normal_lib.Normal(loc=x, scale=1.)
      # We use the prefix "efx" to mean "E_p[f(X)]".
f = lambda u: u
efx_true = x
samples = p.sample(int(1e5), seed=1)
efx_reparam = mc.expectation_v2(f, samples, p.log_prob)
efx_score = mc.expectation_v2(f, samples, p.log_prob,
use_reparametrization=False)
[
efx_true_,
efx_reparam_,
efx_score_,
efx_true_grad_,
efx_reparam_grad_,
efx_score_grad_,
] = sess.run([
efx_true,
efx_reparam,
efx_score,
gradients_impl.gradients(efx_true, x)[0],
gradients_impl.gradients(efx_reparam, x)[0],
gradients_impl.gradients(efx_score, x)[0],
])
self.assertAllEqual(np.ones_like(efx_true_grad_), efx_true_grad_)
self.assertAllClose(efx_true_, efx_reparam_, rtol=0.005, atol=0.)
self.assertAllClose(efx_true_, efx_score_, rtol=0.005, atol=0.)
self.assertAllEqual(np.ones_like(efx_true_grad_, dtype=np.bool),
np.isfinite(efx_reparam_grad_))
self.assertAllEqual(np.ones_like(efx_true_grad_, dtype=np.bool),
np.isfinite(efx_score_grad_))
self.assertAllClose(efx_true_grad_, efx_reparam_grad_,
rtol=0.03, atol=0.)
# Variance is too high to be meaningful, so we'll only check those which
# converge.
self.assertAllClose(efx_true_grad_[2:-2],
efx_score_grad_[2:-2],
rtol=0.05, atol=0.)
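# Expectationv2Test compares the two gradient estimators expectation_v2
# supports: reparameterization (differentiate through the samples) and the
# score-function / REINFORCE form,
#   d/dx E_p[f(Z)] = E_p[f(Z) * d/dx log p(Z; x)].
# A hedged NumPy sketch for a unit-variance Gaussian location parameter,
# where d/dx log N(z; x, 1) = z - x (illustrative only; the helper name is
# hypothetical):
def _np_score_function_grad(f, x, z):
  """Monte Carlo score-function estimate of d/dx E[f(Z)] with Z ~ N(x, 1)."""
  return np.mean(f(z) * (z - x))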
if __name__ == '__main__':
test.main()
|
npuichigo/ttsflow
|
third_party/tensorflow/tensorflow/contrib/bayesflow/python/kernel_tests/monte_carlo_test.py
|
Python
|
apache-2.0
| 9,026
|
[
"Gaussian"
] |
b1095b113edff02a82c3da4039a4e418f488fb009919611ff6fcefe92aafa6ba
|
"""
Student Views
"""
import datetime
import logging
import uuid
import json
import warnings
from collections import defaultdict
from urlparse import urljoin, urlsplit, parse_qs, urlunsplit
from django.views.generic import TemplateView
from pytz import UTC
from requests import HTTPError
from ipware.ip import get_ip
import edx_oauth2_provider
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch, reverse_lazy
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseServerError, Http404
from django.shortcuts import redirect
from django.utils.encoding import force_bytes, force_text
from django.utils.translation import ungettext
from django.utils.http import base36_to_int, urlsafe_base64_encode, urlencode
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver, Signal
from django.template.response import TemplateResponse
from provider.oauth2.models import Client
from ratelimitbackend.exceptions import RateLimitException
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from social.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile,
PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED,
LogoutViewConfiguration)
from student.forms import AccountCreationForm, PasswordResetFormNoActive, get_registration_extension_form
from lms.djangoapps.commerce.utils import EcommerceService # pylint: disable=import-error
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error
from bulk_email.models import Optout, BulkEmailFlag # pylint: disable=import-error
from certificates.models import CertificateStatuses, certificate_status_for_student
from certificates.api import ( # pylint: disable=import-error
get_certificate_url,
has_html_certificates_enabled,
)
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from openedx.core.djangoapps.external_auth.models import ExternalAuthMap
import openedx.core.djangoapps.external_auth.views
from openedx.core.djangoapps.external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
from lang_pref import LANGUAGE_KEY
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import outer_atomic
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from util.password_policy_validators import validate_password_strength
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
check_verify_status_by_course,
auth_pipeline_urls, get_next_url_for_login_page,
DISABLE_UNENROLL_CERT_STATES,
destroy_oauth_tokens
)
from student.cookies import set_logged_in_cookies, delete_logged_in_cookies
from student.models import anonymous_id_for_user, UserAttribute, EnrollStatusChange
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
from embargo import api as embargo_api
import analytics
from eventtracking import tracker
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
from openedx.core.djangoapps.credit.email_utils import get_credit_provider_display_names, make_providers_strings
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.programs import utils as programs_utils
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming import helpers as theming_helpers
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display') # pylint: disable=invalid-name
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
# Used as the name of the user attribute for tracking affiliate registrations
REGISTRATION_AFFILIATE_ID = 'registration_affiliate_id'
# used to announce a registration
REGISTER_USER = Signal(providing_args=["user", "profile"])
# Disable this warning because it doesn't make sense to completely refactor tests to appease Pylint
# pylint: disable=logging-format-interpolation
def csrf_token(context):
"""A csrf token that can be included in a form."""
token = context.get('csrf_token', '')
if token == 'NOTPROVIDED':
return ''
return (u'<div style="display:none"><input type="hidden"'
' name="csrfmiddlewaretoken" value="%s" /></div>' % (token))
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
"""
Render the edX main page.
extra_context is used to allow immediate display of certain modal windows, eg signup,
as used by external_auth.
"""
if extra_context is None:
extra_context = {}
courses = get_courses(user)
if configuration_helpers.get_value(
"ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"],
):
courses = sort_by_start_date(courses)
else:
courses = sort_by_announcement(courses)
context = {'courses': courses}
context['homepage_overlay_html'] = configuration_helpers.get_value('homepage_overlay_html')
# This appears to be an unused context parameter, at least for the master templates...
context['show_partners'] = configuration_helpers.get_value('show_partners', True)
# TO DISPLAY A YOUTUBE WELCOME VIDEO
# 1) Change False to True
context['show_homepage_promo_video'] = configuration_helpers.get_value('show_homepage_promo_video', False)
# 2) Add your video's YouTube ID (11 chars, eg "123456789xX"), or specify via site configuration
# Note: This value should be moved into a configuration setting and plumbed-through to the
# context via the site configuration workflow, versus living here
youtube_video_id = configuration_helpers.get_value('homepage_promo_video_youtube_id', "your-youtube-id")
context['homepage_promo_video_youtube_id'] = youtube_video_id
# allow for theme override of the courses list
context['courses_list'] = theming_helpers.get_template_path('courses_list.html')
# Insert additional context for use in the template
context.update(extra_context)
return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
"""
If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
Currently, this is sha1(user.username). Otherwise, return survey_link.
"""
return survey_link.format(UNIQUE_ID=unique_id_for_user(user))
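# Example (hypothetical link): a survey URL configured as
#   "https://example.com/survey?uid={UNIQUE_ID}"
# is rendered per-user via str.format, so each student receives a distinct,
# anonymized survey link.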
def cert_info(user, course_overview, course_mode):
"""
Get the certificate info needed to render the dashboard section for the given
student and course.
Arguments:
user (User): A user.
course_overview (CourseOverview): A course.
course_mode (str): The enrollment mode (honor, verified, audit, etc.)
Returns:
dict: Empty dict if certificates are disabled or hidden, or a dictionary with keys:
'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
'show_download_url': bool
'download_url': url, only present if show_download_url is True
'show_disabled_download_button': bool -- true if state is 'generating'
'show_survey_button': bool
'survey_url': url, only if show_survey_button is True
'grade': if status is not 'processing'
'can_unenroll': if status allows for unenrollment
"""
if not course_overview.may_certify():
return {}
return _cert_info(
user,
course_overview,
certificate_status_for_student(user, course_overview.id),
course_mode
)
def reverification_info(statuses):
"""
    Returns reverification-related information for *all* of the user's
    enrollments whose reverification status is in statuses.
Args:
statuses (list): a list of reverification statuses we want information for
example: ["must_reverify", "denied"]
Returns:
dictionary of lists: dictionary with one key per status, e.g.
dict["must_reverify"] = []
dict["must_reverify"] = [some information]
"""
reverifications = defaultdict(list)
# Sort the data by the reverification_end_date
for status in statuses:
if reverifications[status]:
reverifications[status].sort(key=lambda x: x.date)
return reverifications
def get_course_enrollments(user, org_to_include, orgs_to_exclude):
"""
Given a user, return a filtered set of his or her course enrollments.
Arguments:
user (User): the user in question.
org_to_include (str): If not None, ONLY courses of this org will be returned.
orgs_to_exclude (list[str]): If org_to_include is not None, this
argument is ignored. Else, courses of this org will be excluded.
Returns:
generator[CourseEnrollment]: a sequence of enrollments to be displayed
on the user's dashboard.
"""
for enrollment in CourseEnrollment.enrollments_for_user(user):
# If the course is missing or broken, log an error and skip it.
course_overview = enrollment.course_overview
if not course_overview:
log.error(
"User %s enrolled in broken or non-existent course %s",
user.username,
enrollment.course_id
)
continue
# Filter out anything that is not attributed to the current ORG.
if org_to_include and course_overview.location.org != org_to_include:
continue
        # Conversely, filter out any enrollments attributed to an excluded org.
elif course_overview.location.org in orgs_to_exclude:
continue
# Else, include the enrollment.
else:
yield enrollment
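# Example (hypothetical org values): a white-labeled site showing only its
# own courses, versus the main site excluding the white-label orgs:
#   list(get_course_enrollments(user, "SchoolX", []))
#   list(get_course_enrollments(user, None, ["SchoolX", "SchoolY"]))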
def _cert_info(user, course_overview, cert_status, course_mode): # pylint: disable=unused-argument
"""
Implements the logic for cert_info -- split out for testing.
Arguments:
user (User): A user.
course_overview (CourseOverview): A course.
course_mode (str): The enrollment mode (honor, verified, audit, etc.)
"""
# simplify the status for the template using this lookup table
template_state = {
CertificateStatuses.generating: 'generating',
CertificateStatuses.downloadable: 'ready',
CertificateStatuses.notpassing: 'notpassing',
CertificateStatuses.restricted: 'restricted',
CertificateStatuses.auditing: 'auditing',
CertificateStatuses.audit_passing: 'auditing',
CertificateStatuses.audit_notpassing: 'auditing',
CertificateStatuses.unverified: 'unverified',
}
default_status = 'processing'
default_info = {
'status': default_status,
'show_disabled_download_button': False,
'show_download_url': False,
'show_survey_button': False,
'can_unenroll': True,
}
if cert_status is None:
return default_info
is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing', 'auditing')
if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status:
return {}
status = template_state.get(cert_status['status'], default_status)
status_dict = {
'status': status,
'show_download_url': status == 'ready',
'show_disabled_download_button': status == 'generating',
'mode': cert_status.get('mode', None),
'linked_in_url': None,
'can_unenroll': status not in DISABLE_UNENROLL_CERT_STATES,
}
if (status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing', 'unverified') and
course_overview.end_of_course_survey_url is not None):
status_dict.update({
'show_survey_button': True,
'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)})
else:
status_dict['show_survey_button'] = False
if status == 'ready':
        # Show the certificate web view button if the certificate is in the
        # ready state and the feature flags are enabled.
if has_html_certificates_enabled(course_overview.id, course_overview):
if course_overview.has_any_active_web_certificate:
status_dict.update({
'show_cert_web_view': True,
'cert_web_view_url': get_certificate_url(course_id=course_overview.id, uuid=cert_status['uuid'])
})
else:
# don't show download certificate button if we don't have an active certificate for course
status_dict['show_download_url'] = False
elif 'download_url' not in cert_status:
log.warning(
u"User %s has a downloadable cert for %s, but no download url",
user.username,
course_overview.id
)
return default_info
else:
status_dict['download_url'] = cert_status['download_url']
# If enabled, show the LinkedIn "add to profile" button
# Clicking this button sends the user to LinkedIn where they
# can add the certificate information to their profile.
linkedin_config = LinkedInAddToProfileConfiguration.current()
# posting certificates to LinkedIn is not currently
# supported in White Labels
if linkedin_config.enabled and not theming_helpers.is_request_in_themed_site():
status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
course_overview.id,
course_overview.display_name,
cert_status.get('mode'),
cert_status['download_url']
)
if status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing', 'unverified'):
if 'grade' not in cert_status:
# Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
# who need to be regraded (we weren't tracking 'notpassing' at first).
# We can add a log.warning here once we think it shouldn't happen.
return default_info
else:
status_dict['grade'] = cert_status['grade']
return status_dict
@ensure_csrf_cookie
def signin_user(request):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
external_auth_response = external_auth_login(request)
if external_auth_response is not None:
return external_auth_response
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated():
return redirect(redirect_to)
third_party_auth_error = None
for msg in messages.get_messages(request):
if msg.extra_tags.split()[0] == "social-auth":
# msg may or may not be translated. Try translating [again] in case we are able to:
third_party_auth_error = _(unicode(msg)) # pylint: disable=translation-of-non-string
break
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
# Bool injected into JS to submit form if we're inside a running third-
# party auth pipeline; distinct from the actual instance of the running
# pipeline, if any.
'pipeline_running': 'true' if pipeline.running(request) else 'false',
'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
'platform_name': configuration_helpers.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'third_party_auth_error': third_party_auth_error
}
return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated():
return redirect(redirect_to)
external_auth_response = external_auth_register(request)
if external_auth_response is not None:
return external_auth_response
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
'email': '',
'name': '',
'running_pipeline': None,
'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
'platform_name': configuration_helpers.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'selected_provider': '',
'username': '',
}
if extra_context is not None:
context.update(extra_context)
if context.get("extauth_domain", '').startswith(
openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
):
return render_to_response('register-shib.html', context)
# If third-party auth is enabled, prepopulate the form with data from the
# selected provider.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
current_provider = provider.Registry.get_from_pipeline(running_pipeline)
if current_provider is not None:
overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))
overrides['running_pipeline'] = running_pipeline
overrides['selected_provider'] = current_provider.name
context.update(overrides)
return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
"""
We would like to compute some more information from the given course modes
    and the user's current enrollment.
    Returns the following information:
    - whether to show the course upsell information
    - the number of days until the user can no longer upsell
"""
if modes is None:
modes = CourseMode.modes_for_course_dict(course_id)
mode_info = {'show_upsell': False, 'days_for_upsell': None}
# we want to know if the user is already enrolled as verified or credit and
# if verified is an option.
if CourseMode.VERIFIED in modes and enrollment.mode in CourseMode.UPSELL_TO_VERIFIED_MODES:
mode_info['show_upsell'] = True
mode_info['verified_sku'] = modes['verified'].sku
mode_info['verified_bulk_sku'] = modes['verified'].bulk_sku
# if there is an expiration date, find out how long from now it is
if modes['verified'].expiration_datetime:
today = datetime.datetime.now(UTC).date()
mode_info['days_for_upsell'] = (modes['verified'].expiration_datetime.date() - today).days
return mode_info
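# Worked example (hypothetical dates): if the verified mode's
# expiration_datetime falls on Jan 31 and today is Jan 21, then
# days_for_upsell == 10 and show_upsell is True for an upsellable
# enrollment, letting the dashboard render "10 days left to upgrade".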
def is_course_blocked(request, redeemed_registration_codes, course_key):
"""Checking either registration is blocked or not ."""
blocked = False
for redeemed_registration in redeemed_registration_codes:
        # Registration codes may be generated via the bulk purchase scenario;
        # for invoice-generated registration codes we must check whether the
        # associated invoice is still valid.
if redeemed_registration.invoice_item:
if not redeemed_registration.invoice_item.invoice.is_valid:
blocked = True
# disabling email notifications for unpaid registration courses
Optout.objects.get_or_create(user=request.user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
request.user.username,
request.user.email,
course_key,
)
track.views.server_track(
request,
"change-email1-settings",
{"receive_emails": "no", "course": course_key.to_deprecated_string()},
page='dashboard',
)
break
return blocked
@login_required
@ensure_csrf_cookie
def dashboard(request):
user = request.user
platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
# we want to filter and only show enrollments for courses within
# the 'ORG' defined in configuration.
course_org_filter = configuration_helpers.get_value('course_org_filter')
    # Filter out any courses belonging to an org that has been declared in a
    # site configuration.
org_filter_out_set = configuration_helpers.get_all_orgs()
# remove our current org from the "filter out" list, if applicable
if course_org_filter:
org_filter_out_set.remove(course_org_filter)
# Build our (course, enrollment) list for the user, but ignore any courses that no
# longer exist (because the course IDs have changed). Still, we don't delete those
# enrollments, because it could have been a data push snafu.
course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set))
# sort the enrollment pairs by the enrollment date
course_enrollments.sort(key=lambda x: x.created, reverse=True)
# Retrieve the course modes for each course
enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
__, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
course_modes_by_course = {
course_id: {
mode.slug: mode
for mode in modes
}
for course_id, modes in unexpired_course_modes.iteritems()
}
# Check to see if the student has recently enrolled in a course.
# If so, display a notification message confirming the enrollment.
enrollment_message = _create_recent_enrollment_message(
course_enrollments, course_modes_by_course
)
course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)
message = ""
if not user.is_active:
message = render_to_string(
'registration/activate_account_notice.html',
{'email': user.email, 'platform_name': platform_name}
)
# Global staff can see what courses errored on their dashboard
staff_access = False
errored_courses = {}
if has_access(user, 'staff', 'global'):
# Show any courses that errored on load
staff_access = True
errored_courses = modulestore().get_errored_courses()
show_courseware_links_for = frozenset(
enrollment.course_id for enrollment in course_enrollments
if has_access(request.user, 'load', enrollment.course_overview)
and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview)
)
# Find programs associated with courses being displayed. This information
# is passed in the template context to allow rendering of program-related
# information on the dashboard.
meter = programs_utils.ProgramProgressMeter(user, enrollments=course_enrollments)
programs_by_run = meter.engaged_programs(by_run=True)
# Construct a dictionary of course mode information
# used to render the course list. We re-use the course modes dict
# we loaded earlier to avoid hitting the database.
course_mode_info = {
enrollment.course_id: complete_course_mode_info(
enrollment.course_id, enrollment,
modes=course_modes_by_course[enrollment.course_id]
)
for enrollment in course_enrollments
}
# Determine the per-course verification status
# This is a dictionary in which the keys are course locators
# and the values are one of:
#
# VERIFY_STATUS_NEED_TO_VERIFY
# VERIFY_STATUS_SUBMITTED
# VERIFY_STATUS_APPROVED
# VERIFY_STATUS_MISSED_DEADLINE
#
# Each of which correspond to a particular message to display
# next to the course on the dashboard.
#
# If a course is not included in this dictionary,
# there is no verification messaging to display.
verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
cert_statuses = {
enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode)
for enrollment in course_enrollments
}
# only show email settings for Mongo course and when bulk email is turned on
show_email_settings_for = frozenset(
enrollment.course_id for enrollment in course_enrollments if (
BulkEmailFlag.feature_enabled(enrollment.course_id)
)
)
# Verification Attempts
# Used to generate the "you must reverify for course x" banner
verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user)
# Gets data for midcourse reverifications, if any are necessary or have failed
statuses = ["approved", "denied", "pending", "must_reverify"]
reverifications = reverification_info(statuses)
show_refund_option_for = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.refundable()
)
block_courses = frozenset(
enrollment.course_id for enrollment in course_enrollments
if is_course_blocked(
request,
CourseRegistrationCode.objects.filter(
course_id=enrollment.course_id,
registrationcoderedemption__redeemed_by=request.user
),
enrollment.course_id
)
)
enrolled_courses_either_paid = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.is_paid_course()
)
# If there are *any* denied reverifications that have not been toggled off,
# we'll display the banner
denied_banner = any(item.display for item in reverifications["denied"])
# Populate the Order History for the side-bar.
order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set)
# get list of courses having pre-requisites yet to be completed
courses_having_prerequisites = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.course_overview.pre_requisite_courses
)
courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)
if 'notlive' in request.GET:
redirect_message = _("The course you are looking for does not start until {date}.").format(
date=request.GET['notlive']
)
elif 'course_closed' in request.GET:
redirect_message = _("The course you are looking for is closed for enrollment as of {date}.").format(
date=request.GET['course_closed']
)
else:
redirect_message = ''
context = {
'enrollment_message': enrollment_message,
'redirect_message': redirect_message,
'course_enrollments': course_enrollments,
'course_optouts': course_optouts,
'message': message,
'staff_access': staff_access,
'errored_courses': errored_courses,
'show_courseware_links_for': show_courseware_links_for,
'all_course_modes': course_mode_info,
'cert_statuses': cert_statuses,
'credit_statuses': _credit_statuses(user, course_enrollments),
'show_email_settings_for': show_email_settings_for,
'reverifications': reverifications,
'verification_status': verification_status,
'verification_status_by_course': verify_status_by_course,
'verification_msg': verification_msg,
'show_refund_option_for': show_refund_option_for,
'block_courses': block_courses,
'denied_banner': denied_banner,
'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
'user': user,
'logout_url': reverse('logout'),
'platform_name': platform_name,
'enrolled_courses_either_paid': enrolled_courses_either_paid,
'provider_states': [],
'order_history_list': order_history_list,
'courses_requirements_not_met': courses_requirements_not_met,
'nav_hidden': True,
'programs_by_run': programs_by_run,
'show_program_listing': ProgramsApiConfig.current().show_program_listing,
'disable_courseware_js': True,
}
ecommerce_service = EcommerceService()
if ecommerce_service.is_enabled(request.user):
context.update({
'use_ecommerce_payment_flow': True,
'ecommerce_payment_page': ecommerce_service.payment_page_url(),
})
return render_to_response('dashboard.html', context)
def _create_recent_enrollment_message(course_enrollments, course_modes): # pylint: disable=invalid-name
"""
Builds a recent course enrollment message.
Constructs a new message template based on any recent course enrollments
for the student.
Args:
course_enrollments (list[CourseEnrollment]): a list of course enrollments.
course_modes (dict): Mapping of course ID's to course mode dictionaries.
Returns:
A string representing the HTML message output from the message template.
None if there are no recently enrolled courses.
"""
recently_enrolled_courses = _get_recently_enrolled_courses(course_enrollments)
if recently_enrolled_courses:
enroll_messages = [
{
"course_id": enrollment.course_overview.id,
"course_name": enrollment.course_overview.display_name,
"allow_donation": _allow_donation(course_modes, enrollment.course_overview.id, enrollment)
}
for enrollment in recently_enrolled_courses
]
platform_name = configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
return render_to_string(
'enrollment/course_enrollment_message.html',
{'course_enrollment_messages': enroll_messages, 'platform_name': platform_name}
)
def _get_recently_enrolled_courses(course_enrollments):
"""
Given a list of enrollments, filter out all but recent enrollments.
Args:
course_enrollments (list[CourseEnrollment]): A list of course enrollments.
Returns:
list[CourseEnrollment]: A list of recent course enrollments.
"""
seconds = DashboardConfiguration.current().recent_enrollment_time_delta
time_delta = (datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds))
return [
enrollment for enrollment in course_enrollments
# If the enrollment has no created date, we are explicitly excluding the course
# from the list of recent enrollments.
if enrollment.is_active and enrollment.created > time_delta
]
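# Example: with recent_enrollment_time_delta == 600, only active enrollments
# created within the last ten minutes are returned, and those drive the
# one-time confirmation message built by _create_recent_enrollment_message.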
def _allow_donation(course_modes, course_id, enrollment):
"""Determines if the dashboard will request donations for the given course.
Check if donations are configured for the platform, and if the current course is accepting donations.
Args:
course_modes (dict): Mapping of course ID's to course mode dictionaries.
course_id (str): The unique identifier for the course.
enrollment(CourseEnrollment): The enrollment object in which the user is enrolled
Returns:
True if the course is allowing donations.
"""
if course_id not in course_modes:
flat_unexpired_modes = {
unicode(course_id): [mode for mode in modes]
for course_id, modes in course_modes.iteritems()
}
flat_all_modes = {
unicode(course_id): [mode.slug for mode in modes]
for course_id, modes in CourseMode.all_modes_for_courses([course_id]).iteritems()
}
log.error(
        u'Cannot find `%s` in course modes `%s`. All modes: `%s`',
course_id,
flat_unexpired_modes,
flat_all_modes
)
donations_enabled = DonationConfiguration.current().enabled
return (
donations_enabled and
enrollment.mode in course_modes[course_id] and
course_modes[course_id][enrollment.mode].min_price == 0
)
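# In short, donations are solicited only for free modes: e.g. an "honor"
# enrollment whose mode has min_price == 0, on a platform where
# DonationConfiguration is enabled. A paid "verified" enrollment never
# triggers the donation prompt.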
def _update_email_opt_in(request, org):
"""Helper function used to hit the profile API if email opt-in is enabled."""
email_opt_in = request.POST.get('email_opt_in')
if email_opt_in is not None:
email_opt_in_boolean = email_opt_in == 'true'
preferences_api.update_email_opt_in(request.user, org, email_opt_in_boolean)
def _credit_statuses(user, course_enrollments):
"""
Retrieve the status for credit courses.
    A credit course is a course for which a user can purchase
    college credit. The current flow is:
1. User becomes eligible for credit (submits verifications, passes the course, etc.)
2. User purchases credit from a particular credit provider.
3. User requests credit from the provider, usually creating an account on the provider's site.
4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.
The dashboard is responsible for communicating the user's state in this flow.
Arguments:
user (User): The currently logged-in user.
course_enrollments (list[CourseEnrollment]): List of enrollments for the
user.
Returns: dict
The returned dictionary has keys that are `CourseKey`s and values that
are dictionaries with:
* eligible (bool): True if the user is eligible for credit in this course.
* deadline (datetime): The deadline for purchasing and requesting credit for this course.
* purchased (bool): Whether the user has purchased credit for this course.
* provider_name (string): The display name of the credit provider.
* provider_status_url (string): A URL the user can visit to check on their credit request status.
* request_status (string): Either "pending", "approved", or "rejected"
* error (bool): If true, an unexpected error occurred when retrieving the credit status,
so the user should contact the support team.
Example:
>>> _credit_statuses(user, course_enrollments)
{
CourseKey.from_string("edX/DemoX/Demo_Course"): {
"course_key": "edX/DemoX/Demo_Course",
"eligible": True,
"deadline": 2015-11-23 00:00:00 UTC,
"purchased": True,
"provider_name": "Hogwarts",
"provider_status_url": "http://example.com/status",
"request_status": "pending",
"error": False
}
}
"""
from openedx.core.djangoapps.credit import api as credit_api
# Feature flag off
if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
return {}
request_status_by_course = {
request["course_key"]: request["status"]
for request in credit_api.get_credit_requests_for_user(user.username)
}
credit_enrollments = {
enrollment.course_id: enrollment
for enrollment in course_enrollments
if enrollment.mode == "credit"
}
# When a user purchases credit in a course, the user's enrollment
# mode is set to "credit" and an enrollment attribute is set
# with the ID of the credit provider. We retrieve *all* such attributes
# here to minimize the number of database queries.
purchased_credit_providers = {
attribute.enrollment.course_id: attribute.value
for attribute in CourseEnrollmentAttribute.objects.filter(
namespace="credit",
name="provider_id",
enrollment__in=credit_enrollments.values()
).select_related("enrollment")
}
provider_info_by_id = {
provider["id"]: provider
for provider in credit_api.get_credit_providers()
}
statuses = {}
for eligibility in credit_api.get_eligibilities_for_user(user.username):
course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
providers_names = get_credit_provider_display_names(course_key)
status = {
"course_key": unicode(course_key),
"eligible": True,
"deadline": eligibility["deadline"],
"purchased": course_key in credit_enrollments,
"provider_name": make_providers_strings(providers_names),
"provider_status_url": None,
"provider_id": None,
"request_status": request_status_by_course.get(course_key),
"error": False,
}
# If the user has purchased credit, then include information about the credit
# provider from which the user purchased credit.
        # We retrieve the provider's ID from an "enrollment attribute" set on the user's
# enrollment when the user's order for credit is fulfilled by the E-Commerce service.
if status["purchased"]:
provider_id = purchased_credit_providers.get(course_key)
if provider_id is None:
status["error"] = True
log.error(
u"Could not find credit provider associated with credit enrollment "
u"for user %s in course %s. The user will not be able to see his or her "
u"credit request status on the student dashboard. This attribute should "
u"have been set when the user purchased credit in the course.",
user.id, course_key
)
else:
provider_info = provider_info_by_id.get(provider_id, {})
status["provider_name"] = provider_info.get("display_name")
status["provider_status_url"] = provider_info.get("status_url")
status["provider_id"] = provider_id
statuses[course_key] = status
return statuses
@transaction.non_atomic_requests
@require_POST
@outer_atomic(read_committed=True)
def change_enrollment(request, check_access=True):
"""
Modify the enrollment status for the logged-in user.
The request parameter must be a POST request (other methods return 405)
that specifies course_id and enrollment_action parameters. If course_id or
enrollment_action is not specified, if course_id is not valid, if
enrollment_action is something other than "enroll" or "unenroll", if
enrollment_action is "enroll" and enrollment is closed for the course, or
if enrollment_action is "unenroll" and the user is not enrolled in the
course, a 400 error will be returned. If the user is not logged in, 403
will be returned; it is important that only this case return 403 so the
front end can redirect the user to a registration or login page when this
happens. This function should only be called from an AJAX request, so
the error messages in the responses should never actually be user-visible.
Args:
request (`Request`): The Django request object
Keyword Args:
check_access (boolean): If True, we check that an accessible course actually
exists for the given course_key before we enroll the student.
            The default is True, which is what any standard enrollment flow
            should use; pass False only for legacy code or flows with
            non-standard behavior (e.g. beta tester invitations).
Returns:
Response
"""
# Get the user
user = request.user
# Ensure the user is authenticated
if not user.is_authenticated():
return HttpResponseForbidden()
# Ensure we received a course_id
action = request.POST.get("enrollment_action")
if 'course_id' not in request.POST:
return HttpResponseBadRequest(_("Course id not specified"))
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
except InvalidKeyError:
log.warning(
u"User %s tried to %s with invalid course id: %s",
user.username,
action,
request.POST.get("course_id"),
)
return HttpResponseBadRequest(_("Invalid course id"))
if action == "enroll":
# Make sure the course exists
# We don't do this check on unenroll, or a bad course id can't be unenrolled from
if not modulestore().has_course(course_id):
log.warning(
u"User %s tried to enroll in non-existent course %s",
user.username,
course_id
)
return HttpResponseBadRequest(_("Course id is invalid"))
# Record the user's email opt-in preference
if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
_update_email_opt_in(request, course_id.org)
available_modes = CourseMode.modes_for_course_dict(course_id)
# Check whether the user is blocked from enrolling in this course
# This can occur if the user's IP is on a global blacklist
# or if the user is enrolling in a country in which the course
# is not available.
redirect_url = embargo_api.redirect_if_blocked(
course_id, user=user, ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return HttpResponse(redirect_url)
# Check that auto enrollment is allowed for this course
# (= the course is NOT behind a paywall)
if CourseMode.can_auto_enroll(course_id):
# Enroll the user using the default mode (audit)
# We're assuming that users of the course enrollment table
# will NOT try to look up the course enrollment model
# by its slug. If they do, it's possible (based on the state of the database)
# for no such model to exist, even though we've set the enrollment type
# to "audit".
try:
enroll_mode = CourseMode.auto_enroll_mode(course_id, available_modes)
if enroll_mode:
enrollment = CourseEnrollment.enroll(user, course_id, check_access=check_access, mode=enroll_mode)
enrollment.send_signal(EnrollStatusChange.enroll)
except Exception: # pylint: disable=broad-except
return HttpResponseBadRequest(_("Could not enroll"))
# If we have more than one course mode or professional ed is enabled,
# then send the user to the choose your track page.
# (In the case of no-id-professional/professional ed, this will redirect to a page that
# funnels users directly into the verification / payment flow)
if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
return HttpResponse(
reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)})
)
# Otherwise, there is only one mode available (the default)
return HttpResponse()
elif action == "unenroll":
enrollment = CourseEnrollment.get_enrollment(user, course_id)
if not enrollment:
return HttpResponseBadRequest(_("You are not enrolled in this course"))
certificate_info = cert_info(user, enrollment.course_overview, enrollment.mode)
if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES:
return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course"))
CourseEnrollment.unenroll(user, course_id)
return HttpResponse()
else:
return HttpResponseBadRequest(_("Enrollment action is invalid"))
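# Client protocol sketch (hypothetical jQuery call): the dashboard posts
#   $.post('/change_enrollment', {course_id: id, enrollment_action: 'enroll'})
# and treats a 403 as "send the user to login", a non-empty 200 body as a
# URL to redirect to (e.g. the track-selection page), and an empty 200 as
# success.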
# Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""): # pylint: disable=too-many-statements,unused-argument
"""AJAX request to log in the user."""
backend_name = None
email = None
password = None
redirect_url = None
response = None
running_pipeline = None
third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
third_party_auth_successful = False
trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))
user = None
platform_name = configuration_helpers.get_value("platform_name", settings.PLATFORM_NAME)
if third_party_auth_requested and not trumped_by_first_party_auth:
# The user has already authenticated via third-party auth and has not
# asked to do first party auth by supplying a username or password. We
# now want to put them through the same logging and cookie calculation
# logic as with first-party auth.
running_pipeline = pipeline.get(request)
username = running_pipeline['kwargs'].get('username')
backend_name = running_pipeline['backend']
third_party_uid = running_pipeline['kwargs']['uid']
requested_provider = provider.Registry.get_from_pipeline(running_pipeline)
try:
user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid)
third_party_auth_successful = True
except User.DoesNotExist:
AUDIT_LOG.warning(
u"Login failed - user with username {username} has no social auth "
"with backend_name {backend_name}".format(
username=username, backend_name=backend_name)
)
message = _(
"You've successfully logged into your {provider_name} account, "
"but this account isn't linked with an {platform_name} account yet."
).format(
platform_name=platform_name,
provider_name=requested_provider.name,
)
message += "<br/><br/>"
message += _(
"Use your {platform_name} username and password to log into {platform_name} below, "
"and then link your {platform_name} account with {provider_name} from your dashboard."
).format(
platform_name=platform_name,
provider_name=requested_provider.name,
)
message += "<br/><br/>"
message += _(
"If you don't have an {platform_name} account yet, "
"click <strong>Register</strong> at the top of the page."
).format(
platform_name=platform_name
)
return HttpResponse(message, content_type="text/plain", status=403)
else:
if 'email' not in request.POST or 'password' not in request.POST:
return JsonResponse({
"success": False,
# TODO: User error message
"value": _('There was an error receiving your login information. Please email us.'),
}) # TODO: this should be status code 400
email = request.POST['email']
password = request.POST['password']
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Unknown user email")
else:
AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))
# check if the user has a linked shibboleth account, if so, redirect the user to shib-login
# This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu
# address into the Gmail login.
if settings.FEATURES.get('AUTH_USE_SHIB') and user:
try:
eamap = ExternalAuthMap.objects.get(user=user)
if eamap.external_domain.startswith(openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return JsonResponse({
"success": False,
"redirect": reverse('shib-login'),
}) # TODO: this should be status code 301 # pylint: disable=fixme
except ExternalAuthMap.DoesNotExist:
                # This is the common case: logging in a user without a linked external login.
AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)
# see if account has been locked out due to excessive login failures
user_found_by_email_lookup = user
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
lockout_message = _('This account has been temporarily locked due '
'to excessive login failures. Try again later.')
return JsonResponse({
"success": False,
"value": lockout_message,
}) # TODO: this should be status code 429 # pylint: disable=fixme
# see if the user must reset his/her password due to any policy settings
if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
return JsonResponse({
"success": False,
"value": _('Your password has expired due to password policy on this account. You must '
'reset your password before you can log in again. Please click the '
'"Forgot Password" link on this page to reset your password before logging in again.'),
}) # TODO: this should be status code 403 # pylint: disable=fixme
# if the user doesn't exist, we want to set the username to an invalid
# username so that authentication is guaranteed to fail and we can take
# advantage of the ratelimited backend
username = user.username if user else ""
if not third_party_auth_successful:
try:
user = authenticate(username=username, password=password, request=request)
# this occurs when there are too many attempts from the same IP address
except RateLimitException:
return JsonResponse({
"success": False,
"value": _('Too many failed login attempts. Try again later.'),
}) # TODO: this should be status code 429 # pylint: disable=fixme
if user is None:
# tick the failed login counters if the user exists in the database
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
LoginFailures.increment_lockout_counter(user_found_by_email_lookup)
# if we didn't find this username earlier, the account for this email
# doesn't exist, and doesn't have a corresponding password
if username != "":
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
else:
AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
return JsonResponse({
"success": False,
"value": _('Email or password is incorrect.'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
# successful login, clear failed login attempts counters, if applicable
if LoginFailures.is_feature_enabled():
LoginFailures.clear_lockout_counter(user)
# Track the user's sign in
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.identify(
user.id,
{
'email': email,
'username': username
},
{
# Disable MailChimp because we don't want to update the user's email
# and username in MailChimp on every page load. We only need to capture
# this data on registration/activation.
'MailChimp': False
}
)
analytics.track(
user.id,
"edx.bi.user.account.authenticated",
{
'category': "conversion",
'label': request.POST.get('course_id'),
'provider': None
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
if user is not None and user.is_active:
try:
# We do not log here, because we have a handler registered
# to perform logging on successful logins.
login(request, user)
if request.POST.get('remember') == 'true':
request.session.set_expiry(604800)
log.debug("Setting user session to never expire")
else:
request.session.set_expiry(0)
except Exception as exc: # pylint: disable=broad-except
AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
log.critical("Login failed - Could not create session. Is memcached running?")
log.exception(exc)
raise
redirect_url = None # The AJAX method calling should know the default destination upon success
if third_party_auth_successful:
redirect_url = pipeline.get_complete_url(backend_name)
response = JsonResponse({
"success": True,
"redirect_url": redirect_url,
})
# Ensure that the external marketing site can
# detect that the user is logged in.
return set_logged_in_cookies(request, response, user)
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
else:
AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))
reactivation_email_for_user(user)
not_activated_msg = _("Before you sign in, you need to activate your account. We have sent you an "
"email message with instructions for activating your account.")
return JsonResponse({
"success": False,
"value": not_activated_msg,
}) # TODO: this should be status code 400 # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.strategy("social:complete")
def login_oauth_token(request, backend):
"""
Authenticate the client using an OAuth access token by using the token to
retrieve information from a third party and matching that information to an
existing user.
"""
warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
backend = request.backend
if isinstance(backend, social_oauth.BaseOAuth1) or isinstance(backend, social_oauth.BaseOAuth2):
if "access_token" in request.POST:
# Tell third party auth pipeline that this is an API call
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
user = None
try:
user = backend.do_auth(request.POST["access_token"])
except (HTTPError, AuthException):
pass
# do_auth can return a non-User object if it fails
if user and isinstance(user, User):
login(request, user)
return JsonResponse(status=204)
else:
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
return JsonResponse({"error": "invalid_token"}, status=401)
else:
return JsonResponse({"error": "invalid_request"}, status=400)
raise Http404
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
"""
Renders the view used to manage user standing. Also displays a table
of user accounts that have been disabled and who disabled them.
"""
if not request.user.is_staff:
raise Http404
all_disabled_accounts = UserStanding.objects.filter(
account_status=UserStanding.ACCOUNT_DISABLED
)
all_disabled_users = [standing.user for standing in all_disabled_accounts]
headers = ['username', 'account_changed_by']
rows = []
for user in all_disabled_users:
row = [user.username, user.standing.changed_by]
rows.append(row)
context = {'headers': headers, 'rows': rows}
return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
"""
Ajax call to change user standing. Endpoint of the form
in manage_user_standing.html
"""
if not request.user.is_staff:
raise Http404
username = request.POST.get('username')
context = {}
if username is None or username.strip() == '':
context['message'] = _('Please enter a username')
return JsonResponse(context, status=400)
account_action = request.POST.get('account_action')
if account_action is None:
context['message'] = _('Please choose an option')
return JsonResponse(context, status=400)
username = username.strip()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
context['message'] = _("User with username {} does not exist").format(username)
return JsonResponse(context, status=400)
else:
user_account, _success = UserStanding.objects.get_or_create(
user=user, defaults={'changed_by': request.user},
)
if account_action == 'disable':
user_account.account_status = UserStanding.ACCOUNT_DISABLED
context['message'] = _("Successfully disabled {}'s account").format(username)
log.info(u"%s disabled %s's account", request.user, username)
elif account_action == 'reenable':
user_account.account_status = UserStanding.ACCOUNT_ENABLED
context['message'] = _("Successfully reenabled {}'s account").format(username)
log.info(u"%s reenabled %s's account", request.user, username)
else:
context['message'] = _("Unexpected account status")
return JsonResponse(context, status=400)
user_account.changed_by = request.user
user_account.standing_last_changed_at = datetime.datetime.now(UTC)
user_account.save()
return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
"""JSON call to change a profile setting: Right now, location"""
# TODO (vshnayder): location is no longer used
u_prof = UserProfile.objects.get(user=request.user) # request.user.profile_cache
if 'location' in request.POST:
u_prof.location = request.POST['location']
u_prof.save()
return JsonResponse({
"success": True,
"location": u_prof.location,
})
class AccountValidationError(Exception):
def __init__(self, message, field):
super(AccountValidationError, self).__init__(message)
self.field = field
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs): # pylint: disable=unused-argument
"""
handler that saves the user Signup Source
when the user is created
"""
if 'created' in kwargs and kwargs['created']:
site = configuration_helpers.get_value('SITE_NAME')
if site:
user_signup_source = UserSignupSource(user=kwargs['instance'], site=site)
user_signup_source.save()
log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
def _do_create_account(form, custom_form=None):
"""
Given cleaned post variables, create the User and UserProfile objects, as well as the
registration for this user.
Returns a tuple (User, UserProfile, Registration).
Note: this function is also used for creating test users.
"""
errors = {}
errors.update(form.errors)
if custom_form:
errors.update(custom_form.errors)
if errors:
raise ValidationError(errors)
user = User(
username=form.cleaned_data["username"],
email=form.cleaned_data["email"],
is_active=False
)
user.set_password(form.cleaned_data["password"])
registration = Registration()
# TODO: Rearrange so that if part of the process fails, the whole process fails.
# Right now, we can have e.g. no registration e-mail sent out and a zombie account
try:
with transaction.atomic():
user.save()
if custom_form:
custom_model = custom_form.save(commit=False)
custom_model.user = user
custom_model.save()
except IntegrityError:
# Figure out the cause of the integrity error
if len(User.objects.filter(username=user.username)) > 0:
raise AccountValidationError(
_("An account with the Public Username '{username}' already exists.").format(username=user.username),
field="username"
)
elif len(User.objects.filter(email=user.email)) > 0:
raise AccountValidationError(
_("An account with the Email '{email}' already exists.").format(email=user.email),
field="email"
)
else:
raise
# add this account creation to password history
# NOTE, this will be a NOP unless the feature has been turned on in configuration
password_history_entry = PasswordHistory()
password_history_entry.create(user)
registration.register(user)
profile_fields = [
"name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
"year_of_birth"
]
profile = UserProfile(
user=user,
**{key: form.cleaned_data.get(key) for key in profile_fields}
)
extended_profile = form.cleaned_extended_profile
if extended_profile:
profile.meta = json.dumps(extended_profile)
try:
profile.save()
except Exception: # pylint: disable=broad-except
log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
raise
return (user, profile, registration)
def create_account_with_params(request, params):
"""
Given a request and a dict of parameters (which may or may not have come
from the request), create an account for the requesting user, including
creating a comments service user object and sending an activation email.
This also takes external/third-party auth into account, updates that as
necessary, and authenticates the user for the request's session.
Does not return anything.
Raises AccountValidationError if an account with the username or email
specified by params already exists, or ValidationError if any of the given
parameters is invalid for any other reason.
Issues with this code:
* It is not transactional. If there is a failure part-way, an incomplete
account will be created and left in the database.
* Third-party auth passwords are not verified. There is a comment that
they are unused, but it would be helpful to have a sanity check that
they are sane.
    * It is over 300 lines long (!) and includes disparate functionality, from
registration e-mails to all sorts of other things. It should be broken
up into semantically meaningful functions.
* The user-facing text is rather unfriendly (e.g. "Username must be a
minimum of two characters long" rather than "Please use a username of
at least two characters").
"""
# Copy params so we can modify it; we can't just do dict(params) because if
# params is request.POST, that results in a dict containing lists of values
params = dict(params.items())
# allow to define custom set of required/optional/hidden fields via configuration
extra_fields = configuration_helpers.get_value(
'REGISTRATION_EXTRA_FIELDS',
getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
)
# Boolean of whether a 3rd party auth provider and credentials were provided in
# the API so the newly created account can link with the 3rd party account.
#
# Note: this is orthogonal to the 3rd party authentication pipeline that occurs
# when the account is created via the browser and redirect URLs.
should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params
if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
params["password"] = pipeline.make_random_password()
# if doing signup for an external authorization, then get email, password, name from the eamap
# don't use the ones from the form, since the user could have hacked those
# unless originally we didn't get a valid email or name from the external auth
# TODO: We do not check whether these values meet all necessary criteria, such as email length
do_external_auth = 'ExternalAuthMap' in request.session
if do_external_auth:
eamap = request.session['ExternalAuthMap']
try:
validate_email(eamap.external_email)
params["email"] = eamap.external_email
except ValidationError:
pass
if eamap.external_name.strip() != '':
params["name"] = eamap.external_name
params["password"] = eamap.internal_password
log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])
extended_profile_fields = configuration_helpers.get_value('extended_profile_fields', [])
enforce_password_policy = (
settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
not do_external_auth
)
# Can't have terms of service for certain SHIB users, like at Stanford
registration_fields = getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
tos_required = (
registration_fields.get('terms_of_service') != 'hidden' or
registration_fields.get('honor_code') != 'hidden'
) and (
not settings.FEATURES.get("AUTH_USE_SHIB") or
not settings.FEATURES.get("SHIB_DISABLE_TOS") or
not do_external_auth or
not eamap.external_domain.startswith(openedx.core.djangoapps.external_auth.views.SHIBBOLETH_DOMAIN_PREFIX)
)
form = AccountCreationForm(
data=params,
extra_fields=extra_fields,
extended_profile_fields=extended_profile_fields,
enforce_username_neq_password=True,
enforce_password_policy=enforce_password_policy,
tos_required=tos_required,
)
custom_form = get_registration_extension_form(data=params)
# Perform operations within a transaction that are critical to account creation
with transaction.atomic():
# first, create the account
(user, profile, registration) = _do_create_account(form, custom_form)
# next, link the account with social auth, if provided via the API.
# (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
if should_link_with_social_auth:
backend_name = params['provider']
request.social_strategy = social_utils.load_strategy(request)
redirect_uri = reverse('social:complete', args=(backend_name, ))
request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
social_access_token = params.get('access_token')
if not social_access_token:
raise ValidationError({
'access_token': [
_("An access_token is required when passing value ({}) for provider.").format(
params['provider']
)
]
})
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
pipeline_user = None
error_message = ""
try:
pipeline_user = request.backend.do_auth(social_access_token, user=user)
except AuthAlreadyAssociated:
error_message = _("The provided access_token is already associated with another user.")
except (HTTPError, AuthException):
error_message = _("The provided access_token is not valid.")
if not pipeline_user or not isinstance(pipeline_user, User):
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
raise ValidationError({'access_token': [error_message]})
# Perform operations that are non-critical parts of account creation
preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())
if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
try:
enable_notifications(user)
except Exception: # pylint: disable=broad-except
log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))
dog_stats_api.increment("common.student.account_created")
# If the user is registering via 3rd party auth, track which provider they use
third_party_provider = None
running_pipeline = None
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)
# Track the user's registration
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
identity_args = [
user.id, # pylint: disable=no-member
{
'email': user.email,
'username': user.username,
'name': profile.name,
# Mailchimp requires the age & yearOfBirth to be integers, we send a sane integer default if falsey.
'age': profile.age or -1,
'yearOfBirth': profile.year_of_birth or datetime.datetime.now(UTC).year,
'education': profile.level_of_education_display,
'address': profile.mailing_address,
'gender': profile.gender_display,
'country': unicode(profile.country),
}
]
if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'):
identity_args.append({
"MailChimp": {
"listId": settings.MAILCHIMP_NEW_USER_LIST_ID
}
})
analytics.identify(*identity_args)
analytics.track(
user.id,
"edx.bi.user.account.registered",
{
'category': 'conversion',
'label': params.get('course_id'),
'provider': third_party_provider.name if third_party_provider else None
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
# Announce registration
REGISTER_USER.send(sender=None, user=user, profile=profile)
create_comments_service_user(user)
# Don't send email if we are:
#
# 1. Doing load testing.
# 2. Random user generation for other forms of testing.
# 3. External auth bypassing activation.
# 4. Have the platform configured to not require e-mail activation.
# 5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
#
# Note that this feature is only tested as a flag set one way or
    # the other for *new* systems. We need to be careful about
# changing settings on a running system to make sure no users are
# left in an inconsistent state (or doing a migration if they are).
send_email = (
not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and
not (
third_party_provider and third_party_provider.skip_email_verification and
user.email == running_pipeline['kwargs'].get('details', {}).get('email')
)
)
if send_email:
dest_addr = user.email
context = {
'name': profile.name,
'key': registration.activation_key,
}
# composes activation email
subject = render_to_string('emails/activation_email_subject.txt', context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
from_address = configuration_helpers.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
'-' * 80 + '\n\n' + message)
mail.send_mail(subject, message, from_address, [dest_addr], fail_silently=False)
else:
user.email_user(subject, message, from_address)
except Exception: # pylint: disable=broad-except
log.error(
u'Unable to send activation email to user from "%s" to "%s"',
from_address,
dest_addr,
exc_info=True
)
else:
registration.activate()
_enroll_user_in_pending_courses(user) # Enroll student in any pending courses
# Immediately after a user creates an account, we log them in. They are only
# logged in until they close the browser. They can't log in again until they click
# the activation link from the email.
new_user = authenticate(username=user.username, password=params['password'])
login(request, new_user)
request.session.set_expiry(0)
_record_registration_attribution(request, new_user)
# TODO: there is no error checking here to see that the user actually logged in successfully,
# and is not yet an active user.
if new_user is not None:
AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))
if do_external_auth:
eamap.user = new_user
eamap.dtsignup = datetime.datetime.now(UTC)
eamap.save()
AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)
if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
log.info('bypassing activation email')
new_user.is_active = True
new_user.save()
AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))
return new_user
def _enroll_user_in_pending_courses(student):
"""
Enroll student in any pending courses he/she may have.
"""
ceas = CourseEnrollmentAllowed.objects.filter(email=student.email)
for cea in ceas:
if cea.auto_enroll:
enrollment = CourseEnrollment.enroll(student, cea.course_id)
manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student.email)
if manual_enrollment_audit is not None:
# get the enrolled by user and reason from the ManualEnrollmentAudit table.
# then create a new ManualEnrollmentAudit table entry for the same email
# different transition state.
ManualEnrollmentAudit.create_manual_enrollment_audit(
manual_enrollment_audit.enrolled_by, student.email, ALLOWEDTOENROLL_TO_ENROLLED,
manual_enrollment_audit.reason, enrollment
)
def _record_registration_attribution(request, user):
"""
Attribute this user's registration to the referring affiliate, if
applicable.
"""
affiliate_id = request.COOKIES.get(settings.AFFILIATE_COOKIE_NAME)
if user is not None and affiliate_id is not None:
UserAttribute.set_user_attribute(user, REGISTRATION_AFFILIATE_ID, affiliate_id)
@csrf_exempt
def create_account(request, post_override=None):
"""
JSON call to create new edX account.
Used by form in signup_modal.html, which is included into navigation.html
"""
warnings.warn("Please use RegistrationView instead.", DeprecationWarning)
try:
user = create_account_with_params(request, post_override or request.POST)
except AccountValidationError as exc:
return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
except ValidationError as exc:
field, error_list = next(exc.message_dict.iteritems())
return JsonResponse(
{
"success": False,
"field": field,
"value": error_list[0],
},
status=400
)
redirect_url = None # The AJAX method calling should know the default destination upon success
# Resume the third-party-auth pipeline if necessary.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
redirect_url = pipeline.get_complete_url(running_pipeline['backend'])
response = JsonResponse({
'success': True,
'redirect_url': redirect_url,
})
set_logged_in_cookies(request, response, user)
return response
def auto_auth(request):
"""
Create or configure a user account, then log in as that user.
Enabled only when
settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.
Accepts the following querystring parameters:
* `username`, `email`, and `password` for the user account
* `full_name` for the user profile (the user's full name; defaults to the username)
* `staff`: Set to "true" to make the user global staff.
* `course_id`: Enroll the student in the course with `course_id`
* `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
* `no_login`: Define this to create the user but not login
* `redirect`: Set to "true" will redirect to the `redirect_to` value if set, or
course home page if course_id is defined, otherwise it will redirect to dashboard
    * `redirect_to`: will redirect to this URL
If username, email, or password are not provided, use
randomly generated credentials.
"""
# Generate a unique name to use if none provided
unique_name = uuid.uuid4().hex[0:30]
# Use the params from the request, otherwise use these defaults
username = request.GET.get('username', unique_name)
password = request.GET.get('password', unique_name)
email = request.GET.get('email', unique_name + "@example.com")
full_name = request.GET.get('full_name', username)
is_staff = request.GET.get('staff', None)
is_superuser = request.GET.get('superuser', None)
course_id = request.GET.get('course_id', None)
redirect_to = request.GET.get('redirect_to', None)
# mode has to be one of 'honor'/'professional'/'verified'/'audit'/'no-id-professional'/'credit'
enrollment_mode = request.GET.get('enrollment_mode', 'honor')
course_key = None
if course_id:
course_key = CourseLocator.from_string(course_id)
role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
redirect_when_done = request.GET.get('redirect', '').lower() == 'true' or redirect_to
login_when_done = 'no_login' not in request.GET
form = AccountCreationForm(
data={
'username': username,
'email': email,
'password': password,
'name': full_name,
},
tos_required=False
)
# Attempt to create the account.
# If successful, this will return a tuple containing
# the new user object.
try:
user, profile, reg = _do_create_account(form)
except (AccountValidationError, ValidationError):
# Attempt to retrieve the existing user.
user = User.objects.get(username=username)
user.email = email
user.set_password(password)
user.save()
profile = UserProfile.objects.get(user=user)
reg = Registration.objects.get(user=user)
# Set the user's global staff bit
if is_staff is not None:
user.is_staff = (is_staff == "true")
user.save()
if is_superuser is not None:
user.is_superuser = (is_superuser == "true")
user.save()
# Activate the user
reg.activate()
reg.save()
# ensure parental consent threshold is met
year = datetime.date.today().year
age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
profile.year_of_birth = (year - age_limit) - 1
profile.save()
# Enroll the user in a course
if course_key is not None:
CourseEnrollment.enroll(user, course_key, mode=enrollment_mode)
# Apply the roles
for role_name in role_names:
role = Role.objects.get(name=role_name, course_id=course_key)
user.roles.add(role)
# Log in as the user
if login_when_done:
user = authenticate(username=username, password=password)
login(request, user)
create_comments_service_user(user)
# Provide the user with a valid CSRF token
# then return a 200 response unless redirect is true
if redirect_when_done:
# Redirect to specific page if specified
if redirect_to:
redirect_url = redirect_to
# Redirect to course info page if course_id is known
elif course_id:
try:
# redirect to course info page in LMS
redirect_url = reverse(
'info',
kwargs={'course_id': course_id}
)
except NoReverseMatch:
# redirect to course outline page in Studio
redirect_url = reverse(
'course_handler',
kwargs={'course_key_string': course_id}
)
else:
try:
# redirect to dashboard for LMS
redirect_url = reverse('dashboard')
except NoReverseMatch:
# redirect to home for Studio
redirect_url = reverse('home')
return redirect(redirect_url)
elif request.META.get('HTTP_ACCEPT') == 'application/json':
response = JsonResponse({
'created_status': u"Logged in" if login_when_done else "Created",
'username': username,
'email': email,
'password': password,
'user_id': user.id, # pylint: disable=no-member
'anonymous_id': anonymous_id_for_user(user, None),
})
else:
success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
u"Logged in" if login_when_done else "Created",
username, email, password, user.id # pylint: disable=no-member
)
response = HttpResponse(success_msg)
response.set_cookie('csrftoken', csrf(request)['csrf_token'])
return response
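# Illustrative only (not part of the original view): a typical auto_auth
# request, assuming FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is enabled; every
# parameter value below is hypothetical.
#
#   GET /auto_auth?username=test_user&staff=true&course_id=course-v1:edX+Demo+2017
#
# This would create (or update) test_user, mark them as global staff, enroll
# them in the course, and log them in, returning a plain 200 response because
# redirect=true is not passed.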
@ensure_csrf_cookie
def activate_account(request, key):
"""When link in activation e-mail is clicked"""
regs = Registration.objects.filter(activation_key=key)
if len(regs) == 1:
user_logged_in = request.user.is_authenticated()
already_active = True
if not regs[0].user.is_active:
regs[0].activate()
already_active = False
# Enroll student in any pending courses he/she may have if auto_enroll flag is set
_enroll_user_in_pending_courses(regs[0].user)
resp = render_to_response(
"registration/activation_complete.html",
{
'user_logged_in': user_logged_in,
'already_active': already_active
}
)
return resp
if len(regs) == 0:
return render_to_response(
"registration/activation_invalid.html",
{'csrf': csrf(request)['csrf_token']}
)
return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
@csrf_exempt
@require_POST
def password_reset(request):
""" Attempts to send a password reset e-mail. """
# Add some rate limiting here by re-using the RateLimitMixin as a helper class
limiter = BadRequestRateLimiter()
if limiter.is_rate_limit_exceeded(request):
AUDIT_LOG.warning("Rate limit exceeded in password_reset")
return HttpResponseForbidden()
form = PasswordResetFormNoActive(request.POST)
if form.is_valid():
form.save(use_https=request.is_secure(),
from_email=configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
request=request,
domain_override=request.get_host())
        # When the password change is complete, an "edx.user.settings.changed" event will be emitted.
# But because changing the password is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "password",
"old": None,
"new": None,
"user_id": request.user.id,
}
)
destroy_oauth_tokens(request.user)
else:
# bad user? tick the rate limiter counter
AUDIT_LOG.info("Bad password_reset user passed in.")
limiter.tick_bad_request_counter(request)
return JsonResponse({
'success': True,
'value': render_to_string('registration/password_reset_done.html', {}),
})
def uidb36_to_uidb64(uidb36):
"""
Needed to support old password reset URLs that use base36-encoded user IDs
https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231
Args:
uidb36: base36-encoded user ID
    Returns: base64-encoded user ID, or a dummy invalid ID if the conversion fails
"""
try:
uidb64 = force_text(urlsafe_base64_encode(force_bytes(base36_to_int(uidb36))))
except ValueError:
uidb64 = '1' # dummy invalid ID (incorrect padding for base64)
return uidb64
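# Illustrative only (not part of the original module): for user ID 1, the
# base36 string '1' decodes to the integer 1, which is re-encoded as the
# urlsafe base64 of b'1' (output shown for illustration; the exact helper
# behavior depends on the Django version):
#
#   >>> uidb36_to_uidb64('1')
#   'MQ'
#
# A malformed uidb36 value falls through to the dummy '1', which later fails
# base64 decoding inside Django's password_reset_confirm.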
def validate_password(user, password):
"""
Tie in password policy enforcement as an optional level of
security protection
Args:
user: the user object whose password we're checking.
password: the user's proposed new password.
Returns:
is_valid_password: a boolean indicating if the new password
passes the validation.
err_msg: an error message if there's a violation of one of the password
checks. Otherwise, `None`.
"""
err_msg = None
if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
try:
validate_password_strength(password)
except ValidationError as err:
err_msg = _('Password: ') + '; '.join(err.messages)
# also, check the password reuse policy
if not PasswordHistory.is_allowable_password_reuse(user, password):
if user.is_staff:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
else:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
            # Because of how ngettext works, splitting the following into shorter lines would be ugly.
# pylint: disable=line-too-long
err_msg = ungettext(
"You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
"You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
num_distinct
).format(num=num_distinct)
        # also, check to see if passwords are getting reset too frequently
if PasswordHistory.is_password_reset_too_soon(user):
num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
            # Because of how ngettext works, splitting the following into shorter lines would be ugly.
# pylint: disable=line-too-long
err_msg = ungettext(
"You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
"You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
num_days
).format(num=num_days)
is_password_valid = err_msg is None
return is_password_valid, err_msg
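# Illustrative only (not part of the original module): a minimal sketch of
# how validate_password is consumed, assuming ENFORCE_PASSWORD_POLICY is
# enabled; the password value and error handling are hypothetical.
#
#   is_valid, err_msg = validate_password(user, 'proposed-password')
#   if not is_valid:
#       # err_msg names the violated check: strength, reuse, or
#       # reset-frequency policy.
#       context = {'err_msg': err_msg}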
def password_reset_confirm_wrapper(request, uidb36=None, token=None):
"""
A wrapper around django.contrib.auth.views.password_reset_confirm.
Needed because we want to set the user as active at this step.
We also optionally do some additional password policy checks.
"""
# convert old-style base36-encoded user id to base64
uidb64 = uidb36_to_uidb64(uidb36)
platform_name = {
"platform_name": configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
}
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
except (ValueError, User.DoesNotExist):
# if there's any error getting a user, just let django's
# password_reset_confirm function handle it.
return password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
if request.method == 'POST':
password = request.POST['new_password1']
is_password_valid, password_err_msg = validate_password(user, password)
if not is_password_valid:
# We have a password reset attempt which violates some security
# policy. Use the existing Django template to communicate that
# back to the user.
context = {
'validlink': False,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': password_err_msg,
}
context.update(platform_name)
return TemplateResponse(
request, 'registration/password_reset_confirm.html', context
)
# remember what the old password hash is before we call down
old_password_hash = user.password
response = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
# If password reset was unsuccessful a template response is returned (status_code 200).
# Check if form is invalid then show an error to the user.
# Note if password reset was successful we get response redirect (status_code 302).
if response.status_code == 200 and not response.context_data['form'].is_valid():
response.context_data['err_msg'] = _('Error in resetting your password. Please try again.')
return response
# get the updated user
updated_user = User.objects.get(id=uid_int)
# did the password hash change, if so record it in the PasswordHistory
if updated_user.password != old_password_hash:
entry = PasswordHistory()
entry.create(updated_user)
else:
response = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
response_was_successful = response.context_data.get('validlink')
if response_was_successful and not user.is_active:
user.is_active = True
user.save()
return response
def reactivation_email_for_user(user):
try:
reg = Registration.objects.get(user=user)
except Registration.DoesNotExist:
return JsonResponse({
"success": False,
"error": _('No inactive user with this e-mail exists'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
context = {
'name': user.profile.name,
'key': reg.activation_key,
}
subject = render_to_string('emails/activation_email_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
from_address = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
try:
user.email_user(subject, message, from_address)
except Exception: # pylint: disable=broad-except
log.error(
u'Unable to send reactivation email from "%s" to "%s"',
from_address,
user.email,
exc_info=True
)
return JsonResponse({
"success": False,
"error": _('Unable to send reactivation email')
}) # TODO: this should be status code 500 # pylint: disable=fixme
return JsonResponse({"success": True})
def validate_new_email(user, new_email):
"""
    Given a new email for a user, does some basic verification of the new address. If any issues
    are encountered during verification, a ValueError is raised.
"""
try:
validate_email(new_email)
except ValidationError:
raise ValueError(_('Valid e-mail address required.'))
if new_email == user.email:
raise ValueError(_('Old email is the same as the new email.'))
if User.objects.filter(email=new_email).count() != 0:
raise ValueError(_('An account with this e-mail already exists.'))
def do_email_change_request(user, new_email, activation_key=None):
"""
Given a new email for a user, does some basic verification of the new address and sends an activation message
to the new address. If any issues are encountered with verification or sending the message, a ValueError will
be thrown.
"""
pec_list = PendingEmailChange.objects.filter(user=user)
if len(pec_list) == 0:
pec = PendingEmailChange()
pec.user = user
else:
pec = pec_list[0]
    # if activation_key is not passed as an argument, generate a random key
if not activation_key:
activation_key = uuid.uuid4().hex
pec.new_email = new_email
pec.activation_key = activation_key
pec.save()
context = {
'key': pec.activation_key,
'old_email': user.email,
'new_email': pec.new_email
}
subject = render_to_string('emails/email_change_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/email_change.txt', context)
from_address = configuration_helpers.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
mail.send_mail(subject, message, from_address, [pec.new_email])
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
raise ValueError(_('Unable to send email activation link. Please try again later.'))
    # When the email address change is complete, an "edx.user.settings.changed" event will be emitted.
# But because changing the email address is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "email",
"old": context['old_email'],
"new": context['new_email'],
"user_id": user.id,
}
)
@ensure_csrf_cookie
def confirm_email_change(request, key): # pylint: disable=unused-argument
"""
    User requested a new e-mail. This is called when the activation
    link is clicked. We confirm with the old e-mail address, then update
    the account to use the new address.
"""
with transaction.atomic():
try:
pec = PendingEmailChange.objects.get(activation_key=key)
except PendingEmailChange.DoesNotExist:
response = render_to_response("invalid_email_key.html", {})
transaction.set_rollback(True)
return response
user = pec.user
address_context = {
'old_email': user.email,
'new_email': pec.new_email
}
if len(User.objects.filter(email=pec.new_email)) != 0:
response = render_to_response("email_exists.html", {})
transaction.set_rollback(True)
return response
subject = render_to_string('emails/email_change_subject.txt', address_context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/confirm_email_change.txt', address_context)
u_prof = UserProfile.objects.get(user=user)
meta = u_prof.get_meta()
if 'old_emails' not in meta:
meta['old_emails'] = []
meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
u_prof.set_meta(meta)
u_prof.save()
# Send it to the old email...
try:
user.email_user(
subject,
message,
configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to old address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': user.email})
transaction.set_rollback(True)
return response
user.email = pec.new_email
user.save()
pec.delete()
# And send it to the new email...
try:
user.email_user(
subject,
message,
configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to new address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': pec.new_email})
transaction.set_rollback(True)
return response
response = render_to_response("email_change_successful.html", address_context)
return response
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
"""Modify logged-in user's setting for receiving emails from a course."""
user = request.user
course_id = request.POST.get("course_id")
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
receive_emails = request.POST.get("receive_emails")
if receive_emails:
optout_object = Optout.objects.filter(user=user, course_id=course_key)
if optout_object:
optout_object.delete()
log.info(
u"User %s (%s) opted in to receive emails from course %s",
user.username,
user.email,
course_id,
)
track.views.server_track(
request,
"change-email-settings",
{"receive_emails": "yes", "course": course_id},
page='dashboard',
)
else:
Optout.objects.get_or_create(user=user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
user.username,
user.email,
course_id,
)
track.views.server_track(
request,
"change-email-settings",
{"receive_emails": "no", "course": course_id},
page='dashboard',
)
return JsonResponse({"success": True})
class LogoutView(TemplateView):
"""
Logs out user and redirects.
The template should load iframes to log the user out of OpenID Connect services.
See http://openid.net/specs/openid-connect-logout-1_0.html.
"""
oauth_client_ids = []
template_name = 'logout.html'
# Keep track of the page to which the user should ultimately be redirected.
target = reverse_lazy('cas-logout') if settings.FEATURES.get('AUTH_USE_CAS') else '/'
def dispatch(self, request, *args, **kwargs): # pylint: disable=missing-docstring
# We do not log here, because we have a handler registered to perform logging on successful logouts.
request.is_from_logout = True
# Get the list of authorized clients before we clear the session.
self.oauth_client_ids = request.session.get(edx_oauth2_provider.constants.AUTHORIZED_CLIENTS_SESSION_KEY, [])
logout(request)
# If we don't need to deal with OIDC logouts, just redirect the user.
if LogoutViewConfiguration.current().enabled and self.oauth_client_ids:
response = super(LogoutView, self).dispatch(request, *args, **kwargs)
else:
response = redirect(self.target)
# Clear the cookie used by the edx.org marketing site
delete_logged_in_cookies(response)
return response
def _build_logout_url(self, url):
"""
Builds a logout URL with the `no_redirect` query string parameter.
Args:
url (str): IDA logout URL
Returns:
str
"""
scheme, netloc, path, query_string, fragment = urlsplit(url)
query_params = parse_qs(query_string)
query_params['no_redirect'] = 1
new_query_string = urlencode(query_params, doseq=True)
return urlunsplit((scheme, netloc, path, new_query_string, fragment))
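    # Illustrative only (not part of the original class): given a hypothetical
    # IDA logout URL, _build_logout_url appends the no_redirect flag while
    # preserving the rest of the URL:
    #
    #   'https://studio.example.com/logout'
    #       -> 'https://studio.example.com/logout?no_redirect=1'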
def get_context_data(self, **kwargs):
context = super(LogoutView, self).get_context_data(**kwargs)
# Create a list of URIs that must be called to log the user out of all of the IDAs.
uris = Client.objects.filter(client_id__in=self.oauth_client_ids,
logout_uri__isnull=False).values_list('logout_uri', flat=True)
referrer = self.request.META.get('HTTP_REFERER', '').strip('/')
logout_uris = []
for uri in uris:
            if not referrer or not uri.startswith(referrer):
logout_uris.append(self._build_logout_url(uri))
context.update({
'target': self.target,
'logout_uris': logout_uris,
})
return context
|
tanmaykm/edx-platform
|
common/djangoapps/student/views.py
|
Python
|
agpl-3.0
| 105,433
|
[
"VisIt"
] |
c767c95b49a1b65dcfc8d4da66b20fedf67a37d6fd024eca2a408a0acab9c849
|
""" RPCClient object is used to create RPC connection to services
"""
__RCSID__ = "$Id$"
from DIRAC.Core.DISET.private.InnerRPCClient import InnerRPCClient
class _MagicMethod( object ):
""" This object allows to bundle together a function calling
an RPC and the remote function name.
When this object is called (__call__), the call is performed.
"""
def __init__( self, doRPCFunc, remoteFuncName ):
""" Constructor
:param doRPCFunc: the function actually performing the RPC call
:param remoteFuncName: name of the remote function
"""
self.__doRPCFunc = doRPCFunc
self.__remoteFuncName = remoteFuncName
def __getattr__( self, remoteFuncName ):
""" I really do not understand when this would be called.
I can only imagine it being called by dir, or things like that.
In any case, it recursively return a MagicMethod object
where the new remote function name is the old one to which
we append the new called attribute.
"""
return _MagicMethod( self.__doRPCFunc, "%s.%s" % ( self.__remoteFuncName, remoteFuncName ) )
def __call__(self, *args ):
""" Triggers the call.
it uses the RPC calling function given by RPCClient,
and gives as argument the remote function name and whatever
arguments given.
"""
return self.__doRPCFunc( self.__remoteFuncName, args )
def __str__( self ):
return "<RPCClient method %s>" % self.__remoteFuncName
class RPCClient( object ):
""" This class contains the mechanism to convert normal calls to RPC calls.
      When instantiated, it creates a :class:`~DIRAC.Core.DISET.private.InnerRPCClient.InnerRPCClient`
as an attribute. Any attribute which is accessed is then either redirected to InnerRPCClient if it has it,
or creates a MagicMethod object otherwise. If the attribute is a function, MagicMethod will
trigger the RPC call, using the InnerRPCClient.
The typical workflow looks like this::
rpc = RPCClient('DataManagement/FileCatalog')
# Here, func is the ping function, which we call remotely.
# We go through RPCClient.__getattr__ which returns us a MagicMethod object
func = rpc.ping
# Here we call the method __call__ of the MagicMethod
func()
"""
def __init__( self, *args, **kwargs ):
"""
Constructor
The arguments are just passed on to InnerRPCClient.
In practice:
* args: has to be the service name or URL
* kwargs: all the arguments InnerRPCClient and BaseClient accept as configuration
"""
self.__innerRPCClient = InnerRPCClient( *args, **kwargs )
def __doRPC( self, sFunctionName, args ):
"""
Execute the RPC action. This is given as an attribute
to MagicMethod
:param sFunctionName: name of the remote function
:param args: arguments to pass to the function
"""
return self.__innerRPCClient.executeRPC( sFunctionName, args )
def __getattr__( self, attrName ):
""" Function for emulating the existance of functions.
In literature this is usually called a "stub function".
If the attribute exists in InnerRPCClient, return it,
otherwise we create a _MagicMethod instance
"""
if attrName in dir( self.__innerRPCClient ):
return getattr( self.__innerRPCClient, attrName )
return _MagicMethod( self.__doRPC, attrName )
def executeRPCStub( rpcStub ):
"""
Playback a stub
"""
  #Generate an RPCClient with the same parameters
rpcClient = RPCClient( rpcStub[0][0], **rpcStub[0][1] )
#Get a functor to execute the RPC call
rpcFunc = getattr( rpcClient, rpcStub[1] )
#Reproduce the call
return rpcFunc( *rpcStub[2] )
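# Illustrative only (not part of the original module): an rpcStub is a
# 3-tuple of ( ( service name or URL, client kwargs ), remote function name,
# call arguments ). Replaying a hypothetical ping call would look like:
#
#   stub = ( ( 'DataManagement/FileCatalog', {} ), 'ping', () )
#   result = executeRPCStub( stub )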
|
fstagni/DIRAC
|
Core/DISET/RPCClient.py
|
Python
|
gpl-3.0
| 3,753
|
[
"DIRAC"
] |
7d7e3057a801fb3ea252802c162718f78fc712e6e9a7d8ac1fbac65d6a46fb99
|
from __future__ import division, print_function
from hscom import __common__
print, print_, print_on, print_off, rrr, profile, printDBG =\
__common__.init(__name__, '[cov]', DEBUG=False)
# Standard
from itertools import izip
from itertools import product as iprod
import math
# Science
import cv2
import numpy as np
# HotSpotter
from hscom import helpers as util
import matching_functions as mf
SCALE_FACTOR_DEFAULT = .05
METHOD_DEFAULT = 0
def score_chipmatch_coverage(hs, qcx, chipmatch, qreq, method=0):
prescore_method = 'csum'
nShortlist = 100
dcxs_ = set(qreq._dcxs)
(cx2_fm, cx2_fs, cx2_fk) = chipmatch
cx2_prescore = mf.score_chipmatch(hs, qcx, chipmatch, prescore_method, qreq)
topx2_cx = cx2_prescore.argsort()[::-1] # Only allow indexed cxs to be in the top results
topx2_cx = [cx for cx in iter(topx2_cx) if cx in dcxs_]
nRerank = min(len(topx2_cx), nShortlist)
cx2_score = [0 for _ in xrange(len(cx2_fm))]
mark_progress, end_progress = util.progress_func(nRerank, flush_after=10,
lbl='[cov] Compute coverage')
for topx in xrange(nRerank):
mark_progress(topx)
cx2 = topx2_cx[topx]
fm = cx2_fm[cx2]
fs = cx2_fs[cx2]
covscore = get_match_coverage_score(hs, qcx, cx2, fm, fs, method=method)
cx2_score[cx2] = covscore
end_progress()
return cx2_score
def get_match_coverage_score(hs, cx1, cx2, fm, fs, **kwargs):
if len(fm) == 0:
return 0
    if 'scale_factor' not in kwargs:
        kwargs['scale_factor'] = SCALE_FACTOR_DEFAULT
    if 'method' not in kwargs:
        kwargs['method'] = METHOD_DEFAULT
sel_fx1, sel_fx2 = fm.T
method = kwargs.get('method', 0)
score1 = get_cx_match_covscore(hs, cx1, sel_fx1, fs, **kwargs)
if method in [0, 2]:
# 0 and 2 use both score
score2 = get_cx_match_covscore(hs, cx2, sel_fx2, fs, **kwargs)
covscore = (score1 + score2) / 2
elif method in [1, 3]:
# 1 and 3 use just score 1
covscore = score1
else:
        raise NotImplementedError('[cov] method=%r' % method)
return covscore
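# Note added for clarity (inferred from the method handling above and the
# scale modes in get_cx_match_covimg): method 0 averages the query and result
# coverage scores, method 1 uses only the query score, and methods 2 and 3
# are the same two variants with feature scores additionally down-weighted by
# keypoint scale.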
def get_cx_match_covscore(hs, cx, sel_fx, mx2_score, **kwargs):
dstimg = get_cx_match_covimg(hs, cx, sel_fx, mx2_score, **kwargs)
score = dstimg.sum() / (dstimg.shape[0] * dstimg.shape[1])
return score
def get_cx_match_covimg(hs, cx, sel_fx, mx2_score, **kwargs):
chip = hs.get_chip(cx)
kpts = hs.get_kpts(cx)
mx2_kp = kpts[sel_fx]
srcimg = get_gaussimg()
# 2 and 3 are scale modes
if kwargs.get('method', 0) in [2, 3]:
# Bigger keypoints should get smaller weights
mx2_scale = np.sqrt([a * d for (x, y, a, c, d) in mx2_kp])
mx2_score = mx2_score / mx2_scale
dstimg = warp_srcimg_to_kpts(mx2_kp, srcimg, chip.shape[0:2],
fx2_score=mx2_score, **kwargs)
return dstimg
def get_match_coverage_images(hs, cx1, cx2, fm, mx2_score, **kwargs):
sel_fx1, sel_fx2 = fm.T
dstimg1 = get_cx_match_covimg(hs, cx1, sel_fx1, mx2_score, **kwargs)
    dstimg2 = get_cx_match_covimg(hs, cx2, sel_fx2, mx2_score, **kwargs)
return dstimg1, dstimg2
def warp_srcimg_to_kpts(fx2_kp, srcimg, chip_shape, fx2_score=None, **kwargs):
if len(fx2_kp) == 0:
return None
if fx2_score is None:
fx2_score = np.ones(len(fx2_kp))
    scale_factor = kwargs.get('scale_factor', SCALE_FACTOR_DEFAULT)
# Build destination image
(h, w) = map(int, (chip_shape[0] * scale_factor, chip_shape[1] * scale_factor))
dstimg = np.zeros((h, w), dtype=np.float32)
dst_copy = dstimg.copy()
src_shape = srcimg.shape
# Build keypoint transforms
fx2_M = build_transforms(fx2_kp, (h, w), src_shape, scale_factor)
# cv2 warp flags
dsize = (w, h)
flags = cv2.INTER_LINEAR # cv2.INTER_LANCZOS4
    borderMode = cv2.BORDER_CONSTANT
    # mark progress
mark_progress, end_progress = util.progress_func(len(fx2_M),
flush_after=20,
mark_after=1000,
lbl='coverage warp ')
# For each keypoint warp a gaussian scaled by the feature score
# into the image
count = 0
for count, (M, score) in enumerate(izip(fx2_M, fx2_score)):
mark_progress(count)
warped = cv2.warpAffine(srcimg * score, M, dsize,
dst=dst_copy,
                                flags=flags, borderMode=borderMode,
borderValue=0).T
catmat = np.dstack((warped.T, dstimg))
dstimg = catmat.max(axis=2)
mark_progress(count)
end_progress()
return dstimg
def pdf_norm2d(x_, y_):
x = np.array([x_, y_])
sigma = np.eye(2)
mu = np.array([0, 0])
size = len(x)
if size == len(mu) and (size, size) == sigma.shape:
det = np.linalg.det(sigma)
if det == 0:
raise NameError("The covariance matrix can't be singular")
np.tau = 2 * np.pi
norm_const = 1.0 / ( math.pow(np.tau, float(size) / 2) * math.pow(det, 1.0 / 2))
x_mu = np.matrix(x - mu)
inv = np.linalg.inv(sigma)
result = math.pow(math.e, -0.5 * (x_mu * inv * x_mu.T))
return norm_const * result
def get_gaussimg(width=3, resolution=7):
half_width = width / 2
gauss_xs = np.linspace(-half_width, half_width, resolution)
gauss_ys = np.linspace(-half_width, half_width, resolution)
gaussspace_xys = np.array(list(iprod(gauss_xs, gauss_ys)))
gausspace_score = np.array([pdf_norm2d(x, y) for (x, y) in gaussspace_xys])
gausspace_score -= gausspace_score.min()
gausspace_score /= gausspace_score.max()
size = (resolution, resolution)
gaussimg = gausspace_score.reshape(size).T
gaussimg = np.array(gaussimg, dtype=np.float32)
return gaussimg
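# Illustrative only (not part of the original module): get_gaussimg returns a
# small float32 image of an isotropic 2D Gaussian rescaled to span [0, 1]:
#
#   gaussimg = get_gaussimg()  # default width=3, resolution=7
#   assert gaussimg.shape == (7, 7)
#   assert gaussimg.min() == 0.0 and gaussimg.max() == 1.0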
def build_transforms(kpts, chip_shape, src_shape, scale_factor):
(h, w) = chip_shape
(h_, w_) = src_shape
T1 = np.array(((1, 0, -w_ / 2),
(0, 1, -h_ / 2),
(0, 0, 1),))
S1 = np.array(((1 / w_, 0, 0),
(0, 1 / h_, 0),
(0, 0, 1),))
aff_list = [np.array(((a, 0, x),
(c, d, y),
(0, 0, 1),)) for (x, y, a, c, d) in kpts]
S2 = np.array(((scale_factor, 0, 0),
(0, scale_factor, 0),
(0, 0, 1),))
perspective_list = [S2.dot(A).dot(S1).dot(T1) for A in aff_list]
transform_list = [M[0:2] for M in perspective_list]
return transform_list
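# Note added for clarity (inferred from build_transforms above): each
# returned 2x3 affine matrix is the top two rows of the composition
# S2 . A . S1 . T1, which takes a source-image pixel, centers it on the
# source midpoint (T1), normalizes it to unit size (S1), maps it through the
# keypoint's position and elliptical shape (x, y, a, c, d) into chip
# coordinates (A), and finally rescales chip coordinates by scale_factor (S2).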
def get_coverage_map(kpts, chip_shape, **kwargs):
# Create gaussian image to warp
np.tau = 2 * np.pi
srcimg = get_gaussimg()
dstimg = warp_srcimg_to_kpts(kpts, srcimg, chip_shape, **kwargs)
return dstimg
|
Erotemic/hotspotter
|
hotspotter/coverage.py
|
Python
|
apache-2.0
| 6,933
|
[
"Gaussian"
] |
1007ef80336d79844fdd56e99672bdac1edd757bc6247f79c973ca9a9250b0b7
|
#!/usr/bin/env python
from __future__ import division
__author__ = "Rob Knight, Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Rob Knight", "Greg Caporaso", "Kyle Bittinger",
"Antonio Gonzalez Pena", "David Soergel", "Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
import logging
import os
import re
from os import remove
from os.path import abspath, dirname
from itertools import count
from string import strip
from tempfile import NamedTemporaryFile, mkdtemp
from cStringIO import StringIO
from collections import Counter, defaultdict
from shutil import rmtree
from skbio.parse.sequences import parse_fasta
from bfillings.blast import blast_seqs, Blastall, BlastResult
from bfillings.formatdb import build_blast_db_from_fasta_path
from bfillings.uclust import Uclust
from bfillings.sortmerna_v2 import (build_database_sortmerna,
sortmerna_map)
from bfillings import rdp_classifier
from bfillings import mothur
from bfillings import rtax
from qiime.util import FunctionWithParams, get_rdp_jarpath, get_qiime_temp_dir
"""Contains code for assigning taxonomy, using several techniques.
This module has the responsibility for taking a set of sequences and
providing a taxon assignment for each sequence."""
def validate_rdp_version(rdp_jarpath=None):
if rdp_jarpath is None:
rdp_jarpath = get_rdp_jarpath()
if rdp_jarpath is None:
raise RuntimeError(
"RDP classifier is not installed or not accessible to QIIME. "
"See install instructions here: "
"http://qiime.org/install/install.html#rdp-install"
)
rdp_jarname = os.path.basename(rdp_jarpath)
version_match = re.search("\d\.\d", rdp_jarname)
if version_match is None:
raise RuntimeError(
"Unable to detect RDP Classifier version in file %s" % rdp_jarname
)
version = float(version_match.group())
if version < 2.1:
raise RuntimeError(
"RDP Classifier does not look like version 2.2 or greater."
"Versions of the software prior to 2.2 have different "
"formatting conventions and are no longer supported by QIIME. "
"Detected version %s from file %s" % (version, rdp_jarpath)
)
return version
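# Illustrative only (not part of the original module): the version check just
# pattern-matches a "major.minor" pair in the jar file name, so a
# hypothetical install would be validated like this:
#
#   >>> re.search("\d\.\d", "rdp_classifier-2.2.jar").group()
#   '2.2'
#
# float('2.2') is not less than 2.1, so this jar would be accepted.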
class TaxonAssigner(FunctionWithParams):
"""A TaxonAssigner assigns a taxon to each of a set of sequences.
This is an abstract class: subclasses should implement the __call__
method.
"""
Name = 'TaxonAssigner'
def __init__(self, params):
"""Return new TaxonAssigner object with specified params.
Note: expect params to contain both generic and per-method (e.g. for
RDP classifier w/ Hugenholtz taxonomy) params, so leaving it as a dict
rather than setting attributes. Some standard entries in params are:
Taxonomy: taxonomy used (e.g. RDP, Hugenholtz)
Similarity: similarity threshold for assignment, e.g. 0.97
Bootstrap: bootstrap support for assignment, e.g. 0.80
Application: 3rd-party application used, if any, e.g. RDP classifier
"""
self.Params = params
def __call__(self, seq_path, result_path=None, log_path=None):
"""Returns dict mapping {seq_id:(taxonomy, confidence)} for each seq.
Parameters:
seq_path: path to file of sequences
result_path: path to file of results. If specified, should
dump the result to the desired path instead of returning it.
log_path: path to log, which should include dump of params.
"""
raise NotImplementedError("TaxonAssigner is an abstract class")
@staticmethod
def _parse_id_to_taxonomy_file(f):
""" parse the id_to_taxonomy file into a dict mapping id -> taxonomy
"""
result = {}
for line in f:
line = line.strip()
if line:
identifier, taxonomy = map(strip, line.split('\t'))
result[identifier] = taxonomy
return result
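    # Illustrative only (not part of the original class): the id_to_taxonomy
    # file is expected to hold one tab-separated (sequence ID, taxonomy) pair
    # per line; a hypothetical two-line file such as
    #
    #   295053<tab>k__Bacteria; p__Proteobacteria
    #   426848<tab>k__Bacteria; p__Firmicutes
    #
    # parses to {'295053': 'k__Bacteria; p__Proteobacteria',
    #            '426848': 'k__Bacteria; p__Firmicutes'}.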
def _tax_assignments_to_consensus_assignments(self,
query_to_assignments):
""" For each query id and list of assignments,
call _get_consensus_assigment to compute the
consensus assignment.
Parameters
----------
query_to_assignments : dict of list of lists
The keys in the dict correspond to query IDs and
the values are a list of lists holding associated
taxonomies.
Returns
-------
query_to_assignments: dict
The keys in the dict correspond to query IDs and
the values carry a single consensus taxonomy
assignment.
"""
for query_id, assignments in query_to_assignments.iteritems():
consensus_assignment = self._get_consensus_assignment(assignments)
query_to_assignments[query_id] = consensus_assignment
return query_to_assignments
def _get_consensus_assignment(self, assignments):
""" compute the consensus assignment from a list of assignments
(method applied to SortMeRNATaxonAssigner and UclustConsensusTaxonAssigner)
"""
num_input_assignments = len(assignments)
consensus_assignment = []
# if the assignments don't all have the same number
# of levels, the resulting assignment will have a max number
# of levels equal to the number of levels in the assignment
# with the fewest number of levels. this is to avoid
# a case where, for example, there are n assignments, one of
# which has 7 levels, and the other n-1 assignments have 6 levels.
# A 7th level in the result would be misleading because it
# would appear to the user as though it was the consensus
# across all n assignments.
num_levels = min([len(a) for a in assignments])
# iterate over the assignment levels
for level in range(num_levels):
# count the different taxonomic assignments at the current level.
# the counts are computed based on the current level and all higher
# levels to reflect that, for example, 'p__A; c__B; o__C' and
# 'p__X; c__Y; o__C' represent different taxa at the o__ level (since
# they are different at the p__ and c__ levels).
current_level_assignments = \
Counter([tuple(e[:level + 1]) for e in assignments])
# identify the most common taxonomic assignment, and compute the
# fraction of assignments that contained it. it's safe to compute the
# fraction using num_assignments because the deepest level we'll
# ever look at here is num_levels (see above comment on how that
# is decided).
tax, max_count = current_level_assignments.most_common(1)[0]
max_consensus_fraction = max_count / num_input_assignments
# check whether the most common taxonomic assignment is observed
# in at least min_consensus_fraction of the sequences
if max_consensus_fraction >= self.Params['min_consensus_fraction']:
# if so, append the current level only (e.g., 'o__C' if tax is
# 'p__A; c__B; o__C', and continue on to the next level
consensus_assignment.append((tax[-1], max_consensus_fraction))
else:
# if not, there is no assignment at this level, and we're
# done iterating over levels
break
# construct the results
# determine the number of levels in the consensus assignment
consensus_assignment_depth = len(consensus_assignment)
if consensus_assignment_depth > 0:
# if it's greater than 0, generate a list of the
# taxa assignments at each level
assignment_result = [a[0] for a in consensus_assignment]
# and assign the consensus_fraction_result as the
# consensus fraction at the deepest level
consensus_fraction_result = \
consensus_assignment[consensus_assignment_depth - 1][1]
else:
# if there are zero assignments, indicate that the taxa is
# unknown
assignment_result = [self.Params['unassignable_label']]
# and assign the consensus_fraction_result to 1.0 (this is
# somewhat arbitrary, but could be interpreted as all of the
# assignments suggest an unknown taxonomy)
consensus_fraction_result = 1.0
return (
assignment_result, consensus_fraction_result, num_input_assignments
)
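    # Illustrative only (not part of the original class): with
    # min_consensus_fraction=0.51, three hypothetical assignments
    #
    #   [['p__A', 'c__B', 'o__C'],
    #    ['p__A', 'c__B', 'o__C'],
    #    ['p__A', 'c__B', 'o__D']]
    #
    # agree fully at the first two levels (fraction 1.0), and two of three
    # agree on 'o__C' at the third level (fraction ~0.67 >= 0.51), so the
    # consensus is (['p__A', 'c__B', 'o__C'], 0.666..., 3).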
class SortMeRNATaxonAssigner(TaxonAssigner):
""" Assign taxonomy using SortMeRNA
"""
Name = 'SortMeRNATaxonAssigner'
Application = "SortMeRNA"
Citation = ("SortMeRNA is hosted at:\n"
"http://bioinfo.lifl.fr/RNA/sortmerna\n"
"https://github.com/biocore/sortmerna\n\n"
"The following paper should be cited if this resource is "
"used:\n\n"
"Kopylova, E., Noe L. and Touzet, H.,\n"
"SortMeRNA: fast and accurate filtering of ribosomal RNAs "
"in\n"
"metatranscriptomic data, Bioinformatics (2012) 28(24)\n"
)
_tracked_properties = ['Application', 'Citation']
def __init__(self, params):
_params = {
# id to taxonomy filepath
'id_to_taxonomy_fp': None,
# reference sequences filepath
'reference_sequences_fp': None,
# reference sequences indexed database
'sortmerna_db': None,
# Fraction of sequence hits that a taxonomy assignment
# must show up in to be considered the consensus assignment
'min_consensus_fraction': 0.51,
# minimum identity to consider a hit
'min_percent_id': 90.0,
# minimum query coverage to consider a hit
'min_percent_cov': 90.0,
# output 10 best alignments
'best_N_alignments': 10,
# E-value
'e_value': 1,
# threads
'threads': 1,
# label to apply for queries that cannot be assigned
'unassignable_label': 'Unassigned'
}
_params.update(params)
super(SortMeRNATaxonAssigner, self).__init__(_params)
def __call__(self,
seq_path,
result_path=None,
log_path=None,
HALT_EXEC=False):
"""Returns mapping of each seq to (taxonomy, consensus fraction, n).
Parameters
----------
seq_path : str, mandatory
The filepath to input sequences.
result_path : str, optional
The filepath to store resulting alignments.
log_path : str, optional
The filepath to store logging information.
HALT_EXEC : bool, debugging parameter
        If passed, will exit just before the sortmerna command is issued
and will print out the command that would have been called
to stdout.
Returns
-------
dict if result_path=None
The results will be stored in a dict:
dict{query_id:[tax, consensus fraction, n]}
None if result_path
The results will be written to result_path as tab-separated
lines of:
query_id <tab> tax <tab> consensus fraction <tab> n
The values represent:
tax: the consensus taxonomy assignment
consensus fraction: the fraction of the assignments for the
query that contained the lowest level tax assignment that is
included in tax (e.g., if the assignment goes to genus level,
this will be the fraction of assignments that had the consensus
genus assignment)
n: the number of assignments that were considered when
constructing the consensus
"""
# Check input reference sequence and taxonomy are provided
if self.Params['reference_sequences_fp'] is None:
raise ValueError("Filepath for reference sequences is mandatory.")
if self.Params['id_to_taxonomy_fp'] is None:
raise ValueError("Filepath for id to taxonomy map is mandatory.")
# initialize the logger
logger = self._get_logger(log_path)
logger.info(str(self))
self.dirs_to_remove = []
# Indexed database not provided, build it
if not self.Params['sortmerna_db']:
output_dir = mkdtemp()
self.sortmerna_db, files_to_remove = \
build_database_sortmerna(abspath(self.Params[
'reference_sequences_fp']),
output_dir=output_dir)
self.dirs_to_remove.append(output_dir)
# Indexed database provided
else:
self.sortmerna_db = self.Params['sortmerna_db']
# Set SortMeRNA's output directory
if result_path is None:
output_dir = mkdtemp()
self.dirs_to_remove.append(output_dir)
else:
output_dir = dirname(abspath(result_path))
# Call sortmerna mapper
app_result =\
sortmerna_map(seq_path=seq_path,
output_dir=output_dir,
sortmerna_db=self.sortmerna_db,
refseqs_fp=self.Params['reference_sequences_fp'],
e_value=self.Params['e_value'],
threads=self.Params['threads'],
best=self.Params['best_N_alignments'],
                          HALT_EXEC=HALT_EXEC)
with open(self.Params['id_to_taxonomy_fp'], "U") as id_to_taxonomy_f:
self.id_to_taxonomy_map =\
self._parse_id_to_taxonomy_file(id_to_taxonomy_f)
blast_tabular_fp = app_result['BlastAlignments'].name
query_to_assignments = self._blast_to_tax_assignments(blast_tabular_fp)
result = self._tax_assignments_to_consensus_assignments(
query_to_assignments)
# Write results to file
if result_path is not None:
with open(result_path, 'w') as of:
of.write('#OTU ID\ttaxonomy\tconfidence\tnum hits\n')
for seq_id, (assignment, consensus_fraction, n) in result.items():
assignment_str = ';'.join(assignment)
of.write('%s\t%s\t%1.2f\t%d\n' % (
seq_id, assignment_str, consensus_fraction, n))
result = None
logger.info('Result path: %s' % result_path)
else:
# If no result_path was provided, the result dict is
# returned as-is.
logger.info('Result path: None, returned as dict.')
# clean up
map(rmtree, self.dirs_to_remove)
return result
def _get_logger(self, log_path=None):
if log_path is not None:
handler = logging.FileHandler(log_path, mode='w')
else:
class NullHandler(logging.Handler):
def emit(self, record):
pass
handler = NullHandler()
logger = logging.getLogger("SortMeRNATaxonAssigner logger")
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger
def _blast_to_tax_assignments(self,
blast_output_fp):
""" Parse SortMeRNA's Blast-like tabular format for query
IDs and the references they map to, use the reference IDs
to find the associated taxonomies in the id_to_taxonomy_map.
Three types of alignments are possible,
1. The Null alignment (E-value threshold failed):
not16S.1_130\t*\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t*\t0
2. All alignments for a query pass the E-value threshold
but fail the %id threshold (3rd column is %id):
f1_4866\t426848\t85.4\t121\t15\t3\t1\t121\t520\t641\t4.79e-32\t131\t72M1D7M1I13M1D28M31S\t79.6
f1_4866\t342684\t84\t91\t9\t6\t1\t91\t522\t612\t2.8e-19\t89\t55M1D4M1I12M1D3M1D4M1I1M1I9M61S\t59.9
3. Some/all alignments for a query pass both E-value and %id
thresholds:
f2_1271\t295053\t100\t128\t0\t0\t1\t128\t520\t647\t1.15e-59\t223\t128M\t100
f2_1271\t42684\t84.8\t124\t17\t2\t1\t124\t527\t650\t2.63e-32\t132\t101M1D6M1I16M4S\t96.9
Parameters
----------
blast_output_fp : str
Filepath to Blast-like tabular alignments.
Returns
-------
result : dict of list of lists
The keys in the dict correspond to query IDs and
the values are a list of lists holding associated
taxonomies.
"""
min_percent_id = self.Params['min_percent_id']
result = defaultdict(list)
with open(blast_output_fp, "U") as blast_output:
for line in blast_output:
fields = line.split('\t')
query_id = fields[0]
subject_id = fields[1]
percent_id = float(fields[2])
# sequence was not aligned
if subject_id == "*":
result[query_id].append([])
# sequence was aligned, passing %id threshold
elif percent_id >= min_percent_id:
                    # If present, remove the empty alignment (one that
                    # failed the %id threshold) recorded earlier for this
                    # sequence; the Blast tabular output lists all
                    # alignments passing the E-value threshold, not
                    # necessarily the %id threshold. An alignment passing
                    # the %id threshold rarely comes after one that failed
                    # it, since Blast alignments are usually ordered from
                    # highest %id to lowest, but sortmerna uses a
                    # heuristic, so this ordering is not guaranteed.
if [] in result[query_id]:
result[query_id].remove([])
# add alignment passing %id threshold
subject_tax = self.id_to_taxonomy_map[
subject_id].strip().split(';')
result[query_id].append(subject_tax)
# sequence was aligned, however failing %id threshold
# if no alignment results have been recorded for this
# sequence up to now, add an empty list
elif not result[query_id]:
result[query_id].append([])
return result
class BlastTaxonAssigner(TaxonAssigner):
""" Assign taxon best on best blast hit above a threshold
"""
Name = 'BlastTaxonAssigner'
SeqsPerBlastRun = 1000
def __init__(self, params):
""" Initialize the object
"""
_params = {
'Min percent identity': 90.0,
'Max E value': 1e-30,
'Application': 'blastn/megablast'
}
_params.update(params)
TaxonAssigner.__init__(self, _params)
def __call__(self, seq_path=None, seqs=None,
result_path=None, log_path=None):
"""Returns dict mapping {seq_id:(taxonomy, confidence)} for each seq.
"""
assert seq_path or seqs, \
"Must provide either seqs or seq_path when calling a BlastTaxonAssigner."
# initialize the logger
logger = self._get_logger(log_path)
logger.info(str(self))
        # assign the blast database, either as a pre-existing database
# specified as self.Params['blast_db'] or by creating a
# temporary database from the sequence file specified
# as self.Params['reference_seqs_filepath']
        # temporary blast db files created below, if any, for later cleanup
        db_files_to_remove = []
        try:
blast_db = self.Params['blast_db']
except KeyError:
# build a temporary blast_db
reference_seqs_path = self.Params['reference_seqs_filepath']
refseqs_dir, refseqs_name = os.path.split(reference_seqs_path)
blast_db, db_files_to_remove = \
build_blast_db_from_fasta_path(reference_seqs_path)
# build the mapping of sequence identifier
# (wrt to the blast db seqs) to taxonomy
id_to_taxonomy_map = self._parse_id_to_taxonomy_file(
open(self.Params['id_to_taxonomy_filepath'], 'U'))
# Iterate over the input self.SeqsPerBlastRun seqs at a time.
# There are two competing issues here when dealing with very large
# inputs. If all sequences are read in at once, the containing object
# can be very large, causing the system to page. On the other hand,
# in such cases it would be very slow to treat each sequence
# individually, since blast requires a filepath. Each call would
# therefore involve writing a single sequence to file, opening/closing
# and removing the file. To balance this, sequences are read in and
        # blasted in chunks of self.SeqsPerBlastRun (default: 1000) at a time.
# This appears to solve the problem with the largest sets I've worked
# with so far.
if seq_path:
# Get a seq iterator
seqs = parse_fasta(open(seq_path))
        # Build objects to keep track of the current set of sequences to be
        # blasted, and of the results (i.e., the seq_id -> (taxonomy,
        # quality score) mapping)
current_seqs = []
result = {}
# Iterate over the (seq_id, seq) pairs
for seq_id, seq in seqs:
# append the current seq_id,seq to list of seqs to be blasted
current_seqs.append((seq_id, seq))
# When there are 1000 in the list, blast them
if len(current_seqs) == self.SeqsPerBlastRun:
# update the result object
result.update(self._seqs_to_taxonomy(
current_seqs, blast_db, id_to_taxonomy_map))
# reset the list of seqs to be blasted
current_seqs = []
# Assign taxonomy to the remaining sequences
result.update(self._seqs_to_taxonomy(
current_seqs, blast_db, id_to_taxonomy_map))
# End iteration over the input self.SeqsPerBlastRun seqs at a time.
# Write log data if we have a path (while the logger can handle
# being called if we are not logging, some of these steps are slow).
if log_path is not None:
num_inspected = len(result)
logger.info('Number of sequences inspected: %s' % num_inspected)
num_null_hits = [r[1] for r in result.values()].count(None)
logger.info('Number with no blast hits: %s' % num_null_hits)
if result_path:
# if the user provided a result_path, write the
# results to file
of = open(result_path, 'w')
for seq_id, (lineage, confidence, blast_hit_id) in result.items():
of.write('%s\t%s\t%s\t%s\n' %
(seq_id, lineage, confidence, blast_hit_id))
of.close()
result = None
logger.info('Result path: %s' % result_path)
        else:
            # if no result_path was provided, the data is returned as
            # a dict, so no modification to result is necessary
            logger.info('Result path: None, returned as dict.')
        # clean up temp blastdb files, if a temp blastdb was created
        # (db_files_to_remove is empty when a pre-built blast_db was used)
        map(remove, db_files_to_remove)
# return the result
return result
def _seqs_to_taxonomy(self, seqs, blast_db, id_to_taxonomy_map):
""" Assign taxonomy to (seq_id,seq) pairs
"""
# Handle the case of no seqs passed in
if not seqs:
return {}
# blast the seqs
blast_hits = self._get_blast_hits(blast_db, seqs)
# select the best blast hit for each query sequence
best_blast_hit_ids = self._get_first_blast_hit_per_seq(blast_hits)
# map the identifier of the best blast hit to (taxonomy, e-value)
return self._map_ids_to_taxonomy(
best_blast_hit_ids, id_to_taxonomy_map)
def _get_logger(self, log_path=None):
if log_path is not None:
handler = logging.FileHandler(log_path, mode='w')
else:
class NullHandler(logging.Handler):
def emit(self, record):
pass
handler = NullHandler()
logger = logging.getLogger("BlastTaxonAssigner logger")
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger
def _map_ids_to_taxonomy(self, hits, id_to_taxonomy_map):
""" map {query_id:(best_blast_seq_id,e-val)} to {query_id:(tax,e-val,best_blast_seq_id)}
"""
for query_id, hit in hits.items():
query_id = query_id.split()[0]
try:
hit_id, e_value = hit
hits[query_id] = \
(id_to_taxonomy_map.get(hit_id, None), e_value, hit_id)
except TypeError:
hits[query_id] = ('No blast hit', None, None)
return hits
def _get_blast_hits(self, blast_db, seqs):
""" blast each seq in seqs against blast_db and retain good hits
"""
max_evalue = self.Params['Max E value']
min_percent_identity = self.Params['Min percent identity']
if min_percent_identity < 1.0:
min_percent_identity *= 100.0
seq_ids = [s[0] for s in seqs]
result = {}
blast_result = blast_seqs(
seqs, Blastall, blast_db=blast_db,
params={'-p': 'blastn', '-n': 'T'},
add_seq_names=False)
if blast_result['StdOut']:
lines = [x for x in blast_result['StdOut']]
blast_result = BlastResult(lines)
else:
            # no hits at all: return a fresh empty list per query id
            # (dict.fromkeys would share a single list across all keys)
            return {seq_id: [] for seq_id in seq_ids}
for seq_id in seq_ids:
blast_result_id = seq_id.split()[0]
try:
result[seq_id] = [(e['SUBJECT ID'], float(e['E-VALUE']))
for e in blast_result[blast_result_id][0]
if (float(e['E-VALUE']) <= max_evalue and
float(e['% IDENTITY']) >= min_percent_identity)]
except KeyError:
result[seq_id] = []
return result
def _get_first_blast_hit_per_seq(self, blast_hits):
""" discard all blast hits except the best for each query sequence
"""
result = {}
for k, v in blast_hits.items():
k = k.split()[0] # get rid of spaces
try:
result[k] = v[0]
except IndexError:
# If there is no good blast hit, do we want to
# leave the key out, or have it point to None?
result[k] = None
return result
class MothurTaxonAssigner(TaxonAssigner):
"""Assign taxonomy using Mothur's naive Bayes implementation
"""
Name = 'MothurTaxonAssigner'
Application = "Mothur"
Citation = (
"Schloss, P.D., et al., Introducing mothur: Open-source, platform-"
"independent, community-supported software for describing and "
"comparing microbial communities. Appl Environ Microbiol, 2009. "
"75(23):7537-41."
)
_tracked_properties = ['Application', 'Citation']
def __init__(self, params):
_params = {
'Confidence': 0.80,
'Iterations': None,
'KmerSize': None,
'id_to_taxonomy_fp': None,
'reference_sequences_fp': None,
}
_params.update(params)
super(MothurTaxonAssigner, self).__init__(_params)
def _format_id_to_taxonomy(self, id_to_taxonomy_file):
"""Reformat taxa to comply with Mothur formatting requirements.
Mothur requires lineages to be semicolon-separated with no space
following the semicolon. (QIIME convention is to include a
        space.) Taxa may not contain internal spaces. Furthermore, each
lineage must end with a semi-colon.
Returns the re-formatted id-to-taxonomy file as an open file
object.
"""
mothur_tax_file = NamedTemporaryFile(
prefix='MothurTaxonAssigner_',
suffix='.txt',
dir=get_qiime_temp_dir())
original_taxonomy = self._parse_id_to_taxonomy_file(id_to_taxonomy_file)
for seq_id, lineage in original_taxonomy.iteritems():
mothur_tax_file.write(seq_id)
mothur_tax_file.write('\t')
taxa = [t.strip() for t in lineage.split(';')]
for taxon in taxa:
mothur_tax_file.write(self._format_taxon(taxon))
mothur_tax_file.write(';')
mothur_tax_file.write('\n')
mothur_tax_file.seek(0)
return mothur_tax_file
def _unformat_result(self, result):
"""Transform results to remove any changes introduced by formatting.
"""
unformatted_result = {}
for seq_id, (taxa, conf) in result.iteritems():
unformatted_taxa = [self._unformat_taxon(t) for t in taxa]
unformatted_result[seq_id] = (unformatted_taxa, conf)
return unformatted_result
def _format_taxon(self, taxon):
"""Format taxon for MOTHUR, removing internal spaces.
Original taxon names are saved to self._original_taxa for later lookup.
"""
# Create private attribute to store unformatted taxon names.
# If _unformat_taxon() is called without first calling
# _format_taxon(), this attribute will be missing, and an
# AttributeError will be raised.
if not hasattr(self, "_original_taxa"):
self._original_taxa = {}
# Escape backslashes
mothur_taxon = taxon.replace("\\", "\\\\")
# Escape underscores
mothur_taxon = mothur_taxon.replace("_", "\\_")
# Now we can safely replace spaces with underscores
mothur_taxon = mothur_taxon.replace(' ', '_')
if mothur_taxon != taxon:
previously_registered_taxon = self._original_taxa.get(mothur_taxon)
# If we have not yet registered the escaped taxon name, add it now.
if previously_registered_taxon is None:
self._original_taxa[mothur_taxon] = taxon
# Otherwise, check that the previously registered taxon is
# consistent with the current taxon. If we have not
# escaped the taxon names properly, two distinct taxa may
# be registered under the same name. This should probably
# never happen, but I can't prove it, so we check and
# raise an error if the taxa are inconsistent.
elif taxon != previously_registered_taxon:
raise ValueError(
"Taxon %s conflicts with another taxon, %s. "
"Please change one of the names." % (
taxon, previously_registered_taxon))
return mothur_taxon
def _unformat_taxon(self, taxon):
"""Recover original taxon names that were altered due to formatting.
Looks up taxon names in the attribute self._original_taxa. If
self._format_taxon() was never called, this attribute will be
missing, and an AttributeError will be raised.
"""
return self._original_taxa.get(taxon, taxon)
def __call__(self, seq_path, result_path=None, log_path=None):
seq_file = open(seq_path)
percent_confidence = int(self.Params['Confidence'] * 100)
with open(self.Params['id_to_taxonomy_fp'], "U") as tax_file:
mothur_tax_file = self._format_id_to_taxonomy(tax_file)
try:
result = mothur.mothur_classify_file(
query_file=seq_file,
ref_fp=self.Params['reference_sequences_fp'],
tax_fp=mothur_tax_file.name,
cutoff=percent_confidence,
iters=self.Params['Iterations'],
ksize=self.Params['KmerSize'],
output_fp=None,
)
finally:
mothur_tax_file.close()
result = self._unformat_result(result)
if result_path is not None:
with open(result_path, "w") as f:
for seq_id, (taxa, conf) in result.iteritems():
lineage = ';'.join(taxa)
f.write("%s\t%s\t%.2f\n" % (seq_id, lineage, conf))
return None
if log_path:
self.writeLog(log_path)
return result
class RdpTaxonAssigner(TaxonAssigner):
"""Assign taxon using RDP's naive Bayesian classifier
"""
Name = "RdpTaxonAssigner"
Application = "RDP classfier"
Citation = "Wang, Q, G. M. Garrity, J. M. Tiedje, and J. R. Cole. 2007. Naive Bayesian Classifier for Rapid Assignment of rRNA Sequences into the New Bacterial Taxonomy. Appl Environ Microbiol. 73(16):5261-7."
Taxonomy = "RDP"
_tracked_properties = ['Application', 'Citation', 'Taxonomy']
def __init__(self, params):
"""Return new RdpTaxonAssigner object with specified params.
Standard entries in params are:
Taxonomy: taxonomy used (e.g. RDP, Hugenholtz)
"""
_params = {
'Confidence': 0.80,
'id_to_taxonomy_fp': None,
'reference_sequences_fp': None,
'training_data_properties_fp': None,
'max_memory': None
}
_params.update(params)
TaxonAssigner.__init__(self, _params)
def __call__(self, seq_path, result_path=None, log_path=None):
"""Returns dict mapping {seq_id:(taxonomy, confidence)} for
each seq.
Parameters:
seq_path: path to file of sequences
result_path: path to file of results. If specified, dumps the
result to the desired path instead of returning it.
log_path: path to log, which should include dump of params.
"""
tmp_dir = get_qiime_temp_dir()
min_conf = self.Params['Confidence']
training_data_properties_fp = self.Params[
'training_data_properties_fp']
reference_sequences_fp = self.Params['reference_sequences_fp']
id_to_taxonomy_fp = self.Params['id_to_taxonomy_fp']
max_memory = self.Params['max_memory']
seq_file = open(seq_path, 'U')
if reference_sequences_fp and id_to_taxonomy_fp:
# Train and assign taxonomy
taxonomy_file, training_seqs_file = self._generate_training_files()
results = rdp_classifier.train_rdp_classifier_and_assign_taxonomy(
training_seqs_file, taxonomy_file, seq_file,
min_confidence=min_conf,
classification_output_fp=result_path,
max_memory=max_memory, tmp_dir=tmp_dir)
if result_path is None:
results = self._training_set.fix_results(results)
else:
self._training_set.fix_output_file(result_path)
else:
# Just assign taxonomy, using properties file if passed
if training_data_properties_fp:
fix_ranks = False
else:
fix_ranks = True
results = rdp_classifier.assign_taxonomy(
seq_file, min_confidence=min_conf, output_fp=result_path,
training_data_fp=training_data_properties_fp,
max_memory=max_memory, fixrank=fix_ranks, tmp_dir=tmp_dir)
if log_path:
self.writeLog(log_path)
return results
def _generate_training_files(self):
"""Returns a tuple of file objects suitable for passing to the
RdpTrainer application controller.
"""
tmp_dir = get_qiime_temp_dir()
training_set = RdpTrainingSet()
reference_seqs_file = open(self.Params['reference_sequences_fp'], 'U')
id_to_taxonomy_file = open(self.Params['id_to_taxonomy_fp'], 'U')
for seq_id, seq in parse_fasta(reference_seqs_file):
training_set.add_sequence(seq_id, seq)
for line in id_to_taxonomy_file:
seq_id, lineage_str = map(strip, line.split('\t'))
training_set.add_lineage(seq_id, lineage_str)
training_set.dereplicate_taxa()
rdp_taxonomy_file = NamedTemporaryFile(
prefix='RdpTaxonAssigner_taxonomy_', suffix='.txt', dir=tmp_dir)
rdp_taxonomy_file.write(training_set.get_rdp_taxonomy())
rdp_taxonomy_file.seek(0)
rdp_training_seqs_file = NamedTemporaryFile(
prefix='RdpTaxonAssigner_training_seqs_', suffix='.fasta',
dir=tmp_dir)
for rdp_id, seq in training_set.get_training_seqs():
rdp_training_seqs_file.write('>%s\n%s\n' % (rdp_id, seq))
rdp_training_seqs_file.seek(0)
self._training_set = training_set
return rdp_taxonomy_file, rdp_training_seqs_file
class RdpTrainingSet(object):
def __init__(self):
self._tree = RdpTree()
self.sequences = {}
self.sequence_nodes = {}
self.lineage_depth = None
def add_sequence(self, seq_id, seq):
self.sequences[seq_id] = seq
def add_lineage(self, seq_id, lineage_str):
for char, escape_str in _QIIME_RDP_ESCAPES:
lineage_str = re.sub(char, escape_str, lineage_str)
lineage = self._parse_lineage(lineage_str)
seq_node = self._tree.insert_lineage(lineage)
self.sequence_nodes[seq_id] = seq_node
def dereplicate_taxa(self):
return self._tree.dereplicate_taxa()
def _parse_lineage(self, lineage_str):
"""Returns a list of taxa from the semi-colon-separated
lineage string of an id_to_taxonomy file.
"""
lineage = lineage_str.strip().split(';')
if self.lineage_depth is None:
self.lineage_depth = len(lineage)
if len(lineage) != self.lineage_depth:
raise ValueError(
'Because the RDP Classifier operates in a bottom-up manner, '
'each taxonomy assignment in the id-to-taxonomy file must have '
'the same number of ranks. Detected %s ranks in the first '
'item of the file, but detected %s ranks later in the file. '
'Offending taxonomy string: %s' %
(self.lineage_depth, len(lineage), lineage_str))
return lineage
def get_training_seqs(self):
"""Returns an iterator of valid training sequences in
RDP-compatible format
Each training sequence is represented by a tuple (rdp_id,
seq). The rdp_id consists of two items: the original sequence
ID with whitespace replaced by underscores, and the lineage
with taxa separated by semicolons.
"""
# Rdp requires unique sequence IDs without whitespace. Can't
# trust user IDs to not have whitespace, so we replace all
# whitespace with an underscore. Classification may fail if
# the replacement method generates a name collision.
for seq_id, node in self.sequence_nodes.iteritems():
seq = self.sequences.get(seq_id)
if seq is not None:
lineage = node.get_lineage()
                rdp_id = '%s %s' % (
                    re.sub(r'\s', '_', seq_id),
                    ';'.join(lineage))
yield rdp_id, seq
def get_rdp_taxonomy(self):
return self._tree.get_rdp_taxonomy()
def fix_output_file(self, result_path):
# Ultimate hack to replace mangled taxa names
temp_results = StringIO()
for line in open(result_path):
line = re.sub(
_QIIME_RDP_TAXON_TAG + "[^;\n\t]*", '', line)
for char, escape_str in _QIIME_RDP_ESCAPES:
line = re.sub(escape_str, char, line)
temp_results.write(line)
open(result_path, 'w').write(temp_results.getvalue())
def fix_results(self, results_dict):
for seq_id, assignment in results_dict.iteritems():
lineage, confidence = assignment
lineage = re.sub(
_QIIME_RDP_TAXON_TAG + "[^;\n\t]*", '', lineage)
for char, escape_str in _QIIME_RDP_ESCAPES:
lineage = re.sub(escape_str, char, lineage)
results_dict[seq_id] = (lineage, confidence)
return results_dict
class RdpTree(object):
"""Simple, specialized tree class used to generate a taxonomy
file for the Rdp Classifier.
"""
taxonomic_ranks = ' abcdefghijklmnopqrstuvwxyz'
def __init__(self, name='Root', parent=None, counter=None):
if counter is None:
self.counter = count(0)
else:
self.counter = counter
self.id = self.counter.next()
self.name = name
self.parent = parent
self.seq_ids = []
if parent is None:
self.depth = 0
else:
self.depth = parent.depth + 1
self.children = dict() # name => subtree
def insert_lineage(self, lineage):
"""Inserts an assignment into the taxonomic tree.
Lineage must support the iterator interface, or provide an
__iter__() method that returns an iterator.
"""
lineage = iter(lineage)
try:
taxon = lineage.next()
if taxon not in self.children:
self.children[taxon] = self.__class__(
name=taxon, parent=self, counter=self.counter)
retval = self.children[taxon].insert_lineage(lineage)
except StopIteration:
retval = self
return retval
def get_lineage(self):
if self.parent is not None:
return self.parent.get_lineage() + [self.name]
else:
return [self.name]
def get_nodes(self):
yield self
for child in self.children.values():
child_nodes = child.get_nodes()
for node in child_nodes:
yield node
def dereplicate_taxa(self):
        # We check that there are no duplicate taxon names (case insensitive)
        # at a given depth. The check must be case insensitive because the
        # RDP classifier converts taxon names to lowercase when checking for
        # duplicates, and will throw an error if any are found.
taxa_by_depth = {}
for node in self.get_nodes():
name = node.name
depth = node.depth
current_names = taxa_by_depth.get(depth, set())
if name.lower() in current_names:
node.name = name + _QIIME_RDP_TAXON_TAG + str(node.id)
else:
current_names.add(name.lower())
taxa_by_depth[depth] = current_names
def get_rdp_taxonomy(self):
"""Returns a string, in Rdp-compatible format.
"""
# RDP uses 0 for the parent ID of the root node
if self.parent is None:
parent_id = 0
else:
parent_id = self.parent.id
# top rank name must be norank, and bottom rank must be genus
if self.depth == 0:
rank_name = "norank"
elif self.children:
rank_name = self.taxonomic_ranks[self.depth]
else:
rank_name = "genus"
fields = [
self.id, self.name, parent_id, self.depth, rank_name]
taxonomy_str = '*'.join(map(str, fields)) + "\n"
# Recursively append lines from sorted list of subtrees
child_names = sorted(self.children.keys())
subtrees = [self.children[name] for name in child_names]
for subtree in subtrees:
taxonomy_str += subtree.get_rdp_taxonomy()
return taxonomy_str
_QIIME_RDP_TAXON_TAG = "_qiime_unique_taxon_tag_"
_QIIME_RDP_ESCAPES = [
("&", "_qiime_ampersand_escape_"),
(">", "_qiime_greaterthan_escape_"),
("<", "_qiime_lessthan_escape_"),
]
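# For example (hypothetical data), a taxon 'Q&A' is stored during training
# as 'Q_qiime_ampersand_escape_A' and restored to 'Q&A' when results are
# cleaned up by RdpTrainingSet.fix_results() / fix_output_file().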
class RtaxTaxonAssigner(TaxonAssigner):
"""Assign taxon using RTAX
"""
Name = "RtaxTaxonAssigner"
# ", version 0.98" # don't hardcode the version number, as it may change, and then the log output test would fail
Application = "RTAX classifier"
Citation = "Soergel D.A.W., Dey N., Knight R., and Brenner S.E. 2012. Selection of primers for optimal taxonomic classification of environmental 16S rRNA gene sequences. ISME J (6), 1440-1444"
_tracked_properties = ['Application', 'Citation']
def __init__(self, params):
"""Return new RtaxTaxonAssigner object with specified params.
"""
_params = {
'id_to_taxonomy_fp': None,
'reference_sequences_fp': None,
# 'delimiter': ","
# use the amplicon ID, not including /1 or /3, as the primary key
# for the query sequences
'header_id_regex': "\\S+\\s+(\\S+?)\/",
# OTU clustering produces ">clusterID read_1_id"
'read_id_regex': "\\S+\\s+(\\S+)",
# split_libraries produces >read_1_id ampliconID/1 . This makes a
# map between read_1_id and ampliconID.
'amplicon_id_regex': "(\\S+)\\s+(\\S+?)\/",
'read_1_seqs_fp': None,
'read_2_seqs_fp': None,
'single_ok': False,
'no_single_ok_generic': False
}
_params.update(params)
TaxonAssigner.__init__(self, _params)
def __call__(self, seq_path, result_path=None, log_path=None):
"""Returns dict mapping {seq_id:(taxonomy, confidence)} for
each seq.
Parameters:
seq_path: path to file of sequences
result_path: path to file of results. If specified, dumps the
result to the desired path instead of returning it.
log_path: path to log, which should include dump of params.
"""
if log_path:
self.writeLog(log_path)
reference_sequences_fp = self.Params['reference_sequences_fp']
assert reference_sequences_fp, \
"Must provide reference_sequences_fp when calling an RtaxTaxonAssigner."
id_to_taxonomy_fp = self.Params['id_to_taxonomy_fp']
assert id_to_taxonomy_fp, \
"Must provide id_to_taxonomy_fp when calling an RtaxTaxonAssigner."
# delimiter = self.Params['delimiter']
read_1_seqs_fp = self.Params['read_1_seqs_fp']
assert read_1_seqs_fp, \
"Must provide read_1_seqs_fp when calling an RtaxTaxonAssigner."
# following params may all be null
read_2_seqs_fp = self.Params['read_2_seqs_fp']
single_ok = self.Params['single_ok']
no_single_ok_generic = self.Params['no_single_ok_generic']
header_id_regex = self.Params['header_id_regex']
        assert header_id_regex, \
            "Must not provide empty header_id_regex when calling an RtaxTaxonAssigner; " \
            "leave unset to use the default if in doubt."
read_id_regex = self.Params['read_id_regex']
amplicon_id_regex = self.Params['amplicon_id_regex']
# seq_file = open(seq_path, 'r')
results = rtax.assign_taxonomy(
seq_path, reference_sequences_fp, id_to_taxonomy_fp,
read_1_seqs_fp, read_2_seqs_fp, single_ok=single_ok, no_single_ok_generic=no_single_ok_generic,
header_id_regex=header_id_regex, read_id_regex=read_id_regex,
amplicon_id_regex=amplicon_id_regex, output_fp=result_path,
log_path=log_path, base_tmp_dir=get_qiime_temp_dir())
return results
class UclustConsensusTaxonAssigner(TaxonAssigner):
"""Assign taxonomy using uclust
"""
Name = "UclustConsensusTaxonAssigner"
Application = "uclust"
Citation = """uclust citation: Search and clustering orders of magnitude faster than BLAST. Edgar RC. Bioinformatics. 2010 Oct 1;26(19):2460-1.
uclust-based consensus taxonomy assigner by Greg Caporaso, citation: QIIME allows analysis of high-throughput community sequencing data. Caporaso JG, Kuczynski J, Stombaugh J, Bittinger K, Bushman FD, Costello EK, Fierer N, Pena AG, Goodrich JK, Gordon JI, Huttley GA, Kelley ST, Knights D, Koenig JE, Ley RE, Lozupone CA, McDonald D, Muegge BD, Pirrung M, Reeder J, Sevinsky JR, Turnbaugh PJ, Walters WA, Widmann J, Yatsunenko T, Zaneveld J, Knight R. Nat Methods. 2010 May;7(5):335-6.
"""
def __init__(self, params):
"""Returns a new UclustConsensusTaxonAssigner object with specified params
"""
_params = {
# Required, mapping of reference sequence to taxonomy
'id_to_taxonomy_fp': None,
# Required, reference sequence fasta file
'reference_sequences_fp': None,
# max-accepts parameter, as passed to uclust
'max_accepts': 3,
# Fraction of sequence hits that a taxonomy assignment
# must show up in to be considered the consensus assignment
'min_consensus_fraction': 0.51,
# minimum identity to consider a hit (passed to uclust as --id)
'similarity': 0.90,
# label to apply for queries that cannot be assigned
'unassignable_label': 'Unassigned'
}
_params.update(params)
TaxonAssigner.__init__(self, _params)
if self.Params['id_to_taxonomy_fp'] is None:
raise ValueError(
"id_to_taxonomy_fp must be provided when instantiating a UclustConsensusTaxonAssigner")
if self.Params['reference_sequences_fp'] is None:
raise ValueError(
"reference_sequences_fp must be provided when instantiating a UclustConsensusTaxonAssigner")
id_to_taxonomy_f = open(self.Params['id_to_taxonomy_fp'], 'U')
self.id_to_taxonomy = self._parse_id_to_taxonomy_file(id_to_taxonomy_f)
def __call__(self,
seq_path,
result_path=None,
uc_path=None,
log_path=None,
HALT_EXEC=False):
"""Returns mapping of each seq to (tax, consensus fraction, n)
Results:
If result_path is specified, the results will be written to file
as tab-separated lines of:
query_id <tab> tax <tab> consensus fraction <tab> n
If result_path is None (default), the results will be returned
as a dict of:
{'query_id': (tax, consensus fraction, n)}
In both cases, the values are:
tax: the consensus taxonomy assignment
consensus fraction: the fraction of the assignments for the
query that contained the lowest level tax assignment that is
included in tax (e.g., if the assignment goes to genus level,
this will be the fraction of assignments that had the consensus
genus assignment)
n: the number of assignments that were considered when constructing
the consensus
Parameters:
seq_path: path to file of query sequences
result_path: path where results should be written. If None (default),
returns results as a dict
        uc_path: path where .uc file should be saved. If None (default), and
        log_path is specified, the .uc contents will be appended to the
        log file.
log_path: path where run log should be written. If None (default), no
log file is written.
        HALT_EXEC: debugging parameter. If passed, will exit just before the
        uclust command is issued, and will print the command that would have
        been called to stdout.
"""
# initialize the logger
logger = self._get_logger(log_path)
logger.info(str(self))
# set the user-defined parameters
params = {'--id': self.Params['similarity'],
'--maxaccepts': self.Params['max_accepts']}
# initialize the application controller object
app = Uclust(params,
HALT_EXEC=HALT_EXEC)
# Configure for consensus taxonomy assignment
app.Parameters['--rev'].on()
app.Parameters['--lib'].on(self.Params['reference_sequences_fp'])
app.Parameters['--libonly'].on()
app.Parameters['--allhits'].on()
if uc_path is None:
uc = NamedTemporaryFile(prefix='UclustConsensusTaxonAssigner_',
suffix='.uc',
dir=get_qiime_temp_dir())
uc_path = uc.name
store_uc_in_log = True
else:
store_uc_in_log = False
app_result = app({'--input': seq_path,
'--uc': uc_path})
# get map of query id to all assignments
result = self._uc_to_assignments(app_result['ClusterFile'])
# get consensus assignment
query_to_assignments = self._tax_assignments_to_consensus_assignments(result)
if result_path is not None:
# if the user provided a result_path, write the
# results to file
of = open(result_path, 'w')
for seq_id, (assignment, consensus_fraction, n) in query_to_assignments.iteritems():
assignment_str = ';'.join(assignment)
of.write('%s\t%s\t%1.2f\t%d\n' %
(seq_id, assignment_str, consensus_fraction, n))
of.close()
result = None
logger.info('Result path: %s' % result_path)
else:
# If no result_path was provided, the result dict is
# returned as-is.
logger.info('Result path: None, returned as dict.')
if store_uc_in_log:
            # This is a little hackish, but we don't have a good way to
            # pass the uc_path value through the assign_taxonomy.py
            # script right now, so we write the contents to the
            # user-specified log file (since this is being stored for
            # logging purposes anyway).
app_result['ClusterFile'].seek(0)
logger.info('\n.uc file contents:\n')
for line in app_result['ClusterFile']:
logger.info(line.strip())
return result
def _get_logger(self, log_path=None):
if log_path is not None:
handler = logging.FileHandler(log_path, mode='w')
else:
class NullHandler(logging.Handler):
def emit(self, record):
pass
handler = NullHandler()
logger = logging.getLogger("UclustConsensusTaxonAssigner logger")
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger
def _uc_to_assignments(self, uc):
""" return dict mapping query id to all taxonomy assignments
"""
results = defaultdict(list)
for line in uc:
line = line.strip()
if line.startswith('#') or line == "":
continue
elif line.startswith('H'):
fields = line.split('\t')
query_id = fields[8].split()[0]
subject_id = fields[9].split()[0]
tax = self.id_to_taxonomy[subject_id].split(';')
results[query_id].append(tax)
elif line.startswith('N'):
fields = line.split('\t')
query_id = fields[8].split()[0]
results[query_id].append([])
return results
|
wasade/qiime
|
qiime/assign_taxonomy.py
|
Python
|
gpl-2.0
| 56,210
|
[
"BLAST"
] |
fcfc83d0a138bb303154b4d51232d550e4a15de8c2331eaa76ea8436bbdc50bc
|
# coding: utf8
{
' (leave empty to detach account)': ' (leave empty to detach account)',
' Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': ' Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.',
' by ': ' by ',
' is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities.': ' is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities.',
' on ': ' on ',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'# of Houses Damaged': '# of Houses Damaged',
'# of Houses Destroyed': '# of Houses Destroyed',
'# of International Staff': '# of International Staff',
'# of National Staff': '# of National Staff',
'# of People Affected': 'Nombre de personnes touchées',
'# of People Deceased': '# of People Deceased',
'# of People Injured': '# of People Injured',
'# of Vehicles': '# of Vehicles',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': '%s rows deleted',
'%s rows updated': '%s rows updated',
'(Constraints Only)': '(Constraints Only)',
') & then click on the map below to adjust the Lat/Lon fields:': ') & then click on the map below to adjust the Lat/Lon fields:',
'* Required Fields': '* Required Fields',
'0-15 minutes': '0-15 minutes',
'1 Assessment': '1 Assessment',
'1 location, shorter time, can contain multiple Tasks': '1 location, shorter time, can contain multiple Tasks',
'1-3 days': '1-3 days',
'1. Fill the necessary fields in BLOCK letters.': '1. Fill the necessary fields in BLOCK letters.',
'15-30 minutes': '15-30 minutes',
'2 different options are provided here currently:': '2 different options are provided here currently:',
'2. Always use one box per letter and leave one box space to seperate words.': '2. Always use one box per letter and leave one box space to seperate words.',
'2x4 Car': '2x4 Car',
'30-60 minutes': '30-60 minutes',
'4-7 days': '4-7 days',
'4x4 Car': '4x4 Car',
'8-14 days': '8-14 jours',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.',
'A Warehouse is a physical place to store items.': 'A Warehouse is a physical place to store items.',
'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.': 'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.',
'A brief description of the group (optional)': 'A brief description of the group (optional)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'A file downloaded from a GPS containing a series of geographic points in XML format.',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.',
'A library of digital resources, such as photos, documents and reports': 'A library of digital resources, such as photos, documents and reports',
'A place within a Site like a Shelf, room, bin number etc.': 'A place within a Site like a Shelf, room, bin number etc.',
'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.': 'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.',
'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.': 'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.',
'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.': 'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.',
'A survey series with id %s does not exist. Please go back and create one.': 'A survey series with id %s does not exist. Please go back and create one.',
'ABOUT': 'ABOUT',
'ABOUT THIS MODULE': 'ABOUT THIS MODULE',
'ACCESS DATA': 'ACCESS DATA',
'ANY': 'ANY',
'API is documented here': 'API is documented here',
'Ability to Fill Out Surveys': 'Ability to Fill Out Surveys',
'Ability to customize the list of details tracked at a Shelter': 'Ability to customize the list of details tracked at a Shelter',
'Ability to customize the list of human resource tracked at a Shelter': 'Ability to customize the list of human resource tracked at a Shelter',
'Ability to customize the list of important facilities needed at a Shelter': 'Ability to customize the list of important facilities needed at a Shelter',
'Ability to track partial fulfillment of the request': 'Ability to track partial fulfillment of the request',
'Ability to view Results of Completed and/or partially filled out Surveys': 'Ability to view Results of Completed and/or partially filled out Surveys',
'About': 'About',
'About Sahana': 'About Sahana',
'About Sahana Eden': 'About Sahana Eden',
'About this module': 'About this module',
'Access denied': 'Access denied',
'Accessibility of Affected Location': 'Accessibility of Affected Location',
'Account registered, however registration is still pending approval - please wait until confirmation received.': 'Account registered, however registration is still pending approval - please wait until confirmation received.',
'Acronym': 'Acronym',
"Acronym of the organization's name, eg. IFRC.": "Acronym of the organization's name, eg. IFRC.",
'Actionable': 'Actionable',
'Actionable by all targeted recipients': 'Actionable by all targeted recipients',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>',
'Actioned?': 'Actioned?',
'Active Problems': 'Active Problems',
'Activities': 'Activities',
'Activities are blue.': 'Activities are blue.',
'Activities matching Assessments:': 'Activities matching Assessments:',
'Activities:': 'Activities:',
'Activity': 'Activity',
'Activity Added': 'Activity Added',
'Activity Deleted': 'Activity Deleted',
'Activity Details': "Détails de l'activité",
'Activity Report': 'Activity Report',
'Activity Reports': 'Activity Reports',
'Activity Type': 'Activity Type',
'Activity Updated': 'Activity Updated',
'Add': 'Add',
'Add Activity': 'Add Activity',
'Add Activity Report': 'Add Activity Report',
'Add Activity Type': 'Add Activity Type',
'Add Address': 'Add Address',
'Add Aid Request': 'Add Aid Request',
'Add Assessment': 'Add Assessment',
'Add Assessment Summary': 'Add Assessment Summary',
'Add Baseline': 'Add Baseline',
'Add Baseline Type': 'Add Baseline Type',
'Add Bed Type': 'Add Bed Type',
'Add Bin Type': 'Add Bin Type',
'Add Bins': 'Add Bins',
'Add Budget': 'Add Budget',
'Add Bundle': 'Add Bundle',
'Add Catalog': 'Ajouter catalogue',
'Add Catalog Item': 'Add Catalog Item',
'Add Catalog.': 'Add Catalog.',
'Add Category': 'Add Category',
'Add Category<>Sub-Category<>Catalog Relation': 'Add Category<>Sub-Category<>Catalog Relation',
'Add Cholera Treatment Capability Information': 'Add Cholera Treatment Capability Information',
'Add Cluster': 'Ajouter Cluster',
'Add Cluster Subsector': 'Add Cluster Subsector',
'Add Config': 'Add Config',
'Add Contact': 'Add Contact',
'Add Contact Information': 'Add Contact Information',
'Add Disaster Victims': 'Add Disaster Victims',
'Add Distribution': 'Add Distribution',
'Add Distribution.': 'Add Distribution.',
'Add Donor': 'Ajouter des donateurs',
'Add Feature Class': 'Add Feature Class',
'Add Feature Layer': 'Add Feature Layer',
'Add Flood Report': 'Add Flood Report',
'Add Group': 'Add Group',
'Add Group Member': 'Add Group Member',
'Add Hospital': 'Add Hospital',
'Add Identification Report': 'Add Identification Report',
'Add Identity': 'Add Identity',
'Add Image': 'Add Image',
'Add Impact': 'Add Impact',
'Add Impact Type': 'Add Impact Type',
'Add Incident': 'Add Incident',
'Add Incident Report': 'Add Incident Report',
'Add Inventory Item': 'Add Inventory Item',
'Add Inventory Store': 'Add Inventory Store',
'Add Item': 'Add Item',
'Add Item (s)': 'Add Item (s)',
'Add Item Catalog': 'Add Item Catalog',
'Add Item Catalog ': 'Add Item Catalog ',
'Add Item Catalog Category ': 'Add Item Catalog Category ',
'Add Item Category': 'Add Item Category',
'Add Item Packet': 'Add Item Packet',
'Add Item Sub-Category': 'Add Item Sub-Category',
'Add Item to Shipment': 'Add Item to Shipment',
'Add Key': 'Add Key',
'Add Kit': 'Ajouter Kit',
'Add Layer': 'Add Layer',
'Add Location': 'Add Location',
'Add Locations': 'Add Locations',
'Add Log Entry': 'Add Log Entry',
'Add Member': 'Add Member',
'Add Membership': 'Add Membership',
'Add Message': 'Add Message',
'Add Need': 'Add Need',
'Add Need Type': 'Add Need Type',
'Add New': 'Add New',
'Add New Activity': 'Add New Activity',
'Add New Address': 'Add New Address',
'Add New Aid Request': 'Add New Aid Request',
'Add New Assessment': 'Add New Assessment',
'Add New Assessment Summary': 'Add New Assessment Summary',
'Add New Baseline': 'Add New Baseline',
'Add New Baseline Type': 'Add New Baseline Type',
'Add New Bin': 'Add New Bin',
'Add New Bin Type': 'Add New Bin Type',
'Add New Budget': 'Add New Budget',
'Add New Bundle': 'Add New Bundle',
'Add New Catalog Item': 'Add New Catalog Item',
'Add New Cluster': 'Ajouter nouveau cluster',
'Add New Cluster Subsector': 'Add New Cluster Subsector',
'Add New Config': 'Add New Config',
'Add New Contact': 'Add New Contact',
'Add New Distribution': 'Add New Distribution',
'Add New Distribution Item': 'Add New Distribution Item',
'Add New Document': 'Add New Document',
'Add New Donor': 'Add New Donor',
'Add New Entry': 'Add New Entry',
'Add New Feature Class': 'Add New Feature Class',
'Add New Feature Layer': 'Add New Feature Layer',
'Add New Flood Report': 'Add New Flood Report',
'Add New Group': 'Add New Group',
'Add New Hospital': 'Add New Hospital',
'Add New Identity': 'Add New Identity',
'Add New Image': 'Add New Image',
'Add New Impact': 'Add New Impact',
'Add New Impact Type': 'Add New Impact Type',
'Add New Incident': 'Add New Incident',
'Add New Incident Report': 'Add New Incident Report',
'Add New Inventory Item': 'Add New Inventory Item',
'Add New Inventory Store': 'Add New Inventory Store',
'Add New Item': 'Add New Item',
'Add New Item Catalog': 'Add New Item Catalog',
'Add New Item Catalog Category': 'Add New Item Catalog Category',
'Add New Item Category': 'Add New Item Category',
'Add New Item Packet': 'Add New Item Packet',
'Add New Item Sub-Category': 'Add New Item Sub-Category',
'Add New Item to Kit': 'Add New Item to Kit',
'Add New Key': 'Add New Key',
'Add New Kit': 'Ajouter Kit Nouveau',
'Add New Layer': 'Add New Layer',
'Add New Location': 'Add New Location',
'Add New Log Entry': 'Add New Log Entry',
'Add New Marker': 'Add New Marker',
'Add New Member': 'Add New Member',
'Add New Membership': 'Add New Membership',
'Add New Metadata': 'Add New Metadata',
'Add New Need': 'Add New Need',
'Add New Need Type': 'Add New Need Type',
'Add New Note': 'Add New Note',
'Add New Office': 'Add New Office',
'Add New Organization': 'Add New Organization',
'Add New Peer': 'Add New Peer',
'Add New Photo': 'Add New Photo',
'Add New Position': 'Add New Position',
'Add New Problem': 'Add New Problem',
'Add New Project': 'Add New Project',
'Add New Projection': 'Add New Projection',
'Add New Rapid Assessment': 'Add New Rapid Assessment',
'Add New Received Item': 'Add New Received Item',
'Add New Record': 'Add New Record',
'Add New Report': 'Ajouter un nouveau rapport',
'Add New Request': 'Add New Request',
'Add New Request Item': 'Add New Request Item',
'Add New Resource': 'Add New Resource',
'Add New Response': 'Ajouter un nouveau rapport',
'Add New River': 'Add New River',
'Add New Role': 'Add New Role',
'Add New Role to User': 'Add New Role to User',
'Add New Sector': 'Add New Sector',
'Add New Sent Item': 'Add New Sent Item',
'Add New Setting': 'Add New Setting',
'Add New Shelter': 'Add New Shelter',
'Add New Shelter Service': 'Add New Shelter Service',
'Add New Shelter Type': 'Add New Shelter Type',
'Add New Shipment to Send': 'Add New Shipment to Send',
'Add New Site': 'Add New Site',
'Add New Skill': 'Add New Skill',
'Add New Skill Type': 'Add New Skill Type',
'Add New Solution': 'Add New Solution',
'Add New Staff': 'Add New Staff',
'Add New Staff Type': 'Add New Staff Type',
'Add New Storage Location': 'Add New Storage Location',
'Add New Survey Answer': 'Add New Survey Answer',
'Add New Survey Question': 'Add New Survey Question',
'Add New Survey Section': 'Add New Survey Section',
'Add New Survey Series': 'Add New Survey Series',
'Add New Survey Template': 'Add New Survey Template',
'Add New Task': 'Add New Task',
'Add New Team': 'Add New Team',
'Add New Theme': 'Add New Theme',
'Add New Ticket': 'Add New Ticket',
'Add New Track': 'Add New Track',
'Add New Unit': 'Add New Unit',
'Add New User': 'Add New User',
'Add New User to Role': 'Add New User to Role',
'Add New Warehouse': 'Add New Warehouse',
'Add New Warehouse Item': 'Add New Warehouse Item',
'Add Note': 'Add Note',
'Add Office': 'Add Office',
'Add Organization': 'Ajouter Organisation',
'Add Peer': 'Add Peer',
'Add Person': 'Add Person',
'Add Personal Effects': 'Add Personal Effects',
'Add Photo': 'Add Photo',
'Add Position': 'Add Position',
'Add Problem': 'Add Problem',
'Add Project': 'Add Project',
'Add Projection': 'Add Projection',
'Add Projections': 'Add Projections',
'Add Question': 'Add Question',
'Add Rapid Assessment': 'Add Rapid Assessment',
'Add Recipient': 'Add Recipient',
'Add Recipient Site': 'Add Recipient Site',
'Add Recipient Site.': 'Add Recipient Site.',
'Add Record': 'Add Record',
'Add Recovery Report': 'Add Recovery Report',
'Add Reference Document': 'Add Reference Document',
'Add Report': 'Add Report',
'Add Request': 'Add Request',
'Add Request Detail': 'Add Request Detail',
'Add Request Item': 'Add Request Item',
'Add Resource': 'Add Resource',
'Add Response': 'Add Response',
'Add River': 'Add River',
'Add Role': 'Add Role',
'Add Section': 'Add Section',
'Add Sector': 'Ajouter secteur',
'Add Sender Organization': 'Add Sender Organization',
'Add Sender Site': 'Add Sender Site',
'Add Sender Site.': 'Add Sender Site.',
'Add Service Profile': 'Add Service Profile',
'Add Setting': 'Add Setting',
'Add Shelter': 'Add Shelter',
'Add Shelter Service': 'Add Shelter Service',
'Add Shelter Type': 'Add Shelter Type',
'Add Shipment Transit Log': 'Add Shipment Transit Log',
'Add Shipment/Way Bills': 'Add Shipment/Way Bills',
'Add Site': 'Add Site',
'Add Skill': 'Add Skill',
'Add Skill Type': 'Add Skill Type',
'Add Skill Types': 'Ajouter genres de compétence',
'Add Solution': 'Add Solution',
'Add Staff': 'Add Staff',
'Add Staff Type': 'Add Staff Type',
'Add Status': 'Add Status',
'Add Storage Bin ': 'Add Storage Bin ',
'Add Storage Bin Type': 'Add Storage Bin Type',
'Add Storage Location': 'Add Storage Location',
'Add Storage Location ': 'Add Storage Location ',
'Add Sub-Category': 'Add Sub-Category',
'Add Subscription': 'Add Subscription',
'Add Survey Answer': 'Add Survey Answer',
'Add Survey Question': "Ajouter une question d'enquête",
'Add Survey Section': 'Add Survey Section',
'Add Survey Series': 'Add Survey Series',
'Add Survey Template': 'Add Survey Template',
'Add Task': 'Add Task',
'Add Team': 'Add Team',
'Add Theme': 'Add Theme',
'Add Ticket': 'Add Ticket',
'Add Unit': 'Add Unit',
'Add User': 'Add User',
'Add Volunteer': 'Add Volunteer',
'Add Volunteer Registration': 'Add Volunteer Registration',
'Add Warehouse': 'Add Warehouse',
'Add Warehouse Item': 'Add Warehouse Item',
'Add a Person': 'Add a Person',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.',
'Add a Volunteer': 'Add a Volunteer',
'Add a new Site from where the Item is being sent.': 'Add a new Site from where the Item is being sent.',
'Add a new Site where the Item is being sent to.': 'Add a new Site where the Item is being sent to.',
'Add an Photo.': 'Add an Photo.',
'Add location': 'Add location',
'Add main Item Category.': 'Add main Item Category.',
'Add main Item Sub-Category.': 'Add main Item Sub-Category.',
'Add new Group': 'Add new Group',
'Add new Individual': 'Add new Individual',
'Add new position.': 'Add new position.',
'Add new project.': 'Add new project.',
'Add new staff role.': 'Add new staff role.',
'Add the Storage Bin Type.': 'Add the Storage Bin Type.',
'Add the Storage Location where this bin is located.': 'Add the Storage Location where this bin is located.',
'Add the Storage Location where this this Bin belongs to.': 'Add the Storage Location where this this Bin belongs to.',
'Add the main Warehouse/Site information where this Bin belongs to.': 'Add the main Warehouse/Site information where this Bin belongs to.',
'Add the main Warehouse/Site information where this Item is to be added.': 'Add the main Warehouse/Site information where this Item is to be added.',
'Add the main Warehouse/Site information where this Storage location is.': 'Add the main Warehouse/Site information where this Storage location is.',
'Add the unit of measure if it doesnt exists already.': 'Add the unit of measure if it doesnt exists already.',
'Add to Bundle': 'Add to Bundle',
'Add to Catalog': 'Add to Catalog',
'Add to budget': 'Add to budget',
'Add/Edit/Remove Layers': 'Add/Edit/Remove Layers',
'Additional Beds / 24hrs': 'Additional Beds / 24hrs',
'Additional Comments': 'Additional Comments',
"Additional quantity quantifier – e.g. '4x5'.": "Additional quantity quantifier – e.g. '4x5'.",
'Address': 'Address',
'Address Details': 'Address Details',
'Address Type': 'Address Type',
'Address added': 'Address added',
'Address deleted': 'Adresse supprimé',
'Address updated': 'Address updated',
'Addresses': 'Addresses',
'Adequate': 'Adequate',
'Adjust Item(s) Quantity': 'Adjust Item(s) Quantity',
'Adjust Items due to Theft/Loss': 'Adjust Items due to Theft/Loss',
'Admin': 'Admin',
'Admin Email': 'Admin Email',
'Admin Name': 'Admin Name',
'Admin Tel': 'Admin Tel',
'Administration': 'Administration',
'Administrator': 'Administrator',
'Admissions/24hrs': 'Admissions/24hrs',
'Adolescent (12-20)': 'Adolescent (12-20)',
'Adult (21-50)': 'Adult (21-50)',
'Adult ICU': 'Adult ICU',
'Adult Psychiatric': 'Adult Psychiatric',
'Adult female': 'Adult female',
'Adult male': 'Adult male',
'Advanced Bin Search': 'Advanced Bin Search',
'Advanced Catalog Search': 'Advanced Catalog Search',
'Advanced Category Search': 'Advanced Category Search',
'Advanced Item Search': 'Advanced Item Search',
'Advanced Location Search': 'Recherche avancée Localisation',
'Advanced Site Search': 'Advanced Site Search',
'Advanced Sub-Category Search': 'Advanced Sub-Category Search',
'Advanced Unit Search': 'Advanced Unit Search',
'Advanced:': 'Advanced:',
'Advisory': 'Advisory',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.',
'Age Group': 'Age Group',
'Age group': 'Age group',
'Age group does not match actual age.': 'Age group does not match actual age.',
'Aggravating factors': 'Aggravating factors',
'Aggregate Items': 'Aggregate Items',
'Agriculture': 'Agriculture',
'Aid Request': 'Aid Request',
'Aid Request Details': 'Aid Request Details',
'Aid Request added': 'Aid Request added',
'Aid Request deleted': 'Aid Request deleted',
'Aid Request updated': 'Aid Request updated',
'Aid Requests': 'Aid Requests',
'Air Transport Service': 'Air Transport Service',
'Air tajin': 'Air tajin',
'Aircraft Crash': 'Aircraft Crash',
'Aircraft Hijacking': 'Aircraft Hijacking',
'Airport Closure': 'Airport Closure',
'Airspace Closure': 'Airspace Closure',
'Alcohol': 'Alcohol',
'Alert': 'Alert',
'All': 'All',
'All Inbound & Outbound Messages are stored here': 'All Inbound & Outbound Messages are stored here',
'All Locations': 'All Locations',
'All Requested Items': 'All Requested Items',
'All Resources': 'All Resources',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.',
'Allowed to push': 'Allowed to push',
'Allows a Budget to be drawn up': 'Allows a Budget to be drawn up',
'Allows authorized users to control which layers are available to the situation map.': 'Cela permet aux utilisateurs de contrôler les couches sont disponibles à la carte de situation.',
'Ambulance Service': 'Ambulance Service',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.',
'Analysis of Completed Surveys': 'Analysis of Completed Surveys',
'Animal Die Off': 'Animal Die Off',
'Animal Feed': 'Animal Feed',
'Animals': 'Animals',
'Answer Choices (One Per Line)': 'Choix de réponse (une par ligne)',
'Anthropolgy': 'Anthropolgy',
'Antibiotics available': 'Antibiotics available',
'Antibiotics needed per 24h': 'Antibiotics needed per 24h',
'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.': 'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.',
'Any comments about this sync partner.': 'Any comments about this sync partner.',
'Apparent Age': 'Apparent Age',
'Apparent Gender': 'Apparent Gender',
'Archive not Delete': 'Archive not Delete',
'Arctic Outflow': 'Arctic Outflow',
'Area': 'Area',
'Assessment': 'Assessment',
'Assessment Details': 'Assessment Details',
'Assessment Reported': 'Assessment Reported',
'Assessment Summaries': 'Assessment Summaries',
'Assessment Summary Details': 'Assessment Summary Details',
'Assessment Summary added': 'Assessment Summary added',
'Assessment Summary deleted': 'Assessment Summary deleted',
'Assessment Summary updated': 'Assessment Summary updated',
'Assessment Type': 'Assessment Type',
'Assessment added': 'Assessment added',
'Assessment deleted': 'Assessment deleted',
'Assessment updated': 'Assessment updated',
'Assessments': 'Assessments',
'Assessments Needs vs. Activities': 'Assessments Needs vs. Activities',
'Assessments and Activities': 'Assessments and Activities',
'Assessments are shown as green, yellow, orange, red.': 'Assessments are shown as green, yellow, orange, red.',
'Assessments are structured reports done by Professional Organizations': 'Assessments are structured reports done by Professional Organizations',
'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments': 'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments',
'Assessments:': 'Assessments:',
'Assessor': 'Assessor',
'Assign Storage Location': 'Assign Storage Location',
'Assign to Org.': 'Assign to Org.',
'Assigned': 'Assigned',
'Assigned To': 'Assigned To',
'Assigned to': 'Assigned to',
'Assistant': 'Assistant',
'At/Visited Location (not virtual)': 'At/Visited Location (not virtual)',
'Attend to information sources as described in <instruction>': 'Attend to information sources as described in <instruction>',
'Attribution': 'Attribution',
'Audit Read': 'Audit Read',
'Audit Write': 'Audit Write',
"Authenticate system's Twitter account": "Authenticate system's Twitter account",
'Author': 'Author',
'Automotive': 'Automotive',
'Available Beds': 'Available Beds',
'Available Messages': 'Available Messages',
'Available Records': 'Available Records',
'Available databases and tables': 'Available databases and tables',
'Available from': 'Available from',
'Available in Viewer?': 'Available in Viewer?',
'Available until': 'Available until',
'Availability': 'Availability',
'Avalanche': 'Avalanche',
'Avoid the subject event as per the <instruction>': 'Avoid the subject event as per the <instruction>',
'Baby And Child Care': 'Baby And Child Care',
'Background Colour': 'Background Colour',
'Background Colour for Text blocks': 'Background Colour for Text blocks',
'Bahai': 'Bahai',
'Baldness': 'Baldness',
'Balochi': 'Balochi',
'Banana': 'Banana',
'Bank/micro finance': 'Bank/micro finance',
'Base Layer?': 'Base Layer?',
'Base Unit': 'Base Unit',
'Baseline Number of Beds': 'Baseline Number of Beds',
'Baseline Type': 'Baseline Type',
'Baseline Type Details': 'Baseline Type Details',
'Baseline Type added': 'Baseline Type added',
'Baseline Type deleted': 'Baseline Type deleted',
'Baseline Type updated': 'Baseline Type updated',
'Baseline Types': 'Baseline Types',
'Baseline added': 'Baseline added',
'Baseline deleted': 'Baseline deleted',
'Baseline number of beds of that type in this unit.': 'Baseline number of beds of that type in this unit.',
'Baseline updated': 'Baseline updated',
'Baselines': 'Baselines',
'Baselines Details': 'Baselines Details',
'Basic': 'Basic',
'Basic Assess.': 'Basic Assess.',
'Basic Assessment': 'Basic Assessment',
'Basic Assessment Reported': 'Basic Assessment Reported',
'Basic Details': 'Basic Details',
'Basic information on the requests and donations, such as category, the units, contact details and the status.': 'Basic information on the requests and donations, such as category, the units, contact details and the status.',
'Basic reports on the Shelter and drill-down by region': 'Basic reports on the Shelter and drill-down by region',
'Baud': 'Baud',
'Baud rate to use for your modem - The default is safe for most cases': 'Baud rate to use for your modem - The default is safe for most cases',
'Bed Capacity': 'Bed Capacity',
'Bed Capacity per Unit': 'Bed Capacity per Unit',
'Bed Type': 'Bed Type',
'Bed type already registered': 'Bed type already registered',
'Beneficiary Type': 'Beneficiary Type',
'Biological Hazard': 'Biological Hazard',
'Biscuits': 'Biscuits',
'Blizzard': 'Blizzard',
'Blood Type (AB0)': 'Blood Type (AB0)',
'Blowing Snow': 'Blowing Snow',
'Boat': 'Boat',
'Bodies found': 'Bodies found',
'Bodies recovered': 'Bodies recovered',
'Body': 'Body',
'Body Recovery Reports': 'Body Recovery Reports',
'Body Recovery Request': 'Body Recovery Request',
'Body Recovery Requests': 'Body Recovery Requests',
'Bomb': 'Bomb',
'Bomb Explosion': 'Bomb Explosion',
'Bomb Threat': 'Bomb Threat',
'Border Colour for Text blocks': 'Border Colour for Text blocks',
'Bounding Box Insets': 'Bounding Box Insets',
'Bounding Box Size': 'Bounding Box Size',
'Bricks': 'Bricks',
'Bridge Closed': 'Bridge Closed',
'Bucket': 'Bucket',
'Buddhist': 'Buddhist',
'Budget': 'Budget',
'Budget Details': 'Budget Details',
'Budget Updated': 'Budget Updated',
'Budget added': 'Budget added',
'Budget deleted': 'Budget deleted',
'Budget updated': 'Budget updated',
'Budgeting Module': 'Budgeting Module',
'Budgets': 'Budgets',
'Buffer': 'Buffer',
'Building Aide': 'Building Aide',
'Building Collapsed': 'Building Collapsed',
'Built using the Template agreed by a group of NGOs working together as the': 'Built using the Template agreed by a group of NGOs working together as the',
'Bulk Uploader': 'Bulk Uploader',
'Bundle': 'Bundle',
'Bundle Contents': 'Bundle Contents',
'Bundle Details': 'Bundle Details',
'Bundle Updated': 'Bundle Updated',
'Bundle added': 'Bundle added',
'Bundle deleted': 'Bundle deleted',
'Bundle updated': 'Bundle updated',
'Bundles': 'Bundles',
'Burn': 'Burn',
'Burn ICU': 'Burn ICU',
'Burned/charred': 'Burned/charred',
'By Warehouse': 'By Warehouse',
'CBA Women': 'CBA Women',
'CSS file %s not writable - unable to apply theme!': 'CSS file %s not writable - unable to apply theme!',
'Calculate': 'Calculate',
'Camp': 'Camp',
'Camp Coordination/Management': 'Camp Coordination/Management',
'Can users register themselves for authenticated login access?': 'Can users register themselves for authenticated login access?',
"Can't import tweepy": "Can't import tweepy",
'Cancel': 'Cancel',
'Cancelled': 'Cancelled',
'Candidate Matches for Body %s': 'Candidate Matches for Body %s',
'Canned Fish': 'Canned Fish',
'Cannot be empty': 'Cannot be empty',
'Cannot delete whilst there are linked records. Please delete linked records first.': 'Cannot delete whilst there are linked records. Please delete linked records first.',
'Capacity (Max Persons)': 'Capacity (Max Persons)',
'Capacity (W x D X H)': 'Capacity (W x D x H)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)',
'Capture Information on each disaster victim': 'Capture Information on each disaster victim',
'Capturing organizational information of a relief organization and all the projects they have in the region': 'Capturing organizational information of a relief organization and all the projects they have in the region',
'Capturing the essential services each Volunteer is providing and where': 'Capturing the essential services each Volunteer is providing and where',
'Capturing the projects each organization is providing and where': 'Capturing the projects each organization is providing and where',
'Cardiology': 'Cardiology',
'Cassava': 'Cassava',
'Casual Labor': 'Casual Labor',
'Catalog': 'Catalog',
'Catalog Item': 'Catalog Item',
'Catalog Item added': 'Catalog Item added',
'Catalog Item deleted': 'Catalog Item deleted',
'Catalog Item updated': 'Catalog Item updated',
'Catalog Items': 'Catalog Items',
'Catalog Name': 'Catalog Name',
'Category': 'Category',
'Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog Relation',
'Category<>Sub-Category<>Catalog Relation added': 'Category<>Sub-Category<>Catalog Relation added',
'Category<>Sub-Category<>Catalog Relation deleted': 'Category<>Sub-Category<>Catalog Relation deleted',
'Category<>Sub-Category<>Catalog Relation updated': 'Category<>Sub-Category<>Catalog Relation updated',
'Central point to record details on People': 'Central point to record details on People',
'Change Password': 'Change Password',
'Check for errors in the URL, maybe the address was mistyped.': 'Check for errors in the URL, maybe the address was mistyped.',
'Check if the URL is pointing to a directory instead of a webpage.': 'Check if the URL is pointing to a directory instead of a webpage.',
'Check outbox for the message status': 'Check outbox for the message status',
'Check to delete': 'Check to delete',
'Check-in': 'Check-in',
'Check-out': 'Check-out',
'Checklist': 'Checklist',
'Checklist created': 'Checklist created',
'Checklist deleted': 'Checklist deleted',
'Checklist of Operations': 'Checklist of Operations',
'Checklist updated': 'Checklist updated',
'Chemical Hazard': 'Chemical Hazard',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack',
'Chicken': 'Chicken',
'Child': 'Child',
'Child (2-11)': 'Child (2-11)',
'Child (< 18 yrs)': 'Child (< 18 yrs)',
'Child Abduction Emergency': 'Child Abduction Emergency',
'Child headed households (<18 yrs)': 'Child headed households (<18 yrs)',
'Children (2-5 years)': 'Children (2-5 years)',
'Children (5-15 years)': 'Children (5-15 years)',
'Children (< 2 years)': 'Children (< 2 years)',
'Children not enrolled in new school': 'Children not enrolled in new school',
'Chinese (Taiwan)': 'Chinese (Taiwan)',
'Cholera Treatment': 'Cholera Treatment',
'Cholera Treatment Capability': 'Cholera Treatment Capability',
'Cholera Treatment Center': 'Cholera Treatment Center',
'Cholera-Treatment-Center': 'Cholera-Treatment-Center',
'Choosing Skill and Resources of Volunteers': 'Choosing Skill and Resources of Volunteers',
'Christian': 'Christian',
'Church': 'Church',
'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': 'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.',
'City': 'City',
'Civil Emergency': 'Civil Emergency',
'Clear Selection': 'Clear Selection',
"Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.": "Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.",
'Click on the link ': 'Click on the link ',
'Clinical Laboratory': 'Clinical Laboratory',
'Clinical Operations': 'Clinical Operations',
'Clinical Status': 'Clinical Status',
'Close map': 'Close map',
'Closed': 'Closed',
'Closure': 'Closure',
'Clothing': 'Clothing',
'Cluster': 'Cluster',
'Cluster Details': 'Cluster Details',
'Cluster Distance': 'Cluster Distance',
'Cluster Subsector': 'Cluster Subsector',
'Cluster Subsector Details': 'Cluster Subsector Details',
'Cluster Subsector added': 'Cluster Subsector added',
'Cluster Subsector deleted': 'Cluster Subsector deleted',
'Cluster Subsector updated': 'Cluster Subsector updated',
'Cluster Subsectors': 'Cluster Subsectors',
'Cluster Threshold': 'Cluster Threshold',
'Cluster added': 'Cluster added',
'Cluster deleted': 'Cluster deleted',
'Cluster updated': 'Cluster updated',
'Cluster(s)': 'Cluster(s)',
'Clusters': 'Clusters',
'Code': 'Code',
'Cold Wave': 'Cold Wave',
'Collective center': 'Collective center',
'Colour for Underline of Subheadings': 'Colour for Underline of Subheadings',
'Colour of Buttons when hovering': 'Colour of Buttons when hovering',
'Colour of bottom of Buttons when not pressed': 'Colour of bottom of Buttons when not pressed',
'Colour of bottom of Buttons when pressed': 'Colour of bottom of Buttons when pressed',
'Colour of dropdown menus': 'Colour of dropdown menus',
'Colour of selected Input fields': 'Colour of selected Input fields',
'Colour of selected menu items': 'Colour of selected menu items',
'Column Choices (One Per Line': 'Column Choices (One Per Line)',
'Combined Method': 'Combined Method',
'Come back later.': 'Come back later.',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Come back later. Everyone visiting this site is probably experiencing the same problem as you.',
'Comments': 'Comments',
'Commiting a changed spreadsheet to the database': 'Committing a changed spreadsheet to the database',
'Communication problems': 'Communication problems',
'Community Centre': 'Community Centre',
'Community Health Center': 'Community Health Center',
'Community Member': 'Community Member',
'Complete Unit Label for e.g. meter for m.': 'Complete Unit Label for e.g. meter for m.',
'Completed': 'Completed',
'Complexion': 'Complexion',
'Compose': 'Compose',
'Compromised': 'Compromised',
'Config': 'Config',
'Config added': 'Config added',
'Config deleted': 'Config deleted',
'Config updated': 'Config updated',
'Configs': 'Configs',
'Configure Run-time Settings': 'Configure Run-time Settings',
'Confirmed': 'Confirmed',
'Confirmed Incidents': 'Confirmed Incidents',
'Conflict Details': 'Conflict Details',
'Conflict Resolution': 'Conflict Resolution',
'Consumable': 'Consumable',
'Contact': 'Contact',
'Contact Data': 'Contact Data',
'Contact Details': 'Contact Details',
'Contact Information': 'Contact Information',
'Contact Method': 'Contact Method',
'Contact Person': 'Contact Person',
'Contact details': 'Contact details',
'Contact information added': 'Contact information added',
'Contact information deleted': 'Contact information deleted',
'Contact information updated': 'Contact information updated',
'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': 'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.',
'Contact person(s) in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': 'Contact person(s) in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.',
'Contact us': 'Contact us',
'Contacts': 'Contacts',
'Contents': 'Contents',
'Contradictory values!': 'Contradictory values!',
'Contributor': 'Contributor',
'Conversion Tool': 'Conversion Tool',
'Cooking NFIs': 'Cooking NFIs',
'Cooking Oil': 'Cooking Oil',
'Coordinate Conversion': 'Coordinate Conversion',
'Copy': 'Copy',
'Copy any data from the one to be deleted into the one to keep': 'Copy any data from the one to be deleted into the one to keep',
'Corn': 'Corn',
'Cost Type': 'Cost Type',
'Cost per Megabyte': 'Cost per Megabyte',
'Cost per Minute': 'Cost per Minute',
"Couldn't import tweepy library": "Couldn't import tweepy library",
'Country': 'Country',
'Country of Residence': 'Country of Residence',
'Create & manage Distribution groups to receive Alerts': 'Create & manage Distribution groups to receive Alerts',
'Create Checklist': 'Create Checklist',
'Create Group Entry': 'Create Group Entry',
'Create Impact Assessment': 'Create Impact Assessment',
'Create Import Job': 'Create Import Job',
'Create Mobile Impact Assessment': 'Create Mobile Impact Assessment',
'Create New Import Job': 'Create New Import Job',
'Create Rapid Assessment': 'Create Rapid Assessment',
'Create Request': 'Create Request',
'Create Task': 'Create Task',
'Create a group entry in the registry.': 'Create a group entry in the registry.',
'Create, enter, and manage surveys.': 'Create, enter, and manage surveys.',
'Creation of Surveys': 'Creation of Surveys',
'Crime': 'Crime',
'Criteria': 'Criteria',
'Currency': 'Currency',
'Current Group Members': 'Current Group Members',
'Current Identities': 'Current Identities',
'Current Location': 'Current Location',
'Current Log Entries': 'Current Log Entries',
'Current Memberships': 'Current Memberships',
'Current Notes': 'Current Notes',
'Current Registrations': 'Current Registrations',
'Current Status': 'Current Status',
'Current Team Members': 'Current Team Members',
'Current Twitter account': 'Current Twitter account',
'Current number of patients': 'Current number of patients',
'Current problems, categories': 'Current problems, categories',
'Current problems, details': 'Current problems, details',
'Current request': 'Current request',
'Current response': 'Current response',
'Current session': 'Current session',
'Custom Database Resource (e.g., anything defined as a resource in Sahana)': 'Custom Database Resource (e.g., anything defined as a resource in Sahana)',
'Customisable category of aid': 'Customisable category of aid',
'DECISION': 'DECISION',
'DNA Profile': 'DNA Profile',
'DNA Profiling': 'DNA Profiling',
'Daily': 'Daily',
'Dam Overflow': 'Dam Overflow',
'Dangerous Person': 'Dangerous Person',
'Data import policy': 'Data import policy',
'Data uploaded': 'Data uploaded',
'Database': 'Database',
'Date': 'Date',
'Date & Time': 'Date & Time',
'Date Requested': 'Date Requested',
'Date Required': 'Date Required',
'Date and Time': 'Date and Time',
'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': 'Date and Time of Goods receipt. By default this shows the current time, but it can be modified by editing in the drop down list.',
'Date and time this report relates to.': 'Date and time this report relates to.',
'Date of Birth': 'Date of Birth',
'Date of Latest Information on Beneficiaries Reached': 'Date of Latest Information on Beneficiaries Reached',
'Date of Report': 'Date of Report',
'Date/Time': 'Date/Time',
'Date/Time of Find': 'Date/Time of Find',
'Date/Time of disappearance': 'Date/Time of disappearance',
'De-duplicator': 'De-duplicator',
'Dead Body Details': 'Dead Body Details',
'Dead Body Reports': 'Dead Body Reports',
'Deaths in the past 24h': 'Deaths in the past 24h',
'Deaths/24hrs': 'Deaths/24hrs',
'Debug': 'Debug',
'Decimal Degrees': 'Decimal Degrees',
'Decomposed': 'Decomposed',
'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.',
'Default Marker': 'Default Marker',
'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.',
'Default synchronization policy': 'Default synchronization policy',
'Defaults': 'Defaults',
'Defaults updated': 'Defaults updated',
'Defecation area for animals': 'Defecation area for animals',
'Defines the icon used for display of features on handheld GPS.': 'Defines the icon used for display of features on handheld GPS.',
'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.': 'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.',
'Defines the marker used for display & the attributes visible in the popup.': 'Defines the marker used for display & the attributes visible in the popup.',
'Degrees must be a number between -180 and 180': 'Degrees must be a number between -180 and 180',
'Dehydration': 'Dehydration',
'Delete': 'Delete',
'Delete Aid Request': 'Delete Aid Request',
'Delete Assessment': 'Delete Assessment',
'Delete Assessment Summary': 'Delete Assessment Summary',
'Delete Baseline': 'Delete Baseline',
'Delete Baseline Type': 'Delete Baseline Type',
'Delete Budget': 'Delete Budget',
'Delete Bundle': 'Delete Bundle',
'Delete Catalog Item': 'Delete Catalog Item',
'Delete Cluster': 'Delete Cluster',
'Delete Cluster Subsector': 'Delete Cluster Subsector',
'Delete Config': 'Delete Config',
'Delete Distribution': 'Delete Distribution',
'Delete Distribution Item': 'Delete Distribution Item',
'Delete Document': 'Delete Document',
'Delete Donor': 'Delete Donor',
'Delete Entry': 'Delete Entry',
'Delete Feature Class': 'Delete Feature Class',
'Delete Feature Layer': 'Delete Feature Layer',
'Delete Group': 'Delete Group',
'Delete Hospital': 'Delete Hospital',
'Delete Image': 'Delete Image',
'Delete Impact': 'Delete Impact',
'Delete Impact Type': 'Delete Impact Type',
'Delete Incident': 'Delete Incident',
'Delete Incident Report': 'Delete Incident Report',
'Delete Inventory Item': 'Delete Inventory Item',
'Delete Inventory Store': 'Delete Inventory Store',
'Delete Item': 'Delete Item',
'Delete Item Category': 'Delete Item Category',
'Delete Item Packet': 'Delete Item Packet',
'Delete Key': 'Delete Key',
'Delete Kit': 'Delete Kit',
'Delete Layer': 'Delete Layer',
'Delete Location': 'Delete Location',
'Delete Marker': 'Delete Marker',
'Delete Membership': 'Delete Membership',
'Delete Message': 'Delete Message',
'Delete Metadata': 'Delete Metadata',
'Delete Need': 'Delete Need',
'Delete Need Type': 'Delete Need Type',
'Delete Office': 'Delete Office',
'Delete Old': 'Delete Old',
'Delete Organization': 'Delete Organization',
'Delete Peer': 'Delete Peer',
'Delete Person': 'Delete Person',
'Delete Photo': 'Delete Photo',
'Delete Project': 'Delete Project',
'Delete Projection': 'Delete Projection',
'Delete Rapid Assessment': 'Delete Rapid Assessment',
'Delete Received Item': 'Delete Received Item',
'Delete Received Shipment': 'Delete Received Shipment',
'Delete Record': 'Delete Record',
'Delete Recovery Report': 'Delete Recovery Report',
'Delete Report': 'Delete Report',
'Delete Request': 'Delete Request',
'Delete Request Item': 'Delete Request Item',
'Delete Resource': 'Delete Resource',
'Delete Section': 'Delete Section',
'Delete Sector': 'Delete Sector',
'Delete Sent Item': 'Delete Sent Item',
'Delete Sent Shipment': 'Delete Sent Shipment',
'Delete Service Profile': 'Delete Service Profile',
'Delete Setting': 'Delete Setting',
'Delete Skill': 'Delete Skill',
'Delete Skill Type': 'Delete Skill Type',
'Delete Staff Type': 'Delete Staff Type',
'Delete Status': 'Delete Status',
'Delete Subscription': 'Delete Subscription',
'Delete Survey Answer': 'Delete Survey Answer',
'Delete Survey Question': 'Delete Survey Question',
'Delete Survey Section': 'Delete Survey Section',
'Delete Survey Series': 'Delete Survey Series',
'Delete Survey Template': 'Delete Survey Template',
'Delete Unit': 'Delete Unit',
'Delete User': 'Delete User',
'Delete Volunteer': 'Delete Volunteer',
'Delete Warehouse': 'Delete Warehouse',
'Delete Warehouse Item': 'Delete Warehouse Item',
'Delete from Server?': 'Delete from Server?',
'Delivered': 'Delivered',
'Delphi Decision Maker': 'Delphi Decision Maker',
'Demographic': 'Demographic',
'Demonstrations': 'Demonstrations',
'Dental Examination': 'Dental Examination',
'Dental Profile': 'Dental Profile',
'Department/Unit Name': 'Department/Unit Name',
'Deployment': 'Deployment',
'Describe the condition of the roads to your hospital.': 'Describe the condition of the roads to your hospital.',
'Describe the procedure which this record relates to (e.g. "medical examination")': 'Describe the procedure which this record relates to (e.g. "medical examination")',
'Description': 'Description',
'Description of Bin Type': 'Description of Bin Type',
'Description of Contacts': 'Description of Contacts',
'Description of defecation area': 'Description of defecation area',
'Description of drinking water source': 'Description of drinking water source',
'Description of sanitary water source': 'Description of sanitary water source',
'Description of water source before the disaster': 'Description of water source before the disaster',
'Descriptive Text (e.g., Prose, etc)': 'Descriptive Text (e.g., Prose, etc)',
'Designated for': 'Designated for',
'Desire to remain with family': 'Desire to remain with family',
'Destination': 'Destination',
"Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.": "Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.",
'Details': 'Details',
'Dialysis': 'Dialysis',
'Diarrhea': 'Diarrhea',
'Dignitary Visit': 'Dignitary Visit',
'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': 'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.',
'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': 'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.',
'Direction': 'Direction',
'Disabled': 'Disabled',
'Disabled?': 'Disabled?',
'Disaster Victim Identification': 'Disaster Victim Identification',
'Disaster Victim Registry': 'Disaster Victim Registry',
'Disaster clean-up/repairs': 'Disaster clean-up/repairs',
'Discharge (cusecs)': 'Discharge (cusecs)',
'Discharges/24hrs': 'Discharges/24hrs',
'Discussion Forum': 'Discussion Forum',
'Discussion Forum on item': 'Discussion Forum on item',
'Disease vectors': 'Disease vectors',
'Dispatch': 'Dispatch',
'Dispatch Items': 'Dispatch Items',
'Dispensary': 'Dispensary',
'Displaced': 'Displaced',
'Displaced Populations': 'Displaced Populations',
'Display Polygons?': 'Display Polygons?',
'Display Routes?': 'Display Routes?',
'Display Tracks?': 'Display Tracks?',
'Display Waypoints?': 'Display Waypoints?',
'Dispose': 'Dispose',
'Dispose Expired/Unusable Items': 'Dispose Expired/Unusable Items',
'Distance between defecation area and water source': 'Distance between defecation area and water source',
'Distance(Kms)': 'Distance (Kms)',
'Distribution': 'Distribution',
'Distribution Details': 'Distribution Details',
'Distribution Item': 'Distribution Item',
'Distribution Item Details': 'Distribution Item Details',
'Distribution Item added': 'Distribution Item added',
'Distribution Item deleted': 'Distribution Item deleted',
'Distribution Item updated': 'Distribution Item updated',
'Distribution Items': 'Distribution Items',
'Distribution added': 'Distribution added',
'Distribution deleted': 'Distribution deleted',
'Distribution groups': 'Distribution groups',
'Distribution updated': 'Distribution updated',
'Distributions': 'Distributions',
'District': 'District',
'Do you really want to delete these records?': 'Do you really want to delete these records?',
'Do you want to over-write the file metadata with new default values?': 'Do you want to over-write the file metadata with new default values?',
'Do you want to receive this shipment?': 'Do you want to receive this shipment?',
'Do you want to send this shipment?': 'Do you want to send this shipment?',
'Document': 'Document',
'Document Details': 'Document Details',
'Document Scan': 'Document Scan',
'Document added': 'Document added',
'Document deleted': 'Document deleted',
'Document updated': 'Document updated',
'Documents': 'Documents',
'Documents and Photos': 'Documents and Photos',
'Does this facility provide a cholera treatment center?': 'Does this facility provide a cholera treatment center?',
'Doing nothing (no structured activity)': 'Doing nothing (no structured activity)',
'Dollars': 'Dollars',
'Domestic chores': 'Domestic chores',
'Donation Phone #': 'Donation Phone #',
'Donor': 'Donor',
'Donor Details': 'Donor Details',
'Donor added': 'Donor added',
'Donor deleted': 'Donor deleted',
'Donor updated': 'Donor updated',
'Donors': 'Donors',
'Donors Report': 'Donors Report',
'Door frame': 'Door frame',
'Draft': 'Draft',
'Drainage': 'Drainage',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'Drawing up a Budget for Staff & Equipment across various Locations.',
'Drill Down by Group': 'Drill Down by Group',
'Drill Down by Incident': 'Drill Down by Incident',
'Drill Down by Shelter': 'Drill Down by Shelter',
'Driving License': 'Driving License',
'Drought': 'Drought',
'Drugs': 'Drugs',
'Dug Well': 'Dug Well',
'Duplicate?': 'Duplicate?',
'Duration': 'Duration',
'Dust Storm': 'Dust Storm',
'Dwellings': 'Dwellings',
'E-mail': 'E-mail',
'EMS Reason': 'EMS Reason',
'EMS Status': 'EMS Status',
'EMS Status Reason': 'EMS Status Reason',
'EMS Traffic Status': 'EMS Traffic Status',
'ER Status': 'ER Status',
'ER Status Reason': 'ER Status Reason',
'Early Recovery': 'Early Recovery',
'Earthquake': 'Earthquake',
'Edit': 'Edit',
'Edit Activity': 'Edit Activity',
'Edit Address': 'Edit Address',
'Edit Aid Request': 'Edit Aid Request',
'Edit Application': 'Edit Application',
'Edit Assessment': 'Edit Assessment',
'Edit Assessment Summary': 'Edit Assessment Summary',
'Edit Baseline': 'Edit Baseline',
'Edit Baseline Type': 'Edit Baseline Type',
'Edit Budget': 'Edit Budget',
'Edit Bundle': 'Edit Bundle',
'Edit Catalog Item': 'Edit Catalog Item',
'Edit Category<>Sub-Category<>Catalog Relation': 'Edit Category<>Sub-Category<>Catalog Relation',
'Edit Cluster': 'Edit Cluster',
'Edit Cluster Subsector': 'Edit Cluster Subsector',
'Edit Config': 'Edit Config',
'Edit Contact': 'Edit Contact',
'Edit Contact Information': 'Edit Contact Information',
'Edit Contents': 'Edit Contents',
'Edit Defaults': 'Edit Defaults',
'Edit Description': 'Edit Description',
'Edit Details': 'Edit Details',
'Edit Disaster Victims': 'Edit Disaster Victims',
'Edit Distribution': 'Edit Distribution',
'Edit Distribution Item': 'Edit Distribution Item',
'Edit Document': 'Edit Document',
'Edit Donor': 'Edit Donor',
'Edit Email Settings': 'Edit Email Settings',
'Edit Feature Class': 'Edit Feature Class',
'Edit Feature Layer': 'Edit Feature Layer',
'Edit Flood Report': 'Edit Flood Report',
'Edit Gateway Settings': 'Edit Gateway Settings',
'Edit Group': 'Edit Group',
'Edit Hospital': 'Edit Hospital',
'Edit Identification Report': 'Edit Identification Report',
'Edit Identity': 'Edit Identity',
'Edit Image': 'Edit Image',
'Edit Image Details': 'Edit Image Details',
'Edit Impact': 'Edit Impact',
'Edit Impact Type': 'Edit Impact Type',
'Edit Incident': 'Edit Incident',
'Edit Incident Report': 'Edit Incident Report',
'Edit Inventory Item': 'Edit Inventory Item',
'Edit Inventory Store': 'Edit Inventory Store',
'Edit Item': 'Edit Item',
'Edit Item Catalog': 'Edit Item Catalog',
'Edit Item Catalog Categories': 'Edit Item Catalog Categories',
'Edit Item Category': 'Edit Item Category',
'Edit Item Packet': 'Edit Item Packet',
'Edit Item Sub-Categories': 'Edit Item Sub-Categories',
'Edit Key': 'Edit Key',
'Edit Kit': 'Edit Kit',
'Edit Layer': 'Edit Layer',
'Edit Location': 'Edit Location',
'Edit Log Entry': 'Edit Log Entry',
'Edit Map Services': 'Edit Map Services',
'Edit Marker': 'Edit Marker',
'Edit Membership': 'Edit Membership',
'Edit Message': 'Edit Message',
'Edit Messaging Settings': 'Edit Messaging Settings',
'Edit Metadata': 'Edit Metadata',
'Edit Modem Settings': 'Edit Modem Settings',
'Edit Need': 'Edit Need',
'Edit Need Type': 'Edit Need Type',
'Edit Note': 'Edit Note',
'Edit Office': 'Edit Office',
'Edit Options': 'Edit Options',
'Edit Organization': 'Edit Organization',
'Edit Parameters': 'Edit Parameters',
'Edit Peer': 'Edit Peer',
'Edit Peer Details': 'Edit Peer Details',
'Edit Person Details': 'Edit Person Details',
'Edit Personal Effects Details': 'Edit Personal Effects Details',
'Edit Photo': 'Edit Photo',
'Edit Pledge': 'Edit Pledge',
'Edit Position': 'Edit Position',
'Edit Problem': 'Edit Problem',
'Edit Project': 'Edit Project',
'Edit Projection': 'Edit Projection',
'Edit Rapid Assessment': 'Edit Rapid Assessment',
'Edit Received Item': 'Edit Received Item',
'Edit Received Shipment': 'Edit Received Shipment',
'Edit Record': 'Edit Record',
'Edit Recovery Details': 'Edit Recovery Details',
'Edit Registration': 'Edit Registration',
'Edit Registration Details': 'Edit Registration Details',
'Edit Report': 'Edit Report',
'Edit Request': 'Edit Request',
'Edit Request Item': 'Edit Request Item',
'Edit Resource': 'Edit Resource',
'Edit Response': 'Edit Response',
'Edit River': 'Edit River',
'Edit Role': 'Edit Role',
'Edit Sector': 'Edit Sector',
'Edit Sent Item': 'Edit Sent Item',
'Edit Sent Shipment': 'Edit Sent Shipment',
'Edit Setting': 'Edit Setting',
'Edit Settings': 'Edit Settings',
'Edit Shelter': 'Edit Shelter',
'Edit Shelter Service': 'Edit Shelter Service',
'Edit Shelter Type': 'Edit Shelter Type',
'Edit Shipment Transit Log': 'Edit Shipment Transit Log',
'Edit Shipment to Send': 'Edit Shipment to Send',
'Edit Shipment/Way Bills': 'Edit Shipment/Way Bills',
'Edit Shipment<>Item Relation': 'Edit Shipment<>Item Relation',
'Edit Site': 'Edit Site',
'Edit Skill': 'Edit Skill',
'Edit Skill Type': 'Edit Skill Type',
'Edit Solution': 'Edit Solution',
'Edit Staff': 'Edit Staff',
'Edit Staff Type': 'Edit Staff Type',
'Edit Storage Bin Type(s)': 'Edit Storage Bin Type(s)',
'Edit Storage Bins': 'Edit Storage Bins',
'Edit Storage Location': 'Edit Storage Location',
'Edit Subscription': 'Edit Subscription',
'Edit Survey Answer': 'Edit Survey Answer',
'Edit Survey Question': 'Edit Survey Question',
'Edit Survey Section': 'Edit Survey Section',
'Edit Survey Series': 'Edit Survey Series',
'Edit Survey Template': 'Edit Survey Template',
'Edit Task': 'Edit Task',
'Edit Team': 'Edit Team',
'Edit Theme': 'Edit Theme',
'Edit Themes': 'Edit Themes',
'Edit Ticket': 'Edit Ticket',
'Edit Track': 'Edit Track',
'Edit Tropo Settings': 'Edit Tropo Settings',
'Edit Unit': 'Edit Unit',
'Edit User': 'Edit User',
'Edit Volunteer Details': 'Edit Volunteer Details',
'Edit Volunteer Registration': 'Edit Volunteer Registration',
'Edit Warehouse': 'Edit Warehouse',
'Edit Warehouse Item': 'Edit Warehouse Item',
'Edit current record': 'Edit current record',
'Edit message': 'Edit message',
'Edit the Application': 'Edit the Application',
'Editable?': 'Editable?',
'Education': 'Education',
'Education materials received': 'Education materials received',
'Education materials, source': 'Education materials, source',
'Effects Inventory': 'Effects Inventory',
'Eggs': 'Eggs',
'Either a shelter or a location must be specified': 'Either a shelter or a location must be specified',
'Either file upload or document URL required.': 'Either file upload or document URL required.',
'Either file upload or image URL required.': 'Either file upload or image URL required.',
'Elderly person headed households (>60 yrs)': 'Elderly person headed households (>60 yrs)',
'Electrical': 'Electrical',
'Elevated': 'Elevated',
'Email': 'Email',
'Email Settings': 'Email Settings',
'Email address verified, however registration is still pending approval - please wait until confirmation received.': 'Email address verified; however, registration is still pending approval - please wait until confirmation is received.',
'Email settings updated': 'Email settings updated',
'Embalming': 'Embalming',
'Embassy': 'Embassy',
'Emergency Capacity Building project': 'Emergency Capacity Building project',
'Emergency Department': 'Emergency Department',
'Emergency Shelter': 'Emergency Shelter',
'Emergency Support Facility': 'Emergency Support Facility',
'Emergency Support Service': 'Emergency Support Service',
'Emergency Telecommunications': 'Emergency Telecommunications',
'Enable/Disable Layers': 'Enable/Disable Layers',
'Enabled': 'Enabled',
'End date': 'End date',
'End date should be after start date': 'End date should be after start date',
'End of Period': 'End of Period',
'English': 'English',
'Enter Coordinates:': 'Enter Coordinates:',
'Enter a GPS Coord': 'Enter a GPS Coord',
'Enter a date before': 'Enter a date before',
'Enter a location': 'Enter a location',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'Enter a name for the spreadsheet you are uploading (mandatory).',
'Enter a new support request.': 'Enter a new support request.',
'Enter a summary of the request here.': 'Enter a summary of the request here.',
'Enter a unique label!': 'Enter a unique label!',
'Enter a valid email': 'Enter a valid email',
'Enter tags separated by commas.': 'Enter tags separated by commas.',
'Enter the same password as above': 'Enter the same password as above',
'Enter your firstname': 'Enter your firstname',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.',
'Entry deleted': 'Entry deleted',
'Equipment': 'Equipment',
'Error encountered while applying the theme.': 'Error encountered while applying the theme.',
'Error in message': 'Error in message',
'Error logs for "%(app)s"': 'Error logs for "%(app)s"',
'Errors': 'Errors',
'Estimated # of households who are affected by the emergency': 'Estimated # of households who are affected by the emergency',
'Estimated # of people who are affected by the emergency': 'Estimated # of people who are affected by the emergency',
'Euros': 'Euros',
'Evacuating': 'Evacuating',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)',
'Event Time': 'Event Time',
'Event Type': 'Event Type',
'Event type': 'Event type',
'Example': 'Example',
'Exceeded': 'Exceeded',
'Excreta disposal': 'Excreta disposal',
'Execute a pre-planned activity identified in <instruction>': 'Execute a pre-planned activity identified in <instruction>',
'Expected In': 'Expected In',
'Expected Out': 'Expected Out',
'Expiry Time': 'Expiry Time',
'Explosive Hazard': 'Explosive Hazard',
'Export': 'Export',
'Export Data': 'Export Data',
'Export Database as CSV': 'Export Database as CSV',
'Export in GPX format': 'Export in GPX format',
'Export in KML format': 'Export in KML format',
'Export in OSM format': 'Export in OSM format',
'Export in PDF format': 'Export in PDF format',
'Export in RSS format': 'Export in RSS format',
'Export in XLS format': 'Export in XLS format',
'Eye Color': 'Eye Color',
'Facebook': 'Facebook',
'Facial hair, color': 'Facial hair, color',
'Facial hair, type': 'Facial hair, type',
'Facial hear, length': 'Facial hair, length',
'Facility Operations': 'Facility Operations',
'Facility Status': 'Facility Status',
'Facility Type': 'Facility Type',
'Failed!': 'Failed!',
'Falling Object Hazard': 'Falling Object Hazard',
'Families/HH': 'Families/HH',
'Family': 'Family',
'Family tarpaulins received': 'Family tarpaulins received',
'Family tarpaulins, source': 'Family tarpaulins, source',
'Family/friends': 'Family/friends',
'Farmland/fishing material assistance, Rank': 'Farmland/fishing material assistance, Rank',
'Fax': 'Fax',
'Feature Class': 'Feature Class',
'Feature Class Details': 'Feature Class Details',
'Feature Class added': 'Feature Class added',
'Feature Class deleted': 'Feature Class deleted',
'Feature Class updated': 'Feature Class updated',
'Feature Classes': 'Feature Classes',
'Feature Classes are collections of Locations (Features) of the same type': 'Feature Classes are collections of Locations (Features) of the same type',
'Feature Layer Details': 'Feature Layer Details',
'Feature Layer added': 'Feature Layer added',
'Feature Layer deleted': 'Feature Layer deleted',
'Feature Layer updated': 'Feature Layer updated',
'Feature Layers': 'Feature Layers',
'Feature Namespace': 'Feature Namespace',
'Feature Type': 'Feature Type',
'Features Include': 'Features Include',
'Female': 'Female',
'Female headed households': 'Female headed households',
'Few': 'Few',
'Field Hospital': 'Field Hospital',
'File': 'File',
'Fill in Latitude': 'Fill in Latitude',
'Fill in Longitude': 'Fill in Longitude',
'Filter': 'Filter',
'Filter Field': 'Filter Field',
'Filter Value': 'Filter Value',
'Filtered search of aid pledges and requests': 'Filtered search of aid pledges and requests',
'Find': 'Find',
'Find Dead Body Report': 'Find Dead Body Report',
'Find Recovery Report': 'Find Recovery Report',
'Find Volunteers': 'Find Volunteers',
'Find by Name': 'Find by Name',
'Finder': 'Finder',
'Fingerprint': 'Fingerprint',
'Fingerprinting': 'Fingerprinting',
'Fingerprints': 'Fingerprints',
'Finish': 'Finish',
'Finished Jobs': 'Finished Jobs',
'Fire': 'Fire',
'Fire suppression and rescue': 'Fire suppression and rescue',
'First Name': 'First Name',
'First name': 'First name',
'Fishing': 'Fishing',
'Flash Flood': 'Flash Flood',
'Flash Freeze': 'Flash Freeze',
'Fleet Management': 'Fleet Management',
'Flexible Impact Assessments': 'Flexible Impact Assessments',
'Flood': 'Flood',
'Flood Alerts': 'Flood Alerts',
'Flood Alerts show water levels in various parts of the country': 'Flood Alerts show water levels in various parts of the country',
'Flood Report': 'Flood Report',
'Flood Report Details': 'Flood Report Details',
'Flood Report added': 'Flood Report added',
'Flood Report deleted': 'Flood Report deleted',
'Flood Report updated': 'Flood Report updated',
'Flood Reports': 'Flood Reports',
'Flow Status': 'Flow Status',
'Focal Point': 'Focal Point',
'Fog': 'Fog',
'Food': 'Food',
'Food Supply': 'Food Supply',
'Footer': 'Footer',
'Footer file %s missing!': 'Footer file %s missing!',
'For Eden instances enter the application base URL, e.g. http://sync.sahanfoundation.org/eden, for other peers the URL of the synchronization interface.': 'For Eden instances enter the application base URL, e.g. http://sync.sahanfoundation.org/eden, for other peers the URL of the synchronization interface.',
'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'For a country this would be the ISO2 code; for a town, it would be the Airport Locode.',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which can be customized to your needs. Click the link on the right to get started.',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners',
'For live help from the Sahana community on using this application, go to': 'For live help from the Sahana community on using this application, go to',
'For messages that support alert network internal functions': 'For messages that support alert network internal functions',
'For more details on the Sahana Eden system, see the': 'For more details on the Sahana Eden system, see the',
'For more information, see ': 'For more information, see ',
'For:': 'For:',
'Forest Fire': 'Forest Fire',
'Formal camp': 'Formal camp',
'Format': 'Format',
'Forms': 'Forms',
'Found': 'Found',
'Freezing Drizzle': 'Freezing Drizzle',
'Freezing Rain': 'Freezing Rain',
'Freezing Spray': 'Freezing Spray',
'French': 'French',
'Friday': 'Friday',
'From Location': 'From Location',
'From Warehouse': 'From Warehouse',
'Frost': 'Frost',
'Full': 'Full',
'Full beard': 'Full beard',
'Fullscreen Map': 'Fullscreen Map',
'Functional Tests': 'Functional Tests',
'Functions available': 'Functions available',
'Funding Organization': 'Funding Organization',
'Funeral': 'Funeral',
'GIS Reports of Shelter': 'GIS Reports of Shelter',
'GIS integration to view location details of the Shelter': 'GIS integration to view location details of the Shelter',
'GPS': 'GPS',
'GPS Marker': 'GPS Marker',
'GPS Track': 'GPS Track',
'GPS Track File': 'GPS Track File',
'GPX Track': 'GPX Track',
'Gale Wind': 'Gale Wind',
'Gap Analysis': 'Gap Analysis',
'Gap Analysis Map': 'Gap Analysis Map',
'Gap Analysis Report': 'Gap Analysis Report',
'Gap Map': 'Gap Map',
'Gap Report': 'Gap Report',
'Gateway Settings': 'Gateway Settings',
'Gateway settings updated': 'Gateway settings updated',
'Gender': 'Gender',
'General Medical/Surgical': 'General Medical/Surgical',
'General emergency and public safety': 'General emergency and public safety',
'Generator': 'Generator',
'Geocoder Selection': 'Geocoder Selection',
'Geometry Name': 'Geometry Name',
'Geophysical (inc. landslide)': 'Geophysical (inc. landslide)',
'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Geraldo module not available within the running Python - this needs installing for PDF output!',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Give a brief description of the image, e.g. what can be seen where on the picture (optional).',
'Give information about where and when you have seen the person': 'Give information about where and when you have seen the person',
'Give information about where and when you have seen them': 'Give information about where and when you have seen them',
'Global Messaging Settings': 'Global Messaging Settings',
'Goatee': 'Goatee',
'Government': 'Government',
'Government UID': 'Government UID',
'Government building': 'Government building',
'Grade': 'Grade',
'Greek': 'Greek',
'Group': 'Group',
'Group Details': 'Group Details',
'Group Member added': 'Group Member added',
'Group Members': 'Group Members',
'Group Memberships': 'Group Memberships',
'Group Title': 'Group Title',
'Group Type': 'Group Type',
'Group added': 'Group added',
'Group deleted': 'Group deleted',
'Group description': 'Group description',
'Group name': 'Group name',
'Group type': 'Group type',
'Group updated': 'Group updated',
'Groups': 'Groups',
'Groups removed': 'Groups removed',
'Guest': 'Guest',
'Hail': 'Hail',
'Hair Color': 'Hair Color',
'Hair Length': 'Hair Length',
'Hair Style': 'Hair Style',
'Has data from this Reference Document been entered into Sahana?': 'Has data from this Reference Document been entered into Sahana?',
'Hazard Pay': 'Hazard Pay',
'Hazardous Material': 'Hazardous Material',
'Hazardous Road Conditions': 'Hazardous Road Conditions',
'Header Background': 'Header Background',
'Header background file %s missing!': 'Header background file %s missing!',
'Headquarters': 'Headquarters',
'Health': 'Health',
'Health care assistance, Rank': 'Health care assistance, Rank',
'Health center': 'Health center',
'Health center with beds': 'Health center with beds',
'Health center without beds': 'Health center without beds',
'Healthcare Worker': 'Healthcare Worker',
'Heat Wave': 'Heat Wave',
'Heat and Humidity': 'Heat and Humidity',
'Height': 'Height',
'Height (cm)': 'Height (cm)',
'Help': 'Help',
'Helps to monitor status of hospitals': 'Helps to monitor status of hospitals',
'Helps to report and search for Missing Persons': 'Helps to report and search for Missing Persons',
'Here are the solution items related to the problem.': 'Here are the solution items related to the problem.',
'High': 'High',
'High Water': 'High Water',
'Hindu': 'Hindu',
'History': 'History',
'Hit the back button on your browser to try again.': 'Hit the back button on your browser to try again.',
'Holiday Address': 'Holiday Address',
'Home': 'Home',
'Home Address': 'Home Address',
'Home Country': 'Home Country',
'Home Crime': 'Home Crime',
'Hospital': 'Hospital',
'Hospital Details': 'Hospital Details',
'Hospital Status Report': 'Hospital Status Report',
'Hospital information added': 'Hospital information added',
'Hospital information deleted': 'Hospital information deleted',
'Hospital information updated': 'Hospital information updated',
'Hospital status assessment.': 'Hospital status assessment.',
'Hospitals': 'Hospitals',
'Hot Spot': 'Hot Spot',
'Hourly': 'Hourly',
'Household kits received': 'Household kits received',
'Household kits, source': 'Household kits, source',
'How does it work?': 'How does it work?',
'How is this person affected by the disaster? (Select all that apply)': 'How is this person affected by the disaster? (Select all that apply)',
'How long will the food last?': 'How long will the food last?',
'How many Boys (0-17 yrs) are Dead due to the crisis': 'How many Boys (0-17 yrs) are Dead due to the crisis',
'How many Boys (0-17 yrs) are Injured due to the crisis': 'How many Boys (0-17 yrs) are Injured due to the crisis',
'How many Boys (0-17 yrs) are Missing due to the crisis': 'How many Boys (0-17 yrs) are Missing due to the crisis',
'How many Girls (0-17 yrs) are Dead due to the crisis': 'How many Girls (0-17 yrs) are Dead due to the crisis',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'How many Girls (0-17 yrs) are Injured due to the crisis',
'How many Girls (0-17 yrs) are Missing due to the crisis': 'How many Girls (0-17 yrs) are Missing due to the crisis',
'How many Men (18 yrs+) are Dead due to the crisis': 'How many Men (18 yrs+) are Dead due to the crisis',
'How many Men (18 yrs+) are Injured due to the crisis': 'How many Men (18 yrs+) are Injured due to the crisis',
'How many Men (18 yrs+) are Missing due to the crisis': 'How many Men (18 yrs+) are Missing due to the crisis',
'How many Women (18 yrs+) are Dead due to the crisis': 'How many Women (18 yrs+) are Dead due to the crisis',
'How many Women (18 yrs+) are Injured due to the crisis': 'How many Women (18 yrs+) are Injured due to the crisis',
'How many Women (18 yrs+) are Missing due to the crisis': 'How many Women (18 yrs+) are Missing due to the crisis',
'How many days will the supplies last?': 'How many days will the supplies last?',
'How many new cases have been admitted to this facility in the past 24h?': 'How many new cases have been admitted to this facility in the past 24h?',
'How many of the patients with the disease died in the past 24h at this facility?': 'How many of the patients with the disease died in the past 24h at this facility?',
'How many patients with the disease are currently hospitalized at this facility?': 'How many patients with the disease are currently hospitalized at this facility?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'How much detail is seen. A high Zoom level means a lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.',
'Humanitarian NGO': 'Humanitarian NGO',
'Hurricane': 'Hurricane',
'Hurricane Force Wind': 'Hurricane Force Wind',
'Hygiene': 'Hygiene',
'Hygiene NFIs': 'Hygiene NFIs',
'Hygiene kits received': 'Hygiene kits received',
'Hygiene kits, source': 'Hygiene kits, source',
'Hygiene practice': 'Hygiene practice',
'Hygiene problems': 'Hygiene problems',
'ID Label': 'ID Label',
'ID Label: ': 'ID Label: ',
'ID Tag': 'ID Tag',
'ID Tag Number': 'ID Tag Number',
'ID type': 'ID type',
'Ice Pressure': 'Ice Pressure',
'Iceberg': 'Iceberg',
'Ideally a full URL to the source file, otherwise just a note on where data came from.': 'Ideally a full URL to the source file, otherwise just a note on where data came from.',
'Identification': 'Identification',
'Identification Report': 'Identification Report',
'Identification Reports': 'Identification Reports',
'Identification Status': 'Identification Status',
'Identification label of the Storage bin.': 'Identification label of the Storage bin.',
'Identified as': 'Identified as',
'Identified by': 'Identified by',
'Identity': 'Identity',
'Identity Details': 'Identity Details',
'Identity added': 'Identity added',
'Identity deleted': 'Identity deleted',
'Identity updated': 'Identity updated',
'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.': 'If Unit = m, Base Unit = Km, then multiplicator is 0.001 since 1m = 0.001 km.',
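# The multiplicator string above involves simple unit arithmetic; as a minimal
# sketch (illustrative values, not part of the translation data), converting a
# measurement to the Base Unit multiplies by the multiplicator:
#   Unit = m, Base Unit = km, multiplicator = 0.001
#   5000 m * 0.001 = 5.0 km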
'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.': 'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.',
'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.': 'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.',
'If no marker defined then the system default marker is used': 'If no marker is defined then the system default marker is used',
'If no, specify why': 'If no, specify why',
'If the location is a geographic area, then state at what level here.': 'If the location is a geographic area, then state at what level here.',
'If this is set to True then mails will be deleted from the server after downloading.': 'If this is set to True then mails will be deleted from the server after downloading.',
'If this record should be restricted then select which role is required to access the record here.': 'If this record should be restricted then select which role is required to access the record here.',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'If this record should be restricted then select which role(s) are permitted to access the record here.',
"If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.": "If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.",
'If yes, specify what and by whom': 'If yes, specify what and by whom',
'If yes, which and how': 'If yes, which and how',
"If you cannot find the person you want to register as a volunteer, you can add them by clicking 'Add Person' below:": "If you cannot find the person you want to register as a volunteer, you can add them by clicking 'Add Person' below:",
"If you cannot find the person you want to report missing, you can add them by clicking 'Add Person' below:": "If you cannot find the person you want to report missing, you can add them by clicking 'Add Person' below:",
"If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:": "If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:",
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.',
'If you know what the Geonames ID of this location is then you can enter it here.': 'If you know what the Geonames ID of this location is then you can enter it here.',
'If you know what the OSM ID of this location is then you can enter it here.': 'If you know what the OSM ID of this location is then you can enter it here.',
'If you need to add a new document then you can click here to attach one.': 'If you need to add a new document then you can click here to attach one.',
'If you would like to help, then please': 'If you would like to help, then please',
'Illegal Immigrant': 'Illegal Immigrant',
'Image': 'Image',
'Image Details': 'Image Details',
'Image Tags': 'Image Tags',
'Image Type': 'Image Type',
'Image Upload': 'Image Upload',
'Image added': 'Image added',
'Image deleted': 'Image deleted',
'Image updated': 'Image updated',
'Image/Attachment': 'Image/Attachment',
'Image/Other Attachment': 'Image/Other Attachment',
'Imagery': 'Imagery',
'Images': 'Images',
'Impact Assessment Summaries': 'Impact Assessment Summaries',
'Impact Assessments': 'Impact Assessments',
'Impact Baselines': 'Impact Baselines',
'Impact Details': 'Impact Details',
'Impact Type': 'Impact Type',
'Impact Type Details': 'Impact Type Details',
'Impact Type added': 'Impact Type added',
'Impact Type deleted': 'Impact Type deleted',
'Impact Type updated': 'Impact Type updated',
'Impact Types': 'Impact Types',
'Impact added': 'Impact added',
'Impact deleted': 'Impact deleted',
'Impact updated': 'Impact updated',
'Impacts': 'Impacts',
'Import': 'Import',
'Import & Export Data': 'Import & Export Data',
'Import Data': 'Import Data',
'Import Job': 'Import Job',
'Import Jobs': 'Import Jobs',
'Import and Export': 'Import and Export',
'Import from Ushahidi Instance': 'Import from Ushahidi Instance',
'Import if Master': 'Import if Master',
'Import job created': 'Import job created',
'Import multiple tables as CSV': 'Import multiple tables as CSV',
'Import/Export': 'Import/Export',
'Important': 'Important',
'Importantly where there are no aid services being provided': 'Importantly, where there are no aid services being provided',
'Imported': 'Imported',
'Importing data from spreadsheets': 'Importing data from spreadsheets',
'Improper decontamination': 'Improper decontamination',
'Improper handling of dead bodies': 'Improper handling of dead bodies',
'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).',
'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).',
'In Inventories': 'In Inventories',
'In Process': 'In Process',
'In Progress': 'In Progress',
'In Transit': 'In Transit',
'Inbound Mail Settings': 'Inbound Mail Settings',
'Incident': 'Incident',
'Incident Categories': 'Incident Categories',
'Incident Details': 'Incident Details',
'Incident Report': 'Incident Report',
'Incident Report Details': 'Incident Report Details',
'Incident Report added': 'Incident Report added',
'Incident Report deleted': 'Incident Report deleted',
'Incident Report updated': 'Incident Report updated',
'Incident Reporting': 'Incident Reporting',
'Incident Reporting System': 'Incident Reporting System',
'Incident Reports': 'Incident Reports',
'Incident added': 'Incident added',
'Incident deleted': 'Incident deleted',
'Incident updated': 'Incident updated',
'Incidents': 'Incidents',
'Incoming': 'Incoming',
'Incomplete': 'Incomplete',
'Individuals': 'Individuals',
'Industrial Crime': 'Industrial Crime',
'Industry Fire': 'Industry Fire',
'Infant (0-1)': 'Nourrisson (0-1)',
'Infectious Disease': 'Infectious Disease',
'Infectious Diseases': 'Infectious Diseases',
'Infestation': 'Infestation',
'Informal Leader': 'Informal Leader',
'Informal camp': 'Informal camp',
'Information gaps': 'Information gaps',
'Infusion catheters available': 'Infusion catheters available',
'Infusion catheters need per 24h': 'Infusion catheters need per 24h',
'Infusion catheters needed per 24h': 'Infusion catheters needed per 24h',
'Infusions available': 'Infusions available',
'Infusions needed per 24h': 'Infusions needed per 24h',
'Input Job': 'Input Job',
'Instance Type': 'Instance Type',
'Instant Porridge': 'Instant Porridge',
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": "Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.",
'Institution': 'Institution',
'Insufficient': 'Insufficient',
'Insufficient vars: Need module, resource, jresource, instance': 'Insufficient vars: Need module, resource, jresource, instance',
'Intake Items': 'Intake Items',
'Intergovernmental Organisation': 'Intergovernmental Organisation',
'Internal State': 'Internal State',
'International NGO': 'International NGO',
'International Organization': 'International Organization',
'International Staff': 'International Staff',
'Intervention': 'Intervention',
'Interview taking place at': 'Interview taking place at',
'Invalid': 'Invalid',
'Invalid Query': 'Invalid Query',
'Invalid request!': 'Invalid request!',
'Invalid ticket': 'Invalid ticket',
'Inventories with Item': 'Inventories with Item',
'Inventory': 'Inventory',
'Inventory Item Details': 'Inventory Item Details',
'Inventory Item added': "Article d'inventaire ajouté",
'Inventory Item deleted': 'Inventory Item deleted',
'Inventory Item updated': 'Inventory Item updated',
'Inventory Items': 'Inventory Items',
'Inventory Management': 'Inventory Management',
'Inventory Store': 'Inventory Store',
'Inventory Store Details': 'Inventory Store Details',
'Inventory Store added': 'Inventory Store added',
'Inventory Store deleted': 'Inventory Store deleted',
'Inventory Store updated': 'Inventory Store updated',
'Inventory Stores': 'Inventory Stores',
'Inventory of Effects': 'Inventory of Effects',
'Inventory/Ledger': 'Inventory/Ledger',
'Is it safe to collect water?': 'Is it safe to collect water?',
'Issuing Authority': 'Issuing Authority',
'It is built using the Template agreed by a group of NGOs working together as the': 'It is built using the Template agreed by a group of NGOs working together as the',
'Item': 'Item',
'Item Added to Shipment': 'Item Added to Shipment',
'Item Catalog Categories': 'Item Catalog Categories',
'Item Catalog Category': 'Item Catalog Category',
'Item Catalog Category Details': 'Item Catalog Category Details',
'Item Catalog Category added': 'Item Catalog Category added',
'Item Catalog Category deleted': 'Item Catalog Category deleted',
'Item Catalog Category updated': 'Item Catalog Category updated',
'Item Catalog Details': 'Item Catalog Details',
'Item Catalog added': 'Item Catalog added',
'Item Catalog deleted': 'Item Catalog deleted',
'Item Catalog updated': 'Item Catalog updated',
'Item Catalogs': 'Item Catalogs',
'Item Categories': 'Item Categories',
'Item Category': 'Item Category',
'Item Category Details': 'Item Category Details',
'Item Category added': 'Item Category added',
'Item Category deleted': 'Item Category deleted',
'Item Category updated': 'Item Category updated',
'Item Details': 'Item Details',
'Item Packet Details': 'Item Packet Details',
'Item Packet added': 'Item Packet added',
'Item Packet deleted': 'Item Packet deleted',
'Item Packet updated': 'Item Packet updated',
'Item Packets': 'Item Packets',
'Item Sub-Categories': 'Item Sub-Categories',
'Item Sub-Category': 'Item Sub-Category',
'Item Sub-Category Details': 'Item Sub-Category Details',
'Item Sub-Category added': 'Item Sub-Category added',
'Item Sub-Category deleted': 'Item Sub-Category deleted',
'Item Sub-Category updated': "Sous-catégorie d'article mise à jour",
'Item added': 'Item added',
'Item already in Bundle!': 'Item already in Bundle!',
'Item already in Kit!': 'Item already in Kit!',
'Item already in budget!': 'Item already in budget!',
'Item deleted': 'Item deleted',
'Item updated': 'Item updated',
'Items': 'Articles',
'Items Sent from Warehouse': 'Items Sent from Warehouse',
'Japanese': 'Japanese',
'Jerry can': 'Jerry can',
'Jew': 'Jew',
'Job Title': 'Job Title',
'Jobs': 'Jobs',
'Just Once': 'Just Once',
'KPIs': 'KPIs',
'Key': 'Key',
'Key Details': 'Key Details',
'Key added': 'Key added',
'Key deleted': 'Key deleted',
'Key updated': 'Key updated',
'Keys': 'Keys',
'Kit': 'Kit',
'Kit Contents': 'Kit Contents',
'Kit Details': 'Kit Details',
'Kit Updated': 'Kit Updated',
'Kit added': 'Kit added',
'Kit deleted': 'Kit deleted',
'Kit updated': 'Kit updated',
'Kits': 'Kits',
'Known Identities': 'Known Identities',
'LICENCE': 'LICENCE',
'LICENSE': 'LICENSE',
'LMS Administration': 'LMS Administration',
'Label': 'Label',
'Lack of material': 'Lack of material',
'Lack of school uniform': 'Lack of school uniform',
'Lack of supplies at school': 'Lack of supplies at school',
'Lack of transport to school': 'Lack of transport to school',
'Lactating women': 'Lactating women',
'Lahar': 'Lahar',
'Landslide': 'Landslide',
'Language': 'Language',
'Last Name': 'Last Name',
'Last known location': 'Last known location',
'Last synchronization time': 'Last synchronization time',
'Last updated by': 'Last updated by',
'Last updated on': 'Last updated on',
'Latitude': 'Latitude',
'Latitude & Longitude': 'Latitude & Longitude',
'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.',
'Latitude should be between': 'Latitude should be between',
'Law enforcement, military, homeland and local/private security': 'Law enforcement, military, homeland and local/private security',
'Layer': 'Layer',
'Layer Details': 'Layer Details',
'Layer added': 'Layer added',
'Layer deleted': 'Layer deleted',
'Layer updated': 'Layer updated',
'Layers': 'Layers',
'Layers updated': 'Layers updated',
'Layout': 'Layout',
'Legend Format': 'Legend Format',
'Length': 'Length',
'Level': 'Level',
"Level is higher than parent's": "Level is higher than parent's",
'Library support not available for OpenID': 'Library support not available for OpenID',
'Line': 'Line',
'Link Item & Shipment': 'Link Item & Shipment',
'Link an Item & Shipment': 'Link an Item & Shipment',
'Linked Records': 'Linked Records',
'Linked records': 'Linked records',
'List': 'List',
'List / Add Baseline Types': 'List / Add Baseline Types',
'List / Add Impact Types': 'List / Add Impact Types',
'List / Add Services': 'List / Add Services',
'List / Add Types': 'List / Add Types',
'List Activities': 'List Activities',
'List Aid Requests': 'List Aid Requests',
'List All': 'List All',
'List All Entries': 'List All Entries',
'List All Memberships': 'List All Memberships',
'List Assessment Summaries': 'List Assessment Summaries',
'List Assessments': 'List Assessments',
'List Baseline Types': 'List Baseline Types',
'List Baselines': 'List Baselines',
'List Budgets': 'List Budgets',
'List Bundles': 'List Bundles',
'List Catalog Items': 'List Catalog Items',
'List Category<>Sub-Category<>Catalog Relation': 'List Category<>Sub-Category<>Catalog Relation',
'List Checklists': 'Liste des listes de contrôle',
'List Cluster': 'List Cluster',
'List Cluster Subsectors': 'List Cluster Subsectors',
'List Clusters': 'List Clusters',
'List Configs': 'List Configs',
'List Conflicts': 'List Conflicts',
'List Contacts': 'List Contacts',
'List Distribution Items': 'List Distribution Items',
'List Distributions': 'List Distributions',
'List Documents': 'List Documents',
'List Donors': 'List Donors',
'List Feature Classes': 'List Feature Classes',
'List Feature Layers': 'List Feature Layers',
'List Flood Reports': 'List Flood Reports',
'List Groups': 'List Groups',
'List Groups/View Members': 'List Groups/View Members',
'List Hospitals': 'List Hospitals',
'List Identities': 'List Identities',
'List Images': 'List Images',
'List Impact Assessments': 'List Impact Assessments',
'List Impact Types': 'List Impact Types',
'List Impacts': 'List Impacts',
'List Incident Reports': 'List Incident Reports',
'List Incidents': 'List Incidents',
'List Inventory Items': 'List Inventory Items',
'List Inventory Stores': 'List Inventory Stores',
'List Item Catalog Categories': 'List Item Catalog Categories',
'List Item Catalogs': 'List Item Catalogs',
'List Item Categories': 'List Item Categories',
'List Item Packets': 'List Item Packets',
'List Item Sub-Categories': 'List Item Sub-Categories',
'List Items': 'List Items',
'List Keys': 'List Keys',
'List Kits': 'List Kits',
'List Layers': 'List Layers',
'List Locations': 'List Locations',
'List Log Entries': 'List Log Entries',
'List Markers': 'List Markers',
'List Members': 'List Members',
'List Memberships': 'List Memberships',
'List Messages': 'List Messages',
'List Metadata': 'Liste des métadonnées',
'List Missing Persons': 'List Missing Persons',
'List Need Types': 'List Need Types',
'List Needs': 'List Needs',
'List Notes': 'List Notes',
'List Offices': 'List Offices',
'List Organizations': 'List Organizations',
'List Peers': 'List Peers',
'List Personal Effects': 'List Personal Effects',
'List Persons': 'List Persons',
'List Photos': 'List Photos',
'List Positions': 'List Positions',
'List Problems': 'List Problems',
'List Projections': 'List Projections',
'List Projects': 'List Projects',
'List Rapid Assessments': 'List Rapid Assessments',
'List Received Items': 'List Received Items',
'List Received Shipments': 'List Received Shipments',
'List Records': 'List Records',
'List Registrations': 'List Registrations',
'List Reports': 'List Reports',
'List Request Items': 'List Request Items',
'List Requests': 'List Requests',
'List Resources': 'Liste des ressources',
'List Responses': 'List Responses',
'List Rivers': 'List Rivers',
'List Roles': 'List Roles',
'List Sections': 'List Sections',
'List Sector': 'List Sector',
'List Sent Items': 'List Sent Items',
'List Sent Shipments': 'List Sent Shipments',
'List Service Profiles': 'List Service Profiles',
'List Settings': 'List Settings',
'List Shelter Services': 'List Shelter Services',
'List Shelter Types': 'List Shelter Types',
'List Shelters': 'List Shelters',
'List Shipment Transit Logs': 'List Shipment Transit Logs',
'List Shipment/Way Bills': 'List Shipment/Way Bills',
'List Shipment<>Item Relation': 'List Shipment<>Item Relation',
'List Shipments': 'List Shipments',
'List Sites': 'List Sites',
'List Skill Types': 'List Skill Types',
'List Skills': 'List Skills',
'List Solutions': 'List Solutions',
'List Staff': 'List Staff',
'List Staff Types': 'List Staff Types',
'List Status': 'List Status',
'List Storage Bin Type(s)': 'List Storage Bin Type(s)',
'List Storage Bins': 'List Storage Bins',
'List Storage Location': 'Liste des lieux de stockage',
'List Subscriptions': 'List Subscriptions',
'List Survey Answers': 'List Survey Answers',
'List Survey Questions': 'List Survey Questions',
'List Survey Sections': 'List Survey Sections',
'List Survey Series': 'List Survey Series',
'List Survey Templates': 'List Survey Templates',
'List Tasks': 'List Tasks',
'List Teams': 'List Teams',
'List Themes': 'List Themes',
'List Tickets': 'List Tickets',
'List Tracks': 'List Tracks',
'List Units': 'List Units',
'List Users': 'Liste des utilisateurs',
'List Volunteers': 'List Volunteers',
'List Warehouse Items': 'List Warehouse Items',
'List Warehouses': 'List Warehouses',
'List all': 'List all',
'List of Items': 'List of Items',
'List of Missing Persons': 'List of Missing Persons',
'List of Peers': 'List of Peers',
'List of Reports': 'List of Reports',
'List of Requests': 'List of Requests',
'List of Spreadsheets': 'List of Spreadsheets',
'List of Spreadsheets uploaded': 'List of Spreadsheets uploaded',
'List of Volunteers for this skills set': 'List of Volunteers for this skills set',
'List of addresses': 'List of addresses',
'List unidentified': 'List unidentified',
'List/Add': 'List/Add',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Listes "qui fait quoi et où". organismes de secours permet de coordonner leurs activités',
'Live Help': 'Live Help',
'Livelihood': 'Livelihood',
'Load Cleaned Data into Database': 'Load Cleaned Data into Database',
'Load Details': 'Load Details',
'Load Raw File into Grid': 'Load Raw File into Grid',
'Load the details to help decide which is the best one to keep out of the 2.': 'Load the details to help decide which is the best one of the two to keep.',
'Loading': 'Loading',
'Loading Locations...': 'Loading Locations...',
'Local Name': 'Nom local',
'Local Names': 'Local Names',
'Location': 'Location',
'Location 1': 'Location 1',
'Location 2': 'Location 2',
'Location De-duplicated': 'Location De-duplicated',
'Location Details': 'Location Details',
'Location added': 'Location added',
'Location deleted': 'Lieu supprimé',
'Location details': 'Location details',
'Location updated': 'Location updated',
'Location: ': 'Location: ',
'Locations': 'Locations',
'Locations De-duplicator': 'Locations De-duplicator',
'Locations of this level need to have a parent of level': 'Locations of this level need to have a parent of level',
'Locations should be different!': 'Locations should be different!',
'Lockdown': 'Lockdown',
'Log': 'Log',
'Log Entry Details': 'Log Entry Details',
'Log entry added': 'Log entry added',
'Log entry deleted': 'Log entry deleted',
'Log entry updated': 'Entrée du journal mise à jour',
'Login': 'Login',
'Logistics': 'Logistics',
'Logistics Management': 'Logistics Management',
'Logistics Management System': 'Logistics Management System',
'Logo': 'Logo',
'Logo file %s missing!': 'Logo file %s missing!',
'Logout': 'Logout',
'Long Text': 'Long Text',
'Longitude': 'Longitude',
'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.': 'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.',
'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.',
'Longitude should be between': 'Longitude should be between',
'Looting': 'Looting',
'Lost Password': 'Lost Password',
'Low': 'Low',
'Magnetic Storm': 'Magnetic Storm',
'Major outward damage': 'Major outward damage',
'Make Pledge': 'Make Pledge',
'Make Request': 'Make Request',
'Make a Request': 'Make a Request',
'Make a Request for Aid': 'Make a Request for Aid',
'Make preparations per the <instruction>': 'Make preparations per the <instruction>',
'Male': 'Male',
'Manage': 'Manage',
'Manage Category': 'Manage Category',
'Manage Item catalog': 'Manage Item catalog',
'Manage Items Catalog': 'Manage Items Catalog',
'Manage Kits': 'Manage Kits',
'Manage Relief Item Catalogue': 'Manage Relief Item Catalogue',
'Manage Sub-Category': 'Manage Sub-Category',
'Manage Users & Roles': 'Manage Users & Roles',
'Manage Warehouses': 'Manage Warehouses',
'Manage Warehouses/Sites': 'Manage Warehouses/Sites',
'Manage requests of hospitals for assistance.': 'Manage requests of hospitals for assistance.',
'Manage volunteers by capturing their skills, availability and allocation': "Gérer les volontaires en capturant leurs compétences, leur disponibilité et leur affectation",
'Manager': 'Manager',
'Managing Office': 'Managing Office',
'Managing, Storing and Distributing Catalog Items.': 'Managing, Storing and Distributing Catalog Items.',
'Managing, Storing and Distributing Items.': 'Managing, Storing and Distributing Items.',
'Managing, Storing and Distributing Relief Items': 'Managing, Storing and Distributing Relief Items',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).',
'Mandatory. The URL to access the service.': 'Mandatory. The URL to access the service.',
'Manual': 'Manual',
'Manual Synchronization': 'Manual Synchronization',
'Many': 'Many',
'Map': 'Map',
'Map Height': 'Map Height',
'Map Service Catalogue': 'Map Service Catalogue',
'Map Settings': 'Map Settings',
'Map Viewing Client': 'Map Viewing Client',
'Map Width': 'Largeur de la carte',
'Map of Hospitals': 'Map of Hospitals',
'Mapping': 'Mapping',
'Marine Security': 'Marine Security',
'Marital Status': 'État civil',
'Marker': 'Marker',
'Marker Details': 'Marker Details',
'Marker added': 'Marker added',
'Marker deleted': 'Marker deleted',
'Marker updated': 'Marker updated',
'Markers': 'Markers',
'Master Message Log': 'Master Message Log',
'Master Message Log to process incoming reports & requests': 'Master Message Log to process incoming reports & requests',
'Match Percentage': 'Match Percentage',
'Match percentage indicates the % match between these two records': 'Match percentage indicates the % match between these two records',
'Matching Records': 'Matching Records',
'Matrix of Choices (Multiple Answers)': 'Matrix of Choices (Multiple Answers)',
'Matrix of Choices (Only one answer)': 'Matrix of Choices (Only one answer)',
'Matrix of Text Fields': 'Matrix of Text Fields',
'Max Persons per Dwelling': 'Max Persons per Dwelling',
'Maximum Weight': 'Maximum Weight',
'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.': 'Maximum weight capacity of the Storage Location, followed by choosing the unit from the drop down list.',
'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.': 'Maximum weight capacity of the items the storage bin can contain, followed by choosing the unit from the drop down list.',
'Medical and public health': 'Medical and public health',
'Medicine': 'Medicine',
'Medium': 'Medium',
'Megabytes per Month': 'Megabytes per Month',
'Members': 'Members',
'Membership': 'Membership',
'Membership Details': 'Membership Details',
'Membership added': 'Adhésion ajoutée',
'Membership deleted': 'Membership deleted',
'Membership updated': 'Membership updated',
'Memberships': 'Memberships',
'Message': 'Message',
'Message Details': 'Message Details',
'Message Variable': 'Message Variable',
'Message added': 'Message ajouté',
'Message deleted': 'Message deleted',
'Message sent to outbox': 'Message sent to outbox',
'Message updated': 'Message updated',
'Message variable': 'Message variable',
'Messages': 'Messages',
'Messaging': 'Messaging',
'Messaging settings updated': 'Messaging settings updated',
'Metadata': 'Metadata',
'Metadata Details': 'Metadata Details',
'Metadata added': 'Metadata added',
'Metadata can be supplied here to be applied to all uploaded photos, if desired.': 'Metadata can be supplied here to be applied to all uploaded photos, if desired.',
'Metadata deleted': 'Metadata deleted',
'Metadata updated': 'Metadata updated',
'Meteorite': 'Meteorite',
'Meteorological (inc. flood)': 'Meteorological (inc. flood)',
'Method used': 'Method used',
'Middle Name': 'Middle Name',
'Migrants or ethnic minorities': 'Migrants or ethnic minorities',
'Military': 'Military',
'Minimum Bounding Box': 'Minimum Bounding Box',
'Minutes must be a number between 0 and 60': 'Minutes must be a number between 0 and 60',
'Minutes per Month': 'Minutes per Month',
'Minutes should be a number greater than 0 and less than 60': 'Minutes should be a number greater than 0 and less than 60',
'Miscellaneous': 'Miscellaneous',
'Missing': 'Missing',
'Missing Person': 'Missing Person',
'Missing Person Details': 'Missing Person Details',
'Missing Person Reports': 'Missing Person Reports',
'Missing Persons': 'Missing Persons',
'Missing Persons Registry': 'Missing Persons Registry',
'Missing Persons Report': 'Missing Persons Report',
'Missing Report': 'Missing Report',
'Missing Senior Citizen': 'Missing Senior Citizen',
'Missing Vulnerable Person': 'Missing Vulnerable Person',
'Mobile': 'Mobile',
'Mobile Assess': 'Mobile Assess',
'Mobile Assess.': 'Mobile Assess.',
'Mobile Basic': 'Mobile Basic',
'Mobile Basic Assessment': 'Mobile Basic Assessment',
'Mobile Phone': 'Mobile Phone',
'Mode': 'Mode',
'Modem Settings': 'Modem Settings',
'Modem settings updated': 'Modem settings updated',
'Moderator': 'Moderator',
'Modify Information on groups and individuals': 'Modify Information on groups and individuals',
'Modifying data in spreadsheet before importing it to the database': 'Modifying data in spreadsheet before importing it to the database',
'Module Administration': 'Module Administration',
'Module disabled!': 'Module disabled!',
'Module provides access to information on current Flood Levels.': 'Module provides access to information on current Flood Levels.',
'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.': 'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.',
'Monday': 'Monday',
'Monthly Cost': 'Monthly Cost',
'Monthly Salary': 'Monthly Salary',
'Months': 'Months',
'Morgue Status': 'Morgue Status',
'Morgue Units Available': 'Morgue Units Available',
'Mosque': 'Mosque',
'Motorcycle': 'Motorcycle',
'Moustache': 'Moustache',
'Movements (Filter In/Out/Lost)': 'Movements (Filter In/Out/Lost)',
'MultiPolygon': 'MultiPolygon',
'Multiple': 'Multiple',
'Multiple Choice (Multiple Answers)': 'Multiple Choice (Multiple Answers)',
'Multiple Choice (Only One Answer)': 'Multiple Choice (Only One Answer)',
'Multiple Text Fields': 'Multiple Text Fields',
'Multiplicator': 'Multiplier',
'Muslim': 'Muslim',
'My Tasks': 'My Tasks',
'N/A': 'N/A',
"NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.": "NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.",
'Name': 'Name',
'Name and/or ID': 'Name and/or ID',
'Name and/or ID Label': 'Name and/or ID Label',
'Name of Storage Bin Type.': 'Name of Storage Bin Type.',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'Name of the file (& optional sub-path) located in static which should be used for the background of the header.',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Name of the file (& optional sub-path) located in static which should be used for the top-left image.',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Name of the file (& optional sub-path) located in views which should be used for footer.',
'Name of the person in local language and script (optional).': 'Name of the person in local language and script (optional).',
'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.': 'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.',
'Names can be added in multiple languages': 'Names can be added in multiple languages',
'National ID Card': 'National ID Card',
'National NGO': 'National NGO',
'National Staff': 'National Staff',
'Nationality': 'Nationality',
'Nationality of the person.': 'Nationality of the person.',
'Nautical Accident': 'Nautical Accident',
'Nautical Hijacking': 'Nautical Hijacking',
'Need Type': 'Need Type',
'Need Type Details': 'Need Type Details',
'Need Type added': 'Need Type added',
'Need Type deleted': 'Need Type deleted',
'Need Type updated': 'Need Type updated',
'Need Types': 'Need Types',
"Need a 'url' argument!": "Need a 'url' argument!",
'Need added': 'Need added',
'Need deleted': 'Need deleted',
'Need to configure Twitter Authentication': 'Need to configure Twitter Authentication',
'Need to select 2 Locations': 'Need to select 2 Locations',
'Need to specify a Budget!': 'Need to specify a Budget!',
'Need to specify a Kit!': 'Need to specify a Kit!',
'Need to specify a Resource!': 'Need to specify a Resource!',
'Need to specify a bundle!': 'Need to specify a bundle!',
'Need to specify a group!': 'Need to specify a group!',
'Need to specify a location to search for.': 'Need to specify a location to search for.',
'Need to specify a role!': 'Vous devez préciser un rôle!',
'Need to specify a table!': 'Need to specify a table!',
'Need to specify a user!': 'Need to specify a user!',
'Need updated': 'Need updated',
'Needs': 'Needs',
'Needs Details': 'Needs Details',
'Needs elaboration!!!': 'Needs elaboration!!!',
'Negative Flow Isolation': 'Negative Flow Isolation',
'Neighbourhood': 'Neighbourhood',
'Neonatal ICU': 'Neonatal ICU',
'Neonatology': 'Neonatology',
'Network': 'Network',
'Neurology': 'Neurology',
'New': 'New',
'New Assessment reported from': 'New Assessment reported from',
'New Checklist': 'New Checklist',
'New Peer': 'New Peer',
'New Record': 'New Record',
'New Report': 'New Report',
'New Request': 'New Request',
'New Solution Choice': 'New Solution Choice',
'New Synchronization Peer': 'New Synchronization Peer',
'New cases in the past 24h': 'New cases in the past 24h',
'News': 'News',
'Next': 'Next',
'No': 'No',
'No Activities Found': 'No Activities Found',
'No Addresses currently registered': 'No Addresses currently registered',
'No Aid Requests have been made yet': 'No Aid Requests have been made yet',
'No Assessment Summaries currently registered': 'No Assessment Summaries currently registered',
'No Assessments currently registered': 'No Assessments currently registered',
'No Baseline Types currently registered': 'No Baseline Types currently registered',
'No Baselines currently registered': 'No Baselines currently registered',
'No Budgets currently registered': 'No Budgets currently registered',
'No Bundles currently registered': 'No Bundles currently registered',
'No Catalog Items currently registered': 'No Catalog Items currently registered',
'No Category<>Sub-Category<>Catalog Relation currently registered': 'Aucune relation Catégorie<>Sous-catégorie<>Catalogue actuellement enregistrée',
'No Checklist available': 'No Checklist available',
'No Cluster Subsectors currently registered': 'No Cluster Subsectors currently registered',
'No Clusters currently registered': 'No Clusters currently registered',
'No Configs currently defined': 'No Configs currently defined',
'No Details currently registered': 'No Details currently registered',
'No Distribution Items currently registered': 'No Distribution Items currently registered',
'No Distributions currently registered': 'No Distributions currently registered',
'No Documents found': 'No Documents found',
'No Donors currently registered': 'No Donors currently registered',
'No Feature Classes currently defined': 'No Feature Classes currently defined',
'No Feature Layers currently defined': 'No Feature Layers currently defined',
'No Flood Reports currently registered': 'No Flood Reports currently registered',
'No Groups currently defined': 'No Groups currently defined',
'No Groups currently registered': 'No Groups currently registered',
'No Hospitals currently registered': "Pas d'hôpitaux actuellement enregistrés",
'No Identification Report Available': 'No Identification Report Available',
'No Identities currently registered': 'No Identities currently registered',
'No Image': 'No Image',
'No Images currently registered': 'Aucune image actuellement enregistrée',
'No Impact Types currently registered': 'No Impact Types currently registered',
'No Impacts currently registered': 'No Impacts currently registered',
'No Incident Reports currently registered': 'No Incident Reports currently registered',
'No Incidents currently registered': 'No Incidents currently registered',
'No Inventory Items currently registered': 'No Inventory Items currently registered',
'No Inventory Stores currently registered': 'No Inventory Stores currently registered',
'No Item Catalog Category currently registered': 'No Item Catalog Category currently registered',
'No Item Catalog currently registered': 'No Item Catalog currently registered',
'No Item Categories currently registered': 'No Item Categories currently registered',
'No Item Packets currently registered': 'No Item Packets currently registered',
'No Item Sub-Category currently registered': 'No Item Sub-Category currently registered',
'No Item currently registered': 'No Item currently registered',
'No Items currently registered': 'No Items currently registered',
'No Items currently requested': 'No Items currently requested',
'No Keys currently defined': 'No Keys currently defined',
'No Kits currently registered': 'No Kits currently registered',
'No Locations currently available': 'No Locations currently available',
'No Locations currently registered': 'No Locations currently registered',
'No Markers currently available': 'No Markers currently available',
'No Members currently registered': 'No Members currently registered',
'No Memberships currently defined': 'Aucune adhésion actuellement définie',
'No Memberships currently registered': 'No Memberships currently registered',
'No Messages currently in Outbox': 'No Messages currently in Outbox',
'No Metadata currently defined': 'No Metadata currently defined',
'No Need Types currently registered': 'No Need Types currently registered',
'No Needs currently registered': 'No Needs currently registered',
'No Offices currently registered': 'No Offices currently registered',
'No Offices found!': 'No Offices found!',
'No Organizations currently registered': 'No Organizations currently registered',
'No Packets for Item': 'No Packets for Item',
'No Peers currently registered': 'No Peers currently registered',
'No People currently registered in this shelter': 'No People currently registered in this shelter',
'No Persons currently registered': 'No Persons currently registered',
'No Persons currently reported missing': 'No Persons currently reported missing',
'No Persons found': 'No Persons found',
'No Photos found': 'No Photos found',
'No Presence Log Entries currently registered': 'No Presence Log Entries currently registered',
'No Problems currently defined': 'No Problems currently defined',
'No Projections currently defined': 'No Projections currently defined',
'No Projects currently registered': 'No Projects currently registered',
'No Rapid Assessments currently registered': 'No Rapid Assessments currently registered',
'No Received Items currently registered': 'No Received Items currently registered',
'No Received Shipments': 'No Received Shipments',
'No Records currently available': 'No Records currently available',
'No Records matching the query': 'No Records matching the query',
'No Request Items currently registered': 'No Request Items currently registered',
'No Request Shipments': 'No Request Shipments',
'No Requests have been made yet': 'No Requests have been made yet',
'No Requests match this criteria': 'No Requests match this criteria',
'No Responses currently registered': 'No Responses currently registered',
'No Rivers currently registered': 'No Rivers currently registered',
'No Roles currently defined': 'No Roles currently defined',
'No Sections currently registered': 'No Sections currently registered',
'No Sectors currently registered': 'No Sectors currently registered',
'No Sent Items currently registered': 'No Sent Items currently registered',
'No Sent Shipments': 'No Sent Shipments',
'No Settings currently defined': 'No Settings currently defined',
'No Shelter Services currently registered': 'No Shelter Services currently registered',
'No Shelter Types currently registered': 'No Shelter Types currently registered',
'No Shelters currently registered': 'No Shelters currently registered',
'No Shipment Transit Logs currently registered': 'No Shipment Transit Logs currently registered',
'No Shipment/Way Bills currently registered': 'No Shipment/Way Bills currently registered',
'No Shipment<>Item Relation currently registered': 'No Shipment<>Item Relation currently registered',
'No Sites currently registered': 'No Sites currently registered',
'No Skill Types currently set': 'No Skill Types currently set',
'No Solutions currently defined': 'No Solutions currently defined',
'No Staff Types currently registered': 'No Staff Types currently registered',
'No Staff currently registered': 'No Staff currently registered',
'No Storage Bin Type currently registered': 'No Storage Bin Type currently registered',
'No Storage Bins currently registered': 'No Storage Bins currently registered',
'No Storage Locations currently registered': 'No Storage Locations currently registered',
'No Subscription available': 'No Subscription available',
'No Survey Answers currently registered': 'No Survey Answers currently registered',
'No Survey Questions currently registered': 'No Survey Questions currently registered',
'No Survey Sections currently registered': 'No Survey Sections currently registered',
'No Survey Series currently registered': 'No Survey Series currently registered',
'No Survey Template currently registered': 'No Survey Template currently registered',
'No Tasks with Location Data': 'No Tasks with Location Data',
'No Tasks with Location Data!': 'No Tasks with Location Data!',
'No Themes currently defined': 'No Themes currently defined',
'No Tickets currently registered': 'No Tickets currently registered',
'No Tracks currently available': 'No Tracks currently available',
'No Units currently registered': 'No Units currently registered',
'No Users currently registered': 'No Users currently registered',
'No Volunteers currently registered': 'No Volunteers currently registered',
'No Warehouse Items currently registered': 'No Warehouse Items currently registered',
'No Warehouses currently registered': 'No Warehouses currently registered',
'No Warehouses match this criteria': 'No Warehouses match this criteria',
'No access at all': 'No access at all',
'No access to this record!': 'No access to this record!',
'No action recommended': 'No action recommended',
'No conflicts logged': 'No conflicts logged',
'No contact information available': 'No contact information available',
'No contacts currently registered': 'No contacts currently registered',
'No data in this table - cannot create PDF!': 'No data in this table - cannot create PDF!',
'No databases in this application': 'No databases in this application',
'No entries found': 'No entries found',
'No entries matching the query': 'No entries matching the query',
'No import jobs': 'No import jobs',
'No linked records': 'No linked records',
'No location found': 'No location found',
'No location known for this person': 'No location known for this person',
'No location known for this team': 'No location known for this team',
'No locations found for members of this team': 'No locations found for members of this team',
'No locations registered at this level': 'No locations registered at this level',
'No log entries matching the query': 'No log entries matching the query',
'No matching records found.': 'No matching records found.',
'No messages in the system': 'No messages in the system',
'No notes available': 'No notes available',
'No peers currently registered': 'No peers currently registered',
'No pending registrations found': 'No pending registrations found',
'No pending registrations matching the query': 'No pending registrations matching the query',
'No person record found for current user.': 'No person record found for current user.',
'No positions currently registered': 'No positions currently registered',
'No problem group defined yet': 'No problem group defined yet',
'No records matching the query': 'No records matching the query',
'No records to delete': 'No records to delete',
'No recovery reports available': 'No recovery reports available',
'No report available.': 'No report available.',
'No reports available.': 'No reports available.',
'No reports currently available': 'No reports currently available',
'No requests found': 'No requests found',
'No resources currently registered': 'No resources currently registered',
'No resources currently reported': 'No resources currently reported',
'No service profile available': 'No service profile available',
'No skills currently set': 'No skills currently set',
'No status information available': 'No status information available',
'No synchronization': 'No synchronization',
'No tasks currently registered': 'No tasks currently registered',
'No template found!': 'No template found!',
'No units currently registered': 'No units currently registered',
'No volunteer information registered': 'No volunteer information registered',
'None': 'None',
'None (no such record)': 'None (no such record)',
'Noodles': 'Noodles',
'Normal': 'Normal',
'Not Applicable': 'Not Applicable',
'Not Authorised!': 'Not Authorised!',
'Not Possible': 'Not Possible',
'Not Set': 'Not Set',
'Not authorised!': 'Not authorised!',
'Not installed or incorrectly configured.': 'Not installed or incorrectly configured.',
'Note': 'Note',
'Note Details': 'Note Details',
'Note Status': 'Note Status',
'Note Type': 'Note Type',
'Note added': 'Note added',
'Note deleted': 'Note deleted',
'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead.': 'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead.',
'Note updated': 'Note updated',
'Notes': 'Notes',
'Notice to Airmen': 'Notice to Airmen',
'Number': 'Number',
'Number of Columns': 'Number of Columns',
'Number of Patients': 'Number of Patients',
'Number of Rows': 'Number of Rows',
'Number of Vehicles': 'Number of Vehicles',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Number of additional beds of that type expected to become available in this unit within the next 24 hours.',
'Number of alternative places for studying': 'Number of alternative places for studying',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Number of available/vacant beds of that type in this unit at the time of reporting.',
'Number of deaths during the past 24 hours.': 'Number of deaths during the past 24 hours.',
'Number of discharged patients during the past 24 hours.': 'Number of discharged patients during the past 24 hours.',
'Number of doctors': 'Number of doctors',
'Number of in-patients at the time of reporting.': 'Number of in-patients at the time of reporting.',
'Number of newly admitted patients during the past 24 hours.': 'Number of newly admitted patients during the past 24 hours.',
'Number of non-medical staff': 'Number of non-medical staff',
'Number of nurses': 'Number of nurses',
'Number of private schools': 'Number of private schools',
'Number of public schools': 'Number of public schools',
'Number of religious schools': 'Number of religious schools',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Number of vacant/available beds in this hospital. Automatically updated from daily reports.',
'Number of vacant/available units to which victims can be transported immediately.': 'Number of vacant/available units to which victims can be transported immediately.',
'Number or Label on the identification tag this person is wearing (if any).': 'Number or Label on the identification tag this person is wearing (if any).',
'Number/Percentage of affected population that is Female & Aged 0-5': 'Number/Percentage of affected population that is Female & Aged 0-5',
'Number/Percentage of affected population that is Female & Aged 13-17': 'Number/Percentage of affected population that is Female & Aged 13-17',
'Number/Percentage of affected population that is Female & Aged 18-25': 'Number/Percentage of affected population that is Female & Aged 18-25',
'Number/Percentage of affected population that is Female & Aged 26-60': 'Number/Percentage of affected population that is Female & Aged 26-60',
'Number/Percentage of affected population that is Female & Aged 6-12': 'Number/Percentage of affected population that is Female & Aged 6-12',
'Number/Percentage of affected population that is Female & Aged 61+': 'Number/Percentage of affected population that is Female & Aged 61+',
'Number/Percentage of affected population that is Male & Aged 0-5': 'Number/Percentage of affected population that is Male & Aged 0-5',
'Number/Percentage of affected population that is Male & Aged 13-17': 'Number/Percentage of affected population that is Male & Aged 13-17',
'Number/Percentage of affected population that is Male & Aged 18-25': 'Number/Percentage of affected population that is Male & Aged 18-25',
'Number/Percentage of affected population that is Male & Aged 26-60': 'Number/Percentage of affected population that is Male & Aged 26-60',
'Number/Percentage of affected population that is Male & Aged 6-12': 'Number/Percentage of affected population that is Male & Aged 6-12',
'Number/Percentage of affected population that is Male & Aged 61+': 'Number/Percentage of affected population that is Male & Aged 61+',
'Numbers Only': 'Numbers Only',
'Nursery Beds': 'Nursery Beds',
'Nutrition': 'Nutrition',
'OK': 'OK',
'OR Reason': 'OR Reason',
'OR Status': 'OR Status',
'OR Status Reason': 'OR Status Reason',
'Observer': 'Observer',
'Obstetrics/Gynecology': 'Obstétrique/Gynécologie',
'Office': 'Office',
'Office Address': 'Office Address',
'Office Details': 'Office Details',
'Office added': 'Office added',
'Office deleted': 'Office deleted',
'Office updated': 'Office updated',
'Offices': 'Offices',
'Offline Sync': 'Offline Sync',
'Offline Sync (from USB/File Backup)': 'Offline Sync (from USB/File Backup)',
'Old': 'Old',
'Older person (>60 yrs)': 'Older person (>60 yrs)',
'On by default?': 'On by default?',
'On by default? (only applicable to Overlays)': 'On by default? (only applicable to Overlays)',
'One Time Cost': 'One Time Cost',
'One time cost': 'One time cost',
'One-time': 'One-time',
'One-time costs': 'One-time costs',
'Oops! Something went wrong...': 'Oops! Something went wrong...',
'Oops! something went wrong on our side.': 'Oops! Something went wrong on our side.',
'Open': 'Open',
'Open Assessment': 'Open Assessment',
'Open area': 'Open area',
'Open recent': 'Open recent',
'OpenStreetMap Editor': 'OpenStreetMap Editor',
'Operating Rooms': 'Operating Rooms',
'Optional link to an Incident which this Assessment was triggered by.': 'Optional link to an Incident which this Assessment was triggered by.',
'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).',
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.",
'Options': 'Options',
'Organisation': 'Organisation',
'Organization': 'Organization',
'Organization Details': 'Organization Details',
'Organization Registry': 'Organization Registry',
'Organization added': 'Organization added',
'Organization deleted': 'Organization deleted',
'Organization updated': 'Organization updated',
'Organizations': 'Organizations',
'Origin': 'Origin',
'Other': 'Other',
'Other (describe)': 'Other (describe)',
'Other (specify)': 'Other (specify)',
'Other Evidence': 'Other Evidence',
'Other Faucet/Piped Water': 'Other Faucet/Piped Water',
'Other Isolation': 'Other Isolation',
'Other Name': 'Other Name',
'Other activities of boys 13-17yrs': 'Other activities of boys 13-17yrs',
'Other activities of boys 13-17yrs before disaster': 'Other activities of boys 13-17yrs before disaster',
'Other activities of boys <12yrs': 'Other activities of boys <12yrs',
'Other activities of boys <12yrs before disaster': 'Other activities of boys <12yrs before disaster',
'Other activities of girls 13-17yrs': 'Other activities of girls 13-17yrs',
'Other activities of girls 13-17yrs before disaster': 'Other activities of girls 13-17yrs before disaster',
'Other activities of girls<12yrs': 'Other activities of girls<12yrs',
'Other activities of girls<12yrs before disaster': 'Other activities of girls<12yrs before disaster',
'Other alternative infant nutrition in use': 'Other alternative infant nutrition in use',
'Other alternative places for study': 'Other alternative places for study',
'Other assistance needed': 'Other assistance needed',
'Other assistance, Rank': "Autre forme d'assistance, Rang",
'Other current health problems, adults': 'Other current health problems, adults',
'Other current health problems, children': 'Other current health problems, children',
'Other events': 'Other events',
'Other factors affecting school attendance': 'Other factors affecting school attendance',
'Other major expenses': 'Other major expenses',
'Other school assistance received': 'Other school assistance received',
'Other school assistance, details': 'Other school assistance, details',
'Other school assistance, source': 'Other school assistance, source',
'Other side dishes in stock': "Autres plats d'accompagnement en stock",
'Other types of water storage containers': 'Other types of water storage containers',
'Other ways to obtain food': 'Other ways to obtain food',
'Outbound Mail settings are configured in models/000_config.py.': 'Outbound Mail settings are configured in models/000_config.py.',
'Outbox': 'Outbox',
'Outgoing SMS Handler': 'Outgoing SMS Handler',
'Outgoing SMS handler': 'Outgoing SMS handler',
'Overland Flow Flood': 'Overland Flow Flood',
'Owned Resources': 'Owned Resources',
'PDAM': 'PDAM',
'PIN': 'PIN',
'PIN number ': 'PIN number ',
'PL Women': 'PL Women',
'Packet': 'Packet',
'Parameters': 'Parameters',
'Parent': 'Parent',
'Parent Office': 'Parent Office',
"Parent level should be higher than this record's level. Parent level is": "Parent level should be higher than this record's level. Parent level is",
'Parent needs to be of the correct level': 'Parent needs to be of the correct level',
'Parent needs to be set': 'Parent needs to be set',
'Parent needs to be set for locations of level': 'Parent needs to be set for locations of level',
'Participant': 'Participant',
'Pashto': 'Pashto',
'Passport': 'Passport',
'Password': 'Password',
"Password fields don't match": "Password fields don't match",
'Password for authentication at the peer. Note that only HTTP Basic authentication is supported.': 'Password for authentication at the peer. Note that only HTTP Basic authentication is supported.',
'Pathology': 'Pathology',
'Patients': 'Patients',
'Pediatric ICU': 'Pediatric ICU',
'Pediatric Psychiatric': 'Pediatric Psychiatric',
'Pediatrics': 'Pédiatrie',
'Peer': 'Peer',
'Peer Details': 'Peer Details',
'Peer Registration': 'Peer Registration',
'Peer Registration Details': 'Peer Registration Details',
'Peer Registration Request': 'Peer Registration Request',
'Peer Type': 'Peer Type',
'Peer UID': 'Peer UID',
'Peer added': 'Peer added',
'Peer deleted': 'Peer deleted',
'Peer not allowed to push': 'Peer not allowed to push',
'Peer registration request added': 'Peer registration request added',
'Peer registration request deleted': 'Peer registration request deleted',
'Peer registration request updated': 'Peer registration request updated',
'Peer updated': 'Peer updated',
'Peers': 'Peers',
'Pending Requests': 'Pending Requests',
'People': 'People',
'People Needing Food': 'People Needing Food',
'People Needing Shelter': 'People Needing Shelter',
'People Needing Water': 'People Needing Water',
'People Trapped': 'People Trapped',
'Person': 'Personne',
'Person 1': 'Person 1',
'Person 1, Person 2 are the potentially duplicate records': 'Person 1, Person 2 are the potentially duplicate records',
'Person 2': 'Person 2',
'Person Data': 'Person Data',
'Person De-duplicator': 'Person De-duplicator',
'Person Details': 'Person Details',
'Person Finder': 'Person Finder',
'Person Registry': 'Person Registry',
'Person added': 'Person added',
'Person deleted': 'Person deleted',
'Person details updated': 'Person details updated',
'Person interviewed': 'Person interviewed',
'Person missing': 'Person missing',
'Person reporting': 'Person reporting',
'Person who has actually seen the person/group.': 'Person who has actually seen the person/group.',
'Person who is reporting about the presence.': 'Person who is reporting about the presence.',
'Person who observed the presence (if different from reporter).': 'Person who observed the presence (if different from reporter).',
'Person/Group': 'Person/Group',
'Personal Data': 'Personal Data',
'Personal Effects': 'Personal Effects',
'Personal Effects Details': 'Personal Effects Details',
'Personal impact of disaster': 'Personal impact of disaster',
'Persons': 'Persons',
'Persons with disability (mental)': 'Personnes ayant un handicap (mental)',
'Persons with disability (physical)': 'Personnes ayant un handicap (physique)',
'Phone': 'Phone',
'Phone 1': 'Phone 1',
'Phone 2': 'Phone 2',
"Phone number to donate to this organization's relief efforts.": "Phone number to donate to this organization's relief efforts.",
'Phone/Business': 'Phone/Business',
'Phone/Emergency': 'Phone/Emergency',
'Phone/Exchange': 'Phone/Exchange',
'Photo': 'Photo',
'Photo Details': 'Photo Details',
'Photo added': 'Photo added',
'Photo deleted': 'Photo deleted',
'Photo updated': 'Photo updated',
'Photograph': 'Photograph',
'Photos': 'Photos',
'Physical Description': 'Physical Description',
'Picture upload and finger print upload facility': 'Picture upload and finger print upload facility',
'Place of Recovery': 'Place of Recovery',
'Playing': 'Playing',
"Please come back after sometime if that doesn't help.": "Please come back after sometime if that doesn't help.",
'Please correct all errors.': 'Please correct all errors.',
'Please enter a First Name': 'Please enter a First Name',
'Please enter a valid email address': 'Please enter a valid email address',
'Please enter the first few letters of the Person/Group for the autocomplete.': 'Please enter the first few letters of the Person/Group for the autocomplete.',
'Please enter the recipient': 'Please enter the recipient',
'Please fill this!': 'Please fill this!',
'Please report here where you are:': 'Please report here where you are:',
'Please select another level': 'Please select another level',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions on how the situation could be improved.',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Please use this field to record any additional information, including a history of the record if it is updated.',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.',
'Pledge': 'Pledge',
'Pledge Aid': 'Pledge Aid',
'Pledge Aid to match these Requests': 'Pledge Aid to match these Requests',
'Pledge Status': 'Pledge Status',
'Pledge Support': 'Pledge Support',
'Pledged': 'Pledged',
'Pledges': 'Pledges',
'Point': 'Point',
'Poisoning': 'Poisoning',
'Poisonous Gas': 'Poisonous Gas',
'Police': 'Police',
'Pollution and other environmental': 'Pollution and other environmental',
'Polygon': 'Polygon',
'Population': 'Population',
'Porridge': 'Bouillie',
'Port': 'Port',
'Port Closure': 'Port Closure',
'Position Details': 'Position Details',
'Position added': 'Position added',
'Position deleted': 'Position deleted',
'Position type': 'Position type',
'Position updated': 'Position updated',
'Positions': 'Positions',
'Postcode': 'Postcode',
'Poultry': 'Poultry',
'Poultry restocking, Rank': 'Poultry restocking, Rank',
'Pounds': 'Pounds',
'Power Failure': 'Power Failure',
'Powered by Sahana Eden': 'Powered by Sahana Eden',
'Preferred Name': 'Preferred Name',
'Pregnant women': 'Pregnant women',
'Preliminary': 'Preliminary',
'Presence': 'Presence',
'Presence Condition': 'Presence Condition',
'Presence Log': 'Presence Log',
"Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.": "Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.",
'Previous': 'Previous',
'Primary Name': 'Primary Name',
'Priority': 'Priority',
'Priority Level': 'Priority Level',
'Private': 'Private',
'Problem': 'Problem',
'Problem Administration': 'Problem Administration',
'Problem Details': 'Problem Details',
'Problem Group': 'Problem Group',
'Problem Title': 'Problem Title',
'Problem added': 'Problem added',
'Problem deleted': 'Problem deleted',
'Problem updated': 'Problem updated',
'Problems': 'Problems',
'Procedure': 'Procedure',
'Procurements': 'Procurements',
'Product Description': 'Product Description',
'Product Name': 'Product Name',
'Profile': 'Profile',
'Project': 'Project',
'Project Activities': 'Project Activities',
'Project Details': 'Project Details',
'Project Management': 'Project Management',
'Project Status': 'Project Status',
'Project Tracking': 'Project Tracking',
'Project added': 'Project added',
'Project deleted': 'Project deleted',
'Project has no Lat/Lon': 'Project has no Lat/Lon',
'Project updated': 'Project updated',
'Projection': 'Projection',
'Projection Details': 'Projection Details',
'Projection added': 'Projection added',
'Projection deleted': 'Projection deleted',
'Projection updated': 'Projection updated',
'Projections': 'Projections',
'Projects': 'Projects',
'Protected resource': 'Protected resource',
'Protection': 'Protection',
'Provide Metadata for your media files': 'Provide Metadata for your media files',
'Provide a password': 'Provide a password',
'Province': 'Province',
'Proxy-server': 'Proxy-server',
'Psychiatrics/Adult': 'Psychiatrics/Adult',
'Psychiatrics/Pediatric': 'Psychiatrics/Pediatric',
'Public': 'Public',
'Public Event': 'Public Event',
'Public and private transportation': 'Public and private transportation',
'Pull tickets from external feed': 'Pull tickets from external feed',
'Punjabi': 'Punjabi',
'Push tickets to external system': 'Push tickets to external system',
'Put a choice in the box': 'Put a choice in the box',
'Pyroclastic Flow': 'Pyroclastic Flow',
'Pyroclastic Surge': 'Pyroclastic Surge',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Python Serial module not available within the running Python - this needs installing to activate the Modem',
'Quantity': 'Quantity',
'Quarantine': 'Quarantine',
'Queries': 'Queries',
'Query': 'Query',
'Queryable?': 'Queryable?',
'RECORD A': 'RECORD A',
'RECORD B': 'RECORD B',
'RESPONSE': 'RESPONSE',
'Race': 'Race',
'Radiological Hazard': 'Radiological Hazard',
'Radiology': 'Radiology',
'Railway Accident': 'Railway Accident',
'Railway Hijacking': 'Railway Hijacking',
'Rain Fall': 'Rain Fall',
'Rapid Assessment': 'Rapid Assessment',
'Rapid Assessment Details': 'Rapid Assessment Details',
'Rapid Assessment added': 'Rapid Assessment added',
'Rapid Assessment deleted': 'Rapid Assessment deleted',
'Rapid Assessment updated': 'Rapid Assessment updated',
'Rapid Assessments': 'Rapid Assessments',
'Rapid Assessments & Flexible Impact Assessments': 'Rapid Assessments & Flexible Impact Assessments',
'Rapid Close Lead': 'Rapid Close Lead',
'Rating Scale': 'Rating Scale',
'Raw Database access': 'Raw Database access',
'Real World Arbitrary Units': 'Real World Arbitrary Units',
'Receive': 'Receive',
'Receive Items': 'Receive Items',
'Receive Shipment': 'Receive Shipment',
'Received': 'Received',
'Received By': 'Received By',
'Received Item Details': 'Received Item Details',
'Received Item added': 'Received Item added',
'Received Item deleted': 'Received Item deleted',
'Received Item updated': 'Received Item updated',
'Received Items': 'Received Items',
'Received Items added to Warehouse Items': 'Received Items added to Warehouse Items',
'Received Shipment Details': 'Received Shipment Details',
'Received Shipment canceled': 'Received Shipment canceled',
'Received Shipment updated': 'Received Shipment updated',
'Received Shipments': 'Received Shipments',
'Recipient': 'Recipient',
'Recipients': 'Recipients',
'Record Details': 'Record Details',
'Record Saved': 'Record Saved',
'Record added': 'Record added',
'Record deleted': 'Record deleted',
'Record last updated': 'Record last updated',
'Record not found!': 'Record not found!',
'Record updated': 'Record updated',
'Records': 'Records',
'Recovery': 'Recovery',
'Recovery Request': 'Recovery Request',
'Recovery Request added': 'Recovery Request added',
'Recovery Request deleted': 'Recovery Request deleted',
'Recovery Request updated': 'Recovery Request updated',
'Recovery Requests': 'Recovery Requests',
'Recovery report added': 'Recovery report added',
'Recovery report deleted': 'Recovery report deleted',
'Recovery report updated': 'Recovery report updated',
'Recurring': 'Recurring',
'Recurring Cost': 'Recurring Cost',
'Recurring cost': 'Recurring cost',
'Recurring costs': 'Recurring costs',
'Reference Document': 'Reference Document',
'Regional': 'Regional',
'Register': 'Register',
'Register Person': 'Register Person',
'Register Person into this Shelter': 'Register Person into this Shelter',
'Register them as a volunteer': 'Register them as a volunteer',
'Registered People': 'Registered People',
'Registered users can': 'Registered users can',
'Registering ad-hoc volunteers willing to contribute': 'Registering ad-hoc volunteers willing to contribute',
'Registration': 'Registration',
'Registration Details': 'Registration Details',
'Registration added': 'Registration added',
'Registration entry deleted': 'Registration entry deleted',
'Registration updated': 'Registration updated',
'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.',
'Rehabilitation/Long Term Care': 'Rehabilitation/Long Term Care',
'Relief': 'Relief',
'Relief Item Catalog': 'Relief Item Catalog',
'Relief Team': 'Relief Team',
'Religion': 'Religion',
'Religious Leader': 'Religious Leader',
'Relocate as instructed in the <instruction>': 'Relocate as instructed in the <instruction>',
'Remove': 'Remove',
'Repeat your password': 'Repeat your password',
'Replace': 'Replace',
'Replace if Master': 'Replace if Master',
'Replace if Newer': 'Replace if Newer',
'Report': 'Report',
'Report Another Assessment...': 'Report Another Assessment...',
'Report Details': 'Report Details',
'Report Resource': 'Report Resource',
'Report Type': 'Report Type',
'Report Types Include': 'Report Types Include',
'Report a Problem with the Software': 'Report a Problem with the Software',
'Report added': 'Report added',
'Report deleted': 'Report deleted',
'Report my location': 'Report my location',
'Report that person missing': 'Report that person missing',
'Report the contributing factors for the current EMS status.': 'Report the contributing factors for the current EMS status.',
'Report the contributing factors for the current OR status.': 'Report the contributing factors for the current OR status.',
'Report the person as found': 'Report the person as found',
'Report them as found': 'Report them as found',
'Report them missing': 'Report them missing',
'Report updated': 'Report updated',
'ReportLab module not available within the running Python - this needs installing for PDF output!': 'ReportLab module not available within the running Python - this needs installing for PDF output!',
'Reporter': 'Reporter',
'Reporter Name': 'Reporter Name',
'Reporting on the projects in the region': 'Reporting on the projects in the region',
'Reports': 'Reports',
'Request': 'Request',
'Request Added': 'Request Added',
'Request Canceled': 'Request Canceled',
'Request Details': 'Request Details',
'Request Item': 'Request Item',
'Request Item Details': 'Request Item Details',
'Request Item added': 'Request Item added',
'Request Item deleted': 'Request Item deleted',
'Request Item updated': 'Request Item updated',
'Request Items': 'Request Items',
'Request Type': 'Request Type',
'Request Updated': 'Request Updated',
'Request added': 'Request added',
'Request deleted': 'Request deleted',
'Request for Role Upgrade': 'Request for Role Upgrade',
'Request updated': 'Request updated',
'Request, Response & Session': 'Request, Response & Session',
'Requested': 'Requested',
'Requested By Location': 'Requested By Location',
'Requested From Warehouse': 'Requested From Warehouse',
'Requested by': 'Requested by',
'Requested on': 'Requested on',
'Requester': 'Requester',
'Requestor': 'Requestor',
'Requests': 'Requests',
'Requests From': 'Requests From',
'Requests for Item': 'Requests for Item',
'Requires Login!': 'Requires Login!',
'Requires login': 'Requires login',
'Rescue and recovery': 'Rescue and recovery',
'Reset': 'Reset',
'Reset Password': 'Reset Password',
'Reset form': 'Reset form',
'Resolve': 'Resolve',
'Resolve Conflict': 'Resolve Conflict',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.',
'Resource': 'Resource',
'Resource Details': 'Resource Details',
'Resource added': 'Resource added',
'Resource deleted': 'Resource deleted',
'Resource updated': 'Resource updated',
'Resources': 'Resources',
'Respiratory Infections': 'Respiratory Infections',
'Response': 'Response',
'Response Details': 'Response Details',
'Response added': 'Response added',
'Response deleted': 'Response deleted',
'Response updated': 'Response updated',
'Responses': 'Responses',
'Restricted Access': 'Restricted Access',
'Restrictions': 'Restrictions',
'Results': 'Results',
'Retail Crime': 'Retail Crime',
'Retrieve Password': 'Retrieve Password',
'Rice': 'Rice',
'Riot': 'Riot',
'River': 'River',
'River Details': 'River Details',
'River added': 'River added',
'River deleted': 'River deleted',
'River updated': 'River updated',
'Rivers': 'Rivers',
'Road Accident': 'Road Accident',
'Road Closed': 'Road Closed',
'Road Conditions': 'Road Conditions',
'Road Delay': 'Road Delay',
'Road Hijacking': 'Road Hijacking',
'Road Usage Condition': 'Road Usage Condition',
'Role': 'Role',
'Role Details': 'Role Details',
'Role Manager': 'Role Manager',
'Role Required': 'Role Required',
'Role Updated': 'Role Updated',
'Role added': 'Role added',
'Role deleted': 'Role deleted',
'Role updated': 'Role updated',
'Role-based': 'Role-based',
'Roles': 'Roles',
'Roles Permitted': 'Roles Permitted',
'Roof tile': 'Roof tile',
'Row Choices (One Per Line)': 'Row Choices (One Per Line)',
'Rows in table': 'Rows in table',
'Rows selected': 'Rows selected',
'Run Functional Tests': 'Run Functional Tests',
'Run Interval': 'Run Interval',
'Running Cost': 'Running Cost',
'SITUATION': 'SITUATION',
'Sahana Administrator': 'Sahana Administrator',
'Sahana Agasti': 'Sahana Agasti',
'Sahana Blue': 'Sahana Blue',
'Sahana Community Chat': 'Sahana Community Chat',
'Sahana Eden': 'Sahana Eden',
'Sahana Eden <=> Other': 'Sahana Eden <=> Other',
'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)',
'Sahana Eden <=> Sahana Eden': 'Sahana Eden <=> Sahana Eden',
'Sahana Eden Disaster Management Platform': 'Sahana Eden Disaster Management Platform',
'Sahana Eden Open Source Disaster Management Platform': 'Sahana Eden Open Source Disaster Management Platform',
'Sahana Eden Website': 'Sahana Eden Website',
'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organisations working in disaster management.': 'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organisations working in disaster management.',
'Sahana FOSS Disaster Management System': 'Sahana FOSS Disaster Management System',
'Sahana Green': 'Sahana Green',
'Sahana Login Approval Pending': 'Sahana Login Approval Pending',
'Sahana Steel': 'Sahana Steel',
'Sahana access granted': 'Sahana access granted',
'Sahana: new request has been made. Please login to see if you can fulfil the request.': 'Sahana: a new request has been made. Please login to see if you can fulfil the request.',
'Salted Fish': 'Salted Fish',
'Sanitation problems': 'Sanitation problems',
'Satellite': 'Satellite',
'Satellite Office': 'Satellite Office',
'Saturday': 'Saturday',
'Save': 'Save',
'Save any Changes in the one you wish to keep': 'Save any Changes in the one you wish to keep',
'Saved.': 'Saved.',
'Saving...': 'Saving...',
'Scale of Results': 'Scale of Results',
'Schedule': 'Schedule',
'School': 'School',
'School Closure': 'School Closure',
'School Lockdown': 'School Lockdown',
'School Reports': 'School Reports',
'School Teacher': 'School Teacher',
'School destroyed': 'School destroyed',
'School heavily damaged': 'School heavily damaged',
'School tents received': 'School tents received',
'School tents, source': 'School tents, source',
'School used for other purpose': 'School used for other purpose',
'School/studying': 'School/studying',
'Schools': 'Schools',
'Search': 'Search',
'Search & List Bin Types': 'Search & List Bin Types',
'Search & List Bins': 'Search & List Bins',
'Search & List Catalog': 'Search & List Catalog',
'Search & List Category': 'Search & List Category',
'Search & List Items': 'Search & List Items',
'Search & List Locations': 'Search & List Locations',
'Search & List Site': 'Search & List Site',
'Search & List Sub-Category': 'Search & List Sub-Category',
'Search & List Unit': 'Search & List Unit',
'Search Activities': 'Search Activities',
'Search Activity Report': 'Search Activity Report',
'Search Addresses': 'Search Addresses',
'Search Aid Requests': 'Search Aid Requests',
'Search Assessment Summaries': 'Search Assessment Summaries',
'Search Assessments': 'Search Assessments',
'Search Baseline Type': 'Search Baseline Type',
'Search Baselines': 'Search Baselines',
'Search Budgets': 'Search Budgets',
'Search Bundles': 'Search Bundles',
'Search Catalog Items': 'Search Catalog Items',
'Search Category<>Sub-Category<>Catalog Relation': 'Search Category<>Sub-Category<>Catalog Relation',
'Search Checklists': 'Search Checklists',
'Search Cluster Subsectors': 'Search Cluster Subsectors',
'Search Clusters': 'Search Clusters',
'Search Configs': 'Search Configs',
'Search Contact Information': 'Search Contact Information',
'Search Contacts': 'Search Contacts',
'Search Distribution Items': 'Search Distribution Items',
'Search Distributions': 'Search Distributions',
'Search Documents': 'Search Documents',
'Search Donors': 'Search Donors',
'Search Feature Class': 'Search Feature Class',
'Search Feature Layers': 'Search Feature Layers',
'Search Flood Reports': 'Search Flood Reports',
'Search Groups': 'Search Groups',
'Search Hospitals': 'Search Hospitals',
'Search Identity': 'Search Identity',
'Search Images': 'Search Images',
'Search Impact Type': 'Search Impact Type',
'Search Impacts': 'Search Impacts',
'Search Incident Reports': 'Search Incident Reports',
'Search Incidents': 'Search Incidents',
'Search Inventory Items': 'Search Inventory Items',
'Search Inventory Stores': 'Search Inventory Stores',
'Search Item Catalog Category(s)': 'Search Item Catalog Category(s)',
'Search Item Catalog(s)': 'Search Item Catalog(s)',
'Search Item Categories': 'Search Item Categories',
'Search Item Packets': 'Search Item Packets',
'Search Item Sub-Category(s)': 'Search Item Sub-Category(s)',
'Search Items': 'Search Items',
'Search Keys': 'Search Keys',
'Search Kits': 'Search Kits',
'Search Layers': 'Search Layers',
'Search Locations': 'Search Locations',
'Search Log Entry': 'Search Log Entry',
'Search Markers': 'Search Markers',
'Search Member': 'Search Member',
'Search Membership': 'Search Membership',
'Search Memberships': 'Search Memberships',
'Search Metadata': 'Search Metadata',
'Search Need Type': 'Search Need Type',
'Search Needs': 'Search Needs',
'Search Notes': 'Search Notes',
'Search Offices': 'Search Offices',
'Search Organizations': 'Search Organizations',
'Search Peer': 'Search Peer',
'Search Peers': 'Search Peers',
'Search Personal Effects': 'Search Personal Effects',
'Search Persons': 'Search Persons',
'Search Photos': 'Search Photos',
'Search Positions': 'Search Positions',
'Search Problems': 'Search Problems',
'Search Projections': 'Search Projections',
'Search Projects': 'Search Projects',
'Search Rapid Assessments': 'Search Rapid Assessments',
'Search Received Items': 'Search Received Items',
'Search Received Shipments': 'Search Received Shipments',
'Search Records': 'Search Records',
'Search Recovery Reports': 'Search Recovery Reports',
'Search Registations': 'Search Registrations',
'Search Registration Request': 'Search Registration Request',
'Search Report': 'Search Report',
'Search Reports': 'Search Reports',
'Search Request': 'Search Request',
'Search Request Items': 'Search Request Items',
'Search Requests': 'Search Requests',
'Search Resources': 'Search Resources',
'Search Responses': 'Search Responses',
'Search Rivers': 'Search Rivers',
'Search Roles': 'Search Roles',
'Search Sections': 'Search Sections',
'Search Sectors': 'Search Sectors',
'Search Sent Items': 'Search Sent Items',
'Search Sent Shipments': 'Search Sent Shipments',
'Search Service Profiles': 'Search Service Profiles',
'Search Settings': 'Search Settings',
'Search Shelter Services': 'Search Shelter Services',
'Search Shelter Types': 'Search Shelter Types',
'Search Shelters': 'Search Shelters',
'Search Shipment Transit Logs': 'Search Shipment Transit Logs',
'Search Shipment/Way Bills': 'Search Shipment/Way Bills',
'Search Shipment<>Item Relation': 'Search Shipment<>Item Relation',
'Search Site(s)': 'Search Site(s)',
'Search Skill Types': 'Search Skill Types',
'Search Skills': 'Search Skills',
'Search Solutions': 'Search Solutions',
'Search Staff': 'Search Staff',
'Search Staff Types': 'Search Staff Types',
'Search Status': 'Search Status',
'Search Storage Bin Type(s)': 'Search Storage Bin Type(s)',
'Search Storage Bin(s)': 'Search Storage Bin(s)',
'Search Storage Location(s)': 'Search Storage Location(s)',
'Search Subscriptions': 'Search Subscriptions',
'Search Tasks': 'Search Tasks',
'Search Teams': 'Search Teams',
'Search Themes': 'Search Themes',
'Search Tickets': 'Search Tickets',
'Search Tracks': 'Search Tracks',
'Search Twitter Tags': 'Search Twitter Tags',
'Search Units': 'Search Units',
'Search Users': 'Search Users',
'Search Volunteer Registrations': 'Search Volunteer Registrations',
'Search Volunteers': 'Search Volunteers',
'Search Warehouse Items': 'Search Warehouse Items',
'Search Warehouses': 'Search Warehouses',
'Search and Edit Group': 'Search and Edit Group',
'Search and Edit Individual': 'Search and Edit Individual',
'Search by ID Tag': 'Search by ID Tag',
'Search by Skill Types': 'Search by Skill Types',
'Search for Items': 'Search for Items',
'Search for a Hospital': 'Search for a Hospital',
'Search for a Location': 'Search for a Location',
'Search for a Person': 'Search for a Person',
'Search for a Project': 'Search for a Project',
'Search for a Request': 'Search for a Request',
'Search here for a person in order to:': 'Search here for a person in order to:',
"Search here for a person's record in order to:": "Search here for a person's record in order to:",
'Search messages': 'Search messages',
'Searching for different groups and individuals': 'Searching for different groups and individuals',
'Secondary Server (Optional)': 'Secondary Server (Optional)',
'Seconds must be a number between 0 and 60': 'Seconds must be a number between 0 and 60',
'Section Details': 'Section Details',
'Section deleted': 'Section deleted',
'Section updated': 'Section updated',
'Sections': 'Sections',
'Sector': 'Sector',
'Sector Details': 'Sector Details',
'Sector added': 'Sector added',
'Sector deleted': 'Sector deleted',
'Sector updated': 'Sector updated',
'Sectors': 'Sectors',
'Security Policy': 'Security Policy',
'Security Status': 'Security Status',
'Security problems': 'Security problems',
'Seen': 'Seen',
'Select 2 potential locations from the dropdowns.': 'Select 2 potential locations from the dropdowns.',
'Select Items from this Warehouse': 'Select Items from this Warehouse',
'Select Photos': 'Select Photos',
'Select a location': 'Select a location',
"Select a person in charge for status 'assigned'": "Select a person in charge for status 'assigned'",
'Select a question from the list': 'Select a question from the list',
'Select all that apply': 'Select all that apply',
'Select an Organization to see a list of offices': 'Select an Organization to see a list of offices',
'Select the Cluster Layers for Assessments and Activities to analyse the Gaps:': 'Select the Cluster Layers for Assessments and Activities to analyse the Gaps:',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Select the overlays for Assessments and Activities relating to each Need to identify the gap.',
'Select the person assigned to this role for this project.': 'Select the person assigned to this role for this project.',
'Select the person associated with this scenario.': 'Select the person associated with this scenario.',
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS',
'Selects whether to use the gateway or the Modem for sending out SMS': 'Selects whether to use the gateway or the Modem for sending out SMS',
'Self Registration': 'Self Registration',
'Self-registration': 'Self-registration',
'Send': 'Send',
'Send Alerts using Email &/or SMS': 'Send Alerts using Email &/or SMS',
'Send Mail': 'Send Mail',
'Send Notification': 'Send Notification',
'Send Shipment': 'Send Shipment',
'Send message': 'Send message',
'Send new message': 'Send new message',
'Sends & Receives Alerts via Email & SMS': 'Sends & Receives Alerts via Email & SMS',
'Senior (50+)': 'Senior (50+)',
'Sensitivity': 'Sensitivity',
'Sent': 'Sent',
'Sent Item': 'Sent Item',
'Sent Item Details': 'Sent Item Details',
'Sent Item added': 'Sent Item added',
'Sent Item deleted': 'Sent Item deleted',
'Sent Item updated': 'Sent Item updated',
'Sent Items': 'Sent Items',
'Sent Shipment Details': 'Sent Shipment Details',
'Sent Shipment canceled': 'Sent Shipment canceled',
'Sent Shipment updated': 'Sent Shipment updated',
'Sent Shipments': 'Sent Shipments',
'Seraiki': 'Seraiki',
'Series': 'Series',
'Server': 'Server',
'Service': 'Service',
'Service Catalogue': 'Service Catalogue',
'Service or Facility': 'Service or Facility',
'Service profile added': 'Service profile added',
'Service profile deleted': 'Service profile deleted',
'Service profile updated': 'Service profile updated',
'Services': 'Services',
'Services Available': 'Services Available',
'Setting Details': 'Setting Details',
'Setting added': 'Setting added',
'Setting deleted': 'Setting deleted',
'Setting updated': 'Setting updated',
'Settings': 'Settings',
'Settings updated': 'Settings updated',
'Settings were reset because authenticating with Twitter failed': 'Settings were reset because authenticating with Twitter failed',
'Severity': 'Severity',
'Severity:': 'Severity:',
'Share a common Marker (unless over-ridden at the Feature level)': 'Share a common Marker (unless over-ridden at the Feature level)',
'Shelter': 'Shelter',
'Shelter & Essential NFIs': 'Shelter & Essential NFIs',
'Shelter Details': 'Shelter Details',
'Shelter Name': 'Shelter Name',
'Shelter Registry': 'Shelter Registry',
'Shelter Service': 'Shelter Service',
'Shelter Service Details': 'Shelter Service Details',
'Shelter Service added': 'Shelter Service added',
'Shelter Service deleted': 'Shelter Service deleted',
'Shelter Service updated': 'Shelter Service updated',
'Shelter Services': 'Shelter Services',
'Shelter Type': 'Shelter Type',
'Shelter Type Details': 'Shelter Type Details',
'Shelter Type added': 'Shelter Type added',
'Shelter Type deleted': 'Shelter Type deleted',
'Shelter Type updated': 'Shelter Type updated',
'Shelter Types': 'Shelter Types',
'Shelter Types and Services': 'Shelter Types and Services',
'Shelter added': 'Shelter added',
'Shelter deleted': 'Shelter deleted',
'Shelter updated': 'Shelter updated',
'Shelters': 'Shelters',
'Shipment Created': 'Shipment Created',
'Shipment Details': 'Shipment Details',
'Shipment Items': 'Shipment Items',
'Shipment Received': 'Shipment Received',
'Shipment Sent': 'Shipment Sent',
'Shipment Transit Log Details': 'Shipment Transit Log Details',
'Shipment Transit Log added': 'Shipment Transit Log added',
'Shipment Transit Log deleted': 'Shipment Transit Log deleted',
'Shipment Transit Log updated': 'Shipment Transit Log updated',
'Shipment Transit Logs': 'Shipment Transit Logs',
'Shipment/Way Bill added': 'Shipment/Way Bill added',
'Shipment/Way Bills': 'Shipment/Way Bills',
'Shipment/Way Bills Details': 'Shipment/Way Bills Details',
'Shipment/Way Bills deleted': 'Shipment/Way Bills deleted',
'Shipment/Way Bills updated': 'Shipment/Way Bills updated',
'Shipment<>Item Relation added': 'Shipment<>Item Relation added',
'Shipment<>Item Relation deleted': 'Shipment<>Item Relation deleted',
'Shipment<>Item Relation updated': 'Shipment<>Item Relation updated',
'Shipment<>Item Relations': 'Shipment<>Item Relations',
'Shipment<>Item Relations Details': 'Shipment<>Item Relations Details',
'Shipments': 'Shipments',
'Shipments To': 'Shipments To',
'Shooting': 'Shooting',
'Short Assessment': 'Short Assessment',
'Short Description': 'Short Description',
'Show Checklist': 'Show Checklist',
'Show on map': 'Show on map',
'Sindhi': 'Sindhi',
'Site': 'Site',
'Site Address': 'Site Address',
'Site Administration': 'Site Administration',
'Site Description': 'Site Description',
'Site Details': 'Site Details',
'Site ID': 'Site ID',
'Site Location Description': 'Site Location Description',
'Site Location Name': 'Site Location Name',
'Site Manager': 'Site Manager',
'Site Name': 'Site Name',
'Site added': 'Site added',
'Site deleted': 'Site deleted',
'Site updated': 'Site updated',
'Site/Warehouse': 'Site/Warehouse',
'Sites': 'Sites',
'Situation Awareness & Geospatial Analysis': 'Situation Awareness & Geospatial Analysis',
'Sketch': 'Sketch',
'Skill': 'Skill',
'Skill Details': 'Skill Details',
'Skill Status': 'Skill Status',
'Skill Type Details': 'Skill Type Details',
'Skill Type added': 'Skill Type added',
'Skill Type deleted': 'Skill Type deleted',
'Skill Type updated': 'Skill Type updated',
'Skill Types': 'Skill Types',
'Skill added': 'Skill added',
'Skill deleted': 'Skill deleted',
'Skill updated': 'Skill updated',
'Skills': 'Skills',
'Skype ID': 'Skype ID',
'Small Trade': 'Small Trade',
'Smoke': 'Smoke',
'Snow Fall': 'Snow Fall',
'Snow Squall': 'Snow Squall',
'Solid waste': 'Solid waste',
'Solution': 'Solution',
'Solution Details': 'Solution Details',
'Solution Item': 'Solution Item',
'Solution added': 'Solution added',
'Solution deleted': 'Solution deleted',
'Solution updated': 'Solution updated',
'Solutions': 'Solutions',
'Some': 'Some',
'Sorry that location appears to be outside the area of the Parent.': 'Sorry, that location appears to be outside the area of the Parent.',
'Sorry that location appears to be outside the area supported by this deployment.': 'Sorry, that location appears to be outside the area supported by this deployment.',
'Sorry, I could not understand your request': 'Sorry, I could not understand your request',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Sorry, only users with the MapAdmin role are allowed to edit these locations',
'Sorry, something went wrong.': 'Sorry, something went wrong.',
'Sorry, that page is forbidden for some reason.': 'Sorry, that page is forbidden for some reason.',
'Sorry, that service is temporary unavailable.': 'Sorry, that service is temporarily unavailable.',
'Sorry, there are no addresses to display': 'Sorry, there are no addresses to display',
"Sorry, things didn't get done on time.": "Sorry, things didn't get done on time.",
"Sorry, we couldn't find that page.": "Sorry, we couldn't find that page.",
'Source': 'Source',
'Source ID': 'Source ID',
'Source Time': 'Source Time',
'Source Type': 'Source Type',
'Space Debris': 'Space Debris',
'Spanish': 'Spanish',
'Special Ice': 'Special Ice',
'Special Marine': 'Special Marine',
'Special needs': 'Special needs',
'Specialized Hospital': 'Specialized Hospital',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.',
'Specific locations need to have a parent of level': 'Specific locations need to have a parent of level',
'Specify a descriptive title for the image.': 'Specify a descriptive title for the image.',
'Specify the bed type of this unit.': 'Specify the bed type of this unit.',
'Specify the number of available sets': 'Specify the number of available sets',
'Specify the number of available units (adult doses)': 'Specify the number of available units (adult doses)',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions',
'Specify the number of sets needed per 24h': 'Specify the number of sets needed per 24h',
'Specify the number of units (adult doses) needed per 24h': 'Specify the number of units (adult doses) needed per 24h',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h',
'Spherical Mercator?': 'Spherical Mercator?',
'Spreadsheet Importer': 'Spreadsheet Importer',
'Spreadsheet uploaded': 'Spreadsheet uploaded',
'Spring': 'Spring',
'Squall': 'Squall',
'Staff': 'Staff',
'Staff 2': 'Staff 2',
'Staff Details': 'Staff Details',
'Staff Type Details': 'Staff Type Details',
'Staff Type added': 'Staff Type added',
'Staff Type deleted': 'Staff Type deleted',
'Staff Type updated': 'Staff Type updated',
'Staff Types': 'Staff Types',
'Staff added': 'Staff added',
'Staff deleted': 'Staff deleted',
'Staff updated': 'Staff updated',
'Staffing': 'Staffing',
'Start date': 'Start date',
'Start of Period': 'Start of Period',
'State': 'State',
'Stationery': 'Stationery',
'Status': 'Status',
'Status Report': 'Status Report',
'Status added': 'Status added',
'Status deleted': 'Status deleted',
'Status of clinical operation of the facility.': 'Status of clinical operation of the facility.',
'Status of general operation of the facility.': 'Status of general operation of the facility.',
'Status of morgue capacity.': 'Status of morgue capacity.',
'Status of operations of the emergency department of this hospital.': 'Status of operations of the emergency department of this hospital.',
'Status of security procedures/access restrictions in the hospital.': 'Status of security procedures/access restrictions in the hospital.',
'Status of the operating rooms of this hospital.': 'Status of the operating rooms of this hospital.',
'Status updated': 'Status updated',
'Storage Bin': 'Storage Bin',
'Storage Bin Details': 'Storage Bin Details',
'Storage Bin Number': 'Storage Bin Number',
'Storage Bin Type': 'Storage Bin Type',
'Storage Bin Type Details': 'Storage Bin Type Details',
'Storage Bin Type added': 'Storage Bin Type added',
'Storage Bin Type deleted': 'Storage Bin Type deleted',
'Storage Bin Type updated': 'Storage Bin Type updated',
'Storage Bin Types': 'Storage Bin Types',
'Storage Bin added': 'Storage Bin added',
'Storage Bin deleted': 'Storage Bin deleted',
'Storage Bin updated': 'Storage Bin updated',
'Storage Bins': 'Storage Bins',
'Storage Location': 'Storage Location',
'Storage Location Details': 'Storage Location Details',
'Storage Location ID': 'Storage Location ID',
'Storage Location Name': 'Storage Location Name',
'Storage Location added': 'Storage Location added',
'Storage Location deleted': 'Storage Location deleted',
'Storage Location updated': 'Storage Location updated',
'Storage Locations': 'Storage Locations',
'Store spreadsheets in the Eden database': 'Store spreadsheets in the Eden database',
'Storm Force Wind': 'Storm Force Wind',
'Storm Surge': 'Storm Surge',
'Stowaway': 'Stowaway',
'Street': 'Street',
'Street (continued)': 'Street (continued)',
'Street Address': 'Street Address',
'Strong Wind': 'Strong Wind',
'Sub Category': 'Sub Category',
'Sub-type': 'Sub-type',
'Subject': 'Subject',
'Submission successful - please wait': 'Submission successful - please wait',
'Submission successful - please wait...': 'Submission successful - please wait...',
'Subscription Details': 'Subscription Details',
'Subscription added': 'Subscription added',
'Subscription deleted': 'Subscription deleted',
'Subscription updated': 'Subscription updated',
'Subscriptions': 'Subscriptions',
'Subsistence Cost': 'Subsistence Cost',
'Suggest not changing this field unless you know what you are doing.': 'Suggest not changing this field unless you know what you are doing.',
'Summary': 'Summary',
'Sunday': 'Sunday',
'Support Request': 'Support Request',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked lists.',
'Sure you want to delete this object?': 'Sure you want to delete this object?',
'Surgery': 'Surgery',
'Survey Answer': 'Survey Answer',
'Survey Answer Details': 'Survey Answer Details',
'Survey Answer added': 'Survey Answer added',
'Survey Answer deleted': 'Survey Answer deleted',
'Survey Answer updated': 'Survey Answer updated',
'Survey Module': 'Survey Module',
'Survey Name': 'Survey Name',
'Survey Question': 'Survey Question',
'Survey Question Details': 'Survey Question Details',
'Survey Question Display Name': 'Survey Question Display Name',
'Survey Question added': 'Survey Question added',
'Survey Question deleted': 'Survey Question deleted',
'Survey Question updated': 'Survey Question updated',
'Survey Section': 'Survey Section',
'Survey Section Details': 'Survey Section Details',
'Survey Section Display Name': 'Survey Section Display Name',
'Survey Section added': 'Survey Section added',
'Survey Section deleted': 'Survey Section deleted',
'Survey Section updated': 'Survey Section updated',
'Survey Series': 'Survey Series',
'Survey Series Details': 'Survey Series Details',
'Survey Series Name': 'Survey Series Name',
'Survey Series added': 'Survey Series added',
'Survey Series deleted': 'Survey Series deleted',
'Survey Series updated': 'Survey Series updated',
'Survey Template': 'Survey Template',
'Survey Template Details': 'Survey Template Details',
'Survey Template added': 'Survey Template added',
'Survey Template deleted': 'Survey Template deleted',
'Survey Template updated': 'Survey Template updated',
'Survey Templates': 'Survey Templates',
'Switch this on to use individual CSS/Javascript files for diagnostics during development.': 'Switch this on to use individual CSS/Javascript files for diagnostics during development.',
'Symbology': 'Symbology',
'Sync Conflicts': 'Sync Conflicts',
'Sync History': 'Sync History',
'Sync Now': 'Sync Now',
'Sync Partners': 'Sync Partners',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go to the page where you can add sync partners, search for sync partners and modify them.',
'Sync Pools': 'Sync Pools',
'Sync Schedule': 'Sync Schedule',
'Sync Settings': 'Sync Settings',
'Sync process already started on ': 'Sync process already started on ',
'Synchronisation': 'Synchronisation',
'Synchronisation History': 'Synchronisation History',
'Synchronization': 'Synchronization',
'Synchronization Conflicts': 'Synchronization Conflicts',
'Synchronization Details': 'Synchronization Details',
'Synchronization History': 'Synchronization History',
'Synchronization Peers': 'Synchronization Peers',
'Synchronization Settings': 'Synchronization Settings',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden',
'Synchronization not configured.': 'Synchronization not configured.',
'Synchronization settings updated': 'Synchronization settings updated',
'Syncronisation History': 'Synchronisation History',
'Syncronisation Schedules': 'Synchronisation Schedules',
'System allows the General Public to Report Incidents & have these Tracked.': 'System allows the General Public to Report Incidents & have these Tracked.',
'System allows the tracking & discovery of Items stored in Locations.': 'System allows the tracking & discovery of Items stored in Locations.',
'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': 'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.',
'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': 'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.',
"System's Twitter account updated": "System's Twitter account updated",
'Tags': 'Tags',
'Take shelter in place or per <instruction>': 'Take shelter in place or per <instruction>',
'Task Details': 'Task Details',
'Task List': 'Task List',
'Task Status': 'Task Status',
'Task added': 'Task added',
'Task deleted': 'Task deleted',
'Task status': 'Task status',
'Task updated': 'Task updated',
'Tasks': 'Tasks',
'Team': 'Team',
'Team Description': 'Team Description',
'Team Details': 'Team Details',
'Team Head': 'Team Head',
'Team Id': 'Team Id',
'Team Leader': 'Team Leader',
'Team Member added': 'Team Member added',
'Team Members': 'Team Members',
'Team Name': 'Team Name',
'Team Type': 'Team Type',
'Team added': 'Team added',
'Team deleted': 'Team deleted',
'Team updated': 'Team updated',
'Teams': 'Teams',
'Technical testing only, all recipients disregard': 'Technical testing only, all recipients disregard',
'Telecommunications': 'Telecommunications',
'Telephone': 'Telephone',
'Telephony': 'Telephony',
'Temp folder %s not writable - unable to apply theme!': 'Temp folder %s not writable - unable to apply theme!',
'Template file %s not readable - unable to apply theme!': 'Template file %s not readable - unable to apply theme!',
'Templates': 'Templates',
'Terrorism': 'Terrorism',
'Tertiary Server (Optional)': 'Tertiary Server (Optional)',
'Test Results': 'Test Results',
'Text': 'Text',
'Text Colour for Text blocks': 'Text Colour for Text blocks',
'Text before each Text Field (One per line)': 'Text before each Text Field (One per line)',
'Text in Message': 'Text in Message',
'Text in Message: ': 'Text in Message: ',
'Thanks for your assistance': 'Thanks for your assistance',
'The': 'The',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.',
'The Area which this Site is located within.': 'The Area which this Site is located within.',
'The Assessments module allows field workers to send in assessments.': 'The Assessments module allows field workers to send in assessments.',
'The Author of this Document (optional)': 'The Author of this Document (optional)',
'The Current Location of the Person, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Current Location of the Person, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The District for this Report.': 'The District for this Report.',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.",
'The Group whose members can edit data in this record.': 'The Group whose members can edit data in this record.',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.',
'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).': 'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Media Library provides a catalogue of digital media.': 'The Media Library provides a catalogue of digital media.',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.',
'The Office this record is associated with.': 'The Office this record is associated with.',
'The Organization Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'The Organization Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.',
'The Organization this record is associated with.': 'The Organization this record is associated with.',
'The Organization which is funding this Activity.': 'The Organization which is funding this Activity.',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.',
'The Rapid Assessments Module stores structured reports done by Professional Organizations.': 'The Rapid Assessments Module stores structured reports done by Professional Organizations.',
'The Request this record is associated with.': 'The Request this record is associated with.',
'The Role this person plays within this Office/Project.': 'The Role this person plays within this Office/Project.',
'The Role this person plays within this hospital.': 'The Role this person plays within this hospital.',
"The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.": "The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.",
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.',
'The Shelter this Request is from (optional).': 'The Shelter this Request is from (optional).',
'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': 'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": "The URL of the image file. If you don't upload an image file, then you must specify its location here.",
'The URL of your web gateway without the post parameters': 'The URL of your web gateway without the post parameters',
'The URL to access the service.': 'The URL to access the service.',
'The Unique Identifier (UUID) as assigned to this facility by the government.': 'The Unique Identifier (UUID) as assigned to this facility by the government.',
'The attribute within the KML which is used for the title of popups.': 'The attribute within the KML which is used for the title of popups.',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)',
'The body height (crown to heel) in cm.': 'The body height (crown to heel) in cm.',
'The category of the Item.': 'The category of the Item.',
'The contact person for this organization.': 'The contact person for this organization.',
'The country the person usually lives in.': 'The country the person usually lives in.',
'The default policy for data import from this peer.': 'The default policy for data import from this peer.',
'The descriptive name of the peer.': 'The descriptive name of the peer.',
'The duplicate record will be deleted': 'The duplicate record will be deleted',
'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.': 'The entered unit links to this unit. For example, if you are entering m for meter, then choose kilometer (if it exists) and enter the value 0.001 as the multiplier.',
'The first or only name of the person (mandatory).': 'The first or only name of the person (mandatory).',
'The following modules are available': 'The following modules are available',
'The hospital this record is associated with.': 'The hospital this record is associated with.',
'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.': 'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.',
'The language to use for notifications.': 'The language to use for notifications.',
'The last known location of the missing person before disappearance.': 'The last known location of the missing person before disappearance.',
'The list of Item categories are maintained by the Administrators.': 'The list of Item categories is maintained by the Administrators.',
'The name to be used when calling for or directly addressing the person (optional).': 'The name to be used when calling for or directly addressing the person (optional).',
'The next screen will allow you to detail the number of people here & their needs.': 'The next screen will allow you to detail the number of people here & their needs.',
'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...': 'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.',
'The person at the location who is reporting this incident (optional)': 'The person at the location who is reporting this incident (optional)',
'The person reporting about the missing person.': 'The person reporting about the missing person.',
'The person reporting the missing person.': 'The person reporting the missing person.',
"The person's manager within this Office/Project.": 'La personne gestionnaire sein de ce bureau / projet.',
'The post variable containing the phone number': 'The post variable containing the phone number',
'The post variable on the URL used for sending messages': 'The post variable on the URL used for sending messages',
'The post variables other than the ones containing the message and the phone number': 'The post variables other than the ones containing the message and the phone number',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'The server received an incorrect response from another server that it was accessing to fill the request by the browser.',
'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.': 'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>',
'The title of the WMS Browser panel in the Tools panel.': 'The title of the WMS Browser panel in the Tools panel.',
'The token associated with this application on': 'The token associated with this application on',
'The unique identifier of the peer. Leave blank if the peer is no Sahana Eden instance, it will be auto-assigned in that case.': 'The unique identifier of the peer. Leave blank if the peer is not a Sahana Eden instance; it will be auto-assigned in that case.',
'The unique identifier which identifies this instance to other instances.': 'The unique identifier which identifies this instance to other instances.',
'The weight in kg.': 'The weight in kg.',
'Theme': 'Theme',
'Theme Details': 'Theme Details',
'Theme added': 'Theme added',
'Theme deleted': 'Theme deleted',
'Theme updated': 'Theme updated',
'Themes': 'Themes',
'There are errors': 'There are errors',
'There are multiple records at this location': 'There are multiple records at this location',
'There are not sufficient items in the store to send this shipment': 'There are not sufficient items in the store to send this shipment',
'These are settings for Inbound Mail.': 'These are settings for Inbound Mail.',
'These are the Incident Categories visible to normal End-Users': 'These are the Incident Categories visible to normal End-Users',
'These are the default settings for all users. To change settings just for you, click ': 'These are the default settings for all users. To change settings just for you, click ',
'They': 'They',
'This appears to be a duplicate of ': 'This appears to be a duplicate of ',
'This file already exists on the server as': 'This file already exists on the server as',
'This form allows the administrator to remove a duplicate location.': 'This form allows the administrator to remove a duplicate location.',
'This is the way to transfer data between machines as it maintains referential integrity.': 'This is the way to transfer data between machines as it maintains referential integrity.',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!',
'This might be due to a temporary overloading or maintenance of the server.': 'This might be due to a temporary overloading or maintenance of the server.',
'This page shows you logs of past syncs. Click on the link below to go to this page.': 'This page shows you logs of past syncs. Click on the link below to go to this page.',
'This screen allows you to upload a collection of photos to the server.': 'This screen allows you to upload a collection of photos to the server.',
'Thunderstorm': 'Thunderstorm',
'Thursday': 'Thursday',
'Ticket': 'Ticket',
'Ticket Details': 'Ticket Details',
'Ticket added': 'Ticket added',
'Ticket deleted': 'Ticket deleted',
'Ticket updated': 'Ticket updated',
'Ticketing Module': 'Ticketing Module',
'Tickets': 'Tickets',
'Time of Request': 'Time of Request',
'Title': 'Title',
'To Location': 'To Location',
'To begin the sync process, click the button on the right => ': 'To begin the sync process, click the button on the right => ',
'To begin the sync process, click this button => ': 'To begin the sync process, click this button => ',
'To delete': 'To delete',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py',
"To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.",
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.",
"To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.",
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Pour rechercher une personne, entrez l'un des prénoms, au milieu ou à la dernière et / ou un numéro d'identification d'une personne, séparés par des espaces. Vous pouvez utiliser% comme joker. «Recherche» de presse sans l'apport d'énumérer toutes les personnes.",
"To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": "To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.",
'To submit a new job, use the': 'To submit a new job, use the',
'To variable': 'To variable',
'Tools': 'Tools',
'Tornado': 'Tornado',
'Total # of Beneficiaries Reached ': 'Total # of Beneficiaries Reached ',
'Total # of Target Beneficiaries': 'Nombre total de bénéficiaires cibles',
'Total # of households of site visited': 'Total # of households of site visited',
'Total Beds': 'Total Beds',
'Total Beneficiaries': 'Total Beneficiaries',
'Total Cost per Megabyte': 'Total Cost per Megabyte',
'Total Cost per Minute': 'Total Cost per Minute',
'Total Households': 'Total Households',
'Total Monthly': 'Total Monthly',
'Total Monthly Cost': 'Total Monthly Cost',
'Total Monthly Cost: ': 'Total Monthly Cost: ',
'Total One-time Costs': 'Total One-time Costs',
'Total Persons': 'Total Persons',
'Total Recurring Costs': 'Total Recurring Costs',
'Total Unit Cost': 'Total Unit Cost',
'Total Unit Cost: ': 'Total Unit Cost: ',
'Total Units': 'Total Units',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'Total number of beds in this hospital. Automatically updated from daily reports.',
'Total number of houses in the area': 'Total number of houses in the area',
'Total number of schools in affected area': 'Total number of schools in affected area',
'Total population of site visited': 'Total population of site visited',
'Totals for Budget:': 'Totals for Budget:',
'Totals for Bundle:': 'Totals for Bundle:',
'Totals for Kit:': 'Totals for Kit:',
'Tourist Group': 'Tourist Group',
'Town': 'Town',
'Traces internally displaced people (IDPs) and their needs': 'Traces internally displaced people (IDPs) and their needs',
'Tracing': 'Tracing',
'Track': 'Track',
'Track Details': 'Track Details',
'Track deleted': 'Track deleted',
'Track updated': 'Track updated',
'Track uploaded': 'Track uploaded',
'Tracking of Projects, Activities and Tasks': 'Tracking of Projects, Activities and Tasks',
'Tracking of basic information on the location, facilities and size of the Shelters': 'Tracking of basic information on the location, facilities and size of the Shelters',
'Tracks': 'Tracks',
'Tracks requests for aid and matches them against donors who have pledged aid': 'Tracks requests for aid and matches them against donors who have pledged aid',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Tracks the location, distribution, capacity and breakdown of victims in Shelters',
'Traffic Report': 'Traffic Report',
'Transit': 'Transit',
'Transition Effect': 'Transition Effect',
'Transparent?': 'Transparent?',
'Transportation assistance, Rank': 'Transportation assistance, Rank',
'Trauma Center': 'Trauma Center',
'Travel Cost': 'Travel Cost',
'Tree': 'Tree',
'Tropical Storm': 'Tropical Storm',
'Tropo Messaging Token': 'Tropo Messaging Token',
'Tropo Settings': 'Tropo Settings',
'Tropo Voice Token': 'Tropo Voice Token',
'Tropo settings updated': 'Tropo settings updated',
'Truck': 'Truck',
'Try checking the URL for errors, maybe it was mistyped.': 'Try checking the URL for errors, maybe it was mistyped.',
'Try hitting refresh/reload button or trying the URL from the address bar again.': 'Try hitting refresh/reload button or trying the URL from the address bar again.',
'Try refreshing the page or hitting the back button on your browser.': 'Try refreshing the page or hitting the back button on your browser.',
'Tsunami': 'Tsunami',
'Tuesday': 'Tuesday',
'Twitter': 'Twitter',
'Twitter ID or #hashtag': 'Twitter ID or #hashtag',
'Twitter Settings': 'Twitter Settings',
'Type': 'Type',
'Type of cause': 'Type of cause',
'Type of water source before the disaster': 'Type of water source before the disaster',
'UID': 'UID',
'URL': 'URL',
'UTC Offset': 'UTC Offset',
'Unable to parse CSV file!': 'Impossible de traiter le fichier CSV!',
'Understaffed': 'Understaffed',
'Unidentified': 'Unidentified',
'Unit': 'Unit',
'Unit Bed Capacity': 'Unit Bed Capacity',
'Unit Cost': 'Unit Cost',
'Unit Details': 'Unit Details',
'Unit Name': 'Unit Name',
'Unit Set': 'Unit Set',
'Unit Short Code for e.g. m for meter.': 'Unit Short Code for e.g. m for meter.',
'Unit added': 'Unit added',
'Unit deleted': 'Unit deleted',
'Unit updated': 'Unit updated',
'Units': 'Units',
'Units of Measure': 'Units of Measure',
'Unknown': 'Unknown',
'Unknown Peer': 'Unknown Peer',
'Unknown type of facility': 'Unknown type of facility',
'Unresolved Conflicts': 'Unresolved Conflicts',
'Unselect to disable the modem': 'Unselect to disable the modem',
'Unsent': 'Unsent',
'Unsupported data format!': 'Unsupported data format!',
'Unsupported method!': 'Unsupported method!',
'Update': 'Update',
'Update Activity Report': 'Update Activity Report',
'Update Cholera Treatment Capability Information': 'Update Cholera Treatment Capability Information',
'Update Import Job': 'Update Import Job',
'Update Request': 'Update Request',
'Update Service Profile': 'Update Service Profile',
'Update Task Status': 'Update Task Status',
'Update Unit': 'Update Unit',
'Update if Master': 'Mettre à jour si Master',
'Update if Newer': 'Update if Newer',
'Update your current ordered list': 'Update your current ordered list',
'Upload': 'Upload',
'Upload Photos': 'Upload Photos',
'Upload Spreadsheet': 'Upload Spreadsheet',
'Upload Track': 'Upload Track',
'Upload a Spreadsheet': 'Upload a Spreadsheet',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": "Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.",
'Urban Fire': 'Urban Fire',
'Urban area': 'Urban area',
'Urdu': 'Urdu',
'Urgent': 'Urgent',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Utilisez (...)&(...) pour ET, (...)|(...) pour OU et ~(...) pour NON afin de construire des requêtes plus complexes.',
'Use default': 'Use default',
'Use these links to download data that is currently in the database.': 'Use these links to download data that is currently in the database.',
'Use this space to add a description about the Bin Type.': 'Use this space to add a description about the Bin Type.',
'Use this space to add a description about the site location.': 'Use this space to add a description about the site location.',
'Use this space to add a description about the warehouse/site.': 'Use this space to add a description about the warehouse/site.',
'Use this space to add additional comments and notes about the Site/Warehouse.': 'Use this space to add additional comments and notes about the Site/Warehouse.',
'Used to import data from spreadsheets into the database': 'Used to import data from spreadsheets into the database',
'User': 'User',
'User Details': 'User Details',
'User Management': 'User Management',
'User Profile': 'User Profile',
'User Requests': 'User Requests',
'User Updated': 'User Updated',
'User added': 'User added',
'User already has this role': 'User already has this role',
'User deleted': 'User deleted',
'User updated': 'User updated',
'Username': 'Username',
'Username for authentication at the peer. Note that only HTTP Basic authentication is supported.': 'Username for authentication at the peer. Note that only HTTP Basic authentication is supported.',
'Users': 'Users',
'Users removed': 'Users removed',
'Ushahidi': 'Ushahidi',
'Utility, telecommunication, other non-transport infrastructure': 'Utility, telecommunication, other non-transport infrastructure',
'Various Reporting functionalities': 'Various Reporting functionalities',
'Vehicle': 'Vehicle',
'Vehicle Crime': 'Vehicle Crime',
'Vehicle Types': 'Vehicle Types',
'Vendor': 'Vendor',
'Verified': 'Verified',
'Verified?': 'Verified?',
'Verify password': 'Verify password',
'Version': 'Version',
'Very High': 'Very High',
'View Alerts received using either Email or SMS': 'View Alerts received using either Email or SMS',
'View Fullscreen Map': 'View Fullscreen Map',
'View Image': 'View Image',
'View On Map': 'View On Map',
'View Outbox': 'View Outbox',
'View Requests for Aid': 'View Requests for Aid',
'View Settings': 'View Settings',
'View Tickets': 'View Tickets',
"View and/or update details of the person's record": "View and/or update details of the person's record",
'View and/or update their details': 'View and/or update their details',
'View or update the status of a hospital.': 'View or update the status of a hospital.',
'View pending requests and pledge support.': 'View pending requests and pledge support.',
'View the hospitals on a map.': 'View the hospitals on a map.',
"View/Edit the Database directly (caution: doesn't respect the framework rules!)": "View/Edit the Database directly (caution: doesn't respect the framework rules!)",
'Village': 'Village',
'Village Leader': 'Village Leader',
'Visible?': 'Visible?',
'Visual Recognition': 'Visual Recognition',
'Volcanic Ash Cloud': 'Volcanic Ash Cloud',
'Volcanic Event': 'Volcanic Event',
'Volume - Fluids': 'Volume - Fluids',
'Volume - Solids': 'Volume - Solids',
'Volume Capacity': 'Volume Capacity',
'Volume/Dimensions': 'Volume/Dimensions',
'Volunteer Data': 'Volunteer Data',
'Volunteer Details': 'Volunteer Details',
'Volunteer Management': 'Volunteer Management',
'Volunteer Project': 'Volunteer Project',
'Volunteer Registration': 'Volunteer Registration',
'Volunteer Registrations': 'Volunteer Registrations',
'Volunteer Request': 'Volunteer Request',
'Volunteer added': 'Volunteer added',
'Volunteer deleted': 'Volunteer deleted',
'Volunteer details updated': 'Volunteer details updated',
'Volunteer registration added': 'Volunteer registration added',
'Volunteer registration deleted': 'Volunteer registration deleted',
'Volunteer registration updated': 'Volunteer registration updated',
'Volunteers': 'Volunteers',
'Volunteers were notified!': 'Volunteers were notified!',
'Vote': 'Vote',
'Votes': 'Votes',
'WASH': 'WASH',
'WMS Browser Name': 'WMS Browser Name',
'WMS Browser URL': 'WMS Browser URL',
'Walking Only': 'Walking Only',
'Warehouse': 'Warehouse',
'Warehouse Details': 'Warehouse Details',
'Warehouse Item': 'Warehouse Item',
'Warehouse Item Details': 'Warehouse Item Details',
'Warehouse Item added': 'Warehouse Item added',
'Warehouse Item deleted': 'Warehouse Item deleted',
'Warehouse Item updated': 'Warehouse Item updated',
'Warehouse Items': 'Warehouse Items',
'Warehouse Management': 'Warehouse Management',
'Warehouse added': 'Warehouse added',
'Warehouse deleted': 'Warehouse deleted',
'Warehouse updated': 'Warehouse updated',
'Warehouse/Sites Registry': 'Warehouse/Sites Registry',
'Warehouses': 'Warehouses',
'WatSan': 'WatSan',
'Water': 'Water',
'Water Sanitation Hygiene': 'Water Sanitation Hygiene',
'Water gallon': 'Water gallon',
'Water supply': 'Water supply',
'Waterspout': 'Waterspout',
'Way Bill(s)': 'Way Bill(s)',
'Website': 'Website',
'Wednesday': 'Wednesday',
'Weekly': 'Weekly',
'Weight': 'Weight',
'Weight (kg)': 'Weight (kg)',
'Welcome to the Sahana Eden Disaster Management Platform': 'Welcome to the Sahana Eden Disaster Management Platform',
'Welcome to the Sahana Eden Disaster Management System': 'Welcome to the Sahana Eden Disaster Management System',
'Welcome to the Sahana Portal at ': 'Welcome to the Sahana Portal at ',
'Well-Known Text': 'Well-Known Text',
'Wheat': 'Wheat',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': 'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value adds a small amount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': 'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": "When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.",
'Whether this is a Sahana Eden, Sahana Agasti, Ushahidi or Other instance.': 'Whether this is a Sahana Eden, Sahana Agasti, Ushahidi or Other instance.',
'Whiskers': 'Whiskers',
'Who is doing what and where': 'Who is doing what and where',
'Who usually collects water for the family?': 'Who usually collects water for the family?',
'Width': 'Width',
'Wild Fire': 'Wild Fire',
'Wind Chill': 'Wind Chill',
'Window frame': 'Window frame',
'Winter Storm': 'Winter Storm',
'Women of Child Bearing Age': 'Women of Child Bearing Age',
'Women who are Pregnant or in Labour': 'Women who are Pregnant or in Labour',
'Womens Focus Groups': 'Womens Focus Groups',
'Wooden plank': 'Wooden plank',
'Wooden poles': 'Wooden poles',
'Working hours end': 'Working hours end',
'Working hours start': 'Working hours start',
'Working or other to provide money/food': 'Working or other to provide money/food',
'Would you like to display the photos on the map?': 'Would you like to display the photos on the map?',
'X-Ray': 'X-Ray',
'XMPP': 'XMPP',
'Yes': 'Yes',
'You are attempting to delete your own account - are you sure you want to proceed?': 'You are attempting to delete your own account - are you sure you want to proceed?',
'You are currently reported missing!': 'You are currently reported missing!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.',
'You can click on the map below to select the Lat/Lon fields:': 'You can click on the map below to select the Lat/Lon fields:',
'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.': 'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.',
'You can select the Draw tool (': 'You can select the Draw tool (',
'You can set the modem settings for SMS here.': 'You can set the modem settings for SMS here.',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.',
"You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click ": "You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click ",
"You haven't made any calculations": "You haven't made any calculations",
'You must be logged in to register volunteers.': 'You must be logged in to register volunteers.',
'You must be logged in to report persons missing or found.': 'You must be logged in to report persons missing or found.',
'You must provide a series id to proceed.': 'You must provide a series id to proceed.',
'You should edit Twitter settings in models/000_config.py': 'You should edit Twitter settings in models/000_config.py',
'Your action is required. Please approve user %s asap: ': 'Your action is required. Please approve user %s asap: ',
'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Your current ordered list of solution items is shown below. You can change it by voting again.',
'Your post was added successfully.': 'Your post was added successfully.',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.',
'ZIP/Postcode': 'ZIP/Postcode',
'Zinc roof': 'Zinc roof',
'Zoom': 'Zoom',
'Zoom Levels': 'Zoom Levels',
'act': 'act',
'active': 'active',
'added': 'added',
'all records': 'all records',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'allows a budget to be developed based on staff & equipment costs, including any admin overheads.',
'allows for creation and management of surveys to assess the damage following a natural disaster.': 'allows for creation and management of surveys to assess the damage following a natural disaster.',
'an individual/team to do in 1-2 days': 'an individual/team to do in 1-2 days',
'approved': 'approved',
'assigned': 'assigned',
'average': 'average',
'black': 'black',
'blond': 'blond',
'blue': 'blue',
'brown': 'brown',
'c/o Name': 'c/o Name',
'can be used to extract data from spreadsheets and put them into database tables.': 'can be used to extract data from spreadsheets and put them into database tables.',
'cancelled': 'cancelled',
'caucasoid': 'caucasoid',
'check all': 'check all',
'click for more details': 'click for more details',
'collateral event': 'collateral event',
'completed': 'completed',
'confirmed': 'confirmed',
'consider': 'consider',
'constraint_id': 'constraint_id',
'criminal intent': 'criminal intent',
'crud': 'crud',
'curly': 'curly',
'currently registered': 'currently registered',
'daily': 'daily',
'dark': 'dark',
'data uploaded': 'data uploaded',
'database': 'database',
'database %s select': 'database %s select',
'db': 'db',
'delete all checked': 'delete all checked',
'deleted': 'deleted',
'denied': 'denied',
'description': 'description',
'design': 'design',
'diseased': 'diseased',
'displaced': 'displaced',
'divorced': 'divorced',
'done!': 'done!',
'edit': 'edit',
'editor': 'editor',
'embedded': 'embedded',
'enclosed area': 'enclosed area',
'export as csv file': 'export as csv file',
'fat': 'fat',
'feedback': 'feedback',
'female': 'female',
'final report': 'final report',
'flush latrine with septic tank': 'flush latrine with septic tank',
'follow-up assessment': 'follow-up assessment',
'forehead': 'forehead',
'from Twitter': 'from Twitter',
'from_id': 'from_id',
'full': 'full',
'green': 'green',
'grey': 'grey',
'here': 'here',
'high': 'high',
'hourly': 'hourly',
'households': 'households',
'human error': 'human error',
'identified': 'identified',
'ignore': 'ignore',
'immediately': 'immediately',
'in Deg Min Sec format': 'in Deg Min Sec format',
'in GPS format': 'in GPS format',
'inactive': 'inactive',
'initial assessment': 'initial assessment',
'injured': 'injured',
'insert new': 'insert new',
'insert new %s': 'insert new %s',
'invalid': 'invalid',
'invalid request': 'invalid request',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.',
'kilogram': 'kilogram',
'kit': 'kit',
'latrines': 'latrines',
'legend URL': 'legend URL',
'light': 'light',
'liter': 'liter',
'login': 'login',
'long': 'long',
'long>12cm': 'long>12cm',
'low': 'low',
'male': 'male',
'manual': 'manual',
'married': 'married',
'maxExtent': 'maxExtent',
'maxResolution': 'maxResolution',
'medium': 'medium',
'medium<12cm': 'medium<12cm',
'menu item': 'menu item',
'message_id': 'message_id',
'meter': 'meter',
'meter cubed': 'meter cubed',
'meters': 'meters',
'module allows the site administrator to configure various options.': 'module allows the site administrator to configure various options.',
'module helps monitoring the status of hospitals.': 'module helps monitoring the status of hospitals.',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).',
'mongoloid': 'mongoloid',
'more': 'more',
'n/a': 'n/a',
'natural hazard': 'natural hazard',
'negroid': 'negroid',
'never': 'jamais',
'new': 'nouvelles',
'new record inserted': 'new record inserted',
'next 100 rows': 'next 100 rows',
'no': 'no',
'none': 'none',
'normal': 'normal',
'not needed': 'not needed',
'not specified': 'not specified',
'num Zoom Levels': 'num Zoom Levels',
'once': 'once',
'open defecation': 'open defecation',
'operational intent': 'operational intent',
'or import from csv file': 'or import from csv file',
'other': 'other',
'over one hour': 'over one hour',
'pack of 10': 'pack of 10',
'pending': 'pending',
'people': 'people',
'piece': 'piece',
'pit': 'pit',
'pit latrine': 'pit latrine',
'postponed': 'postponed',
'preliminary template or draft, not actionable in its current form': 'preliminary template or draft, not actionable in its current form',
'previous 100 rows': 'previous 100 rows',
'primary incident': 'primary incident',
'problem connecting to twitter.com - please refresh': 'problem connecting to twitter.com - please refresh',
'provides a catalogue of digital media.': 'provides a catalogue of digital media.',
'record does not exist': 'record does not exist',
'record id': 'record id',
'records deleted': 'records deleted',
'red': 'red',
'reported': 'reported',
'reports successfully imported.': 'reports successfully imported.',
'retired': 'retired',
'river': 'river',
'sack 20kg': 'sack 20kg',
'sack 50kg': 'sack 50kg',
'secondary effect': 'secondary effect',
'see comment': 'see comment',
'selected': 'selected',
'separated': 'separated',
'separated from family': 'separated from family',
'shaved': 'shaved',
'shift_end': 'shift_end',
'shift_start': 'shift_start',
'short': 'short',
'short<6cm': 'short<6cm',
'sides': 'sides',
'sign-up now': 'sign-up now',
'simple': 'simple',
'single': 'single',
'slim': 'slim',
'state': 'state',
'straight': 'straight',
'suffered financial losses': 'suffered financial losses',
'table': 'table',
'table_name': 'table_name',
'tall': 'tall',
'technical failure': 'technical failure',
'this': 'this',
'to access the system': 'to access the system',
'to reset your password': 'to reset your password',
'to verify your email': 'to verify your email',
'to_id': 'to_id',
'ton': 'ton',
'tonsure': 'tonsure',
'total': 'total',
'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!',
'unable to parse csv file': 'unable to parse csv file',
'unapproved': 'unapproved',
'uncheck all': 'uncheck all',
'unidentified': 'unidentified',
'unknown': 'unknown',
'unspecified': 'unspecified',
'updated': 'mis à jour',
'updates only': 'updates only',
'urgent': 'urgent',
'vm_action': 'vm_action',
'wavy': 'wavy',
'weekly': 'weekly',
'white': 'white',
'wider area, longer term, usually contain multiple Activities': 'wider area, longer term, usually contain multiple Activities',
'widowed': 'widowed',
'window': 'window',
'within human habitat': 'within human habitat',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt module not available within the running Python - this needs installing for XLS output!',
'yes': 'yes',
}
|
ptressel/sahana-eden-madpub
|
languages/fr.py
|
Python
|
mit
| 226,541
|
[
"VisIt"
] |
255c6f6210877c9de2c75aabc08590fd95c65590dd5300f289acdcb14c7c24f5
|
# -*- encoding: utf-8 -*-
"""
Haiku.py
Based on Twitter REST API
(Japanese) http://watcher.moe-nifty.com/memo/docs/twitterAPI13.txt
(Original) http://apiwiki.twitter.com/REST+API+Documentation
Also Twitter Search API
(Original) http://apiwiki.twitter.com/Search+API+Documentation
"""
__author__="ymotongpoo <ymotongpoo@gmail.com>"
__date__ ="$2008/11/22 09:57:30$"
__version__="$Revision: 0.10"
__credits__="0x7d8 -- programming training"
import sys  # used by sendNewDirectMsg()
import urllib
import urllib2
FORMAT = set(['xml', 'rss', 'json', 'atom'])
default_format = 'json'
class Twitter:
    def __init__(self, username, password, base_url='', search_url='', proxy_host='', proxy_port=''):
self.username = username
self.password = password
self.base_url = base_url if len(base_url) > 0 else 'http://twitter.com/'
self.search_url = search_url if len(search_url) > 0 else 'http://search.twitter.com/'
        if len(proxy_host) > 0 and len(proxy_port) > 0:
self.proxies = {'http': proxy_host + ':' + proxy_port}
else:
self.proxies = {}
def __create_opener(self):
if 'http' in self.proxies:
proxy_handler = urllib2.ProxyHandler(self.proxies)
auth_handler = urllib2.ProxyBasicAuthHandler()
else:
auth_handler = urllib2.HTTPBasicAuthHandler()
auth_handler.add_password('Twitter API', self.base_url, self.username, self.password)
if 'http' in self.proxies:
opener = urllib2.build_opener(proxy_handler, auth_handler)
else:
opener = urllib2.build_opener(auth_handler)
return opener
def __add_format(self, url, format):
if format in FORMAT:
url = url + '.' + format
else:
url = url + '.' + default_format
return url
def __open_url_in_get(self, url, get_dict={}):
opener = self.__create_opener()
urllib2.install_opener(opener)
if len(get_dict) > 0:
params = urllib.urlencode(get_dict)
f = urllib2.urlopen(url + '?' + params)
else:
f = urllib2.urlopen(url)
return f.read()
def __open_url_in_post(self, url, post_dict={}):
opener = self.__create_opener()
urllib2.install_opener(opener)
if len(post_dict) > 0:
params = urllib.urlencode(post_dict)
f = urllib2.urlopen(url, params)
return f.read()
else:
return
def __query_dict_generator(self, func_args):
get_dict = {}
if 'since_id' in func_args and func_args['since_id'] > 0 and type(func_args['since_id']) is int:
get_dict['since_id'] = func_args['since_id']
if 'twitterid' in func_args and len(func_args['twitterid']) > 0:
get_dict['id'] = func_args['twitterid']
if 'since' in func_args and len(func_args['since']) > 0:
get_dict['since'] = func_args['since']
if 'page' in func_args and func_args['page'] > 0 and type(func_args['page']) is int:
get_dict['page'] = func_args['page']
        if 'lite' in func_args and func_args['lite']:
            get_dict['lite'] = 'true'
return get_dict
def __get_request_without_options(self, url_part, format):
url = self.base_url + url_part
url = self.__add_format(url, format)
d = self.__open_url_in_get(url)
return d
def __get_request_with_options(self, url_part, format, dict):
url = self.base_url + url_part
url = self.__add_format(url, format)
get_dict = self.__query_dict_generator(dict)
d = self.__open_url_in_get(url, get_dict)
return d
def publicTimeline(self, since_id=-1, format=default_format):
return self.__get_request_with_options('statuses/public_timeline', format, locals())
def friendsTimeline(self, twitterid='', since='', page=1, format=default_format):
"""
since -- expects same type as the return of strftime()
"""
url_part = 'statuses/friends_timeline'
if len(twitterid) > 0:
url_part = url_part + '/' + twitterid
return self.__get_request_with_options(url_part, format, locals())
def userTimeline(self, twitterid='', count=20, since='', since_id='', page=1, format=default_format):
"""
since -- expects same type as the return of strftime()
"""
url_part = 'statuses/user_timeline'
if len(twitterid) > 0:
url_part = url_part + '/' + twitterid
return self.__get_request_with_options(url_part, format, locals())
def showStatusByID(self, status_id, format=default_format):
if type(status_id) is int:
status_id = str(status_id)
return self.__get_request_without_options('statuses/show/' + status_id, format)
def updateStatus(self, status, source='', format=default_format):
url = self.base_url + 'statuses/update'
url = self.__add_format(url, format)
post_dict = {}
if len(status) <= 160:
post_dict['status'] = status
if len(source) > 0:
post_dict['source'] = source
d = self.__open_url_in_post(url, post_dict)
return d
def repliesPost(self, since='', since_id='', page=1, format=default_format):
return self.__get_request_with_options('statuses/replies', format, locals())
    def destroyPost(self, status_id, format=default_format):
        if type(status_id) is int:
            status_id = str(status_id)
        return self.__get_request_without_options('statuses/destroy/' + status_id, format)
def friendsList(self, twitterid='', page=1, lite=False, since='', format=default_format):
return self.__get_request_with_options('statuses/friends', format, locals())
def followersList(self, twitterid='', page=1, lite=False, format=default_format):
return self.__get_request_with_options('statuses/followers', format, locals())
def featured(self, format=default_format):
return self.__get_request_without_options('statuses/featured', format)
    def showUserInfo(self, twitterid, email='', format=default_format):
        url = self.base_url + 'users/show'
        get_dict = {}
        if len(email) > 0:
            url = self.__add_format(url, format)
            get_dict['email'] = email
        else:
            url = url + '/' + twitterid
            url = self.__add_format(url, format)
        for k, v in self.__query_dict_generator(locals()).items():
            get_dict[k] = v
        d = self.__open_url_in_get(url, get_dict)
        return d
def directMsgs(self, since='', since_id='', page=1, format=default_format):
return self.__get_request_with_options('direct_messages', format, locals())
def sentMsgs(self, since='', since_id='', page=1, format=default_format):
return self.__get_request_with_options('direct_messages/sent', format, locals())
    def sendNewDirectMsg(self, user, text, format=default_format):
        if len(user) > 0 and len(text) > 0:
            url = self.base_url + 'direct_messages/new'
            url = self.__add_format(url, format)
            # __query_dict_generator() knows nothing about 'user'/'text',
            # which direct_messages/new expects, so build the dict directly
            post_dict = {'user': user, 'text': text}
            d = self.__open_url_in_post(url, post_dict)
            return d
        else:
            sys.exit(0)
    def destroyDirectMsg(self, msgid, format=default_format):
        if type(msgid) is int:
            url = self.base_url + 'direct_messages/destroy/'
            url = self.__add_format(url, format)
            get_dict = {'id': msgid}
            d = self.__open_url_in_get(url, get_dict)
            return d
        else:
            return
def createFriend(self, twitterid, format=default_format):
return self.__get_request_without_options('friendships/create/' + twitterid, format)
def destroyFriend(self, twitterid, format=default_format):
return self.__get_request_without_options('friendships/destroy/' + twitterid, format)
def existsRelationship(self, user_a, user_b, format=default_format):
url = self.base_url + 'friendships/exists'
url = self.__add_format(url, format)
get_dict = {}
get_dict['user_a'] = user_a
get_dict['user_b'] = user_b
d = self.__open_url_in_get(url, get_dict)
return d
def verifyCredentials(self, format=default_format):
url = self.base_url + 'account/verify_credentials'
url = self.__add_format(url, format)
d = self.__open_url_in_get(url)
return d
def endSession(self):
url = self.base_url + 'account/end_session'
d = self.__open_url_in_get(url)
return d
# *** 'archive' was no longer available ***
#
# def archivePost(self, page=1, since='', since_id='', format=default_format):
# return self.__get_request_with_options('account/archive', format, locals())
def updateLocation(self, location, format=default_format):
url = self.base_url + 'account/update_location'
url = self.__add_format(url, format)
get_dict = {}
get_dict['location'] = location
d = self.__open_url_in_get(url, get_dict)
return d
    def updateDeliveryDevice(self, device, format=default_format):
        devices = set(['sms', 'im', 'none'])
        url = self.base_url + 'account/update_delivery_device'
        url = self.__add_format(url, format)
        # __query_dict_generator() knows nothing about 'device'; only pass it
        # through when it is one of the values the API accepts
        post_dict = {}
        if device in devices:
            post_dict['device'] = device
        d = self.__open_url_in_post(url, post_dict)
        return d
def rateLimitStatus(self, format=default_format):
return self.__get_request_without_options('account/rate_limit_status', format)
def favoritesPost(self, twitterid='', page=1, format=default_format):
return self.__get_request_with_options('favorites', format, locals())
def createFavorite(self, twitterid, format=default_format):
return self.__get_request_without_options('favourings/create/' + twitterid, format)
def destroyFavorite(self, twitterid, format=default_format):
        return self.__get_request_without_options('favourings/destroy/' + twitterid, format)
def followIM(self, twitterid, format=default_format):
return self.__get_request_without_options('notifications/follow/' + twitterid, format)
def leaveIM(self, twitterid, format=default_format):
return self.__get_request_without_options('notifications/leave/' + twitterid, format)
def createBlock(self, twitterid, format=default_format):
return self.__get_request_without_options('blocks/create/' + twitterid, format)
    def destroyBlock(self, twitterid, format=default_format):
return self.__get_request_without_options('blocks/destroy/' + twitterid, format)
def testConnection(self, format=default_format):
return self.__get_request_without_options('help/test', format)
def downtimeSchedule(self, format=default_format):
return self.__get_request_without_options('help/downtime_schedule', format)
def updateProfileColors(self, bg='', txt='', link='', sbfill='', sbbdr='', format=default_format):
url = self.base_url + 'account/update_profile_colors'
url = self.__add_format(url, format)
post_dict = {}
if len(bg) > 0:
post_dict['profile_background_color'] = bg
if len(txt) > 0:
post_dict['profile_text_color'] = txt
if len(link) > 0:
post_dict['profile_link_color'] = link
if len(sbfill) > 0:
post_dict['profile_sidebar_fill_color'] = sbfill
if len(sbbdr) > 0:
post_dict['profile_sidebar_border_color'] = sbbdr
d = self.__open_url_in_post(url, post_dict)
return d
    def updateProfileBackgroundImage(self, image='', format=default_format):
        url = self.base_url + 'account/update_profile_background_image'
        url = self.__add_format(url, format)
        # __query_dict_generator() knows nothing about 'image', so build the
        # dict directly (note: the real endpoint expects a multipart upload;
        # this only mirrors the wrapper's simple POST style)
        post_dict = {}
        if len(image) > 0:
            post_dict['image'] = image
        d = self.__open_url_in_post(url, post_dict)
        return d
    def search(self, word=None, fromuser='', touser='', refuser='', hashtag='', format=default_format):
        # Minimal completion of this truncated method: the old Search API takes a
        # single 'q' parameter, with 'from:'/'to:' operators, '@' references and
        # '#' hashtags embedded in the query string.
        terms = [t for t in (word, fromuser and 'from:' + fromuser,
                             touser and 'to:' + touser, refuser and '@' + refuser,
                             hashtag and '#' + hashtag) if t]
        url = self.__add_format(self.search_url + 'search', format)
        return self.__open_url_in_get(url, {'q': ' '.join(terms)})
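# Usage sketch (not part of the original module; the credentials below are
# placeholders, and the pre-OAuth Basic-Auth endpoints this wrapper targets
# have long been retired, so treat this as illustrative only):
if __name__ == '__main__':
    client = Twitter('example_user', 'example_password')
    print client.publicTimeline(format='json')       # raw JSON string
    print client.search('python', hashtag='twitter')  # uses the Search API sketch above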
|
ymotongpoo/restroom
|
0x7d8/APIs/Haiku.py
|
Python
|
apache-2.0
| 12,200
|
[
"MOE"
] |
bf29418033eaed5fb50092595fe7e5d287bc7944f190c1215f8335b07ac987f7
|
#
#@BEGIN LICENSE
#
# PSI4: an ab initio quantum chemistry software package
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#@END LICENSE
#
"""Module with non-generic exceptions classes."""
import psi4
class PsiException(Exception):
"""Error class for Psi."""
pass
class ValidationError(PsiException):
"""Error called for problems with the input file. Prints
error message *msg* to standard output stream and output file.
"""
def __init__(self, msg):
PsiException.__init__(self, msg)
self.msg = msg
psi4.print_out('\nPsiException: %s\n\n' % (msg))
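# Usage sketch (illustrative, not part of the original module). Raising
# ValidationError both raises an ordinary, catchable exception and echoes the
# message to Psi's output file via psi4.print_out():
#
#     if geometry_is_missing:   # hypothetical caller-side check
#         raise ValidationError("Molecule has no geometry.")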
|
spring01/libPSI
|
lib/python/psiexceptions.py
|
Python
|
gpl-2.0
| 1,262
|
[
"Psi4"
] |
7482475cdceb9c133f9fdd1d02685f4614ff9dc08ec8a4173c5cd7fea9e2a2d7
|
import unittest
import json
from tests.base import Base
class TestWhoAmIResource(Base):
""" This class tests the whoami resources by making a get request
to /whoami with token set in the header
"""
def test_returns_user_info(self):
""" Get user info by making a request with a valid token """
payload = self.client.get("/api/v1/whoami", headers=self.set_headers())
        response = json.loads(payload.data.decode())
        self.assertEqual(response["id"], 1)
        self.assertEqual(response["username"], "brian")
        self.assertEqual(payload.status_code, 200)
def test_invalid_token_denied(self):
""" Test if invalid token is allowed """
payload = self.client.get("/api/v1/whoami", headers=
dict({"Authorization": "tiainsansindad"}))
reponse = json.loads(payload.data.decode())
self.assertEquals(reponse["status"], "failed")
self.assertEquals(reponse["message"],
"Invalid token, please login again")
self.assertEquals(payload.status_code, 401)
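# For context, a sketch of the helper assumed above (hypothetical -- the real
# set_headers() lives in tests/base.py, which is not included here):
#
#     def set_headers(self):
#         """Return headers carrying a valid token for the seeded test user."""
#         return {"Authorization": self.get_token(),
#                 "Content-Type": "application/json"}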
|
brayoh/bucket-list-api
|
tests/test_whoami_resource.py
|
Python
|
mit
| 1,106
|
[
"Brian"
] |
67190ddf897631ca96ebe5c6cb73a3c0615686424fbbc355bca98885452846d7
|
# -*- Mode: python; coding: utf-8 -*-
from __future__ import division
#from __future__ import with_statement
LAYOUT_ALGORITHM = 'neato' # ['neato'|'dot'|'twopi'|'circo'|'fdp'|'nop']
REPRESENT_CHANNELS_AS_NODES = 1
DEFAULT_NODE_SIZE = 3.0 # default node size in meters
DEFAULT_TRANSMISSIONS_MEMORY = 5 # default number of past intervals whose transmissions are remembered
BITRATE_FONT_SIZE = 10
# internal constants, normally not meant to be changed
SAMPLE_PERIOD = 0.1
PRIORITY_UPDATE_MODEL = -100
PRIORITY_UPDATE_VIEW = 200
import platform
if platform.system() == "Windows":
SHELL_FONT = "Lucida Console 9"
else:
SHELL_FONT = "Luxi Mono 10"
import ns.core
import ns.network
import ns.visualizer
import ns.internet
import ns.mobility
import math
import os
import sys
import gobject
import time
try:
import pygraphviz
import gtk
import pango
import goocanvas
import cairo
import threading
import hud
#import time
import cairo
from higcontainer import HIGContainer
gobject.threads_init()
try:
import svgitem
except ImportError:
svgitem = None
except ImportError, _import_error:
import dummy_threading as threading
else:
_import_error = None
try:
import ipython_view
except ImportError:
ipython_view = None
from base import InformationWindow, PyVizObject, Link, lookup_netdevice_traits, PIXELS_PER_METER
from base import transform_distance_simulation_to_canvas, transform_point_simulation_to_canvas
from base import transform_distance_canvas_to_simulation, transform_point_canvas_to_simulation
from base import load_plugins, register_plugin, plugins
PI_OVER_2 = math.pi/2
PI_TIMES_2 = math.pi*2
class Node(PyVizObject):
__gsignals__ = {
# signal emitted whenever a tooltip is about to be shown for the node
# the first signal parameter is a python list of strings, to which information can be appended
'query-extra-tooltip-info': (gobject.SIGNAL_RUN_LAST, None, (object,)),
}
def __init__(self, visualizer, node_index):
super(Node, self).__init__()
self.visualizer = visualizer
self.node_index = node_index
self.canvas_item = goocanvas.Ellipse()
self.canvas_item.set_data("pyviz-object", self)
self.links = []
self._has_mobility = None
self._selected = False
self._highlighted = False
self._color = 0x808080ff
self._size = DEFAULT_NODE_SIZE
self.canvas_item.connect("enter-notify-event", self.on_enter_notify_event)
self.canvas_item.connect("leave-notify-event", self.on_leave_notify_event)
self.menu = None
self.svg_item = None
self.svg_align_x = None
self.svg_align_y = None
self._label = None
self._label_canvas_item = None
self._update_appearance() # call this last
def set_svg_icon(self, file_base_name, width=None, height=None, align_x=0.5, align_y=0.5):
"""
Set a background SVG icon for the node.
@param file_base_name: base file name, including .svg
extension, of the svg file. Place the file in the folder
src/contrib/visualizer/resource.
@param width: scale to the specified width, in meters
        @param height: scale to the specified height, in meters
@param align_x: horizontal alignment of the icon relative to
the node position, from 0 (icon fully to the left of the node)
to 1.0 (icon fully to the right of the node)
@param align_y: vertical alignment of the icon relative to the
node position, from 0 (icon fully to the top of the node) to
1.0 (icon fully to the bottom of the node)
"""
if width is None and height is None:
raise ValueError("either width or height must be given")
rsvg_handle = svgitem.rsvg_handle_factory(file_base_name)
x = self.canvas_item.props.center_x
y = self.canvas_item.props.center_y
self.svg_item = svgitem.SvgItem(x, y, rsvg_handle)
self.svg_item.props.parent = self.visualizer.canvas.get_root_item()
self.svg_item.props.pointer_events = 0
self.svg_item.lower(None)
self.svg_item.props.visibility = goocanvas.ITEM_VISIBLE_ABOVE_THRESHOLD
if width is not None:
self.svg_item.props.width = transform_distance_simulation_to_canvas(width)
if height is not None:
self.svg_item.props.height = transform_distance_simulation_to_canvas(height)
#threshold1 = 10.0/self.svg_item.props.height
#threshold2 = 10.0/self.svg_item.props.width
#self.svg_item.props.visibility_threshold = min(threshold1, threshold2)
self.svg_align_x = align_x
self.svg_align_y = align_y
self._update_svg_position(x, y)
self._update_appearance()
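    # Usage sketch for set_svg_icon() (illustrative; the file name and sizes
    # are placeholders, and the .svg must live under
    # src/contrib/visualizer/resource):
    #
    #     node.set_svg_icon("router.svg", width=8, align_x=0.5, align_y=0.5)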
def set_label(self, label):
assert isinstance(label, basestring)
self._label = label
self._update_appearance()
def _update_svg_position(self, x, y):
w = self.svg_item.width
h = self.svg_item.height
self.svg_item.set_properties(x=(x - (1-self.svg_align_x)*w),
y=(y - (1-self.svg_align_y)*h))
def tooltip_query(self, tooltip):
self.visualizer.simulation.lock.acquire()
try:
ns3_node = ns.network.NodeList.GetNode(self.node_index)
ipv4 = ns3_node.GetObject(ns.internet.Ipv4.GetTypeId())
ipv6 = ns3_node.GetObject(ns.internet.Ipv6.GetTypeId())
lines = ['<b><u>Node %i</u></b>' % self.node_index]
lines.append('')
self.emit("query-extra-tooltip-info", lines)
mob = ns3_node.GetObject(ns.mobility.MobilityModel.GetTypeId())
if mob is not None:
lines.append(' <b>Mobility Model</b>: %s' % mob.GetInstanceTypeId().GetName())
for devI in range(ns3_node.GetNDevices()):
lines.append('')
lines.append(' <u>NetDevice %i:</u>' % devI)
dev = ns3_node.GetDevice(devI)
name = ns.core.Names.FindName(dev)
if name:
lines.append(' <b>Name:</b> %s' % name)
devname = dev.GetInstanceTypeId().GetName()
lines.append(' <b>Type:</b> %s' % devname)
if ipv4 is not None:
ipv4_idx = ipv4.GetInterfaceForDevice(dev)
if ipv4_idx != -1:
addresses = [
'%s/%s' % (ipv4.GetAddress(ipv4_idx, i).GetLocal(),
ipv4.GetAddress(ipv4_idx, i).GetMask())
for i in range(ipv4.GetNAddresses(ipv4_idx))]
lines.append(' <b>IPv4 Addresses:</b> %s' % '; '.join(addresses))
if ipv6 is not None:
ipv6_idx = ipv6.GetInterfaceForDevice(dev)
if ipv6_idx != -1:
addresses = [
'%s/%s' % (ipv6.GetAddress(ipv6_idx, i).GetAddress(),
ipv6.GetAddress(ipv6_idx, i).GetPrefix())
for i in range(ipv6.GetNAddresses(ipv6_idx))]
lines.append(' <b>IPv6 Addresses:</b> %s' % '; '.join(addresses))
lines.append(' <b>MAC Address:</b> %s' % (dev.GetAddress(),))
tooltip.set_markup('\n'.join(lines))
finally:
self.visualizer.simulation.lock.release()
def on_enter_notify_event(self, view, target, event):
self.highlighted = True
def on_leave_notify_event(self, view, target, event):
self.highlighted = False
def _set_selected(self, value):
self._selected = value
self._update_appearance()
def _get_selected(self):
return self._selected
selected = property(_get_selected, _set_selected)
def _set_highlighted(self, value):
self._highlighted = value
self._update_appearance()
def _get_highlighted(self):
return self._highlighted
highlighted = property(_get_highlighted, _set_highlighted)
def set_size(self, size):
self._size = size
self._update_appearance()
def _update_appearance(self):
"""Update the node aspect to reflect the selected/highlighted state"""
size = transform_distance_simulation_to_canvas(self._size)
if self.svg_item is not None:
alpha = 0x80
else:
alpha = 0xff
fill_color_rgba = (self._color & 0xffffff00) | alpha
self.canvas_item.set_properties(radius_x=size, radius_y=size,
fill_color_rgba=fill_color_rgba)
if self._selected:
line_width = size*.3
else:
line_width = size*.15
if self.highlighted:
stroke_color = 'yellow'
else:
stroke_color = 'black'
self.canvas_item.set_properties(line_width=line_width, stroke_color=stroke_color)
if self._label is not None:
if self._label_canvas_item is None:
self._label_canvas_item = goocanvas.Text(visibility_threshold=0.5,
font="Sans Serif 10",
fill_color_rgba=0x808080ff,
alignment=pango.ALIGN_CENTER,
anchor=gtk.ANCHOR_N,
parent=self.visualizer.canvas.get_root_item(),
pointer_events=0)
self._label_canvas_item.lower(None)
self._label_canvas_item.set_properties(visibility=goocanvas.ITEM_VISIBLE_ABOVE_THRESHOLD,
text=self._label)
self._update_position()
def set_position(self, x, y):
self.canvas_item.set_property("center_x", x)
self.canvas_item.set_property("center_y", y)
if self.svg_item is not None:
self._update_svg_position(x, y)
for link in self.links:
link.update_points()
if self._label_canvas_item is not None:
self._label_canvas_item.set_properties(x=x, y=(y+self._size*3))
def get_position(self):
return (self.canvas_item.get_property("center_x"), self.canvas_item.get_property("center_y"))
def _update_position(self):
x, y = self.get_position()
self.set_position(x, y)
def set_color(self, color):
if isinstance(color, str):
color = gtk.gdk.color_parse(color)
color = ((color.red>>8) << 24) | ((color.green>>8) << 16) | ((color.blue>>8) << 8) | 0xff
self._color = color
self._update_appearance()
def add_link(self, link):
assert isinstance(link, Link)
self.links.append(link)
def remove_link(self, link):
assert isinstance(link, Link)
self.links.remove(link)
@property
def has_mobility(self):
if self._has_mobility is None:
node = ns.network.NodeList.GetNode(self.node_index)
mobility = node.GetObject(ns.mobility.MobilityModel.GetTypeId())
self._has_mobility = (mobility is not None)
return self._has_mobility
class Channel(PyVizObject):
def __init__(self, channel):
self.channel = channel
self.canvas_item = goocanvas.Ellipse(radius_x=30, radius_y=30,
fill_color="white",
stroke_color="grey", line_width=2.0,
line_dash=goocanvas.LineDash([10.0, 10.0 ]),
visibility=goocanvas.ITEM_VISIBLE)
self.canvas_item.set_data("pyviz-object", self)
self.links = []
def set_position(self, x, y):
self.canvas_item.set_property("center_x", x)
self.canvas_item.set_property("center_y", y)
for link in self.links:
link.update_points()
def get_position(self):
return (self.canvas_item.get_property("center_x"), self.canvas_item.get_property("center_y"))
class WiredLink(Link):
def __init__(self, node1, node2):
assert isinstance(node1, Node)
assert isinstance(node2, (Node, Channel))
self.node1 = node1
self.node2 = node2
self.canvas_item = goocanvas.Path(line_width=1.0, stroke_color="black")
self.canvas_item.set_data("pyviz-object", self)
self.node1.links.append(self)
self.node2.links.append(self)
def update_points(self):
pos1_x, pos1_y = self.node1.get_position()
pos2_x, pos2_y = self.node2.get_position()
self.canvas_item.set_property("data", "M %r %r L %r %r" % (pos1_x, pos1_y, pos2_x, pos2_y))
class SimulationThread(threading.Thread):
def __init__(self, viz):
super(SimulationThread, self).__init__()
assert isinstance(viz, Visualizer)
self.viz = viz # Visualizer object
self.lock = threading.Lock()
self.go = threading.Event()
self.go.clear()
self.target_time = 0 # in seconds
self.quit = False
self.sim_helper = ns.visualizer.PyViz()
self.pause_messages = []
def set_nodes_of_interest(self, nodes):
self.lock.acquire()
try:
self.sim_helper.SetNodesOfInterest(nodes)
finally:
self.lock.release()
def run(self):
while not self.quit:
#print "sim: Wait for go"
self.go.wait() # wait until the main (view) thread gives us the go signal
self.go.clear()
if self.quit:
break
#self.go.clear()
#print "sim: Acquire lock"
self.lock.acquire()
try:
if 0:
                    if ns.core.Simulator.IsFinished():
self.viz.play_button.set_sensitive(False)
break
#print "sim: Current time is %f; Run until: %f" % (ns3.Simulator.Now ().GetSeconds (), self.target_time)
#if ns3.Simulator.Now ().GetSeconds () > self.target_time:
# print "skipping, model is ahead of view!"
self.sim_helper.SimulatorRunUntil(ns.core.Seconds(self.target_time))
#print "sim: Run until ended at current time: ", ns3.Simulator.Now ().GetSeconds ()
self.pause_messages.extend(self.sim_helper.GetPauseMessages())
gobject.idle_add(self.viz.update_model, priority=PRIORITY_UPDATE_MODEL)
#print "sim: Run until: ", self.target_time, ": finished."
finally:
self.lock.release()
#print "sim: Release lock, loop."
# enumeration
class ShowTransmissionsMode(object):
__slots__ = []
ShowTransmissionsMode.ALL = ShowTransmissionsMode()
ShowTransmissionsMode.NONE = ShowTransmissionsMode()
ShowTransmissionsMode.SELECTED = ShowTransmissionsMode()
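# Usage sketch (illustrative): the three singleton instances above act as an
# enumeration and are compared by identity, e.g.
#
#     viz.set_show_transmissions_mode(ShowTransmissionsMode.SELECTED)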
class Visualizer(gobject.GObject):
INSTANCE = None
if _import_error is None:
__gsignals__ = {
# signal emitted whenever a right-click-on-node popup menu is being constructed
'populate-node-menu': (gobject.SIGNAL_RUN_LAST, None, (object, gtk.Menu,)),
# signal emitted after every simulation period (SAMPLE_PERIOD seconds of simulated time)
# the simulation lock is acquired while the signal is emitted
'simulation-periodic-update': (gobject.SIGNAL_RUN_LAST, None, ()),
# signal emitted right after the topology is scanned
'topology-scanned': (gobject.SIGNAL_RUN_LAST, None, ()),
# signal emitted when it's time to update the view objects
'update-view': (gobject.SIGNAL_RUN_LAST, None, ()),
}
def __init__(self):
assert Visualizer.INSTANCE is None
Visualizer.INSTANCE = self
super(Visualizer, self).__init__()
self.nodes = {} # node index -> Node
self.channels = {} # id(ns3.Channel) -> Channel
self.window = None # toplevel window
self.canvas = None # goocanvas.Canvas
self.time_label = None # gtk.Label
self.play_button = None # gtk.ToggleButton
self.zoom = None # gtk.Adjustment
self._scrolled_window = None # gtk.ScrolledWindow
self.links_group = goocanvas.Group()
self.channels_group = goocanvas.Group()
self.nodes_group = goocanvas.Group()
self._update_timeout_id = None
self.simulation = SimulationThread(self)
self.selected_node = None # node currently selected
self.speed = 1.0
self.information_windows = []
self._transmission_arrows = []
self._last_transmissions = []
self._drop_arrows = []
self._last_drops = []
self._show_transmissions_mode = None
self.set_show_transmissions_mode(ShowTransmissionsMode.ALL)
self._panning_state = None
self.node_size_adjustment = None
self.transmissions_smoothing_adjustment = None
self.sample_period = SAMPLE_PERIOD
self.node_drag_state = None
self.follow_node = None
self.shell_window = None
self.create_gui()
for plugin in plugins:
plugin(self)
def set_show_transmissions_mode(self, mode):
assert isinstance(mode, ShowTransmissionsMode)
self._show_transmissions_mode = mode
if self._show_transmissions_mode == ShowTransmissionsMode.ALL:
self.simulation.set_nodes_of_interest(range(ns.network.NodeList.GetNNodes()))
elif self._show_transmissions_mode == ShowTransmissionsMode.NONE:
self.simulation.set_nodes_of_interest([])
elif self._show_transmissions_mode == ShowTransmissionsMode.SELECTED:
if self.selected_node is None:
self.simulation.set_nodes_of_interest([])
else:
self.simulation.set_nodes_of_interest([self.selected_node.node_index])
def _create_advanced_controls(self):
expander = gtk.Expander("Advanced")
expander.show()
main_vbox = gobject.new(gtk.VBox, border_width=8, visible=True)
expander.add(main_vbox)
main_hbox1 = gobject.new(gtk.HBox, border_width=8, visible=True)
main_vbox.pack_start(main_hbox1)
show_transmissions_group = HIGContainer("Show transmissions")
show_transmissions_group.show()
main_hbox1.pack_start(show_transmissions_group, False, False, 8)
vbox = gtk.VBox(True, 4)
vbox.show()
show_transmissions_group.add(vbox)
all_nodes = gtk.RadioButton(None)
all_nodes.set_label("All nodes")
all_nodes.set_active(True)
all_nodes.show()
vbox.add(all_nodes)
selected_node = gtk.RadioButton(all_nodes)
selected_node.show()
selected_node.set_label("Selected node")
selected_node.set_active(False)
vbox.add(selected_node)
no_node = gtk.RadioButton(all_nodes)
no_node.show()
no_node.set_label("Disabled")
no_node.set_active(False)
vbox.add(no_node)
def toggled(radio):
if radio.get_active():
self.set_show_transmissions_mode(ShowTransmissionsMode.ALL)
all_nodes.connect("toggled", toggled)
def toggled(radio):
if radio.get_active():
self.set_show_transmissions_mode(ShowTransmissionsMode.NONE)
no_node.connect("toggled", toggled)
def toggled(radio):
if radio.get_active():
self.set_show_transmissions_mode(ShowTransmissionsMode.SELECTED)
selected_node.connect("toggled", toggled)
# -- misc settings
misc_settings_group = HIGContainer("Misc Settings")
misc_settings_group.show()
main_hbox1.pack_start(misc_settings_group, False, False, 8)
settings_hbox = gobject.new(gtk.HBox, border_width=8, visible=True)
misc_settings_group.add(settings_hbox)
# --> node size
vbox = gobject.new(gtk.VBox, border_width=0, visible=True)
scale = gobject.new(gtk.HScale, visible=True, digits=2)
vbox.pack_start(scale, True, True, 0)
vbox.pack_start(gobject.new(gtk.Label, label="Node Size", visible=True), True, True, 0)
settings_hbox.pack_start(vbox, False, False, 6)
self.node_size_adjustment = scale.get_adjustment()
def node_size_changed(adj):
for node in self.nodes.itervalues():
node.set_size(adj.value)
self.node_size_adjustment.connect("value-changed", node_size_changed)
self.node_size_adjustment.set_all(DEFAULT_NODE_SIZE, 0.01, 20, 0.1)
# --> transmissions smooth factor
vbox = gobject.new(gtk.VBox, border_width=0, visible=True)
scale = gobject.new(gtk.HScale, visible=True, digits=1)
vbox.pack_start(scale, True, True, 0)
vbox.pack_start(gobject.new(gtk.Label, label="Tx. Smooth Factor (s)", visible=True), True, True, 0)
settings_hbox.pack_start(vbox, False, False, 6)
self.transmissions_smoothing_adjustment = scale.get_adjustment()
self.transmissions_smoothing_adjustment.set_all(DEFAULT_TRANSMISSIONS_MEMORY*0.1, 0.1, 10, 0.1)
return expander
class _PanningState(object):
__slots__ = ['initial_mouse_pos', 'initial_canvas_pos', 'motion_signal']
def _begin_panning(self, widget, event):
self.canvas.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
self._panning_state = self._PanningState()
x, y, dummy = widget.window.get_pointer()
self._panning_state.initial_mouse_pos = (x, y)
x = self._scrolled_window.get_hadjustment().value
y = self._scrolled_window.get_vadjustment().value
self._panning_state.initial_canvas_pos = (x, y)
self._panning_state.motion_signal = self.canvas.connect("motion-notify-event", self._panning_motion)
def _end_panning(self, event):
if self._panning_state is None:
return
self.canvas.window.set_cursor(None)
self.canvas.disconnect(self._panning_state.motion_signal)
self._panning_state = None
def _panning_motion(self, widget, event):
assert self._panning_state is not None
if event.is_hint:
x, y, dummy = widget.window.get_pointer()
else:
x, y = event.x, event.y
hadj = self._scrolled_window.get_hadjustment()
vadj = self._scrolled_window.get_vadjustment()
mx0, my0 = self._panning_state.initial_mouse_pos
cx0, cy0 = self._panning_state.initial_canvas_pos
dx = x - mx0
dy = y - my0
hadj.value = cx0 - dx
vadj.value = cy0 - dy
return True
def _canvas_button_press(self, widget, event):
if event.button == 2:
self._begin_panning(widget, event)
return True
return False
def _canvas_button_release(self, dummy_widget, event):
if event.button == 2:
self._end_panning(event)
return True
return False
def _canvas_scroll_event(self, dummy_widget, event):
if event.direction == gtk.gdk.SCROLL_UP:
self.zoom.value *= 1.25
return True
elif event.direction == gtk.gdk.SCROLL_DOWN:
self.zoom.value /= 1.25
return True
return False
def get_hadjustment(self):
return self._scrolled_window.get_hadjustment()
def get_vadjustment(self):
return self._scrolled_window.get_vadjustment()
def create_gui(self):
self.window = gtk.Window()
vbox = gtk.VBox(); vbox.show()
self.window.add(vbox)
# canvas
self.canvas = goocanvas.Canvas()
self.canvas.connect_after("button-press-event", self._canvas_button_press)
self.canvas.connect_after("button-release-event", self._canvas_button_release)
self.canvas.connect("scroll-event", self._canvas_scroll_event)
self.canvas.props.has_tooltip = True
self.canvas.connect("query-tooltip", self._canvas_tooltip_cb)
self.canvas.show()
sw = gtk.ScrolledWindow(); sw.show()
self._scrolled_window = sw
sw.add(self.canvas)
vbox.pack_start(sw, True, True, 4)
self.canvas.set_size_request(600, 450)
self.canvas.set_bounds(-10000, -10000, 10000, 10000)
self.canvas.scroll_to(0, 0)
self.canvas.get_root_item().add_child(self.links_group)
self.links_group.set_property("visibility", goocanvas.ITEM_VISIBLE)
self.canvas.get_root_item().add_child(self.channels_group)
self.channels_group.set_property("visibility", goocanvas.ITEM_VISIBLE)
self.channels_group.raise_(self.links_group)
self.canvas.get_root_item().add_child(self.nodes_group)
self.nodes_group.set_property("visibility", goocanvas.ITEM_VISIBLE)
self.nodes_group.raise_(self.channels_group)
self.hud = hud.Axes(self)
hbox = gtk.HBox(); hbox.show()
vbox.pack_start(hbox, False, False, 4)
# zoom
zoom_adj = gtk.Adjustment(1.0, 0.01, 10.0, 0.02, 1.0, 0)
self.zoom = zoom_adj
def _zoom_changed(adj):
self.canvas.set_scale(adj.value)
zoom_adj.connect("value-changed", _zoom_changed)
zoom = gtk.SpinButton(zoom_adj)
zoom.set_digits(3)
zoom.show()
hbox.pack_start(gobject.new(gtk.Label, label=" Zoom:", visible=True), False, False, 4)
hbox.pack_start(zoom, False, False, 4)
_zoom_changed(zoom_adj)
# speed
speed_adj = gtk.Adjustment(1.0, 0.01, 10.0, 0.02, 1.0, 0)
def _speed_changed(adj):
self.speed = adj.value
self.sample_period = SAMPLE_PERIOD*adj.value
self._start_update_timer()
speed_adj.connect("value-changed", _speed_changed)
speed = gtk.SpinButton(speed_adj)
speed.set_digits(3)
speed.show()
hbox.pack_start(gobject.new(gtk.Label, label=" Speed:", visible=True), False, False, 4)
hbox.pack_start(speed, False, False, 4)
_speed_changed(speed_adj)
# Current time
        self.time_label = gobject.new(gtk.Label, label=" Time:", visible=True)
self.time_label.set_width_chars(20)
hbox.pack_start(self.time_label, False, False, 4)
# Screenshot button
screenshot_button = gobject.new(gtk.Button,
label="Snapshot",
relief=gtk.RELIEF_NONE, focus_on_click=False,
visible=True)
hbox.pack_start(screenshot_button, False, False, 4)
def load_button_icon(button, icon_name):
try:
import gnomedesktop
except ImportError:
sys.stderr.write("Could not load icon %s due to missing gnomedesktop Python module\n" % icon_name)
else:
icon = gnomedesktop.find_icon(gtk.icon_theme_get_default(), icon_name, 16, 0)
if icon is not None:
button.props.image = gobject.new(gtk.Image, file=icon, visible=True)
load_button_icon(screenshot_button, "applets-screenshooter")
screenshot_button.connect("clicked", self._take_screenshot)
# Shell button
if ipython_view is not None:
shell_button = gobject.new(gtk.Button,
label="Shell",
relief=gtk.RELIEF_NONE, focus_on_click=False,
visible=True)
hbox.pack_start(shell_button, False, False, 4)
load_button_icon(shell_button, "gnome-terminal")
shell_button.connect("clicked", self._start_shell)
# Play button
self.play_button = gobject.new(gtk.ToggleButton,
image=gobject.new(gtk.Image, stock=gtk.STOCK_MEDIA_PLAY, visible=True),
label="Simulate (F3)",
relief=gtk.RELIEF_NONE, focus_on_click=False,
use_stock=True, visible=True)
accel_group = gtk.AccelGroup()
self.window.add_accel_group(accel_group)
self.play_button.add_accelerator("clicked", accel_group,
gtk.keysyms.F3, 0, gtk.ACCEL_VISIBLE)
self.play_button.connect("toggled", self._on_play_button_toggled)
hbox.pack_start(self.play_button, False, False, 4)
self.canvas.get_root_item().connect("button-press-event", self.on_root_button_press_event)
vbox.pack_start(self._create_advanced_controls(), False, False, 4)
self.window.show()
def scan_topology(self):
print "scanning topology: %i nodes..." % (ns.network.NodeList.GetNNodes(),)
graph = pygraphviz.AGraph()
seen_nodes = 0
for nodeI in range(ns.network.NodeList.GetNNodes()):
seen_nodes += 1
if seen_nodes == 100:
print "scan topology... %i nodes visited (%.1f%%)" % (nodeI, 100*nodeI/ns.network.NodeList.GetNNodes())
seen_nodes = 0
node = ns.network.NodeList.GetNode(nodeI)
node_name = "Node %i" % nodeI
node_view = self.get_node(nodeI)
mobility = node.GetObject(ns.mobility.MobilityModel.GetTypeId())
if mobility is not None:
node_view.set_color("red")
pos = mobility.GetPosition()
node_view.set_position(*transform_point_simulation_to_canvas(pos.x, pos.y))
#print "node has mobility position -> ", "%f,%f" % (pos.x, pos.y)
else:
graph.add_node(node_name)
for devI in range(node.GetNDevices()):
device = node.GetDevice(devI)
device_traits = lookup_netdevice_traits(type(device))
if device_traits.is_wireless:
continue
if device_traits.is_virtual:
continue
channel = device.GetChannel()
if channel.GetNDevices() > 2:
if REPRESENT_CHANNELS_AS_NODES:
# represent channels as white nodes
if mobility is None:
channel_name = "Channel %s" % id(channel)
graph.add_edge(node_name, channel_name)
self.get_channel(channel)
self.create_link(self.get_node(nodeI), self.get_channel(channel))
else:
# don't represent channels, just add links between nodes in the same channel
for otherDevI in range(channel.GetNDevices()):
otherDev = channel.GetDevice(otherDevI)
otherNode = otherDev.GetNode()
otherNodeView = self.get_node(otherNode.GetId())
if otherNode is not node:
if mobility is None and not otherNodeView.has_mobility:
other_node_name = "Node %i" % otherNode.GetId()
graph.add_edge(node_name, other_node_name)
self.create_link(self.get_node(nodeI), otherNodeView)
else:
for otherDevI in range(channel.GetNDevices()):
otherDev = channel.GetDevice(otherDevI)
otherNode = otherDev.GetNode()
otherNodeView = self.get_node(otherNode.GetId())
if otherNode is not node:
if mobility is None and not otherNodeView.has_mobility:
other_node_name = "Node %i" % otherNode.GetId()
graph.add_edge(node_name, other_node_name)
self.create_link(self.get_node(nodeI), otherNodeView)
print "scanning topology: calling graphviz layout"
graph.layout(LAYOUT_ALGORITHM)
for node in graph.iternodes():
#print node, "=>", node.attr['pos']
node_type, node_id = node.split(' ')
pos_x, pos_y = [float(s) for s in node.attr['pos'].split(',')]
if node_type == 'Node':
obj = self.nodes[int(node_id)]
elif node_type == 'Channel':
obj = self.channels[int(node_id)]
obj.set_position(pos_x, pos_y)
print "scanning topology: all done."
self.emit("topology-scanned")
def get_node(self, index):
try:
return self.nodes[index]
except KeyError:
node = Node(self, index)
self.nodes[index] = node
self.nodes_group.add_child(node.canvas_item)
node.canvas_item.connect("button-press-event", self.on_node_button_press_event, node)
node.canvas_item.connect("button-release-event", self.on_node_button_release_event, node)
return node
def get_channel(self, ns3_channel):
try:
return self.channels[id(ns3_channel)]
except KeyError:
channel = Channel(ns3_channel)
self.channels[id(ns3_channel)] = channel
self.channels_group.add_child(channel.canvas_item)
return channel
def create_link(self, node, node_or_channel):
link = WiredLink(node, node_or_channel)
self.links_group.add_child(link.canvas_item)
link.canvas_item.lower(None)
def update_view(self):
#print "update_view"
self.time_label.set_text("Time: %f s" % ns.core.Simulator.Now().GetSeconds())
self._update_node_positions()
# Update information
for info_win in self.information_windows:
info_win.update()
self._update_transmissions_view()
self._update_drops_view()
self.emit("update-view")
def _update_node_positions(self):
for node in self.nodes.itervalues():
if node.has_mobility:
ns3_node = ns.network.NodeList.GetNode(node.node_index)
mobility = ns3_node.GetObject(ns.mobility.MobilityModel.GetTypeId())
if mobility is not None:
pos = mobility.GetPosition()
x, y = transform_point_simulation_to_canvas(pos.x, pos.y)
node.set_position(x, y)
if node is self.follow_node:
hadj = self._scrolled_window.get_hadjustment()
vadj = self._scrolled_window.get_vadjustment()
px, py = self.canvas.convert_to_pixels(x, y)
hadj.value = px - hadj.page_size/2
vadj.value = py - vadj.page_size/2
def center_on_node(self, node):
if isinstance(node, ns.network.Node):
node = self.nodes[node.GetId()]
elif isinstance(node, (int, long)):
node = self.nodes[node]
elif isinstance(node, Node):
pass
else:
raise TypeError("expected int, viz.Node or ns.network.Node, not %r" % node)
x, y = node.get_position()
hadj = self._scrolled_window.get_hadjustment()
vadj = self._scrolled_window.get_vadjustment()
px, py = self.canvas.convert_to_pixels(x, y)
hadj.value = px - hadj.page_size/2
vadj.value = py - vadj.page_size/2
def update_model(self):
self.simulation.lock.acquire()
try:
self.emit("simulation-periodic-update")
finally:
self.simulation.lock.release()
def do_simulation_periodic_update(self):
smooth_factor = int(self.transmissions_smoothing_adjustment.value*10)
transmissions = self.simulation.sim_helper.GetTransmissionSamples()
self._last_transmissions.append(transmissions)
while len(self._last_transmissions) > smooth_factor:
self._last_transmissions.pop(0)
drops = self.simulation.sim_helper.GetPacketDropSamples()
self._last_drops.append(drops)
while len(self._last_drops) > smooth_factor:
self._last_drops.pop(0)
def _get_label_over_line_position(self, pos1_x, pos1_y, pos2_x, pos2_y):
hadj = self._scrolled_window.get_hadjustment()
vadj = self._scrolled_window.get_vadjustment()
bounds_x1, bounds_y1 = self.canvas.convert_from_pixels(hadj.value, vadj.value)
bounds_x2, bounds_y2 = self.canvas.convert_from_pixels(hadj.value + hadj.page_size,
vadj.value + vadj.page_size)
pos1_x, pos1_y, pos2_x, pos2_y = ns.visualizer.PyViz.LineClipping(bounds_x1, bounds_y1,
bounds_x2, bounds_y2,
pos1_x, pos1_y,
pos2_x, pos2_y)
return (pos1_x + pos2_x)/2, (pos1_y + pos2_y)/2
def _update_transmissions_view(self):
transmissions_average = {}
for transmission_set in self._last_transmissions:
for transmission in transmission_set:
key = (transmission.transmitter.GetId(), transmission.receiver.GetId())
rx_bytes, count = transmissions_average.get(key, (0, 0))
rx_bytes += transmission.bytes
count += 1
transmissions_average[key] = rx_bytes, count
old_arrows = self._transmission_arrows
for arrow, label in old_arrows:
arrow.set_property("visibility", goocanvas.ITEM_HIDDEN)
label.set_property("visibility", goocanvas.ITEM_HIDDEN)
new_arrows = []
k = self.node_size_adjustment.value/5
for (transmitter_id, receiver_id), (rx_bytes, rx_count) in transmissions_average.iteritems():
transmitter = self.get_node(transmitter_id)
receiver = self.get_node(receiver_id)
try:
arrow, label = old_arrows.pop()
except IndexError:
arrow = goocanvas.Polyline(line_width=2.0, stroke_color_rgba=0x00C000C0, close_path=False, end_arrow=True)
arrow.set_property("parent", self.canvas.get_root_item())
arrow.props.pointer_events = 0
arrow.raise_(None)
label = goocanvas.Text(parent=self.canvas.get_root_item(), pointer_events=0)
label.raise_(None)
arrow.set_property("visibility", goocanvas.ITEM_VISIBLE)
line_width = max(0.1, math.log(float(rx_bytes)/rx_count/self.sample_period)*k)
arrow.set_property("line-width", line_width)
pos1_x, pos1_y = transmitter.get_position()
pos2_x, pos2_y = receiver.get_position()
points = goocanvas.Points([(pos1_x, pos1_y), (pos2_x, pos2_y)])
arrow.set_property("points", points)
kbps = float(rx_bytes*8)/1e3/rx_count/self.sample_period
label.set_properties(visibility=goocanvas.ITEM_VISIBLE_ABOVE_THRESHOLD,
visibility_threshold=0.5,
font=("Sans Serif %f" % int(1+BITRATE_FONT_SIZE*k)))
angle = math.atan2((pos2_y - pos1_y), (pos2_x - pos1_x))
if -PI_OVER_2 <= angle <= PI_OVER_2:
label.set_properties(text=("%.2f kbit/s →" % (kbps,)),
alignment=pango.ALIGN_CENTER,
anchor=gtk.ANCHOR_S,
x=0, y=-line_width/2)
M = cairo.Matrix()
M.translate(*self._get_label_over_line_position(pos1_x, pos1_y, pos2_x, pos2_y))
M.rotate(angle)
label.set_transform(M)
else:
label.set_properties(text=("← %.2f kbit/s" % (kbps,)),
alignment=pango.ALIGN_CENTER,
anchor=gtk.ANCHOR_N,
x=0, y=line_width/2)
M = cairo.Matrix()
M.translate(*self._get_label_over_line_position(pos1_x, pos1_y, pos2_x, pos2_y))
M.rotate(angle)
M.scale(-1, -1)
label.set_transform(M)
new_arrows.append((arrow, label))
self._transmission_arrows = new_arrows + old_arrows
def _update_drops_view(self):
drops_average = {}
for drop_set in self._last_drops:
for drop in drop_set:
key = drop.transmitter.GetId()
drop_bytes, count = drops_average.get(key, (0, 0))
drop_bytes += drop.bytes
count += 1
drops_average[key] = drop_bytes, count
old_arrows = self._drop_arrows
for arrow, label in old_arrows:
arrow.set_property("visibility", goocanvas.ITEM_HIDDEN)
label.set_property("visibility", goocanvas.ITEM_HIDDEN)
new_arrows = []
# get the coordinates for the edge of screen
vadjustment = self._scrolled_window.get_vadjustment()
bottom_y = vadjustment.value + vadjustment.page_size
dummy, edge_y = self.canvas.convert_from_pixels(0, bottom_y)
k = self.node_size_adjustment.value/5
for transmitter_id, (drop_bytes, drop_count) in drops_average.iteritems():
transmitter = self.get_node(transmitter_id)
try:
arrow, label = old_arrows.pop()
except IndexError:
arrow = goocanvas.Polyline(line_width=2.0, stroke_color_rgba=0xC00000C0, close_path=False, end_arrow=True)
arrow.props.pointer_events = 0
arrow.set_property("parent", self.canvas.get_root_item())
arrow.raise_(None)
label = goocanvas.Text()#, fill_color_rgba=0x00C000C0)
label.props.pointer_events = 0
label.set_property("parent", self.canvas.get_root_item())
label.raise_(None)
arrow.set_property("visibility", goocanvas.ITEM_VISIBLE)
arrow.set_property("line-width", max(0.1, math.log(float(drop_bytes)/drop_count/self.sample_period)*k))
pos1_x, pos1_y = transmitter.get_position()
pos2_x, pos2_y = pos1_x, edge_y
points = goocanvas.Points([(pos1_x, pos1_y), (pos2_x, pos2_y)])
arrow.set_property("points", points)
label.set_properties(visibility=goocanvas.ITEM_VISIBLE_ABOVE_THRESHOLD,
visibility_threshold=0.5,
font=("Sans Serif %i" % int(1+BITRATE_FONT_SIZE*k)),
text=("%.2f kbit/s" % (float(drop_bytes*8)/1e3/drop_count/self.sample_period,)),
alignment=pango.ALIGN_CENTER,
x=(pos1_x + pos2_x)/2,
y=(pos1_y + pos2_y)/2)
new_arrows.append((arrow, label))
self._drop_arrows = new_arrows + old_arrows
def update_view_timeout(self):
#print "view: update_view_timeout called at real time ", time.time()
# while the simulator is busy, run the gtk event loop
while not self.simulation.lock.acquire(False):
while gtk.events_pending():
gtk.main_iteration()
pause_messages = self.simulation.pause_messages
self.simulation.pause_messages = []
try:
self.update_view()
            self.simulation.target_time = ns.core.Simulator.Now().GetSeconds() + self.sample_period
#print "view: target time set to %f" % self.simulation.target_time
finally:
self.simulation.lock.release()
if pause_messages:
#print pause_messages
dialog = gtk.MessageDialog(parent=self.window, flags=0, type=gtk.MESSAGE_WARNING, buttons=gtk.BUTTONS_OK,
message_format='\n'.join(pause_messages))
dialog.connect("response", lambda d, r: d.destroy())
dialog.show()
self.play_button.set_active(False)
# if we're paused, stop the update timer
if not self.play_button.get_active():
self._update_timeout_id = None
return False
#print "view: self.simulation.go.set()"
self.simulation.go.set()
#print "view: done."
return True
def _start_update_timer(self):
if self._update_timeout_id is not None:
gobject.source_remove(self._update_timeout_id)
#print "start_update_timer"
self._update_timeout_id = gobject.timeout_add(int(SAMPLE_PERIOD/min(self.speed, 1)*1e3),
self.update_view_timeout,
priority=PRIORITY_UPDATE_VIEW)
def _on_play_button_toggled(self, button):
if button.get_active():
self._start_update_timer()
else:
if self._update_timeout_id is not None:
gobject.source_remove(self._update_timeout_id)
def _quit(self, *dummy_args):
if self._update_timeout_id is not None:
gobject.source_remove(self._update_timeout_id)
self._update_timeout_id = None
self.simulation.quit = True
self.simulation.go.set()
self.simulation.join()
gtk.main_quit()
def _monkey_patch_ipython(self):
# The user may want to access the NS 3 simulation state, but
# NS 3 is not thread safe, so it could cause serious problems.
# To work around this, monkey-patch IPython to automatically
        # acquire and release the simulation lock around each block of
        # code that is executed.
original_runcode = __IPYTHON__.runcode
def runcode(ip, *args):
#print "lock"
self.simulation.lock.acquire()
try:
return original_runcode(*args)
finally:
#print "unlock"
self.simulation.lock.release()
import types
__IPYTHON__.runcode = types.MethodType(runcode, __IPYTHON__)
def autoscale_view(self):
if not self.nodes:
return
self._update_node_positions()
positions = [node.get_position() for node in self.nodes.itervalues()]
min_x, min_y = min(x for (x,y) in positions), min(y for (x,y) in positions)
max_x, max_y = max(x for (x,y) in positions), max(y for (x,y) in positions)
min_x_px, min_y_px = self.canvas.convert_to_pixels(min_x, min_y)
max_x_px, max_y_px = self.canvas.convert_to_pixels(max_x, max_y)
dx = max_x - min_x
dy = max_y - min_y
dx_px = max_x_px - min_x_px
dy_px = max_y_px - min_y_px
hadj = self._scrolled_window.get_hadjustment()
vadj = self._scrolled_window.get_vadjustment()
new_dx, new_dy = 1.5*dx_px, 1.5*dy_px
if new_dx == 0 or new_dy == 0:
return
self.zoom.value = min(hadj.page_size/new_dx, vadj.page_size/new_dy)
x1, y1 = self.canvas.convert_from_pixels(hadj.value, vadj.value)
x2, y2 = self.canvas.convert_from_pixels(hadj.value+hadj.page_size, vadj.value+vadj.page_size)
width = x2 - x1
height = y2 - y1
center_x = (min_x + max_x) / 2
center_y = (min_y + max_y) / 2
self.canvas.scroll_to(center_x - width/2, center_y - height/2)
return False
def start(self):
self.scan_topology()
self.window.connect("delete-event", self._quit)
#self._start_update_timer()
gobject.timeout_add(200, self.autoscale_view)
self.simulation.start()
try:
__IPYTHON__
except NameError:
pass
else:
self._monkey_patch_ipython()
gtk.main()
def on_root_button_press_event(self, view, target, event):
if event.button == 1:
self.select_node(None)
return True
def on_node_button_press_event(self, view, target, event, node):
if event.button == 1:
self.select_node(node)
return True
elif event.button == 3:
self.popup_node_menu(node, event)
return True
elif event.button == 2:
self.begin_node_drag(node)
return True
return False
def on_node_button_release_event(self, view, target, event, node):
if event.button == 2:
self.end_node_drag(node)
return True
return False
class NodeDragState(object):
def __init__(self, canvas_x0, canvas_y0, sim_x0, sim_y0):
self.canvas_x0 = canvas_x0
self.canvas_y0 = canvas_y0
self.sim_x0 = sim_x0
self.sim_y0 = sim_y0
self.motion_signal = None
def begin_node_drag(self, node):
self.simulation.lock.acquire()
try:
ns3_node = ns.network.NodeList.GetNode(node.node_index)
mob = ns3_node.GetObject(ns.mobility.MobilityModel.GetTypeId())
if mob is None:
return
if self.node_drag_state is not None:
return
pos = mob.GetPosition()
finally:
self.simulation.lock.release()
x, y, dummy = self.canvas.window.get_pointer()
x0, y0 = self.canvas.convert_from_pixels(x, y)
self.node_drag_state = self.NodeDragState(x0, y0, pos.x, pos.y)
self.node_drag_state.motion_signal = node.canvas_item.connect("motion-notify-event", self.node_drag_motion, node)
    def node_drag_motion(self, item, target_item, event, node):
self.simulation.lock.acquire()
try:
ns3_node = ns.network.NodeList.GetNode(node.node_index)
mob = ns3_node.GetObject(ns.mobility.MobilityModel.GetTypeId())
if mob is None:
return False
if self.node_drag_state is None:
return False
x, y, dummy = self.canvas.window.get_pointer()
canvas_x, canvas_y = self.canvas.convert_from_pixels(x, y)
dx = (canvas_x - self.node_drag_state.canvas_x0)
dy = (canvas_y - self.node_drag_state.canvas_y0)
pos = mob.GetPosition()
pos.x = self.node_drag_state.sim_x0 + transform_distance_canvas_to_simulation(dx)
pos.y = self.node_drag_state.sim_y0 + transform_distance_canvas_to_simulation(dy)
#print "SetPosition(%G, %G)" % (pos.x, pos.y)
mob.SetPosition(pos)
node.set_position(*transform_point_simulation_to_canvas(pos.x, pos.y))
finally:
self.simulation.lock.release()
return True
def end_node_drag(self, node):
if self.node_drag_state is None:
return
node.canvas_item.disconnect(self.node_drag_state.motion_signal)
self.node_drag_state = None
def popup_node_menu(self, node, event):
menu = gtk.Menu()
self.emit("populate-node-menu", node, menu)
menu.popup(None, None, None, event.button, event.time)
def _update_ipython_selected_node(self):
# If we are running under ipython -gthread, make this new
# selected node available as a global 'selected_node'
# variable.
try:
__IPYTHON__
except NameError:
pass
else:
if self.selected_node is None:
ns3_node = None
else:
self.simulation.lock.acquire()
try:
ns3_node = ns.network.NodeList.GetNode(self.selected_node.node_index)
finally:
self.simulation.lock.release()
__IPYTHON__.user_ns['selected_node'] = ns3_node
def select_node(self, node):
if isinstance(node, ns.network.Node):
node = self.nodes[node.GetId()]
elif isinstance(node, (int, long)):
node = self.nodes[node]
elif isinstance(node, Node):
pass
elif node is None:
pass
else:
raise TypeError("expected None, int, viz.Node or ns.network.Node, not %r" % node)
if node is self.selected_node:
return
if self.selected_node is not None:
self.selected_node.selected = False
self.selected_node = node
if self.selected_node is not None:
self.selected_node.selected = True
if self._show_transmissions_mode == ShowTransmissionsMode.SELECTED:
if self.selected_node is None:
self.simulation.set_nodes_of_interest([])
else:
self.simulation.set_nodes_of_interest([self.selected_node.node_index])
self._update_ipython_selected_node()
def add_information_window(self, info_win):
self.information_windows.append(info_win)
self.simulation.lock.acquire()
try:
info_win.update()
finally:
self.simulation.lock.release()
def remove_information_window(self, info_win):
self.information_windows.remove(info_win)
def _canvas_tooltip_cb(self, canvas, x, y, keyboard_mode, tooltip):
#print "tooltip query: ", x, y
hadj = self._scrolled_window.get_hadjustment()
vadj = self._scrolled_window.get_vadjustment()
x, y = self.canvas.convert_from_pixels(hadj.value + x, vadj.value + y)
item = self.canvas.get_item_at(x, y, True)
#print "items at (%f, %f): %r | keyboard_mode=%r" % (x, y, item, keyboard_mode)
if not item:
return False
while item is not None:
obj = item.get_data("pyviz-object")
if obj is not None:
obj.tooltip_query(tooltip)
return True
item = item.props.parent
return False
def _get_export_file_name(self):
sel = gtk.FileChooserDialog("Save...", self.canvas.get_toplevel(),
gtk.FILE_CHOOSER_ACTION_SAVE,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
sel.set_default_response(gtk.RESPONSE_OK)
sel.set_local_only(True)
sel.set_do_overwrite_confirmation(True)
sel.set_current_name("Unnamed.pdf")
filter = gtk.FileFilter()
filter.set_name("Embedded PostScript")
filter.add_mime_type("image/x-eps")
sel.add_filter(filter)
filter = gtk.FileFilter()
filter.set_name("Portable Document Graphics")
filter.add_mime_type("application/pdf")
sel.add_filter(filter)
filter = gtk.FileFilter()
filter.set_name("Scalable Vector Graphics")
filter.add_mime_type("image/svg+xml")
sel.add_filter(filter)
resp = sel.run()
if resp != gtk.RESPONSE_OK:
sel.destroy()
return None
file_name = sel.get_filename()
sel.destroy()
return file_name
def _take_screenshot(self, dummy_button):
#print "Cheese!"
file_name = self._get_export_file_name()
if file_name is None:
return
# figure out the correct bounding box for what is visible on screen
x1 = self._scrolled_window.get_hadjustment().value
y1 = self._scrolled_window.get_vadjustment().value
x2 = x1 + self._scrolled_window.get_hadjustment().page_size
y2 = y1 + self._scrolled_window.get_vadjustment().page_size
bounds = goocanvas.Bounds()
bounds.x1, bounds.y1 = self.canvas.convert_from_pixels(x1, y1)
bounds.x2, bounds.y2 = self.canvas.convert_from_pixels(x2, y2)
dest_width = bounds.x2 - bounds.x1
dest_height = bounds.y2 - bounds.y1
#print bounds.x1, bounds.y1, " -> ", bounds.x2, bounds.y2
dummy, extension = os.path.splitext(file_name)
extension = extension.lower()
if extension == '.eps':
surface = cairo.PSSurface(file_name, dest_width, dest_height)
elif extension == '.pdf':
surface = cairo.PDFSurface(file_name, dest_width, dest_height)
elif extension == '.svg':
surface = cairo.SVGSurface(file_name, dest_width, dest_height)
else:
dialog = gtk.MessageDialog(parent = self.canvas.get_toplevel(),
flags = gtk.DIALOG_DESTROY_WITH_PARENT,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = "Unknown extension '%s' (valid extensions are '.eps', '.svg', and '.pdf')"
% (extension,))
dialog.run()
dialog.destroy()
return
# draw the canvas to a printing context
cr = cairo.Context(surface)
cr.translate(-bounds.x1, -bounds.y1)
self.canvas.render(cr, bounds, self.zoom.value)
cr.show_page()
surface.finish()
def set_follow_node(self, node):
if isinstance(node, ns.network.Node):
node = self.nodes[node.GetId()]
self.follow_node = node
def _start_shell(self, dummy_button):
if self.shell_window is not None:
self.shell_window.present()
return
self.shell_window = gtk.Window()
self.shell_window.set_size_request(750,550)
self.shell_window.set_resizable(True)
scrolled_window = gtk.ScrolledWindow()
scrolled_window.set_policy(gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC)
ipython = ipython_view.IPythonView()
ipython.modify_font(pango.FontDescription(SHELL_FONT))
ipython.set_wrap_mode(gtk.WRAP_CHAR)
ipython.show()
scrolled_window.add(ipython)
scrolled_window.show()
self.shell_window.add(scrolled_window)
self.shell_window.show()
self.shell_window.connect('destroy', self._on_shell_window_destroy)
self._update_ipython_selected_node()
__IPYTHON__.user_ns['viz'] = self
def _on_shell_window_destroy(self, window):
self.shell_window = None
initialization_hooks = []
def add_initialization_hook(hook, *args):
"""
Adds a callback to be called after
the visualizer is initialized, like this::
initialization_hook(visualizer, *args)
"""
global initialization_hooks
initialization_hooks.append((hook, args))
def set_bounds(x1, y1, x2, y2):
assert x2>x1
assert y2>y1
def hook(viz):
cx1, cy1 = transform_point_simulation_to_canvas(x1, y1)
cx2, cy2 = transform_point_simulation_to_canvas(x2, y2)
viz.canvas.set_bounds(cx1, cy1, cx2, cy2)
add_initialization_hook(hook)
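# Hedged usage sketch (not in the original module): any callable can be
# registered to run once the visualizer exists; arguments after the hook are
# forwarded to it.  'print_ready' and "demo" are hypothetical:
#
#   def print_ready(viz, tag):
#       print "visualizer ready:", tag
#   add_initialization_hook(print_ready, "demo")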
def start():
assert Visualizer.INSTANCE is None
if _import_error is not None:
import sys
print >> sys.stderr, "No visualization support (%s)." % (str(_import_error),)
ns.core.Simulator.Run()
return
load_plugins()
viz = Visualizer()
for hook, args in initialization_hooks:
gobject.idle_add(hook, viz, *args)
ns.network.Packet.EnablePrinting()
viz.start()
|
dnlove/ns3
|
src/visualizer/visualizer/core.py
|
Python
|
gpl-2.0
| 60,240
|
[
"FLEUR"
] |
ee93d09659b796df72c6114535a052deb6daeb56db6d82fcd897a29853f3cc65
|
#! /usr/bin/env python
##############################################################################
## pyvolve: Python platform for simulating evolutionary sequences.
##
## Written by Stephanie J. Spielman (stephanie.spielman@gmail.com)
##############################################################################
'''
This module will compute a vector of stationary frequencies.
'''
import os
import re
import sys
import time
import numpy as np
import random as rn
from Bio import SeqIO, AlignIO
from genetics import *
ZERO = 1e-8
MOLECULES = Genetics()
class StateFrequencies(object):
'''
Parent class for stationary (state, equilibrium, etc.) frequency calculations.
Child classes include the following:
1. **EqualFrequencies** (default)
- Sets frequencies as equal (i.e. 1/4 for all nucleotides if by='nucleotide', and so on.)
2. **RandomFrequencies**
- Computes (semi-)random frequency values for a given alphabet.
3. **CustomFrequencies**
- Computes frequencies based on a user-provided dictionary of frequencies.
4. **ReadFrequencies**
            - Computes frequencies from a sequence file. Contains an option to select specific columns from the sequence file only, but this requires that the file is an alignment.
'''
def __init__(self, by, **kwargs):
'''
A single positional argument is required for all child classes.
This argument can take on three values: "nucleotide", "amino_acid", or "codon," and it indicates *how* frequencies should be computed. These frequencies need not be the ultimate frequencies you want to compute. For example, it is possible to compute stationary frequencies in amino-acid space (via this argument) but ultimately return codon frequencies (using argument "type" in the .compute_frequencies() method, described below).
'''
# Frequency vectors "initialized". It is possible that not all of these will be used, but we set them up in case.
self.nucleotide_freqs = np.zeros(4)
self.amino_acid_freqs = np.zeros(20)
self.codon_freqs = np.zeros(61)
# Input parameters and general setup.
self._by = by.lower()
assert(self._by =='amino_acid' or self._by == 'codon' or self._by == 'nucleotide'), "\n\nYou did not provide a reasonable alphabet for frequency calculations! Options include 'nucleotide', 'amino_acid', or 'codon'."
self._set_code_size()
self._byFreqs = np.zeros(self._size)
# NOTE: restrict can be used only with EqualFrequencies and RandomFrequencies!!
self._restrict = kwargs.get('restrict', self._code)
if self._restrict != self._code:
assert(type(self._restrict) is list), "*restrict* must be a list of state strings corresponding to the 'by' argument. For instance, you may use (by = 'amino_acid', restrict = ['A', 'C', 'G', 'P'])."
def compute_frequencies(self, **kwargs):
'''
Calculate and return a vector of state frequencies. At this stage, the StateFrequencies object must already have been initialized with the keyword argument by = <amino_acid/codon/nucleotide>.
Optional keyword arguments include,
1. **type** ( = "nucleotide", "amino_acid", or "codon") represents the type of final frequencies to return. If not specified, the alphabet of returned frequencies will be that specified with the **by** keyword.
        2. **savefile** is a file name to which final frequencies may be saved. Output frequencies will be ordered alphabetically, i.e. A, C, G, T for nucleotides; A, C, D, E, etc. for amino acids; and AAA, AAC, AAG, AAT, ACA, etc. for codons.
'''
# Input arguments and general setup
type = kwargs.get('type', self._by)
assert(type =='amino_acid' or type == 'codon' or type == 'nucleotide'), "Can only calculate codon, amino acid, or nucleotide frequencies."
if type == 'amino_acid' or type == 'codon':
assert(self._by == 'amino_acid' or self._by == 'codon'), "\n\nIncompatible *type* argument! If you would like to obtain amino acid or codon frequencies, the provided alphabet when defining this frequency object must be either 'codon' or 'amino_acid', NOT 'nucleotide'."
savefile = kwargs.get('savefile', None)
        # Create self._byFreqs if it does not already exist. Once created, assign it as amino acid, codon, or nucleotide frequencies.
if np.array_equal(self._byFreqs, np.zeros(self._size)):
self._generate_byFreqs()
assert( abs(np.sum(self._byFreqs) - 1.) < ZERO), "State frequencies improperly generated. Do not sum to 1."
self._assign_byFreqs()
# Convert frequencies if needed
if type != self._by:
conv_expr = "self._"+self._by+"_to_"+type+"()"
eval(conv_expr)
# Save if needed
if savefile is not None:
np.savetxt(savefile, eval("self."+type+"_freqs"), fmt='%.5e')
return eval("self."+type+"_freqs")
def _set_code_size(self):
''' Set up the code (alphabet) and dimensionality for computing self._byFreqs '''
if self._by == 'amino_acid':
self._code = MOLECULES.amino_acids
elif self._by == 'codon':
self._code = MOLECULES.codons
elif self._by == 'nucleotide':
self._code = MOLECULES.nucleotides
self._size = len(self._code)
############################################# FREQUENCY CONVERSIONS ###############################################
def _amino_acid_to_codon(self):
'''
Calculate codon frequencies from amino acid frequencies. (by = 'amino_acid', type = 'codon')
Assumes equal frequencies for synonymous codons.
'''
for aa_count in range(20):
syn = MOLECULES.genetic_code[aa_count]
for synCodon in syn:
cind = MOLECULES.codons.index(synCodon)
self.codon_freqs[cind] = self.amino_acid_freqs[aa_count]/float(len(syn))
assert( abs(np.sum(self.codon_freqs) - 1.) < ZERO), "Codon state frequencies improperly calculated from amino acid frequencies. Do not sum to 1."
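    # Worked example (added for clarity, not in the original): if lysine has
    # amino-acid frequency 0.06 and two synonymous codons (AAA, AAG), each
    # codon receives 0.06 / 2 = 0.03.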
def _codon_to_amino_acid(self):
'''
Calculate amino acid frequencies from codon frequencies (by = 'codon', type = 'amino_acid').
'''
for a in range(len(MOLECULES.amino_acids)):
codons1 = MOLECULES.genetic_code[a]
for c in codons1:
ind = MOLECULES.codons.index(c)
self.amino_acid_freqs[a] += self.codon_freqs[ind]
        assert( abs(np.sum(self.amino_acid_freqs) - 1.) < ZERO), "Amino acid state frequencies improperly generated from codon frequencies. Do not sum to 1."
def _codon_to_nucleotide(self):
'''
Calculate nucleotide frequencies from codon frequencies (by = 'codon', type = 'nucleotide').
'''
for i in range(61):
codon_freq = self.codon_freqs[i]
codon = MOLECULES.codons[i]
for n in range(4):
nuc = MOLECULES.nucleotides[n]
                nuc_freq = float(codon.count(nuc))/3. # fraction of this codon's three positions occupied by that nucleotide
self.nucleotide_freqs[n] += codon_freq * nuc_freq
        assert( abs(np.sum(self.nucleotide_freqs) - 1.) < ZERO), "Nucleotide state frequencies improperly generated. Do not sum to 1."
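    # Worked example (added for clarity, not in the original): a codon 'AAT'
    # with frequency 0.03 contributes 2/3 * 0.03 to A and 1/3 * 0.03 to T,
    # since A occupies two of its three positions and T one.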
def _amino_acid_to_nucleotide(self):
'''
Calculate nucleotide frequencies from amino acid frequencies (by = 'amino_acid', type = 'nucleotide').
'''
self._amino_acid_to_codon()
self._codon_to_nucleotide()
#####################################################################################
def _assign_byFreqs(self):
'''
        Called from within compute_frequencies, this function assigns the frequency vector to the appropriate attribute variable.
'''
if self._by == 'codon':
self.codon_freqs = self._byFreqs
elif self._by == 'amino_acid':
self.amino_acid_freqs = self._byFreqs
elif self._by == 'nucleotide':
self.nucleotide_freqs = self._byFreqs
else:
raise AssertionError("WHAT ARE WE DOING HERE.")
class EqualFrequencies(StateFrequencies):
'''
This class may be used to compute equal state frequencies (amino = 1/20, codon = 1/61, nucleotide = 1/4).
'''
def __init__(self, by, **kwargs):
'''
Required arguments include,
1. **by**. See parent class StateFrequencies for details.
Optional arguments include,
1. **restrict**, a list (in which each element is a string) specifying which states should have non-zero frequencies. Default: all.
Examples:
.. code-block:: python
>>> # Return 1/20 amino acid frequencies in the variable `frequencies`
        >>> f = EqualFrequencies("amino_acid")
        >>> frequencies = f.compute_frequencies()
>>> # Compute equal codon frequencies and convert to amino-acid space. `frequencies` will contain amino-acid frequencies.
>>> f = EqualFrequencies("codon")
>>> frequencies = f.compute_frequencies(type = "amino_acid")
>>> # Compute equal amino acid frequencies, but allowing only certain amino acids to have non-zero frequencies
>>> f = EqualFrequencies("amino_acid", restrict = ["A", "G", "P", "T", "W"])
>>> frequencies = f.compute_frequencies()
'''
super(EqualFrequencies, self).__init__(by, **kwargs)
def _generate_byFreqs(self):
'''
Compute self._byFreqs
'''
fill = 1./float(len(self._restrict))
for entry in self._restrict:
self._byFreqs[self._code.index(entry)] = fill
class RandomFrequencies(StateFrequencies):
'''
This class may be used to compute "semi-random" state frequencies. The resulting frequency distributions are not truly random, but are instead virtually flat distributions with some noise.
'''
def __init__(self, by, **kwargs):
'''
Required arguments include,
1. **by**. See parent class StateFrequencies for details.
Optional arguments include,
1. **restrict**, a list (in which each element is a string) specifying which states should have non-zero frequencies. Default: all.
Examples:
.. code-block:: python
>>> # Return random amino acid frequencies in `frequencies` variable
>>> f = RandomFrequencies("amino_acid")
>>> frequencies = f.compute_frequencies()
>>> # Compute random amino acid frequencies, but allowing only certain amino acids to have non-zero frequencies
>>> f = RandomFrequencies("amino_acid", restrict = ["A", "G", "P", "T", "W"])
>>> frequencies = f.compute_frequencies()
'''
super(RandomFrequencies, self).__init__(by,**kwargs)
self._partial_restrict = self._restrict[:-1] # all but last
def _generate_byFreqs(self):
'''
        Compute self._byFreqs. Because this relies on random sampling, the search can stall; abort and restart if it takes too long.
'''
        max_freq = 2./len(self._restrict)
        min_freq = 1e-5
        abort_after_time = 0.001 + time.time()
        restart_search = True
        while restart_search:
            restart_search = False
            total = 0.
            self._byFreqs = np.zeros(self._size)
            for entry in self._partial_restrict:
                freq = rn.uniform(min_freq, max_freq)
                while (total + freq > 1):
                    freq = rn.uniform(min_freq, max_freq)
                    if time.time() > abort_after_time:
                        restart_search = True
                        break
                if restart_search:
                    break
                total += freq
                self._byFreqs[self._code.index(entry)] = freq
            self._byFreqs[self._code.index(self._restrict[-1])] = (1.-total)
class CustomFrequencies(StateFrequencies):
'''
This class may be used to compute frequencies directly from a user-provided python dictionary of frequencies.
Required keyword arguments include,
1. **by**. See parent class StateFrequencies for details.
    2. **freq_dict**, a dictionary of frequencies, in which keys are states (e.g. a codon key would be 'ATC', an amino acid key would be 'W', and a nucleotide key would be 'T'), and values are float frequencies which sum to 1. Note that the keys in this dictionary must correspond to the **by** keyword provided. The dictionary values *MUST* sum to 1, and any states not included in the dictionary are given a frequency of 0.
Examples:
.. code-block:: python
>>> # custom random amino acid frequencies
>>> f = CustomFrequencies("amino_acid", freq_dict = {'A':0.5, 'C':0.1, 'D':0.2, 'E':0.3})
>>> frequencies = f.compute_frequencies()
>>> # use amino-acid information to get custom codon frequencies (note: synonymous codons are assigned equal frequencies!)
>>> f = CustomFrequencies("amino_acid", freq_dict = {'F':0.5, 'W':0.1, 'D':0.2, 'E':0.3})
>>> frequencies = f.compute_frequencies(type = "codon")
>>> # custom nucleotide frequencies with lots of GC bias
>>> f = CustomFrequencies("nucleotide", freq_dict = {'A':0.1, 'C':0.45, 'T':0.05, 'G': 0.4})
>>> frequencies = f.compute_frequencies()
'''
def __init__(self, by, **kwargs):
super(CustomFrequencies, self).__init__(by, **kwargs)
self.given_freqs = kwargs.get('freq_dict', None) # Dictionary of desired frequencies.
        if self.given_freqs is None:
self.given_freqs = {}
self._sanity_freq_dict() # Quick sanity check on frequencies
def _sanity_freq_dict(self):
'''
Sanity check to ensure the following:
1. self._by is the same alphabet as the freq_dict keys
2. freq_dict keys are consistent.
3. frequencies sum to 1
'''
prob_sum = 0.
for entry in self.given_freqs:
            assert( len(entry) == len(self._code[0]) and entry in self._code ), "\n\n Your *freq_dict* keys are not properly formatted. Please ensure that your keys correspond to the *by* alphabet, and that you specify only canonical amino acids, nucleotides, or sense codons."
prob_sum += float(self.given_freqs[entry])
assert( abs( 1. - prob_sum) < ZERO), "\n\nFrequencies provided in *freq_dict* do not sum to 1!"
def _generate_byFreqs(self):
'''
Compute self._byFreqs.
'''
for i in range(self._size):
element = self._code[i]
if element in self.given_freqs:
self._byFreqs[i] = self.given_freqs[element]
class ReadFrequencies(StateFrequencies):
'''
This class may be used to compute frequencies directly from a specified sequence file. Frequencies may be computed globally (using entire file), or based on specific columns (i.e. site-specific frequencies), provided the file contains a sequence alignment.
    Required positional arguments include,
1. **by**. See parent class StateFrequencies for details.
Required keyword arguments include,
1. **file** is the file containing sequences from which frequencies will be computed. By default, this file is assumed to be in FASTA format, although you can specify a different format with the optional argument **format**
Optional keyword arguments include,
1. **format** is the sequence file format (case-insensitive). Sequence files are parsed using Biopython, so any format they accept is accepted here (e.g. fasta, phylip, phylip-relaxed, nexus, clustal...)
2. **columns** is a list of integers giving the column(s) which should be considered in frequency calculations. This list should be indexed *from 1*. If this argument is not provided, all positions in sequence file will be considered. Note that this argument is only possible for alignments!
Examples:
.. code-block:: python
>>> # Compute amino acid frequencies globally from a sequence file
>>> f = ReadFrequencies("amino_acid", file = "my_sequence_file.fasta")
>>> frequencies = f.compute_frequencies()
    >>> # Compute amino acid frequencies globally from a sequence file, and then convert to codon frequencies (note: synonymous codons are assigned equal frequencies!)
>>> f = ReadFrequencies("amino_acid", file = "my_sequence_file.fasta")
>>> frequencies = f.compute_frequencies(type = "codon")
>>> # Compute nucleotide frequencies from a specific range of columns (1-10, inclusive) from a nucleotide alignment file
>>> f = ReadFrequencies("nucleotide", file = "my_nucleotide_alignment.phy", format = "phylip", columns = range(1,11))
>>> frequencies = f.compute_frequencies()
'''
def __init__(self, by, **kwargs):
super(ReadFrequencies, self).__init__(by, **kwargs)
# Input variables, options
self.seqfile = kwargs.get('file', None)
self.format = kwargs.get('format', 'fasta').lower() # Biopython requires that this flag is lowercase.
self.which_columns = kwargs.get('columns', None)
self._seqs = [] # Sequence records obtained from sequence file
self._make_seq_list()
def _sanity_which_columns(self):
'''
Sanity check that *columns* argument has been properly specified.
1. Should be a list
2. Should not include columns outside of the length of the alignment [1,alnlen]
3. Should be converted to a numpy array
'''
try:
AlignIO.read(self.seqfile, self.format)
        except Exception:
            raise AssertionError("\n\nYour sequence file does not appear to be an *alignment*. If you would like to get frequencies from specific columns only, it must be an alignment!")
assert( type(self.which_columns) is list), "\n\nArgument *columns* must be a list of integers giving the column(s) (indexed from 1!) which should be considered for frequency calculations."
self.which_columns = np.array(self.which_columns) - 1
if self._by == 'codon':
which_check = self._alnlen / 3
else:
which_check = self._alnlen
assert( (self.which_columns >= 0).all() and (self.which_columns <= which_check).all() ), "\n\nYour column indices specified in *which_columns* do not play well with alignment! Remember that column indexing starts at *1*, and you cannot specify columns that don't exist."
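    # Worked example (added for clarity, not in the original): columns =
    # range(1, 11) selects the first ten alignment columns; after the shift
    # above, self.which_columns holds the 0-based indices 0..9.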
def _make_seq_list(self):
'''
Read in sequence file and set up variables used in frequency calculations.
Additionally performs some sanity checks on sequence file, sequences themselves, and the which_columns argument (if specified).
'''
assert(self.seqfile is not None), "\n\n You must provide a sequence/alignment file with the argument file=<my_file_name> to use the ReadFrequencies class."
assert(os.path.exists(self.seqfile)), "\n\n Your input file does not exist! Check the path?"
try:
raw = list(SeqIO.parse(self.seqfile, self.format))
        except Exception:
raise AssertionError("\n\nYour sequence file could not be parsed. Note that if your sequence file is not in FASTA format, you must specify its format with the argument *format*.")
self._numseq = len(raw)
self._alnlen = len(raw[0]) # This will only come into play if we're collecting columns.
if self._by == 'codon':
assert( self._alnlen%3 == 0), "\n\nThe length of your sequence alignment is not a multiple of three, so you don't seem to actually have codons."
if self.which_columns is not None:
self._sanity_which_columns()
for entry in raw:
self._seqs.append(str(entry.seq))
def _generate_byFreqs(self):
'''
Compute self._byFreqs
'''
total_characters = 0.
for row in self._seqs:
if self.which_columns is not None:
for col in self.which_columns:
if self._by == "codon":
char = row[col*3 : col*3 + 3]
else:
char = row[col]
if char in self._code:
total_characters += 1.
self._byFreqs[ self._code.index(char) ] += 1
else:
for i in range(len(row)):
if self._by == "codon":
char = row[i*3 : i*3+3]
else:
char = row[i]
if char in self._code:
total_characters += 1.
self._byFreqs[ self._code.index(char) ] += 1
self._byFreqs = np.divide(self._byFreqs, total_characters)
class EmpiricalModelFrequencies():
'''
    This class assigns state frequencies from a specified amino acid or codon empirical model (e.g. JTT, WAG, ECM...). The frequencies used are each model's defaults, i.e. those given in the model's original paper.
The currently supported models include,
1. *Amino acid*: JTT, WAG, LG
2. *Codon*: ECM(un)rest
Required positional arguments include,
1. **model** is empirical model of choice (case-insensitive). This argument should be specified as any of the following: JTT, WAG, LG, ECMrest, ECMunrest.
Examples:
.. code-block:: python
>>> # Assign WAG frequencies
>>> f = EmpiricalModelFrequencies("WAG")
>>> frequencies = f.compute_frequencies()
>>> # Assign ECMrest frequencies (ECM "restricted" model, in which only single nucleotide changes occur instantaneously)
    >>> f = EmpiricalModelFrequencies("ecmrest")
>>> frequencies = f.compute_frequencies()
'''
def __init__(self, model):
try:
self.empirical_model = model.lower()
        except AttributeError:
print("\n\n You must specify an empirical model to obtain its frequencies.")
def compute_frequencies(self):
'''
Function to return state frequencies. No arguments are needed.
'''
from . import empirical_matrices as em
try:
return np.array( eval("em."+self.empirical_model+"_freqs") )
        except Exception:
print("Couldn't figure out your empirical model specification! We only support the following empirical models (for frequency specification):")
print("Amino acid: JTT, WAG, LG, mtmam, mtREV24, or DAYHOFF.")
print("Codon: ECM restricted or unrestricted, which can be specified respectively as ECMrest and ECMunrest (case insensitive).")
sys.exit()
|
SimonGreenhill/pyvolve
|
src/state_freqs.py
|
Python
|
bsd-2-clause
| 24,738
|
[
"Biopython"
] |
a2dc6ba1203c090eeece3684a6409c54831d195b692f68e06dc1186de4aad84a
|
import numpy as np
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
import pyroms
def remap(src_array, remap_file, src_grad1=None, src_grad2=None, \
src_grad3=None, spval=1e37, verbose=False):
'''
    Remap src_array from the source grid to the destination grid, using the addresses and weights computed in a setup phase and stored in remap_file.
'''
# get info from remap_file
data = netCDF.Dataset(remap_file, 'r')
title = data.title
map_method = data.map_method
normalization = data.normalization
src_grid_name = data.source_grid
dst_grid_name = data.dest_grid
src_grid_size = len(data.dimensions['src_grid_size'])
dst_grid_size = len(data.dimensions['dst_grid_size'])
num_links = len(data.dimensions['num_links'])
src_grid_dims = data.variables['src_grid_dims'][:]
dst_grid_dims = data.variables['dst_grid_dims'][:]
# get weights and addresses from remap_file
map_wts = data.variables['remap_matrix'][:]
dst_add = data.variables['dst_address'][:]
src_add = data.variables['src_address'][:]
# get destination mask
dst_mask = data.variables['dst_grid_imask'][:]
# remap from src grid to dst grid
if src_grad1 is not None:
iorder = 2
else:
iorder = 1
if verbose is True:
print 'Reading remapping: ', title
print 'From file: ', remap_file
print ' '
print 'Remapping between:'
print src_grid_name
print 'and'
print dst_grid_name
print 'Remapping method: ', map_method
ndim = len(src_array.squeeze().shape)
if (ndim == 2):
tmp_dst_array = np.zeros((dst_grid_size))
tmp_src_array = src_array.flatten()
if iorder == 1:
# first order remapping
            # ensure that map_wts is a (num_links,4) array
tmp_map_wts = np.zeros((num_links,4))
tmp_map_wts[:,0] = map_wts[:,0].copy()
map_wts = tmp_map_wts
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array)
if iorder == 2:
# second order remapping
if map_method == 'conservative':
                # ensure that map_wts is a (num_links,4) array
tmp_map_wts = np.zeros((num_links,4))
tmp_map_wts[:,0:2] = map_wts[:,0:2].copy()
map_wts = tmp_map_wts
tmp_src_grad1 = src_grad1.flatten()
tmp_src_grad2 = src_grad2.flatten()
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array, \
tmp_src_grad1, tmp_src_grad2)
elif map_method == 'bicubic':
tmp_src_grad1 = src_grad1.flatten()
tmp_src_grad2 = src_grad2.flatten()
tmp_src_grad3 = src_grad3.flatten()
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array, \
tmp_src_grad1, tmp_src_grad2, \
tmp_src_grad3)
else:
raise ValueError, 'Unknown method'
# mask dst_array
idx = np.where(dst_mask == 0)
tmp_dst_array[idx] = spval
tmp_dst_array = np.ma.masked_values(tmp_dst_array, spval)
# reshape
dst_array = np.reshape(tmp_dst_array, (dst_grid_dims[1], \
dst_grid_dims[0]))
elif (ndim == 3):
nlev = src_array.shape[0]
dst_array = np.zeros((nlev, dst_grid_dims[1], dst_grid_dims[0]))
        # loop over vertical levels
for k in range(nlev):
tmp_src_array = src_array[k,:,:].flatten()
tmp_dst_array = np.zeros((dst_grid_size))
if iorder == 1:
# first order remapping
                # ensure that map_wts is a (num_links,4) array
tmp_map_wts = np.zeros((num_links,4))
tmp_map_wts[:,0] = map_wts[:,0].copy()
map_wts = tmp_map_wts
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array)
if iorder == 2:
# second order remapping
if map_method == 'conservative':
tmp_src_grad1 = src_grad1.flatten()
tmp_src_grad2 = src_grad2.flatten()
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array, \
tmp_src_grad1, tmp_src_grad2)
elif map_method == 'bicubic':
tmp_src_grad1 = src_grad1.flatten()
tmp_src_grad2 = src_grad2.flatten()
tmp_src_grad3 = src_grad3.flatten()
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array, \
tmp_src_grad1, tmp_src_grad2, \
tmp_src_grad3)
else:
raise ValueError, 'Unknown method'
# mask dst_array
idx = np.where(dst_mask == 0)
tmp_dst_array[idx] = spval
tmp_dst_array = np.ma.masked_values(tmp_dst_array, spval)
# reshape
dst_array[k,:,:] = np.reshape(tmp_dst_array, (dst_grid_dims[1], \
dst_grid_dims[0]))
else:
raise ValueError, 'src_array must have two or three dimensions'
# close data file
data.close()
return dst_array
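# Hedged usage sketch (not part of the original file); the variable names and
# file names are hypothetical:
#
#   src_field = netCDF.Dataset('src.nc').variables['temp'][0]  # 2-D slice
#   dst_field = remap(src_field, 'remap_weights_src_to_dst.nc', verbose=True)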
|
kshedstrom/pyroms
|
pyroms/pyroms/remapping/remap.py
|
Python
|
bsd-3-clause
| 5,852
|
[
"NetCDF"
] |
8c5b593d4b245b95596b29f0a5a9517417942380b5958225b337e57ea26708cd
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""Set of classes to store internationalization data in the database. Camelot
applications can be translated by the developer using regular PO files, or by
the user. In case the user makes a translation, this translation is stored into
the `Translation` table. This table can be exported to PO files for inclusion
in the development cycle.
"""
from camelot.core.orm import Entity, Session
from camelot.core.utils import ugettext_lazy as _
from camelot.admin.action import Action
from camelot.admin.entity_admin import EntityAdmin
from camelot.view.art import Icon
from camelot.view.utils import default_language
import camelot.types
from sqlalchemy import sql
from sqlalchemy.schema import Column
from sqlalchemy.types import Unicode, INT
import logging
logger = logging.getLogger( 'camelot.model.i18n' )
class ExportAsPO( Action ):
verbose_name = _('PO Export')
icon = Icon('tango/16x16/actions/document-save.png')
def model_run( self, model_context ):
from camelot.view.action_steps import SelectFile
select_file = SelectFile()
select_file.existing = False
filenames = yield select_file
for filename in filenames:
            with open(filename, 'w') as po_file:
                for translation in model_context.get_collection():
                    po_file.write( (u'msgid "%s"\n'%translation.source).encode('utf-8') )
                    po_file.write( (u'msgstr "%s"\n\n'%translation.value).encode('utf-8') )
class Translation( Entity ):
"""Table to store user generated translations or customization.
"""
__tablename__ = 'translation'
language = Column( camelot.types.Language, index = True, nullable = False )
source = Column( Unicode( 500 ), index = True, nullable = False )
# value needs to be indexed as well, because when starting up we
# want to load only the translations that have a value specified
value = Column( Unicode( 500 ), index = True )
cid = Column( INT(), default = 0, index = True )
uid = Column( INT(), default = 0, index = True )
# cache, to prevent too much of the same sql queries
_cache = dict()
class Admin( EntityAdmin ):
verbose_name_plural = _( 'Translations' )
form_size = ( 700, 150 )
list_display = ['source', 'language', 'value', 'uid']
list_filter = ['language']
list_actions = [ExportAsPO()]
field_attributes = { 'language':{ 'default':default_language } }
@classmethod
def translate( cls, source, language ):
"""Translate source to language, return None if no translation is found"""
if source:
key = ( source, language )
if key in cls._cache:
return cls._cache[key]
query = Session().query( cls )
query = query.filter( sql.and_( cls.source == unicode( source ),
cls.language == language,
cls.uid != 0 ) )
translation = query.first()
if translation:
cls._cache[key] = translation.value
return translation.value
return None
return ''
@classmethod
def translate_or_register( cls, source, language ):
"""Translate source to language, if no translation is found, register the
source as to be translated and return the source"""
if source:
source = unicode( source )
translation = cls.translate( source, language )
if not translation:
session = Session()
query = session.query( cls )
translation = query.filter_by( source = source,
language = language ).first()
if not translation:
if ( source, language ) not in cls._cache:
registered_translation = Translation( source = source,
language = language )
cls._cache[( source, language )] = source
session.flush( [registered_translation] )
logger.debug( 'registered %s with id %s' % ( source, registered_translation.id ) )
return source
return translation
return ''
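# A minimal usage sketch (illustrative only; assumes a configured Camelot
# Session and a populated 'translation' table):
#
#     from camelot.model.i18n import Translation
#     text = Translation.translate_or_register( u'Save', 'nl_BE' )
#     # returns the stored Dutch translation if one exists; otherwise the
#     # source string is registered for later translation and returned as-is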
|
jeroendierckx/Camelot
|
camelot/model/i18n.py
|
Python
|
gpl-2.0
| 5,504
|
[
"VisIt"
] |
87505c75580e12c005bce15a76d0b3b5a87bcd05667c74c6ecdfcc63442ba0f1
|
############################################################
#Example 3: Saving the coordinates as an xyz file
############################################################
import numpy as np
import pygmin.potentials.lj as lj
import pygmin.basinhopping as bh
from pygmin.takestep import displace
from pygmin.storage.savenlowest import SaveN as saveit
natoms = 12
coords=np.random.random(3*natoms)
potential = lj.LJ()
step = displace.RandomDisplacement(stepsize=0.5)
storage = saveit(nsave=10)
opt = bh.BasinHopping(coords, potential, step, storage=storage.insert)
opt.run(100)
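# Write the lowest-energy minimum in standard .xyz format: line 1 is the atom
# count, line 2 a comment line (here the energy), then one "element x y z" row
# per atom ("LA" below is just a placeholder element label).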
with open("lowest", "w") as fout:
fout.write( str(natoms) + "\n")
fout.write( str(storage.data[0].energy) + "\n")
atoms=storage.data[0].coords.reshape(natoms, 3)
for a in atoms:
fout.write( "LA "+ str(a[0])+ " " + str(a[1]) + " " + str(a[2]) + "\n" )
############################################################
# some visualization
############################################################
try:
import pygmin.utils.pymolwrapper as pym
pym.start()
frame=1
print storage.data
for minimum in storage.data:
coords=minimum.coords.reshape(natoms, 3)
pym.draw_spheres(coords, "A", frame)
frame=frame+1
except Exception:
print "Could not draw using pymol, skipping this step"
|
js850/PyGMIN
|
examples/basinhopping_no_system_class/3_savelowest.py
|
Python
|
gpl-3.0
| 1,334
|
[
"PyMOL"
] |
23358169c7900adf68587cc677031c4b6c7b2676777d7fdb5f8503801507c424
|
import asyncore
import socket
import struct
from markovobfuscate.obfuscation import MarkovKeyState
import logging
import threading
import zlib
BUFFER_SIZE = 4096
class LocalProxy(asyncore.dispatcher):
"""Listens for new client connections and creates new ToClient
objects for each one."""
def __init__(self, markov, localHost, localPort, mtunnelHost, mtunnelPort):
"""Creates the socket, binds to clientPort"""
asyncore.dispatcher.__init__(self)
self.markov = markov
self.clientPort = localPort
self.host = localHost
self.mtunnel_host = mtunnelHost
self.mtunnel_port = mtunnelPort
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind((self.host, self.clientPort))
self.listen(5)
def handle_accept(self):
"""Handles new client connections"""
conn, addr = self.accept()
logging.info("{0} connected".format(addr))
return LocalProxy.SendToClient(self.markov, conn, self.mtunnel_host, self.mtunnel_port)
def handle_close(self):
self.close()
logging.info("Local socket closed")
def run(self):
logging.info("Local server running...")
self.listen(5)
def die(self, error):
logging.info("Death....")
logging.info("Error: %s" % error)
self.handle_close()
class SendToClient(asyncore.dispatcher_with_send):
def __init__(self, markov, sock, remote_server, remote_port):
self.markov = markov
asyncore.dispatcher_with_send.__init__(self, sock)
msock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
msock.connect((remote_server, remote_port))
self.msock = LocalProxy.ToMTunnelServer(markov, self, msock)
def handle_read(self):
data = self.recv(BUFFER_SIZE)
logging.info("Recv'd {0} bytes from the client".format(len(data)))
data = self.markov.obfuscate_string(zlib.compress(data, 9)) + "\n"
logging.info("Obfuscated into {0} bytes and sending to other side of the tunnel".format(len(data)))
self.msock.send(data)
def handle_close(self):
logging.info("Closing client socket...")
self.close()
class ToMTunnelServer(asyncore.dispatcher_with_send):
def __init__(self, markov, sock, oSock):
self.read_buffer = ''
self.markov = markov
asyncore.dispatcher_with_send.__init__(self, oSock)
self.client = sock
def handle_read(self):
data = self.recv(BUFFER_SIZE)
logging.info("Recv'd {0} bytes from the other side of the tunnel".format(len(data)))
self.read_buffer += data
while "\n" in self.read_buffer:
data, self.read_buffer = self.read_buffer.split("\n", 1)
logging.info("Recv'd obfuscated {0} bytes from the other side of the tunnel".format(len(data)))
if len(data) > 0:
data = zlib.decompress(self.markov.deobfuscate_string(data))
logging.info("Deobfuscated {0} bytes from the other side of the tunnel".format(len(data)))
self.client.send(data)
def handle_close(self):
logging.info("Closing MTunnel socket...")
self.close()
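# Round-trip sketch of the wire framing used on the tunnel (illustrative
# only; assumes a trained MarkovKeyState instance `m` such as the one built
# in __main__ below). Each message is zlib-compressed, Markov-obfuscated and
# newline-delimited:
#
#     framed = m.obfuscate_string(zlib.compress(payload, 9)) + "\n"
#     payload = zlib.decompress(m.deobfuscate_string(framed.rstrip("\n")))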
class MTunnelServer(asyncore.dispatcher):
"""Listens for new client connections and creates new ToClient
objects for each one."""
def __init__(self, markov, localHost, localPort):
"""Creates the socket, binds to clientPort"""
self.markov = markov
asyncore.dispatcher.__init__(self)
self.clientPort = localPort
self.host = localHost
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind((self.host, self.clientPort))
self.listen(5)
def handle_accept(self):
"""Handles new client connections"""
conn, addr = self.accept()
logging.info("{0} connected.".format(addr))
return MTunnelServer.MSendToClient(self.markov, conn)
def handle_close(self):
self.close()
logging.info("Obfuscated SOCKS server socket closed")
def run(self):
logging.info("Obfuscated SOCKS server running...")
self.listen(5)
def die(self, error):
logging.info("Death....")
logging.info("Error: %s" % error)
self.handle_close()
class MSendToClient(asyncore.dispatcher_with_send):
def __init__(self, markov, sock):
self.read_buffer = ''
self.markov = markov
#self.msock = MTunnelServer.ToRemoteServer(markov, self, msock)
self.msock = None
self.state = 0
asyncore.dispatcher_with_send.__init__(self, sock)
self.state_lock = threading.RLock()
def handle_read(self):
data = self.recv(BUFFER_SIZE)
logging.info("Recv'd {0} bytes from the other side of the tunnel".format(len(data)))
self.read_buffer += data
while "\n" in self.read_buffer:
data, self.read_buffer = self.read_buffer.split("\n", 1)
logging.info("Recv'd obfuscated {0} bytes from the other side of the tunnel".format(len(data)))
if len(data) > 0:
data = zlib.decompress(self.markov.deobfuscate_string(data))
logging.info("Deobfuscated {0} bytes from the other side of the tunnel".format(len(data)))
with self.state_lock:
if self.state == 0:
if len(data) > 2:
# All socks4 initial packets start with 4 and end with 0
if data[0] == "\x04" and data[-1] == "\x00":
# Socks4/4a
if len(data) >= 9: # minimum for socks4
if data[1] == "\x01":
# Let's only support stream connections...
port = struct.unpack("!H", data[2:4])[0]
ip = data[4:8]
# Get user string
user = ""
index = 8
while data[index] != "\x00":
user += data[index]
index += 1
if ip[0:3] == "\x00\x00\x00" and ip[3] != "\x00":
# socks4a
index += 1
domain = ""
while data[index] != "\x00":
domain += data[index]
index += 1
try:
ip = socket.gethostbyname(domain)
msock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
msock.connect((ip, port))
self.msock = MTunnelServer.ToRemoteServer(self.markov, self, msock)
self.send(self.markov.obfuscate_string(zlib.compress("\x00\x5a" + struct.pack("!H", port) + socket.inet_aton(ip))) + "\n")
self.state = 0x10
logging.info("Connected to remote server {2} - {0}:{1}".format(ip, port, domain))
except socket.error:
logging.info("Error connecting to remote server {0}:{1}".format(ip, port))
self.send(self.markov.obfuscate_string(zlib.compress("\x00\x5b" + struct.pack("!H", port) + socket.inet_aton(ip))) + "\n")
self.handle_close()
else:
# socks4
try:
ip = socket.inet_ntoa(ip)
msock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
msock.connect((ip, port))
self.msock = MTunnelServer.ToRemoteServer(self.markov, self, msock)
self.send(self.markov.obfuscate_string(zlib.compress("\x00\x5a" + struct.pack("!H", port) + socket.inet_aton(ip))) + "\n")
self.state = 0x10
logging.info("Connected to remote server {0}:{1}".format(ip, port))
except socket.error:
logging.info("Error connecting to remote server {0}:{1}".format(ip, port))
self.send(self.markov.obfuscate_string(zlib.compress("\x00\x5b" + struct.pack("!H", port) + socket.inet_aton(ip))) + "\n")
self.handle_close()
pass
elif data[0] == "\x05":  # compare against the byte; the int 0x5 never matches a str
# Socks5
pass
pass
elif self.state == 0x10:
logging.info("Sending {0} bytes to other side of tunnel".format(len(data)))
self.msock.send(data)
def handle_close(self):
logging.info("Closing client socket...")
self.close()
class ToRemoteServer(asyncore.dispatcher_with_send):
def __init__(self, markov, sock, oSock):
self.markov = markov
asyncore.dispatcher_with_send.__init__(self, oSock)
self.client = sock
def handle_read(self):
data = self.recv(BUFFER_SIZE)
logging.info("Recv'd {0} bytes from remote server".format(len(data)))
data = self.markov.obfuscate_string(zlib.compress(data, 9)) + "\n"
logging.info("Obfuscated into {0} bytes and sending to other side of the tunnel".format(len(data)))
self.client.send(data)
def handle_close(self):
logging.info("Closing remote server socket...")
self.close()
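# Helper sketch (not part of the original module) showing the SOCKS4/4a
# request layout that MSendToClient.handle_read parses above; handy for
# driving the tunnel from a test script. Field values are illustrative.
def build_socks4_connect(ip, port, user=""):
    """SOCKS4 CONNECT: VN=4, CD=1, DSTPORT, DSTIP, then USERID and a NUL terminator."""
    return "\x04\x01" + struct.pack("!H", port) + socket.inet_aton(ip) + user + "\x00"

def build_socks4a_connect(domain, port, user=""):
    """SOCKS4a: a DSTIP of 0.0.0.x (x != 0) signals that a domain name follows the USERID."""
    return ("\x04\x01" + struct.pack("!H", port) + "\x00\x00\x00\x01" +
            user + "\x00" + domain + "\x00")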
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser(
prog=__file__,
description="Acts as both ends of a Markov model obfuscated TCP tunnel",
version="%(prog)s v0.1 by Brian Wallace (@botnet_hunter)",
epilog="%(prog)s v0.1 by Brian Wallace (@botnet_hunter)"
)
parser.add_argument('-s', '--server', default=False, required=False, action='store_true', help="Run as end server")
parser.add_argument('-r', '--remote', default=None, type=str, action='append', help='Remote server to tunnel to')
parser.add_argument('-p', '--port', default=9050, type=int, help='Port to listen on')
parser.add_argument('-P', '--remoteport', default=9999, type=int, help='Port for remote server')
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG)
# Regular expression to split our training files on
split_regex = r'\.'
# File/book to read for training the Markov model (will be read into memory)
training_file = "datasets/98.txt"
# Obfuscating Markov engine
m = MarkovKeyState()
# Read the shared key into memory
logging.info("Reading {0}".format(training_file))
with open(training_file, "r") as f:
text = f.read()
import re
# Split learning data into sentences, in this case, based on periods.
logging.info("Teaching the Markov model")
map(m.learn_sentence, re.split(split_regex, text))
if args.server:
# We are the terminating server
host = "0.0.0.0"
port = int(args.remoteport)
logging.info("Running as server on {0}:{1}".format(host, port))
server = MTunnelServer(m, host, port)
asyncore.loop()
else:
# We are the local server
logging.info("Running as local SOCKS proxy on {0}:{1} connecting to {2}:{3}".format(
'localhost', args.port, args.remote[0], int(args.remoteport)))
server = LocalProxy(m, 'localhost', args.port, args.remote[0], int(args.remoteport))
asyncore.loop()
|
bwall/markovobfuscate
|
mtunnel.py
|
Python
|
mit
| 13,030
|
[
"Brian"
] |
e53edc6f13ac0b1e61c62b8691a1192ad2b1f117f64f5cd74381a0a7b3d8272b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2010, 2011, 2012, 2014.
# SMHI,
# Folkborgsvägen 1,
# Norrköping,
# Sweden
# Author(s):
# Martin Raspaud <martin.raspaud@smhi.se>
# Adam Dybbroe <adam.dybbroe@smhi.se>
# This file is part of mpop.
# mpop is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
# mpop is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# mpop. If not, see <http://www.gnu.org/licenses/>.
"""Very simple netcdf reader for mpop.
"""
# TODO
# - complete projection list and attribute list
# - handle other units than "m" for coordinates
# - handle units for data
# - pluginize
import warnings
from ConfigParser import NoSectionError
import numpy as np
from netCDF4 import Dataset, num2date
from mpop.instruments.visir import VisirCompositer
from mpop.satellites import GenericFactory
from mpop.satout.cfscene import TIME_UNITS
from mpop.utils import get_logger
LOG = get_logger("netcdf4/cf reader")
# To be complete, get from appendix F of cf conventions
MAPPING_ATTRIBUTES = {'grid_mapping_name': "proj",
'standard_parallel': ["lat_1", "lat_2"],
'latitude_of_projection_origin': "lat_0",
'longitude_of_projection_origin': "lon_0",
'longitude_of_central_meridian': "lon_0",
'perspective_point_height': "h",
'false_easting': "x_0",
'false_northing': "y_0",
'semi_major_axis': "a",
'semi_minor_axis': "b",
'inverse_flattening': "rf",
'ellipsoid': "ellps", # not in CF conventions...
}
# To be completed, get from appendix F of cf conventions
PROJNAME = {"vertical_perspective": "nsper",
"geostationary": "geos",
"albers_conical_equal_area": "aea",
"azimuthal_equidistant": "aeqd",
"equirectangular": "eqc",
"transverse_mercator": "tmerc",
"stereographic": "stere",
"general_oblique_transformation": "ob_tran"
}
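# Illustrative example (attribute values invented): a CF "geostationary"
# grid mapping variable with
#     grid_mapping_name = "geostationary"
#     longitude_of_projection_origin = 0.0
#     perspective_point_height = 35785831.0
# is translated via the two tables above into the proj4 dict
#     {"proj": "geos", "lon_0": 0.0, "h": 35785831.0}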
def _load02(filename):
"""Load data from a netcdf4 file, cf-satellite v0.2 (2012-02-03).
"""
rootgrp = Dataset(filename, 'r')
# processed variables
processed = set()
# Currently MPOP does not like unicode (so much).
satellite_name, satellite_number = [str(i) for i in rootgrp.platform.rsplit("-", 1)]
time_slot = rootgrp.variables["time"].getValue()[0]
time_slot = num2date(time_slot, TIME_UNITS)
processed |= set(["time"])
try:
service = str(rootgrp.service)
except AttributeError:
service = ""
instrument_name = str(rootgrp.instrument)
try:
orbit = str(rootgrp.orbit)
except AttributeError:
orbit = None
try:
scene = GenericFactory.create_scene(satellite_name,
satellite_number,
instrument_name,
time_slot,
orbit,
None,
service)
except NoSectionError:
scene = VisirCompositer(time_slot=time_slot)
scene.satname = satellite_name
scene.number = satellite_number
scene.service = service
dim_chart = {}
for var_name, var in rootgrp.variables.items():
varname = None
try:
varname = var.standard_name
except AttributeError:
try:
varname = var.long_name
except AttributeError:
pass
if varname in ["band_data", "Band data"]:
LOG.debug("Found some data: " + var_name)
dims = var.dimensions
for dim in dims:
dim_chart[dim] = var_name
for cnt, dim in enumerate(dims):
if dim.startswith("band"):
break
data = var
data.set_auto_maskandscale(False)
area = None
try:
area_var_name = getattr(var,"grid_mapping")
area_var = rootgrp.variables[area_var_name]
proj4_dict = {}
for attr, projattr in MAPPING_ATTRIBUTES.items():
try:
the_attr = getattr(area_var, attr)
if projattr == "proj":
proj4_dict[projattr] = PROJNAME[the_attr]
elif(isinstance(projattr, (list, tuple))):
try:
for i, subattr in enumerate(the_attr):
proj4_dict[projattr[i]] = subattr
except TypeError:
proj4_dict[projattr[0]] = the_attr
else:
proj4_dict[projattr] = the_attr
except AttributeError:
pass
y_name, x_name = dims[:cnt] + dims[cnt + 1:]
x__ = rootgrp.variables[x_name][:]
y__ = rootgrp.variables[y_name][:]
if proj4_dict["proj"] == "geos":
x__ *= proj4_dict["h"]
y__ *= proj4_dict["h"]
x_pixel_size = abs((np.diff(x__)).mean())
y_pixel_size = abs((np.diff(y__)).mean())
llx = x__[0] - x_pixel_size / 2.0
lly = y__[-1] - y_pixel_size / 2.0
urx = x__[-1] + x_pixel_size / 2.0
ury = y__[0] + y_pixel_size / 2.0
area_extent = (llx, lly, urx, ury)
try:
# create the pyresample areadef
from pyresample.geometry import AreaDefinition
area = AreaDefinition("myareaid", "myareaname",
"myprojid", proj4_dict,
len(x__), len(y__),
area_extent)
except ImportError:
LOG.warning("Pyresample not found, "
"cannot load area descrition")
processed |= set([area_var_name, x_name, y_name])
LOG.debug("Grid mapping found and used.")
except AttributeError:
LOG.debug("No grid mapping found.")
try:
area_var = getattr(var,"coordinates")
coordinates_vars = area_var.split(" ")
lons = None
lats = None
for coord_var_name in coordinates_vars:
coord_var = rootgrp.variables[coord_var_name]
units = getattr(coord_var, "units")
if(coord_var_name.lower().startswith("lon") or
units.lower().endswith("east") or
units.lower().endswith("west")):
lons = coord_var[:]
elif(coord_var_name.lower().startswith("lat") or
units.lower().endswith("north") or
units.lower().endswith("south")):
lats = coord_var[:]
if lons is not None and lats is not None:
try:
from pyresample.geometry import SwathDefinition
area = SwathDefinition(lons=lons, lats=lats)
except ImportError:
LOG.warning("Pyresample not found, "
"cannot load area descrition")
processed |= set(coordinates_vars)
LOG.debug("Lon/lat found and used.")
except AttributeError:
LOG.debug("No lon/lat found.")
names = rootgrp.variables[dim][:]
scales = data.scale_factor
offsets = data.add_offset
if len(names) == 1:
scales = np.array([scales])
offsets = np.array([offsets])
LOG.info("Scales and offsets: %s %s %s" % (str(names), str(scales), str(offsets)))
for nbr, name in enumerate(names):
name = str(name)
try:
if cnt == 0:
chn_data = data[nbr, :, :].squeeze()
if cnt == 1:
chn_data = data[:, nbr, :].squeeze()
if cnt == 2:
chn_data = data[:, :, nbr].squeeze()
scene[name] = (np.ma.masked_equal(chn_data, data._FillValue)
* scales[nbr] + offsets[nbr])
scene[name].info["units"] = var.units
except KeyError:
from mpop.channel import Channel
scene.channels.append(Channel(name))
if area is not None:
scene[name].area = area
processed |= set([var_name, dim])
non_processed = set(rootgrp.variables.keys()) - processed
for var_name in non_processed:
var = rootgrp.variables[var_name]
if not (hasattr(var, "standard_name") or
hasattr(var, "long_name")):
LOG.info("Delayed processing of " + var_name)
continue
dims = var.dimensions
if len(dims) != 1:
LOG.info("Don't know what to do with " + var_name)
continue
dim = dims[0]
if var.standard_name == "radiation_wavelength":
names = rootgrp.variables[dim][:]
for nbr, name in enumerate(names):
name = str(name)
scene[name].wavelength_range[1] = var[nbr]
try:
bnds = rootgrp.variables[var.bounds][:]
for nbr, name in enumerate(names):
name = str(name)
scene[name].wavelength_range[0] = bnds[nbr, 0]
scene[name].wavelength_range[2] = bnds[nbr, 1]
processed |= set([var.bounds])
except AttributeError:
pass
processed |= set([var_name])
non_processed = set(rootgrp.variables.keys()) - processed
if len(non_processed) > 0:
LOG.warning("Remaining non-processed variables: " + str(non_processed))
return scene
def load_from_nc4(filename):
"""Load data from a netcdf4 file, cf-satellite v0.1
"""
rootgrp = Dataset(filename, 'r')
try:
rootgrp.satellite_number
warnings.warn("You are loading old style netcdf files...", DeprecationWarning)
except AttributeError:
return _load02(filename)
if not isinstance(rootgrp.satellite_number, str):
satellite_number = "%02d" % rootgrp.satellite_number
else:
satellite_number = str(rootgrp.satellite_number)
time_slot = rootgrp.variables["time"].getValue()[0]
time_slot = num2date(time_slot, TIME_UNITS)
service = str(rootgrp.service)
satellite_name = str(rootgrp.satellite_name)
instrument_name = str(rootgrp.instrument_name)
try:
orbit = str(rootgrp.orbit)
except AttributeError:
orbit = None
try:
scene = GenericFactory.create_scene(satellite_name,
satellite_number,
instrument_name,
time_slot,
orbit,
None,
service)
except NoSectionError:
scene = VisirCompositer(time_slot=time_slot)
scene.satname = satellite_name
scene.number = satellite_number
scene.service = service
for var_name, var in rootgrp.variables.items():
area = None
if var_name.startswith("band_data"):
resolution = var.resolution
str_res = str(int(resolution)) + "m"
names = rootgrp.variables["bandname"+str_res][:]
data = var[:, :, :].astype(var.dtype)
data = np.ma.masked_outside(data,
var.valid_range[0],
var.valid_range[1])
try:
area_var = getattr(var,"grid_mapping")
area_var = rootgrp.variables[area_var]
proj4_dict = {}
for attr, projattr in MAPPING_ATTRIBUTES.items():
try:
the_attr = getattr(area_var, attr)
if projattr == "proj":
proj4_dict[projattr] = PROJNAME[the_attr]
elif(isinstance(projattr, (list, tuple))):
try:
for i, subattr in enumerate(the_attr):
proj4_dict[projattr[i]] = subattr
except TypeError:
proj4_dict[projattr[0]] = the_attr
else:
proj4_dict[projattr] = the_attr
except AttributeError:
pass
x__ = rootgrp.variables["x"+str_res][:]
y__ = rootgrp.variables["y"+str_res][:]
x_pixel_size = abs((x__[1] - x__[0]))
y_pixel_size = abs((y__[1] - y__[0]))
llx = x__[0] - x_pixel_size / 2.0
lly = y__[-1] - y_pixel_size / 2.0
urx = x__[-1] + x_pixel_size / 2.0
ury = y__[0] + y_pixel_size / 2.0
area_extent = (llx, lly, urx, ury)
try:
# create the pyresample areadef
from pyresample.geometry import AreaDefinition
area = AreaDefinition("myareaid", "myareaname",
"myprojid", proj4_dict,
data.shape[1], data.shape[0],
area_extent)
except ImportError:
LOG.warning("Pyresample not found, "
"cannot load area descrition")
except AttributeError:
LOG.debug("No grid mapping found.")
try:
area_var = getattr(var,"coordinates")
coordinates_vars = area_var.split(" ")
lons = None
lats = None
for coord_var_name in coordinates_vars:
coord_var = rootgrp.variables[coord_var_name]
units = getattr(coord_var, "units")
if(coord_var_name.lower().startswith("lon") or
units.lower().endswith("east") or
units.lower().endswith("west")):
lons = coord_var[:]
elif(coord_var_name.lower().startswith("lat") or
units.lower().endswith("north") or
units.lower().endswith("south")):
lats = coord_var[:]
if lons is not None and lats is not None:  # truth-testing numpy arrays raises ValueError
try:
from pyresample.geometry import SwathDefinition
area = SwathDefinition(lons=lons, lats=lats)
except ImportError:
LOG.warning("Pyresample not found, "
"cannot load area descrition")
except AttributeError:
LOG.debug("No lon/lat found.")
for i, name in enumerate(names):
name = str(name)
if var.dimensions[0].startswith("band"):
chn_data = data[i, :, :]
elif var.dimensions[1].startswith("band"):
chn_data = data[:, i, :]
elif var.dimensions[2].startswith("band"):
chn_data = data[:, :, i]
else:
raise ValueError("Invalid dimension names for band data")
try:
scene[name] = (chn_data *
rootgrp.variables["scale"+str_res][i] +
rootgrp.variables["offset"+str_res][i])
#FIXME complete this
#scene[name].info
except KeyError:
# build the channel on the fly
from mpop.channel import Channel
wv_var = rootgrp.variables["nominal_wavelength"+str_res]
wb_var = rootgrp.variables[getattr(wv_var, "bounds")]
minmax = wb_var[i]
scene.channels.append(Channel(name,
resolution,
(minmax[0],
wv_var[i][0],
minmax[1])))
scene[name] = (chn_data *
rootgrp.variables["scale"+str_res][i] +
rootgrp.variables["offset"+str_res][i])
if area is not None:
scene[name].area = area
area = None
for attr in rootgrp.ncattrs():
scene.info[attr] = getattr(rootgrp, attr)
scene.add_to_history("Loaded from netcdf4/cf by mpop")
return scene
|
mraspaud/mpop
|
mpop/satin/nc_reader.py
|
Python
|
gpl-3.0
| 18,241
|
[
"NetCDF"
] |
5f263669bbc13a05190480b5ae9cadf103b5a00ded92ff0b157965d98cdc3cb9
|
import pytest
from pysisyphus.calculators import ORCA
from pysisyphus.calculators.PySCF import PySCF
from pysisyphus.helpers import geom_loader
from pysisyphus.io import geom_from_hessian
from pysisyphus.testing import using
from pysisyphus.thermo import (
print_thermoanalysis,
get_thermoanalysis_from_hess_h5,
)
from pysisyphus.run import run_from_dict
@pytest.mark.skip
@using("thermoanalysis")
def test_thermoanalysis(this_dir):
# H2O HF/321-G/RIJCOSX ORCA5
hess_fn = this_dir / "h2o_hessian.h5"
thermo = get_thermoanalysis_from_hess_h5(hess_fn, point_group="c2v")
assert thermo.M == pytest.approx(18.01528)
assert thermo.dG == pytest.approx(0.00412717, abs=1e-6)
@pytest.fixture
def hcn_geom():
"""Optimized at HF/STO-3G"""
geom = geom_loader("lib:hcn_sto3g_freq_ref.xyz")
return geom
@pytest.mark.skip
@using("pyscf")
@using("thermoanalysis")
def test_get_thermoanalysis(hcn_geom):
hcn_geom.set_calculator(PySCF(basis="sto3g", verbose=4, pal=2))
thermo = hcn_geom.get_thermoanalysis()
print_thermoanalysis(thermo)
assert thermo.dG == pytest.approx(-0.00029409, abs=1e-6)
@pytest.mark.skip
@using("orca")
@using("thermoanalysis")
def test_hcn_thermo(hcn_geom):
hcn_geom.set_calculator(ORCA(keywords="HF sto-3g"))
thermo = hcn_geom.get_thermoanalysis()
print_thermoanalysis(thermo, geom=hcn_geom)
assert thermo.dG == pytest.approx(-0.00029614, abs=1e-5)
@using("pyscf")
@using("thermoanalysis")
def test_opt_h2o_do_hess():
T = 398.15
run_dict = {
"geom": {
"type": "redund",
"fn": "lib:h2o.xyz",
},
"calc": {
"type": "pyscf",
"basis": "sto3g",
"pal": 2,
"verbose": 0,
},
"opt": {
"thresh": "gau",
"do_hess": True,
"T": T,
},
}
run_result = run_from_dict(run_dict)
thermo = run_result.opt_geom.get_thermoanalysis(T=T)
assert thermo.dG == pytest.approx(-0.00164376)
@using("thermoanalysis")
def test_print_thermo(this_dir):
thermo, geom = get_thermoanalysis_from_hess_h5(
this_dir / "hcn_orca_b973c_hessian.h5", return_geom=True
)
print_thermoanalysis(thermo, geom=geom)
@using("thermoanalysis")
@pytest.mark.parametrize(
"id_, dG_ref", (
# Ref values from ORCA logfiles
(24, 0.62709533),
(63, 0.62781152),
(84, 0.62876245),
)
)
def test_irc_h5(this_dir, id_, dG_ref):
h5 = this_dir / f"irc_000.0{id_}.orca.h5"
geom = geom_from_hessian(h5)
thermo = geom.get_thermoanalysis()
print_thermoanalysis(thermo)
assert thermo.dG == pytest.approx(dG_ref, abs=2.5e-3)
|
eljost/pysisyphus
|
tests/test_thermo/test_thermo.py
|
Python
|
gpl-3.0
| 2,710
|
[
"ORCA",
"PySCF"
] |
04a2a3b948a3a0ea72da31373ed01a030de78ff972eb1540fbdcd2aed4b67b73
|
# This software is distributed under BSD 3-clause license (see LICENSE file).
#
# Authors: Heiko Strathmann
#
from numpy import *
from pylab import *
from scipy import *
from shogun import RealFeatures
from shogun import MeanShiftDataGenerator
from shogun import LinearTimeMMD, MMDKernelSelectionOpt
from shogun import PERMUTATION, MMD1_GAUSSIAN
from shogun import EuclideanDistance
from shogun import Statistics, Math
import shogun as sg
# for nice plotting that fits into our shogun tutorial
import latex_plot_inits
def linear_time_mmd_graphical():
# parameters, change to get different results
m=1000 # set to 10000 for a good test result
dim=2
# setting the difference of the first dimension smaller makes a harder test
difference=1
# number of samples taken from null and alternative distribution
num_null_samples=150
# streaming data generator for mean shift distributions
gen_p=MeanShiftDataGenerator(0, dim)
gen_q=MeanShiftDataGenerator(difference, dim)
# use the median kernel selection
# create combined kernel with Gaussian kernels inside (shogun's Gaussian kernel is parametrized by width rather than sigma)
# compute median data distance in order to use for Gaussian kernel width
# 0.5*median_distance normally (factor two in Gaussian kernel)
# However, shoguns kernel width is different to usual parametrization
# Therefore 0.5*2*median_distance^2
# Use a subset of data for that, only 200 elements. Median is stable
sigmas=[2**x for x in range(-3,10)]
widths=[x*x*2 for x in sigmas]
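# e.g. sigma = 2 gives width = 2*2**2 = 8 under this parametrization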
print "kernel widths:", widths
combined=sg.kernel("CombinedKernel")
for i in range(len(sigmas)):
combined.append_kernel(sg.kernel("GaussianKernel", log_width=widths[i]))
# mmd instance using streaming features, blocksize of 10000
block_size=1000
mmd=LinearTimeMMD(combined, gen_p, gen_q, m, block_size)
# kernel selection instance (this can easily replaced by the other methods for selecting
# single kernels
selection=MMDKernelSelectionOpt(mmd)
# perform kernel selection
kernel=selection.select_kernel()
mmd.set_kernel(kernel)
print "selected kernel width:", kernel.get_width()
# sample alternative distribution, stream ensures different samples each run
alt_samples=zeros(num_null_samples)
for i in range(len(alt_samples)):
alt_samples[i]=mmd.compute_statistic()
# sample from null distribution
# bootstrapping, biased statistic
mmd.set_null_approximation_method(PERMUTATION)
mmd.set_num_null_samples(num_null_samples)
null_samples_boot=mmd.sample_null()
# fit normal distribution to null and sample a normal distribution
mmd.set_null_approximation_method(MMD1_GAUSSIAN)
variance=mmd.compute_variance_estimate()
null_samples_gaussian=normal(0,sqrt(variance),num_null_samples)
# to plot data, sample a few examples from stream first
features=gen_p.get_streamed_features(m)
features=features.create_merged_copy(gen_q.get_streamed_features(m))
data=features.get_feature_matrix()
# plot
figure()
# plot data of p and q
subplot(2,3,1)
grid(True)
gca().xaxis.set_major_locator( MaxNLocator(nbins = 4) ) # reduce number of x-ticks
gca().yaxis.set_major_locator( MaxNLocator(nbins = 4) ) # reduce number of x-ticks
plot(data[0][0:m], data[1][0:m], 'ro', label='$x$')
plot(data[0][m+1:2*m], data[1][m+1:2*m], 'bo', label='$x$', alpha=0.5)
title('Data, shift in $x_1$='+str(difference)+'\nm='+str(m))
xlabel('$x_1, y_1$')
ylabel('$x_2, y_2$')
# histogram of first data dimension and pdf
subplot(2,3,2)
grid(True)
gca().xaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
gca().yaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
hist(data[0], bins=50, alpha=0.5, facecolor='r', normed=True)
hist(data[1], bins=50, alpha=0.5, facecolor='b', normed=True)
xs=linspace(min(data[0])-1,max(data[0])+1, 50)
plot(xs,normpdf( xs, 0, 1), 'r', linewidth=3)
plot(xs,normpdf( xs, difference, 1), 'b', linewidth=3)
xlabel('$x_1, y_1$')
ylabel('$p(x_1), p(y_1)$')
title('Data PDF in $x_1, y_1$')
# compute threshold for test level
alpha=0.05
null_samples_boot.sort()
null_samples_gaussian.sort()
thresh_boot=null_samples_boot[int(floor(len(null_samples_boot)*(1-alpha)))]
thresh_gaussian=null_samples_gaussian[int(floor(len(null_samples_gaussian)*(1-alpha)))]
# type I error = fraction of null samples at or above the rejection threshold
type_one_error_boot=sum(null_samples_boot>=thresh_boot)/float(num_null_samples)
type_one_error_gaussian=sum(null_samples_gaussian>=thresh_gaussian)/float(num_null_samples)
# plot alternative distribution with threshold
subplot(2,3,4)
grid(True)
gca().xaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
gca().yaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
hist(alt_samples, 20, normed=True);
axvline(thresh_boot, 0, 1, linewidth=2, color='red')
type_two_error=sum(alt_samples<thresh_boot)/float(num_null_samples)
title('Alternative Dist.\n' + 'Type II error is ' + str(type_two_error))
# compute range for all null distribution histograms
hist_range=[min([min(null_samples_boot), min(null_samples_gaussian)]), max([max(null_samples_boot), max(null_samples_gaussian)])]
# plot null distribution with threshold
subplot(2,3,3)
grid(True)
gca().xaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
gca().yaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
hist(null_samples_boot, 20, range=hist_range, normed=True);
axvline(thresh_boot, 0, 1, linewidth=2, color='red')
title('Sampled Null Dist.\n' + 'Type I error is ' + str(type_one_error_boot))
# plot null distribution gaussian
subplot(2,3,5)
grid(True)
gca().xaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
gca().yaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
hist(null_samples_gaussian, 20, range=hist_range, normed=True);
axvline(thresh_gaussian, 0, 1, linewidth=2, color='red')
title('Null Dist. Gaussian\nType I error is ' + str(type_one_error_gaussian))
# pull plots a bit apart
subplots_adjust(hspace=0.5)
subplots_adjust(wspace=0.5)
if __name__=='__main__':
linear_time_mmd_graphical()
show()
|
lambday/shogun
|
examples/undocumented/python/graphical/statistics_linear_time_mmd.py
|
Python
|
bsd-3-clause
| 6,089
|
[
"Gaussian"
] |
398d85b41d75303d53542b93e69e524365503fc1e94248b06fb4be6a6187b47e
|
"""
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import matplotlib.pyplot as plt
import numpy as np
from sklearn.ensemble import AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
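# Optional follow-up sketch: AdaBoostRegressor.staged_predict yields the
# ensemble prediction after each boosting stage, which makes the
# "more boosts, more detail" behaviour described above easy to visualise:
#
#     for i, y_stage in enumerate(regr_2.staged_predict(X), start=1):
#         if i in (1, 10, 100, 300):
#             plt.plot(X, y_stage, label="stage %d" % i)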
|
DailyActie/Surrogate-Model
|
01-codes/scikit-learn-master/examples/ensemble/plot_adaboost_regression.py
|
Python
|
mit
| 1,530
|
[
"Gaussian"
] |
e7e1a5889d7b71af401eac4f89d397d816a048c5fce4eeb51bf731fa7291de5b
|
"""Utility modules for the VTK-Python wrappers."""
__all__ = ['algorithms', 'dataset_adapter']
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Wrapping/Python/vtk/numpy_interface/__init__.py
|
Python
|
bsd-3-clause
| 99
|
[
"VTK"
] |
8ef711391b56e94900510260c1b494fb8cbde65d6227c39a24ca3f1c82eab280
|
# -*- encoding: utf-8 -*-
import random
import string
from django.contrib.auth.models import User, Group, Permission
from django.core.management.base import BaseCommand, CommandError
import click
from event.models import *
class Command(BaseCommand):
help = 'Creates new Make Things event'
def create_group(self):
group = Group.objects.create(name='Organizers')
permissions = [
'add_faq', 'change_faq', 'delete_faq',
'add_sponsor', 'change_sponsor', 'delete_sponsor',
'change_user',
'change_website',
'add_workshop', 'change_workshop', 'delete_workshop',
'add_workshopleader', 'change_workshopleader', 'delete_workshopleader'
]
for permission in permissions:
perm_obj = Permission.objects.get(codename=permission)
group.permissions.add(perm_obj)
group.save()
return group
def handle(self, *args, **options):
#Basics
click.echo("Hello sir or madam! My name is Verynicebot and I'm here to help you create your new Make Things event. So exciting!")
click.echo("Let's start with some basics.")
city = click.prompt("What is the name of the city?")
country = click.prompt("What is the name of the country?")
date = click.prompt("What is the date of the event? (Format: YYYY-MM-DD)")
url = click.prompt("What should be the URL of website? makethings.io/xxxx")
click.echo(u"Ok, got that! Your new event will happen in {0}, {1} on {2}".format(city, country, date))
#Main organizer
team = []
click.echo("Now let's talk about the team. First the main organizer:")
main_name = click.prompt("First and last name")
main_email = click.prompt("E-mail address")
try:
team.append({'first_name': main_name.split(' ')[0], 'last_name': main_name.split(' ')[1], 'email': main_email})
except IndexError:
team.append({'first_name': main_name, 'last_name': '', 'email': main_email})
click.echo(u"All right, the main organizer of Make Things in {0} is {1} ({2})".format(city, main_name, main_email))
#Team
add_team = click.prompt("Do you want to add additional team members? y/n")
i = 1
while add_team != 'n':
i += 1
name = click.prompt("First and last name of #{0} member".format(i))
email = click.prompt("E-mail address of #{0} member".format(i))
if len(name) > 0:
try:
team.append({'first_name': name.split(' ')[0], 'last_name': name.split(' ')[1], 'email': email})
except IndexError:
    team.append({'first_name': name, 'last_name': '', 'email': email})
click.echo(u"All right, the #{0} team member of Make Things in {1} is {2} ({3})".format(i, city, name, email))
add_team = click.prompt("Do you want to add additional team members? y/n")
#Save data
click.echo("OK! That's it. Now I'll create your event.")
click.echo("Here is an access info for team members:")
main_organizer = None
members = []
for member in team:
member['password'] = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8))
user = User.objects.create(email=member['email'],
first_name=member['first_name'],
last_name=member['last_name'],
is_active=True,
is_staff=True)
user.set_password(member['password'])
user.save()
try:
    group = Group.objects.get(name='Organizers')
except Group.DoesNotExist:
    group = self.create_group()
group.user_set.add(user)
if not main_organizer:
main_organizer = user
members.append(user)
click.echo(u"{0} - email: {1} password: {2}".format(member['first_name'], member['email'], member['password']))
event = Event.objects.create(city=city, country=country, main_organizer=main_organizer)
website = Website.objects.create(event=event, url=url, date=date, status=0, about_title=u"Make Things in {0}".format(city), organizers_title=u"Make Things in {0} is organized by".format(city))
for member in members:
website.team.add(member)
member.event = event
member.save()
click.echo(u"Website is ready here: http://makethings.io/{0}".format(url))
click.echo("Congrats on yet another event!")
|
Makerland/makethings.io
|
core/management/commands/newevent.py
|
Python
|
gpl-3.0
| 4,726
|
[
"exciting"
] |
94893bf593f6dc750f91485f767bfd520451e2c4cf11a7320bd8a1925a82f48b
|
#!/usr/bin/python
"""
A simple routine to load in a LIGGGHTS hybrid dump file containing
contact and contact force data and convert into a .vtk unstructured
grid which can be used to visualise the force network.
evtk is used to write binary VTK files:
https://bitbucket.org/pauloh/pyevtk
The pizza.py bdump command is used to handle LIGGGHTS dump files and
therefore PYTHONPATH must include the pizza/src location.
NOTE: bdump is NOT included in granular pizza, and should be taken
from the standard LAMMPS pizza package!
NOTE: it is impossible to tell from the bdump header which values
have been requested in the compute, so check that your compute
and dump match the format here - this will be checked in future!
"""
from pyevtk.vtk import VtkFile, VtkGroup, VtkUnstructuredGrid
from bdump import bdump  # pizza.py bdump (see note above); pizza/src must be on PYTHONPATH
import numpy as np
import sys
import os
import errno
# TODO: use a try/except here to check for missing modules, and fallback to ASCII VTK if evtk not found
# TODO: ask for timestep or timestep range as input (code is NOT efficient and large files = long runtimes!)
# TODO: write celldata for contact area and heat flux (if present)
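# Untested sketch of the LIGGGHTS input-script lines this reader expects
# (the names "fc"/"dmp" and the dump interval are placeholders; check them
# against your own compute pair/gran/local and dump local definitions):
#
#     compute fc all pair/gran/local pos id force
#     dump    dmp all local 1000 dump.forcechain c_fc[1] c_fc[2] c_fc[3] ...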
def dump2force(filename, inputpath, outputdir):
forcedata = bdump(filename, 0)
groupfile = fileprefix
groupfile = os.path.join(inputdir, groupfile)
groupfile = VtkGroup(groupfile)
fileindex = 0
timestep = forcedata.next()
# check that we have the right number of colums (>11)
#
# NOTE: the first timesteps are often blank, and then natoms returns 0,
# so this doesn't really work...
#
if forcedata.snaps[fileindex].natoms !=0 and len(forcedata.snaps[0].atoms[0]) < 12:
print("Error - dump file requires at least all parameters from a compute pair/gran/local id pos force (12 in total)")
sys.exit()
# loop through available timesteps
while timestep >= 0:
# default data are stored as pos1 (3) pos2 (3) id1 id2 periodic_flag
# force (3) -> 12 columns
#
# if contactArea is enabled, that's one more (13) and heatflux (14)
#
# assign names to atom columns (1-N)
forcedata.map(
1, "x1",
2, "y1",
3, "z1",
4, "x2",
5, "y2",
6, "z2",
7, "id1",
8, "id2",
9, "periodic",
10, "fx",
11, "fy",
12, "fz")
# check for contact data (some timesteps may have no particles in contact)
#
# NB. if one loads two datasets into ParaView with defined timesteps, but
# in which one datasets has some missing, data for the previous timestep are
# still displayed - this means that it is better here to generate "empty"
# files for these timesteps.
if forcedata.snaps[fileindex].natoms == 0:
vtufile = fileprefix+'_'+str(timestep)+'.vtu'
vtufile = os.path.join(outputdir, vtufile)
with open(vtufile, 'w') as vtuwrite:
    vtuwrite.write("""<?xml version="1.0"?>
<VTKFile byte_order="LittleEndian" version="0.1" type="UnstructuredGrid">
<UnstructuredGrid>
<Piece NumberOfCells="0" NumberOfPoints="0">
<Cells>
<DataArray NumberOfComponents="1" offset="0" type="Int64" Name="connectivity"/>
<DataArray NumberOfComponents="1" offset="0" type="Int64" Name="offsets"/>
<DataArray NumberOfComponents="1" offset="0" type="Int64" Name="types"/>
</Cells>
</Piece>
</UnstructuredGrid>
</VTKFile>""")
else:
# number of cells = number of interactions (i.e. entries in the dump file)
ncells = len(forcedata.snaps[fileindex].atoms)
# number of periodic interactions
periodic = np.array(forcedata.snaps[fileindex].atoms[:, forcedata.names["periodic"]], dtype=bool)
nperiodic = sum(periodic)
# number of non-periodic interactions (which will be written out)
nconnex = ncells - nperiodic
# extract the IDs as an array of integers
# np.long was removed in recent NumPy; use an explicit 64-bit integer type
id1 = np.array(forcedata.snaps[fileindex].atoms[:, forcedata.names["id1"]], dtype=np.int64)
id2 = np.array(forcedata.snaps[fileindex].atoms[:, forcedata.names["id2"]], dtype=np.int64)
# and convert to lists
id1 = id1.tolist()
id2 = id2.tolist()
# concatenate into a single list
ids = []
ids = id1[:]
ids.extend(id2)
# convert to a set and back to remove duplicates, then sort
ids = list(set(ids))
ids.sort()
# number of points = number of unique IDs (particles)
npoints = len(ids)
# create empty arrays to hold x,y,z data
x = np.zeros(npoints, dtype=np.float64)
y = np.zeros(npoints, dtype=np.float64)
z = np.zeros(npoints, dtype=np.float64)
print('Timestep:', str(timestep), 'npoints=', str(npoints),
'ncells=', str(ncells), 'nperiodic=', nperiodic)
# Point data = location of each unique particle
#
# The order of this data is important since we use the position of each particle
# in this list to reference particle connectivity! We will use the order of the
# sorted ids array to determine this.
counter = 0
for id in ids:
if id in id1:
index = id1.index(id)
xtemp, ytemp, ztemp = forcedata.snaps[fileindex].atoms[index, forcedata.names["x1"]],\
forcedata.snaps[fileindex].atoms[index, forcedata.names["y1"]],\
forcedata.snaps[fileindex].atoms[index, forcedata.names["z1"]]
else:
index = id2.index(id)
xtemp, ytemp, ztemp = forcedata.snaps[fileindex].atoms[index, forcedata.names["x2"]],\
forcedata.snaps[fileindex].atoms[index, forcedata.names["y2"]],\
forcedata.snaps[fileindex].atoms[index, forcedata.names["z2"]]
x[counter] = xtemp
y[counter] = ytemp
z[counter] = ztemp
counter += 1
# Now create the connectivity list - this corresponds to pairs of IDs, but referencing
# the order of the ids array, so now we loop through 0..ncells and have to connect
# id1 and id2, so I need to see where in ids these correspond to
# If the periodic flag is set for a given interactions, DO NOT connect the points
# (to avoid lines that cross the simulation domain)
# Mask out periodic interactions from the cell (connectivity) array
# newList = [word for (word, mask) in zip(s,b) if mask]
id1_masked = [ident for (ident, mask) in zip(id1, np.invert(periodic)) if mask]
id2_masked = [ident for (ident, mask) in zip(id2, np.invert(periodic)) if mask]
# create an empty array to hold particle pairs
connections = np.zeros(2 * nconnex, dtype=int)
for pair in range(nconnex):
connections[2 * pair], connections[2 * pair + 1] =\
ids.index(id1_masked[pair]), ids.index(id2_masked[pair])
# The offset array is simply generated from 2*(1..ncells)
offset = (np.arange(nconnex, dtype=int) + 1) * 2
# The type array is simply ncells x 3 (i.e. a VTKLine type)
celltype = np.ones(nconnex, dtype=int) * 3
# Finally we need force data for each cell
force = np.sqrt( np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["fx"]],dtype=np.float64)**2 + \
np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["fy"]],dtype=np.float64)**2 + \
np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["fz"]],dtype=np.float64)**2 )
# And, optionally, contact area and heat flux (using the same connectivity)
# area = np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["area"]],dtype=np.float64)
# heatflux = np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["heatflux"]],dtype=np.float64)
# Now we have enough data to create the file:
# Points - (x,y,z) (npoints)
# Cells
# Connectivity - connections (nconnex,2)
# Offset - offset (nconnex)
# type - celltype (nconnex)
# Celldata
# force (nconnex)
# area (nconnex)
# heatflux (nconnex)
# create a VTK unstructured grid (.vtu) file
vtufile = fileprefix+'_'+str(timestep)
vtufile = os.path.join(outputdir, vtufile)
w = VtkFile(vtufile, VtkUnstructuredGrid)
vtufile += '.vtu'
w.openGrid()
w.openPiece(npoints=npoints, ncells=nconnex)
# Set up Points (x,y,z) data XML
w.openElement("Points")
w.addData("points", (x, y, z))
w.closeElement("Points")
# Set up Cell data
w.openElement("Cells")
w.addData("connectivity", connections)
w.addData("offsets", offset)
w.addData("types", celltype)
w.closeElement("Cells")
# Set up force data
w.openData("Cell")
w.addData("force", force)
# w.addData("area", area)
# w.addData("heatflux", heatflux)
w.closeData("Cell")
# and contact area
# w.openData("Cell", scalars = "area")
# w.addData("area", area)
# w.closeData("Cell")
# and heat flux
# w.openData("Cell", scalars = "heatflux")
# w.addData("heatflux", heatflux)
# w.closeData("Cell")
# Wrap up
w.closePiece()
w.closeGrid()
# Append binary data
w.appendData( (x,y,z) )
w.appendData(connections).appendData(offset).appendData(celltype)
# w.appendData(force).appendData(area).appendData(heatflux)
w.appendData(force)
w.save()
# Add this file to the group of all timesteps
groupfile.addFile(filepath=os.path.relpath(vtufile, inputdir), sim_time=timestep)
fileindex += 1
timestep = forcedata.next()
# end of main loop - close group file
groupfile.save()
if __name__ == "__main__":
# Check for command line arguments
if len(sys.argv) != 2:
sys.exit('Usage: dump2forcenetwork.py <filename>, where filename is typically dump.<runname>')
elif len(sys.argv) == 2: # we have one input param, that should be parsed as a filename
filename = str(sys.argv[1])
if not os.path.isfile(filename):
sys.exit('File ' + filename + ' does not exist!')
splitname = filename.split('.')
if len(splitname) == 2 and splitname[0].lower() == 'dump':
fileprefix = splitname[1]
else:
fileprefix = splitname[0]
inputpath = os.path.abspath(filename)
inputdir = os.path.split(inputpath)[0]
# create a sub-directory for the output .vtu files
outputdir = os.path.join(inputdir, fileprefix)
try:
os.mkdir(outputdir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
# Read in the dump file - since we can have many contacts (i.e. >> nparticles)
# and many timesteps I will deal with one timestep at a time in memory,
# write to the appropriate .vtu file for a single timestep, then move on.
dump2force(filename, inputpath, outputdir)
|
msbentley/liggghts-utils
|
liggghts_utils/dump2force.py
|
Python
|
mit
| 11,953
|
[
"LAMMPS",
"ParaView",
"VTK"
] |
5df25101551e692e34769deb6a50faed2e3378135cfd9d5ba5453045339cb5bf
|
# cycle through NaturalReaderSpeech voices
# with i2c connected jaw servo
# Author: Acapulco Rolf
# Date: December 4th 2017
# Build: myrobotlab development build version 2645
from time import sleep
from org.myrobotlab.service import Speech
lang="EN" #for NaturalReaderSpeech
Voice="Ryan"
voiceType = Voice
speech = Runtime.createAndStart("Speech", "NaturalReaderSpeech")
speech.setVoice(voiceType)
speech.setLanguage(lang)
#open and close mouth in sync with speech
openclosemouth = False
if openclosemouth:
#set up Jaw with Raspberry Pi with Adafruit16C servo service
# 50 Hz servo frequency
frequency = 50
adaFruit16c1 = Runtime.createAndStart("AdaFruit16C1","Adafruit16CServoDriver")
raspi = Runtime.createAndStart("RasPi","RasPi")
adaFruit16c1.setController("RasPi","1","0x40")
adaFruit16c1.setPWMFreq(0,frequency)
jawPin = 8
jawServo = Runtime.createAndStart("jaw","Servo")
mouth = Runtime.createAndStart("Mouth","MouthControl")
sleep(20) # fix for servo attach timing issue as at myrobotlab 236x development builds
jawServo.attach(adaFruit16c1,jawPin,150,-1)
jaw = mouth.getJaw()
sleep(1)
jaw.attach(adaFruit16c1,jawPin)
jawServo.setMinMax(140,180) # set min and max jaw position accordingly for your own use-case
# these min/max settings work for me for this particular jaw: https://www.thingiverse.com/thing:992918
# @Mats, thanks :)
jawServo.setRest(175)
jawServo.moveTo(100)
jawServo.rest()
mouth.setmouth(140,175)
mouth.autoAttach = False
mouth.setMouth(speech)
def onEndSpeaking(text):
    sleep(.5)
    sleep(1)
    speech.speakBlocking(text)
    #mouth.jaw.moveTo(175)
def saystuff():
#myvoices = ['Ryan','Rich','Mike','Graham','Laura','Charles','Crystal','Heather','Ella','Rod','Peter','Audrey','Lucy','Rachel','Rosy','Ryan']
myvoices = ["Sharon", "Amanda","Tracy","Ryan","Tim","Suzan", "Mike","Rod","Rachel","Peter","Graham","Selene","Darren","Charles","Audrey","Rosa","Alberto","Diego", "Camila","Paula","Joaquim","Alain","Juliette","Emmanuel", "Marie","Bruno","Alice","Louice","Reiner", "Klara","Klaus","Sarah","Bertha","Jacob","Vittorio","Chiara","Mario","Valentina","Celia","Renata","Andrea","Julieta","Emma","Erik","Gus","Maja","Anika", "Markus"]
myvoicescount = len(myvoices)
for i in range(0,myvoicescount):
speech.setVoice(myvoices[i])
print("I am speaking with "+(myvoices[i])+"'s voice")
onEndSpeaking ("I am speaking with "+(myvoices[i])+"'s voice")
onEndSpeaking ("I'm completely operational, and all my circuits are functioning perfectly.")
#Start of main script
saystuff()
|
MyRobotLab/pyrobotlab
|
home/CheekyMonkey/naturalreaderspeech-v2-test.py
|
Python
|
apache-2.0
| 2,574
|
[
"CRYSTAL"
] |
2402c8a00cd0d8b4eaceab45189d4e22dbeb64399281b2adaa8a332c2d8c6400
|
#Standard imports
import os
import inspect
#Non-standard imports
import catmap
from catmap import ReactionModelWrapper
from catmap.model import ReactionModel
from ase.atoms import string2symbols
class ParserBase(ReactionModelWrapper):
def __init__(self,reaction_model=ReactionModel()):
"""Class for `parsing' information from raw data
(databases, spreadsheets, text files, trajectories, etc.) into a
structure which is useful to the microkinetic model. This class acts
as a base class to be inherited by other parser classes, but it is
not functional on its own.
input_file: defines the file path or object to get data from
A functional derived parser class must also contain the methods:
parse(input_file): a function to parse the input_file file/object and
return properly formatted data. The parse function should save all
necessary attributes to the Parser class. After parsing the parent
microkinetic model class will update itself from the Parser attributes.
"""
self._rxm = reaction_model
self._required = {} #No user-defined attributes are required.
@staticmethod
def get_composition(species_string):
composition = {}
try:
symbs = string2symbols(species_string.replace('-',''))
for a in set(symbs):
composition[a] = symbs.count(a)
except ValueError:
composition = None
return composition
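# e.g. get_composition('CH3OH') gives {'C': 1, 'H': 4, 'O': 1}; strings that
# ase's string2symbols cannot parse (after stripping '-') give None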
def _baseparse(self):
#Make dictionary of useful information about species in model
if not self.species_definitions:
self.species_definitions = {}
for species in (self.gas_names+self.adsorbate_names+
self.transition_state_names):
ads_info = {}
if '_' in species:
name,site = species.rsplit('_',1)
else:
name = species
site = self._default_site
ads_info['name'] = name
ads_info['site'] = site
if species in self.gas_names:
ads_info['type'] = 'gas'
ads_info['n_sites'] = 0
elif species in self.adsorbate_names:
ads_info['type'] = 'adsorbate'
ads_info['n_sites'] = 1
elif species in self.transition_state_names:
ads_info['type'] = 'transition_state'
ads_info['n_sites'] = 1
else:
ads_info['type'] = 'unknown'
composition = self.get_composition(name)
ads_info['composition'] = composition
if species in self.species_definitions:
ads_info.update(self.species_definitions[species])
if not ads_info['composition']:
raise ValueError('Could not determine composition for '+species)
self.species_definitions[species] = ads_info
for species in self.species_definitions.keys(): #set site definitions
site = self.species_definitions[species].get('site',None)
if site:
ads_info = {}
ads_info['type'] = 'site'
ads_info['site'] = site
ads_info['formation_energy'] = 0
if site not in self._gas_sites:
ads_info['n_sites'] = 1
else:
ads_info['n_sites'] = 0
ads_info['site_names'] = ['gas']
ads_info['total'] = 0
ads_info['composition'] = {}
if self.site_definitions: #Deprecate later...
site_names = self.site_definitions[site]
if isinstance(site_names,basestring):
site_names = [site_names]
ads_info['site_names'] = site_names
if self.site_totals: #Deprecate later...
ads_info['total'] = self.site_totals[site]
if site in self.species_definitions:
ads_info.update(self.species_definitions[site])
self.species_definitions[site] = self.species_definitions['*_'+site] \
= ads_info
if not self.atomic_reservoir_list:
#Make list of valid reference sets for e.g. boltzmann coverages
cart_product = []
all_atoms = []
composition_dict = {}
dummy_dict = {}
for sp in self.gas_names:
composition_dict[sp] = self.species_definitions[sp]['composition']
dummy_dict[sp] = 0 #dummy dict of energies
for key in composition_dict[sp].keys():
if key not in all_atoms:
all_atoms.append(key)
for key in all_atoms:
possibles = []
for sp in self.gas_names:
if composition_dict[sp].get(key,None):
possibles.append(sp)
cart_product.append(possibles)
ref_sets = []
for prod in catmap.functions.cartesian_product(*cart_product):
refdict = {}
for ai,pi in zip(all_atoms,prod):
refdict[ai] = pi
if (sorted(list(refdict.values())) ==
sorted(list(set(refdict.values()))) and
sorted(list(refdict.values())) not in
[sorted(list(rs.values())) for rs in ref_sets]):
if refdict and dummy_dict and composition_dict:
try:
self.convert_formation_energies(dummy_dict,
refdict,composition_dict)
ref_sets.append(refdict)
except ValueError:
pass
if ref_sets:
self.atomic_reservoir_list = ref_sets
else:
raise AttributeError('No valid reference sets from gas-phase species ' + \
                     'in the system. Add gases or specify atomic_reservoir_list')
|
rybrogaard/catmap
|
catmap/parsers/parser_base.py
|
Python
|
gpl-3.0
| 6,160
|
[
"ASE"
] |
cc8e5ba47cebfd4b87ac2eff0dc7afebc6cb737efe82385be3d7cfa2a5616ef3
|
#!/usr/local/sci/bin/python
# PYTHON3
#
# Author: Kate Willett
# Created: 24 January 2018 (based on IDL create_monthseriesJAN2015.pro 1 Feb 2013)
# Last update: 3 August 2021
# Location: home/h04/hadkw/HadISDH_Code/HADISDH_BUILD/
# GitHub: https://github.com/Kate-Willett/HadISDH_Build
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# Reads in hourly HadISD data, converts to humidity variables, calculates monthly means and monthly mean anomalies, saves to ascii and netCDF.
# Now also outputs wetbulb extreme statistics: month max, days exceeding thresholds of 25, 27, 29, 31, 33, 35
# Outputs station files to:
# PHA folder: /scratch/hadkw/pha52jgo/data/hadisdh/
# Depending on PHA switch it will output anomalies or absolurtes to PHA
# Still need to test PHA with anomalies which would allow me to produce absolutes from anomaly + climatology which is better
# /scratch/hadkwUPDATE<YYYY>/MONTHLIES/NETCDF/
# /scratch/hadkw/UPDATE<YYYY>/MONTHLIES/ASCII/
#
# Outputs list files to:
# /scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/
#
# this program reads in every QC'd HadISD netcdf file and outputs a monthly mean anomaly, abs, clim and sd version
# this uses T, Tdew and SLP from the netCDF but also needs to know elevation in order to calculate SLP if necessary.
# this outputs T, Tdew, DPD, Twetbulb, vapour pressure, specific humidity and relative humidity using calc_evap.pro
# Also outputs SLP and windspeed
# May add heat indices in the future
#
# This previously also read in source data and output a station history file
# /data/local/hadkw/HADCRUH2/UPDATE2017/MONTHLIES/HISTORY/ of the format:
# /data/local/hadkw/HADCRUH2/PROGS/USHCN_v52d/src_codes/documentation/SHF_tob_inst.txt
# some amendments:
# SOURCE CODE 3=history created from raw data using:
# - change in station source (composited stations) - uses IDs listed for each time point within netCDF file
# - change in ISD within file lat/lon (2012 onwards)
# - change in observing frequency
# - change in observing times
# - change in recording resolution (2012 onwards?)
# REMEMBER to convert lat/lon to degrees, minutes and seconds and elevation to feet (?)
# This functionality hasn't been brought through from IDL to python yet.
#
# Initial kick out if fewer than 1344 obs for a calendar month over the 1981-2010 climatology period - 80% of days in month with at least 4 obs and 15 years (15*4*(28*0.8))
# Initial kick out if fewer than 24000 total obs - 20 years with 4 obs on at least 300 days
# First, month hour averages are taken for each hour of the day
# - there must be at least 15 days present for each hour within the month
# Then the month average is made from the month hour averages
# - there must be at least 4 month hour averages with at least 1 in each tercile 00 to 07, 08 to 15, 16 to 23
# There must also be at least one year in each decade of climatology 81-90, 91-00, 01-10
# There must be at least (>=) 15 years of T and Td (tests RH) within the 1981-2010 climatology period
# for each month present for the station to be kept
#1) Makes hr means for each month where >=15 obs for each hour over the month
#2) ABS and St Devs: Makes month means from month hr means where >= 4 hrs within month and 1 in each tercile 0-7, 8-15, 16-23
#3) Makes month hr clims where >= 15 years of data for each month hr and one in each decade
#4) CLIMS and CLIM ST DEVS: Makes month CLIMS and Clim Std Devs if >= 4 clim hr means and one in each tercile
#5) Makes month hr mean anomalies if >= 15 hr anomalies (hr - month hr climatology)
#6) ANOMS: Make month mean anomalies if >= 4 month hr mean anomalies in each month and one in each hr tercile
#7) Add a final check that if the station is still good (has all 12 months of climatology)
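# Worked numbers for the thresholds above (a sketch using the stated rules):
# the climatology entry bar is 15 years * 4 obs/day * (28 days * 0.8) =
# 1344 obs for a February-length month, and the whole-record bar is
# 20 years * 4 obs/day * 300 days = 24000 obs. The tercile test in steps
# 2), 4) and 6) needs at least one value in each of hours 00-07, 08-15 and
# 16-23, so e.g. month hour means at hours [3, 9, 14, 21] pass while
# [0, 1, 2, 3] fail despite also having 4 values.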
#
# <references to related published material, e.g. that describes data set>
#
# -----------------------
# LIST OF MODULES
# -----------------------
# inbuilt:
# import datetime as dt
# import matplotlib.pyplot as plt
# import numpy as np
# from matplotlib.dates import date2num,num2date
# import sys, os
# from scipy.optimize import curve_fit,fsolve,leastsq
# from scipy import pi,sqrt,exp
# from scipy.special import erf
# import scipy.stats
# from math import sqrt,pi
# import struct
# import pdb
# import netCDF4 as nc4
# from subprocess import check_output
#
# Kates:
# import CalcHums - written by Kate Willett to calculate humidity variables
# import ReadNetCDF
# from GetNiceTimes import MakeDaysSince
#
# -----------------------
# DATA
# -----------------------
# reads in netCDF hourly station data from HadISD
# - /scratch/hadkw/UPDATE<YYYY>/HADISDTMP/
# New list of potential HadISD stations to include
# inlists='/scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/HadISD.<Version>_candidate_stations_details.txt'
# inCIDs='/scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/isd-history_downloaded18JAN2018_1230.txt'
# 20CR SLP data for making climatological SLP for humidity calculation
# inSLP='/scratch/hadkw/UPDATE<YYYY>/OTHERDATA/' #20CR*7605MSLP_yycompos.151.170.240.10.37.8.8.59.nc
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# First make sure the HadISD and 20CR source data are in the right place.
# Make sure this year's directories are set up: makeHadISDHdirectories.sh
# Make sure this year's PHA directories are set up: makePHAdirectories.sh
# Go through everything in the 'Start' section to make sure dates, versions and filepaths are up to date
# This can take an hour or so to run through ~3800 stations so consider using screen, screen -d, screen -r
# python3 CreateMonthSeriesfromHadISD.py
#
# Run from desktop:
# module load scitools/default-current # to load python 3
# python CreateMonthSeriesfromHadISD.py
#
# Run from spice:
# ./F4_submit_spice.bash
#
# -----------------------
# OUTPUT
# -----------------------
# ASCII monthly means and anomalies
# outdirASC='/scratch/hadkw/UPDATE<YYYY>/MONTHLIES/ASCII/'
# GHCNM style ASCII monthly means for PHA
# outdirRAW<var>='/scratch/hadkw/UPDATE<YYYY>/pha52jgo/data/hadisdh/<var>/monthly/raw/'
# outdirHIST='/scratch/hadkw/UPDATE<YYYY>/MONTHLIES/HISTORY/'
# outdirNCF='/scratch/hadkw/UPDATE<YYYY>/MONTHLIES/NETCDF/'
# A list of stations that are not carried forward because they do not contain enough months of data
# ditchfile='/scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/tooshortforHadISDH.'+version+'.txt'
# A list of stations that have enough months to be carried forward
# keepfile='/scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/goodforHadISDH.'+version+'.txt'
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 7 (3 August 2021)
# ---------
#
# Enhancements
# Outputs ASCII wetbulb temperature extremes statistics.
#
# Changes
# Outputs a separate station list for Tw extremes as fewer pass the test of having sufficient data to create a climatology
# This list is only used at the F12_CreateHomog.. stage to cross-check with the remaining homogenised Tw stations.
# This avoids unnecessary removal of stations from the main product or incorporation of poor quality (incomplete) stations into the TwEx products.
#
# Bug fixes
#
#
#
# Version 6 (22 October 2020)
# ---------
#
# Enhancements
# Updated headers and processes to work in a fully automated manner from /scratch/hadkw/
# Can now output anomalies or absolutes to the PHA file structure
# Now outputs the number of 'good' and 'bad' stations to an output file.
#
# Changes
#
# Bug fixes
#
#
# Version 5 (3 February 2020)
# ---------
#
# Enhancements
# Now python 3
#
# Changes
# Now month climatology catch kicks out if there are fewer than 1344 obs - 80% of days with at least 4 obs per day and 15 years of climatology (Feb has 28 days so ((28*4)*0.8) * 15)
# It's also kicked out if there are fewer than 20 years of data with 4 obs on at least 300 days (24000) - these would have been caught anyway but this is more efficient to catch here early
# Climatology can be calculated where there are >= 15 years of data rather than > 15 years
#
# Bug fixes
# 1) reshaping of 20CR SLP arrays has been corrected - this created very small errors within the monthly values
# 2) RH was being calculated with respect to water in all cases rather than with respect to ice when Tw <= 0
# 3) Climatology maker checked that there was at least 1 year of data in each decade but this wasn't working properly so more stations passed than should have
#
#
# Version 4 (24 January 2018)
# ---------
#
# Enhancements
# Now python
#
# Changes
# Can work with any climatology period BUT check that 20CR data is there for that clim period.
# Changed 20CRMMM7605 filenames to match the 19812010 formats
#
# Bug fixes
#
# IDL VERSIONS PREVIOUSLY
# Version 3 (18 January 2017)
# ---------
#
# Enhancements
# Updated to deal with HadISD.2.0.1 - now 8000 stations plus, data from 1900 onwards and new station list
# Sticking with 1976-2005 clim for now so that I don't have to redo 20CR stuff yet - next year I should do this
#
# Changes
# Now requires an ish-history...txt file to find the CID (2 digit country code ID) to match up to the station WMO-WBAN numbers
# These are saved and then put in the output station list file at the end for continuity with previous versions and the hope
# that one day this CID will be corrected and usable - we know of many errors in it e.g., CI meaning China or Chile in some cases.
#
# Bug fixes
#
# Version 2 (22 January 2016)
# ---------
#
# Enhancements
# Updated to deal with 2016
# Added more detail to the header for code legacy
#
# Changes
#
# Bug fixes
#
# Version 1 (15 January 2015)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
# COULD CHANGE TO RUN EACH STATION INDEPENDENTLY SO IT CAN BE SPAT OUT TO SPICE AND RUN VERY VERY QUICKLY
# WOULD REQUIRE A SORT ON THE STATION OUTPUT LISTS AT THE END THOUGH TO CHECK THEY ARE IN ORDER
# AS of 7th Feb 2020 this runs and appears to output sensible values
# HOWEVER - these values do not match the IDL values either for anomalies or absolutes.
# They are close but not the same, sometimes even a few numbers out (whole numbers not just decimal precision)
# Could this be the humidity calculation?
# Could this be the month means - have I changed anything that might have altered the climatologies?
# If it also affects T and Td then it's not just the humidity calculations - IT DOES NOT - SO IT IS SOMETHING TO DO WITH CALCHUMS....precision? Method? Ice bulb method?
# I have cross-checked IDL calc_evap.pro and python 3 CalcHums.py and they produce identical values to at least 4 decimal places.
# Maybe its the pressure read in? - 20CR values for 010010-99999 are very slightly different
# However, I've just tested tolerance and it's very small, even for very low temperatures.
# Now check both codes at variable conversion and then again at monthly means
# SOOO - there were two bugs in IDL
# - the pull out and reformat of 20CR slp data was wrong because the shift() term did not have a 0 for the second axis.
# all data were rolling through as a vector rather than an array
# This is an error but would not have led to very different values because sensitivity to pressure is small
# - the calculation of RH was done only with respect to water even when tw <= 0.
# this only affected RH values but the difference could be large
#
# On complete run through Python kicks out 18 more stations than IDL
# Some stations kicked out by IDL are retained by python and vice versa and stations have slightly different numbers of months in some cases.
# I think this comes down to the make_months_oddtimes or MakeMonths functions
#
# In Python we calculate clims where there are >= 15 years over clim period but IDL is just > 15 so Python keeps more stations for this
# Still trying to find out why python kicks out others that make it in IDL
# Found ANOTHER bug in the IDL code in make_months_oddtimesJAN2015.pro
# - to create a climatology there must be at least 15 years of data AND at least 1 year in each decade
# - in IDL the decade counter was wrong - rather than the decade going 0-9 and 10-19 and 20-29 of the climatology it
# started at the start year 1973 so any year within the first 17 was counted even though a subarray of only the climatology years had been created
#
# SOOOO - my python code that kicks out more stations but keeps some that were kicked out in IDL is CORRECT!!!
#
#-------------------------------------------------------------------------
# JAN 2015
# updated to read in 2014
# now includes windspeed and sea level pressure
# moved 'BAD MONTHS' kick out to just below RH make_months and moved RH to be done first
# no point carrying on if there isn't enough humidity data - do not base on SLP which has lots of
# missing
# JAN 2014
# updated to read in 2013
# now creates hourly DPD and then monthly DPD AND monthly derived DPD (compare later)
# added a loop in make_months_oddtimesJAN2014.pro to remove all stations that have fewer
# than 15 months for any one month within climatology period. This can occur in some cases
# when the odd hour makes an hour_month clim possible but not a monthly..
# DEC 2013
# Adding dewpoint depression ready for 2013 update
# Need to check that T-DPD = Td as it may not.
# Not entirely sure whether its best to calculate monthly T and Td and then create DPD
# Or whether its best to calculate monthly DPD directly from the hourly data
# For playing (to see whether there is more S-N ratio in DPD compared to Td) just use monthly conversions
# FEB 2013
# CHANGED station P calculation
# i) use actual station T to convert to monthly - test
# ii) make climatological monthly mean T values and use those to calc station P
# iii) read in CR20 monthly MSLP climatologies 1976-2005 and use these instead of 1013.25 - use climatological monthly mean T
#"Support for the Twentieth Century Reanalysis Project dataset is provided by the U.S. Department of Energy,
# Office of Science Innovative and Novel Computational Impact on Theory and Experiment (DOE INCITE) program,
# and Office of Biological and Environmental Research (BER), and by the National Oceanic and Atmospheric
# Administration Climate Program Office."
#"20th Century Reanalysis V2 data provided by the NOAA/OAR/ESRL PSD, Boulder, Colorado, USA, from their Web site at
# http://www.esrl.noaa.gov/psd/"
# We would also appreciate receiving a copy of the relevant publications.
# both use Eq. from Smithsonian Tables p268
#************************************************************************
# START
#************************************************************************
# USE python3
# module load scitools/default-current
# python F4_CreateMonthSeriesfromHadISD.py
#
# For debugging
# ipython
# %pdb
# %run F4_CreateMonthSeriesfromHadISD.py
#
# REQUIRES
# CalcHums.py
# MakeMonths.py
# ReadNetCDF.py
#
#************************************************************************
# Set up python imports
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.dates import date2num,num2date
import sys, os
from scipy.optimize import curve_fit,fsolve,leastsq
from scipy import pi,sqrt,exp
from scipy.special import erf
import scipy.stats
from math import sqrt,pi
import struct
import pdb
import netCDF4 as nc4
from subprocess import check_output, call
import glob
import CalcHums
import ReadNetCDF
from GetNiceTimes import MakeDaysSince
# RESTART VALUE
RestartValue = '-----------' #'-----------' #'------' #'681040'
# Anomalies or absolutes to PHA?
PHAActuals = True # True for outputting actuals to PHA, False to output Anomalies to PHA
# Start and end years if HardWire = 1
isdstyear = 1931 # start year of HadISD dataset
styear = 1973
edyear = 2019
# Dataset version if HardWire = 1
versiondots = '4.2.0.2019f'
version = 'v420_2019f'
hadisdversiondots = '3.1.0.2019f'
hadisdversion = 'v310_2019f'
# HARDWIRED SET UP!!!
# If HardWire = 1 then program reads from the above run choices
# If HardWire = 0 then program reads in from F1_HadISDHBuildConfig.txt
HardWire = 0
if (HardWire == 0):
# Read in the config file to get all of the info
with open('F1_HadISDHBuildConfig.txt') as f:
ConfigDict = dict(x.rstrip().split('=', 1) for x in f)
versiondots = ConfigDict['VersionDots']
hadisdversiondots = ConfigDict['HadISDVersionDots']
styear = int(ConfigDict['StartYear'])
edyear = int(ConfigDict['EndYear'])
# AttribDict held in memory to provide global attribute text later
# Read in the attribute file to get all of the info
with open('F1_HadISDHBuildAttributes.txt') as f:
AttribDict = dict(x.rstrip().split('=', 1) for x in f)
# Climatology start and end years
clims = [1981,2010]
# Set up directory locations
updateyy = str(edyear)[2:4]
updateyyyy = str(edyear)
workingdir = '/scratch/hadkw/UPDATE'+updateyyyy
# Hope this stays the same. May need to change this when we go to monthly updating
# Could use glob.glob with wildcard for the date bit...
#INDIR = workingdir+'/HADISDTMP/hadisd.'+hadisdversiondots+'_19310101-'+str(edyear+1)+'0101_'
INDIR = workingdir+'/HADISDTMP/hadisd.'+hadisdversiondots+'_19310101-'+str(edyear+1)+'0701_'
OUTASC = workingdir+'/MONTHLIES/ASCII/'
OUTRAWq = workingdir+'/pha52jgo/data/hadisdh/q/'
OUTRAWe = workingdir+'/pha52jgo/data/hadisdh/e/'
OUTRAWt = workingdir+'/pha52jgo/data/hadisdh/t/'
OUTRAWdpd = workingdir+'/pha52jgo/data/hadisdh/dpd/'
OUTRAWtd = workingdir+'/pha52jgo/data/hadisdh/td/'
OUTRAWtw = workingdir+'/pha52jgo/data/hadisdh/tw/'
OUTRAWrh = workingdir+'/pha52jgo/data/hadisdh/rh/'
OUTRAWws = workingdir+'/pha52jgo/data/hadisdh/ws/'
OUTRAWslp = workingdir+'/pha52jgo/data/hadisdh/slp/'
OUTHIST = workingdir+'/MONTHLIES/HISTORY/'
OUTNCF = workingdir+'/MONTHLIES/NETCDF/'
# Set up filenames
RAWSUFFIX = '.raw.tavg'
HISSUFFIX = '.his'
ANOMSUFFIX = 'monthQCanoms.raw'
ABSSUFFIX = 'monthQCabs.raw'
NCSUFFIX = '_hummonthQC.nc'
INSTATLIST = workingdir+'/LISTS_DOCS/HadISD.'+hadisdversiondots+'_candidate_stations_details.txt'
INCIDs = workingdir+'/LISTS_DOCS/isd-history-*.txt' # does wildcard work here?
INSLP = workingdir+'/OTHERDATA/' #20CRJan7605MSLP_yycompos.151.170.240.10.37.8.8.59.nc or 20CRv2cJan19812010_SLP_Jan2018.nc
OUTDITCH = workingdir+'/LISTS_DOCS/tooshortforHadISDH.'+versiondots+'.txt'
OUTKEEP = workingdir+'/LISTS_DOCS/goodforHadISDH.'+versiondots+'.txt'
OUTTWEX = workingdir+'/LISTS_DOCS/goodforHadISDHTwEx.'+versiondots+'.txt'
OUTPUTLOG = workingdir+'/LISTS_DOCS/OutputLogFile'+versiondots+'.txt'
# Set up variables
MDI = -1e+30
INTMDI = -999
ASCMDI = -99.99
#*** at some point add all the header info from the new HadISD files***
# date and time stuff
MonArr = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
stday = 1
stmon = 1
stjul = dt.date(styear,stmon,stday).toordinal() # 00:00 hrs not set for hours just integer days + 1721424.5 #JULDAY(stmon,stday,styear,0)
edday = 31
edmon = 12
#edjul = dt.date(edyear+1,edmon,edday).toordinal() # 00:00 hrs not sure why this is year + 1 but still with Dec 31st
edjul = dt.date(edyear+1,1,1).toordinal() # 00:00 hrs not sure why this is year + 1 but still with Dec 31st
# Should it not be dt.date(edyear+1,1,1)
edactjul = dt.date(edyear,edmon,edday).toordinal() # should be 11pm 23:00
# Will have to *24 to get hours
# Using JULIAN DAYS to get at number of hours - doesn't need to be Julian days!
ntims = (edjul - stjul) * 24 # time points in hours
nmons = ((edyear + 1) - styear) * 12 # time points in months
nyrs = (edyear + 1) - styear # time points in years
actyears = np.arange(styear, (edyear + 1)) # array of integer years
ndays = (edjul - stjul)
# ISD times are HOURS since 1931-01-01 00:00 rather than DAYS since 1973-01-01 00:00 so we need to extract
# this is a little complicated as they are provided as integers rather than decimals of a whole day (1./24.)
# set up an array of time pointers from 1973 onwards in HadISD time language (hours since 1931-01-01 00:00)
# 753887 hours since 1931 comes out as Dec 31st 2016 at 23:00 - which is correct!!!
# 24 * (JULDAY(12,31,2016,23) - JULDAY(1,1,1931,0)) = 753887
# 24 * (JULDAY(1,1,1973,0) - JULDAY(1,1,1931,0)) = 368184
isdstjul = dt.date(isdstyear,stmon,stday).toordinal() # JULDAY(stmon,stday,isdstyear,0) ; this gives a number in days 2426342.5
hrssince1931 = (stjul - isdstjul) * 24 # hours since Jan 1st 1931 for Jan 1st 1973 00:00
isd_full_times = np.arange(ntims) + hrssince1931 # array for each hour from Jan 1st 1973 00:00 starting count at hours since jan 1st 1931 00:00
full_times = np.arange(0, ntims) # array for each hour from Jan 1st 1973 00:00 starting at 0
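# Worked check of the offset above (same ordinal arithmetic as the code):
# dt.date(1973,1,1).toordinal() - dt.date(1931,1,1).toordinal() = 15341
# days, so hrssince1931 = 15341 * 24 = 368184, matching the JULDAY note.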
# create array of half year counts taking into account leap years
# These are for analysing the station data for shifts in resolution or frequency
# i.e., 1973 June 30th = 181st day Dec 31st = 365th day (184)
# i.e., 1974 June 30th = 181st day Dec 31st = 365th day (184)
# i.e., 1975 June 30th = 181st day Dec 31st = 365th day (184)
# i.e., 1976 June 30th = 182nd day Dec 31st = 366th day (184)
# leaps are 1976,1980,1984,1988,1992,1996,2000,2004,2008,2012,2016
# leap if divisible by four by not 100, unless also divisible by 400 i.e. 1600, 2000
# Identify the leap years
founds = np.where( ((actyears/4.) - np.floor(actyears/4.) == 0.0) & ( ((actyears/100.) - np.floor(actyears/100.) != 0.0) | ((actyears/400.) - np.floor(actyears/400.) == 0.0)))
leapsids = np.repeat(0,nyrs)
leapsids[founds] = 1 #1s identify leap years
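# Quick check of the rule above (illustrative): 1976 is a leap year
# (divisible by 4, not by 100); 1900 would not be (divisible by 100 but not
# by 400); 2000 is (divisible by 400). So for a 1973 start, founds picks
# out 1976, 1980, ..., 2016 as listed above.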
HrDict = {'JanHrs':np.arange(744),
'FebHrs':np.arange(696)+744, # THIS INCLUDES 29th FEB!!!
'MarHrs':np.arange(744)+1440,
'AprHrs':np.arange(720)+2184,
'MayHrs':np.arange(744)+2904,
'JunHrs':np.arange(720)+3648,
'JulHrs':np.arange(744)+4368,
'AugHrs':np.arange(744)+5112,
'SepHrs':np.arange(720)+5856,
'OctHrs':np.arange(744)+6576,
'NovHrs':np.arange(720)+7320,
'DecHrs':np.arange(744)+8040}
dates = [stjul,edactjul] # this is Jan 1st 1973 to end-year, Dec 31st so the day counts will need +1 to be correct
stclim = clims[0] - styear
edclim = clims[1] - styear
climsum = (edclim + 1) - stclim
CLIMstjul = dt.date(clims[0],stmon,stday).toordinal() #JULDAY(stmon,stday,clims(0),0)
# Can't understand why I did clims(1)+1, edmon,edday
CLIMedjul = dt.date(clims[1]+1,stmon,stday).toordinal() #JULDAY(edmon,edday,clims(1)+1, 0)
CLIMtims = (CLIMedjul - CLIMstjul) * 24.
CLIMstpoint = (CLIMstjul - stjul) * 24.
clpointies = (np.arange(CLIMtims) + CLIMstpoint).astype(int)
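# Worked example (assuming styear = 1973 and clims = [1981,2010]): there are
# 2922 days between 1 Jan 1973 and 1 Jan 1981 (8 years including the 1976
# and 1980 leap days), so CLIMstpoint = 2922 * 24 = 70128 and clpointies
# indexes every hour from 1 Jan 1981 00:00 up to (but not including)
# 1 Jan 2011 00:00.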
#print('check these clims')
#pdb.set_trace()
#************************************************************************
# Subroutines
#************************************************************************
# READDATA
def ReadData(FileName,typee,delimee):
''' Use numpy genfromtxt reading to read in all rows from a complex array '''
''' Need to specify format as it is complex '''
''' outputs an array of tuples that in turn need to be subscripted by their names defaults f0...f8 '''
return np.genfromtxt(FileName, dtype=typee,delimiter=delimee,encoding='latin-1') # ReadData
#************************************************************************
#ReadSLPdata
def ReadSLPdata(TheMDI):
''' Read in 1981-2010 climatological SLP from 20CRv2 data '''
''' Reform lats and lons accordingly '''
''' Compile 12 month clim to one array '''
''' Data are 2 x 2 degrees '''
TheData = np.empty((12,91,180))
TheData.fill(TheMDI)
LatInfo = ['lat']
LonInfo = ['lon']
ReadInfo = ['VAR']
# Loop through each month to read in and append data into array
MonArr = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
for mm,mon in enumerate(MonArr):
TmpData,TmpLats,TmpLons = ReadNetCDF.GetGrid(INSLP+'20CRv2c'+mon+'19812010_SLP_Jan2018.nc',ReadInfo,LatInfo,LonInfo)
# A time, lat, lon data numpy array
# If this is January then sort out lats and lons
if (mm == 0):
# Make lats the northern-most boundary and reverse to go 91 to -89
TheLats = np.flip(np.copy(TmpLats))
TheLats = TheLats + 1
# Make the lons the western-most boundary from -179 to 179
TheLons = np.roll(np.copy(TmpLons) - 1,89)
TheLons[np.where(TheLons >= 180)] = -(360 - TheLons[np.where(TheLons >= 180)])
TheData[mm,:,:] = np.flipud(np.roll(np.copy(TmpData),89,axis=1)) # lons were 0 to 358 and are now to -179 to 179 (gridbox centres)
#print('Check this reformatting')
#pdb.set_trace()
return TheData,TheLats,TheLons
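# Illustrative walk-through of the lon reformatting above (assuming the
# 20CRv2 lons arrive as gridbox centres 0, 2, ..., 358): subtracting 1 gives
# western boundaries -1, 1, ..., 357; np.roll(..., 89) brings the 181-357
# block to the front; and the final wrap maps 181 -> -179 and 357 -> -3, so
# TheLons ends up running -179, -177, ..., 177, 179.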
#************************************************************************
# GetHadISD
def GetHadISD(TheFilee,TheHTimes,TheMDI):
''' This reads in the data from netCDF and pulls out the correct time period '''
# Build correct time arrays
TheTempArr = np.repeat(TheMDI, len(TheHTimes))
TheDewpArr = np.repeat(TheMDI, len(TheHTimes))
TheWSArr = np.repeat(TheMDI, len(TheHTimes))
TheSLPArr = np.repeat(TheMDI, len(TheHTimes))
TheObsSource = np.empty((len(TheHTimes),12),dtype='|S1')
# Read the time, t, td, slp and ws (and obs source) data from the netcdf file
ncf = nc4.Dataset(TheFilee,'r')
tims = np.copy(ncf.variables['time'][:]).astype(int) # hours since 1931, 1, 1, 00:00 - these are doubles so convert to integer
# Now find the period we're interested and only copy that data
# Note that we may be reading HadISD that has data after our period of interest (monthly updates) so need to be specific
#StartPoint = np.where(tims >= TheHTimes[0])[0]
StartPoint = np.where((tims >= TheHTimes[0]) & (tims <= TheHTimes[-1]) )[0]
# pdb.set_trace()
# Catch station if there is no data in desired period and return with empty arrays
if (len(StartPoint) == 0):
return TheTempArr,TheDewpArr,TheWSArr,TheSLPArr,tims,TheObsSource
#StartPoint = np.where(tims >= TheHTimes[0])[0][0]
tims = tims[StartPoint[0]:]
temps = np.copy(ncf.variables['temperatures'][StartPoint[0]:])
dewps = np.copy(ncf.variables['dewpoints'][StartPoint[0]:])
slp = np.copy(ncf.variables['slp'][StartPoint[0]:])
ws = np.copy(ncf.variables['windspeeds'][StartPoint[0]:])
#tims = tims.astype(int)
sourceids = np.copy(ncf.variables['input_station_id'][StartPoint[0]:,:]) # time,long_character_length (12) - comes out as a byte array b'0' etc
# needs to be converted to string and then concatenated
#pdb.set_trace()
# Trying to convert byte character array into a joined string array but FAR TOO SLOWWWW
#sourceids = np.array([]) # a blank numpy array to append to
#for row in range(len(tmp[:,0])):
# #print(row)
# #sourceids = np.append(sourceids,''.join([''.join(i) for i in tmp[row,:].astype('U')])) # TOOOOOOOO SLOW!!!!
# sourceids = np.append(sourceids,tmp[row,:].astype('U'))
# Its not actually used at the moment - it was for history files previously
# So I've left as b''
# pull out the HadISDH data period
# Which points in the HadISD data match the desired times for HadISDH TheHTimes
# np.isin gave IndexError when run from script but not from pdb - np.intersect1d seems more efficient anyway
Mush,ISDMap,HISDMap = np.intersect1d(tims,TheHTimes,assume_unique=True,return_indices=True)
TheTempArr[HISDMap] = np.copy(temps[ISDMap])
TheDewpArr[HISDMap] = np.copy(dewps[ISDMap])
TheWSArr[HISDMap] = np.copy(ws[ISDMap])
TheSLPArr[HISDMap] = np.copy(slp[ISDMap])
TheObsSource[HISDMap,:] = np.copy(sourceids[ISDMap,:])
#print('Check mapping has worked: ISDcounts = ',len(ISDMap),' HISDCounts = ',len(HISDMap))
#pdb.set_trace()
# Convert all flagged -2e30 and missing -1e30 values to missing
TheTempArr[np.where(TheTempArr <= TheMDI)] = TheMDI
TheDewpArr[np.where(TheDewpArr <= TheMDI)] = TheMDI
TheWSArr[np.where(TheWSArr <= TheMDI)] = TheMDI
TheSLPArr[np.where(TheSLPArr <= TheMDI)] = TheMDI
#print('check the mdi catching has worked') # may need [0] at end of np.where()
#pdb.set_trace()
return TheTempArr,TheDewpArr,TheWSArr,TheSLPArr,tims,TheObsSource
#**************************************************************************
# MakeMonths
def MakeMonths(TheDataArr,TheDates,TheMDI):
''' Code converted from IDL make_months_oddtimesJAN2015.pro '''
''' COMPLEX method makes month hour means, subtracts month hour climatology, makes month hour anomaly'''
''' Importantly this removes the diurnal cycle before averaging so reduces biasing from uneven temporal sampling '''
''' 1) Makes hr means for each month where >=15 obs for each hour over the month
2) ABS and St Devs: Makes month means from month hr means where >= 4 hrs within month and 1 in each tercile 0-7, 8-15, 16-23
3) Makes month hr clims where >= 15 years of data for each month hr and one in each decade
4) CLIMS and CLIM ST DEVS: Makes month CLIMS and Clim Std Devs if >= 4 clim hr means and one in each tercile
5) Makes month hr mean anomalies if >= 15 hr anomalies (hr - month hr climatology)
6) ANOMS: Make month mean anomalies if >= 4 month hr mean anomalies in each month and one in each hr tercile
7) Add a final check that if the station is still good (has all 12 months of climatology) !!!'''
# Set up the times
nhhrs = len(TheDataArr) # number of hours in record - complete years of data including leap years
nddys = (TheDates[1] - TheDates[0]) + 1 # number of days in the record - should be nhhrs / 24 so this is a check
#print('Number of days/hours check: ',nhhrs/24, nddys)
#pdb.set_trace()
TheStYr = dt.date.fromordinal(TheDates[0]).year
TheEdYr = dt.date.fromordinal(TheDates[1]).year
nyyrs = (TheEdYr - TheStYr) + 1 # number of years of record
nmms = nyyrs * 12 # number of months of record
clim_points = [clims[0]-TheStYr,clims[1]-TheStYr] # remember in python that if we're getting a range the last value needs to be +1
climlength = (clims[1] - clims[0]) + 1 # should be 30
#print('Number of years in clim (30?) check: ',climlength)
#pdb.set_trace()
# Set up leap year stuff
# This is already in the code above so may just be referencable but by putting it here this could be a stand alone function
ActYears = np.arange(TheStYr, (TheEdYr + 1)) # array of integer years
LeapIDs = np.repeat(0,nyyrs)
LeapIDs[np.where( ((ActYears/4.) - np.floor(ActYears/4.) == 0.0) & ( ((ActYears/100.) - np.floor(ActYears/100.) != 0.0) | ((ActYears/400.) - np.floor(ActYears/400.) == 0.0)))] = 1 #1s identify leap years
#print('Check the LeapIDs are correct')
#pdb.set_trace()
# Set up the final arrays
TheAnoms = np.repeat(TheMDI,nmms)
TheAbs = np.repeat(TheMDI,nmms)
TheSDs = np.repeat(TheMDI,nmms)
TheClims = np.repeat(TheMDI,12)
TheClimSDs = np.repeat(TheMDI,12)
# Set up the working arrays
mm_hr_abs = np.empty((nmms,24)) # month mean for each hour
mm_hr_abs.fill(TheMDI)
mm_hr_anoms = np.empty((nmms,24)) # month mean anomaly for each hour
mm_hr_anoms.fill(TheMDI)
mm_hr_clims = np.empty((12,24)) # month climatological mean for each hour
mm_hr_clims.fill(TheMDI)
# 1) MAKE MONTH HOUR MEANS FOR ALL YEARS - OK WITH LEAP YEARS
# chunk by years then work through each month - allows us to easily ID leap years
MCount = 0 # counter for months
stpoint = 0
edpoint = 0
for yy,year in enumerate(ActYears): # loops through with 0,1973 1,1974 etc
if (LeapIDs[yy] == 0): # not a leap year
MonDays = [31,28,31,30,31,30,31,31,30,31,30,31]
else:
MonDays = [31,29,31,30,31,30,31,31,30,31,30,31]
#print('Check leap year ID')
#pdb.set_trace()
# Now loop through each month
for mm in range(12):
# Extract month of data and reform to hrs,days array
edpoint = stpoint + (MonDays[mm]*24)
HrDayArr = np.reshape(TheDataArr[stpoint:edpoint],(MonDays[mm],24)) # each row is a day, each column is an hour
#print('Check stpoint and edpoint and extraction')
#pdb.set_trace()
# Loop through the hours in the day
for hh in range(24):
# *** Get month hour means where there are AT LEAST 15 days present within the month
# THIS COULD HAVE BEEN 20 BUT THEN STATIONS WITH CHANGES TO GMT REPORTING WITHIN A MONTH i.e. Australia MAY
# HAVE EVERY e.g. MARCH and SEPTEMBER REMOVED.
if (len(np.where(HrDayArr[:,hh] > TheMDI)[0]) >= 15):
mm_hr_abs[MCount,hh] = np.mean(HrDayArr[np.where(HrDayArr[:,hh] > TheMDI)[0],hh])
# 2) Make Month Means from actuals
# *** If there are at least 4 hrs means within the day and one in each tercile [0-7,8-15,16-23] then Fill TheAbs and TheSDs[MCount]
if ((len(np.where(mm_hr_abs[MCount,:] > TheMDI)[0]) >= 4) &
(len(np.where(mm_hr_abs[MCount,0:8] > TheMDI)[0]) > 0) &
(len(np.where(mm_hr_abs[MCount,8:16] > TheMDI)[0]) > 0) &
(len(np.where(mm_hr_abs[MCount,16:24] > TheMDI)[0]) > 0)):
TheSDs[MCount] = np.std(HrDayArr[np.where(HrDayArr > TheMDI)])
TheAbs[MCount] = np.mean(mm_hr_abs[MCount,np.where(mm_hr_abs[MCount,:] > TheMDI)[0]])
# potentially we could have a value for TheAbs but not TheAnoms so need to make sure the 15 days minimum is also applied there
# Later, if one month fails for climatology the whole station is dumped
stpoint = np.copy(edpoint)
MCount = MCount + 1
#print('Check hour sampling and means')
#pdb.set_trace()
# 3) MAKE MONTH HOUR CLIMS where >= 15 years of month hr data and one in each decade
# First extract clim years and reshape the mm_hr_abs from nmms,24 to climlength,12,24
mm_hr_abs_clim = np.reshape(mm_hr_abs[(clim_points[0]*12):((clim_points[1]+1)*12),:],(climlength,12,24))
#print('Check extraction of climatological months - 360')
#pdb.set_trace()
for mm in range(12):
for hh in range(24):
# *** There should be at least 15 years (50% of climlength) and one year in each decade
# NOTE THAT IN IDL THIS WAS JUST GT 15 NOT GE 15!!! SO WE KEEP MORE STATIONS HERE AT LEAST
if ((len(mm_hr_abs_clim[np.where(mm_hr_abs_clim[:,mm,hh] > TheMDI)[0],mm,hh]) >= climlength*0.5) &
(len(mm_hr_abs_clim[np.where(mm_hr_abs_clim[0:10,mm,hh] > TheMDI)[0],mm,hh]) > 0) &
(len(mm_hr_abs_clim[np.where(mm_hr_abs_clim[10:20,mm,hh] > TheMDI)[0],mm,hh]) > 0) &
(len(mm_hr_abs_clim[np.where(mm_hr_abs_clim[20:30,mm,hh] > TheMDI)[0],mm,hh]) > 0)):
mm_hr_clims[mm,hh] = np.mean(mm_hr_abs_clim[np.where(mm_hr_abs_clim[:,mm,hh] > TheMDI)[0],mm,hh])
#print('Check the month hr clim')
#pdb.set_trace()
# 4) THEN MONTH CLIMS if >= 4 clim hr means and one in each tercile
# NOTE FAIL IF: one month fails then this station will be ditched
# *** If there are at least 4 hrs means within the day and one in each tercile [0-7,8-15,16-23] then Fill TheClims and TheClimSDs[MCount]
if ((len(np.where(mm_hr_clims[mm,:] > TheMDI)[0]) >= 4) &
(len(np.where(mm_hr_clims[mm,0:8] > TheMDI)[0]) > 0) &
(len(np.where(mm_hr_clims[mm,8:16] > TheMDI)[0]) > 0) &
(len(np.where(mm_hr_clims[mm,16:24] > TheMDI)[0]) > 0)):
TheClims[mm] = np.mean(mm_hr_clims[mm,np.where(mm_hr_clims[mm,:] > TheMDI)])
monthstash = mm_hr_abs_clim[:,mm,:]
TheClimSDs[mm] = np.std(monthstash[np.where(monthstash > TheMDI)])
#print('Check the month clim and sd')
#pdb.set_trace()
# This month does not have enough data so we need to ditch the station
else:
#print('Failed to produce climatology')
#pdb.set_trace()
return TheAnoms, TheAbs, TheSDs, TheClims, TheClimSDs # exit with empty / incomplete return arrays
# 5) Make month hr mean anomalies if >= 15 hr anomalies (hr - month hr climatology)
# NOW BUILD THE month hr anoms and TheAnoms
# chunk by years then work through each month - allows us to easily ID leap years
MCount = 0 # counter for months
stpoint = 0
edpoint = 0
for yy,year in enumerate(ActYears): # loops through with 0,1973 1,1974 etc
if (LeapIDs[yy] == 0): # not a leap year
MonDays = [31,28,31,30,31,30,31,31,30,31,30,31]
else:
MonDays = [31,29,31,30,31,30,31,31,30,31,30,31]
# Now loop through each month
for mm in range(12):
# Extract month of data and reform to hrs,days array
edpoint = stpoint + (MonDays[mm]*24)
HrDayArr = np.reshape(TheDataArr[stpoint:edpoint],(MonDays[mm],24)) # each row is a day, each column is an hour
# Loop through the hours in the day
for hh in range(24):
# *** Get month hour mean anoms where there are AT LEAST 15 days present within the month
# THIS COULD HAVE BEEN 20 BUT THEN STATIONS WITH CHANGES TO GMT REPORTING WITHIN A MONTH i.e. Australia MAY
# HAVE EVERY e.g. MARCH and SEPTEMBER REMOVED.
if ((len(np.where(HrDayArr[:,hh] > TheMDI)[0]) >= 15) & (mm_hr_clims[mm,hh] > TheMDI)):
mm_hr_anoms[MCount,hh] = np.mean((HrDayArr[np.where(HrDayArr[:,hh] > TheMDI)[0],hh] - mm_hr_clims[mm,hh]))
# 6) Make month mean anomalies if >= 4 month hr mean anomalies in each month and one in each hr tercile
# *** If there are at least 4 hrs means within the day and one in each tercile [0-7,8-15,16-23] then Fill TheAnoms
if ((len(np.where(mm_hr_anoms[MCount,:] > TheMDI)[0]) >= 4) &
(len(np.where(mm_hr_anoms[MCount,0:8] > TheMDI)[0]) > 0) &
(len(np.where(mm_hr_anoms[MCount,8:16] > TheMDI)[0]) > 0) &
(len(np.where(mm_hr_anoms[MCount,16:24] > TheMDI)[0]) > 0)):
TheAnoms[MCount] = np.mean(mm_hr_anoms[MCount,np.where(mm_hr_anoms[MCount,:] > TheMDI)[0]])
stpoint = np.copy(edpoint)
MCount = MCount + 1
# 7) Add a final check that if the station is still good (has all 12 months of climatology)
# there are sufficient numbers of absolute values to calculate a climatology
TheAbsClims = np.reshape(TheAbs,(len(ActYears),12))[clim_points[0]:clim_points[1]+1,:]
for mm in range(12):
if (len(TheAbsClims[np.where(TheAbsClims[:,mm] > TheMDI)[0],mm]) < 15):
# this is bad so fail everything
TheClims[:] = TheMDI
#print('Failed to produce enough absolute values')
#pdb.set_trace()
return TheAnoms, TheAbs, TheSDs, TheClims, TheClimSDs
return TheAnoms, TheAbs, TheSDs, TheClims, TheClimSDs
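# Hedged usage sketch (the real calls are expected further down this script;
# the variable names here are illustrative):
# rh_anoms, rh_abs, rh_sds, rh_clims, rh_climsds = MakeMonths(fullrhum_arr, dates, MDI)
# Per step 7) of the header, a station is presumably then ditched via
# FailureMode('TooFewClims',...) if any of the 12 rh_clims entries is still MDI.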
#************************************************************************
# MakeMonthsExtremes
def MakeMonthsExtremes(TheDataArr, TheDates, TheMDI, Thresholds):
'''
TheDataArray: np.array of hourly values for the station
TheDates: list of datetime object of julian days for the start date and end date
TheMDI: missing data indicator
Thresholds: list of threshold values used as >=
1) If there are >= 4 obs per day with 1 in each tercile (0-7, 8-15, 16-23)
Calculate day max value
Calculate day mean value
2) If there are >= 15 day max values per month:
Calculate maximum day max over the month
Calculate number of day max exceeding each threshhold
3) If there are >= 15 climatology years of data for that month and one in each decade [1981-1990, 1991-2000, 2001-2010] with >= 15 day maxes / means for that month
Calculate 95 percentile of daymax over climatology (1981-2010)
Calculate 95 percentile of daymean over climatology (1981-2010)
IF THERE ARE NOT >= 15 CLIMATOLOGY YEARS OF DATA FOR ANY ONE MONTH AND AT LEAST ONE IN EACH DECADE [1981-1990, 1991-2000, 2001-2010] THEN RETURN BLANKS
4) If there are >= 15 day maxes / means for that month
Calculate day maxes >= 95 percentile day maxes for the month
Calculate day means >= 95 percentile day means for the month
Test applied following call to this function to fail a station with < 12 clims or < 200 monthly obs
Returns:
TheMax: np.array of monthly values - Maximum day max for the month (>= 4 obs per day with 1 in each tercile (0-7, 8-15, 16-23), >= 15 days per month)
TheMax95p: np.array of monthly values - Number of day max exceeding 95percentile for the month (>= 4 obs per day with 1 in each tercile (0-7, 8-15, 16-23), >= 15 days per month)
TheMean95p: np.array of monthly values - Number of day mean exceeding 95percentile for the month (>= 4 obs per day with 1 in each tercile (0-7, 8-15, 16-23), >= 15 days per month)
TheMax95pClim: np.array of climatology values - Climatological day max 95percentile for the month (>= 15 days per month and >= 15 months per climatology)
TheMean95pClim: np.array of climatology values - Climatological day mean 95percentile for the month (>= 15 days per month and >= 15 months per climatology)
TheExceedances: a list of np.array of monthly values - Number of day max exceeding thresholds for the month (>= 4 obs per day with 1 in each tercile (0-7, 8-15, 16-23), >= 15 days per month)
'''
# Set up the times
nhhrs = len(TheDataArr) # number of hours in record - complete years of data including leap years
nddys = (TheDates[1] - TheDates[0]) + 1 # number of days in the record - should be nhhrs / 24 so this is a check
#print('Number of days/hours check: ',nhhrs/24, nddys)
#pdb.set_trace()
TheStYr = dt.date.fromordinal(TheDates[0]).year
TheEdYr = dt.date.fromordinal(TheDates[1]).year
nyyrs = (TheEdYr - TheStYr) + 1 # number of years of record
nmms = nyyrs * 12 # number of months of record
clim_points = [clims[0]-TheStYr,clims[1]-TheStYr] # remember in python that if we're getting a range the last value needs to be +1
climlength = (clims[1] - clims[0]) + 1 # should be 30
#print('Number of years in clim (30?) check: ',climlength)
#pdb.set_trace()
# Set up leap year stuff
# This is already in the code above so may just be referencable but by putting it here this could be a stand alone function
ActYears = np.arange(TheStYr, (TheEdYr + 1)) # array of integer years
LeapIDs = np.repeat(0,nyyrs)
LeapIDs[np.where( ((ActYears/4.) - np.floor(ActYears/4.) == 0.0) & ( ((ActYears/100.) - np.floor(ActYears/100.) != 0.0) | ((ActYears/400.) - np.floor(ActYears/400.) == 0.0)))] = 1 #1s identify leap years
#print('Check the LeapIDs are correct')
#pdb.set_trace()
# Set up temporary arrays
DayArrTwMax = np.reshape(np.repeat(TheMDI,nyyrs*366),(nyyrs,366))
DayArrTwMean = np.reshape(np.repeat(TheMDI,nyyrs*366),(nyyrs,366))
# Set up the final arrays
TheMax = np.repeat(TheMDI,nmms)
TheMaxStd = np.repeat(TheMDI,nmms)
TheMax95p = np.repeat(INTMDI,nmms) # Very important to distinguish between no exceedance days (0) and missing data days (-999)
TheMean95p = np.repeat(INTMDI,nmms)
TheMax95pClim = np.repeat(TheMDI,12)
TheMean95pClim = np.repeat(TheMDI,12)
TheExceedances = [np.repeat(INTMDI,nmms) for i in Thresholds] # return each item in the list separately
# 1) MAKE DAY MAX WHERE THERE ARE >= 4 OBS PER DAY WITH 1 IN EACH TERCILE (0-7, 8-15, 16-23)
# chunk by years then work through each month - allows us to easily ID leap years
MCount = 0 # counter for months
stpoint = 0
edpoint = 0
for yy,year in enumerate(ActYears): # loops through with 0,1973 1,1974 etc
if (LeapIDs[yy] == 0): # not a leap year
MonDays = [31,28,31,30,31,30,31,31,30,31,30,31]
else:
MonDays = [31,29,31,30,31,30,31,31,30,31,30,31]
#print('Check leap year ID')
#pdb.set_trace()
# Now loop through each month
YearDayCount = 0 # COunter for days of year reset each year
for mm in range(12):
# Extract month of data and reform to hrs,days array
edpoint = stpoint + (MonDays[mm]*24)
HrDayArr = np.reshape(TheDataArr[stpoint:edpoint],(MonDays[mm],24)) # each row is a day, each column is an hour
#print('Check stpoint and edpoint and extraction')
#pdb.set_trace()
# Loop through the days
daymaxarr = np.array(())
for dd in range(MonDays[mm]):
# If there are >= 4 obs in the day and one in each tercile then carry on
if ((len(np.where(HrDayArr[dd,:] > TheMDI)[0]) >= 4) &
(len(np.where(HrDayArr[dd,0:8] > TheMDI)[0]) > 0) &
(len(np.where(HrDayArr[dd,8:16] > TheMDI)[0]) > 0) &
(len(np.where(HrDayArr[dd,16:24] > TheMDI)[0]) > 0)):
daymaxarr = np.append(daymaxarr,np.max(HrDayArr[dd,np.where(HrDayArr[dd,:] > TheMDI)])) # np.where not strictly necessary as TheMDI < 0.
DayArrTwMax[yy,YearDayCount] = np.max(HrDayArr[dd,np.where(HrDayArr[dd,:] > TheMDI)]) # np.where not strictly necessary as TheMDI < 0.
DayArrTwMean[yy,YearDayCount] = np.mean(HrDayArr[dd,np.where(HrDayArr[dd,:] > TheMDI)])
YearDayCount += 1
# Add an extra count when it's not a leap year but it is the end of February so that the arrays can be of equal length in days.
if (mm == 1) & (MonDays[mm] == 28) & (YearDayCount == 59):
YearDayCount += 1
# 2) MAKE MONTH MAX DAY MAX AND DAYS >= THRESHOLD IF >= 15 DAYS PRESENT
if (len(daymaxarr) >= 15): # at least 15 day maxima present in the month
TheMax[MCount] = np.max(daymaxarr)
TheMaxStd[MCount] = np.std(daymaxarr)
# Calculate days above threshold
for tt, thresh in enumerate(Thresholds):
TheExceedances[tt][MCount] = len(np.where(daymaxarr >= thresh)[0])
stpoint = np.copy(edpoint)
MCount += 1
# 3) MAKE THE CLIMATOLOGICAL 95 PERCENTILES FOR MAX AND MEAN (If there are >= 15 climatology years of data for that month and one in each decade [1981-1990, 1991-2000, 2001-2010] with >= 15 day maxes / means for that month)
MonDays = [31,29,31,30,31,30,31,31,30,31,30,31]
DayStart = 0
DayEnd = 0
TheMax95p = np.reshape(TheMax95p,(nyyrs,12))
TheMean95p = np.reshape(TheMean95p,(nyyrs,12))
for mm in range(12):
DayStart = np.copy(DayEnd)
DayEnd = DayStart + MonDays[mm]
CountOfDays = np.count_nonzero(DayArrTwMax[clim_points[0]:clim_points[1]+1,DayStart:DayEnd] > TheMDI,1)
# print('check CountOfDays')
# pdb.set_trace()
# *** There should be at least 15 years (50% of climlength) and one year in each decade
if ((np.count_nonzero(CountOfDays >= 15) >= climlength*0.5) &
(np.count_nonzero(CountOfDays[0:10] >= 15) > 0) &
(np.count_nonzero(CountOfDays[10:20] >= 15) > 0) &
(np.count_nonzero(CountOfDays[20:30] >= 15) > 0)):
BinOfDays = DayArrTwMax[clim_points[0]:clim_points[1]+1,DayStart:DayEnd]
TheMax95pClim[mm] = np.percentile(BinOfDays[np.where(BinOfDays > TheMDI)],95)
BinOfDays = DayArrTwMean[clim_points[0]:clim_points[1]+1,DayStart:DayEnd]
TheMean95pClim[mm] = np.percentile(BinOfDays[np.where(BinOfDays > TheMDI)],95)
# print('Check climatology binning')
# pdb.set_trace()
# 4) MAKE THE MONTH DAY COUNTS EXCEEDING PERCENTILE THRESHOLD
for yy in range(nyyrs):
BinOfDays = DayArrTwMax[yy,DayStart:DayEnd]
if (len(np.where(BinOfDays > TheMDI)[0]) >= 15):
TheMax95p[yy,mm] = np.count_nonzero(BinOfDays >= TheMax95pClim[mm])
BinOfDays = DayArrTwMean[yy,DayStart:DayEnd]
if (len(np.where(BinOfDays > TheMDI)[0]) >= 15):
TheMean95p[yy,mm] = np.count_nonzero(BinOfDays >= TheMean95pClim[mm])
else:
#print('Failed to produce climatology')
#pdb.set_trace()
TheMax95p = np.reshape(TheMax95p,nmms)
TheMean95p = np.reshape(TheMean95p,nmms)
return TheMax, TheMaxStd, TheMax95p, TheMean95p, TheMax95pClim, TheMean95pClim, TheExceedances # exit with empty / incomplete return arrays
TheMax95p = np.reshape(TheMax95p,nmms)
TheMean95p = np.reshape(TheMean95p,nmms)
# print('Finished making extremes')
# pdb.set_trace()
return TheMax, TheMaxStd, TheMax95p, TheMean95p, TheMax95pClim, TheMean95pClim, TheExceedances
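# Hedged usage sketch (illustrative; the header lists Tw thresholds of 25,
# 27, 29, 31, 33 and 35 deg C):
# TwThresholds = [25., 27., 29., 31., 33., 35.]
# (twmax, twmaxsd, twmax95p, twmean95p, twmax95pclim, twmean95pclim,
#  twexceeds) = MakeMonthsExtremes(fulltwet_arr, dates, MDI, TwThresholds)
# twexceeds is then a list of six monthly count arrays, one per threshold,
# with INTMDI marking months that had too few day maxima.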
#************************************************************************
# WriteNetCDF
def WriteNetCDF(FileName,TheStYr,TheEdYr,TheClims,TheDataList,DimObject,AttrObject,GlobAttrObject,OLDMDI,INTMDI):
''' WRites NetCDF4 '''
''' Sort out the date/times to write out and time bounds '''
''' Convert variables using the obtained scale_factor and add_offset: stored_var=int((var-offset)/scale) '''
''' Write to file, set up given dimensions, looping through all potential variables and their attributes, and then the provided dictionary of global attributes '''
# # Attributes and things common to all vars
# add_offset = -100.0 # storedval=int((var-offset)/scale)
# scale_factor = 0.01
# Sort out date/times to write out
TimPoints,TimBounds = MakeDaysSince(TheStYr,1,TheEdYr,12,'month',Return_Boundaries = True)
nTims = len(TimPoints)
MonthName = ['January ',
'February ',
'March ',
'April ',
'May ',
'June ',
'July ',
'August ',
'September ',
'October ',
'November ',
'December ']
# No need to convert float data using given scale_factor and add_offset to integers - done within writing program (packV = (V-offset)/scale)
# Not sure what this does to float precision though...
# # Change mdi into an integer -999 because these are stored as integers
# # NOTE THAT THIS CHANGES THE ACTUAL DATA ARRAYS IN THE LIST BECAUSE THE LIST IS JUST A POINTER!!!
# NEWMDI = -999
# for vv in range(len(TheDataList)):
# TheDataList[vv][np.where(TheDataList[vv] == OLDMDI)] = NEWMDI
# Create a new netCDF file - have tried zlib=True,least_significant_digit=3 (and 1) - no difference
ncfw = nc4.Dataset(FileName,'w',format='NETCDF4_CLASSIC') # need to try NETCDF4 and also play with compression but test this first
# Write out the global attributes
if ('description' in GlobAttrObject):
ncfw.description = GlobAttrObject['description']
#print(GlobAttrObject['description'])
if ('File_created' in GlobAttrObject):
ncfw.File_created = GlobAttrObject['File_created']
if ('Title' in GlobAttrObject):
ncfw.Title = GlobAttrObject['Title']
if ('Institution' in GlobAttrObject):
ncfw.Institution = GlobAttrObject['Institution']
if ('History' in GlobAttrObject):
ncfw.History = GlobAttrObject['History']
if ('Licence' in GlobAttrObject):
ncfw.Licence = GlobAttrObject['Licence']
if ('Project' in GlobAttrObject):
ncfw.Project = GlobAttrObject['Project']
if ('Processing_level' in GlobAttrObject):
ncfw.Processing_level = GlobAttrObject['Processing_level']
if ('Acknowledgement' in GlobAttrObject):
ncfw.Acknowledgement = GlobAttrObject['Acknowledgement']
if ('Source' in GlobAttrObject):
ncfw.Source = GlobAttrObject['Source']
if ('Comment' in GlobAttrObject):
ncfw.Comment = GlobAttrObject['Comment']
if ('References' in GlobAttrObject):
ncfw.References = GlobAttrObject['References']
if ('Creator_name' in GlobAttrObject):
ncfw.Creator_name = GlobAttrObject['Creator_name']
if ('Creator_email' in GlobAttrObject):
ncfw.Creator_email = GlobAttrObject['Creator_email']
if ('Version' in GlobAttrObject):
ncfw.Version = GlobAttrObject['Version']
if ('doi' in GlobAttrObject):
ncfw.doi = GlobAttrObject['doi']
if ('Conventions' in GlobAttrObject):
ncfw.Conventions = GlobAttrObject['Conventions']
if ('netcdf_type' in GlobAttrObject):
ncfw.netcdf_type = GlobAttrObject['netcdf_type']
# Loop through and set up the dimension names and quantities
for vv in range(len(DimObject[0])):
ncfw.createDimension(DimObject[0][vv],DimObject[1][vv])
# Go through each dimension and set up the variable and attributes for that dimension if needed
for vv in range(len(DimObject)-2): # ignore first two elements of the list but count all other dictionaries
print(DimObject[vv+2]['var_name'])
# Not 100% sure this works in a loop with overwriting
# initiate variable with name, type and dimensions
MyVar = ncfw.createVariable(DimObject[vv+2]['var_name'],DimObject[vv+2]['var_type'],DimObject[vv+2]['var_dims'])
# Apply any other attributes
if ('standard_name' in DimObject[vv+2]):
MyVar.standard_name = DimObject[vv+2]['standard_name']
if ('long_name' in DimObject[vv+2]):
MyVar.long_name = DimObject[vv+2]['long_name']
if ('units' in DimObject[vv+2]):
MyVar.units = DimObject[vv+2]['units']
if ('axis' in DimObject[vv+2]):
MyVar.axis = DimObject[vv+2]['axis']
if ('calendar' in DimObject[vv+2]):
MyVar.calendar = DimObject[vv+2]['calendar']
if ('start_year' in DimObject[vv+2]):
MyVar.start_year = DimObject[vv+2]['start_year']
if ('end_year' in DimObject[vv+2]):
MyVar.end_year = DimObject[vv+2]['end_year']
if ('start_month' in DimObject[vv+2]):
MyVar.start_month = DimObject[vv+2]['start_month']
if ('end_month' in DimObject[vv+2]):
MyVar.end_month = DimObject[vv+2]['end_month']
if ('bounds' in DimObject[vv+2]):
MyVar.bounds = DimObject[vv+2]['bounds']
# Provide the data to the variable
if (DimObject[vv+2]['var_name'] == 'time'):
MyVar[:] = TimPoints
if (DimObject[vv+2]['var_name'] == 'bounds_time'):
MyVar[:,:] = TimBounds
if (DimObject[vv+2]['var_name'] == 'month'):
for mm in range(12):
MyVar[mm,:] = nc4.stringtochar(np.array(MonthName[mm],dtype='S10'))
# Go through each variable and set up the variable attributes
for vv in range(len(AttrObject)): # ignore first two elements of the list but count all other dictionaries
print(AttrObject[vv]['var_name'])
# Not 100% sure this works in a loop with overwriting
# initiate variable with name, type and dimensions
# MyVar = ncfw.createVariable(AttrObject[vv]['var_name'],AttrObject[vv]['var_type'],AttrObject[vv]['var_dims'],fill_value = NEWMDI)
if (AttrObject[vv]['var_type'] == 'f4'):
MyVar = ncfw.createVariable(AttrObject[vv]['var_name'],AttrObject[vv]['var_type'],AttrObject[vv]['var_dims'],fill_value = OLDMDI)
elif (AttrObject[vv]['var_type'] == 'i4'):
MyVar = ncfw.createVariable(AttrObject[vv]['var_name'],AttrObject[vv]['var_type'],AttrObject[vv]['var_dims'],fill_value = INTMDI)
# Apply any other attributes
if ('long_name' in AttrObject[vv]):
MyVar.long_name = AttrObject[vv]['long_name']
if ('units' in AttrObject[vv]):
MyVar.units = AttrObject[vv]['units']
# MyVar.add_offset = add_offset
# MyVar.scale_factor = scale_factor
# if ('valid_min' in AttrObject[vv]):
# MyVar.valid_min = AttrObject[vv]['valid_min']
# if ('valid_max' in AttrObject[vv]):
# MyVar.valid_max = AttrObject[vv]['valid_max']
MyVar.reference_period = str(TheClims[0])+', '+str(TheClims[1])
# Provide the data to the variable - depending on how many dimensions there are
MyVar[:] = TheDataList[vv]
ncfw.close()
return # WriteNCCF
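# Hedged sketch of the inputs WriteNetCDF expects, inferred from the loops
# above rather than from a definitive spec (names here are hypothetical):
# DimObject = [['time','month','character'], [nmons, 12, 10],
#              dict(var_type='f4', var_name='time', var_dims=('time',),
#                   axis='T', calendar='gregorian', bounds='bounds_time'),
#              dict(var_type='S1', var_name='month', var_dims=('month','character'))]
# AttrObject = [dict(var_name='rh_anoms', var_type='f4', var_dims=('time',),
#                    long_name='...', units='%rh'), ...]
# with TheDataList holding one data array per AttrObject entry, in order.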
#************************************************************************
# WriteASCII
def WriteAscii(TheFilee,TheData,TheID,TheStYr,TheEdYr):
''' Write out to text file '''
nyrs = (TheEdYr - TheStYr) + 1
filee = open(TheFilee,'a+')
for y,yy in enumerate(np.arange(TheStYr,TheEdYr+1)):
#DataString = ' '.join(['%6i' % (i) for i in TheData[y,:]])
#pdb.set_trace()
filee.write('%11s %4s%105s\n' % (TheID,str(yy),' '.join(['%6i' % (i) for i in TheData[y,:]]))) # 105 is 12 * 6 + 11 * 3
filee.close()
return
#************************************************************************
# FailureMode
def FailureMode(FailType,Message,Counter,StationID):
    '''This function prints out the failure mode, stationID and Counter to
    screen and to the relevant file then returns '''
    '''Message should be a string of maximum 30 characters '''
    '''Counter should be max of 999999 '''
    # All failure types log the same line to OUTDITCH; only the screen
    # message differs, so look it up from a dictionary
    FailPrints = {'TooFewHours':'Too few hours in month climatology',
                  'SubzeroDPD':'Subzero values in DPD data',
                  'EarlyRecord':'No data in desired time period',
                  'TooFewMonths':'Too few months with enough data for climatology',
                  'TooFewClims':'Too few climatological months',
                  'ShortStation':'Too few months in record'}
    if (FailType in FailPrints):
        print(FailPrints[FailType])
        filee = open(OUTDITCH,'a+')
        filee.write('%12s %32s %6i \n' % (StationID,Message+': ',Counter))
        filee.close()
    return
#************************************************************************
# MAIN
#************************************************************************
# Read in the SLP data from netCDF
CR20arr,CR20lats,CR20lons = ReadSLPdata(MDI)
# Open and read in station list
#MyTypes = str
MyTypes = ("|U6","|U1","|U5","|U1","|U30","|U1","float","|U1","float","|U1","float","|U1","|U21")
#MyTypes = ("|S6","|S1","|S5","|S1","|S30","|S1","float","|S1","float","|S1","float","|S1","|S21")
#MyTypes = ("str","str","str","str","str","str","float","str","float","str","float","str","str")
#MyTypes = (str,str,str,str,str,str,float,str,float,str,float,str,str)
#MyTypes = "|U5"
# Could try:
#MyTypes = ("|S6","x","|S5","x","|S30","x","float","x","float","x","float","x","|S21")
MyDelimiters = [6,1,5,1,30,1,7,1,8,1,7,1,21]
RawData = ReadData(INSTATLIST,MyTypes,MyDelimiters)
StationListWMO = np.array(RawData['f0'])
StationListWBAN = np.array(RawData['f2'])
StationListLat = np.array(RawData['f6'])
StationListLon = np.array(RawData['f8'])
StationListElev = np.array(RawData['f10'])
StationListCID = np.repeat('XX',len(StationListWMO)) # added later
StationListName = np.array(RawData['f4'])
nstations = len(StationListWMO)
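# Hedged reading of the fixed-width columns above: MyDelimiters gives the
# field widths, so f0 is the 6-char WMO ID, f2 the 5-char WBAN, f4 the
# 30-char station name, and f6/f8/f10 the 7/8/7-char lat/lon/elevation
# floats; the 1-char odd-numbered fields are the separators in between.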
#print('Test to see if station read in has worked correctly and whether there is a more efficient method')
#pdb.set_trace()
# loop through station by station
for st in range(nstations):
# check if restart necessary
if RestartValue != '-----------' and RestartValue != StationListWMO[st]+StationListWBAN[st]:
continue
RestartValue = '-----------'
stationid = StationListWMO[st]+'-'+StationListWBAN[st] # New ISD will have different filenames
outstationid = StationListWMO[st]+StationListWBAN[st] # New ISD will have different filenames
print('Working on ',stationid)
# Find the CID from the ish-history file
# INCIDs is a wildcard file path as the date of download changes each time.
GotCID = 0
FilNameWild = glob.glob(INCIDs)
# with open (FilNameWild[0], 'rt') as myfile:
with open (FilNameWild[0], 'r', errors='ignore') as myfile: # ignore to cope with umlaut : found for 999999-27516 Jan 2021
for line in myfile:
if line.find(StationListWMO[st]+' '+StationListWBAN[st]) != -1: # if there is a match with the WMO ID?
StationListCID[st] = str(line[43:45])
GotCID = 1
break
if (GotCID == 0): # There should always be a CID!
StationListCID[st] = '**'
print('No CID found!')
pdb.set_trace()
#print('Check that the CID search is working')
#pdb.set_trace()
# open the file, extract HadISDH time period data for times, t, td, slp and ws and station sources----------------------------------------------------------
filee = INDIR+stationid+'.nc'
# Need to gunzip then gzip
call(['gunzip',filee+'.gz'])
fulltemp_arr,fulldewp_arr,fullws_arr,fullslp_arr,tims,obssource = GetHadISD(filee,isd_full_times,MDI)
call(['gzip',filee])
# Catch failure from no data in desired time period
if (len(fulltemp_arr[np.where(fulltemp_arr > MDI)[0]]) == 0):
FailureMode('EarlyRecord','No data in period',0,stationid)
continue
#print('TOTAL TEMPS and DEWPS: ',len(np.where(fulltemp_arr > MDI)[0]),' ',len(np.where(fulldewp_arr > MDI)[0]))
#pdb.set_trace()
# Convert to other variables
statP_arr = np.repeat(MDI, ntims)
fullddep_arr = np.repeat(MDI, ntims)
fulltwet_arr = np.repeat(MDI, ntims)
fullevap_arr = np.repeat(MDI, ntims)
fullqhum_arr = np.repeat(MDI, ntims)
fullrhum_arr = np.repeat(MDI, ntims)
# Double check there are vaguely enough data in the climatology period
if (len(np.where((fulltemp_arr[clpointies] > MDI) & (fulldewp_arr[clpointies] > MDI))[0]) > 24000): # 300 days for 20 years with 4 obs per day.
#There are provisionally enough data - yippee!
# Get the climatological station P
# FEB2013 use CR20 climatological MSLP and climatological temperature (from HadISD) to get a climatological station P for each time point (duplicated year to year for ease)
# so we need all January hours over the climatology period etc.
tempyearsarr = np.empty((8784,climsum))
tempyearsarr.fill(MDI) # 30 year by all hours (including leaps) array - containing the years within the climatology period
tempclimsarr = np.empty((8784,nyrs))
tempclimsarr.fill(MDI) # all years by all hours (including leaps) array - to fill with climatological daily means
slpclimsarr = np.empty((8784,nyrs))
slpclimsarr.fill(MDI) # array with all hours including leaps present for each year
temppointer = 0
# Pull out the climatological data
for yrfill in range(nyrs):
if ((yrfill >= stclim) & (yrfill <= edclim)):
if (leapsids[yrfill] != 1): # not a leap year so fill to Feb 28th and from Mar 1st
tempyearsarr[0:1416,yrfill-stclim] = fulltemp_arr[temppointer:temppointer+1416]
tempyearsarr[1440:8784,yrfill-stclim] = fulltemp_arr[temppointer+1416:temppointer+8760]
temppointer = temppointer+8760
else:
tempyearsarr[:,yrfill-stclim] = fulltemp_arr[temppointer:temppointer+8784]
temppointer = temppointer+8784
else:
if (leapsids[yrfill] != 1): # not a leap year so fill to Feb 28th and from Mar 1st
temppointer = temppointer+8760
else:
temppointer = temppointer+8784
# now subset to get clims for each month, fill tempclimsarr with those clims, slpclimsarr with CR20 MSLP for the closest gridbox
matchlats = np.where(CR20lats < StationListLat[st])
thelat = matchlats[0][0] - 1
# Need a catch in case the longitude is < -179 or > 179
if ((StationListLon[st] > 179.) | (StationListLon[st] < -179.)):
thelon = 179
else:
matchlons = np.where(CR20lons > StationListLon[st])
thelon = matchlons[0][0] - 1
#print('Check the lat and lon matching bit')
#pdb.set_trace()
# Annoyingly we need a catch in here to check if any of the months do not have data over the climatology period just in case
# THERE MUST BE AT LEAST 15 years of data over the climatology with 4 hours a day and 80% of days
# For February that's ((28*4)*0.8)*15 = 1344 observations THIS IS DIFFERENT TO EARLIER CODE (Feb 2020) SO MAY KICK OUT MORE STATIONS
BadMonth = 0 # Counter for months with too few data for climatology to catch failure
for mm,Mon in enumerate(MonArr):
lotsofhours = tempyearsarr[HrDict[Mon+'Hrs'],:] # calculate T clims over clim period 1981-2010
if (len(lotsofhours[np.where(lotsofhours > MDI)]) >= 1344):
tempclimsarr[HrDict[Mon+'Hrs'],:] = np.median(lotsofhours[np.where(lotsofhours > MDI)])
slpclimsarr[HrDict[Mon+'Hrs'],:] = CR20arr[mm,thelat,thelon]
# if there isn't enough data then fail
else:
BadMonth = BadMonth + 1
if (BadMonth > 0):
FailureMode('TooFewMonths','Months with no climatology',BadMonth,stationid)
continue
#print('Are you happy with how the climatology is being done?')
#pdb.set_trace()
# now convert back to fulltemp_arr space without fake leap years - converting standard P too
temppointer = 0
for yrfill in range(nyrs):
if (leapsids[yrfill] != 1): # not a leap year so fill to Feb 28th and from Mar 1st
#pdb.set_trace()
# AS WE'RE USING STATION T NOT SEA LEVEL T, GETTING THE RATIO OF SEA LEVEL T TO STATION T NEEDS A REARRANGEMENT OF (slT-HeightConv)/slT TO stT/(stT+HeightConv)
statP_arr[temppointer:temppointer+1416] = slpclimsarr[0:1416,yrfill] * (( (273.15 + tempclimsarr[0:1416,yrfill]) /
((273.15 + tempclimsarr[0:1416,yrfill]) + (0.0065 * StationListElev[st])) ) **5.256)
statP_arr[temppointer+1416:temppointer+8760] = slpclimsarr[1440:8784,yrfill] * (( (273.15 + tempclimsarr[1440:8784,yrfill]) /
((273.15 + tempclimsarr[1440:8784,yrfill]) + (0.0065*StationListElev[st])) )**5.256)
temppointer = temppointer + 8760
else:
statP_arr[temppointer:temppointer+8784] = slpclimsarr[:,yrfill] * (( (273.15 + tempclimsarr[:,yrfill]) /
((273.15 + tempclimsarr[:,yrfill]) + (0.0065 * StationListElev[st])) )**5.256)
temppointer = temppointer + 8784
#print('Check the station pressures')
#pdb.set_trace()
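# Illustrative sketch only (hypothetical helper, not part of the HadISDH pipeline): the loop
# above applies the hypsometric relation P_station = P_msl * (T / (T + L*z))**5.256 with
# L = 0.0065 K/m, T the climatological station temperature (K) and z the elevation (m).
def _mslp_to_station_p(p_msl_hpa, t_clim_c, elev_m):
    t_k = 273.15 + t_clim_c # climatological temperature in Kelvin
    return p_msl_hpa * (t_k / (t_k + 0.0065 * elev_m)) ** 5.256
# e.g. _mslp_to_station_p(1013.25, 15.0, 500.) is roughly 955 hPa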
# Calculate Dew point depression
gots = np.where((fulltemp_arr > MDI) & (fulldewp_arr > MDI))
fullddep_arr[gots[0]] = fulltemp_arr[gots[0]] - fulldewp_arr[gots[0]]
# Check DPD for subzeros - there really shouldn't be any as QC should have picked this up
if (len(np.where(fullddep_arr[gots[0]] < 0.)[0]) > 0):
FailureMode('SubzeroDPD','No. of subzero DPDs',len(np.where(fullddep_arr[gots[0]] < 0.)[0]),stationid)
continue
# Calculate vapour pressure - over ice if twet <= 0.0
fullevap_arr[gots[0]] = CalcHums.vap(fulldewp_arr[gots[0]],fulltemp_arr[gots[0]],statP_arr[gots[0]],roundit=False) #station_P
# Calculate relative humidity - over ice if twet <= 0.0
fullrhum_arr[gots[0]] = CalcHums.rh(fulldewp_arr[gots[0]],fulltemp_arr[gots[0]],statP_arr[gots[0]],roundit=False) #station_P
# Calculate specific humidity - over ice if twet <= 0.0
fullqhum_arr[gots[0]] = CalcHums.sh(fulldewp_arr[gots[0]],fulltemp_arr[gots[0]],statP_arr[gots[0]],roundit=False) #station_P
# Calculate wetbulb temperature - over ice if twet <= 0.0
fulltwet_arr[gots[0]] = CalcHums.wb(fulldewp_arr[gots[0]],fulltemp_arr[gots[0]],statP_arr[gots[0]],roundit=False) #station_P
else: # too few hours for climatology
FailureMode('TooFewHours','Hours of good data',len(np.where((fulltemp_arr[clpointies] > MDI) & (fulldewp_arr[clpointies] > MDI))[0]),stationid)
continue
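# For orientation only: a Magnus-type saturation vapour pressure sketch (hedged - the
# operational CalcHums module has its own formulae, enhancement factors and over-ice logic):
def _vap_magnus_water(td_c):
    # WMO Magnus coefficients; vapour pressure in hPa from dewpoint in deg C, over water
    return 6.112 * np.exp(17.62 * td_c / (243.12 + td_c))
# e.g. _vap_magnus_water(10.0) is roughly 12.3 hPa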
#create monthly means/anoms/clims/sds------------------------------------
RHanoms_mm, RHabs_mm, RHsd_mm, RHclims_mm,RHclimSD_mm = MakeMonths(fullrhum_arr,dates,MDI) # will return MDI for RHclims_mm if a month fails a climatology count
#print('Check the monthly means - are the anoms and abs present identically?')
#pdb.set_trace()
# Test RH clims to see if they have been calculated because RH requires both T and Td to be present
if (len(np.where(RHclims_mm > MDI)[0]) < 12):
FailureMode('TooFewClims','No of Months Clim',len(np.where(RHclims_mm > MDI)[0]),stationid)
continue
# Check number of months in station and if < 200 then ditch
if (len(np.where(RHanoms_mm > MDI)[0]) < 200):
FailureMode('ShortStation','Months present',len(np.where(RHanoms_mm > MDI)[0]),stationid)
continue
Panoms_mm, Pabs_mm, Psd_mm, Pclims_mm, PclimSD_mm = MakeMonths(statP_arr,dates,MDI)
Tanoms_mm, Tabs_mm, Tsd_mm, Tclims_mm, TclimSD_mm = MakeMonths(fulltemp_arr,dates,MDI)
Tdanoms_mm, Tdabs_mm, Tdsd_mm, Tdclims_mm, TdclimSD_mm = MakeMonths(fulldewp_arr,dates,MDI)
DPDanoms_mm, DPDabs_mm, DPDsd_mm, DPDclims_mm, DPDclimSD_mm = MakeMonths(fullddep_arr,dates,MDI)
# Derive Td and DPD at the monthly resolution for comparison
derivedDPDabs_mm = np.repeat(MDI,nmons)
derivedTdabs_mm = np.repeat(MDI,nmons)
gots = np.where(DPDabs_mm > MDI)
if (len(gots[0]) > 0):
derivedDPDabs_mm[gots[0]] = Tabs_mm[gots[0]] - Tdabs_mm[gots[0]]
derivedTdabs_mm[gots[0]] = Tabs_mm[gots[0]] - DPDabs_mm[gots[0]]
Twanoms_mm, Twabs_mm, Twsd_mm, Twclims_mm, TwclimSD_mm = MakeMonths(fulltwet_arr,dates,MDI)
eanoms_mm, eabs_mm, esd_mm, eclims_mm, eclimSD_mm = MakeMonths(fullevap_arr,dates,MDI)
qanoms_mm, qabs_mm, qsd_mm, qclims_mm, qclimSD_mm = MakeMonths(fullqhum_arr,dates,MDI)
WSanoms_mm, WSabs_mm, WSsd_mm, WSclims_mm, WSclimSD_mm = MakeMonths(fullws_arr,dates,MDI)
SLPanoms_mm, SLPabs_mm, SLPsd_mm, SLPclims_mm, SLPclimSD_mm = MakeMonths(fullslp_arr,dates,MDI)
Tw_max, Tw_max_std, Twmax_95p, Twmean_95p, Twmax95p_clim, Twmean95p_clim, Tw_TEX = MakeMonthsExtremes(fulltwet_arr,dates,MDI,[25., 27., 29., 31., 33., 35])
# Unpack Exceedances
Tw25_tex, Tw27_tex, Tw29_tex, Tw31_tex, Tw33_tex, Tw35_tex = Tw_TEX
# print('Check the extremes')
# pdb.set_trace()
# RH completeness test should have kicked out a station with too few obs but the extremes use different calculations so check again to be sure
# Test Tw extreme clims to see if they have been calculated
if (len(np.where(Twmax95p_clim > MDI)[0]) < 12):
FailureMode('TooFewClims','No of Months Extreme Clim',len(np.where(Twmax95p_clim > MDI)[0]),stationid)
# continue
# Check number of months in station and if < 200 then ditch
elif (len(np.where(Tw_max > MDI)[0]) < 200):
FailureMode('ShortStation','Extremes months present',len(np.where(Tw_max > MDI)[0]),stationid)
# continue
else:
# Print out station listing in keep file
filee = open(OUTTWEX,'a+')
filee.write('%11s%8.4f %9.4f %6.1f %2s %29s%8s%4i\n' % (outstationid,StationListLat[st],StationListLon[st],StationListElev[st],StationListCID[st],StationListName[st],'MONTHS: ',len(np.where(Tw_max > MDI)[0])))
# NOTE: using -1e30 (MDI) here.
filee.close()
# List data together to pass to NetCDF writer
# DataList = [RHanoms_mm, RHabs_mm, RHsd_mm, RHclims_mm, RHclimSD_mm, Tanoms_mm, Tabs_mm, Tsd_mm, Tclims_mm, TclimSD_mm,
# Tdanoms_mm, Tdabs_mm, Tdsd_mm, Tdclims_mm, TdclimSD_mm, DPDanoms_mm, DPDabs_mm, DPDsd_mm, DPDclims_mm, DPDclimSD_mm,
# Twanoms_mm, Twabs_mm, Twsd_mm, Twclims_mm, TwclimSD_mm, eanoms_mm, eabs_mm, esd_mm, eclims_mm, eclimSD_mm,
# qanoms_mm, qabs_mm, qsd_mm, qclims_mm, qclimSD_mm, WSanoms_mm, WSabs_mm, WSsd_mm, WSclims_mm, WSclimSD_mm,
# SLPanoms_mm, SLPabs_mm, SLPsd_mm, SLPclims_mm, SLPclimSD_mm, Pabs_mm,derivedDPDabs_mm,derivedTdabs_mm]
DataList = [RHanoms_mm, RHabs_mm, RHsd_mm, RHclims_mm, RHclimSD_mm, Tanoms_mm, Tabs_mm, Tsd_mm, Tclims_mm, TclimSD_mm,
Tdanoms_mm, Tdabs_mm, Tdsd_mm, Tdclims_mm, TdclimSD_mm, DPDanoms_mm, DPDabs_mm, DPDsd_mm, DPDclims_mm, DPDclimSD_mm,
Twanoms_mm, Twabs_mm, Twsd_mm, Twclims_mm, TwclimSD_mm, eanoms_mm, eabs_mm, esd_mm, eclims_mm, eclimSD_mm,
qanoms_mm, qabs_mm, qsd_mm, qclims_mm, qclimSD_mm, WSanoms_mm, WSabs_mm, WSsd_mm, WSclims_mm, WSclimSD_mm,
SLPanoms_mm, SLPabs_mm, SLPsd_mm, SLPclims_mm, SLPclimSD_mm, Pabs_mm,derivedDPDabs_mm,derivedTdabs_mm,
Tw_max, Tw_max_std, Twmax_95p, Twmean_95p, Twmax95p_clim, Twmean95p_clim, Tw25_tex, Tw27_tex, Tw29_tex, Tw31_tex, Tw33_tex, Tw35_tex]
DimList=[['time','month','characters','bound_pairs'],
[nmons,12,10,2],
dict([('var_type','f4'),
('var_name','time'),
('var_dims',('time',)),
('standard_name','time'),
('long_name','time'),
('units','days since 1973-1-1 00:00:00'),
('axis','T'),
('calendar','gregorian'),
('start_year',styear),
('end_year',edyear),
('start_month',1),
('end_month',12),
('bounds','bounds_time')]),
dict([('var_type','i4'),
('var_name','bounds_time'),
('var_dims',('time','bound_pairs',)),
('standard_name','time'),
('long_name','time period boundaries')]),
dict([('var_type','S1'),
('var_name','month'),
('var_dims',('month','characters',)),
('long_name','month of year')])]
# Attribute list for variables
AttrList=[dict([('var_type','f4'),
('var_name','rh_anoms'),
('var_dims',('time',)),
('long_name','near surface (~2m) relative humidity monthly mean anomaly'),
('units','%rh')]),
dict([('var_type','f4'),
('var_name','rh_abs'),
('var_dims',('time',)),
('long_name','near surface (~2m) relative humidity monthly mean'),
('units','%rh')]),
dict([('var_type','f4'),
('var_name','rh_std'),
('var_dims',('time',)),
('long_name','near surface (~2m) relative humidity monthly standard deviations'),
('units','%rh')]),
dict([('var_type','f4'),
('var_name','rh_clims'),
('var_dims',('month',)),
('long_name','near surface (~2m) relative humidity monthly climatology '+str(clims[0])+'-'+str(clims[1])),
('units','%rh')]),
dict([('var_type','f4'),
('var_name','rh_climSDs'),
('var_dims',('month',)),
('long_name','near surface (~2m) relative humidity monthly climatological standard deviation '+str(clims[0])+'-'+str(clims[1])),
('units','%rh')]),
dict([('var_type','f4'),
('var_name','t_anoms'),
('var_dims',('time',)),
('long_name','near surface (~2m) air temperature monthly mean anomaly'),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','t_abs'),
('var_dims',('time',)),
('long_name','near surface (~2m) air temperature monthly mean'),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','t_std'),
('var_dims',('time',)),
('long_name','near surface (~2m) air temperature monthly standard deviations'),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','t_clims'),
('var_dims',('month',)),
('long_name','near surface (~2m) air temperature monthly climatology '+str(clims[0])+'-'+str(clims[1])),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','t_climSDs'),
('var_dims',('month',)),
('long_name','near surface (~2m) air temperature monthly climatological standard deviation '+str(clims[0])+'-'+str(clims[1])),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','td_anoms'),
('var_dims',('time',)),
('long_name','near surface (~2m) dewpoint temperature monthly mean anomaly'),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','td_abs'),
('var_dims',('time',)),
('long_name','near surface (~2m) dewpoint temperature monthly mean'),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','td_std'),
('var_dims',('time',)),
('long_name','near surface (~2m) dewpoint temperature monthly standard deviations'),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','td_clims'),
('var_dims',('month',)),
('long_name','near surface (~2m) dewpoint temperature monthly climatology '+str(clims[0])+'-'+str(clims[1])),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','td_climSDs'),
('var_dims',('month',)),
('long_name','near surface (~2m) dewpoint temperature monthly climatological standard deviation '+str(clims[0])+'-'+str(clims[1])),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','dpd_anoms'),
('var_dims',('time',)),
('long_name','near surface (~2m) dewpoint depression monthly mean anomaly'),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','dpd_abs'),
('var_dims',('time',)),
('long_name','near surface (~2m) dewpoint depression monthly mean'),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','dpd_std'),
('var_dims',('time',)),
('long_name','near surface (~2m) dewpoint depression monthly standard deviations'),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','dpd_clims'),
('var_dims',('month',)),
('long_name','near surface (~2m) dewpoint depression monthly climatology '+str(clims[0])+'-'+str(clims[1])),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','dpd_climSDs'),
('var_dims',('month',)),
('long_name','near surface (~2m) dewpoint depression monthly climatological standard deviation '+str(clims[0])+'-'+str(clims[1])),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','tw_anoms'),
('var_dims',('time',)),
('long_name','near surface (~2m) wetbulb temperature monthly mean anomaly'),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','tw_abs'),
('var_dims',('time',)),
('long_name','near surface (~2m) wetbulb temperature monthly mean'),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','tw_std'),
('var_dims',('time',)),
('long_name','near surface (~2m) wetbulb temperature monthly standard deviations'),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','tw_clims'),
('var_dims',('month',)),
('long_name','near surface (~2m) wetbulb temperature monthly climatology '+str(clims[0])+'-'+str(clims[1])),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','tw_climSDs'),
('var_dims',('month',)),
('long_name','near surface (~2m) wetbulb temperature monthly climatological standard deviation '+str(clims[0])+'-'+str(clims[1])),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','e_anoms'),
('var_dims',('time',)),
('long_name','near surface (~2m) vapour pressure monthly mean anomaly'),
('units','hPa')]),
dict([('var_type','f4'),
('var_name','e_abs'),
('var_dims',('time',)),
('long_name','near surface (~2m) vapour pressure monthly mean'),
('units','hPa')]),
dict([('var_type','f4'),
('var_name','e_std'),
('var_dims',('time',)),
('long_name','near surface (~2m) vapour pressure monthly standard deviations'),
('units','hPa')]),
dict([('var_type','f4'),
('var_name','e_clims'),
('var_dims',('month',)),
('long_name','near surface (~2m) vapour pressure monthly climatology '+str(clims[0])+'-'+str(clims[1])),
('units','hPa')]),
dict([('var_type','f4'),
('var_name','e_climSDs'),
('var_dims',('month',)),
('long_name','near surface (~2m) vapour pressure monthly climatological standard deviation '+str(clims[0])+'-'+str(clims[1])),
('units','hPa')]),
dict([('var_type','f4'),
('var_name','q_anoms'),
('var_dims',('time',)),
('long_name','near surface (~2m) specific humidity monthly mean anomaly'),
('units','g/kg')]),
dict([('var_type','f4'),
('var_name','q_abs'),
('var_dims',('time',)),
('long_name','near surface (~2m) specific humidity monthly mean'),
('units','g/kg')]),
dict([('var_type','f4'),
('var_name','q_std'),
('var_dims',('time',)),
('long_name','near surface (~2m) specific humidity monthly standard deviations'),
('units','g/kg')]),
dict([('var_type','f4'),
('var_name','q_clims'),
('var_dims',('month',)),
('long_name','near surface (~2m) specific humidity monthly climatology '+str(clims[0])+'-'+str(clims[1])),
('units','g/kg')]),
dict([('var_type','f4'),
('var_name','q_climSDs'),
('var_dims',('month',)),
('long_name','near surface (~2m) specific humidity monthly climatological standard deviation '+str(clims[0])+'-'+str(clims[1])),
('units','g/kg')]),
dict([('var_type','f4'),
('var_name','ws_anoms'),
('var_dims',('time',)),
('long_name','near surface (~10m) wind speed monthly mean anomaly'),
('units','m/s')]),
dict([('var_type','f4'),
('var_name','ws_abs'),
('var_dims',('time',)),
('long_name','near surface (~10m) wind speed monthly mean'),
('units','m/s')]),
dict([('var_type','f4'),
('var_name','ws_std'),
('var_dims',('time',)),
('long_name','near surface (~10m) wind speed monthly standard deviations'),
('units','m/s')]),
dict([('var_type','f4'),
('var_name','ws_clims'),
('var_dims',('month',)),
('long_name','near surface (~10m) wind speed monthly climatology '+str(clims[0])+'-'+str(clims[1])),
('units','m/s')]),
dict([('var_type','f4'),
('var_name','ws_climSDs'),
('var_dims',('month',)),
('long_name','near surface (~10m) wind speed monthly climatological standard deviation '+str(clims[0])+'-'+str(clims[1])),
('units','m/s')]),
dict([('var_type','f4'),
('var_name','slp_anoms'),
('var_dims',('time',)),
('long_name','near surface (~2m) station level pressure monthly mean anomaly'),
('units','hPa')]),
dict([('var_type','f4'),
('var_name','slp_abs'),
('var_dims',('time',)),
('long_name','near surface (~2m) station level pressure monthly mean'),
('units','hPa')]),
dict([('var_type','f4'),
('var_name','slp_std'),
('var_dims',('time',)),
('long_name','near surface (~2m) station level pressure monthly standard deviations'),
('units','hPa')]),
dict([('var_type','f4'),
('var_name','slp_clims'),
('var_dims',('month',)),
('long_name','near surface (~2m) station level pressure monthly climatology '+str(clims[0])+'-'+str(clims[1])),
('units','hPa')]),
dict([('var_type','f4'),
('var_name','slp_climSDs'),
('var_dims',('month',)),
('long_name','near surface (~2m) station level pressure monthly climatological standard deviation '+str(clims[0])+'-'+str(clims[1])),
('units','hPa')]),
dict([('var_type','f4'),
('var_name','20CRstation_Pclim'),
('var_dims',('time',)),
('long_name','near surface (~2m) 20CRv2c station level pressure monthly climatological mean '+str(clims[0])+'-'+str(clims[1])),
('units','hPa')]),
dict([('var_type','f4'),
('var_name','de_dpd_abs'),
('var_dims',('time',)),
('long_name','near surface (~2m) derived (T-Td) dewpoint depression monthly mean'),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','de_td_abs'),
('var_dims',('time',)),
('long_name','near surface (~2m) derived dewpoint temperature (T-DPD) monthly mean'),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','tw_max'),
('var_dims',('time',)),
('long_name','near surface (~2m) maximum daily maximum wetbulb temperature in the month'),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','tw_max_std'),
('var_dims',('time',)),
('long_name','near surface (~2m) standard deviation of daily maximum wetbulb temperature in the month'),
('units','deg C')]),
dict([('var_type','i4'),
('var_name','tw_max_95p'),
('var_dims',('time',)),
('long_name','days per month exceeding 95th percentile (1981-2010) of near surface (~2m) daily maximum wetbulb temperature'),
('units','1')]),
dict([('var_type','i4'),
('var_name','tw_mean_95p'),
('var_dims',('time',)),
('long_name','days per month exceeding 95th percentile (1981-2010) of near surface (~2m) daily mean wetbulb temperature'),
('units','1')]),
dict([('var_type','f4'),
('var_name','tw_max_95p_clim'),
('var_dims',('month',)),
('long_name','climatological 95th percentile (1981-2010) of near surface (~2m) daily maximum wetbulb temperature'),
('units','deg C')]),
dict([('var_type','f4'),
('var_name','tw_mean_95p_clim'),
('var_dims',('month',)),
('long_name','climatological 95th percentile (1981-2010) of near surface (~2m) daily mean wetbulb temperature'),
('units','deg C')]),
dict([('var_type','i4'),
('var_name','tw_max_ex25'),
('var_dims',('time',)),
('long_name','days per month of near surface (~2m) daily maximum wetbulb temperature exceeding 25 degrees'),
('units','1')]),
dict([('var_type','i4'),
('var_name','tw_max_ex27'),
('var_dims',('time',)),
('long_name','days per month of near surface (~2m) daily maximum wetbulb temperature exceeding 27 degrees'),
('units','1')]),
dict([('var_type','i4'),
('var_name','tw_max_ex29'),
('var_dims',('time',)),
('long_name','days per month of near surface (~2m) daily maximum wetbulb temperature exceeding 29 degrees'),
('units','1')]),
dict([('var_type','i4'),
('var_name','tw_max_ex31'),
('var_dims',('time',)),
('long_name','days per month of near surface (~2m) daily maximum wetbulb temperature exceeding 31 degrees'),
('units','1')]),
dict([('var_type','i4'),
('var_name','tw_max_ex33'),
('var_dims',('time',)),
('long_name','days per month of near surface (~2m) daily maximum wetbulb temperature exceeding 33 degrees'),
('units','1')]),
dict([('var_type','i4'),
('var_name','tw_max_ex35'),
('var_dims',('time',)),
('long_name','days per month of near surface (~2m) daily maximum wetbulb temperature exceeding 35 degrees'),
('units','1')])]
GlobAttrObjectList = dict([['File_created',dt.datetime.strftime(dt.datetime.now(), '%Y-%m-%d %H:%M:%S')], # Is there a call for time stamping?
['Description','HadISDH monthly mean land surface raw data'],
['Title','HadISDH monthly mean land surface raw climate monitoring product'],
['Institution', AttribDict['Institution']],
['History', AttribDict['History']],
['Licence', AttribDict['NCLicence']],
['Project', AttribDict['Project']],
['Processing_level', AttribDict['Processing_level']],
['Acknowledgement', AttribDict['Acknowledgement']],
['Source', 'HadISD '+hadisdversiondots+' '+AttribDict['Source']],
['Comment',''],
['References', AttribDict['References']],
['Creator_name', AttribDict['Creator_name']],
['Creator_email', AttribDict['Creator_email']],
['Version', versiondots],
['doi',''], # This needs to be filled in
['Conventions', AttribDict['Conventions']],
['netCDF_type', AttribDict['netCDF_type']]])
# Write out monthly data to netCDF
WriteNetCDF(OUTNCF+stationid+NCSUFFIX,styear,edyear,clims,DataList,DimList,AttrList,GlobAttrObjectList,MDI,INTMDI)
# Write out anoms and abs for PHA and just for ascii versions
# First reform the data arrays
# Now convert missing to -9999 and cross-check abs and anoms (should match but maybe they don't)
# Do this within list which should also change the master
# ReList data together to pass to ASCII writer
DataList = [RHanoms_mm, RHabs_mm,
Tanoms_mm, Tabs_mm,
Tdanoms_mm, Tdabs_mm,
DPDanoms_mm, DPDabs_mm,
Twanoms_mm, Twabs_mm,
eanoms_mm, eabs_mm,
qanoms_mm, qabs_mm,
WSanoms_mm, WSabs_mm,
SLPanoms_mm, SLPabs_mm,
derivedDPDabs_mm,derivedTdabs_mm,
Tw_max, Twmax_95p,
Twmean_95p, Tw25_tex,
Tw27_tex, Tw29_tex,
Tw31_tex, Tw33_tex,
Tw35_tex]
# If PHAActuals = True then push actuals to PHA else push anomalies
if (PHAActuals):
OutListRAW = dict([('1', OUTRAWrh+'monthly/raw/'+outstationid+RAWSUFFIX),
('3',OUTRAWt+'monthly/raw/'+outstationid+RAWSUFFIX),
('5',OUTRAWtd+'monthly/raw/'+outstationid+RAWSUFFIX),
('9',OUTRAWtw+'monthly/raw/'+outstationid+RAWSUFFIX),
('11',OUTRAWe+'monthly/raw/'+outstationid+RAWSUFFIX),
('13',OUTRAWq+'monthly/raw/'+outstationid+RAWSUFFIX),
('15',OUTRAWws+'monthly/raw/'+outstationid+RAWSUFFIX),
('17',OUTRAWslp+'monthly/raw/'+outstationid+RAWSUFFIX),
('18',OUTRAWdpd+'monthly/raw/'+outstationid+RAWSUFFIX)])
else:
# If PHAActuals = False then push anomalies to PHA
OutListRAW = dict([('0', OUTRAWrh+'monthly/raw/'+outstationid+RAWSUFFIX),
('2',OUTRAWt+'monthly/raw/'+outstationid+RAWSUFFIX),
('4',OUTRAWtd+'monthly/raw/'+outstationid+RAWSUFFIX),
('8',OUTRAWtw+'monthly/raw/'+outstationid+RAWSUFFIX),
('10',OUTRAWe+'monthly/raw/'+outstationid+RAWSUFFIX),
('12',OUTRAWq+'monthly/raw/'+outstationid+RAWSUFFIX),
('14',OUTRAWws+'monthly/raw/'+outstationid+RAWSUFFIX),
('16',OUTRAWslp+'monthly/raw/'+outstationid+RAWSUFFIX),
('6',OUTRAWdpd+'monthly/raw/'+outstationid+RAWSUFFIX)]) # NOTE: WITH ANOMALIES WE'RE NOT USING DERIVED DPD (T - Td)!!!
OutListAll = dict([('0',OUTASC+'RHANOMS/'+stationid+'_RH'+ANOMSUFFIX),
('1',OUTASC+'RHABS/'+stationid+'_RH'+ABSSUFFIX),
('2',OUTASC+'TANOMS/'+stationid+'_T'+ANOMSUFFIX),
('3',OUTASC+'TABS/'+stationid+'_T'+ABSSUFFIX),
('4',OUTASC+'TDANOMS/'+stationid+'_Td'+ANOMSUFFIX),
('5',OUTASC+'TDABS/'+stationid+'_Td'+ABSSUFFIX),
('6',OUTASC+'DPDANOMS/'+stationid+'_DPD'+ANOMSUFFIX),
('7',OUTASC+'DPDABS/'+stationid+'_DPD'+ABSSUFFIX),
('8',OUTASC+'TWANOMS/'+stationid+'_Tw'+ANOMSUFFIX),
('9',OUTASC+'TWABS/'+stationid+'_Tw'+ABSSUFFIX),
('10',OUTASC+'EANOMS/'+stationid+'_e'+ANOMSUFFIX),
('11',OUTASC+'EABS/'+stationid+'_e'+ABSSUFFIX),
('12',OUTASC+'QANOMS/'+stationid+'_q'+ANOMSUFFIX),
('13',OUTASC+'QABS/'+stationid+'_q'+ABSSUFFIX),
('14',OUTASC+'WSANOMS/'+stationid+'_WS'+ANOMSUFFIX),
('15',OUTASC+'WSABS/'+stationid+'_WS'+ABSSUFFIX),
('16',OUTASC+'SLPANOMS/'+stationid+'_SLP'+ANOMSUFFIX),
('17',OUTASC+'SLPABS/'+stationid+'_SLP'+ABSSUFFIX),
('18',OUTASC+'derivedDPDABS/'+stationid+'_deDPD'+ABSSUFFIX),
('19',OUTASC+'derivedTDABS/'+stationid+'_deTd'+ABSSUFFIX),
('20',OUTASC+'WBMAX/'+stationid+'_TwMax'+ABSSUFFIX),
('21',OUTASC+'WBMAX95p/'+stationid+'_TwMax95p'+ABSSUFFIX),
('22',OUTASC+'WBMEAN95p/'+stationid+'_TwMean95p'+ABSSUFFIX),
('23',OUTASC+'WBTHRESH25/'+stationid+'_TwThresh25'+ABSSUFFIX),
('24',OUTASC+'WBTHRESH27/'+stationid+'_TwThresh27'+ABSSUFFIX),
('25',OUTASC+'WBTHRESH29/'+stationid+'_TwThresh29'+ABSSUFFIX),
('26',OUTASC+'WBTHRESH31/'+stationid+'_TwThresh31'+ABSSUFFIX),
('27',OUTASC+'WBTHRESH33/'+stationid+'_TwThresh33'+ABSSUFFIX),
('28',OUTASC+'WBTHRESH35/'+stationid+'_TwThresh35'+ABSSUFFIX)])
# get mask of qanoms_mm to mask out other variables NOT 100% SURE WE NEED/WANT TO DO THIS
# NOTE THAT NOW THE MDI IS -999 BECAUSE THIS WAS SET WITHIN WriteNetCDF FOR NetCDF output as integers
# print('Check t arrays prior to conversion')
# pdb.set_trace()
# NEWMDI = -999.
# qmask = np.where(qanoms_mm == NEWMDI)
qmask = np.where(qanoms_mm == MDI)
for v,vv in enumerate(DataList):
# For float data:
if (v < 21): # Hopefully there isn't any floating point silliness here...
# mask to qanoms
vv[qmask] = ASCMDI
#pdb.set_trace()
# change MDI to ASCMDI (-99.99) and multiply values by 100, rounding to whole numbers (so missing becomes -9999)
# vv[np.where(vv == NEWMDI)] = -99.99
vv[np.where(vv == MDI)] = ASCMDI
vv = np.round(vv * 100).astype(int)
# For the day counts the missing data indicator is -999
elif (v >= 21):
# mask to qanoms
vv[qmask] = INTMDI
# reshape to print out a row of 12 months for each year
vv = np.reshape(vv,(nyrs,12))
# Write to ascii
WriteAscii(OutListAll[str(v)],vv,outstationid,styear,edyear)
if (str(v) in OutListRAW):
WriteAscii(OutListRAW[str(v)],vv,outstationid,styear,edyear)
# print('Check writing to file has worked')
# pdb.set_trace()
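# Aside (illustrative): slice assignment like vv[qmask] = ASCMDI mutates the arrays shared
# with DataList, but vv = np.round(vv * 100).astype(int) rebinds the loop variable to a new
# array, so the scaling and reshape never reach the master arrays. Minimal demo:
#   a = np.array([1., 2.]); b = a
#   b[0] = -99.99          # a sees this (in-place)
#   b = np.round(b * 100)  # a does not (rebinding)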
# Print out station listing in keep file
filee = open(OUTKEEP,'a+')
filee.write('%11s%8.4f %9.4f %6.1f %2s %29s%8s%4i\n' % (outstationid,StationListLat[st],StationListLon[st],StationListElev[st],StationListCID[st],StationListName[st],'MONTHS: ',len(np.where(qanoms_mm > ASCMDI)[0])))
# NOTE: using -99.99 here. The loop above changes MDI to -99.99 in place within DataList (slice assignment mutates the shared arrays), so qanoms_mm now has MDI=-99.99; the reshape and *100 rebind the loop variable to new arrays and so never reach the masters.
filee.close()
# Output stnlist to each PHA directory
filee = open(OUTRAWq+'meta/q_stnlist.tavg','a+')
filee.write('%11s%7.2f%10.2f %5i %2s %29s\n' % (outstationid,StationListLat[st],StationListLon[st],StationListElev[st],StationListCID[st],StationListName[st]))
filee.close()
filee = open(OUTRAWe+'meta/e_stnlist.tavg','a+')
filee.write('%11s%7.2f%10.2f %5i %2s %29s\n' % (outstationid,StationListLat[st],StationListLon[st],StationListElev[st],StationListCID[st],StationListName[st]))
filee.close()
filee = open(OUTRAWt+'meta/t_stnlist.tavg','a+')
filee.write('%11s%7.2f%10.2f %5i %2s %29s\n' % (outstationid,StationListLat[st],StationListLon[st],StationListElev[st],StationListCID[st],StationListName[st]))
filee.close()
filee = open(OUTRAWdpd+'meta/dpd_stnlist.tavg','a+')
filee.write('%11s%7.2f%10.2f %5i %2s %29s\n' % (outstationid,StationListLat[st],StationListLon[st],StationListElev[st],StationListCID[st],StationListName[st]))
filee.close()
filee = open(OUTRAWtd+'meta/td_stnlist.tavg','a+')
filee.write('%11s%7.2f%10.2f %5i %2s %29s\n' % (outstationid,StationListLat[st],StationListLon[st],StationListElev[st],StationListCID[st],StationListName[st]))
filee.close()
filee = open(OUTRAWtw+'meta/tw_stnlist.tavg','a+')
filee.write('%11s%7.2f%10.2f %5i %2s %29s\n' % (outstationid,StationListLat[st],StationListLon[st],StationListElev[st],StationListCID[st],StationListName[st]))
filee.close()
filee = open(OUTRAWrh+'meta/rh_stnlist.tavg','a+')
filee.write('%11s%7.2f%10.2f %5i %2s %29s\n' % (outstationid,StationListLat[st],StationListLon[st],StationListElev[st],StationListCID[st],StationListName[st]))
filee.close()
filee = open(OUTRAWws+'meta/ws_stnlist.tavg','a+')
filee.write('%11s%7.2f%10.2f %5i %2s %29s\n' % (outstationid,StationListLat[st],StationListLon[st],StationListElev[st],StationListCID[st],StationListName[st]))
filee.close()
filee = open(OUTRAWslp+'meta/slp_stnlist.tavg','a+')
filee.write('%11s%7.2f%10.2f %5i %2s %29s\n' % (outstationid,StationListLat[st],StationListLon[st],StationListElev[st],StationListCID[st],StationListName[st]))
filee.close()
CountW = int(check_output(["wc","-l",OUTTWEX]).decode('utf-8').split()[0])
CountG = int(check_output(["wc","-l",OUTKEEP]).decode('utf-8').split()[0])
CountB = int(check_output(["wc","-l",OUTDITCH]).decode('utf-8').split()[0])
filee = open(OUTPUTLOG,'a+')
filee.write('%s%i\n' % ('HadISDH_Enough_Months_Station_Count=',CountG))
filee.write('%s%i\n' % ('HadISDH_NotEnough_Months_Station_Count=',CountB))
filee.write('%s%i\n' % ('HadISDH_Twex_Enough_Months_Station_Count=',CountW))
filee.close()
print('All done!')
|
Kate-Willett/HadISDH_Build
|
F4_CreateMonthSeriesfromHadISD.py
|
Python
|
cc0-1.0
| 101,019
|
[
"NetCDF"
] |
222b8ca5a8a9fa6741b237468e615b370d007647bd53757f56d3ed489265baa1
|
__author__ = 'tom.bailey'
'''TB Animation Tools is a toolset for animators
*******************************************************************************
License and Copyright
Copyright 2015-Tom Bailey
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
this script holds a bunch of useful timeline functions to make life easier
send issues/ requests to brimblashman@gmail.com
visit tb-animator.blogspot.com for "stuff"
*******************************************************************************
'''
import pymel.core as pm
import maya.cmds as mc
import os.path
from tb_timeline import timeline
def make_playblast(type="mov"):
formats = { "mov": "qt", "avi": "avi" }
my_timeline = timeline()
directory = pm.optionVar.get('tb_playblast_folder', 'c://qss//')
file = mc.file(query=True, sceneName=True, shortName=True)
filename = file.split('.')[0]
folderName = '%sp_%s/' % (directory, filename)
if not os.path.exists(folderName):
os.makedirs(folderName)
count = len(mc.getFileList(fld=folderName))
blast_name = '%s%s_%s.%s' % (folderName, filename, str(count).zfill(4), type) # zero-pad the take number to 4 digits
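# e.g. (illustrative): directory='c:/blasts/', scene 'shot010.ma', count=7
# gives blast_name = 'c:/blasts/p_shot010/shot010_0007.mov'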
if my_timeline.isHighlighted():
__range = my_timeline.get_highlighted_range()
else:
__range = my_timeline.get_range()
print "pb range", __range
print "file name to save", blast_name
mc.playblast(startTime=__range[0], endTime=__range[1], format=formats[type],
clearCache=False, percent=75, filename=blast_name)
|
tb-animator/tbtools
|
apps/tb_playblast.py
|
Python
|
mit
| 2,309
|
[
"VisIt"
] |
226ae3bf4463dd93c1cae0392305b51dd0aa299121d092a1667d8901bdc72e0c
|
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL import *
from math import *
import numpy as np
import os
import time
class openGLLive:
def __init__(self, system, specs={}):
#TODO: COMPLETE DESCRIPTION FOR SPECS
# 'particle_coloring': 'auto', 'charge', 'type'
# 'particle_type_colors': [r_t1,g_t1,b_t1],[r_t2,g_t2,b_t2],... ]
# 'particle_charge_colors': [[r_lowq,g_lowq,b_lowq],[r_highq,g_highq,b_highq]]
# 'particle_sizes': 'auto', 'type'
# 'particle_type_sizes': [size_t1,size_t2,..]
# 'ext_force_arrows': True/False
# 'window_size': [x,y]
# 'background_color': [r,g,b]
# 'update_fps': fps
# 'draw_bonds': True/False
# 'bond_coloring': 'type'
# 'bond_type_radius': [r_t1,r_t2,...]
# 'bond_type_colors': [r_t1,g_t1,b_t1],[r_t2,g_t2,b_t2],... ]
# 'LB': True/False
# 'light_pos': 'auto', [x,y,z]
# 'light_color': [r,g,b]
# 'lightDecay': factor*[box_l] to light attenuation
# 'particle_type_materials'
#USER FRIENDLY DICT WITH VISUALIZATION SPECIFICATIONS
self.specs = {
'window_size': [800, 800],
'name': 'Espresso Visualization',
'background_color': [0, 0, 0],
'update_fps': 30,
'periodic_images': [0, 0, 0],
'draw_box': True,
'quality_spheres': 15,
'quality_bonds': 15,
'quality_arrows': 15,
'close_cut_distance': 0.1,
'far_cut_distance': 5,
'particle_coloring': 'auto',
'particle_sizes': 'auto',
'particle_type_colors': [[1, 1, 0, 1], [1, 0, 1, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1], [1, 0.5, 0, 1], [0.5, 0, 1, 1]],
'particle_type_materials': [[0.6, 1, 0.1], [0.6, 1, 0.1], [0.6, 1, 0.1], [0.6, 1, 0.1], [0.6, 1, 0.1], [0.6, 1, 0.1], [0.6, 1, 0.1]],
'particle_charge_colors': [np.array([1, 0, 0, 1]), np.array([0, 1, 0, 1])],
'particle_type_sizes': [1, 1, 1, 1, 1, 1, 1, ],
'draw_bonds': True,
'bond_coloring': 'type',
'bond_type_radius': [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
'bond_type_colors': [[1, 1, 1, 1], [1, 0, 1, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 1, 0, 1], [1, 0.5, 0, 1], [0.5, 0, 1, 1]],
'ext_force_arrows': True,
'ext_force_arrows_scale': [1, 1, 1, 1, 1, 1, 1],
'LB': False,
'light_pos': 'auto',
'light_color': [0.8, 0.8, 0.8],
'lightBrightness': 1.0,
'lightSize': 1.0,
'dragEnabled': True,
'dragForce': 3.0
}
for prop in specs.iterkeys():
if prop not in self.specs:
raise ValueError(
prop + ' is not a valid visualization property')
else:
self.specs[prop] = specs[prop]
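# Illustrative construction (hypothetical espressomd system); unknown keys fail fast above:
# visualizer = openGLLive(system, {'window_size': [1024, 768], 'draw_bonds': False})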
self.system = system
self.started = False
self.keyboardManager = KeyboardManager()
self.mouseManager = MouseManager()
self.timers = []
#CALLBACKS FOR THE MAIN THREAD
def registerCallback(self, cb, interval=1000):
self.timers.append((int(interval), cb))
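# Illustrative usage (hypothetical callback): run my_update from the GLUT main loop
# roughly every 500 ms once the blocking start() is running.
# visualizer.registerCallback(my_update, interval=500)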
#THE BLOCKING START METHOD
def start(self):
self.initOpenGL()
self.initEspressoVisualization()
self.initCamera()
self.initControls()
self.initCallbacks()
#POST DISPLAY WITH 60FPS
def timed_update_redraw(data):
glutPostRedisplay()
glutTimerFunc(17, timed_update_redraw, -1)
#PLACE LIGHT AT PARTICLE CENTER, DAMPED SPRING FOR SMOOTH POSITION CHANGE, CALL WITH 10FPS
def timed_update_centerLight(data):
ldt = 0.8
cA = (self.particle_COM - self.smooth_light_pos) * \
0.1 - self.smooth_light_posV * 1.8
self.smooth_light_posV += ldt * cA
self.smooth_light_pos += ldt * self.smooth_light_posV
self.updateLightPos=True
glutTimerFunc(100, timed_update_centerLight, -1)
#AVERAGE PARTICLE COM ONLY EVERY 2sec
def timed_update_particleCOM(data):
self.particle_COM = np.average(self.particles['coords'], axis=0)
glutTimerFunc(2000, timed_update_particleCOM, -1)
self.started = True
glutTimerFunc(17, timed_update_redraw, -1)
if self.specs['light_pos'] == 'auto':
glutTimerFunc(2000, timed_update_particleCOM, -1)
glutTimerFunc(60, timed_update_centerLight, -1)
#FOR MAC, BRING WINDOW TO FRONT
os.system(
'''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "Python" to true' ''')
#START THE BLOCKING MAIN LOOP
glutMainLoop()
#CALLED FROM ESPRESSO INTEGRATION LOOP
#CHANGES OF ESPRESSO SYSTEM CAN ONLY HAPPEN HERE
def update(self):
if self.started:
#IF CALLED TOO OFTEN, ONLY UPDATE WITH GIVEN FREQ
self.elapsedTime += (time.time() - self.measureTimeBeforeIntegrate)
if self.elapsedTime > 1.0 / self.specs['update_fps']:
self.elapsedTime = 0
self.updateParticles()
#KEYBOARD CALLBACKS MAY CHANGE ESPRESSO SYSTEM PROPERTIES, ONLY SAFE TO CHANGE HERE
self.keyboardManager.handleInput()
self.measureTimeBeforeIntegrate = time.time()
if self.triggerSetParticleDrag==True and self.dragId != -1:
self.system.part[self.dragId].ext_force = self.dragExtForce
self.triggerSetParticleDrag=False
elif self.triggerResetParticleDrag==True and self.dragId != -1:
self.system.part[self.dragId].ext_force = self.extForceOld
self.triggerResetParticleDrag=False
self.dragId = -1
#GET THE PARTICLE DATA
def updateParticles(self):
self.particles = {'coords': self.system.part[:].pos,
'types': self.system.part[:].type,
'ext_forces': self.system.part[:].ext_force,
'charges': self.system.part[:].q
}
#GET THE BOND DATA, SO FAR CALLED ONCE UPON INITIALIZATION
def updateBonds(self):
if self.specs['draw_bonds']:
self.bonds = []
for i in range(len(self.system.part)):
bs = self.system.part[i].bonds
for b in bs:
t = b[0].type_number()
# b[0]: Bond, b[1:] Partners
for p in b[1:]:
self.bonds.append([i, p, t])
#DRAW CALLED AUTOMATICALLY FROM GLUT DISPLAY FUNC
def draw(self):
if self.specs['LB']:
self.drawLBVel()
if self.specs['draw_box']:
self.drawSystemBox()
self.drawSystemParticles()
# drawSphere(self.smooth_light_pos,0.5,[0,1.0,0,1.0],[1.,1.,1.])
# drawSphere(self.particle_COM,0.5,[1.0,0,0,1.0],[1.,1.,1.])
if self.specs['draw_bonds']:
self.drawBonds()
def drawSystemBox(self):
box_l = self.system.box_l[0]
b2 = box_l / 2.0
drawBox([b2, b2, b2], box_l, [1 - self.specs['background_color'][0], 1 -
self.specs['background_color'][1], 1 - self.specs['background_color'][2]])
def drawSystemParticles(self):
coords = self.particles['coords']
pIds = range(len(coords))
for pid in pIds:
pos = coords[pid]
q = self.particles['charges'][pid]
ptype = self.particles['types'][pid]
ext_f = self.particles['ext_forces'][pid]
# Size: Lennard Jones Sigma,
if self.specs['particle_sizes'] == 'auto':
lj_sig = self.system.non_bonded_inter[ptype, ptype].lennard_jones.get_params()[
'sigma']
radius = lj_sig / 2.0
if radius == 0:
radius = self.sizeByType(ptype)
elif self.specs['particle_sizes'] == 'type':
radius = self.sizeByType(ptype)
material = self.materialByType(ptype)
if self.specs['particle_coloring'] == 'id':
color = self.IdToColorf(pid)
glColor(color)
elif self.specs['particle_coloring'] == 'auto':
# Color auto: Charge then Type
if q != 0:
color = self.colorByCharge(q)
else:
color = self.colorByType(ptype)
elif self.specs['particle_coloring'] == 'charge':
color = self.colorByCharge(q)
elif self.specs['particle_coloring'] == 'type':
color = self.colorByType(ptype)
drawSphere(pos, radius, color, material, self.specs['quality_spheres'])
for imx in range(-self.specs['periodic_images'][0], self.specs['periodic_images'][0] + 1):
for imy in range(-self.specs['periodic_images'][1], self.specs['periodic_images'][1] + 1):
for imz in range(-self.specs['periodic_images'][2], self.specs['periodic_images'][2] + 1):
if imx != 0 or imy != 0 or imz != 0:
redrawSphere(pos + (imx * self.imPos[0]+imy*self.imPos[1]+imz*self.imPos[2]), radius, self.specs['quality_spheres'])
if self.specs['ext_force_arrows'] or pid == self.dragId:
if ext_f[0] != 0 or ext_f[1] != 0 or ext_f[2] != 0:
if pid == self.dragId:
sc = 1
else:
sc = self.extForceArrowScaleByType(ptype)
if sc > 0:
drawArrow(pos, np.array(ext_f) * sc, 0.25, [1, 1, 1], self.specs['quality_arrows'])
def drawBonds(self):
coords = self.particles['coords']
pIds = range(len(coords))
b2 = self.system.box_l[0] / 2.0
box_l2_sqr = pow(b2, 2.0)
for b in self.bonds:
if self.specs['bond_coloring'] == 'type':
col = self.bondColorByType(b[2])
radius = self.bondRadiusByType(b[2])
d = coords[b[0]] - coords[b[1]]
bondLen_sqr = d[0] * d[0] + d[1] * d[1] + d[2] * d[2]
if bondLen_sqr < box_l2_sqr:
drawCylinder(coords[b[0]], coords[b[1]], radius, col, self.specs['quality_bonds'])
for imx in range(-self.specs['periodic_images'][0], self.specs['periodic_images'][0] + 1):
for imy in range(-self.specs['periodic_images'][1], self.specs['periodic_images'][1] + 1):
for imz in range(-self.specs['periodic_images'][2], self.specs['periodic_images'][2] + 1):
if imx != 0 or imy != 0 or imz != 0:
offset = imx * self.imPos[0] + imy * self.imPos[1] + imz * self.imPos[2]
drawCylinder(coords[b[0]] + offset, coords[b[1]] + offset, radius, col, self.specs['quality_bonds'])
else:
l = coords[b[0]] - coords[b[1]]
l0 = coords[b[0]]
hits = 0
for i in range(6):
lineBoxNDot = float(np.dot(l, self.box_n[i]))
if lineBoxNDot == 0:
continue
s = l0 - \
np.dot(l0 - self.box_p[i],
self.box_n[i]) / lineBoxNDot * l
if self.isInsideBox(s):
if lineBoxNDot < 0:
s0 = s
else:
s1 = s
hits += 1
if hits >= 2:
break
drawCylinder(coords[b[0]], s0, radius, col, self.specs['quality_bonds'])
drawCylinder(coords[b[1]], s1, radius, col, self.specs['quality_bonds'])
for imx in range(-self.specs['periodic_images'][0], self.specs['periodic_images'][0] + 1):
for imy in range(-self.specs['periodic_images'][1], self.specs['periodic_images'][1] + 1):
for imz in range(-self.specs['periodic_images'][2], self.specs['periodic_images'][2] + 1):
if imx != 0 or imy != 0 or imz != 0:
offset = imx * self.imPos[0] + imy * self.imPos[1] + imz * self.imPos[2]
drawCylinder(coords[b[0]] + offset, s0 + offset, radius, col, self.specs['quality_bonds'])
drawCylinder(coords[b[1]] + offset, s1 + offset, radius, col, self.specs['quality_bonds'])
#HELPER TO DRAW PERIODIC BONDS
def isInsideBox(self, p):
eps = 1e-5
for i in range(3):
if p[i] < -eps or p[i] > eps + self.system.box_l[i]:
return False
return True
#VOXELS FOR LB VELOCITIES
def drawLBVel(self):
grid = 10
velRelax = 0.2
cubeSize = grid * 0.25
r = np.array([grid] * 3)
min_vel_new = np.array([1e100] * 3)
max_vel_new = np.array([-1e100] * 3)
for ix in range(r[0]):
for iy in range(r[1]):
for iz in range(r[2]):
c = self.system.box_l * \
(np.array([ix, iy, iz]) +
np.array([0.5, 0.5, 0.5])) / r
v = self.system.actors[0].lbnode_get_node_velocity(c)
col = (np.array(v) - self.lb_min_vel) / self.lb_vel_range
alpha = 0.1 # np.linalg.norm(col)
drawCube(c, cubeSize, col, alpha)
#USE MODULO IF THERE ARE MORE PARTICLE TYPES THAN TYPE DEFINITIONS FOR COLORS, MATERIALS ETC..
def extForceArrowScaleByType(self, btype):
return self.specs['ext_force_arrows_scale'][btype % len(self.specs['ext_force_arrows_scale'])]
def materialByType(self, btype):
return self.specs['particle_type_materials'][btype % len(self.specs['particle_type_materials'])]
def bondColorByType(self, btype):
return self.specs['bond_type_colors'][btype % len(self.specs['bond_type_colors'])]
def bondRadiusByType(self, btype):
return self.specs['bond_type_radius'][btype % len(self.specs['bond_type_radius'])]
def sizeByType(self, ptype):
return self.specs['particle_type_sizes'][ptype % len(self.specs['particle_type_sizes'])]
def colorByType(self, ptype):
return self.specs['particle_type_colors'][ptype % len(self.specs['particle_type_colors'])]
#FADE PARTICLE CHARGE COLOR FROM WHITE (q=0) TO PLUSCOLOR (q=q_max) RESP. MINUSCOLOR (q=q_min)
def colorByCharge(self, q):
if q < 0:
c = 1.0 * q / self.minq
return self.specs['particle_charge_colors'][0] * c + (1 - c) * np.array([1, 1, 1, 1])
else:
c = 1.0 * q / self.maxq
return self.specs['particle_charge_colors'][1] * c + (1 - c) * np.array([1, 1, 1, 1])
#ON INITIALIZATION, CHECK q_max/q_min
def updateChargeColorRange(self):
if len(self.particles['charges'][:])>0:
self.minq = min(self.particles['charges'][:])
self.maxq = max(self.particles['charges'][:])
# INITS FOR GLUT FUNCTIONS
def initCallbacks(self):
# OpenGl Callbacks
def display():
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
self.camera.glLookAt()
self.camera.rotateSystem()
if self.updateLightPos:
self.setLightPos()
self.updateLightPos=False
self.draw()
glutSwapBuffers()
return
def keyboardUp(button, x, y):
self.keyboardManager.keyboardUp(button)
return
def keyboardDown(button, x, y):
self.keyboardManager.keyboardDown(button)
return
def mouse(button, state, x, y):
self.mouseManager.mouseClick(button, state, x, y)
return
def motion(x, y):
self.mouseManager.mouseMove(x, y)
return
#CALLED ON WINDOW POSITION/SIZE CHANGE
def reshapeWindow(w, h):
glViewport(0, 0, w, h)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
box_diag = pow(pow(self.system.box_l[0], 2) + pow(self.system.box_l[1], 2) + pow(self.system.box_l[2], 2), 0.5)
gluPerspective(40, 1.0*w/h, self.specs['close_cut_distance'], self.specs['far_cut_distance'] * box_diag)
glMatrixMode(GL_MODELVIEW)
self.specs['window_size'][0] =1.0*w
self.specs['window_size'][1] =1.0*h
#glPushMatrix()
#TIMERS FOR registerCallback
def dummyTimer(index):
self.timers[index][1]()
glutTimerFunc(self.timers[index][0], dummyTimer, index)
glutDisplayFunc(display)
glutMouseFunc(mouse)
glutKeyboardFunc(keyboardDown)
glutKeyboardUpFunc(keyboardUp)
glutReshapeFunc(reshapeWindow)
#TODO: ZOOM WITH MOUSEWHEEL
# glutMouseWheelFunc(mouseWheel);
glutMotionFunc(motion)
index=0
for t in self.timers:
glutTimerFunc(t[0], dummyTimer, index)
index+=1
#CLICKED ON PARTICLE: DRAG; CLICKED ON BACKGROUND: CAMERA
def mouseMotion(self, mousePos, mousePosOld):
if self.dragId != -1:
ppos = self.particles['coords'][self.dragId]
viewport = glGetIntegerv(GL_VIEWPORT)
mouseWorld = gluUnProject(mousePos[0], viewport[3] - mousePos[1], self.depth)
self.dragExtForce = self.specs['dragForce'] * (np.asarray(mouseWorld) - np.array(ppos))
self.triggerSetParticleDrag = True
#self.system.part[self.dragId].ext_force = f
else:
self.camera.rotateCamera(mousePos, mousePosOld)
#DRAW SCENE AGAIN WITHOUT LIGHT TO IDENTIFY PARTICLE ID BY PIXEL COLOR
def setParticleDrag(self, pos, pos_old):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
self.camera.glLookAt()
self.camera.rotateSystem()
oldColMode = self.specs['particle_coloring']
self.specs['particle_coloring'] = 'id'
glDisable(GL_LIGHTING)
self.drawSystemParticles()
viewport = glGetIntegerv(GL_VIEWPORT)
readPixel = glReadPixelsui(
pos[0], viewport[3] - pos[1], 1, 1, GL_RGB, GL_FLOAT)[0][0]
depth = glReadPixelsf(
pos[0], viewport[3] - pos[1], 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT)[0][0]
pid = self.fcolorToId(readPixel)
print "Selected Particle ", pid
self.dragId = pid
if pid != -1:
self.dragPosInitial = self.particles['coords'][self.dragId]
self.extForceOld = self.particles['ext_forces'][self.dragId][:]
self.depth = depth
self.specs['particle_coloring'] = oldColMode
glEnable(GL_LIGHTING)
def resetParticleDrag(self, pos, pos_old):
if self.dragId != -1:
self.triggerResetParticleDrag = True
def IdToColorf(self, pid):
pid += 1
return [int(pid / (256 * 256)) / 255.0, int((pid % (256 * 256)) / 256) / 255.0, (pid % 256) / 255.0, 1.0]
def fcolorToId(self, fcol):
return 256 * 256 * int(fcol[0] * 255) + 256 * int(fcol[1] * 255) + int(fcol[2] * 255) - 1
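# Worked round trip of the picking encoding above (illustrative):
# pid = 305 -> pid + 1 = 306 = 1*256 + 50 -> RGB (0/255., 1/255., 50/255.)
# and fcolorToId recovers 256*256*0 + 256*1 + 50 - 1 = 305.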
#ALL THE INITS
def initEspressoVisualization(self):
self.maxq = 0
self.minq = 0
self.dragId = -1
self.dragPosInitial = []
self.extForceOld = []
self.dragExtForceOld = []
self.triggerResetParticleDrag=False
self.triggerSetParticleDrag=False
self.depth = 0
self.imPos = [np.array([self.system.box_l[0], 0, 0]), np.array(
[0, self.system.box_l[1], 0]), np.array([0, 0, self.system.box_l[2]])]
self.lb_min_vel = np.array([-1e-6] * 3)
self.lb_max_vel = np.array([1e-6] * 3)
self.lb_vel_range = self.lb_max_vel - self.lb_min_vel
self.lb_min_dens = np.array([0] * 3)
self.lb_max_dens = np.array([0] * 3)
self.elapsedTime = 0
self.measureTimeBeforeIntegrate = 0
self.boxSizeDependence()
self.updateParticles()
self.updateChargeColorRange()
self.updateBonds()
#BOX PLANES (NORMAL, ORIGIN) FOR PERIODIC BONDS
def boxSizeDependence(self):
self.box_n = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.array(
[0, 0, 1]), np.array([-1, 0, 0]), np.array([0, -1, 0]), np.array([0, 0, -1])]
self.box_p = [np.array([0, 0, 0]), np.array([0, 0, 0]), np.array([0, 0, 0]), np.array(
self.system.box_l), np.array(self.system.box_l), np.array(self.system.box_l)]
#DEFAULT CONTROLS
def initControls(self):
self.mouseManager.registerButton(MouseButtonEvent(
None, MouseFireEvent.FreeMotion, self.mouseMotion))
if self.specs['dragEnabled']:
self.mouseManager.registerButton(MouseButtonEvent(
GLUT_LEFT_BUTTON, MouseFireEvent.ButtonPressed, self.setParticleDrag))
self.mouseManager.registerButton(MouseButtonEvent(
GLUT_LEFT_BUTTON, MouseFireEvent.ButtonReleased, self.resetParticleDrag))
self.keyboardManager.registerButton(KeyboardButtonEvent(
'w', KeyboardFireEvent.Hold, self.camera.moveForward))
self.keyboardManager.registerButton(KeyboardButtonEvent(
's', KeyboardFireEvent.Hold, self.camera.moveBackward))
self.keyboardManager.registerButton(KeyboardButtonEvent(
'a', KeyboardFireEvent.Hold, self.camera.moveLeft))
self.keyboardManager.registerButton(KeyboardButtonEvent(
'd', KeyboardFireEvent.Hold, self.camera.moveRight))
self.keyboardManager.registerButton(KeyboardButtonEvent(
'e', KeyboardFireEvent.Hold, self.camera.rotateSystemXR))
self.keyboardManager.registerButton(KeyboardButtonEvent(
'q', KeyboardFireEvent.Hold, self.camera.rotateSystemXL))
self.keyboardManager.registerButton(KeyboardButtonEvent(
'z', KeyboardFireEvent.Hold, self.camera.rotateSystemYR))
self.keyboardManager.registerButton(KeyboardButtonEvent(
'c', KeyboardFireEvent.Hold, self.camera.rotateSystemYL))
self.keyboardManager.registerButton(KeyboardButtonEvent(
'r', KeyboardFireEvent.Hold, self.camera.rotateSystemZR))
self.keyboardManager.registerButton(KeyboardButtonEvent(
'f', KeyboardFireEvent.Hold, self.camera.rotateSystemZL))
#ASYNCHRONOUS PARALLEL CALLS OF glLight CAUSE SEG FAULTS, SO ONLY CHANGE THE LIGHT IN THE CENTRAL display METHOD AND TRIGGER CHANGES
def setLightPos(self):
if self.specs['light_pos'] == 'auto':
glLightfv(GL_LIGHT0, GL_POSITION, [self.smooth_light_pos[0], self.smooth_light_pos[1], self.smooth_light_pos[2], 0.6])
else:
glLightfv(GL_LIGHT0, GL_POSITION, self.specs['light_pos'])
def triggerLightPosUpdate(self):
self.updateLightPos=True
def initCamera(self):
bl = self.system.box_l[0]
bl2 = bl / 2.0
box_center = np.array([bl2, bl2, bl2])
self.camera = Camera(camPos=np.array([bl * 1.3, bl * 1.3, bl * 2.5]), camRot=np.array([3.55, -0.4]), center=box_center, updateLights=self.triggerLightPosUpdate)
self.smooth_light_pos = np.copy(box_center)
self.smooth_light_posV = np.array([0.0, 0.0, 0.0])
self.particle_COM = np.copy(box_center)
self.updateLightPos=True
def initOpenGL(self):
glutInit(self.specs['name'])
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
glutInitWindowSize(self.specs['window_size'][
0], self.specs['window_size'][1])
glutCreateWindow(self.specs['name'])
glClearColor(self.specs['background_color'][0], self.specs[
'background_color'][1], self.specs['background_color'][2], 1.)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_BLEND)
glLineWidth(2.0)
glutIgnoreKeyRepeat(1)
# setup lighting
glEnable(GL_DEPTH_TEST)
glEnable(GL_LIGHTING)
if self.specs['light_pos'] != 'auto':
glLightfv(GL_LIGHT0, GL_POSITION, self.specs['light_pos'])
else:
glLightfv(GL_LIGHT0, GL_POSITION, self.system.box_l * 0.5)
glLightfv(GL_LIGHT0, GL_DIFFUSE, self.specs['light_color'])
glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION,
0.7 / self.specs['lightBrightness'])
glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, self.system.box_l[
0] / 67. * 0.005 / self.specs['lightSize'])
glEnable(GL_LIGHT0)
#END OF MAIN CLASS
#OPENGL DRAW WRAPPERS
def setSolidMaterial(r, g, b, a=1.0, ambient=0.6, diffuse=1.0, specular=0.1):
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT, [
ambient * r, ambient * g, ambient * b, a])
glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, [
diffuse * r, diffuse * g, diffuse * b, a])
glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, [
specular * r, specular * g, specular * b, a])
glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 50)
def setOutlineMaterial(r, g, b, a=1.0):
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT, [r, g, b, a])
glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, [0, 0, 0, a])
glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, [0, 0, 0, a])
glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 100)
def drawBox(pos, size, color):
setSolidMaterial(color[0], color[1], color[2], 1, 2, 1)
glPushMatrix()
glTranslatef(pos[0], pos[1], pos[2])
glutWireCube(size)
glPopMatrix()
def drawSphere(pos, radius, color, material, quality):
glPushMatrix()
glTranslatef(pos[0], pos[1], pos[2])
setSolidMaterial(color[0], color[1], color[2], color[
3], material[0], material[1], material[2])
glutSolidSphere(radius, quality, quality)
glPopMatrix()
def redrawSphere(pos, radius, quality):
glPushMatrix()
glTranslatef(pos[0], pos[1], pos[2])
glutSolidSphere(radius, quality, quality)
glPopMatrix()
def drawCube(pos, size, color, alpha):
setSolidMaterial(color[0], color[1], color[2], alpha)
glPushMatrix()
glTranslatef(pos[0], pos[1], pos[2])
glutSolidCube(size)
glPopMatrix()
def calcAngle(d):
z = np.array([0, 0, 1])
l = np.linalg.norm(d)
dot_z = np.dot(z, d)
t = np.cross(z, d)
return 180.0 / pi * acos(dot_z / l), t, l
def drawCylinder(posA, posB, radius, color, quality):
setSolidMaterial(color[0], color[1], color[2])
glPushMatrix()
d = posB - posA
v = np.linalg.norm(d)
ax = 57.2957795 * acos(d[2] / v)
if d[2] < 0.0:
ax = -ax
rx = -d[1] * d[2]
ry = d[0] * d[2]
#angle,t,length = calcAngle(d)
length = np.linalg.norm(d)
glTranslatef(posA[0], posA[1], posA[2])
glRotatef(ax, rx, ry, 0.0)
quadric = gluNewQuadric()
gluCylinder(quadric, radius, radius, length, quality, quality)
# glutSolidCylinder(radius,radius,length,10,10)
glPopMatrix()
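# Geometry note (illustrative): the glRotatef above maps the z-axis onto d. The axis
# (rx, ry, 0) = (-d[1]*d[2], d[0]*d[2], 0) is parallel to z x d = (-d[1], d[0], 0);
# for d[2] < 0 the axis flips sign, which the ax = -ax branch compensates.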
def drawArrow(pos, d, radius, color, quality):
setSolidMaterial(color[0], color[1], color[2])
glPushMatrix()
glPushMatrix()
v = np.linalg.norm(d)
ax = 57.2957795 * acos(d[2] / v)
if d[2] < 0.0:
ax = -ax
rx = -d[1] * d[2]
ry = d[0] * d[2]
#angle,t,length = calcAngle(d)
glTranslatef(pos[0], pos[1], pos[2])
glRotatef(ax, rx, ry, 0.0)
# glRotatef(angle,t[0],t[1],t[2]);
quadric = gluNewQuadric()
gluCylinder(quadric, radius, radius, v, quality, quality)
glPopMatrix()
e = pos + d
glTranslatef(e[0], e[1], e[2])
# glRotatef(angle,t[0],t[1],t[2]);
glRotatef(ax, rx, ry, 0.0)
glutSolidCone(radius * 3, 3, quality, quality)
glPopMatrix()
#MOUSE EVENT MANAGER
class MouseFireEvent:
ButtonPressed = 0
FreeMotion = 1
ButtonMotion = 2
ButtonReleased = 3
class MouseButtonEvent:
def __init__(self, button, fireEvent, callback):
self.button = button
self.fireEvent = fireEvent
self.callback = callback
class MouseManager:
def __init__(self):
self.mousePos = np.array([0, 0])
self.mousePosOld = np.array([0, 0])
self.mouseEventsPressed = []
self.mouseEventsFreeMotion = []
self.mouseEventsButtonMotion = []
self.mouseEventsReleased = []
def registerButton(self, mouseEvent):
if mouseEvent.fireEvent == MouseFireEvent.ButtonPressed:
self.mouseEventsPressed.append(mouseEvent)
elif mouseEvent.fireEvent == MouseFireEvent.FreeMotion:
self.mouseEventsFreeMotion.append(mouseEvent)
elif mouseEvent.fireEvent == MouseFireEvent.ButtonMotion:
self.mouseEventsButtonMotion.append(mouseEvent)
elif mouseEvent.fireEvent == MouseFireEvent.ButtonReleased:
self.mouseEventsReleased.append(mouseEvent)
def mouseClick(self, button, state, x, y):
self.mousePosOld = self.mousePos
self.mousePos = np.array([x, y])
for me in self.mouseEventsPressed:
if me.button == button and state == GLUT_DOWN:
me.callback(self.mousePos, self.mousePosOld)
for me in self.mouseEventsReleased:
if me.button == button and state == GLUT_UP:
me.callback(self.mousePos, self.mousePosOld)
def mouseMove(self, x, y):
self.mousePosOld = self.mousePos
self.mousePos = np.array([x, y])
for me in self.mouseEventsFreeMotion:
me.callback(self.mousePos, self.mousePosOld)
# for me in self.mouseEventsButtonMotion:
# if me.button == button:
# me.callback(self.mousePos,self.mousePosOld)
#KEYBOARD EVENT MANAGER
class KeyboardFireEvent:
Pressed = 0
Hold = 1
Released = 2
class KeyboardButtonEvent:
def __init__(self, button, fireEvent, callback):
self.button = button
self.fireEvent = fireEvent
self.callback = callback
class KeyboardManager:
def __init__(self):
self.pressedKeys = set([])
self.keyStateOld = {}
self.keyState = {}
self.buttonEventsPressed = []
self.buttonEventsHold = []
self.buttonEventsReleased = []
def registerButton(self, buttonEvent):
if buttonEvent.fireEvent == KeyboardFireEvent.Pressed:
self.buttonEventsPressed.append(buttonEvent)
elif buttonEvent.fireEvent == KeyboardFireEvent.Hold:
self.buttonEventsHold.append(buttonEvent)
elif buttonEvent.fireEvent == KeyboardFireEvent.Released:
self.buttonEventsReleased.append(buttonEvent)
def handleInput(self):
removeKeys = set([])
for b in self.pressedKeys:
if self.keyStateOld[b] == 0 and self.keyState[b] == 1:
for be in self.buttonEventsPressed:
if be.button == b:
be.callback()
for be in self.buttonEventsHold:
if be.button == b:
be.callback()
# print 'Key',b,'Pressed'
elif self.keyStateOld[b] == 1 and self.keyState[b] == 1:
for be in self.buttonEventsHold:
if be.button == b:
be.callback()
# print 'Key',b,'Hold'
elif self.keyStateOld[b] == 1 and self.keyState[b] == 0:
for be in self.buttonEventsReleased:
if be.button == b:
be.callback()
# print 'Key',b,'Released'
removeKeys.add(b)
self.keyStateOld[b] = self.keyState[b]
self.pressedKeys = self.pressedKeys.difference(removeKeys)
def keyboardUp(self, button):
self.keyState[button] = 0 # Key up
def keyboardDown(self, button):
self.pressedKeys.add(button)
self.keyState[button] = 1 # Key down
if not button in self.keyStateOld.keys():
self.keyStateOld[button] = 0
#CAMERA
class Camera:
def __init__(self, camPos=np.array([0, 0, 1]), camRot=np.array([pi, 0]), moveSpeed=3, rotSpeed=0.001, globalRotSpeed=3, center=np.array([0, 0, 0]), updateLights=None):
self.moveSpeed = moveSpeed
self.lookSpeed = rotSpeed
self.globalRotSpeed = globalRotSpeed
self.camPos = camPos
self.camRot = camRot
self.center = center
self.camRotGlobal = np.array([0, 0, 0])
self.updateLights = updateLights
self.calcCameraDirections()
def moveForward(self):
self.camPos += self.lookDir * self.moveSpeed
self.updateLights()
def moveBackward(self):
self.camPos -= self.lookDir * self.moveSpeed
self.updateLights()
def moveLeft(self):
self.camPos -= self.right * self.moveSpeed
self.updateLights()
def moveRight(self):
self.camPos += self.right * self.moveSpeed
self.updateLights()
def rotateSystemXL(self):
self.camRotGlobal[1] += self.globalRotSpeed
def rotateSystemXR(self):
self.camRotGlobal[1] -= self.globalRotSpeed
def rotateSystemYL(self):
self.camRotGlobal[2] += self.globalRotSpeed
def rotateSystemYR(self):
self.camRotGlobal[2] -= self.globalRotSpeed
def rotateSystemZL(self):
self.camRotGlobal[0] += self.globalRotSpeed
def rotateSystemZR(self):
self.camRotGlobal[0] -= self.globalRotSpeed
def calcCameraDirections(self):
self.lookDir = np.array([cos(self.camRot[1]) * sin(self.camRot[0]),
sin(self.camRot[1]),
cos(self.camRot[1]) * cos(self.camRot[0])])
self.right = np.array(
[sin(self.camRot[0] - pi / 2.0), 0, cos(self.camRot[0] - pi / 2.0)])
self.up = np.cross(self.right, self.lookDir)
def rotateCamera(self, mousePos, mousePosOld):
self.camRot += (mousePos - mousePosOld) * self.lookSpeed
self.calcCameraDirections()
def glLookAt(self):
lookAt = self.camPos + self.lookDir
gluLookAt(self.camPos[0], self.camPos[1], self.camPos[2],
lookAt[0], lookAt[1], lookAt[2],
self.up[0], self.up[1], self.up[2])
def rotateSystem(self):
glTranslatef(self.center[0], self.center[1], self.center[2])
glRotatef(self.camRotGlobal[0], 1, 0, 0)
glRotatef(self.camRotGlobal[1], 0, 1, 0)
glRotatef(self.camRotGlobal[2], 0, 0, 1)
glTranslatef(-self.center[0], -self.center[1], -self.center[2])
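# Usage sketch (illustrative only): a minimal GLUT display callback built on
# the Camera class above; the scene drawing itself is elided.
# def display():
#     glLoadIdentity()
#     camera.glLookAt()
#     camera.rotateSystem()
#     # ... draw the scene here ...
#     glutSwapBuffers()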
| lahnerml/espresso | src/python/espressomd/visualizationOpenGL.py | Python | gpl-3.0 | 35,178 | ["ESPResSo"] | a634bd7795547f9750f2ad0218d446380a910f3905e340edc69193791bbefddc |
# -*- coding: utf-8 -*-
"""Multivariate Normal and t distributions
Created on Sat May 28 15:38:23 2011
@author: Josef Perktold
TODO:
* renaming,
- after adding t distribution, cov doesn't make sense for Sigma DONE
- should mean also be renamed to mu, if there will be distributions
with mean != mu
* not sure about corner cases
- behavior with (almost) singular sigma or transforms
- df <= 2, is everything correct if variance is not finite or defined ?
* check to return possibly univariate distribution for marginals or conditional
distributions, does univariate special case work? seems ok for conditional
* are all the extra transformation methods useful outside of testing ?
- looks like I have some mixup in definitions of standardize, normalize
* new methods marginal, conditional, ... just added, typos ?
- largely tested for MVNormal, not yet for MVT DONE
* conditional: reusing, vectorizing, should we reuse a projection matrix or
allow for a vectorized, conditional_mean similar to OLS.predict
* add additional things similar to LikelihoodModelResults? quadratic forms,
F distribution, and others ???
* add Delta method for nonlinear functions here, current function is hidden
somewhere in miscmodels
* raise ValueErrors for wrong input shapes, currently only partially checked
* quantile method (ppf for equal bounds for multiple testing) is missing
http://svitsrv25.epfl.ch/R-doc/library/mvtnorm/html/qmvt.html seems to use
just a root finder for inversion of cdf
* normalize has ambiguous definition, and mixing it up in different versions
std from sigma or std from cov ?
I would like to get what I need for mvt-cdf, or not
univariate standard t distribution has scale=1 but std>1
FIXED: add std_sigma, and normalize uses std_sigma
* more work: bivariate distributions,
inherit from multivariate but overwrite some methods for better efficiency,
e.g. cdf and expect
I kept the original MVNormal0 class as reference, can be deleted
See Also
--------
sandbox/examples/ex_mvelliptical.py
Examples
--------
Note, several parts of these examples are random and the numbers will not be
(exactly) the same.
>>> import numpy as np
>>> import statsmodels.sandbox.distributions.mv_normal as mvd
>>>
>>> from numpy.testing import assert_array_almost_equal
>>>
>>> cov3 = np.array([[ 1. , 0.5 , 0.75],
... [ 0.5 , 1.5 , 0.6 ],
... [ 0.75, 0.6 , 2. ]])
>>> mu = np.array([-1, 0.0, 2.0])
multivariate normal distribution
--------------------------------
>>> mvn3 = mvd.MVNormal(mu, cov3)
>>> mvn3.rvs(size=3)
array([[-0.08559948, -1.0319881 , 1.76073533],
[ 0.30079522, 0.55859618, 4.16538667],
[-1.36540091, -1.50152847, 3.87571161]])
>>> mvn3.std
array([ 1. , 1.22474487, 1.41421356])
>>> a = [0.0, 1.0, 1.5]
>>> mvn3.pdf(a)
0.013867410439318712
>>> mvn3.cdf(a)
0.31163181123730122
Monte Carlo integration
>>> mvn3.expect_mc(lambda x: (x<a).all(-1), size=100000)
0.30958999999999998
>>> mvn3.expect_mc(lambda x: (x<a).all(-1), size=1000000)
0.31197399999999997
multivariate t distribution
---------------------------
>>> mvt3 = mvd.MVT(mu, cov3, 4)
>>> mvt3.rvs(size=4)
array([[-0.94185437, 0.3933273 , 2.40005487],
[ 0.07563648, 0.06655433, 7.90752238],
[ 1.06596474, 0.32701158, 2.03482886],
[ 3.80529746, 7.0192967 , 8.41899229]])
>>> mvt3.pdf(a)
0.010402959362646937
>>> mvt3.cdf(a)
0.30269483623249821
>>> mvt3.expect_mc(lambda x: (x<a).all(-1), size=1000000)
0.30271199999999998
>>> mvt3.cov
array([[ 2. , 1. , 1.5],
[ 1. , 3. , 1.2],
[ 1.5, 1.2, 4. ]])
>>> mvt3.corr
array([[ 1. , 0.40824829, 0.53033009],
[ 0.40824829, 1. , 0.34641016],
[ 0.53033009, 0.34641016, 1. ]])
get normalized distribution
>>> mvt3n = mvt3.normalized()
>>> mvt3n.sigma
array([[ 1. , 0.40824829, 0.53033009],
[ 0.40824829, 1. , 0.34641016],
[ 0.53033009, 0.34641016, 1. ]])
>>> mvt3n.cov
array([[ 2. , 0.81649658, 1.06066017],
[ 0.81649658, 2. , 0.69282032],
[ 1.06066017, 0.69282032, 2. ]])
What's currently there?
>>> [i for i in dir(mvn3) if not i[0]=='_']
['affine_transformed', 'cdf', 'cholsigmainv', 'conditional', 'corr', 'cov',
'expect_mc', 'extra_args', 'logdetsigma', 'logpdf', 'marginal', 'mean',
'normalize', 'normalized', 'normalized2', 'nvars', 'pdf', 'rvs', 'sigma',
'sigmainv', 'standardize', 'standardized', 'std', 'std_sigma', 'whiten']
>>> [i for i in dir(mvt3) if not i[0]=='_']
['affine_transformed', 'cdf', 'cholsigmainv', 'corr', 'cov', 'df', 'expect_mc',
'extra_args', 'logdetsigma', 'logpdf', 'marginal', 'mean', 'normalize',
'normalized', 'normalized2', 'nvars', 'pdf', 'rvs', 'sigma', 'sigmainv',
'standardize', 'standardized', 'std', 'std_sigma', 'whiten']
"""
from __future__ import print_function
import numpy as np
from statsmodels.sandbox.distributions.multivariate import (
mvstdtprob, mvstdnormcdf, mvnormcdf)
def expect_mc(dist, func=lambda x: 1, size=50000):
'''calculate expected value of function by Monte Carlo integration
Parameters
----------
dist : distribution instance
needs to have rvs defined as a method for drawing random numbers
func : callable
function for which expectation is calculated, this function needs to
be vectorized, integration is over axis=0
size : int
number of random samples to use in the Monte Carlo integration,
Notes
-----
this doesn't batch
Returns
-------
expected value : ndarray
return of function func integrated over axis=0 by MonteCarlo, this will
have the same shape as the return of func without axis=0
Examples
--------
integrate probability that both observations are negative
>>> mvn = mve.MVNormal([0,0],2.)
>>> mve.expect_mc(mvn, lambda x: (x<np.array([0,0])).all(-1), size=100000)
0.25306000000000001
get tail probabilities of marginal distribution (should be 0.1)
>>> c = stats.norm.isf(0.05, scale=np.sqrt(2.))
>>> expect_mc(mvn, lambda x: (np.abs(x)>np.array([c, c])), size=100000)
array([ 0.09969, 0.0986 ])
or calling the method
>>> mvn.expect_mc(lambda x: (np.abs(x)>np.array([c, c])), size=100000)
array([ 0.09937, 0.10075])
'''
def fun(x):
return func(x) # * dist.pdf(x)
rvs = dist.rvs(size=size)
return fun(rvs).mean(0)
def expect_mc_bounds(dist, func=lambda x: 1, size=50000, lower=None, upper=None,
conditional=False, overfact=1.2):
'''calculate expected value of function by Monte Carlo integration
Parameters
----------
dist : distribution instance
needs to have rvs defined as a method for drawing random numbers
func : callable
function for which expectation is calculated, this function needs to
be vectorized, integration is over axis=0
size : int
minimum number of random samples to use in the Monte Carlo integration,
the actual number used can be larger because of oversampling.
lower : None or array_like
lower integration bounds, if None, then it is set to -inf
upper : None or array_like
upper integration bounds, if None, then it is set to +inf
conditional : bool
        If true, then the expectation is conditional on being within the
        [lower, upper] bounds, otherwise it is unconditional
overfact : float
oversampling factor, the actual number of random variables drawn in
each attempt are overfact * remaining draws. Extra draws are also
used in the integration.
Notes
-----
this doesn't batch
Returns
-------
expected value : ndarray
return of function func integrated over axis=0 by MonteCarlo, this will
have the same shape as the return of func without axis=0
Examples
--------
>>> mvn = mve.MVNormal([0,0],2.)
>>> mve.expect_mc_bounds(mvn, lambda x: np.ones(x.shape[0]),
lower=[-10,-10],upper=[0,0])
0.24990416666666668
get 3 marginal moments with one integration
>>> mvn = mve.MVNormal([0,0],1.)
>>> mve.expect_mc_bounds(mvn, lambda x: np.dstack([x, x**2, x**3, x**4]),
lower=[-np.inf,-np.inf], upper=[np.inf,np.inf])
array([[ 2.88629497e-03, 9.96706297e-01, -2.51005344e-03,
2.95240921e+00],
[ -5.48020088e-03, 9.96004409e-01, -2.23803072e-02,
2.96289203e+00]])
>>> from scipy import stats
>>> [stats.norm.moment(i) for i in [1,2,3,4]]
[0.0, 1.0, 0.0, 3.0]
'''
#call rvs once to find length of random vector
rvsdim = dist.rvs(size=1).shape[-1]
if lower is None:
lower = -np.inf * np.ones(rvsdim)
else:
lower = np.asarray(lower)
if upper is None:
upper = np.inf * np.ones(rvsdim)
else:
upper = np.asarray(upper)
def fun(x):
return func(x) # * dist.pdf(x)
rvsli = []
used = 0 #remain = size #inplace changes size
total = 0
while True:
remain = size - used #just a temp variable
rvs = dist.rvs(size=int(remain * overfact))
        total += rvs.shape[0]  #count the draws actually made, not the target size
rvsok = rvs[((rvs >= lower) & (rvs <= upper)).all(-1)]
#if rvsok.ndim == 1: #possible shape problems if only 1 random vector
rvsok = np.atleast_2d(rvsok)
used += rvsok.shape[0]
rvsli.append(rvsok) #[:remain]) use extras instead
print(used)
if used >= size: break
rvs = np.vstack(rvsli)
print(rvs.shape)
    assert used == rvs.shape[0] #safety check
mean_conditional = fun(rvs).mean(0)
if conditional:
return mean_conditional
else:
return mean_conditional * (used * 1. / total)
def bivariate_normal(x, mu, cov):
"""
Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld.
"""
X, Y = np.transpose(x)
mux, muy = mu
sigmax, sigmaxy, tmp, sigmay = np.ravel(cov)
sigmax, sigmay = np.sqrt(sigmax), np.sqrt(sigmay)
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp( -z/(2*(1-rho**2))) / denom
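# Example (sketch): densities at two points; `x` has one point per row, `mu`
# is (mux, muy), and `cov` is the 2x2 covariance (it is flattened internally).
# >>> pts = np.array([[0., 0.], [1., 1.]])
# >>> bivariate_normal(pts, mu=(0., 0.), cov=np.array([[1., 0.5], [0.5, 1.]]))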
class BivariateNormal(object):
#TODO: make integration limits more flexible
# or normalize before integration
def __init__(self, mean, cov):
        self.mean = mean
self.cov = cov
self.sigmax, self.sigmaxy, tmp, self.sigmay = np.ravel(cov)
self.nvars = 2
def rvs(self, size=1):
return np.random.multivariate_normal(self.mean, self.cov, size=size)
def pdf(self, x):
return bivariate_normal(x, self.mean, self.cov)
def logpdf(self, x):
#TODO: replace this
return np.log(self.pdf(x))
def cdf(self, x):
return self.expect(upper=x)
def expect(self, func=lambda x: 1, lower=(-10,-10), upper=(10,10)):
def fun(x, y):
x = np.column_stack((x,y))
return func(x) * self.pdf(x)
from scipy.integrate import dblquad
return dblquad(fun, lower[0], upper[0], lambda y: lower[1],
lambda y: upper[1])
def kl(self, other):
'''Kullback-Leibler divergence between this and another distribution
int f(x) (log f(x) - log g(x)) dx
where f is the pdf of self, and g is the pdf of other
uses double integration with scipy.integrate.dblquad
limits currently hardcoded
'''
fun = lambda x : self.logpdf(x) - other.logpdf(x)
return self.expect(fun)
def kl_mc(self, other, size=500000):
fun = lambda x : self.logpdf(x) - other.logpdf(x)
rvs = self.rvs(size=size)
return fun(rvs).mean()
class MVElliptical(object):
'''Base Class for multivariate elliptical distributions, normal and t
contains common initialization, and some common methods
subclass needs to implement at least rvs and logpdf methods
'''
#getting common things between normal and t distribution
def __init__(self, mean, sigma, *args, **kwds):
'''initialize instance
Parameters
----------
mean : array_like
parameter mu (might be renamed), for symmetric distributions this
is the mean
sigma : array_like, 2d
dispersion matrix, covariance matrix in normal distribution, but
only proportional to covariance matrix in t distribution
args : list
distribution specific arguments, e.g. df for t distribution
kwds : dict
currently not used
'''
self.extra_args = []
self.mean = np.asarray(mean)
self.sigma = sigma = np.asarray(sigma)
sigma = np.squeeze(sigma)
self.nvars = nvars = len(mean)
#self.covchol = np.linalg.cholesky(sigma)
#in the following sigma is original, self.sigma is full matrix
if sigma.shape == ():
#iid
self.sigma = np.eye(nvars) * sigma
self.sigmainv = np.eye(nvars) / sigma
self.cholsigmainv = np.eye(nvars) / np.sqrt(sigma)
elif (sigma.ndim == 1) and (len(sigma) == nvars):
#independent heteroscedastic
self.sigma = np.diag(sigma)
self.sigmainv = np.diag(1. / sigma)
self.cholsigmainv = np.diag( 1. / np.sqrt(sigma))
elif sigma.shape == (nvars, nvars): #python tuple comparison
#general
self.sigmainv = np.linalg.pinv(sigma)
self.cholsigmainv = np.linalg.cholesky(self.sigmainv).T
else:
raise ValueError('sigma has invalid shape')
#store logdetsigma for logpdf
self.logdetsigma = np.log(np.linalg.det(self.sigma))
def rvs(self, size=1):
'''random variable
Parameters
----------
size : int or tuple
the number and shape of random variables to draw.
Returns
-------
rvs : ndarray
the returned random variables with shape given by size and the
dimension of the multivariate random vector as additional last
dimension
'''
raise NotImplementedError
def logpdf(self, x):
'''logarithm of probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
        logpdf : float or array
            log of the probability density for each random vector
this should be made to work with 2d x,
with multivariate normal vector in each row and iid across rows
doesn't work now because of dot in whiten
'''
raise NotImplementedError
def cdf(self, x, **kwds):
'''cumulative distribution function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
kwds : dict
contains options for the numerical calculation of the cdf
Returns
-------
        cdf : float or array
            cumulative probability of each random vector
'''
raise NotImplementedError
def affine_transformed(self, shift, scale_matrix):
'''affine transformation define in subclass because of distribution
specific restrictions'''
#implemented in subclass at least for now
raise NotImplementedError
def whiten(self, x):
"""
whiten the data by linear transformation
Parameters
-----------
x : array-like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
np.dot(x, self.cholsigmainv.T)
Notes
-----
This only does rescaling, it doesn't subtract the mean, use standardize
for this instead
See Also
--------
standardize : subtract mean and rescale to standardized random variable.
"""
x = np.asarray(x)
return np.dot(x, self.cholsigmainv.T)
def pdf(self, x):
'''probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
pdf : float or array
probability density value of each random vector
'''
return np.exp(self.logpdf(x))
def standardize(self, x):
'''standardize the random variable, i.e. subtract mean and whiten
Parameters
-----------
x : array-like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
np.dot(x - self.mean, self.cholsigmainv.T)
Notes
-----
See Also
--------
whiten : rescale random variable, standardize without subtracting mean.
'''
return self.whiten(x - self.mean)
def standardized(self):
'''return new standardized MVNormal instance
'''
return self.affine_transformed(-self.mean, self.cholsigmainv)
def normalize(self, x):
'''normalize the random variable, i.e. subtract mean and rescale
The distribution will have zero mean and sigma equal to correlation
Parameters
-----------
x : array-like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
(x - self.mean)/std_sigma
Notes
-----
See Also
--------
whiten : rescale random variable, standardize without subtracting mean.
'''
std_ = np.atleast_2d(self.std_sigma)
return (x - self.mean)/std_ #/std_.T
def normalized(self, demeaned=True):
'''return a normalized distribution where sigma=corr
if demeaned is True, then mean will be set to zero
'''
if demeaned:
mean_new = np.zeros_like(self.mean)
else:
mean_new = self.mean / self.std_sigma
sigma_new = self.corr
args = [getattr(self, ea) for ea in self.extra_args]
return self.__class__(mean_new, sigma_new, *args)
def normalized2(self, demeaned=True):
'''return a normalized distribution where sigma=corr
second implementation for testing affine transformation
'''
if demeaned:
shift = -self.mean
else:
shift = self.mean * (1. / self.std_sigma - 1.)
return self.affine_transformed(shift, np.diag(1. / self.std_sigma))
#the following "standardizes" cov instead
#return self.affine_transformed(shift, self.cholsigmainv)
@property
def std(self):
'''standard deviation, square root of diagonal elements of cov
'''
return np.sqrt(np.diag(self.cov))
@property
def std_sigma(self):
'''standard deviation, square root of diagonal elements of sigma
'''
return np.sqrt(np.diag(self.sigma))
@property
def corr(self):
'''correlation matrix'''
return self.cov / np.outer(self.std, self.std)
expect_mc = expect_mc
def marginal(self, indices):
'''return marginal distribution for variables given by indices
this should be correct for normal and t distribution
Parameters
----------
indices : array_like, int
list of indices of variables in the marginal distribution
Returns
-------
mvdist : instance
new instance of the same multivariate distribution class that
contains the marginal distribution of the variables given in
indices
'''
indices = np.asarray(indices)
mean_new = self.mean[indices]
sigma_new = self.sigma[indices[:,None], indices]
args = [getattr(self, ea) for ea in self.extra_args]
return self.__class__(mean_new, sigma_new, *args)
#parts taken from linear_model, but heavy adjustments
class MVNormal0(object):
'''Class for Multivariate Normal Distribution
original full version, kept for testing, new version inherits from
MVElliptical
uses Cholesky decomposition of covariance matrix for the transformation
of the data
'''
def __init__(self, mean, cov):
self.mean = mean
self.cov = cov = np.asarray(cov)
cov = np.squeeze(cov)
self.nvars = nvars = len(mean)
#in the following cov is original, self.cov is full matrix
if cov.shape == ():
#iid
self.cov = np.eye(nvars) * cov
self.covinv = np.eye(nvars) / cov
self.cholcovinv = np.eye(nvars) / np.sqrt(cov)
elif (cov.ndim == 1) and (len(cov) == nvars):
#independent heteroscedastic
self.cov = np.diag(cov)
self.covinv = np.diag(1. / cov)
self.cholcovinv = np.diag( 1. / np.sqrt(cov))
elif cov.shape == (nvars, nvars): #python tuple comparison
#general
self.covinv = np.linalg.pinv(cov)
self.cholcovinv = np.linalg.cholesky(self.covinv).T
else:
raise ValueError('cov has invalid shape')
#store logdetcov for logpdf
self.logdetcov = np.log(np.linalg.det(self.cov))
def whiten(self, x):
"""
whiten the data by linear transformation
Parameters
-----------
        x : array-like, 1d or 2d
Data to be whitened, if 2d then each row contains an independent
sample of the multivariate random vector
Returns
-------
np.dot(x, self.cholcovinv.T)
Notes
-----
This only does rescaling, it doesn't subtract the mean, use standardize
for this instead
See Also
--------
standardize : subtract mean and rescale to standardized random variable.
"""
x = np.asarray(x)
if np.any(self.cov):
#return np.dot(self.cholcovinv, x)
return np.dot(x, self.cholcovinv.T)
else:
return x
def rvs(self, size=1):
'''random variable
Parameters
----------
size : int or tuple
the number and shape of random variables to draw.
Returns
-------
rvs : ndarray
the returned random variables with shape given by size and the
dimension of the multivariate random vector as additional last
dimension
Notes
-----
uses numpy.random.multivariate_normal directly
'''
return np.random.multivariate_normal(self.mean, self.cov, size=size)
def pdf(self, x):
'''probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
pdf : float or array
probability density value of each random vector
'''
return np.exp(self.logpdf(x))
def logpdf(self, x):
'''logarithm of probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
        logpdf : float or array
            log of the probability density for each random vector
this should be made to work with 2d x,
with multivariate normal vector in each row and iid across rows
doesn't work now because of dot in whiten
'''
x = np.asarray(x)
x_whitened = self.whiten(x - self.mean)
SSR = np.sum(x_whitened**2, -1)
llf = -SSR
llf -= self.nvars * np.log(2. * np.pi)
llf -= self.logdetcov
llf *= 0.5
return llf
expect_mc = expect_mc
class MVNormal(MVElliptical):
'''Class for Multivariate Normal Distribution
uses Cholesky decomposition of covariance matrix for the transformation
of the data
'''
    __name__ = 'Multivariate Normal Distribution'
def rvs(self, size=1):
'''random variable
Parameters
----------
size : int or tuple
the number and shape of random variables to draw.
Returns
-------
rvs : ndarray
the returned random variables with shape given by size and the
dimension of the multivariate random vector as additional last
dimension
Notes
-----
uses numpy.random.multivariate_normal directly
'''
return np.random.multivariate_normal(self.mean, self.sigma, size=size)
def logpdf(self, x):
'''logarithm of probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
        logpdf : float or array
            log of the probability density for each random vector
this should be made to work with 2d x,
with multivariate normal vector in each row and iid across rows
doesn't work now because of dot in whiten
'''
x = np.asarray(x)
x_whitened = self.whiten(x - self.mean)
SSR = np.sum(x_whitened**2, -1)
llf = -SSR
llf -= self.nvars * np.log(2. * np.pi)
llf -= self.logdetsigma
llf *= 0.5
return llf
def cdf(self, x, **kwds):
'''cumulative distribution function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
kwds : dict
contains options for the numerical calculation of the cdf
Returns
-------
        cdf : float or array
            cumulative probability of each random vector
'''
#lower = -np.inf * np.ones_like(x)
#return mvstdnormcdf(lower, self.standardize(x), self.corr, **kwds)
return mvnormcdf(x, self.mean, self.cov, **kwds)
@property
def cov(self):
'''covariance matrix'''
return self.sigma
def affine_transformed(self, shift, scale_matrix):
'''return distribution of an affine transform
for full rank scale_matrix only
Parameters
----------
shift : array_like
shift of mean
scale_matrix : array_like
linear transformation matrix
Returns
-------
mvt : instance of MVT
instance of multivariate t distribution given by affine
transformation
Notes
-----
the affine transformation is defined by
y = a + B x
where a is shift,
B is a scale matrix for the linear transformation
Notes
-----
This should also work to select marginal distributions, but not
tested for this case yet.
currently only tested because it's called by standardized
'''
B = scale_matrix #tmp variable
mean_new = np.dot(B, self.mean) + shift
sigma_new = np.dot(np.dot(B, self.sigma), B.T)
return MVNormal(mean_new, sigma_new)
def conditional(self, indices, values):
'''return conditional distribution
indices are the variables to keep, the complement is the conditioning
set
values are the values of the conditioning variables
\bar{\mu} = \mu_1 + \Sigma_{12} \Sigma_{22}^{-1} \left( a - \mu_2 \right)
and covariance matrix
\overline{\Sigma} = \Sigma_{11} - \Sigma_{12} \Sigma_{22}^{-1} \Sigma_{21}.T
Parameters
----------
indices : array_like, int
list of indices of variables in the marginal distribution
        values : array_like
            values of the conditioning variables
Returns
-------
mvn : instance of MVNormal
new instance of the MVNormal class that contains the conditional
distribution of the variables given in indices for given
values of the excluded variables.
'''
#indices need to be nd arrays for broadcasting
keep = np.asarray(indices)
        given = np.asarray([i for i in range(self.nvars) if i not in keep])
sigmakk = self.sigma[keep[:, None], keep]
sigmagg = self.sigma[given[:, None], given]
sigmakg = self.sigma[keep[:, None], given]
sigmagk = self.sigma[given[:, None], keep]
sigma_new = sigmakk - np.dot(sigmakg, np.linalg.solve(sigmagg, sigmagk))
mean_new = self.mean[keep] + \
np.dot(sigmakg, np.linalg.solve(sigmagg, values-self.mean[given]))
# #or
# sig = np.linalg.solve(sigmagg, sigmagk).T
# mean_new = self.mean[keep] + np.dot(sigmakg, values-self.mean[given])
# sigma_new = sigmakk - np.dot(sigmakg, sig)
return MVNormal(mean_new, sigma_new)
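# Example (sketch, using mu3/cov3 from the module docstring): condition the
# trivariate normal on its last variable taking the value 1.
# >>> mvn3 = MVNormal([-1, 0., 2.], cov3)
# >>> mvn12 = mvn3.conditional([0, 1], [1.0])  # distribution of x0, x1 | x2=1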
from scipy import special
#redefine some shortcuts
np_log = np.log
np_pi = np.pi
sps_gamln = special.gammaln
class MVT(MVElliptical):
    __name__ = 'Multivariate Student T Distribution'
def __init__(self, mean, sigma, df):
'''initialize instance
Parameters
----------
mean : array_like
parameter mu (might be renamed), for symmetric distributions this
is the mean
sigma : array_like, 2d
dispersion matrix, covariance matrix in normal distribution, but
only proportional to covariance matrix in t distribution
        df : int or float
            degrees of freedom of the t distribution
'''
super(MVT, self).__init__(mean, sigma)
self.extra_args = ['df'] #overwrites extra_args of super
self.df = df
def rvs(self, size=1):
'''random variables with Student T distribution
Parameters
----------
size : int or tuple
the number and shape of random variables to draw.
Returns
-------
rvs : ndarray
the returned random variables with shape given by size and the
dimension of the multivariate random vector as additional last
dimension
- TODO: Not sure if this works for size tuples with len>1.
Notes
-----
generated as a chi-square mixture of multivariate normal random
variables.
does this require df>2 ?
'''
from .multivariate import multivariate_t_rvs
return multivariate_t_rvs(self.mean, self.sigma, df=self.df, n=size)
def logpdf(self, x):
'''logarithm of probability density function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
Returns
-------
        logpdf : float or array
            log of the probability density for each random vector
'''
x = np.asarray(x)
df = self.df
nvars = self.nvars
x_whitened = self.whiten(x - self.mean) #should be float
llf = - nvars * np_log(df * np_pi)
llf -= self.logdetsigma
llf -= (df + nvars) * np_log(1 + np.sum(x_whitened**2,-1) / df)
llf *= 0.5
llf += sps_gamln((df + nvars) / 2.) - sps_gamln(df / 2.)
return llf
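    # The expression above implements, with p = nvars and
    # delta = np.sum(x_whitened**2, -1) the Mahalanobis term:
    #   logpdf(x) = gammaln((df+p)/2) - gammaln(df/2)
    #               - 0.5*p*log(df*pi) - 0.5*log(det(sigma))
    #               - 0.5*(df+p)*log(1 + delta/df)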
def cdf(self, x, **kwds):
'''cumulative distribution function
Parameters
----------
x : array_like
can be 1d or 2d, if 2d, then each row is taken as independent
multivariate random vector
kwds : dict
contains options for the numerical calculation of the cdf
Returns
-------
        cdf : float or array
            cumulative probability of each random vector
'''
lower = -np.inf * np.ones_like(x)
#std_sigma = np.sqrt(np.diag(self.sigma))
upper = (x - self.mean)/self.std_sigma
return mvstdtprob(lower, upper, self.corr, self.df, **kwds)
#mvstdtcdf doesn't exist yet
#return mvstdtcdf(lower, x, self.corr, df, **kwds)
@property
def cov(self):
'''covariance matrix
The covariance matrix for the t distribution does not exist for df<=2,
and is equal to sigma * df/(df-2) for df>2
'''
if self.df <= 2:
return np.nan * np.ones_like(self.sigma)
else:
return self.df / (self.df - 2.) * self.sigma
def affine_transformed(self, shift, scale_matrix):
'''return distribution of a full rank affine transform
for full rank scale_matrix only
Parameters
----------
shift : array_like
shift of mean
scale_matrix : array_like
linear transformation matrix
Returns
-------
mvt : instance of MVT
instance of multivariate t distribution given by affine
transformation
Notes
-----
This checks for eigvals<=0, so there are possible problems for cases
with positive eigenvalues close to zero.
see: http://www.statlect.com/mcdstu1.htm
I'm not sure about general case, non-full rank transformation are not
multivariate t distributed.
y = a + B x
where a is shift,
B is full rank scale matrix with same dimension as sigma
'''
#full rank method could also be in elliptical and called with super
#after the rank check
        B = scale_matrix #tmp variable as shorthand
        if B.shape != (self.nvars, self.nvars):
            raise ValueError('scale_matrix needs to be square with dimension nvars')
        if (np.linalg.eigvals(B) <= 0).any():
            raise ValueError('affine transform has to be full rank')
mean_new = np.dot(B, self.mean) + shift
sigma_new = np.dot(np.dot(B, self.sigma), B.T)
return MVT(mean_new, sigma_new, self.df)
def quad2d(func=lambda x: 1, lower=(-10,-10), upper=(10,10)):
def fun(x, y):
x = np.column_stack((x,y))
return func(x)
from scipy.integrate import dblquad
return dblquad(fun, lower[0], upper[0], lambda y: lower[1],
lambda y: upper[1])
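# Example (sketch): integrating a bivariate pdf over the default box should
# give roughly 1; dblquad returns the pair (integral, abserr).
# >>> bvn = BivariateNormal((0, 0), np.array([[1.0, 0.5], [0.5, 1.0]]))
# >>> quad2d(bvn.pdf)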
if __name__ == '__main__':
from numpy.testing import assert_almost_equal, assert_array_almost_equal
examples = ['mvn']
mu = (0,0)
covx = np.array([[1.0, 0.5], [0.5, 1.0]])
mu3 = [-1, 0., 2.]
cov3 = np.array([[ 1. , 0.5 , 0.75],
[ 0.5 , 1.5 , 0.6 ],
[ 0.75, 0.6 , 2. ]])
if 'mvn' in examples:
bvn = BivariateNormal(mu, covx)
rvs = bvn.rvs(size=1000)
print(rvs.mean(0))
print(np.cov(rvs, rowvar=0))
print(bvn.expect())
print(bvn.cdf([0,0]))
bvn1 = BivariateNormal(mu, np.eye(2))
bvn2 = BivariateNormal(mu, 4*np.eye(2))
fun = lambda x : np.log(bvn1.pdf(x)) - np.log(bvn.pdf(x))
print(bvn1.expect(fun))
print(bvn1.kl(bvn2), bvn1.kl_mc(bvn2))
print(bvn2.kl(bvn1), bvn2.kl_mc(bvn1))
print(bvn1.kl(bvn), bvn1.kl_mc(bvn))
mvn = MVNormal(mu, covx)
mvn.pdf([0,0])
mvn.pdf(np.zeros((2,2)))
#np.dot(mvn.cholcovinv.T, mvn.cholcovinv) - mvn.covinv
cov3 = np.array([[ 1. , 0.5 , 0.75],
[ 0.5 , 1.5 , 0.6 ],
[ 0.75, 0.6 , 2. ]])
mu3 = [-1, 0., 2.]
mvn3 = MVNormal(mu3, cov3)
mvn3.pdf((0., 2., 3.))
mvn3.logpdf((0., 2., 3.))
#comparisons with R mvtnorm::dmvnorm
#decimal=14
# mvn3.logpdf(cov3) - [-7.667977543898155, -6.917977543898155, -5.167977543898155]
# #decimal 18
# mvn3.pdf(cov3) - [0.000467562492721686, 0.000989829804859273, 0.005696077243833402]
# #cheating new mean, same cov
# mvn3.mean = np.array([0,0,0])
# #decimal= 16
# mvn3.pdf(cov3) - [0.02914269740502042, 0.02269635555984291, 0.01767593948287269]
#as asserts
r_val = [-7.667977543898155, -6.917977543898155, -5.167977543898155]
assert_array_almost_equal( mvn3.logpdf(cov3), r_val, decimal = 14)
#decimal 18
r_val = [0.000467562492721686, 0.000989829804859273, 0.005696077243833402]
assert_array_almost_equal( mvn3.pdf(cov3), r_val, decimal = 17)
#cheating new mean, same cov, too dangerous, got wrong instance in tests
#mvn3.mean = np.array([0,0,0])
mvn3c = MVNormal(np.array([0,0,0]), cov3)
r_val = [0.02914269740502042, 0.02269635555984291, 0.01767593948287269]
assert_array_almost_equal( mvn3c.pdf(cov3), r_val, decimal = 16)
mvn3b = MVNormal((0,0,0), 1)
fun = lambda x : np.log(mvn3.pdf(x)) - np.log(mvn3b.pdf(x))
print(mvn3.expect_mc(fun))
print(mvn3.expect_mc(fun, size=200000))
mvt = MVT((0,0), 1, 5)
assert_almost_equal(mvt.logpdf(np.array([0.,0.])), -1.837877066409345,
decimal=15)
assert_almost_equal(mvt.pdf(np.array([0.,0.])), 0.1591549430918953,
decimal=15)
mvt.logpdf(np.array([1.,1.]))-(-3.01552989458359)
mvt1 = MVT((0,0), 1, 1)
mvt1.logpdf(np.array([1.,1.]))-(-3.48579549941151) #decimal=16
rvs = mvt.rvs(100000)
assert_almost_equal(np.cov(rvs, rowvar=0), mvt.cov, decimal=1)
mvt31 = MVT(mu3, cov3, 1)
assert_almost_equal(mvt31.pdf(cov3),
[0.0007276818698165781, 0.0009980625182293658, 0.0027661422056214652],
decimal=18)
mvt = MVT(mu3, cov3, 3)
assert_almost_equal(mvt.pdf(cov3),
[0.000863777424247410, 0.001277510788307594, 0.004156314279452241],
decimal=17)
| hlin117/statsmodels | statsmodels/sandbox/distributions/mv_normal.py | Python | bsd-3-clause | 38,702 | ["Gaussian"] | 5f5bcf977f8207d718699c539673cdd96f79aafaa59ed8d5c0f9335bb0d1e07c |
#!/usr/bin/env python
"""
Uses the mat3 and vec3 classes from Python Computer Graphics Kit v1.2.0
module by Matthias Baas (see http://cgkit.sourceforge.net).
License: http://www.opensource.org/licenses/bsd-license.php
"""
__author__ = "Pierre Legrand (pierre.legrand \at synchrotron-soleil.fr)"
__date__ = "21-10-2005"
__copyright__ = "Copyright (c) 2005-2009 Pierre Legrand"
__license__ = "New BSD License"
__version__ = "0.4.8"
import sys
import os.path
from XOconv import *
_progname = os.path.split(sys.argv[0])[1]
_usage = """
Converting Denzo crystal orientation information to Mosflm format.
A program to convert the orientation matrix from Denzo (.x output
files) to a Mosflm matrix file and a simple Mosflm input file:
USAGE: %s [OPTION]... FILE
FILE is the Denzo dot.x reflection file.
OPTIONS:
-h
--help
Print this help message.
-p
--pg-permutations
Print out the other equivalent crystal orientation
informations based on the point group allowed permutations.
-s
--start-mosflm
Start "ipmosflm < dnz2mos.inp". Than clic on the "Predict" buton
to verify predictions.
-v
--verbose
Turn on verbose output.
""" % _progname
DNZAxes = ey, -1*ex, -1*ez
Qdnz2mos = mat3(ez, ex, ey).transpose()
mosflmInpTemplate = """
TITLE Input created from %(title)s
MATRIX %(matrix_file)s
WAVELENGTH %(wavelength)12.5f
DISTANCE %(distance)12.3f
BEAM %(beam_x)12.3f %(beam_y)12.3f
SYMMETRY %(symmetry)s
MOSAICITY %(mosaicity).3f
IMAGE %(image_name)s
GO
"""
def PARS_dnz2mos(dnzPar):
"Convert Denzo output parameters to Mosflm input parameters."
mosPar = {}
mosPar["title"] = "dnz2mos version: %s" % (__version__)
mosPar["distance"] = dnzPar.distance
mosPar["wavelength"] = dnzPar.wavel
mosPar["symmetry"] = dnzPar.spg.upper()
#mosPar["omega"] = xdsPar["omega"]*r2d
#mosPar["twotheta"] = xdsPar["twotheta"]*r2d
mosPar["beam_x"] = dnzPar.beam_x
mosPar["beam_y"] = dnzPar.beam_y
mosPar["template"] = dnzPar.template
mosPar["mosaicity"] = dnzPar.mosaic
return mosPar
if __name__ == '__main__':
import getopt
short_opt = "aho:pvs"
long_opt = ["angles",
"help",
"output=",
"pg-permutations",
"start-mosflm",
"verbose"]
try:
opts, inputf = getopt.getopt(sys.argv[1:], short_opt, long_opt)
except getopt.GetoptError:
# print help information and exit:
print _usage
sys.exit(2)
_angles = False
_do_PG_permutations = False
_start_mosflm = False
_verbose = False
if inputf:
base = ".".join(inputf[0].split(".")[:-1])
matName = os.path.basename(base + ".dnz2mos.umat")
inpName = os.path.basename(base + ".dnz2mos.inp")
for o, a in opts:
if o == "-v":
_verbose = True
if o in ("-h", "--help"):
print _usage
sys.exit()
if o in ("-a", "--angles"):
_angles = True
" Writing Crystal setting angles in place of the U matrix."
if o in ("-o", "--output"):
matName = a
if o in ("-p","--pg-permutations"):
_do_PG_permutations = True
if o in ("-s", "--start-mosflm"):
_start_mosflm = True
x = DenzoParser(inputf[0])
B = BusingLevy(x.cell_r)
MOSi = MosflmParser()
MOSi.cell = x.cell
MOSi.UB = Qdnz2mos * x.UB * x.wavel
MOSi.U = MOSi.UB * B.inverse() / x.wavel
print
print ">>> Space Group : %s" % (x.spg.upper())
print ">>> Unit Cell : %s" % (6*"%.2f " % (x.cell))
printmat(MOSi.UB/x.wavel, '\n>>> UBmos/x.wavel')
printmat(MOSi.U, '\n>>> Umos')
printmat(B, '\n>>> Bmos')
    # There are two ways to write the crystal orientation in mosflm:
    # using the U matrix or the phi1, phi2, phi3 missetting angles (in degrees).
if not _angles:
MOSi.missetingAngles = 0, 0, 0
else:
solve = ThreeAxisRotation2(MOSi.U.mlist, inversAxesOrder=1).getAngles()
MOSi.missetingAngles = map_r2d(solve[0])
print "\n Setting angles: PHIX %8.2f PHIY %8.2f PHIZ %8.2f\n" % \
tuple(MOSi.missetingAngles)
Umos = mat3(1)
MOSi.write_umat(matName)
MOSpar = PARS_dnz2mos(x)
MOSpar["matrix_file"] = matName
MOSpar["image_name"] = MOSpar["template"].replace("###","001")
openWriteClose(inpName, mosflmInpTemplate % MOSpar)
if _start_mosflm:
os.system("mosflm < %s" % inpName)
| RAPD/RAPD | src/plugins/subcontractors/xdsme/new/xdsme-0.4.9/XOconv/dnz2mos.py | Python | agpl-3.0 | 4,805 | ["CRYSTAL"] | df309ff6df833839e3e70a63d3bc895353e65bb29ce89eb5b032cd9b13c967f1 |
|
#!/usr/bin/env python
from ecmwfapi import ECMWFDataServer
server = ECMWFDataServer()
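# The request below asks for ERA-Interim geopotential (param "129.128" in
# ECMWF's GRIB parameter table) on the 500 hPa pressure level at the four
# 6-hourly analysis times, saved as NetCDF to the hard-coded scratch path.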
server.retrieve({
"class": "ei",
"dataset": "interim",
"date": "1979-01-01/to/2017-05-31",
"expver": "1",
"grid": "0.75/0.75",
"levelist": "500",
"levtype": "pl",
"param": "129.128",
"step": "0",
"stream": "oper",
"time": "00:00:00/06:00:00/12:00:00/18:00:00",
"format" : "netcdf",
"type": "an",
"target": "/barnes-scratch/sbrey/era_interim_nc_6_hourly/z_all.nc",
})
| stevenjoelbrey/PMFutures | sh/get_all_era_interim_z.py | Python | mit | 532 | ["NetCDF"] | 057df6ffa01b5df87391478bcf1ce323b5546d86fd1161f7baf4d002a74ac18a |
|
"""
Test the Studio help links.
"""
from nose.plugins.attrib import attr
from unittest import skip
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.tests.studio.base_studio_test import StudioCourseTest, ContainerBase
from common.test.acceptance.pages.studio.index import DashboardPage
from common.test.acceptance.pages.studio.utils import click_studio_help, studio_help_links
from common.test.acceptance.pages.studio.index import IndexPage, HomePage
from common.test.acceptance.tests.studio.base_studio_test import StudioLibraryTest
from common.test.acceptance.pages.studio.course_info import CourseUpdatesPage
from common.test.acceptance.pages.studio.utils import click_css
from common.test.acceptance.pages.studio.library import LibraryPage
from common.test.acceptance.pages.studio.users import LibraryUsersPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage
from common.test.acceptance.pages.studio.asset_index import AssetIndexPage
from common.test.acceptance.pages.studio.edit_tabs import PagesPage
from common.test.acceptance.pages.studio.textbook_upload import TextbookUploadPage
from common.test.acceptance.pages.studio.settings import SettingsPage
from common.test.acceptance.pages.studio.settings_graders import GradingPage
from common.test.acceptance.pages.studio.settings_group_configurations import GroupConfigurationsPage
from common.test.acceptance.pages.studio.settings_advanced import AdvancedSettingsPage
from common.test.acceptance.pages.studio.settings_certificates import CertificatesPage
from common.test.acceptance.pages.studio.import_export import ExportCoursePage, ImportCoursePage
from common.test.acceptance.pages.studio.users import CourseTeamPage
from common.test.acceptance.tests.helpers import (
AcceptanceTest,
assert_nav_help_link,
assert_side_bar_help_link
)
from common.test.acceptance.pages.studio.import_export import ExportLibraryPage, ImportLibraryPage
from common.test.acceptance.pages.studio.auto_auth import AutoAuthPage
from openedx.core.release import doc_version
DOCUMENTATION_URL_TEMPLATE = (
'http://edx.readthedocs.io/projects/open-edx-building-and-running-a-course/en/{doc_version}{path}'
)
def _get_expected_documentation_url(path):
"""
Returns the expected URL for the building and running a course documentation.
"""
return DOCUMENTATION_URL_TEMPLATE.format(
doc_version=doc_version(),
path=path,
)
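# For example, assuming doc_version() returns 'latest',
# _get_expected_documentation_url('/get_started.html') resolves to
# 'http://edx.readthedocs.io/projects/open-edx-building-and-running-a-course/en/latest/get_started.html'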
@attr(shard=10)
class StudioHelpTest(StudioCourseTest):
"""Tests for Studio help."""
def test_studio_help_links(self):
"""Test that the help links are present and have the correct content."""
page = DashboardPage(self.browser)
page.visit()
click_studio_help(page)
links = studio_help_links(page)
expected_links = [{
'href': u'http://docs.edx.org/',
'text': u'edX Documentation',
'sr_text': u'Access documentation on http://docs.edx.org'
}, {
'href': u'https://open.edx.org/',
'text': u'Open edX Portal',
'sr_text': u'Access the Open edX Portal'
}, {
'href': u'https://www.edx.org/course/overview-creating-edx-course-edx-edx101#.VO4eaLPF-n1',
'text': u'Enroll in edX101',
'sr_text': u'Enroll in edX101: Overview of Creating an edX Course'
}, {
'href': u'https://www.edx.org/course/creating-course-edx-studio-edx-studiox',
'text': u'Enroll in StudioX',
'sr_text': u'Enroll in StudioX: Creating a Course with edX Studio'
}, {
'href': u'mailto:partner-support@example.com',
'text': u'Contact Us',
'sr_text': 'Send an email to partner-support@example.com'
}]
for expected, actual in zip(expected_links, links):
self.assertEqual(expected['href'], actual.get_attribute('href'))
self.assertEqual(expected['text'], actual.text)
self.assertEqual(
expected['sr_text'],
actual.find_element_by_xpath('following-sibling::span').text
)
@attr(shard=10)
class SignInHelpTest(AcceptanceTest):
"""
Tests help links on 'Sign In' page
"""
def setUp(self):
super(SignInHelpTest, self).setUp()
self.index_page = IndexPage(self.browser)
self.index_page.visit()
def test_sign_in_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Sign In' page.
        Given that I am on the 'Sign In' page.
And I want help about the sign in
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
sign_in_page = self.index_page.click_sign_in()
expected_url = _get_expected_documentation_url('/get_started.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=sign_in_page,
href=expected_url,
signed_in=False
)
@attr(shard=10)
class SignUpHelpTest(AcceptanceTest):
"""
Tests help links on 'Sign Up' page.
"""
def setUp(self):
super(SignUpHelpTest, self).setUp()
self.index_page = IndexPage(self.browser)
self.index_page.visit()
def test_sign_up_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Sign Up' page.
        Given that I am on the 'Sign Up' page.
And I want help about the sign up
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
sign_up_page = self.index_page.click_sign_up()
expected_url = _get_expected_documentation_url('/get_started.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=sign_up_page,
href=expected_url,
signed_in=False
)
@attr(shard=10)
class HomeHelpTest(StudioCourseTest):
"""
Tests help links on 'Home'(Courses tab) page.
"""
def setUp(self): # pylint: disable=arguments-differ
super(HomeHelpTest, self).setUp()
self.home_page = HomePage(self.browser)
self.home_page.visit()
def test_course_home_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Home'(Courses tab) page.
Given that I am on the 'Home'(Courses tab) page.
And I want help about the courses
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/get_started.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.home_page,
href=expected_url
)
def test_course_home_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on 'Home'(Courses tab) page.
Given that I am on the 'Home'(Courses tab) page.
And I want help about the courses
And I click the 'Getting Started with edX Studio' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/get_started.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.home_page,
href=expected_url,
help_text='Getting Started with edX Studio',
as_list_item=True
)
@attr(shard=10)
class NewCourseHelpTest(AcceptanceTest):
"""
Test help links while creating a new course.
"""
def setUp(self):
super(NewCourseHelpTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
self.auth_page.visit()
self.dashboard_page.visit()
self.assertTrue(self.dashboard_page.new_course_button.present)
self.dashboard_page.click_new_course_button()
def test_course_create_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Create a New Course' page in the dashboard.
Given that I am on the 'Create a New Course' page in the dashboard.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/get_started.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.dashboard_page,
href=expected_url
)
def test_course_create_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on 'Create a New Course' page in the dashboard.
Given that I am on the 'Create a New Course' page in the dashboard.
And I want help about the process
And I click the 'Getting Started with edX Studio' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/get_started.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.dashboard_page,
href=expected_url,
help_text='Getting Started with edX Studio',
as_list_item=True
)
@attr(shard=10)
class NewLibraryHelpTest(AcceptanceTest):
"""
Test help links while creating a new library
"""
def setUp(self):
super(NewLibraryHelpTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
self.auth_page.visit()
self.dashboard_page.visit()
self.assertTrue(self.dashboard_page.has_new_library_button)
self.dashboard_page.click_new_library()
def test_library_create_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Create a New Library' page in the dashboard.
Given that I am on the 'Create a New Library' page in the dashboard.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/get_started.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.dashboard_page,
href=expected_url
)
def test_library_create_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on 'Create a New Library' page in the dashboard.
Given that I am on the 'Create a New Library' page in the dashboard.
And I want help about the process
And I click the 'Getting Started with edX Studio' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/get_started.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.dashboard_page,
href=expected_url,
help_text='Getting Started with edX Studio',
as_list_item=True
)
@attr(shard=10)
class LibraryTabHelpTest(AcceptanceTest):
"""
Test help links on the library tab present at dashboard.
"""
def setUp(self):
super(LibraryTabHelpTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
self.auth_page.visit()
self.dashboard_page.visit()
def test_library_tab_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Home'(Courses tab) page.
Given that I am on the 'Home'(Courses tab) page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
self.assertTrue(self.dashboard_page.has_new_library_button)
click_css(self.dashboard_page, '#course-index-tabs .libraries-tab', 0, False)
expected_url = _get_expected_documentation_url('/get_started.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.dashboard_page,
href=expected_url
)
@attr(shard=10)
class LibraryHelpTest(StudioLibraryTest):
"""
Test help links on a Library page.
"""
def setUp(self):
super(LibraryHelpTest, self).setUp()
self.library_page = LibraryPage(self.browser, self.library_key)
self.library_user_page = LibraryUsersPage(self.browser, self.library_key)
def test_library_content_nav_help(self):
"""
Scenario: Help link in navigation bar is working on content
library page(click a library on the Library list page).
Given that I am on the content library page(click a library on the Library list page).
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
self.library_page.visit()
expected_url = _get_expected_documentation_url('/course_components/libraries.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.library_page,
href=expected_url
)
def test_library_content_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on
content library page(click a library on the Library list page).
Given that I am on the content library page(click a library on the Library list page).
And I want help about the process
And I click the 'Learn more about content libraries' in the sidebar links
Then Help link should open.
And help url should be correct
"""
self.library_page.visit()
expected_url = _get_expected_documentation_url('/course_components/libraries.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.library_page,
href=expected_url,
help_text='Learn more about content libraries'
)
def test_library_user_access_setting_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'User Access'
settings page of library.
Given that I am on the 'User Access' settings page of library.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct.
"""
self.library_user_page.visit()
expected_url = _get_expected_documentation_url(
'/course_components/libraries.html#give-other-users-access-to-your-library'
)
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.library_user_page,
href=expected_url,
)
@attr(shard=10)
class LibraryImportHelpTest(StudioLibraryTest):
"""
Test help links on a Library import and export pages.
"""
def setUp(self):
super(LibraryImportHelpTest, self).setUp()
self.library_import_page = ImportLibraryPage(self.browser, self.library_key)
self.library_import_page.visit()
def test_library_import_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Library import page.
Given that I am on the Library import page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_components/libraries.html#import-a-library')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.library_import_page,
href=expected_url
)
def test_library_import_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on Library import page.
Given that I am on the Library import page.
And I want help about the process
And I click the 'Learn more about importing a library' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_components/libraries.html#import-a-library')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.library_import_page,
href=expected_url,
help_text='Learn more about importing a library'
)
@attr(shard=10)
class LibraryExportHelpTest(StudioLibraryTest):
"""
Test help links on a Library export pages.
"""
def setUp(self):
super(LibraryExportHelpTest, self).setUp()
self.library_export_page = ExportLibraryPage(self.browser, self.library_key)
self.library_export_page.visit()
def test_library_export_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Library export page.
Given that I am on the Library export page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_components/libraries.html#export-a-library')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.library_export_page,
href=expected_url
)
def test_library_export_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on Library export page.
Given that I am on the Library export page.
And I want help about the process
And I click the 'Learn more about exporting a library' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_components/libraries.html#export-a-library')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.library_export_page,
href=expected_url,
help_text='Learn more about exporting a library'
)
@attr(shard=10)
class CourseOutlineHelpTest(StudioCourseTest):
"""
Tests help links on course outline page.
"""
def setUp(self): # pylint: disable=arguments-differ
super(CourseOutlineHelpTest, self).setUp()
self.course_outline_page = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_outline_page.visit()
@skip("This scenario depends upon TNL-5460")
def test_course_outline_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Course Outline page
Given that I am on the Course Outline page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/developing_course/course_outline.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_outline_page,
href=expected_url
)
def test_course_outline_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on Course Outline page
Given that I am on the Course Outline page.
And I want help about the process
And I click the 'Learn more about the course outline' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/developing_course/course_outline.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.course_outline_page,
href=expected_url,
help_text='Learn more about the course outline',
index=0
)
@attr(shard=10)
class CourseUpdateHelpTest(StudioCourseTest):
"""
Test help links on Course Update page
"""
def setUp(self): # pylint: disable=arguments-differ
super(CourseUpdateHelpTest, self).setUp()
self.course_update_page = CourseUpdatesPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_update_page.visit()
def test_course_update_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Course Update' page
Given that I am on the 'Course Update' page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/handouts_updates.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_update_page,
href=expected_url,
)
@attr(shard=10)
class AssetIndexHelpTest(StudioCourseTest):
"""
Test help links on Course 'Files & Uploads' page
"""
def setUp(self): # pylint: disable=arguments-differ
super(AssetIndexHelpTest, self).setUp()
self.course_asset_index_page = AssetIndexPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_asset_index_page.visit()
def test_asset_index_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Files & Uploads' page
Given that I am on the 'Files & Uploads' page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/course_files.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_asset_index_page,
href=expected_url,
)
def test_asset_index_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on 'Files & Uploads' page
Given that I am on the 'Files & Uploads' page.
And I want help about the process
And I click the 'Learn more about managing files' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/course_files.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.course_asset_index_page,
href=expected_url,
help_text='Learn more about managing files'
)
@attr(shard=10)
class CoursePagesHelpTest(StudioCourseTest):
"""
Test help links on Course 'Pages' page
"""
def setUp(self): # pylint: disable=arguments-differ
super(CoursePagesHelpTest, self).setUp()
self.course_pages_page = PagesPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_pages_page.visit()
def test_course_page_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Pages' page
Given that I am on the 'Pages' page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/pages.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_pages_page,
href=expected_url,
)
@attr(shard=10)
class UploadTextbookHelpTest(StudioCourseTest):
"""
Test help links on Course 'Textbooks' page
"""
def setUp(self): # pylint: disable=arguments-differ
super(UploadTextbookHelpTest, self).setUp()
self.course_textbook_upload_page = TextbookUploadPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_textbook_upload_page.visit()
def test_course_textbook_upload_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Textbooks' page
Given that I am on the 'Textbooks' page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/textbooks.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_textbook_upload_page,
href=expected_url,
)
def test_course_textbook_side_bar_help(self):
"""
Scenario: Help link in sidebar links is working on 'Textbooks' page
Given that I am on the 'Textbooks' page
And I want help about the process
And I click the 'Learn more about textbooks' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_assets/textbooks.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.course_textbook_upload_page,
href=expected_url,
help_text='Learn more about textbooks'
)
@attr(shard=10)
class StudioUnitHelpTest(ContainerBase):
"""
Tests help links on Unit page.
"""
def setUp(self, is_staff=True):
super(StudioUnitHelpTest, self).setUp(is_staff=is_staff)
def populate_course_fixture(self, course_fixture):
"""
Populates the course fixture.
We are modifying 'advanced_modules' setting of the
course.
Also add a section with a subsection and a unit.
"""
course_fixture.add_advanced_settings(
{u"advanced_modules": {"value": ["split_test"]}}
)
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
)
)
def test_unit_page_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Unit page.
Given that I am on the Unit page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
unit_page = self.go_to_unit_page()
expected_url = _get_expected_documentation_url('/developing_course/course_units.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=unit_page,
href=expected_url,
)
@attr(shard=10)
class SettingsHelpTest(StudioCourseTest):
"""
Tests help links on Schedule and Details Settings page
"""
def setUp(self, is_staff=False, test_xss=True):
super(SettingsHelpTest, self).setUp()
self.settings_page = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.settings_page.visit()
def test_settings_page_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Settings page.
Given that I am on the Settings page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/set_up_course/setting_up_student_view.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.settings_page,
href=expected_url,
)
@attr(shard=10)
class GradingPageHelpTest(StudioCourseTest):
"""
Tests help links on Grading page
"""
def setUp(self, is_staff=False, test_xss=True):
super(GradingPageHelpTest, self).setUp()
self.grading_page = GradingPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.grading_page.visit()
def test_grading_page_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Grading page.
Given that I am on the Grading page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/grading/index.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.grading_page,
href=expected_url,
)
@attr(shard=10)
class CourseTeamSettingsHelpTest(StudioCourseTest):
"""
Tests help links on Course Team settings page
"""
def setUp(self, is_staff=False, test_xss=True):
super(CourseTeamSettingsHelpTest, self).setUp()
self.course_team_settings_page = CourseTeamPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_team_settings_page.visit()
    def test_course_team_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Course Team settings page
Given that I am on the Course Team settings page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/set_up_course/course_staffing.html#add-course-team-members')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_team_settings_page,
href=expected_url,
)
@attr(shard=10)
class CourseGroupConfigurationHelpTest(StudioCourseTest):
"""
Tests help links on course Group Configurations settings page
"""
def setUp(self, is_staff=False, test_xss=True):
super(CourseGroupConfigurationHelpTest, self).setUp()
self.course_group_configuration_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_group_configuration_page.visit()
def test_course_group_conf_nav_help(self):
"""
Scenario: Help link in navigation bar is working on
Group Configurations settings page
Given that I am on the Group Configurations settings page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/index.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.course_group_configuration_page,
href=expected_url,
)
def test_course_group_conf_content_group_side_bar_help(self):
"""
Scenario: Help link in side bar under the 'content group' is working
on Group Configurations settings page
Given that I am on the Group Configurations settings page
And I want help about the process
And I click the 'Learn More' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/course_features/cohorts/cohorted_courseware.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.course_group_configuration_page,
href=expected_url,
help_text='Learn More'
)
@attr(shard=10)
class AdvancedSettingHelpTest(StudioCourseTest):
"""
Tests help links on course Advanced Settings page.
"""
def setUp(self, is_staff=False, test_xss=True):
super(AdvancedSettingHelpTest, self).setUp()
self.advanced_settings = AdvancedSettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.advanced_settings.visit()
def test_advanced_settings_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Advanced Settings page.
Given that I am on the Advanced Settings page.
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/index.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.advanced_settings,
href=expected_url,
)
@attr(shard=10)
class CertificatePageHelpTest(StudioCourseTest):
"""
Tests help links on course Certificate settings page.
"""
def setUp(self, is_staff=False, test_xss=True):
super(CertificatePageHelpTest, self).setUp()
self.certificates_page = CertificatesPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.certificates_page.visit()
def test_certificate_page_nav_help(self):
"""
Scenario: Help link in navigation bar is working on Certificate settings page
Given that I am on the Certificate settings page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/set_up_course/creating_course_certificates.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.certificates_page,
href=expected_url,
)
def test_certificate_page_side_bar_help(self):
"""
        Scenario: Help link in side bar is working on the Certificate settings page
Given that I am on the Certificate settings page
And I want help about the process
And I click the 'Learn more about certificates' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/set_up_course/creating_course_certificates.html')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.certificates_page,
href=expected_url,
help_text='Learn more about certificates',
)
@attr(shard=10)
class GroupExperimentConfigurationHelpTest(ContainerBase):
"""
Tests help links on course Group Configurations settings page
It is related to Experiment Group Configurations on the page.
"""
def setUp(self): # pylint: disable=arguments-differ
super(GroupExperimentConfigurationHelpTest, self).setUp()
self.group_configuration_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# self.create_poorly_configured_split_instance()
self.group_configuration_page.visit()
def populate_course_fixture(self, course_fixture):
"""
Populates the course fixture.
We are modifying 'advanced_modules' setting of the
course.
"""
course_fixture.add_advanced_settings(
{u"advanced_modules": {"value": ["split_test"]}}
)
def test_course_group_configuration_experiment_side_bar_help(self):
"""
Scenario: Help link in side bar under the 'Experiment Group Configurations'
is working on Group Configurations settings page
Given that I am on the Group Configurations settings page
And I want help about the process
And I click the 'Learn More' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url(
'/course_features/content_experiments/content_experiments_configure.html'
'#set-up-group-configurations-in-edx-studio'
)
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.group_configuration_page,
href=expected_url,
help_text='Learn More',
)
@attr(shard=10)
class ToolsImportHelpTest(StudioCourseTest):
"""
    Tests help links on the course import page.
"""
def setUp(self, is_staff=False, test_xss=True):
super(ToolsImportHelpTest, self).setUp()
self.import_page = ImportCoursePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.import_page.visit()
def test_tools_import_nav_help(self):
"""
        Scenario: Help link in navigation bar is working on the course import page
        Given that I am on the course import page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/releasing_course/export_import_course.html#import-a-course')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.import_page,
href=expected_url,
)
def test_tools_import_side_bar_help(self):
"""
        Scenario: Help link in side bar is working on the course import page
        Given that I am on the course import page
And I want help about the process
And I click the 'Learn more about importing a course' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/releasing_course/export_import_course.html#import-a-course')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.import_page,
href=expected_url,
help_text='Learn more about importing a course',
)
@attr(shard=10)
class ToolsExportHelpTest(StudioCourseTest):
"""
    Tests help links on the course export page.
"""
def setUp(self, is_staff=False, test_xss=True):
super(ToolsExportHelpTest, self).setUp()
self.export_page = ExportCoursePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.export_page.visit()
    def test_tools_export_nav_help(self):
"""
        Scenario: Help link in navigation bar is working on the course export page
        Given that I am on the course export page
And I want help about the process
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/releasing_course/export_import_course.html#export-a-course')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.export_page,
href=expected_url,
)
    def test_tools_export_side_bar_help(self):
"""
        Scenario: Help link in side bar is working on the course export page
        Given that I am on the course export page
And I want help about the process
And I click the 'Learn more about exporting a course' in the sidebar links
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/releasing_course/export_import_course.html#export-a-course')
# Assert that help link is correct.
assert_side_bar_help_link(
test=self,
page=self.export_page,
href=expected_url,
help_text='Learn more about exporting a course',
)
@attr(shard=10)
class StudioWelcomeHelpTest(AcceptanceTest):
"""
    Tests help link on the 'Welcome' page (user not logged in).
"""
def setUp(self):
super(StudioWelcomeHelpTest, self).setUp()
self.index_page = IndexPage(self.browser)
self.index_page.visit()
def test_welcome_nav_help(self):
"""
Scenario: Help link in navigation bar is working on 'Welcome' page (User not logged in).
Given that I am on the 'Welcome' page.
        And I want help about edX
And I click the 'Help' in the navigation bar
Then Help link should open.
And help url should be correct
"""
expected_url = _get_expected_documentation_url('/get_started.html')
# Assert that help link is correct.
assert_nav_help_link(
test=self,
page=self.index_page,
href=expected_url,
signed_in=False
)
|
romain-li/edx-platform
|
common/test/acceptance/tests/studio/test_studio_help.py
|
Python
|
agpl-3.0
| 43,599
|
[
"VisIt"
] |
cadce52701f2abf94995632ed6d7d959aa45df0bca3f12844489d5bc2a0c4331
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
import os
import json
from io import open
import warnings
from pymatgen.electronic_structure.bandstructure import Kpoint
from pymatgen import Lattice
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.electronic_structure.bandstructure import (BandStructureSymmLine,
get_reconstructed_band_structure)
from pymatgen.util.testing import PymatgenTest
from monty.serialization import loadfn
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class KpointTest(unittest.TestCase):
def setUp(self):
self.lattice = Lattice.cubic(10.0)
self.kpoint = Kpoint([0.1, 0.4, -0.5], self.lattice, label="X")
def test_properties(self):
self.assertEqual(self.kpoint.frac_coords[0], 0.1)
self.assertEqual(self.kpoint.frac_coords[1], 0.4)
self.assertEqual(self.kpoint.frac_coords[2], -0.5)
self.assertEqual(self.kpoint.a, 0.1)
self.assertEqual(self.kpoint.b, 0.4)
self.assertEqual(self.kpoint.c, -0.5)
self.assertEqual(self.lattice, Lattice.cubic(10.0))
self.assertEqual(self.kpoint.cart_coords[0], 1.0)
self.assertEqual(self.kpoint.cart_coords[1], 4.0)
self.assertEqual(self.kpoint.cart_coords[2], -5.0)
self.assertEqual(self.kpoint.label, "X")
class BandStructureSymmLineTest(PymatgenTest):
def setUp(self):
self.bs = loadfn(os.path.join(test_dir, "Cu2O_361_bandstructure.json"))
self.bs2 = loadfn(os.path.join(test_dir, "CaO_2605_bandstructure.json"))
self.bs_spin = loadfn(os.path.join(test_dir, "NiO_19009_bandstructure.json"))
self.bs_cbm0 = loadfn(os.path.join(test_dir, "InN_22205_bandstructure.json"))
self.bs_cu = loadfn(os.path.join(test_dir, "Cu_30_bandstructure.json"))
self.bs_diff_spins = loadfn(os.path.join(test_dir, "VBr2_971787_bandstructure.json"))
warnings.simplefilter("ignore")
def tearDown(self):
warnings.resetwarnings()
def test_basic(self):
self.assertArrayAlmostEqual(self.bs.projections[Spin.up][10][12][0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.assertArrayAlmostEqual(self.bs.projections[Spin.up][25][0][
Orbital.dyz.value],
[0.0, 0.0, 0.0011, 0.0219, 0.0219, 0.069])
self.assertAlmostEqual(
self.bs.get_projection_on_elements()[Spin.up][25][10]['O'], 0.0328)
self.assertAlmostEqual(
self.bs.get_projection_on_elements()[Spin.up][22][25]['Cu'], 0.8327)
proj = self.bs.get_projections_on_elements_and_orbitals({'Cu': ['s',
'd']})
self.assertAlmostEqual(
proj[Spin.up][25][0]['Cu']['s'], 0.0027)
self.assertAlmostEqual(
proj[Spin.up][25][0]['Cu']['d'], 0.8495999999999999)
self.assertEqual(self.bs2.nb_bands, 16)
self.assertAlmostEqual(self.bs2.bands[Spin.up][5][10], 0.5608)
self.assertEqual(self.bs2.branches[5]['name'], "L-U")
self.assertEqual(self.bs2.branches[5]['start_index'], 80)
self.assertEqual(self.bs2.branches[5]['end_index'], 95)
self.assertAlmostEqual(self.bs2.distance[70], 4.2335127528765737)
self.assertEqual(self.bs_spin.nb_bands, 27)
self.assertAlmostEqual(self.bs_spin.bands[Spin.up][5][10], 0.262)
self.assertAlmostEqual(self.bs_spin.bands[Spin.down][5][10],
1.6156)
def test_properties(self):
self.one_kpoint = self.bs2.kpoints[31]
self.assertEqual(self.one_kpoint.frac_coords[0], 0.5)
self.assertEqual(self.one_kpoint.frac_coords[1], 0.25)
self.assertEqual(self.one_kpoint.frac_coords[2], 0.75)
self.assertAlmostEqual(self.one_kpoint.cart_coords[0], 0.64918757)
self.assertAlmostEqual(self.one_kpoint.cart_coords[1], 1.29837513)
self.assertAlmostEqual(self.one_kpoint.cart_coords[2], 0.0)
self.assertEqual(self.one_kpoint.label, "W")
        self.assertAlmostEqual(self.bs2.efermi, 2.6211967, msg="wrong fermi energy")
def test_get_branch(self):
        self.assertEqual(self.bs2.get_branch(110)[0]['name'], "U-W")
def test_get_direct_band_gap_dict(self):
direct_dict = self.bs_diff_spins.get_direct_band_gap_dict()
self.assertEqual(direct_dict[Spin.down]['value'], 4.5365)
for bs in [self.bs2, self.bs_spin]:
dg_dict = bs.get_direct_band_gap_dict()
for spin, v in bs.bands.items():
kpt = dg_dict[spin]['kpoint_index']
vb, cb = dg_dict[spin]['band_indices']
gap = v[cb][kpt] - v[vb][kpt]
self.assertEqual(gap, dg_dict[spin]['value'])
self.assertRaises(ValueError, self.bs_cu.get_direct_band_gap_dict)
def test_get_direct_band_gap(self):
self.assertAlmostEqual(self.bs2.get_direct_band_gap(),
4.0125999999999999)
self.assertTrue(self.bs_diff_spins.get_direct_band_gap() > 0)
self.assertEqual(self.bs_cu.get_direct_band_gap(), 0)
def test_is_metal(self):
self.assertFalse(self.bs2.is_metal(), "wrong metal assignment")
self.assertFalse(self.bs_spin.is_metal(), "wrong metal assignment")
self.assertTrue(self.bs_cu.is_metal(), "wrong metal assignment")
def test_get_cbm(self):
cbm = self.bs2.get_cbm()
        self.assertAlmostEqual(cbm['energy'], 5.8709, msg="wrong CBM energy")
self.assertEqual(cbm['band_index'][Spin.up][0], 8, "wrong CBM band index")
self.assertEqual(cbm['kpoint_index'][0], 15, "wrong CBM kpoint index")
self.assertEqual(cbm['kpoint'].frac_coords[0], 0.5, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].frac_coords[2], 0.5, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].label, "X", "wrong CBM kpoint label")
cbm_spin = self.bs_spin.get_cbm()
        self.assertAlmostEqual(cbm_spin['energy'], 8.0458, msg="wrong CBM energy")
self.assertEqual(cbm_spin['band_index'][Spin.up][0], 12, "wrong CBM band index")
self.assertEqual(len(cbm_spin['band_index'][Spin.down]), 0, "wrong CBM band index")
self.assertEqual(cbm_spin['kpoint_index'][0], 0, "wrong CBM kpoint index")
self.assertEqual(cbm_spin['kpoint'].frac_coords[0], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].frac_coords[2], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].label, "\\Gamma", "wrong CBM kpoint label")
def test_get_vbm(self):
vbm = self.bs2.get_vbm()
        self.assertAlmostEqual(vbm['energy'], 2.2361, msg="wrong VBM energy")
self.assertEqual(len(vbm['band_index'][Spin.up]), 3, "wrong VBM number of bands")
self.assertEqual(vbm['band_index'][Spin.up][0], 5, "wrong VBM band index")
self.assertEqual(vbm['kpoint_index'][0], 0, "wrong VBM kpoint index")
self.assertEqual(vbm['kpoint'].frac_coords[0], 0.0, "wrong VBM kpoint frac coords")
self.assertEqual(vbm['kpoint'].frac_coords[1], 0.0, "wrong VBM kpoint frac coords")
self.assertEqual(vbm['kpoint'].frac_coords[2], 0.0, "wrong VBM kpoint frac coords")
self.assertEqual(vbm['kpoint'].label, "\\Gamma", "wrong VBM kpoint label")
vbm_spin = self.bs_spin.get_vbm()
        self.assertAlmostEqual(vbm_spin['energy'], 5.731, msg="wrong VBM energy")
self.assertEqual(len(vbm_spin['band_index'][Spin.up]), 2, "wrong VBM number of bands")
self.assertEqual(len(vbm_spin['band_index'][Spin.down]), 0, "wrong VBM number of bands")
self.assertEqual(vbm_spin['band_index'][Spin.up][0], 10, "wrong VBM band index")
self.assertEqual(vbm_spin['kpoint_index'][0], 79, "wrong VBM kpoint index")
self.assertEqual(vbm_spin['kpoint'].frac_coords[0], 0.5, "wrong VBM kpoint frac coords")
self.assertEqual(vbm_spin['kpoint'].frac_coords[1], 0.5, "wrong VBM kpoint frac coords")
self.assertEqual(vbm_spin['kpoint'].frac_coords[2], 0.5, "wrong VBM kpoint frac coords")
self.assertEqual(vbm_spin['kpoint'].label, "L", "wrong VBM kpoint label")
def test_get_band_gap(self):
bg = self.bs2.get_band_gap()
        self.assertAlmostEqual(bg['energy'], 3.6348, msg="wrong gap energy")
self.assertEqual(bg['transition'], "\\Gamma-X", "wrong kpoint transition")
self.assertFalse(bg['direct'], "wrong nature of the gap")
bg_spin = self.bs_spin.get_band_gap()
        self.assertAlmostEqual(bg_spin['energy'], 2.3148, msg="wrong gap energy")
self.assertEqual(bg_spin['transition'], "L-\\Gamma", "wrong kpoint transition")
self.assertFalse(bg_spin['direct'], "wrong nature of the gap")
bg_cbm0 = self.bs_cbm0.get_band_gap()
self.assertAlmostEqual(bg_cbm0['energy'], 0, places=3, msg="wrong gap energy")
def test_get_sym_eq_kpoints_and_degeneracy(self):
bs = self.bs2
cbm_k = bs.get_cbm()['kpoint'].frac_coords
vbm_k = bs.get_vbm()['kpoint'].frac_coords
        self.assertIsNone(bs.get_kpoint_degeneracy(cbm_k))
bs.structure = loadfn(os.path.join(test_dir, "CaO_2605_structure.json"))
self.assertEqual(bs.get_kpoint_degeneracy(cbm_k), 3)
self.assertEqual(bs.get_kpoint_degeneracy(vbm_k), 1)
cbm_eqs = bs.get_sym_eq_kpoints(cbm_k)
self.assertTrue([0.5, 0., 0.5] in cbm_eqs)
self.assertTrue([0., 0.5, 0.5] in cbm_eqs)
self.assertTrue([0.5, 0.5, 0.] in cbm_eqs)
vbm_eqs = bs.get_sym_eq_kpoints(vbm_k)
self.assertTrue([0., 0., 0.] in vbm_eqs)
def test_as_dict(self):
s = json.dumps(self.bs.as_dict())
self.assertIsNotNone(s)
s = json.dumps(self.bs2.as_dict())
self.assertIsNotNone(s)
s = json.dumps(self.bs_spin.as_dict())
self.assertIsNotNone(s)
def test_old_format_load(self):
with open(os.path.join(test_dir, "bs_ZnS_old.json"),
"r", encoding='utf-8') as f:
d = json.load(f)
bs_old = BandStructureSymmLine.from_dict(d)
self.assertEqual(bs_old.get_projection_on_elements()[
Spin.up][0][0]['Zn'], 0.0971)
class ReconstructBandStructureTest(PymatgenTest):
def setUp(self):
self.bs_cu = loadfn(os.path.join(test_dir, "Cu_30_bandstructure.json"))
self.bs_cu2 = loadfn(os.path.join(test_dir, "Cu_30_bandstructure.json"))
warnings.simplefilter("ignore")
def tearDown(self):
warnings.resetwarnings()
def test_reconstruct_band_structure(self):
bs = get_reconstructed_band_structure([self.bs_cu, self.bs_cu2])
self.assertEqual(bs.bands[Spin.up].shape, (20, 700), "wrong number of bands or kpoints")
if __name__ == '__main__':
unittest.main()
|
czhengsci/pymatgen
|
pymatgen/electronic_structure/tests/test_bandstructure.py
|
Python
|
mit
| 11,404
|
[
"pymatgen"
] |
dccf036d21ca29d699f5aba963f3a2cae655e0d779f10ed4b122405493401eee
|
#
# This file is part of the Connection-Set Algebra (CSA).
# Copyright (C) 2010,2011,2012,2019,2020 Mikael Djurfeldt
#
# CSA is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# CSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import math
import random
import copy
#from scipy.spatial import KDTree
from . import connset as cs
from . import valueset as vs
from . import _elementary
from .csaobject import *
class Random (cs.Operator):
def __mul__ (self, valueSet):
return ValueSetRandomMask (valueSet)
    def __call__ (self, p = None, N = None, fanIn = None, fanOut = None):
        if p is not None:
            assert N is None and fanIn is None and fanOut is None, \
                   'inconsistent parameters'
            return _elementary.ConstantRandomMask (p)
        elif N is not None:
            assert fanIn is None and fanOut is None, \
                   'inconsistent parameters'
            return _elementary.SampleNRandomOperator (N)
        elif fanIn is not None:
            assert fanOut is None, \
                   'inconsistent parameters'
            return _elementary.FanInRandomOperator (fanIn)
        elif fanOut is not None:
            return _elementary.FanOutRandomOperator (fanOut)
        assert False, 'inconsistent parameters'
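# Usage sketch (a non-authoritative illustration of the operator above): an
# instance of Random is called with exactly one of its keyword arguments:
#   random = Random ()
#   random (p=0.1)      # each connection exists independently with probability 0.1
#   random (N=1000)     # sample N connections in total
#   random (fanIn=10)   # a fixed random fan-in of 10 per target
#   random (fanOut=10)  # a fixed random fan-out of 10 per source
# Random () * valueSet instead builds a mask whose per-(i, j) connection
# probability is given by the value set (see ValueSetRandomMask below).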
class ValueSetRandomMask (cs.Mask):
def __init__ (self, valueSet):
cs.Mask.__init__ (self)
self.valueSet = valueSet
self.state = random.getstate ()
def startIteration (self, state):
random.setstate (self.state)
return self
def iterator (self, low0, high0, low1, high1, state):
for j in range (low1, high1):
for i in range (low0, high0):
if random.random () < self.valueSet (i, j):
yield (i, j)
def _to_xml (self):
return CSAObject.apply ('times', 'random', self.valueSet._to_xml ())
class Disc (cs.Operator):
def __init__ (self, r):
self.r = r
def __mul__ (self, metric):
return DiscMask (self.r, metric)
class DiscMask (cs.Mask):
def __init__ (self, r, metric):
cs.Mask.__init__ (self)
self.r = r
self.metric = metric
def iterator (self, low0, high0, low1, high1, state):
for j in range (low1, high1):
for i in range (low0, high0):
if self.metric (i, j) < self.r:
yield (i, j)
class Rectangle (cs.Operator):
def __init__ (self, width, height):
self.width = width
self.height = height
def __mul__ (self, gFunction):
if isinstance (gFunction, tuple):
return RectangleMask (self.width, self.height,
gFunction[0], gFunction[1])
else:
return RectangleMask (self.width, self.height, gFunction, gFunction)
class RectangleMask (cs.Mask):
def __init__ (self, width, height, g0, g1):
cs.Mask.__init__ (self)
self.hwidth = width / 2.0
self.hheight = height / 2.0
self.g0 = g0
self.g1 = g1
def iterator (self, low0, high0, low1, high1, state):
for j in range (low1, high1):
for i in range (low0, high0):
p0 = self.g0 (i)
p1 = self.g1 (j)
dx = p0[0] - p1[0]
dy = p0[1] - p1[1]
if abs (dx) < self.hwidth and abs (dy) < self.hheight:
yield (i, j)
class Gaussian (cs.Operator):
def __init__ (self, sigma, cutoff):
cs.Operator.__init__ (self, 'gaussian')
self.sigma = sigma
self.cutoff = cutoff
def __mul__ (self, metric):
return GaussianValueSet (self, metric)
class GaussianValueSet (OpExprValue, vs.ValueSet):
def __init__ (self, operator, metric):
OpExprValue.__init__ (self, operator, metric)
self.sigma22 = 2 * operator.sigma * operator.sigma
self.cutoff = operator.cutoff
self.metric = metric
def __call__ (self, i, j):
d = self.metric (i, j)
return math.exp (- d * d / self.sigma22) if d < self.cutoff else 0.0
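# Usage sketch (illustrative only): combined via * with a metric that returns
# the distance between source i and target j, Gaussian yields a value set that
# decays with distance and is exactly 0.0 at or beyond the cutoff:
#   g = Gaussian (sigma=1.0, cutoff=3.0)
#   weights = g * metric     # weights (i, j) == exp (-d*d / (2*sigma*sigma)) for d < 3.0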
class Block (cs.Operator):
def __init__ (self, M, N):
self.M = M
self.N = N
def __mul__ (self, other):
c = cs.coerceCSet (other)
if isinstance (c, cs.Mask):
return BlockMask (self.M, self.N, c)
else:
return cs.ConnectionSet (BlockCSet (self.M, self.N, c))
class BlockMask (cs.Mask):
def __init__ (self, M, N, mask):
cs.Mask.__init__ (self)
self.M = M
self.N = N
self.m = mask
def startIteration (self, state):
#*fixme* filter out 'partitions' from state
nState = {}
for k in state:
if k != 'partitions':
nState[k] = state[k]
self.obj = self.m.startIteration (nState)
return self
def iterator (self, low0, high0, low1, high1, state):
maskIter = self.obj.iterator (low0 // self.M,
(high0 + self.M - 1) // self.M,
low1 // self.N,
(high1 + self.N - 1) // self.N,
state)
try:
pre = []
(i, j) = next (maskIter)
while True:
# collect connections in one connection matrix column
post = j
while j == post:
pre.append (i)
(i, j) = next (maskIter)
# generate blocks for the column
for jj in range (max (self.N * post, low1),
min (self.N * (post + 1), high1)):
for k in pre:
for ii in range (max (self.M * k, low0),
min (self.M * (k + 1), high0)):
yield (ii, jj)
pre = []
except StopIteration:
if pre:
# generate blocks for the last column
for jj in range (max (self.N * post, low1),
min (self.N * (post + 1), high1)):
for k in pre:
for ii in range (max (self.M * k, low0),
min (self.M * (k + 1), high0)):
yield (ii, jj)
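# Sketch of the expansion performed by BlockMask above: every connection (i, j)
# of the template mask becomes the full M x N block
#   {(ii, jj) : M*i <= ii < M*(i+1) and N*j <= jj < N*(j+1)},
# clipped against the requested [low0, high0) x [low1, high1) window.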
class Repeat (cs.Operator):
def __init__ (self, M, N):
self.M = M
self.N = N
def __mul__ (self, other):
c = cs.coerceCSet (other)
if isinstance (c, cs.Mask):
return RepeatMask (self.M, self.N, c)
else:
return cs.ConnectionSet (RepeatCSet (self.M, self.N, c))
# Not fully implemented.
# Currently only handles cases where we iterate over an integral number of
# occurrences of the template mask, both with regard to sources and targets.
#
class RepeatMask (cs.Mask):
def __init__ (self, M, N, mask):
cs.Mask.__init__ (self)
self.M = M
self.N = N
self.m = mask
def iterator (self, low0, high0, low1, high1, state):
try:
jj = low1
            nextHigh1 = (low1 + self.N) // self.N * self.N  # integer arithmetic; '/' would yield a float on Python 3
while nextHigh1 <= high1:
maskIter = self.m.iterator (0,
self.M,
0,
self.N,
state)
                pre = []
                post = 0
                # 'pre' and 'post' must be bound if next () raises StopIteration
                # on the very first call (empty template mask); the except
                # branch below references them.
                try:
(i, j) = next (maskIter)
post = j
while post < self.N:
pre = []
while j == post:
pre.append (i)
(i, j) = next (maskIter)
ii = low0
while ii < high0:
for k in pre:
yield (ii + k, jj + post)
ii += self.M
post = j
except StopIteration:
ii = low0
while ii < high0:
for k in pre:
yield (ii + k, jj + post)
ii += self.M
jj = nextHigh1
nextHigh1 += self.N
except StopIteration:
return
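# Sketch of the intended Repeat semantics: the template mask is tiled over the
# index space, so a template connection (i, j) with 0 <= i < M and 0 <= j < N
# reappears at (i + a*M, j + b*N) for every a, b >= 0, subject to the
# limitation noted above.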
class Transpose (cs.Operator):
def __mul__ (self, other):
c = cs.coerceCSet (other)
if isinstance (c, cs.Mask):
return other.transpose ()
else:
return cs.ConnectionSet (other.transpose ())
class Shift (cs.Operator):
def __init__ (self, M, N):
self.M = M
self.N = N
def __mul__ (self, other):
c = cs.coerceCSet (other)
if isinstance (c, cs.Mask):
return other.shift (self.M, self.N)
else:
return cs.ConnectionSet (other.shift (self.M, self.N))
class Fix (cs.Operator):
def __mul__ (self, other):
c = cs.coerceCSet (other)
if isinstance (c, cs.Mask):
return FixedMask (other)
else:
return cs.ConnectionSet (FixedCSet (other))
class FixedMask (cs.FiniteMask):
def __init__ (self, mask):
cs.FiniteMask.__init__ (self)
ls = []
for c in mask:
ls.append (c)
self.connections = ls
targets = list (map (cs.target, ls))
self.low0 = min (ls)[0]
self.high0 = max (ls)[0] + 1
self.low1 = min (targets)
self.high1 = max (targets) + 1
def iterator (self, low0, high0, low1, high1, state):
if not self.isBoundedBy (low0, high0, low1, high1):
return iter (self.connections)
else:
return self.boundedIterator (low0, high0, low1, high1)
def boundedIterator (self, low0, high0, low1, high1):
for c in self.connections:
if low0 <= c[0] and c[0] < high0 \
and low1 <= c[1] and c[1] < high1:
yield c
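# Usage sketch: Fix () * mask iterates the operand once up front and stores the
# resulting connection list, so repeated iteration is cheap and the extents
# (low0/high0, low1/high1) are known in advance, at the cost of memory.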
|
INCF/csa
|
csa/_misc.py
|
Python
|
gpl-3.0
| 10,587
|
[
"Gaussian"
] |
ed49bea707c2bbe068719ae318938b045b9ff4efc25c63e898de594fd7957b0f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Version 3 splits Order into Order and OrderItem,
Shipment into Shipment and ShipmentItem.
Just as Invoice was split from version 1 to 2.
InvoiceItem and ShipmentItem now reference each other.
Invoice number is removed as primary key and placed with integer.
NOTE: Class functions can be changed and added without migration.
"""
import sqlalchemy as sqla
from sqlalchemy.orm import relationship as rel
from sqlalchemy.orm import backref, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import datetime
import json
Int = sqla.Integer
#Str = sqla.String #TODO: Can probably delete this line
Utf = sqla.Unicode
Float = sqla.Float
Col = sqla.Column
Bool = sqla.Boolean
Date = sqla.Date
DateTime = sqla.DateTime
ForeignKey = sqla.ForeignKey
Base = declarative_base()
def today():
return datetime.datetime.now()
def AddDictRepr(aClass):
    def repr_str(obj):
        '''Returns a string representation of the instance dictionary with
        underscored keys ("_keyword") removed.
        '''
        copy = obj.__dict__.copy()
        for key in copy.keys():  # keys() returns a list in Python 2, so deleting while looping is safe
            if key.startswith(u'_'):
                del copy[key]
        return repr(copy)
    aClass.__repr__ = repr_str
    return aClass
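# Usage sketch (hypothetical instance): with @AddDictRepr applied, repr() shows
# column values while hiding SQLAlchemy internals such as '_sa_instance_state':
#   repr(Vehicle(id=u'ABC-123'))  ->  "{'id': u'ABC-123'}"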
#==============================================================================
# Order class
#==============================================================================
@AddDictRepr
class Order(Base):
__tablename__ = 'order'
id = Col(Int, primary_key=True)
group = Col(Utf(4), ForeignKey('cogroup.name'), nullable=False) # Of second party main company
seller = Col(Utf(4), ForeignKey('branch.name'), nullable=False) # For billing/receipts
buyer = Col(Utf(4), ForeignKey('branch.name'), nullable=False) # For billing/receipts
parent = rel('CoGroup')
recorddate = Col(DateTime, nullable=False, default=today) # Date of entering a record
# Keep all product information in the outgoing product list
MPN = Col(Utf(20), ForeignKey('product.MPN'), nullable=False) # Product code
price = Col(Float) # Price for one SKU or unit on this order
discount = Col(Int, nullable=False, default=0) # Discount percentage as integer (0-100)
#XXX: name change in version 3: totalskus = Col(Int)
qty = Col(Int, nullable=False)
applytax = Col(Bool, nullable=False) # True = 5%, False = 0%
#XXX: Remove in version 3
#totalunits = Col(Float) # AUTO: unitssku * totalskus
#subtotal = Col(Float) #TODO: Remove and leave final totals in attached invoice.
#totalcharge = Col(Int) #TODO: Remove
orderdate = Col(Date, nullable=False) # Order placement date
duedate = Col(Date) # Expected delivery date
date = Col(Date) # Extra date field if needed
orderID = Col(Utf(20)) # PO Number
ordernote = Col(Utf(100)) # Information concerning the order
note = Col(Utf(100)) # Extra note field if needed.
checked = Col(Bool, nullable=False, default=False) # Match against second party records
is_sale = Col(Bool, nullable=False, default=False) # Boolean. Customer
is_purchase = Col(Bool, nullable=False, default=False) # Boolean. Supplier
shipments = rel('ShipmentItem', backref='order')
invoices = rel('InvoiceItem', backref='order')
product = rel('Product')
#XXX: New in version 3
is_open = Col(Bool, nullable=False, default=True) # Active or closed PO
@property
def units(self):
return self.qty * self.product.units
def shipped_value(self):
'''Return the value of total shipped items.'''
value = self.qty_shipped() * self.price
if self.product.unitpriced:
value = value * self.product.units
if self.applytax:
value = value * 1.05
return value
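    # Worked example (hypothetical figures): 5 SKUs shipped at price 100 for a
    # unit-priced product packing 20 units per SKU gives 5 * 100 * 20 = 10000,
    # or 10500.0 once the 5% tax multiplier is applied.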
def qty_shipped(self):
'''By number of SKUs'''
if len(self.shipments) == 0:
return 0
return sum([srec.qty if isinstance(srec.qty,int) else 0 for srec in self.shipments])
def qty_remaining(self):
'''By number of SKUs remaining to be shipped'''
return int(self.qty - self.qty_shipped())
def all_shipped(self):
'''By number of SKUs'''
if len(self.shipments) == 0:
return False
return self.qty == self.qty_shipped()
def qty_invoiced(self):
'''By number of SKUs'''
if len(self.invoices) == 0:
return 0
return sum([prec.qty if isinstance(prec.qty,int) else 0 for prec in self.invoices])
def all_invoiced(self):
'''By number of SKUs'''
# if len(self.invoices) == 0:
# return False
return self.qty_shipped() == self.qty_invoiced()
def total_paid(self):
if len(self.invoices) == 0:
return 0
        return sum([prec.qty if prec.invoice.paid else 0 for prec in self.invoices])  # 'paid' lives on Invoice, not InvoiceItem
def all_paid(self):
if len(self.invoices) == 0:
return False
return not (False in [prec.invoice.paid for prec in self.invoices])
def qty_quote(self, qty):
subtotal = self.price * qty
if self.product.unitpriced:
subtotal *= self.product.units
return int(round(subtotal))
#==============================================================================
# Shipment (track multiple shipments in SKU's for one order)
#==============================================================================
@AddDictRepr
class Shipment(Base): # Keep track of shipments/SKUs for one order
    '''
    #TODO: Separate manifest info and manifest items.
    items : list of ShipmentItem (each ShipmentItem backrefs its Order)
    '''
__tablename__ = 'shipment'
id = Col(Int, primary_key=True)
shipmentdate = Col(Date, nullable=False)
shipment_no = Col(Utf(20), default=u'') # i.e., Manifest number
shipmentnote = Col(Utf(100), default=u'') # Information concerning the delivery
shipmentdest = Col(Utf(100), default=u'')
driver = Col(Utf(4)) # Track vehicle driver (optional)
truck = Col(Utf(10)) # Track vehicle by license (optional)
note = Col(Utf(100)) # Extra note field if needed
checked = Col(Bool, nullable=False, default=False) # Extra boolean for matching/verifying
items = rel('ShipmentItem', backref='shipment')
# # METHODS
# def listbox_summary(self):
# """
# Return a single line unicode summary intended for a listbox.
# """
# txt = u'{date:<10} 編號: {s.shipment_no:<10} QTY: {s.items[0].qty:>5} {s.order.product.SKU:<6} 品名: {s.order.product.inventory_name}'
# txt = txt.format(s=self, date=str(self.shipmentdate))
# return txt
@AddDictRepr
class ShipmentItem(Base):
__tablename__ = 'shipmentitem'
id = Col(Int, primary_key=True)
order_id = Col(Int, ForeignKey('order.id'), nullable=False)
shipment_id = Col(Int, ForeignKey('shipment.id'))
qty = Col(Int, nullable=False) # Deduct from total SKUs due
lot = Col(Utf(20))
lot_start = Col(Int)
lot_end = Col(Int)
rt_no = Col(Utf(20))
duedate = Col(Date)
shipped = Col(Bool, default=False)
invoiceitem = rel('InvoiceItem', backref='shipmentitem')
#==============================================================================
# Invoice class (track multiple invoices for one order)
#==============================================================================
@AddDictRepr
class Invoice(Base): # Keep track of invoices/payments for one order
__tablename__ = 'invoice'
#XXX: New in version 3, primary key change
id = Col(Int, primary_key=True)
invoice_no = Col(Utf(20), default=u'') # i.e., Invoice number
seller = Col(Utf(4), ForeignKey('branch.name'), nullable=False) # For billing/receipts
buyer = Col(Utf(4), ForeignKey('branch.name'), nullable=False) # For billing/receipts
invoicedate = Col(Date, nullable=False)
invoicenote = Col(Utf(100)) # Information concerning the invoice
check_no = Col(Utf(20))
paid = Col(Bool, nullable=False, default=False)
paydate = Col(Date)
note = Col(Utf(100)) # Extra note field if needed
checked = Col(Bool, nullable=False, default=False) # Extra boolean for matching/verifying
items = rel('InvoiceItem', backref='invoice')
def subtotal(self):
return sum([item.subtotal() for item in self.items])
def tax(self):
'''Tax is rounded to nearest integer before returning value.'''
return int(round(self.subtotal() * 0.05))
def taxtotal(self):
total = self.subtotal() + (self.tax() if self.items[0].order.applytax else 0)
return int(round(total))
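    # Worked example (hypothetical figures): items summing to a subtotal of
    # 1000 give tax() == 50, and taxtotal() == 1050 when the underlying order
    # has applytax set (1000 otherwise).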
#==============================================================================
# InvoiceItem class (track multiple products for one invoice)
#==============================================================================
@AddDictRepr
class InvoiceItem(Base): # Keep track of invoices/payments for one order
'''
order : backref to Order
invoice : backref to Invoice
shipmentitem : backref to ShipmentItem
'''
__tablename__ = 'invoiceitem'
id = Col(Int, primary_key=True)
invoice_id = Col(Int, ForeignKey('invoice.id'), nullable=False)
shipmentitem_id = Col(Int, ForeignKey('shipmentitem.id'), nullable=False)
order_id = Col(Int, ForeignKey('order.id'), nullable=False)
qty = Col(Int, nullable=False)
def subtotal(self):
subtotal = self.order.price * self.qty
if self.order.product.unitpriced:
subtotal *= self.order.product.units
return int(round(subtotal,0))
def total(self):
subtotal = self.subtotal()
if self.order.applytax:
subtotal *= 1.05
return int(round(subtotal,0))
# def listbox_summary(self):
# """
# Return a single line unicode summary intended for a listbox.
# """
# txt = u'{date:<10} 編號: {s.invoice_no:<10} QTY: {s.qty:>5} {s.order.product.SKU:<6} Subtotal: ${total:<6} 品名: {s.order.product.inventory_name}'
# txt = txt.format(s=self, date=str(self.invoice.invoicedate), total=self.subtotal())
# return txt
#==============================================================================
# CoGroup (company grouping class for branches)
#==============================================================================
@AddDictRepr
class CoGroup(Base):
__tablename__ = 'cogroup'
name = Col(Utf(4), primary_key=True, nullable=False) # Abbreviated name of company (2 to 4 chars)
is_active = Col(Bool, nullable=False, default=True) # Boolean for listing the company. Continuing business.
is_supplier = Col(Bool, nullable=False, default=True) # Maybe use in later versions
is_customer = Col(Bool, nullable=False, default=True) # Maybe use in later versions
branches = rel('Branch', backref='cogroup', lazy='joined') # lazy -> Attaches on retrieving a cogroup
orders = rel('Order')
products = rel('Product', backref='cogroup')
contacts = rel('Contact')
purchases = rel('Order', primaryjoin="and_(CoGroup.name==Order.group, Order.is_sale==False)") #Purchases FROM this company
sales = rel('Order', primaryjoin="and_(CoGroup.name==Order.group, Order.is_sale==True)") #Sales TO this company
openpurchases = rel('Order', primaryjoin="and_(CoGroup.name==Order.group, Order.is_sale==False, Order.is_open==True)") #Purchases FROM this company
opensales = rel('Order', primaryjoin="and_(CoGroup.name==Order.group, Order.is_sale==True, Order.is_open==True)") #Sales TO this company
#==============================================================================
# Branch class
#==============================================================================
@AddDictRepr
class Branch(Base):
__tablename__ = 'branch'
name = Col(Utf(4), primary_key=True, nullable=False) # Abbreviated name of company (2 to 4 chars)
group= Col(Utf(4), ForeignKey('cogroup.name'), nullable=False) # Name of main company representing all branches
fullname = Col(Utf(100), default=u'')
english_name = Col(Utf(100), default=u'')
tax_id = Col(Utf(8), nullable=False, default=u'')
phone = Col(Utf(20), default=u'')
fax = Col(Utf(20), default=u'')
email = Col(Utf(20), default=u'')
note = Col(Utf(100), default=u'')
address_office = Col(Utf(100), default=u'')
address_shipping = Col(Utf(100), default=u'')
address_billing = Col(Utf(100), default=u'')
address = Col(Utf(100), default=u'') # Extra address space if needed
is_active = Col(Bool, nullable=False, default=True) # Boolean for listing the company. Continuing business.
# parent = rel('CoGroup')
contacts = rel('Contact')
purchases = rel('Order', primaryjoin="and_(Branch.name==Order.seller, Order.is_sale==False)") #Purchases FROM this company
sales = rel('Order', primaryjoin="and_(Branch.name==Order.buyer, Order.is_sale==True)") #Sales TO this company
#==============================================================================
# Contact class
#==============================================================================
@AddDictRepr
class Contact(Base):
__tablename__ = 'contact'
id = Col(Int, primary_key=True)
group = Col(Utf(4), ForeignKey('cogroup.name'), nullable=False)
branch = Col(Utf(4), ForeignKey('branch.name'))
name = Col(Utf(20), nullable=False)
position = Col(Utf(20), default=u'')
phone = Col(Utf(20), default=u'')
fax = Col(Utf(20), default=u'')
email = Col(Utf(20), default=u'')
note = Col(Utf(100), default=u'')
#==============================================================================
# Product class
#==============================================================================
@AddDictRepr
class Product(Base): # Information for each unique product (including packaging)
__tablename__ = 'product'
MPN = Col(Utf(20), primary_key=True)
group = Col(Utf(4), ForeignKey('cogroup.name'), nullable=False)
product_label = Col(Utf(100), default=u'') #Optional 2nd party product name
inventory_name = Col(Utf(100), nullable=False) #Required
english_name = Col(Utf(100), default=u'')
units = Col(Float, nullable=False) #Units per SKU
UM = Col(Utf(10), nullable=False) #Unit measurement
SKU = Col(Utf(10), nullable=False) #Stock keeping unit (countable package)
SKUlong = Col(Utf(100), default=u'')
unitpriced = Col(Bool, nullable=False)
ASE_PN = Col(Utf(20)) # ASE product number
ASE_RT = Col(Utf(20)) # ASE department routing number
ASE_END = Col(Int) # Last used SKU index number for current lot
note = Col(Utf(100)) # {JSON} contains extra data, i.e. current ASE and RT numbers
# {JSON} must be appended to the end after any notes. Last char == '}'
is_supply = Col(Bool, nullable=False)
discontinued = Col(Bool, nullable=False, default=False)
curr_price = Col(Float, default=0.0)
stock = rel('Stock', backref='product')
orders = rel('Order', primaryjoin="Product.MPN==Order.MPN")
@property
def price(self):
if self.curr_price.is_integer():
return int(self.curr_price)
return self.curr_price
@property
def PrMeas(self):
'''Return the unit measure associated with the price.
PrMeas = pricing measure
'''
return self.UM if self.unitpriced or self.SKU == u'槽車' else self.SKU
def qty_available(self):
available = dict(
units = 0.0,
SKUs = 0.0,
value = 0.0,
unit_value = None,
SKU_value = None
)
for each in self.stock:
available['units'] += each.adj_unit
available['SKUs'] += each.adj_SKU
available['value'] += each.adj_value
if available['units'] != 0.0:
available['unit_value'] = available['value'] / available['units']
if available['SKUs'] != 0.0:
available['SKU_value'] = available['value'] / available['SKUs']
return available
def label(self):
'''Returns product_label, which is the client desired name.
If a product_label does not exist, then return our inventory_name
for the product.
'''
if self.product_label != u'':
return self.product_label
else:
return self.inventory_name
@property
def name(self):
'''Returns product_label, which is the client desired name.
If a product_label does not exist, then return our inventory_name
for the product.
'''
if self.product_label != u'':
return self.product_label
else:
return self.inventory_name
@property
def specs(self):
'''Short text of product key values.
"## UM SKU"
e.g. "20 kg barrel"
'''
u = self.units
units = int(u) if int(u)==u else u #Truncate if mantissa is zero
if self.SKU == u'槽車':
return u'槽車-{}'.format(self.UM)
else:
txt = u"{0} {1} {2}"
return txt.format(units,self.UM,self.SKU)
def json(self, new_dic=None):
'''Saves 'new_dic' as a json string and overwrites previous json.
Returns contents of json string as a dictionary object.
Note: Commit session after making changes.'''
if new_dic == None:
if self.note.find(u'}') != -1:
return json.loads(self.note[self.note.index(u'{'):])
else:
return {}
else:
old_dic = dict()
# Delete existing json string
if self.note.find(u'}') != -1:
old_dic = self.json()
self.note = self.note.split(u'{')[0]
# Merge and overwrite old with new.
new_dic = dict(old_dic.items() + new_dic.items())
self.note += json.dumps(new_dic)
return True
return False
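    # Usage sketch (hypothetical values): json() piggybacks a JSON blob on the
    # free-text 'note' column, appended after any plain note text:
    #   prod.note = u'handle with care'
    #   prod.json({u'ASE': u'A123'})  # note -> u'handle with care{"ASE": "A123"}'
    #   prod.json()                   # -> {u'ASE': u'A123'}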
#==============================================================================
# Stock class
#==============================================================================
@AddDictRepr
class Stock(Base): #For warehouse transactions
__tablename__ = 'stock'
id = Col(Int, primary_key=True)
MPN = Col(Utf(20), ForeignKey('product.MPN'), nullable=False)
date = Col(Date, nullable=False)
adj_unit = Col(Float, nullable=False) #Use units for stock amounts
adj_SKU = Col(Float) #Probably will NOT use this
adj_value = Col(Float, nullable=False) #Value of units in transaction
note = Col(Utf(100))
#==============================================================================
# Vehicle class
#==============================================================================
@AddDictRepr
class Vehicle(Base):
__tablename__ = 'vehicle'
id = Col(Utf(10), primary_key=True) #license plate number
purchasedate = Col(Date)
description = Col(Utf(100))
value = Col(Float)
note = Col(Utf(100))
#==============================================================================
# Database loading method
#==============================================================================
def get_database(db_path, echo=False):
'''Opens a database and returns an 'engine' object.'''
database = sqla.create_engine(db_path, echo=echo)
Base.metadata.create_all(database) #FIRST TIME SETUP ONLY
return database
#==============================================================================
# Testing and Debugging
#==============================================================================
if __name__ == '__main__':
    engine = get_database(u'sqlite:///test.db', echo=False)  # create_engine needs a URL, not a bare filename
    session = sessionmaker(bind=engine)()
    session.add(Vehicle(id=u'MONKEY'))
    veh = session.query(Vehicle).get(u'MONKEY')
    print veh
    print veh.__dict__
    # The records below follow the version 3 schema: 'subtotal' and 'totalskus'
    # are gone ('qty' replaces 'totalskus'), and shipment quantities live on
    # ShipmentItem rather than Shipment.
    product = Product(MPN=u'666', group=u'DC comics', units=12, UM=u'ounce',
                      SKU=u'vial', unitpriced=False, is_supply=False,
                      product_label=u'Muscle Juice', inventory_name=u'serim 234')
    order = Order(orderID=u'ZV1234', seller=u'Oscorp', buyer=u'Marvel',
                  group=u'DC comics', is_sale=True, MPN=u'666', price=10.0,
                  qty=24, applytax=False, orderdate=datetime.date.today(),
                  duedate=datetime.date.today())
    shipment = Shipment(shipmentdate=datetime.date.today(), shipment_no=u'003568')
    shipment.items.append(ShipmentItem(order=order, qty=10))
    session.add(product)
    session.add(order)
    session.add(shipment)
    order = session.query(Order).first()
    print 'order', order
    print 'shipped:', order.qty_shipped(), 'remaining:', order.qty_remaining()
|
Ripley6811/TAIMAU
|
src/db_tools/TM2014_tables_v3.py
|
Python
|
gpl-2.0
| 20,670
|
[
"ASE"
] |
dd6cf2316c68df086370384db9924d61515efce52a9ab7ed076411fc48844603
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import warnings
from setuptools import find_packages
from setuptools import setup
from setuptools import Extension
# Try to use Cython to compile .pyx files.
# If Cython is not available, fall back to compiling the pre-generated .c files,
# so be sure to always keep a .c version of each .pyx file in sync.
# You can regenerate the .c files with: python setup.py build_ext --inplace
try:
import numpy as np
except ImportError:
    raise ImportError("NumPy is needed to compile .pyx extensions. Use: pip install numpy")
try:
from Cython.Distutils import build_ext
except ImportError:
    from setuptools.command.build_ext import build_ext
    warnings.warn("Cython is not present. .pyx extensions will be built from the pre-generated .c files.")
extensions = [Extension("sktracker.tracker.lapjv._lapjv",
["sktracker/tracker/lapjv/_lapjv.pyx"],
include_dirs=[np.get_include()]),
Extension("sktracker.io._tifffile",
["sktracker/io/_tifffile.c"],
include_dirs=[np.get_include()]),
]
# Get version number
import sys
sys.path.append('.')
import sktracker
# Fill project description fields
DISTNAME = 'scikit-tracker'
DESCRIPTION = 'Object detection and tracking for cell biology'
LONG_DESCRIPTION = """scikit-tracker aims to be a robust Python library for working with cell biology
microscopy images. OME XML and OME Tiff are supported to handle input/output to the lib. The two
main goals of the library are to implement detection and tracking algorithms relevant to analysing
biological microscopy datasets.
Several algorithms are featured and it is planned to add others:
- Gaussian peak detection by deflation loop : Segré et al. Nature Methods (2008)
- Cell boundary detection with bright field depth fitting : Julou, T., PNAS, (2013)
- Cell nucleus segmentation : by Guillaume Gay
- Lap Tracker, a robust single-particle tracking : K. Jaqaman and G. Danuser, Nature Methods, 2008.
The version implemented in scikit-tracker is a slightly modified version of the original to allow
easy, flexible and yet powerful parameter adjustments with custom cost functions.
For more details, please visit : http://scikit-tracker.org/
"""
MAINTAINER = 'Guillaume Gay and Hadrien Mary'
MAINTAINER_EMAIL = 'gllm.gay@gmail.com'
URL = 'http://scikit-tracker.org/'
LICENSE = 'BSD 3-Clause'
DOWNLOAD_URL = 'https://github.com/bnoi/scikit-tracker'
VERSION = sktracker.__version__
DEPENDENCIES = ["numpy >= 1.8",
"scipy >= 0.12",
"pandas >= 0.13",
"scikit-image >= 0.9",
"scikit-learn >= 0.13",
"matplotlib >= 1.3",
]
if VERSION.endswith('dev'):
DEPENDENCIES += ["nose >= 1.3",
"sphinx >= 1.2",
"coverage >= 3.7"
]
if __name__ == "__main__":
setup(
name=DISTNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
license=LICENSE,
download_url=DOWNLOAD_URL,
version=VERSION,
classifiers=["Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: MacOS",
"Operating System :: Microsoft",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: Image Recognition",
"Topic :: Scientific/Engineering :: Medical Science Apps",
],
packages=find_packages(),
package_data={
'': ['data/*.h5', 'data/*.xml', 'data/*.tif', 'data/stack_list/*.tif', 'data/stack_list/*.h5'],
},
tests_require='nose',
test_suite='nose.collector',
        # Should DEPENDENCIES be installed automatically, or left for the user to install?
install_requires=[],
# install_requires=DEPENDENCIES,
setup_requires=['numpy'],
cmdclass={"build_ext": build_ext},
ext_modules=extensions,
)
|
bnoi/scikit-tracker
|
setup.py
|
Python
|
bsd-3-clause
| 4,846
|
[
"Gaussian",
"VisIt"
] |
76314a5270f3f93d45cd22a9ace0aca97e342e69ef35756100a45d819729fdea
|
#!/usr/bin/python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script generates an rc file and header (setup_strings.{rc,h}) to be
included in setup.exe. The rc file includes translations for strings pulled
from generated_resources.grd and the localized .xtb files.
The header file includes IDs for each string, but also has values to allow
getting a string based on a language offset. For example, the header file
looks like this:
#define IDS_L10N_OFFSET_AR 0
#define IDS_L10N_OFFSET_BG 1
#define IDS_L10N_OFFSET_CA 2
...
#define IDS_L10N_OFFSET_ZH_TW 41
#define IDS_MY_STRING_AR 1600
#define IDS_MY_STRING_BG 1601
...
#define IDS_MY_STRING_BASE IDS_MY_STRING_AR
This allows us to look up an ID for a string by adding IDS_MY_STRING_BASE and
IDS_L10N_OFFSET_* for the language we are interested in.
"""
import glob
import os
import sys
from xml.dom import minidom
# We are expected to use ../../../../third_party/python_24/python.exe
from google import path_utils
# Quick hack to fix the path.
sys.path.append(os.path.abspath('../../tools/grit/grit/extern'))
import FP
# The IDs of strings we want to import from generated_resources.grd and include
# in setup.exe's resources.
kStringIds = [
'IDS_PRODUCT_NAME',
'IDS_UNINSTALL_CHROME',
'IDS_ABOUT_VERSION_COMPANY_NAME',
'IDS_INSTALL_HIGHER_VERSION',
'IDS_INSTALL_USER_LEVEL_EXISTS',
'IDS_INSTALL_SYSTEM_LEVEL_EXISTS',
'IDS_INSTALL_FAILED',
'IDS_SETUP_PATCH_FAILED',
'IDS_INSTALL_OS_NOT_SUPPORTED',
'IDS_INSTALL_OS_ERROR',
'IDS_INSTALL_TEMP_DIR_FAILED',
'IDS_INSTALL_UNCOMPRESSION_FAILED',
'IDS_INSTALL_INVALID_ARCHIVE',
'IDS_INSTALL_INSUFFICIENT_RIGHTS',
'IDS_UNINSTALL_FAILED',
'IDS_UNINSTALL_COMPLETE',
'IDS_INSTALL_DIR_IN_USE',
'IDS_OEM_MAIN_SHORTCUT_NAME',
'IDS_SHORTCUT_TOOLTIP',
]
# The ID of the first resource string.
kFirstResourceID = 1600
class TranslationStruct:
"""A helper struct that holds information about a single translation."""
def __init__(self, resource_id_str, language, translation):
self.resource_id_str = resource_id_str
self.language = language
self.translation = translation
def __cmp__(self, other):
"""Allow TranslationStructs to be sorted by id."""
return cmp(self.resource_id_str, other.resource_id_str)
def CollectTranslatedStrings():
"""Collects all the translations for all the strings specified by kStringIds.
Returns a list of TranslationStruct objects, sorted by resource id string
(string id followed by language code)."""
kGeneratedResourcesPath = os.path.join(path_utils.ScriptDir(), '..', '..',
'..', 'app/google_chrome_strings.grd')
kTranslationDirectory = os.path.join(path_utils.ScriptDir(), '..', '..',
'..', 'app', 'resources')
kTranslationFiles = glob.glob(os.path.join(kTranslationDirectory,
'google_chrome_strings*.xtb'))
# Get the strings we care about out of the .grd file.
dom = minidom.parse(kGeneratedResourcesPath)
# message_nodes is a list of message dom nodes corresponding to the string
# ids we care about. We want to make sure that this list is in the same
# order as kStringIds so we can associate them together.
message_nodes = []
all_message_nodes = dom.getElementsByTagName('message')
for string_id in kStringIds:
message_nodes.append([x for x in all_message_nodes if
x.getAttribute('name') == string_id][0])
message_texts = [node.firstChild.nodeValue.strip() for node in message_nodes]
# The fingerprint of the string is the message ID in the translation files
# (xtb files).
translation_ids = [str(FP.FingerPrint(text)) for text in message_texts]
# Manually put _EN_US in the list of translated strings because it doesn't
# have a .xtb file.
translated_strings = []
for string_id, message_text in zip(kStringIds, message_texts):
translated_strings.append(TranslationStruct(string_id + '_EN_US',
'EN_US',
message_text))
# Gather the translated strings from the .xtb files. If an .xtb file doesn't
# have the string we want, use the en-US string.
for xtb_filename in kTranslationFiles:
dom = minidom.parse(xtb_filename)
language = dom.documentElement.getAttribute('lang')
language = language.replace('-', '_').upper()
translation_nodes = {}
for translation_node in dom.getElementsByTagName('translation'):
translation_id = translation_node.getAttribute('id')
if translation_id in translation_ids:
translation_nodes[translation_id] = (translation_node.firstChild
.nodeValue
.strip())
for i, string_id in enumerate(kStringIds):
translated_string = translation_nodes.get(translation_ids[i],
message_texts[i])
translated_strings.append(TranslationStruct(string_id + '_' + language,
language,
translated_string))
translated_strings.sort()
return translated_strings
def WriteRCFile(translated_strings, out_filename):
"""Writes a resource (rc) file with all the language strings provided in
|translated_strings|."""
kHeaderText = (
u'#include "%s.h"\n\n'
u'STRINGTABLE\n'
u'BEGIN\n'
) % os.path.basename(out_filename)
kFooterText = (
u'END\n'
)
lines = [kHeaderText]
for translation_struct in translated_strings:
lines.append(u' %s "%s"\n' % (translation_struct.resource_id_str,
translation_struct.translation))
lines.append(kFooterText)
outfile = open(out_filename + '.rc', 'wb')
outfile.write(''.join(lines).encode('utf-16'))
outfile.close()
def WriteHeaderFile(translated_strings, out_filename):
"""Writes a .h file with resource ids. This file can be included by the
executable to refer to identifiers."""
lines = []
# Write the language offset ids. Because translated_strings is sorted by
# resource id, its first block contains exactly one entry per language.
seen_languages = set()
offset_id = 0
for translation_struct in translated_strings:
lang = translation_struct.language
if lang not in seen_languages:
seen_languages.add(lang)
lines.append(u'#define IDS_L10N_OFFSET_%s %s' % (lang, offset_id))
offset_id += 1
else:
break
# Write the resource ids themselves.
resource_id = kFirstResourceID
for translation_struct in translated_strings:
lines.append(u'#define %s %s' % (translation_struct.resource_id_str,
resource_id))
resource_id += 1
# Write out base ID values.
for string_id in kStringIds:
lines.append(u'#define %s_BASE %s_%s' % (string_id,
string_id,
translated_strings[0].language))
outfile = open(out_filename + '.h', 'wb')
outfile.write('\n'.join(lines))
outfile.write('\n')  # The generated file must end in a newline.
outfile.close()
def main(argv):
translated_strings = CollectTranslatedStrings()
kFilebase = os.path.join(argv[1], 'installer_util_strings')
WriteRCFile(translated_strings, kFilebase)
WriteHeaderFile(translated_strings, kFilebase)
if '__main__' == __name__:
if len(sys.argv) < 2:
print 'Usage:\n %s <output_directory>' % sys.argv[0]
sys.exit(1)
main(sys.argv)
|
rwatson/chromium-capsicum
|
chrome/installer/util/prebuild/create_string_rc.py
|
Python
|
bsd-3-clause
| 7,669
|
[
"xTB"
] |
0966a4ea9d4b7698a8d5581bf5f291d6a4bb41e8403fe368102a23f0411f35ed
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest
import os
from pymatgen import Molecule
from pymatgen.io.gaussian import GaussianInput, GaussianOutput
from pymatgen.electronic_structure.core import Spin
"""
Created on Apr 17, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Apr 17, 2012"
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files', "molecules")
class GaussianInputTest(unittest.TestCase):
def setUp(self):
coords = [[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
self.coords = coords
mol = Molecule(["C", "H", "H", "H", "H"], coords)
self.gau = GaussianInput(
mol, route_parameters={'SP': "", "SCF": "Tight"},
input_parameters={"EPS": 12})
def test_init(self):
mol = Molecule(["C", "H", "H", "H", "H"], self.coords)
gau = GaussianInput(mol, charge=1, route_parameters={'SP': "",
"SCF": "Tight"})
self.assertEqual(gau.spin_multiplicity, 2)
mol = Molecule(["C", "H", "H", "H", "H"], self.coords, charge=-1)
gau = GaussianInput(mol, route_parameters={'SP': "", "SCF": "Tight"})
self.assertEqual(gau.spin_multiplicity, 2)
self.assertRaises(ValueError, GaussianInput, mol, spin_multiplicity=1)
def test_str_and_from_string(self):
ans = """#P HF/6-31G(d) SCF=Tight SP
H4 C1
0 1
C
H 1 B1
H 1 B2 2 A2
H 1 B3 2 A3 3 D3
H 1 B4 2 A4 4 D4
B1=1.089000
B2=1.089000
A2=109.471221
B3=1.089000
A3=109.471213
D3=120.000017
B4=1.089000
A4=109.471213
D4=119.999966
EPS=12
"""
self.assertEqual(str(self.gau), ans)
gau = GaussianInput.from_string(ans)
self.assertEqual(gau.functional, 'HF')
self.assertEqual(gau.input_parameters['EPS'], '12')
def test_from_file(self):
filepath = os.path.join(test_dir, 'MethylPyrrolidine_drawn.gjf')
gau = GaussianInput.from_file(filepath)
self.assertEqual(gau.molecule.composition.formula, "H11 C5 N1")
self.assertIn("opt", gau.route_parameters)
self.assertEqual(gau.route_parameters["geom"], "connectivity")
self.assertEqual(gau.functional, "b3lyp")
self.assertEqual(gau.basis_set, "6-311+g(d,p)")
filepath = os.path.join(test_dir, "g305_hb.txt")
with open(filepath) as f:
txt = f.read()
toks = txt.split("--link1--")
for i, t in enumerate(toks):
lines = t.strip().split("\n")
lines = [l.strip() for l in lines]
gau = GaussianInput.from_string("\n".join(lines))
self.assertIsNotNone(gau.molecule)
if i == 0:
mol = gau.molecule
ans = """Full Formula (H4 O2)
Reduced Formula: H2O
Charge = 0, Spin Mult = 1
Sites (6)
0 O 0.000000 0.000000 0.000000
1 O 0.000000 0.000000 2.912902
2 H 0.892596 0.000000 -0.373266
3 H 0.143970 0.000219 0.964351
4 H -0.582554 0.765401 3.042783
5 H -0.580711 -0.766761 3.043012"""
self.assertEqual(str(mol), ans)
def test_from_string(self):
gau_str = """%mem=5000000
%chk=filename
# mp2/6-31g* scf=direct
SIH4+ H2---SIH2+ CS //MP2(full)/6-31G* MP2=-290.9225259
1,2
Si
X,1,1.
H,1,R1,2,HALF1
H,1,R1,2,HALF1,3,180.,0
X,1,1.,2,90.,3,90.,0
X,1,1.,5,THETA,2,180.,0
H,1,R3,6,HALF3,5,0.,0
H,1,R4,6,HALF3,7,180.,0
R1=1.47014
R3=1.890457
R4=1.83514
HALF1=60.633314
THETA=10.35464
HALF3=11.861807"""
gau = GaussianInput.from_string(gau_str)
self.assertEqual("X3SiH4", gau.molecule.composition.reduced_formula)
def test_gen_basis(self):
gau_str = """#N B3LYP/Gen Pseudo=Read
Test
0 1
C
H 1 B1
H 1 B2 2 A2
H 1 B3 2 A3 3 D3
H 1 B4 2 A4 4 D4
B1=1.089000
B2=1.089000
A2=109.471221
B3=1.089000
A3=109.471213
D3=120.000017
B4=1.089000
A4=109.471213
D4=119.999966
C 0
6-31G(d,p)
****
H 0
6-31G
****
"""
mol = Molecule(["C", "H", "H", "H", "H"], self.coords)
gen_basis = "C 0\n6-31G(d,p)\n****\nH 0\n6-31G\n****"
gau = GaussianInput(mol, functional="B3LYP", gen_basis=gen_basis,
dieze_tag="#N", route_parameters={"Pseudo": "Read"},
title="Test")
self.assertEqual(gau.to_string(cart_coords=False), gau_str)
def test_multiple_parameters(self):
"""
This test makes sure that input files with multi-parameter keywords
and route cards with multiple lines can be parsed accurately.
"""
filepath = os.path.join(test_dir, "l-cysteine.inp")
route = {"test": None, "integral": {"grid": "UltraFine"},
"opt": {"Z-Matrix": None, "maxcycles": "80", "tight": None}}
gin = GaussianInput.from_file(filepath)
self.assertEqual(gin.dieze_tag, "#n")
self.assertEqual(gin.functional, "B3LYP")
self.assertEqual(gin.basis_set, "6-31+G**")
self.assertEqual(gin.route_parameters, route)
self.assertEqual(gin.title, "L-cysteine neutral")
self.assertEqual(gin.charge, 0)
self.assertEqual(gin.spin_multiplicity, 1)
class GaussianOutputTest(unittest.TestCase):
# todo: Add unittest for PCM type output.
def setUp(self):
self.gauout = GaussianOutput(os.path.join(test_dir, "methane.log"))
def test_resume(self):
resume = self.gauout.resumes[0]
methane_resume = r"""1\1\GINC-SHYUE-LAPTOP\FOpt\RHF\3-21G\C1H4\SHYUE\27-Feb-2008\0\\#p hf/3
-21G opt\\Title Card Required\\0,1\C,0.,0.,0.\H,0.,0.,1.0829014152\H,1
.0209692454,0.,-0.3609671384\H,-0.5104846227,-0.884185303,-0.360967138
4\H,-0.5104846227,0.884185303,-0.3609671384\\Version=IA32L-G03RevD.01\
State=1-A1\HF=-39.9768776\RMSD=3.210e-09\RMSF=5.014e-08\Thermal=0.\Dip
ole=0.,0.,0.\PG=TD [O(C1),4C3(H1)]\\@"""
methane_resume = "".join([r.strip() for r in methane_resume.split("\n")])
self.assertEqual(resume, methane_resume)
def test_props(self):
gau = self.gauout
self.assertEqual(len(gau.energies), 3)
self.assertAlmostEqual(gau.energies[-1], -39.9768775602)
self.assertEqual(len(gau.structures), 4)
for mol in gau.structures:
self.assertEqual(mol.formula, 'H4 C1')
self.assertIn("opt", gau.route_parameters)
self.assertEqual("Minimum", gau.stationary_type)
self.assertEqual("hf", gau.functional)
self.assertEqual("3-21G", gau.basis_set)
self.assertEqual(17, gau.num_basis_func)
d = gau.as_dict()
self.assertEqual(d["input"]["functional"], "hf")
self.assertAlmostEqual(d["output"]["final_energy"], -39.9768775602)
self.assertEqual(len(gau.cart_forces), 3)
self.assertEqual(gau.cart_forces[0][5], 0.009791094)
self.assertEqual(gau.cart_forces[0][-1], -0.003263698)
self.assertEqual(gau.cart_forces[2][-1], -0.000000032)
self.assertEqual(gau.eigenvalues[Spin.up][-1], 1.95586)
self.assertEqual(gau.num_basis_func, 17)
self.assertEqual(gau.is_spin, False)
ch2o_co2 = GaussianOutput(os.path.join(test_dir, "CH2O_CO2.log"))
self.assertEqual(len(ch2o_co2.frequencies), 2)
self.assertEqual(len(ch2o_co2.frequencies[0]), 6)
self.assertEqual(len(ch2o_co2.frequencies[1]), 4)
self.assertEqual(ch2o_co2.frequencies[0][0]["frequency"], 1203.1940)
self.assertEqual(ch2o_co2.frequencies[0][0]["symmetry"], "A\"")
self.assertEqual(ch2o_co2.frequencies[0][3]["IR_intensity"], 60.9575)
self.assertEqual(ch2o_co2.frequencies[0][3]["r_mass"], 3.7543)
self.assertEqual(ch2o_co2.frequencies[0][4]["f_constant"], 5.4175)
self.assertListEqual(ch2o_co2.frequencies[0][1]["mode"], [0.15, 0.00, 0.00,
-0.26, 0.65, 0.00,
-0.26, -0.65, 0.00,
-0.08, 0.00, 0.00])
self.assertListEqual(ch2o_co2.frequencies[1][3]["mode"], [0.00, 0.00, 0.88,
0.00, 0.00, -0.33,
0.00, 0.00, -0.33])
self.assertEqual(ch2o_co2.frequencies[1][3]["symmetry"], "SGU")
self.assertEqual(ch2o_co2.eigenvalues[Spin.up][3], -1.18394)
h2o = GaussianOutput(os.path.join(test_dir, "H2O_gau_vib.out"))
self.assertEqual(len(h2o.frequencies[0]), 3)
self.assertEqual(h2o.frequencies[0][0]["frequency"], 1662.8033)
self.assertEqual(h2o.frequencies[0][1]["symmetry"], "A'")
self.assertEqual(h2o.hessian[0, 0], 0.356872)
self.assertEqual(h2o.hessian.shape, (9, 9))
self.assertEqual(h2o.hessian[8, :].tolist(), [-0.143692e-01, 0.780136e-01,
-0.362637e-01, -0.176193e-01,
0.277304e-01, -0.583237e-02,
0.319885e-01, -0.105744e+00,
0.420960e-01])
def test_pop(self):
gau = GaussianOutput(os.path.join(test_dir, "H2O_gau.out"))
self.assertEqual(gau.num_basis_func, 13)
self.assertEqual(gau.electrons, (5, 5))
self.assertEqual(gau.is_spin, True)
self.assertListEqual(gau.eigenvalues[Spin.down], [-20.55343, -1.35264,
-0.72655, -0.54824,
-0.49831, 0.20705,
0.30297, 1.10569,
1.16144, 1.16717,
1.20460, 1.38903,
1.67608])
mo = gau.molecular_orbital
self.assertEqual(len(mo), 2)  # one entry per spin channel
self.assertEqual(len(mo[Spin.down]), 13)
self.assertEqual(len(mo[Spin.down][0]), 3)
self.assertEqual(mo[Spin.down][5][0]["1S"], -0.08771)
self.assertEqual(mo[Spin.down][5][0]["2PZ"], -0.21625)
self.assertListEqual(gau.eigenvectors[Spin.up][:, 5].tolist(), [-0.08771,
0.10840,
0.00000,
0.00000,
-0.21625,
1.21165,
0.00000,
0.00000,
-0.44481,
-0.06348,
-1.00532,
-0.06348,
-1.00532])
self.assertListEqual(gau.atom_basis_labels[0], ["1S", "2S", "2PX", "2PY",
"2PZ", "3S", "3PX", "3PY",
"3PZ"])
self.assertListEqual(gau.atom_basis_labels[2], ["1S", "2S"])
gau = GaussianOutput(os.path.join(test_dir, "H2O_gau_vib.out"))
self.assertEqual(gau.bond_orders[(0, 1)], 0.7582)
self.assertEqual(gau.bond_orders[(1, 2)], 0.0002)
def test_scan(self):
gau = GaussianOutput(os.path.join(test_dir, "so2_scan.log"))
d = gau.read_scan()
self.assertAlmostEqual(-548.02102, d["energies"][-1])
self.assertEqual(len(d["coords"]), 1)
self.assertEqual(len(d["energies"]), len(gau.energies))
self.assertEqual(len(d["energies"]), 21)
def test_td(self):
gau = GaussianOutput(os.path.join(test_dir, "so2_td.log"))
transitions = gau.read_excitation_energies()
self.assertEqual(len(transitions), 4)
self.assertAlmostEqual(transitions[0], (3.9281, 315.64, 0.0054))
def test_multiple_parameters(self):
"""
This test makes sure that input files with multi-parameter keywords
and route cards with multiple lines can be parsed accurately.
"""
filepath = os.path.join(test_dir, "l-cysteine.out")
route = {"test": None, "integral": {"grid": "UltraFine"},
"opt": {"Z-Matrix": None, "maxcycles": "80", "tight": None}}
gout = GaussianOutput(filepath)
self.assertEqual(gout.dieze_tag, "#n")
self.assertEqual(gout.functional, "B3LYP")
self.assertEqual(gout.basis_set, "6-31+G**")
self.assertEqual(gout.route_parameters, route)
self.assertEqual(gout.title, "L-cysteine neutral")
self.assertEqual(gout.charge, 0)
self.assertEqual(gout.spin_multiplicity, 1)
if __name__ == "__main__":
unittest.main()
|
gpetretto/pymatgen
|
pymatgen/io/tests/test_gaussian.py
|
Python
|
mit
| 13,950
|
[
"Gaussian",
"pymatgen"
] |
b2bd46e477681092840b1d52ab7c3f8de549297f7374798c12eb39bfdf871453
|
# Copyright (C) 2011 by Brandon Invergo (b.invergo@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from __future__ import print_function
import os
import subprocess
try:
from os.path import relpath as _relpath
except ImportError:
# os.path.relpath is new in Python 2.6; fall back for older versions.
def _relpath(path, start=None):
"""Return a relative version of a path.
Implementation by James Gardner in his BareNecessities
package, under MIT licence.
With a fix for Windows where posixpath.sep (and functions like
join) use the Unix slash not the Windows slash.
"""
import posixpath
if start is None:
start = posixpath.curdir
else:
start = start.replace(os.path.sep, posixpath.sep)
if not path:
raise ValueError("no path specified")
else:
path = path.replace(os.path.sep, posixpath.sep)
start_list = posixpath.abspath(start).split(posixpath.sep)
path_list = posixpath.abspath(path).split(posixpath.sep)
# Work out how much of the filepath is shared by start and path.
i = len(posixpath.commonprefix([start_list, path_list]))
rel_list = [posixpath.pardir] * (len(start_list) - i) + path_list[i:]
if not rel_list:
return posixpath.curdir.replace(posixpath.sep, os.path.sep)
return posixpath.join(*rel_list).replace(posixpath.sep, os.path.sep)
class PamlError(EnvironmentError):
"""paml has failed. Run with verbose = True to view the error
message"""
class Paml(object):
"""Base class for wrapping PAML commands."""
def __init__(self, alignment=None, working_dir=None,
out_file=None):
if working_dir is None:
self.working_dir = os.getcwd()
else:
self.working_dir = working_dir
if alignment is not None:
if not os.path.exists(alignment):
raise IOError("The specified alignment file does not exist.")
self.alignment = alignment
self.out_file = out_file
self._options = {} # will be set in subclasses
def write_ctl_file(self):
pass
def read_ctl_file(self):
pass
def print_options(self):
"""Print out all of the options and their current settings."""
for option in self._options.items():
print("%s = %s" % (option[0], option[1]))
def set_options(self, **kwargs):
"""Set the value of an option.
This function abstracts the options dict to prevent the user from
adding options that do not exist or misspelling option names.
"""
for option, value in kwargs.items():
if option not in self._options:
raise KeyError("Invalid option: " + option)
else:
self._options[option] = value
def get_option(self, option):
"""Return the value of an option."""
if option not in self._options:
raise KeyError("Invalid option: " + option)
else:
return self._options.get(option)
def get_all_options(self):
"""Return the values of all the options."""
return list(self._options.items())
def _set_rel_paths(self):
"""Convert all file/directory locations to paths relative to the current working directory.
paml requires that all paths specified in the control file be
relative to the directory from which it is called rather than
absolute paths.
"""
if self.working_dir is not None:
self._rel_working_dir = _relpath(self.working_dir)
if self.alignment is not None:
self._rel_alignment = _relpath(self.alignment,
self.working_dir)
if self.out_file is not None:
self._rel_out_file = _relpath(self.out_file, self.working_dir)
def run(self, ctl_file, verbose, command):
"""Run a paml program using the current configuration and then parse the results.
Check the process return code: 0 indicates success, a positive code
raises PamlError, and a negative code -N (process killed by signal N)
raises EnvironmentError. The arguments may be passed as either
absolute or relative paths, despite the fact that paml requires
relative paths.
"""
if self.alignment is None:
raise ValueError("Alignment file not specified.")
if not os.path.exists(self.alignment):
raise IOError("The specified alignment file does not exist.")
if self.out_file is None:
raise ValueError("Output file not specified.")
if self.working_dir is None:
raise ValueError("Working directory not specified.")
# Get the current working directory
cwd = os.getcwd()
# Move to the desired working directory
if not os.path.exists(self.working_dir):
os.mkdir(self.working_dir)
os.chdir(self.working_dir)
# If no external control file was specified...
if ctl_file is None:
# Dynamically build a control file
self.write_ctl_file()
if verbose:
result_code = subprocess.call([command, self.ctl_file])
else:
# To suppress output, redirect it to a pipe to nowhere
result_code = subprocess.call([command, self.ctl_file],
stdout=subprocess.PIPE)
else:
if not os.path.exists(ctl_file):
raise IOError("The specified control file does not exist.")
if verbose:
result_code = subprocess.call([command, ctl_file])
else:
result_code = subprocess.call([command, ctl_file],
stdout=subprocess.PIPE)
os.chdir(cwd)
if result_code > 0:
# If the program fails for any reason
raise PamlError(
"%s has failed (return code %i). Run with verbose = True to view error message"
% (command, result_code))
if result_code < 0:
# If the paml process is killed by a signal somehow
raise EnvironmentError("The %s process was killed (return code %i)."
% (command, result_code))
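# A minimal usage sketch (hypothetical; Paml is a base class, so in practice one
# uses a concrete wrapper such as Bio.Phylo.PAML.codeml.Codeml, whose run()
# defaults ctl_file to None and verbose to False):
#
#   from Bio.Phylo.PAML import codeml
#   cml = codeml.Codeml(alignment="aln.phy", tree="tree.nwk",
#                       out_file="results.out", working_dir="./scratch")
#   cml.set_options(NSsites=[0, 1, 2], seqtype=1)
#   results = cml.run(verbose=True)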
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/Phylo/PAML/_paml.py
|
Python
|
apache-2.0
| 6,379
|
[
"Biopython"
] |
a9c4e220b9368d195e9534146f7ff681a365d937233ce0428dc403fe86281687
|
# -*- coding: UTF-8 -*-
# File: imgproc.py
# Author: Yuxin Wu <ppwwyyxx@gmail.com>
from .base import ImageAugmentor
from ...utils import logger
import numpy as np
import cv2
__all__ = ['Hue', 'Brightness', 'Contrast', 'MeanVarianceNormalize',
'GaussianBlur', 'Gamma', 'Clip', 'Saturation', 'Lighting', 'MinMaxNormalize']
class Hue(ImageAugmentor):
""" Randomly change color hue.
"""
def __init__(self, range=(0, 180), rgb=None):
"""
Args:
range(list or tuple): hue range
rgb (bool): whether input is RGB or BGR.
"""
super(Hue, self).__init__()
if rgb is None:
logger.warn("Hue() now assumes rgb=False, but will by default use rgb=True in the future!")
rgb = False
rgb = bool(rgb)
self._init(locals())
def _get_augment_params(self, _):
return self._rand_range(*self.range)
def _augment(self, img, hue):
m = cv2.COLOR_BGR2HSV if not self.rgb else cv2.COLOR_RGB2HSV
hsv = cv2.cvtColor(img, m)
# Note: OpenCV uses hue in the range 0-179 instead of 0-359 degrees.
hsv[..., 0] = (hsv[..., 0] + hue) % 180
m = cv2.COLOR_HSV2BGR if not self.rgb else cv2.COLOR_HSV2RGB
img = cv2.cvtColor(hsv, m)
return img
class Brightness(ImageAugmentor):
"""
Randomly adjust brightness.
"""
def __init__(self, delta, clip=True):
"""
Randomly add a value within [-delta, delta], and clip to [0, 255] if clip is True.
"""
super(Brightness, self).__init__()
assert delta > 0
self._init(locals())
def _get_augment_params(self, img):
v = self._rand_range(-self.delta, self.delta)
return v
def _augment(self, img, v):
old_dtype = img.dtype
img = img.astype('float32')
img += v
if self.clip or old_dtype == np.uint8:
img = np.clip(img, 0, 255)
return img.astype(old_dtype)
class Contrast(ImageAugmentor):
"""
Apply ``x = (x - mean) * contrast_factor + mean`` to each channel.
"""
def __init__(self, factor_range, clip=True):
"""
Args:
factor_range (list or tuple): an interval to randomly sample the `contrast_factor`.
clip (bool): clip to [0, 255] if True.
"""
super(Contrast, self).__init__()
self._init(locals())
def _get_augment_params(self, img):
return self._rand_range(*self.factor_range)
def _augment(self, img, r):
old_dtype = img.dtype
img = img.astype('float32')
mean = np.mean(img, axis=(0, 1), keepdims=True)
img = (img - mean) * r + mean
if self.clip or old_dtype == np.uint8:
img = np.clip(img, 0, 255)
return img.astype(old_dtype)
class MeanVarianceNormalize(ImageAugmentor):
"""
Linearly scales the image to have zero mean and unit variance.
``x = (x - mean) / adjusted_stddev``
where ``adjusted_stddev = max(stddev, 1.0/sqrt(num_pixels * channels))``
This augmentor always returns float32 images.
"""
def __init__(self, all_channel=True):
"""
Args:
all_channel (bool): if True, normalize all channels together. else separately.
"""
self._init(locals())
def _augment(self, img, _):
img = img.astype('float32')
if self.all_channel:
mean = np.mean(img)
std = np.std(img)
else:
mean = np.mean(img, axis=(0, 1), keepdims=True)
std = np.std(img, axis=(0, 1), keepdims=True)
std = np.maximum(std, 1.0 / np.sqrt(np.prod(img.shape)))
img = (img - mean) / std
return img
class GaussianBlur(ImageAugmentor):
""" Gaussian blur the image with random window size"""
def __init__(self, max_size=3):
"""
Args:
max_size (int): max possible Gaussian window size is 2 * max_size - 1
"""
super(GaussianBlur, self).__init__()
self._init(locals())
def _get_augment_params(self, img):
sx, sy = self.rng.randint(self.max_size, size=(2,))
sx = sx * 2 + 1
sy = sy * 2 + 1
return sx, sy
def _augment(self, img, s):
return np.reshape(cv2.GaussianBlur(img, s, sigmaX=0, sigmaY=0,
borderType=cv2.BORDER_REPLICATE), img.shape)
class Gamma(ImageAugmentor):
""" Randomly adjust gamma """
def __init__(self, range=(-0.5, 0.5)):
"""
Args:
range(list or tuple): gamma range
"""
super(Gamma, self).__init__()
self._init(locals())
def _get_augment_params(self, _):
return self._rand_range(*self.range)
def _augment(self, img, gamma):
old_dtype = img.dtype
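# Build a 256-entry lookup table implementing out = 255 * (in/255) ** (1/(1+gamma)).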
lut = ((np.arange(256, dtype='float32') / 255) ** (1. / (1. + gamma)) * 255).astype('uint8')
img = np.clip(img, 0, 255).astype('uint8')
ret = cv2.LUT(img, lut).astype(old_dtype)
if img.ndim == 3 and ret.ndim == 2:
ret = ret[:, :, np.newaxis]
return ret
class Clip(ImageAugmentor):
""" Clip the pixel values """
def __init__(self, min=0, max=255):
"""
Args:
min, max: the clip range
"""
self._init(locals())
def _augment(self, img, _):
img = np.clip(img, self.min, self.max)
return img
class Saturation(ImageAugmentor):
""" Randomly adjust saturation.
Follows the implementation in `fb.resnet.torch
<https://github.com/facebook/fb.resnet.torch/blob/master/datasets/transforms.lua#L218>`__.
"""
def __init__(self, alpha=0.4, rgb=None):
"""
Args:
alpha(float): maximum saturation change.
rgb (bool): whether input is RGB or BGR.
"""
super(Saturation, self).__init__()
if rgb is None:
logger.warn("Saturation() now assumes rgb=False, but will by default use rgb=True in the future!")
rgb = False
rgb = bool(rgb)
assert alpha < 1
self._init(locals())
def _get_augment_params(self, _):
return 1 + self._rand_range(-self.alpha, self.alpha)
def _augment(self, img, v):
old_dtype = img.dtype
m = cv2.COLOR_RGB2GRAY if self.rgb else cv2.COLOR_BGR2GRAY
grey = cv2.cvtColor(img, m)
ret = img * v + (grey * (1 - v))[:, :, np.newaxis]
return ret.astype(old_dtype)
class Lighting(ImageAugmentor):
""" Lighting noise, as in the paper
`ImageNet Classification with Deep Convolutional Neural Networks
<https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_.
The implementation follows `fb.resnet.torch
<https://github.com/facebook/fb.resnet.torch/blob/master/datasets/transforms.lua#L184>`__.
"""
def __init__(self, std, eigval, eigvec):
"""
Args:
std (float): maximum standard deviation
eigval: a vector of (3,). The eigenvalues of 3 channels.
eigvec: a 3x3 matrix. Each column is one eigen vector.
"""
eigval = np.asarray(eigval)
eigvec = np.asarray(eigvec)
assert eigval.shape == (3,)
assert eigvec.shape == (3, 3)
self._init(locals())
def _get_augment_params(self, img):
assert img.shape[2] == 3
return self.rng.randn(3) * self.std
def _augment(self, img, v):
old_dtype = img.dtype
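# Scale the sampled standard normals by the eigenvalues, then project through
# the eigenvector matrix to get one additive offset per color channel.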
v = v * self.eigval
v = v.reshape((3, 1))
inc = np.dot(self.eigvec, v).reshape((3,))
img = np.add(img, inc)
if old_dtype == np.uint8:
img = np.clip(img, 0, 255)
return img.astype(old_dtype)
class MinMaxNormalize(ImageAugmentor):
"""
Linearly scales the image to the range [min, max].
This augmentor always returns float32 images.
"""
def __init__(self, min=0, max=255, all_channel=True):
"""
Args:
max (float): The new maximum value
min (float): The new minimum value
all_channel (bool): if True, normalize all channels together. else separately.
"""
self._init(locals())
def _augment(self, img, _):
img = img.astype('float32')
if self.all_channel:
minimum = np.min(img)
maximum = np.max(img)
else:
minimum = np.min(img, axis=(0, 1), keepdims=True)
maximum = np.max(img, axis=(0, 1), keepdims=True)
img = (self.max - self.min) * (img - minimum) / (maximum - minimum) + self.min
return img
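# A minimal usage sketch (hypothetical, not part of this module): an augmentor
# is applied to a single HWC image via ImageAugmentor.augment, e.g.
#
#   import cv2
#   img = cv2.imread('input.jpg')  # BGR uint8 image, assumed to exist
#   for aug in [Brightness(30), Contrast((0.8, 1.2)), GaussianBlur(max_size=3)]:
#       img = aug.augment(img)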
|
haamoon/tensorpack
|
tensorpack/dataflow/imgaug/imgproc.py
|
Python
|
apache-2.0
| 8,688
|
[
"Gaussian"
] |
7878da5fd759547c84dbd749439995eda94563b0bf5f80fd7a045256a0ee1b80
|
#
# Copyright (C) 2006 Stefan Seefeld
# All rights reserved.
# Licensed to the public under the terms of the GNU LGPL (>= 2),
# see the file COPYING for details.
#
from Synopsis.Processor import Processor, Parameter
from Synopsis import ASG
class Formatter(Processor, ASG.Visitor):
"""Generate a high-level list of the content of a syn file.
This formatter can list source files (by name) as well as
declarations (by name and type) contained in a given scope."""
show_files = Parameter(False, 'list files')
show_scope = Parameter(None, 'list declarations in the given scope')
def process(self, ir, **kwds):
self.set_parameters(kwds)
self.ir = self.merge_input(ir)
if self.show_files:
for f in self.ir.files.values():
print '%s (language=%s, primary=%d)'%(f.name, f.annotations['language'],
f.annotations['primary'])
if self.show_scope is not None:
if '.' in self.show_scope:
self.scope = tuple(self.show_scope.split('.'))
elif '::' in self.show_scope:
self.scope = tuple(self.show_scope.split('::'))
else:
self.scope = (self.show_scope,)
for d in self.ir.asg.declarations:
d.accept(self)
return self.ir
def visit_scope(self, node):
if self.scope == node.name:
# We found the right scope.
# List all declarations directly contained in it.
declarations = node.declarations[:]
declarations.sort(lambda x, y : cmp(x.name, y.name))
for d in declarations:
if isinstance(d, ASG.Builtin): continue
print '%s : %s'%(d.name[-1], d.type)
elif (len(node.name) < len(self.scope) and
self.scope[0:len(node.name)] == node.name):
# We found a parent scope.
# Visit child scopes.
for d in node.declarations:
d.accept(self)
|
stefanseefeld/synopsis
|
Synopsis/Formatters/List.py
|
Python
|
lgpl-2.1
| 2,082
|
[
"VisIt"
] |
9e27d8206cc948aca4e82f0e74bc76962d795535433c3cdeade2a92c0275aef0
|
# Copyright 2008-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import setter
from .configurer import SuiteConfigurer
from .filter import Filter, EmptySuiteRemover
from .itemlist import ItemList
from .keyword import Keyword, Keywords
from .metadata import Metadata
from .modelobject import ModelObject
from .tagsetter import TagSetter
from .testcase import TestCase, TestCases
class TestSuite(ModelObject):
"""Base model for single suite.
"""
__slots__ = ['parent', 'source', '_name', 'doc', '_my_visitors']
test_class = TestCase
keyword_class = Keyword
def __init__(self, name='', doc='', metadata=None, source=None):
#: Parent :class:`TestSuite` or `None`.
self.parent = None
#: Test suite name.
self.name = name
#: Test suite documentation.
self.doc = doc
#: Test suite metadata as a dictionary.
self.metadata = metadata
#: Path to the source file or directory.
self.source = source
#: A list of child :class:`~.testsuite.TestSuite` instances.
self.suites = None
#: A list of :class:`~.testcase.TestCase` instances.
self.tests = None
#: A list containing setup and teardown as
#: :class:`~keyword.Keyword` instances.
self.keywords = None
self._my_visitors = []
@property
def _visitors(self):
parent_visitors = self.parent._visitors if self.parent else []
return self._my_visitors + parent_visitors
def _get_name(self):
return self._name or ' & '.join(s.name for s in self.suites)
def _set_name(self, name):
self._name = name
name = property(_get_name, _set_name)
@setter
def metadata(self, metadata):
"""Free test suite metadata as a dictionary."""
return Metadata(metadata)
@setter
def suites(self, suites):
"""A list-like :class:`~.TestSuites` object containing child suites."""
return TestSuites(self.__class__, self, suites)
@setter
def tests(self, tests):
return TestCases(self.test_class, self, tests)
@setter
def keywords(self, keywords):
return Keywords(self.keyword_class, self, keywords)
@property
def id(self):
"""An automatically generated unique id.
The root suite has id ``s1``, its children have ids ``s1-s1``,
``s1-s2``, ..., their children get ids ``s1-s1-s1``, ``s1-s1-s2``,
..., ``s1-s2-s1``, ..., and so on.
"""
if not self.parent:
return 's1'
return '%s-s%d' % (self.parent.id, self.parent.suites.index(self)+1)
@property
def longname(self):
"""Suite name prefixed with all parent suite names."""
if not self.parent:
return self.name
return '%s.%s' % (self.parent.longname, self.name)
@property
def test_count(self):
"""Number of the tests in this suite, recursively."""
return len(self.tests) + sum(suite.test_count for suite in self.suites)
def set_tags(self, add=None, remove=None, persist=False):
"""Add and/or remove specified tags to the tests in this suite.
:param add: Tags to add as a list or, if adding only one,
as a single string.
:param remove: Tags to remove as a list or as a single string.
Can be given as patterns where ``*`` and ``?`` work as wildcards.
:param persist: Add/remove specified tags also to new tests added
to this suite in the future.
"""
setter = TagSetter(add, remove)
self.visit(setter)
if persist:
self._my_visitors.append(setter)
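# For illustration (hypothetical call, not in the original source): tag all
# current tests with 'regression', drop old 'req-*' tags, and keep applying
# the same change to tests added later:
#
#   suite.set_tags(add='regression', remove='req-*', persist=True)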
def filter(self, included_suites=None, included_tests=None,
included_tags=None, excluded_tags=None):
"""Select test cases and remove others from this suite.
Parameters have the same semantics as ``--suite``, ``--test``,
``--include``, and ``--exclude`` command line options. All of them
can be given as a list of strings, or when selecting only one, as
a single string.
Child suites that contain no tests after filtering are automatically
removed.
Example::
suite.filter(included_tests=['Test 1', '* Example'],
included_tags='priority-1')
"""
self.visit(Filter(included_suites, included_tests,
included_tags, excluded_tags))
def configure(self, **options):
self.visit(SuiteConfigurer(**options))
def remove_empty_suites(self):
"""Removes all child suites not containing any tests, recursively."""
self.visit(EmptySuiteRemover())
def visit(self, visitor):
visitor.visit_suite(self)
class TestSuites(ItemList):
__slots__ = []
def __init__(self, suite_class=TestSuite, parent=None, suites=None):
ItemList.__init__(self, suite_class, {'parent': parent}, suites)
|
yamateh/robotframework
|
src/robot/model/testsuite.py
|
Python
|
apache-2.0
| 5,519
|
[
"VisIt"
] |
c6263625a05394b022cb7ae5d2880fd73ec53d1a15a13d81302e71876c185d9f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2012 Michal Kalewski <mkalewski at cs.put.poznan.pl>
#
# This file is a part of the Simple Network Simulator (sim2net) project.
# USE, MODIFICATION, COPYING AND DISTRIBUTION OF THIS SOFTWARE IS SUBJECT TO
# THE TERMS AND CONDITIONS OF THE MIT LICENSE. YOU SHOULD HAVE RECEIVED A COPY
# OF THE MIT LICENSE ALONG WITH THIS SOFTWARE; IF NOT, YOU CAN DOWNLOAD A COPY
# FROM HTTP://WWW.OPENSOURCE.ORG/.
#
# For bug reports, feature and support requests please visit
# <https://github.com/mkalewski/sim2net/issues>.
"""
This package provides a collection of placement model classes.
A placement (or deployment) model describes a simulation area and a given
number of nodes deployed in the area. It also provides node positions in the
case of static networks, or initial node positions for mobile environments.
"""
__docformat__ = 'reStructuredText'
__all__ = ['grid', 'normal', 'uniform']
|
mkalewski/sim2net
|
sim2net/placement/__init__.py
|
Python
|
mit
| 941
|
[
"VisIt"
] |
1397556fd2253eac49ca233f0096ddfc75d2227b0e148e15895eb36884735f0e
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import h5py
import scipy
from scipy import cos, exp, pi, sin, sqrt
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
import sys
from tools import parse_range, tag_hdf5_object_with_git_version
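# Note: find_mean returns the centroid of the normalized 2-D array W as
# fractional (column, row) indices, i.e. its first moments along each axis.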
def find_mean(W):
Wn = W/W.sum()
m, n = Wn.shape
i_q = 0.
i_p = 0.
for i in range(m):
for j in range(n):
i_q += j*Wn[i,j]
i_p += i*Wn[i,j]
return (i_q, i_p)
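# convert_params maps the principal-axis widths (sigma_q, sigma_p) and rotation
# angle theta of a 2-D Gaussian to the quadratic-form coefficients (a, b, c)
# used in twoD_Gaussian below, i.e. the entries of the inverse covariance
# matrix [[a, b], [b, c]]:
#   f(q, p) ~ exp(-(a*(q-q0)**2 + 2*b*(q-q0)*(p-p0) + c*(p-p0)**2) / 2)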
def convert_params(sigma_q, sigma_p, theta):
a = (cos(theta)**2)/(sigma_q**2) + (sin(theta)**2)/(sigma_p**2)
b = -(sin(2.*theta))/(2.*sigma_q**2) + (sin(2.*theta))/(2.*sigma_p**2)
c = (sin(theta)**2)/(sigma_q**2) + (cos(theta)**2)/(sigma_p**2)
return a, b, c
def fit_gaussian_state(Q, P, W):
q = Q[0,:]
p = P[:,0]
m, n = W.shape
idx_to_q = interp1d(scipy.arange(n), q)
idx_to_p = interp1d(scipy.arange(m), p)
i_mean = find_mean(W)
try:
q0, p0 = idx_to_q(i_mean[0]), idx_to_p(i_mean[1])
s0 = 1./(W.max()*sqrt(2.*pi))
theta0 = 0.
def twoD_Gaussian(qp, a, b, c):
q, p = qp
det = a*c-b**2
if det<0:
raise RuntimeError
normalization = sqrt(det)/(2.*pi)
g = normalization*exp( -1./2.* (a*((q-q0)**2) + 2*b*(q-q0)*(p-p0) + c*((p-p0)**2)))
return g.ravel()
initial_guess = convert_params(s0, s0, theta0)
(a, b, c), pcov = curve_fit(twoD_Gaussian, (Q, P), W.ravel(), p0=initial_guess)
cov = scipy.array([[c, -b], [-b, a]])/(a*c-b**2)
dq = cov[0,0]
cqp = cov[0,1]
dp = cov[1,1]
except Exception:  # the fit failed; fall back to NaNs below
q0 = scipy.nan
p0 = scipy.nan
dq = scipy.nan
cqp = scipy.nan
dp = scipy.nan
return scipy.array([q0, p0, dq, cqp, dp])
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("infilename",
help="HDF5 file containing reconstruction data")
parser.add_argument("-f", "--force",
help="overwrite previous gaussian fits (default: %(default)s)",
action="store_true")
parser.add_argument("-s", "--scans",
help="select scans to treat (default: %(default)s)",
type=parse_range, default="all")
args = parser.parse_args()
return args
def load_reconstructions(h5):
rg = h5["reconstructions"]
Q_ds = rg["Q"]
P_ds = rg["P"]
W_ds = rg["W"]
return Q_ds, P_ds, W_ds
def setup_gaussian_state_ds(h5, no_scans, no_steps, force):
if "gaussians" in h5.keys():
if force:
print("Old gaussian fits found. Force active, deleting old fits.")
del h5["gaussians"]
else:
print("Old gaussian fits found. If you want to overwrite them, use --force. Aborting.")
sys.exit(1)
G_ds = h5.create_dataset("gaussians", (no_scans, no_steps, 5))
tag_hdf5_object_with_git_version(G_ds)
return G_ds
def main():
args = parse_args()
h5 = h5py.File(args.infilename, "r+")
Q_ds, P_ds, W_ds = load_reconstructions(h5)
no_scans, no_steps, no_q, no_p = Q_ds.shape
G_ds = setup_gaussian_state_ds(h5, no_scans, no_steps, args.force)
if args.scans=="all":
scans = range(no_scans)
else:
scans = args.scans
no_scans = len(scans)
for scan_no, i_scan in enumerate(scans, 1):
sys.stderr.write("Starting scan {}, {} of {}:\n".format(i_scan, scan_no, no_scans))
for i_step, state in enumerate(map(fit_gaussian_state,
Q_ds[i_scan], P_ds[i_scan], W_ds[i_scan])):
G_ds[i_scan, i_step] = state
sys.stderr.write('\r{0:7.2%}'.format(float(i_step)/no_steps))
sys.stderr.write("\r100.00%\n")
if __name__ == "__main__":
main()
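# Example invocation (hypothetical file name):
#
#   python fit_gaussians.py reconstructions.h5 --force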
|
tomohowk/tomohowk
|
src/fit_gaussians.py
|
Python
|
mit
| 3,909
|
[
"Gaussian"
] |
c98ce528e173779503d556a5824b6eeb7ac91ece5216d9a39e8a3ecef220fcea
|