#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""\
Wrapper script around Rietveld's upload.py that simplifies working with groups
of files.
"""
import json
import optparse
import os
import random
import re
import ssl
import string
import sys
import tempfile
import time
import urllib2
import breakpad # pylint: disable=W0611
import auth
import fix_encoding
import gclient_utils
import git_cl
import presubmit_support
import rietveld
from scm import SVN
import subprocess2
from third_party import upload
__version__ = '1.2.1'
CODEREVIEW_SETTINGS = {
# To make gcl send reviews to a server, check in a file named
# "codereview.settings" (see |CODEREVIEW_SETTINGS_FILE| below) to your
# project's base directory and add the following line to codereview.settings:
# CODE_REVIEW_SERVER: codereview.yourserver.org
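# A minimal codereview.settings might look like this (hypothetical values;
# other keys read by this script include CC_LIST, VIEW_VC, STATUS, PROJECT,
# PRIVATE, LINT_REGEX and LINT_IGNORE_REGEX):
#   CODE_REVIEW_SERVER: codereview.yourserver.org
#   CC_LIST: dev-reviews@yourserver.org
#   VIEW_VC: http://src.yourserver.org/viewvc?revision=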
}
# globals that store the root of the current repository and the directory where
# we store information about changelists.
REPOSITORY_ROOT = ""
# Replacement for project name.
SWITCH_TO_GIT = "SWITCH_TO_GIT_ALREADY"
# Filename where we store repository specific information for gcl.
CODEREVIEW_SETTINGS_FILE = "codereview.settings"
CODEREVIEW_SETTINGS_FILE_NOT_FOUND = (
'No %s file found. Please add one.' % CODEREVIEW_SETTINGS_FILE)
# Warning message when the change appears to be missing tests.
MISSING_TEST_MSG = "Change contains new or modified methods, but no new tests!"
# Global cache of files cached in GetCacheDir().
FILES_CACHE = {}
# Valid extensions for files we want to lint.
DEFAULT_LINT_REGEX = r"(.*\.cpp|.*\.cc|.*\.h)"
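# Note: r"$^" matches only the empty string, so by default no file is ignored.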
DEFAULT_LINT_IGNORE_REGEX = r"$^"
def CheckHomeForFile(filename):
"""Checks the users home dir for the existence of the given file. Returns
the path to the file if it's there, or None if it is not.
"""
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
for home_var in home_vars:
home = os.getenv(home_var)
if home is not None:
full_path = os.path.join(home, filename)
if os.path.exists(full_path):
return full_path
return None
def UnknownFiles():
"""Runs svn status and returns unknown files."""
return [
item[1] for item in SVN.CaptureStatus([], GetRepositoryRoot())
if item[0][0] == '?'
]
def GetRepositoryRoot():
"""Returns the top level directory of the current repository.
The directory is returned as an absolute path.
"""
global REPOSITORY_ROOT
if not REPOSITORY_ROOT:
REPOSITORY_ROOT = SVN.GetCheckoutRoot(os.getcwd())
if not REPOSITORY_ROOT:
raise gclient_utils.Error("gcl run outside of repository")
return REPOSITORY_ROOT
def GetInfoDir():
"""Returns the directory where gcl info files are stored."""
return os.path.join(GetRepositoryRoot(), '.svn', 'gcl_info')
def GetChangesDir():
"""Returns the directory where gcl change files are stored."""
return os.path.join(GetInfoDir(), 'changes')
def GetCacheDir():
"""Returns the directory where gcl change files are stored."""
return os.path.join(GetInfoDir(), 'cache')
def GetCachedFile(filename, max_age=60*60*24*3, use_root=False):
"""Retrieves a file from the repository and caches it in GetCacheDir() for
max_age seconds.
use_root: If False, walk up the directory tree for the first match, otherwise
go directly to the repository root.
Note: The cache will be inconsistent if the same file is retrieved with both
use_root=True and use_root=False. Don't be stupid.
"""
if filename not in FILES_CACHE:
# Don't try to look up twice.
FILES_CACHE[filename] = None
# First we check if we have a cached version.
try:
cached_file = os.path.join(GetCacheDir(), filename)
except (gclient_utils.Error, subprocess2.CalledProcessError):
return None
if (not os.path.exists(cached_file) or
(time.time() - os.stat(cached_file).st_mtime) > max_age):
dir_info = SVN.CaptureLocalInfo([], '.')
repo_root = dir_info['Repository Root']
if use_root:
url_path = repo_root
else:
url_path = dir_info['URL']
while True:
# Look in the repository at the current level for the file.
for _ in range(5):
content = None
try:
# Take advantage of the fact that svn won't output to stderr on success
# but will on failure, so it's fine to capture stderr into content_array.
content_array = []
svn_path = url_path + '/' + filename
args = ['svn', 'cat', svn_path]
if sys.platform != 'darwin':
# Mac OS X 10.5.2 has a bug with svn 1.4.4 that triggers the
# 'Can't get username or password' error and can be fixed easily.
# The fix doesn't work if the user upgraded to svn 1.6.x. Bleh.
# I don't have time to fix their broken stuff.
args.append('--non-interactive')
gclient_utils.CheckCallAndFilter(
args, cwd='.', filter_fn=content_array.append)
# Exit the loop if the file was found. Override content.
content = '\n'.join(content_array)
break
except (gclient_utils.Error, subprocess2.CalledProcessError):
if content_array[0].startswith(
'svn: Can\'t get username or password'):
ErrorExit('Your svn credentials expired. Please run svn update '
'to fix the cached credentials')
if content_array[0].startswith('svn: Can\'t get password'):
ErrorExit('If you are using a Mac and svn --version shows 1.4.x, '
'please hack gcl.py to remove the --non-interactive usage; it\'s '
'a bug in your installed copy')
if (content_array[0].startswith('svn: File not found:') or
content_array[0].endswith('path not found')):
break
# Otherwise, fall through to trying again.
if content:
break
if url_path == repo_root:
# Reached the root. Abandoning search.
break
# Go up one level to try again.
url_path = os.path.dirname(url_path)
if content is not None or filename != CODEREVIEW_SETTINGS_FILE:
# Write a cached version even if there isn't a file, so we don't try to
# fetch it each time. codereview.settings must always be present, so do
# not cache a negative result for it.
gclient_utils.FileWrite(cached_file, content or '')
else:
content = gclient_utils.FileRead(cached_file, 'r')
# Keep the content cached in memory.
FILES_CACHE[filename] = content
return FILES_CACHE[filename]
def GetCodeReviewSetting(key):
"""Returns a value for the given key for this repository."""
# Use '__just_initialized' as a flag to determine if the settings were
# already initialized.
if '__just_initialized' not in CODEREVIEW_SETTINGS:
settings_file = GetCachedFile(CODEREVIEW_SETTINGS_FILE)
if settings_file:
CODEREVIEW_SETTINGS.update(
gclient_utils.ParseCodereviewSettingsContent(settings_file))
CODEREVIEW_SETTINGS.setdefault('__just_initialized', None)
return CODEREVIEW_SETTINGS.get(key, "")
def Warn(msg):
print >> sys.stderr, msg
def ErrorExit(msg):
print >> sys.stderr, msg
sys.exit(1)
def RunShellWithReturnCode(command, print_output=False):
"""Executes a command and returns the output and the return code."""
p = subprocess2.Popen(
command,
cwd=GetRepositoryRoot(),
stdout=subprocess2.PIPE,
stderr=subprocess2.STDOUT,
universal_newlines=True)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
if print_output:
print line.strip('\n')
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
p.stdout.close()
return output, p.returncode
def RunShell(command, print_output=False):
"""Executes a command and returns the output."""
return RunShellWithReturnCode(command, print_output)[0]
def FilterFlag(args, flag):
"""Returns True if the flag is present in args list.
The flag is removed from args if present.
"""
if flag in args:
args.remove(flag)
return True
return False
class ChangeInfo(object):
"""Holds information about a changelist.
name: change name.
issue: the Rietveld issue number or 0 if it hasn't been uploaded yet.
patchset: the Rietveld latest patchset number or 0.
description: the description.
files: a list of 2 tuple containing (status, filename) of changed files,
with paths being relative to the top repository directory.
local_root: Local root directory
rietveld: rietveld server for this change
"""
# Kept for unit test support. This is for the old format, it's deprecated.
SEPARATOR = "\n-----\n"
def __init__(self, name, issue, patchset, description, files, local_root,
rietveld_url, needs_upload):
# Defer the description processing to git_cl.ChangeDescription.
self._desc = git_cl.ChangeDescription(description)
self.name = name
self.issue = int(issue)
self.patchset = int(patchset)
self._files = files or []
self.patch = None
self._local_root = local_root
self.needs_upload = needs_upload
self.rietveld = gclient_utils.UpgradeToHttps(
rietveld_url or GetCodeReviewSetting('CODE_REVIEW_SERVER'))
self._rpc_server = None
@property
def description(self):
return self._desc.description
def force_description(self, new_description):
self._desc = git_cl.ChangeDescription(new_description)
self.needs_upload = True
def append_footer(self, line):
self._desc.append_footer(line)
def get_reviewers(self):
return self._desc.get_reviewers()
def update_reviewers(self, reviewers):
self._desc.update_reviewers(reviewers)
def NeedsUpload(self):
return self.needs_upload
def GetFileNames(self):
"""Returns the list of file names included in this change."""
return [f[1] for f in self._files]
def GetFiles(self):
"""Returns the list of files included in this change with their status."""
return self._files
def GetLocalRoot(self):
"""Returns the local repository checkout root directory."""
return self._local_root
def Exists(self):
"""Returns True if this change already exists (i.e., is not new)."""
return (self.issue or self.description or self._files)
def _NonDeletedFileList(self):
"""Returns a list of files in this change, not including deleted files."""
return [f[1] for f in self.GetFiles()
if not f[0].startswith("D")]
def _AddedFileList(self):
"""Returns a list of files added in this change."""
return [f[1] for f in self.GetFiles() if f[0].startswith("A")]
def Save(self):
"""Writes the changelist information to disk."""
data = json.dumps({
'issue': self.issue,
'patchset': self.patchset,
'needs_upload': self.NeedsUpload(),
'files': self.GetFiles(),
'description': self.description,
'rietveld': self.rietveld,
}, sort_keys=True, indent=2)
gclient_utils.FileWrite(GetChangelistInfoFile(self.name), data)
def Delete(self):
"""Removes the changelist information from disk."""
os.remove(GetChangelistInfoFile(self.name))
def RpcServer(self):
if not self._rpc_server:
if not self.rietveld:
ErrorExit(CODEREVIEW_SETTINGS_FILE_NOT_FOUND)
# TODO(vadimsh): gcl.py should be deleted soon. Do not bother much about
# authentication options and always use defaults.
self._rpc_server = rietveld.CachingRietveld(
self.rietveld, auth.make_auth_config())
return self._rpc_server
def CloseIssue(self):
"""Closes the Rietveld issue for this changelist."""
# Newer versions of Rietveld require us to pass an XSRF token to POST, so
# we fetch it from the server.
xsrf_token = self.SendToRietveld(
'/xsrf_token',
extra_headers={'X-Requesting-XSRF-Token': '1'})
# You cannot close an issue with a GET.
# We pass an empty string for the data so it is a POST rather than a GET.
data = [("description", self.description),
("xsrf_token", xsrf_token)]
ctype, body = upload.EncodeMultipartFormData(data, [])
self.SendToRietveld('/%d/close' % self.issue, payload=body,
content_type=ctype)
def UpdateRietveldDescription(self):
"""Sets the description for an issue on Rietveld."""
data = [("description", self.description),]
ctype, body = upload.EncodeMultipartFormData(data, [])
self.SendToRietveld('/%d/description' % self.issue, payload=body,
content_type=ctype)
self.needs_upload = False
def GetIssueDescription(self):
"""Returns the issue description from Rietveld."""
return self.SendToRietveld('/%d/description' % self.issue).replace('\r\n',
'\n')
def UpdateDescriptionFromIssue(self):
"""Updates self.description with the issue description from Rietveld."""
self._desc = git_cl.ChangeDescription(self.GetIssueDescription())
def GetApprovingReviewers(self):
"""Returns the issue reviewers list from Rietveld."""
return git_cl.get_approving_reviewers(
self.RpcServer().get_issue_properties(self.issue, True))
def AddComment(self, comment):
"""Adds a comment for an issue on Rietveld.
As a side effect, this will email everyone associated with the issue."""
return self.RpcServer().add_comment(self.issue, comment)
def PrimeLint(self):
"""Do background work on Rietveld to lint the file so that the results are
ready when the issue is viewed."""
if self.issue and self.patchset:
try:
self.SendToRietveld('/lint/issue%s_%s' % (self.issue, self.patchset),
timeout=60)
except ssl.SSLError as e:
# It takes more than 60 seconds to lint some CLs. Silently ignore
# the expected timeout.
if e.message != 'The read operation timed out':
raise
def SendToRietveld(self, request_path, timeout=None, **kwargs):
"""Send a POST/GET to Rietveld. Returns the response body."""
try:
return self.RpcServer().Send(request_path, timeout=timeout, **kwargs)
except urllib2.URLError:
if timeout is None:
ErrorExit('Error accessing url %s' % request_path)
else:
return None
def MissingTests(self):
"""Returns True if the change looks like it needs unit tests but has none.
A change needs unit tests if it contains any new source files or methods.
"""
SOURCE_SUFFIXES = [".cc", ".cpp", ".c", ".m", ".mm"]
# Ignore third_party entirely.
files = [f for f in self._NonDeletedFileList()
if f.find("third_party") == -1]
added_files = [f for f in self._AddedFileList()
if f.find("third_party") == -1]
# If the change is entirely in third_party, we're done.
if len(files) == 0:
return False
# Any new or modified test files?
# A test file's name ends with "test.*" or "tests.*".
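# For example, both "foo_test.cc" and "foo_tests.cc" match after rstrip("s").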
test_files = [test for test in files
if os.path.splitext(test)[0].rstrip("s").endswith("test")]
if len(test_files) > 0:
return False
# Any new source files?
source_files = [item for item in added_files
if os.path.splitext(item)[1] in SOURCE_SUFFIXES]
if len(source_files) > 0:
return True
# Do the long test, checking the files for new methods.
return self._HasNewMethod()
def _HasNewMethod(self):
"""Returns True if the changeset contains any new functions, or if a
function signature has been changed.
A function is identified by starting flush left, containing a "(" before
the next flush-left line, and either ending with "{" before the next
flush-left line or being followed by an unindented "{".
Currently this returns True for new methods, new static functions, and
methods or functions whose signatures have been changed.
Inline methods added to header files won't be detected by this. That's
acceptable for purposes of determining if a unit test is needed, since
inline methods should be trivial.
"""
# To check for methods added to source or header files, we need the diffs.
# We'll generate them all, since there aren't likely to be many files
# apart from source and headers; besides, we'll want them all if we're
# uploading anyway.
if self.patch is None:
self.patch = GenerateDiff(self.GetFileNames())
definition = ""
for line in self.patch.splitlines():
if not line.startswith("+"):
continue
line = line.strip("+").rstrip(" \t")
# Skip empty lines, comments, and preprocessor directives.
# TODO(pamg): Handle multiline comments if it turns out to be a problem.
if line == "" or line.startswith("/") or line.startswith("#"):
continue
# A possible definition ending with "{" is complete, so check it.
if definition.endswith("{"):
if definition.find("(") != -1:
return True
definition = ""
# A { or an indented line, when we're in a definition, continues it.
if (definition != "" and
(line == "{" or line.startswith(" ") or line.startswith("\t"))):
definition += line
# A flush-left line starts a new possible function definition.
elif not line.startswith(" ") and not line.startswith("\t"):
definition = line
return False
@staticmethod
def Load(changename, local_root, fail_on_not_found, update_status):
"""Gets information about a changelist.
Args:
fail_on_not_found: if True, this function will quit the program if the
changelist doesn't exist.
update_status: if True, the svn status will be updated for all the files
and unchanged files will be removed.
Returns: a ChangeInfo object.
"""
info_file = GetChangelistInfoFile(changename)
if not os.path.exists(info_file):
if fail_on_not_found:
ErrorExit("Changelist " + changename + " not found.")
return ChangeInfo(changename, 0, 0, '', None, local_root, None, False)
content = gclient_utils.FileRead(info_file)
save = False
try:
values = ChangeInfo._LoadNewFormat(content)
except ValueError:
try:
values = ChangeInfo._LoadOldFormat(content)
save = True
except ValueError:
ErrorExit(
('Changelist file %s is corrupt.\n'
'Either run "gcl delete %s" or manually edit the file') % (
info_file, changename))
files = values['files']
if update_status:
for item in files[:]:
status_result = SVN.CaptureStatus(item[1], local_root)
if not status_result or not status_result[0][0]:
# File has been reverted.
save = True
files.remove(item)
continue
status = status_result[0][0]
if status != item[0]:
save = True
files[files.index(item)] = (status, item[1])
change_info = ChangeInfo(
changename,
values['issue'],
values['patchset'],
values['description'],
files,
local_root,
values.get('rietveld'),
values['needs_upload'])
if save:
change_info.Save()
return change_info
@staticmethod
def _LoadOldFormat(content):
# The info files have the following format:
# issue_id, patchset\n (, patchset is optional)
# SEPARATOR\n
# filepath1\n
# filepath2\n
# .
# .
# filepathn\n
# SEPARATOR\n
# description
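# A hypothetical example of an old-format file:
#   31337, 2, dirty
#   -----
#   M      src/foo.cc
#   A      src/foo_test.cc
#   -----
#   Fix the frobnicator.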
split_data = content.split(ChangeInfo.SEPARATOR, 2)
if len(split_data) != 3:
raise ValueError('Bad change format')
values = {
'issue': 0,
'patchset': 0,
'needs_upload': False,
'files': [],
}
items = split_data[0].split(', ')
if items[0]:
values['issue'] = int(items[0])
if len(items) > 1:
values['patchset'] = int(items[1])
if len(items) > 2:
values['needs_upload'] = (items[2] == "dirty")
for line in split_data[1].splitlines():
status = line[:7]
filename = line[7:]
values['files'].append((status, filename))
values['description'] = split_data[2]
return values
@staticmethod
def _LoadNewFormat(content):
return json.loads(content)
def __str__(self):
out = ['%s:' % self.__class__.__name__]
for k in dir(self):
if k.startswith('__'):
continue
v = getattr(self, k)
if v is self or callable(getattr(self, k)):
continue
out.append(' %s: %r' % (k, v))
return '\n'.join(out)
def GetChangelistInfoFile(changename):
"""Returns the file that stores information about a changelist."""
if not changename or re.search(r'[^\w-]', changename):
ErrorExit("Invalid changelist name: " + changename)
return os.path.join(GetChangesDir(), changename)
def LoadChangelistInfoForMultiple(changenames, local_root, fail_on_not_found,
update_status):
"""Loads many changes and merge their files list into one pseudo change.
This is mainly usefull to concatenate many changes into one for a 'gcl try'.
"""
changes = changenames.split(',')
aggregate_change_info = ChangeInfo(
changenames, 0, 0, '', None, local_root, None, False)
for change in changes:
aggregate_change_info._files += ChangeInfo.Load(
change, local_root, fail_on_not_found, update_status).GetFiles()
return aggregate_change_info
def GetCLs():
"""Returns a list of all the changelists in this repository."""
cls = os.listdir(GetChangesDir())
if CODEREVIEW_SETTINGS_FILE in cls:
cls.remove(CODEREVIEW_SETTINGS_FILE)
return cls
def GenerateChangeName():
"""Generate a random changelist name."""
random.seed()
current_cl_names = GetCLs()
while True:
cl_name = (random.choice(string.ascii_lowercase) +
random.choice(string.digits) +
random.choice(string.ascii_lowercase) +
random.choice(string.digits))
if cl_name not in current_cl_names:
return cl_name
def GetModifiedFiles():
"""Returns a set that maps from changelist name to (status,filename) tuples.
Files not in a changelist have an empty changelist name. Filenames are in
relation to the top level directory of the current repository. Note that
only the current directory and subdirectories are scanned, in order to
improve performance while still being flexible.
"""
files = {}
# Since the files are normalized to the root folder of the repository, figure
# out what we need to add to the paths.
dir_prefix = os.getcwd()[len(GetRepositoryRoot()):].strip(os.sep)
# Get a list of all files in changelists.
files_in_cl = {}
for cl in GetCLs():
change_info = ChangeInfo.Load(cl, GetRepositoryRoot(),
fail_on_not_found=True, update_status=False)
for status, filename in change_info.GetFiles():
files_in_cl[filename] = change_info.name
# Get all the modified files down the current directory.
for line in SVN.CaptureStatus(None, os.getcwd()):
status = line[0]
filename = line[1]
if status[0] == "?":
continue
if dir_prefix:
filename = os.path.join(dir_prefix, filename)
change_list_name = ""
if filename in files_in_cl:
change_list_name = files_in_cl[filename]
files.setdefault(change_list_name, []).append((status, filename))
return files
def GetFilesNotInCL():
"""Returns a list of tuples (status,filename) that aren't in any changelists.
See docstring of GetModifiedFiles for information about path of files and
which directories are scanned.
"""
modified_files = GetModifiedFiles()
if "" not in modified_files:
return []
return modified_files[""]
def ListFiles(show_unknown_files):
files = GetModifiedFiles()
cl_keys = files.keys()
cl_keys.sort()
for cl_name in cl_keys:
if not cl_name:
continue
note = ""
change_info = ChangeInfo.Load(cl_name, GetRepositoryRoot(),
fail_on_not_found=True, update_status=False)
if len(change_info.GetFiles()) != len(files[cl_name]):
note = " (Note: this changelist contains files outside this directory)"
print "\n--- Changelist " + cl_name + note + ":"
for filename in files[cl_name]:
print "".join(filename)
if show_unknown_files:
unknown_files = UnknownFiles()
if (files.get('') or (show_unknown_files and len(unknown_files))):
print "\n--- Not in any changelist:"
for item in files.get('', []):
print "".join(item)
if show_unknown_files:
for filename in unknown_files:
print "? %s" % filename
return 0
def GenerateDiff(files):
return SVN.GenerateDiff(
files, GetRepositoryRoot(), full_move=False, revision=None)
def GetTreeStatus():
tree_status_url = GetCodeReviewSetting('STATUS')
return git_cl.GetTreeStatus(tree_status_url) if tree_status_url else "unset"
def OptionallyDoPresubmitChecks(change_info, committing, args):
if FilterFlag(args, "--no_presubmit") or FilterFlag(args, "--force"):
breakpad.SendStack(
breakpad.DEFAULT_URL + '/breakpad',
'GclHooksBypassedCommit',
'Issue %s/%s bypassed hook when committing (tree status was "%s")' %
(change_info.rietveld, change_info.issue, GetTreeStatus()),
verbose=False)
return presubmit_support.PresubmitOutput()
return DoPresubmitChecks(change_info, committing, True)
def defer_attributes(a, b):
"""Copy attributes from an object (like a function) to another."""
for x in dir(a):
if not getattr(b, x, None):
setattr(b, x, getattr(a, x))
def need_change(function):
"""Converts args -> change_info."""
# pylint: disable=W0612,W0621
def hook(args):
if not len(args) == 1:
ErrorExit("You need to pass a change list name")
change_info = ChangeInfo.Load(args[0], GetRepositoryRoot(), True, True)
return function(change_info)
defer_attributes(function, hook)
hook.need_change = True
hook.no_args = True
return hook
def need_change_and_args(function):
"""Converts args -> change_info."""
# pylint: disable=W0612,W0621
def hook(args):
if not args:
ErrorExit("You need to pass a change list name")
change_info = ChangeInfo.Load(args.pop(0), GetRepositoryRoot(), True, True)
return function(change_info, args)
defer_attributes(function, hook)
hook.need_change = True
return hook
def no_args(function):
"""Make sure no args are passed."""
# pylint: disable=W0612,W0621
def hook(args):
if args:
ErrorExit("Doesn't support arguments")
return function()
defer_attributes(function, hook)
hook.no_args = True
return hook
def attrs(**kwargs):
"""Decorate a function with new attributes."""
def decorate(function):
for k in kwargs:
setattr(function, k, kwargs[k])
return function
return decorate
@no_args
def CMDopened():
"""Lists modified files in the current directory down."""
return ListFiles(False)
@no_args
def CMDstatus():
"""Lists modified and unknown files in the current directory down."""
return ListFiles(True)
@need_change_and_args
@attrs(usage='[--no_presubmit] [--no_watchlists]')
def CMDupload(change_info, args):
"""Uploads the changelist to the server for review.
This does not submit a try job; use gcl try to submit a try job.
"""
if '-s' in args or '--server' in args:
ErrorExit('Don\'t use the -s flag, fix codereview.settings instead')
if not change_info.GetFiles():
print "Nothing to upload, changelist is empty."
return 0
output = OptionallyDoPresubmitChecks(change_info, False, args)
if not output.should_continue():
return 1
no_watchlists = (FilterFlag(args, "--no_watchlists") or
FilterFlag(args, "--no-watchlists"))
# Map --send-mail to --send_mail
if FilterFlag(args, "--send-mail"):
args.append("--send_mail")
# Replace -m with -t and --message with --title, but make sure to
# preserve anything after the -m/--message.
found_deprecated_arg = [False]
def replace_message(a):
if a.startswith('-m'):
found_deprecated_arg[0] = True
return '-t' + a[2:]
elif a.startswith('--message'):
found_deprecated_arg[0] = True
return '--title' + a[9:]
return a
args = map(replace_message, args)
if found_deprecated_arg[0]:
print >> sys.stderr, (
'\nWARNING: Use -t or --title to set the title of the patchset.\n'
'In the near future, -m or --message will send a message instead.\n'
'See http://goo.gl/JGg0Z for details.\n')
upload_arg = ["upload.py", "-y"]
upload_arg.append("--server=%s" % change_info.rietveld.encode('utf-8'))
reviewers = change_info.get_reviewers() or output.reviewers
if (reviewers and
not any(arg.startswith('-r') or arg.startswith('--reviewer') for
arg in args)):
upload_arg.append('--reviewers=%s' % ','.join(reviewers))
upload_arg.extend(args)
desc_file = None
try:
if change_info.issue:
# Uploading a new patchset.
upload_arg.append("--issue=%d" % change_info.issue)
project = GetCodeReviewSetting("PROJECT")
if project:
upload_arg.append("--project=%s" % SWITCH_TO_GIT)
if not any(i.startswith('--title') or i.startswith('-t') for i in args):
upload_arg.append('--title= ')
else:
# First time we upload.
handle, desc_file = tempfile.mkstemp(text=True)
os.write(handle, change_info.description)
os.close(handle)
# Watchlist processing -- CC people interested in this changeset
# http://dev.chromium.org/developers/contributing-code/watchlists
if not no_watchlists:
import watchlists
watchlist = watchlists.Watchlists(change_info.GetLocalRoot())
watchers = watchlist.GetWatchersForPaths(change_info.GetFileNames())
# We check this before applying the "PRIVATE" parameter of codereview
# settings assuming that the author of the settings file has put
# addresses which we can send private CLs to, and so we should ignore
# CC_LIST only when --private is specified explicitly on the command
# line.
if "--private" in upload_arg:
Warn("WARNING: CC_LIST and WATCHLISTS are ignored when --private is "
"specified. You need to review and add them manually if "
"necessary.")
cc_list = ""
no_watchlists = True
else:
cc_list = GetCodeReviewSetting("CC_LIST")
if not no_watchlists and watchers:
# Filter out all empty elements and join by ','
cc_list = ','.join(filter(None, [cc_list] + watchers))
if cc_list:
upload_arg.append("--cc=" + cc_list)
upload_arg.append("--file=%s" % desc_file)
if GetCodeReviewSetting("PRIVATE") == "True":
upload_arg.append("--private")
project = GetCodeReviewSetting("PROJECT")
if project:
upload_arg.append("--project=%s" % SWITCH_TO_GIT)
# If we have a lot of files with long paths, then we won't be able to fit
# the command to "svn diff". Instead, we generate the diff manually for
# each file and concatenate them before passing it to upload.py.
if change_info.patch is None:
change_info.patch = GenerateDiff(change_info.GetFileNames())
# Change the current working directory before calling upload.py so that it
# shows the correct base.
previous_cwd = os.getcwd()
os.chdir(change_info.GetLocalRoot())
try:
try:
issue, patchset = upload.RealMain(upload_arg, change_info.patch)
except KeyboardInterrupt:
sys.exit(1)
if issue and patchset:
change_info.issue = int(issue)
change_info.patchset = int(patchset)
change_info.Save()
change_info.PrimeLint()
finally:
os.chdir(previous_cwd)
finally:
if desc_file:
os.remove(desc_file)
print "*** Upload does not submit a try; use gcl try to submit a try. ***"
return 0
@need_change_and_args
@attrs(usage='[--upload]')
def CMDpresubmit(change_info, args):
"""Runs presubmit checks on the change.
The actual presubmit code is implemented in presubmit_support.py and looks
for PRESUBMIT.py files."""
if not change_info.GetFiles():
print('Nothing to presubmit check, changelist is empty.')
return 0
parser = optparse.OptionParser()
parser.add_option('--upload', action='store_true')
options, args = parser.parse_args(args)
if args:
parser.error('Unrecognized args: %s' % args)
if options.upload:
print('*** Presubmit checks for UPLOAD would report: ***')
return not DoPresubmitChecks(change_info, False, False)
else:
print('*** Presubmit checks for COMMIT would report: ***')
return not DoPresubmitChecks(change_info, True, False)
def TryChange(change_info, args, swallow_exception):
"""Create a diff file of change_info and send it to the try server."""
try:
import trychange
except ImportError:
if swallow_exception:
return 1
ErrorExit("You need to install trychange.py to use the try server.")
trychange_args = []
if change_info:
trychange_args.extend(['--name', change_info.name])
if change_info.issue:
trychange_args.extend(["--issue", str(change_info.issue)])
if change_info.patchset:
trychange_args.extend(["--patchset", str(change_info.patchset)])
change = presubmit_support.SvnChange(change_info.name,
change_info.description,
change_info.GetLocalRoot(),
change_info.GetFiles(),
change_info.issue,
change_info.patchset,
None)
else:
change = None
trychange_args.extend(args)
return trychange.TryChange(
trychange_args,
change=change,
swallow_exception=swallow_exception,
prog='gcl try',
extra_epilog='\n'
'When called from gcl, use the format gcl try <change_name>.\n')
@need_change_and_args
@attrs(usage='[--no_presubmit]')
def CMDcommit(change_info, args):
"""Commits the changelist to the repository."""
if not change_info.GetFiles():
print "Nothing to commit, changelist is empty."
return 1
# OptionallyDoPresubmitChecks has a side-effect which eats these flags.
bypassed = '--no_presubmit' in args or '--force' in args
output = OptionallyDoPresubmitChecks(change_info, True, args)
if not output.should_continue():
return 1
# We face a problem with svn here: Let's say change 'bleh' modifies
# svn:ignore on dir1\. but another unrelated change 'pouet' modifies
dir1\foo.cc. When the user runs `gcl commit bleh`, foo.cc is *also committed*.
# The only fix is to use --non-recursive but that has its issues too:
# Let's say if dir1 is deleted, --non-recursive must *not* be used otherwise
# you'll get "svn: Cannot non-recursively commit a directory deletion of a
# directory with child nodes". Yay...
commit_cmd = ["svn", "commit"]
if change_info.issue:
# Get the latest description from Rietveld.
change_info.UpdateDescriptionFromIssue()
change_info.update_reviewers(change_info.GetApprovingReviewers())
commit_desc = git_cl.ChangeDescription(change_info.description)
if change_info.issue:
server = change_info.rietveld
if not server.startswith("http://") and not server.startswith("https://"):
server = "http://" + server
commit_desc.append_footer('Review URL: %s/%d' % (server, change_info.issue))
handle, commit_filename = tempfile.mkstemp(text=True)
os.write(handle, commit_desc.description)
os.close(handle)
try:
handle, targets_filename = tempfile.mkstemp(text=True)
os.write(handle, "\n".join(change_info.GetFileNames()))
os.close(handle)
try:
commit_cmd += ['--file=' + commit_filename]
commit_cmd += ['--targets=' + targets_filename]
# Change the current working directory before calling commit.
output = ''
try:
output = RunShell(commit_cmd, True)
except subprocess2.CalledProcessError, e:
ErrorExit('Commit failed.\n%s' % e)
finally:
os.remove(commit_filename)
finally:
os.remove(targets_filename)
if output.find("Committed revision") != -1:
change_info.Delete()
if change_info.issue:
revision = re.compile(".*?\nCommitted revision (\d+)",
re.DOTALL).match(output).group(1)
viewvc_url = GetCodeReviewSetting('VIEW_VC')
if viewvc_url and revision:
change_info.append_footer('Committed: ' + viewvc_url + revision)
elif revision:
change_info.append_footer('Committed: ' + revision)
change_info.CloseIssue()
props = change_info.RpcServer().get_issue_properties(
change_info.issue, False)
patch_num = len(props['patchsets'])
comment = "Committed patchset #%d (id:%d) manually as r%s" % (
patch_num, props['patchsets'][-1], revision)
if bypassed:
comment += ' (tree was closed).' if GetTreeStatus() == 'closed' else '.'
else:
comment += ' (presubmit successful).'
change_info.AddComment(comment)
return 0
def CMDchange(args):
"""Creates or edits a changelist.
Only scans the current directory and subdirectories.
"""
# Verify the user is running the change command from a read-write checkout.
svn_info = SVN.CaptureLocalInfo([], '.')
if not svn_info:
ErrorExit("Current checkout is unversioned. Please retry with a versioned "
"directory.")
if len(args) == 0:
# Generate a random changelist name.
changename = GenerateChangeName()
elif args[0] == '--force':
changename = GenerateChangeName()
else:
changename = args[0]
change_info = ChangeInfo.Load(changename, GetRepositoryRoot(), False, True)
if len(args) == 2:
if not os.path.isfile(args[1]):
ErrorExit('The change "%s" doesn\'t exist.' % args[1])
f = open(args[1], 'rU')
override_description = f.read()
f.close()
else:
override_description = None
if change_info.issue and not change_info.NeedsUpload():
try:
description = change_info.GetIssueDescription()
except urllib2.HTTPError, err:
if err.code == 404:
# The user deleted the issue in Rietveld, so forget the old issue id.
description = change_info.description
change_info.issue = 0
change_info.Save()
else:
ErrorExit("Error getting the description from Rietveld: " + err)
else:
if override_description:
description = override_description
else:
description = change_info.description
other_files = GetFilesNotInCL()
# Edited files (as opposed to files with only changed properties) will have
# a letter for the first character in the status string.
file_re = re.compile(r"^[a-z].+\Z", re.IGNORECASE)
affected_files = [x for x in other_files if file_re.match(x[0])]
unaffected_files = [x for x in other_files if not file_re.match(x[0])]
description = description.rstrip() + '\n'
separator1 = ("\n---All lines above this line become the description.\n"
"---Repository Root: " + change_info.GetLocalRoot() + "\n"
"---Paths in this changelist (" + change_info.name + "):\n")
separator2 = "\n\n---Paths modified but not in any changelist:\n\n"
text = (description + separator1 + '\n' +
'\n'.join([f[0] + f[1] for f in change_info.GetFiles()]))
if change_info.Exists():
text += (separator2 +
'\n'.join([f[0] + f[1] for f in affected_files]) + '\n')
else:
text += ('\n'.join([f[0] + f[1] for f in affected_files]) + '\n' +
separator2)
text += '\n'.join([f[0] + f[1] for f in unaffected_files]) + '\n'
result = gclient_utils.RunEditor(text, False)
if not result:
ErrorExit('Running editor failed')
split_result = result.split(separator1, 1)
if len(split_result) != 2:
ErrorExit("Don't modify the text starting with ---!\n\n%r" % result)
# Update the CL description if it has changed.
new_description = split_result[0]
cl_files_text = split_result[1]
if new_description != description or override_description:
change_info.force_description(new_description)
new_cl_files = []
for line in cl_files_text.splitlines():
if not len(line):
continue
if line.startswith("---"):
break
status = line[:7]
filename = line[7:]
new_cl_files.append((status, filename))
if (not len(change_info.GetFiles()) and not change_info.issue and
not len(new_description) and not new_cl_files):
ErrorExit("Empty changelist not saved")
change_info._files = new_cl_files
change_info.Save()
if svn_info.get('URL', '').startswith('http:'):
Warn("WARNING: Creating CL in a read-only checkout. You will need to "
"commit using a commit queue!")
print change_info.name + " changelist saved."
if change_info.MissingTests():
Warn("WARNING: " + MISSING_TEST_MSG)
# Update the Rietveld issue.
if change_info.issue and change_info.NeedsUpload():
change_info.UpdateRietveldDescription()
change_info.Save()
return 0
@need_change_and_args
def CMDlint(change_info, args):
"""Runs cpplint.py on all the files in the change list.
Checks all the files in the changelist for possible style violations.
"""
# Access to a protected member _XX of a client class
# pylint: disable=W0212
try:
import cpplint
import cpplint_chromium
except ImportError:
ErrorExit("You need to install cpplint.py to lint C++ files.")
# Change the current working directory before calling lint so that it
# shows the correct base.
previous_cwd = os.getcwd()
os.chdir(change_info.GetLocalRoot())
try:
# Process cpplint's arguments, if any.
filenames = cpplint.ParseArguments(args + change_info.GetFileNames())
white_list = GetCodeReviewSetting("LINT_REGEX")
if not white_list:
white_list = DEFAULT_LINT_REGEX
white_regex = re.compile(white_list)
black_list = GetCodeReviewSetting("LINT_IGNORE_REGEX")
if not black_list:
black_list = DEFAULT_LINT_IGNORE_REGEX
black_regex = re.compile(black_list)
extra_check_functions = [cpplint_chromium.CheckPointerDeclarationWhitespace]
for filename in filenames:
if white_regex.match(filename):
if black_regex.match(filename):
print "Ignoring file %s" % filename
else:
cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level,
extra_check_functions)
else:
print "Skipping file %s" % filename
finally:
os.chdir(previous_cwd)
print "Total errors found: %d\n" % cpplint._cpplint_state.error_count
return 1
def DoPresubmitChecks(change_info, committing, may_prompt):
"""Imports presubmit, then calls presubmit.DoPresubmitChecks."""
root_presubmit = GetCachedFile('PRESUBMIT.py', use_root=True)
change = presubmit_support.SvnChange(change_info.name,
change_info.description,
change_info.GetLocalRoot(),
change_info.GetFiles(),
change_info.issue,
change_info.patchset,
None)
output = presubmit_support.DoPresubmitChecks(
change=change,
committing=committing,
verbose=False,
output_stream=sys.stdout,
input_stream=sys.stdin,
default_presubmit=root_presubmit,
may_prompt=may_prompt,
rietveld_obj=change_info.RpcServer())
if not output.should_continue() and may_prompt:
# TODO(dpranke): move into DoPresubmitChecks(), unify cmd line args.
print "\nPresubmit errors, can't continue (use --no_presubmit to bypass)"
return output
@no_args
def CMDchanges():
"""Lists all the changelists and their files."""
for cl in GetCLs():
change_info = ChangeInfo.Load(cl, GetRepositoryRoot(), True, True)
print "\n--- Changelist " + change_info.name + ":"
for filename in change_info.GetFiles():
print "".join(filename)
return 0
@no_args
def CMDdeleteempties():
"""Delete all changelists that have no files."""
print "\n--- Deleting:"
for cl in GetCLs():
change_info = ChangeInfo.Load(cl, GetRepositoryRoot(), True, True)
if not len(change_info.GetFiles()):
print change_info.name
change_info.Delete()
return 0
@no_args
def CMDnothave():
"""Lists files unknown to Subversion."""
for filename in UnknownFiles():
print "? " + "".join(filename)
return 0
@attrs(usage='<svn options>')
def CMDdiff(args):
"""Diffs all files in the changelist or all files that aren't in a CL."""
files = None
if args:
change_info = ChangeInfo.Load(args.pop(0), GetRepositoryRoot(), True, True)
files = change_info.GetFileNames()
else:
files = [f[1] for f in GetFilesNotInCL()]
root = GetRepositoryRoot()
cmd = ['svn', 'diff']
cmd.extend([os.path.join(root, x) for x in files])
cmd.extend(args)
return RunShellWithReturnCode(cmd, print_output=True)[1]
@no_args
def CMDsettings():
"""Prints code review settings for this checkout."""
# Force load settings
GetCodeReviewSetting("UNKNOWN")
del CODEREVIEW_SETTINGS['__just_initialized']
print '\n'.join(("%s: %s" % (str(k), str(v))
for (k,v) in CODEREVIEW_SETTINGS.iteritems()))
return 0
@need_change
def CMDdescription(change_info):
"""Prints the description of the specified change to stdout."""
print change_info.description
return 0
def CMDdelete(args):
"""Deletes a changelist."""
if not len(args) == 1:
ErrorExit('You need to pass a change list name')
filepath = GetChangelistInfoFile(args[0])
if not os.path.isfile(filepath):
ErrorExit('You need to pass a valid change list name')
os.remove(filepath)
return 0
def CMDtry(args):
"""Sends the change to the tryserver to do a test run on your code.
To send multiple changes as one patch, use a comma-separated list of
changenames. Use 'gcl help try' for more information!"""
# When the change contains no file, send the "changename" positional
# argument to trychange.py.
# When the command is 'try' and --patchset is used, the patch to try
# is on the Rietveld server.
if not args:
ErrorExit("You need to pass a change list name")
if args[0].find(',') != -1:
change_info = LoadChangelistInfoForMultiple(args[0], GetRepositoryRoot(),
True, True)
else:
change_info = ChangeInfo.Load(args[0], GetRepositoryRoot(),
True, True)
props = change_info.RpcServer().get_issue_properties(
change_info.issue, False)
if props.get('private'):
ErrorExit('Cannot use trybots on a private issue')
if change_info.GetFiles():
args = args[1:]
else:
change_info = None
return TryChange(change_info, args, swallow_exception=False)
@attrs(usage='<old-name> <new-name>')
def CMDrename(args):
"""Renames an existing change."""
if len(args) != 2:
ErrorExit("Usage: gcl rename <old-name> <new-name>.")
src, dst = args
src_file = GetChangelistInfoFile(src)
if not os.path.isfile(src_file):
ErrorExit("Change '%s' does not exist." % src)
dst_file = GetChangelistInfoFile(dst)
if os.path.isfile(dst_file):
ErrorExit("Change '%s' already exists; pick a new name." % dst)
os.rename(src_file, dst_file)
print "Change '%s' renamed '%s'." % (src, dst)
return 0
def CMDpassthru(args):
"""Everything else that is passed into gcl we redirect to svn.
It assumes a changelist name is passed and expands it into its file names.
"""
if not args or len(args) < 2:
ErrorExit("You need to pass a change list name for this svn fall-through "
"command")
cl_name = args[1]
args = ["svn", args[0]]
if len(args) > 1:
root = GetRepositoryRoot()
change_info = ChangeInfo.Load(cl_name, root, True, True)
args.extend([os.path.join(root, x) for x in change_info.GetFileNames()])
return RunShellWithReturnCode(args, print_output=True)[1]
def Command(name):
return getattr(sys.modules[__name__], 'CMD' + name, None)
def GenUsage(command):
"""Modify an OptParse object with the function's documentation."""
obj = Command(command)
display = command
more = getattr(obj, 'usage', '')
if command == 'help':
display = '<command>'
need_change_val = ''
if getattr(obj, 'need_change', None):
need_change_val = ' <change_list>'
options = ' [options]'
if getattr(obj, 'no_args', None):
options = ''
res = 'Usage: gcl %s%s%s %s\n\n' % (display, need_change_val, options, more)
res += re.sub('\n ', '\n', obj.__doc__)
return res
def CMDhelp(args):
"""Prints this help or help for the given command."""
if args and 'CMD' + args[0] in dir(sys.modules[__name__]):
print GenUsage(args[0])
# These commands defer to external tools so give this info too.
if args[0] == 'try':
TryChange(None, ['--help'], swallow_exception=False)
if args[0] == 'upload':
upload.RealMain(['upload.py', '--help'])
return 0
print GenUsage('help')
print sys.modules[__name__].__doc__
print 'version ' + __version__ + '\n'
print('Commands are:\n' + '\n'.join([
' %-12s %s' % (fn[3:], Command(fn[3:]).__doc__.split('\n')[0].strip())
for fn in dir(sys.modules[__name__]) if fn.startswith('CMD')]))
return 0
def main(argv):
if sys.hexversion < 0x02060000:
print >> sys.stderr, (
'\nYour python version %s is unsupported, please upgrade.\n' %
sys.version.split(' ', 1)[0])
return 2
sys.stderr.write('Warning: gcl is going away soon. Get off subversion!\n')
sys.stderr.write('See http://crbug.com/475321 for more details.\n')
if not argv:
argv = ['help']
command = Command(argv[0])
# Help can be run from anywhere.
if command == CMDhelp:
return command(argv[1:])
try:
GetRepositoryRoot()
except (gclient_utils.Error, subprocess2.CalledProcessError):
print >> sys.stderr, 'To use gcl, you need to be in a subversion checkout.'
return 1
# Create the directories where we store information about changelists if it
# doesn't exist.
try:
if not os.path.exists(GetInfoDir()):
os.mkdir(GetInfoDir())
if not os.path.exists(GetChangesDir()):
os.mkdir(GetChangesDir())
if not os.path.exists(GetCacheDir()):
os.mkdir(GetCacheDir())
if command:
return command(argv[1:])
# Unknown command, try to pass that to svn
return CMDpassthru(argv)
except (gclient_utils.Error, subprocess2.CalledProcessError), e:
print >> sys.stderr, 'Got an exception'
print >> sys.stderr, str(e)
return 1
except upload.ClientLoginError, e:
print >> sys.stderr, 'Got an exception logging in to Rietveld'
print >> sys.stderr, str(e)
return 1
except urllib2.HTTPError, e:
if e.code != 500:
raise
print >> sys.stderr, (
'AppEngine is misbehaving and returned HTTP %d, again. Keep faith '
'and retry or visit go/isgaeup.\n%s') % (e.code, str(e))
return 1
if __name__ == "__main__":
fix_encoding.fix_encoding()
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
# --- Source: chinmaygarde/depot_tools / gcl.py (Python, bsd-3-clause) ---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 11 14:36:29 2017
@author: derek
"""
import os
import tensorflow as tf
import numpy as np
def _parse_function(example_proto):
"""Reads tfrecords with features {shape: (height,width,depth) of cube data,
label: (malignancy, lobulation, spiculation) labels, cube: usually 32x32x32 data).
Mapped onto a TFRecord dataset
Args:
example_proto: TFRecord protobuffer of data
Returns:
shape_int32: (int32) (height,width,depth)
label_int32: (int32) (malignancy, lobulation, spiculation)
cube: (float32) height x width x depth data (usually 32x32x32)
"""
features = {"shape": tf.FixedLenFeature((), tf.string, default_value=""),
"label": tf.FixedLenFeature((), tf.string, default_value=""),
"cube": tf.FixedLenFeature((), tf.string, default_value="")}
parsed_features = tf.parse_single_example(example_proto, features)
shape = tf.decode_raw(parsed_features['shape'], tf.int16)
shape_int32 = tf.cast(shape,tf.int32)
label = tf.decode_raw(parsed_features['label'], tf.int16)
label_int32 = tf.cast(label,tf.int32)
cube_flat = tf.decode_raw(parsed_features['cube'], tf.int16)
cube_flat_f32 = tf.cast(cube_flat,dtype=tf.float32)
cube = tf.reshape(cube_flat_f32,[shape_int32[0],shape_int32[1],shape_int32[2]])
return shape_int32,label_int32,cube
def augment_data(transpose_index,k_value,flip_yes_no, cubes):
"""augment data (cubes) by rotating the cubes k_values times, and tranposing
the indices specified by transpose_index.
To randomize input:
transpose_index: random permutation of [0,1,2]
k_value: random int [0-3]
flip_yes_no: random int [0-1]
Args:
transpose_index: (np array) array describing the new order of the transposed
axes [x_axis, y_axis, z_axis]; [0,1,2] would keep the axes unchanged.
k_value: (int) number of 90-degree rotations; 0 would keep data unrotated
flip_yes_no: (int) 1 to randomly flip each cube left/right, 0 to skip
Returns:
cubes_out: (float32) batch of cubes after transposition, rotation, and
optional flipping
"""
cubes_trans = tf.map_fn(lambda img: tf.transpose(img, transpose_index), cubes)
cubes_90 = tf.map_fn(lambda img: tf.image.rot90(img,k=k_value), cubes_trans)
# Note: a plain Python "flip_yes_no == 1" on a tf.Variable is evaluated once
# at graph-construction time and is always False, so gate the flip with
# tf.cond instead to make it effective at run time.
cubes_out = tf.cond(tf.equal(flip_yes_no, 1),
lambda: tf.map_fn(tf.image.random_flip_left_right, cubes_90),
lambda: cubes_90)
return cubes_out
def _normalize(image):
""" Normalize image -> clip data between -1000 and 400. Scale values to -0.5 to 0.5
"""
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
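# These bounds presumably correspond to CT Hounsfield units: air (~-1000 HU)
# up to a 400 HU clip commonly used when preprocessing lung CT scans.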
image = tf.maximum(MIN_BOUND, image)
image = tf.minimum(MAX_BOUND, image)
image = (image - MIN_BOUND)
image = image / (MAX_BOUND - MIN_BOUND)
image = image - 0.5
return image
def _randomize(image):
"""Add randomization to the image by raising the image values to a random
power between 1 and 10. Then renormalize to -.5 to .5
Args:
image: input 3d data cube
Returns:
image: image after power and renormalized
"""
image = image - tf.reduce_min(image)
image = tf.pow(image, tf.random_uniform([1],minval=1,maxval=10))
image = image/tf.reduce_max(image)
image = image - 0.5
return image
##########################################################################
##########################################################################
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
var = _variable_initializer(name, shape, initializer=tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def _variable_initializer(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
var = tf.get_variable(name, shape, initializer=initializer, dtype=tf.float32)
return var
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
#tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
#tf.summary.histogram(tensor_name + '/activations', x)
#tf.summary.scalar(tensor_name + '/sparsity',
#tf.nn.zero_fraction(x))
#tf.summary.histogram(x)
#tf.summary.scalar(x)
pass
BATCH_SIZE = 128
#NUM_CLASSES = 3
NUM_CLASSES = 6
global_step = tf.contrib.framework.get_or_create_global_step()
filenames = tf.placeholder(tf.string, shape=[None])
dataset = tf.contrib.data.TFRecordDataset(filenames)
dataset = dataset.map(_parse_function) # Parse the record into tensors.
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.repeat() # Repeat the input indefinitely.
dataset = dataset.batch(BATCH_SIZE)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
transpose_index = tf.Variable(initial_value=[0,1,2],trainable=False,dtype=tf.int32)
k_value = tf.Variable(initial_value=0,trainable=False,dtype=tf.int32)
flip_yes_no = tf.Variable(initial_value=0,trainable=False,dtype=tf.int32)
shape,label,cubes = next_element
cubes = _normalize(cubes) # Normalize to -.5 to .5.
cubes = _randomize(cubes)
cubes_aug_ = augment_data(transpose_index, k_value, flip_yes_no, cubes)
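# tf.nn.conv3d expects 5-D input [batch, depth, height, width, channels].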
cubes_aug = tf.reshape(cubes_aug_,[-1,32,32,32,1])
#mal, lob, spic = tf.unstack(label,num = 3)
mal, lob, spic = tf.split(label,3,axis=1)
label_onehot = tf.one_hot(mal,6)
label_f= tf.reshape(mal,[BATCH_SIZE])
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
#with tf.variable_scope('conv1') as scope:
kernel1 = _variable_with_weight_decay('weights1',
shape=[5, 5, 5, 1, 8],
stddev=5e-2,
wd=0.0)
conv1_ = tf.nn.conv3d(cubes_aug, kernel1, [1, 1, 1, 1, 1], padding='SAME')
biases1 = _variable_initializer('biases1', [8], tf.constant_initializer(0.0))
pre_activation1 = tf.nn.bias_add(conv1_, biases1)
conv1 = tf.nn.relu(pre_activation1, name='scope.name1')
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool3d(conv1, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = pool1
#tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
# name='norm1')
# conv2
#with tf.variable_scope('conv2') as scope:
kernel2 = _variable_with_weight_decay('weights2',
shape=[5, 5, 5, 8, 64],
stddev=5e-2,
wd=0.0)
conv2_ = tf.nn.conv3d(norm1, kernel2, [1, 1, 1, 1, 1], padding='SAME')
biases2 = _variable_initializer('biases2', [64], tf.constant_initializer(0.1))
pre_activation2 = tf.nn.bias_add(conv2_, biases2)
conv2 = tf.nn.relu(pre_activation2, name='scope.name2')
_activation_summary(conv2)
# norm2
norm2 = conv2
#tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
# name='norm2')
# pool2
pool2 = tf.nn.max_pool3d(norm2, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1],
padding='SAME', name='pool2')
# local3
#with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
pool2_flatten = tf.reshape(pool2, [BATCH_SIZE, -1])
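# dim = 32768 = 8 * 8 * 8 * 64: the 32^3 input is halved by two 2x2x2
# max-pools (32 -> 16 -> 8) and conv2 outputs 64 channels.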
dim = 32768
weights = _variable_with_weight_decay('weights3', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_initializer('biases3', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(pool2_flatten, weights) + biases, name='scope.name3')
_activation_summary(local3)
# local4
#with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights4', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_initializer('biases4', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='scope.name4')
_activation_summary(local4)
# linear layer(WX + b),
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
#with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights5', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_initializer('biases5', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='scope.name')
mal_, lob_, spic_ = tf.split(softmax_linear,3,axis=1)
_activation_summary(softmax_linear)
#return softmax_linear
##########################################################################
##########################################################################
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
#mal_fl32 = tf.cast(mal,tf.float32)
#lob_fl32 = tf.cast(lob,tf.float32)
#spic_fl32 = tf.cast(spic,tf.float32)
#
#mal_cost = tf.pow(mal_ - mal_fl32, 2)
#lob_cost = tf.pow(lob_ - lob_fl32, 2)
#spic_cost = tf.pow(spic_ - spic_fl32, 2)
#
#cost_function = tf.reduce_sum(mal_cost + lob_cost + spic_cost)
# Calculate the average cross entropy loss across the batch.
#label_onehot_i64 = tf.cast(label_onehot, tf.int64)
label_onehot_= tf.reshape(label_onehot,[BATCH_SIZE,NUM_CLASSES])
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=label_f, logits=softmax_linear, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
#tf.add_to_collection('losses', cross_entropy_mean)
labels_=tf.argmax(label_onehot_,axis=1)
predictions_=tf.argmax(softmax_linear,axis=1)
accuracy = (tf.reduce_sum(tf.cast(tf.equal(labels_,predictions_),tf.int32)))/BATCH_SIZE
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
#return tf.add_n(tf.get_collection('losses'), name='total_loss')
##########################################################################
##########################################################################
lr = 0.00001
optimizer_ = tf.train.GradientDescentOptimizer(lr)
grads = optimizer_.compute_gradients(cross_entropy_mean)
#grads = optimizer_.compute_gradients(cost_function)
# Apply gradients.
apply_gradient_op = optimizer_.apply_gradients(grads, global_step=global_step)
train_op = apply_gradient_op
sess = tf.InteractiveSession()
init = tf.global_variables_initializer()
saver = tf.train.Saver()
sess.run(init)
src_dir_train = "/media/derek/disk1/kaggle_ndsb2017/resources/_tfrecords/train/"
src_dir_test = "/media/derek/disk1/kaggle_ndsb2017/resources/_tfrecords/test/"
filenames_train = os.listdir(src_dir_train)
filenames_test = os.listdir(src_dir_test)
training_filenames = [src_dir_train + f for f in filenames_train]
testing_filenames = [src_dir_test + f for f in filenames_test]
f_train = open("train6_values_rand_" + str(lr) + ".txt","a")
f_test = open("test6_values_rand_" + str(lr) + ".txt","a")
transpose_possibilities = np.array([[0,1,2],[0,2,1],[1,0,2],[1,2,0],[2,0,1],[2,1,0]])
#sess.run(train_op, feed_dict={transpose_index: transpose_possibilities[np.random.randint(0,6),:], k_value: np.random.randint(0,4)})
for index in range(10000):
    sess.run(iterator.initializer, feed_dict={filenames: training_filenames})
    for i in range(100):
        sess.run(train_op, feed_dict={transpose_index: transpose_possibilities[np.random.randint(0,6),:], k_value: np.random.randint(0,4)})
#train_results = sess.run(cost_function,feed_dict={transpose_index: [0,1,2], k_value: 0})
train_results = sess.run(accuracy,feed_dict={transpose_index: [0,1,2], k_value: 0})
print(train_results)
f_train.write(str(train_results) + "\n")
sess.run(iterator.initializer, feed_dict={filenames: testing_filenames})
#test_results = sess.run(cost_function,feed_dict={transpose_index: [0,1,2], k_value: 0})
test_results = sess.run(accuracy,feed_dict={transpose_index: [0,1,2], k_value: 0})
f_test.write(str(test_results) + "\n")
f_train.flush()
f_test.flush()
    if np.mod(index, 9) == 0:
        save_path = saver.save(sess, "/media/derek/disk1/kaggle_ndsb2017/saved_models/model.ckpt")
| dereknewman/cancer_detection | train_label_cat_cubes_3d.py | Python | mit | 13,908 | ["Gaussian"] | eba1a468ea4f0119c76aad2f42fb1b1dcc725e4ae40746ce839905b68d590df4 |
'''
Guestbook Parser
Guestbook analyzes an Apache log file and returns information about who
visited a site, or a particular part of a site. For example, it can be used
to retrieve information about visitors' geographic location, or the most frequent
visitors by IP address.
It uses the freegeoip.net location API.
******************************************************************
USAGE
******************************************************************
guestbook.py [-h] [-agents AGENTS] [-times TIMES] [-cutoff CUTOFF]
[-target TARGET] [-popular] [-track] [-breakdown]
filepath
positional arguments:
filepath filepath for access log
optional arguments:
-h, --help show this help message and exit
-agents AGENTS Show user agents for a given ip
-times TIMES Show page visits with timestamps for a particular IP address
-cutoff CUTOFF Minimum view count cutoff when showing results
-target TARGET Only show results for specified IP address
-popular Show IP addresses of most popular visits
-track Enable tracking IP geolocation. Results will be shown with
tracking data.
-breakdown Show page visit breakdown for each IP address
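Example (hypothetical log path):
  guestbook.py /var/log/apache2/access.log -popular -cutoff 10 -track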
'''
import sys
import operator
import re
import urllib2
import json
import argparse
from models.Visitor import Visitor
from models.Visit import Visit
'''
*********************************************************************
SETUP
*********************************************************************
'''
# Store a list of all visits
visitsList = []
# Store visitors by IP
visitorsMap = {}
# Regex string to match
regexString = '(\S+) (\S+) (\S+) \[([^:]+):(\d+:\d+:\d+) ([^\]]+)\] \"(\S+) (.*?) (\S+)\" (\S+) (\S+) "([^"]*)" "([^"]*)"'
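# Example of a combined-log-format line the regex above matches (hypothetical entry):
# 1.2.3.4 - - [10/Oct/2017:13:55:36 -0700] "GET /index.html HTTP/1.1" 200 2326 "http://example.com/" "Mozilla/5.0"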
# Add command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("filepath", type=str, help="Filepath for access log")
parser.add_argument("-agents", type=str, help="Show user agents for a given ip")
parser.add_argument("-times", type=str, help="Show page visits with timestamps for a particular IP address")
parser.add_argument("-cutoff", type=int, help="Minimum view count cutoff when showing results")
parser.add_argument("-target", type=str, help="Only show results for specified IP address")
parser.add_argument("-popular", action="store_true", help="Show IP addresses of most popular visits")
parser.add_argument("-track", action="store_true", help="Enable tracking IP geolocation. Results will be shown with tracking data.")
parser.add_argument("-breakdown", action="store_true", help="Show page visit breakdown for each IP address")
def main(args):
# Parse arguments
args = parser.parse_args()
fileLocation = args.filepath
importVisitsFromFile(fileLocation)
# Show known user agents for an IP address
if (args.agents):
showAgents(args.agents, args)
# Show access times for an IP address
if (args.times):
showTimes(args.times, args)
# Show the IP addresses with the most hits
if (args.popular):
mostPopularVisitors(args.track, args.cutoff)
# List IP addresses of visitors and which pages they visited
if (args.breakdown):
visitorPages(args.target, args)
# Prints out information about the visitors with the highest page hits.
# Optional parameter: track
# If track is set to true, this script will also send out a request to freegeoip.net
# to retrieve IP address geolocation information
def mostPopularVisitors(track=False, cutoff=None):
for visitor in (sorted(visitorsMap.values(), key=operator.attrgetter('visitCount'), reverse=True)):
# Hide results that have less views than the cutoff
if cutoff:
if visitor.visitCount < cutoff:
continue
# If geotracking has been enabled
if track:
# getGeoLocationDataString(visitor)
print getGeoLocationDataString(visitor)
print "{} - {} visits".format(visitor.ipAddress, visitor.visitCount)
        # Only add a line break if we're also displaying geolocation data
if track:
print ""
# Prints out information about pages that a user has visited, and
# the pagehits on each page.
def visitorPages(targetIp=None, args=None):
# Only print page breakdown for target visitor
if targetIp:
if targetIp in visitorsMap.keys():
print visitorsMap[targetIp].pageBreakdown()
return
else:
print "Could not find any records for this IP in the access log file"
else:
# If no target IP, print out page breakdown for all visitors
for ip, visitor in visitorsMap.iteritems():
print ip
print visitor.pageBreakdown()
# Prints out information about the known user agents for a
# given IP address.
def showAgents(targetIp, args=None):
if targetIp and targetIp in visitorsMap.keys():
print visitorsMap[targetIp].userAgentsString()
else:
print "Could not find any records for this IP in the access log file"
# Prints out information about access times for a
# given IP address.
def showTimes(targetIp, args=None):
if targetIp and targetIp in visitorsMap.keys():
visitor = visitorsMap[targetIp]
print "\nShowing access times for {}\n".format(targetIp)
if args and args.track:
geoData = getGeoLocationDataString(visitor)
print "{}\n".format(geoData)
print visitor.timesAndUrls()
else:
print "Could not find any records for this IP in the access log file"
'''
*********************************************************************
Helper functions
*********************************************************************
'''
def getGeoLocationDataString(visitor):
url = "http://freegeoip.net/json/{}".format(visitor.ipAddress)
apiResponse = urllib2.urlopen(url)
    country, city, region_name, zip_code = parseGeoLocationData(apiResponse) or (None, None, None, None)
return "{} {}, {} {}".format(country, city, region_name, zip_code)
# Given an API response from freegeoip, parse it and return
# geolocation data in a tuple
def parseGeoLocationData(apiResponse):
userData = json.load(apiResponse)
# Cleanse data and remove bad encoding
country = userData["country_name"].encode('ascii', 'ignore')
city = userData["city"].encode('ascii', 'ignore')
region_name = userData["region_name"].encode('ascii', 'ignore')
zip_code = userData["zip_code"].encode('ascii', 'ignore')
return country, city, region_name, zip_code
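# Illustrative shape of the freegeoip.net JSON consumed above (hypothetical values):
# {"ip": "1.2.3.4", "country_name": "United States", "city": "Mountain View",
#  "region_name": "California", "zip_code": "94043"}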
# Extracts specific pieces of data from a line in the
# access log. Returns None if no matches are found.
def getInfoFromLogLine(compiledRegex, line):
result = compiledRegex.match(line)
if result is not None:
result = result.groups()
else:
return None
if result is not None:
ip = result[0]
domain = result[11]
page = result[7]
dateTime = result[3] + " " + result[4]
userAgent = result[12]
return ip, domain, page, dateTime, userAgent
return None
# Populate visits list from access log
def importVisitsFromFile(fileLocation):
compiledRegex = re.compile(regexString)
with open(fileLocation) as f:
for line in f:
ip, domain, page, dateTime, userAgent = getInfoFromLogLine(compiledRegex, line) or (None, None, None, None, None)
if ip:
# Update visits list
visit = Visit(ip, domain, page, dateTime, userAgent)
visitsList.append(visit)
# Update visitors hashmap
if ip not in visitorsMap:
visitorsMap[ip] = Visitor(ip)
visitorsMap[ip].addVisit(visit)
# Run if started from command line
if __name__ == "__main__":
main(sys.argv)
| Brian-Lam/Guestbook | guestbook.py | Python | mit | 7,342 | ["VisIt"] | 9a1d233b23f94aa95003b0b1a89f13521c0783464e01cca53c61db254c75b10b |
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
#
# This code was in part derived from the python-magic library:
# The MIT License (MIT)
#
# Copyright (c) 2001-2014 Adam Hupp
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os.path
import ctypes
from commoncode import system
from commoncode import command
"""
magic2 is minimal and specialized wrapper around a vendored libmagic file
identification library. This is NOT thread-safe. It is based on python-magic
by Adam Hup and adapted to the specific needs of ScanCode.
"""
data_dir = os.path.join(os.path.dirname(__file__), 'data')
bin_dir = os.path.join(os.path.dirname(__file__), 'bin')
# path to vendored magic DB, possibly OS-specific
basemag = os.path.join(data_dir, 'magic')
# keep the first which is the most specific directory
magdir = command.get_base_dirs(basemag)[0]
magic_db = os.path.join(magdir, 'magic.mgc')
#
# Cached detectors
#
detectors = {}
# libmagic flags
MAGIC_NONE = 0
MAGIC_MIME = 16
MAGIC_MIME_ENCODING = 1024
MAGIC_NO_CHECK_ELF = 65536
MAGIC_NO_CHECK_TEXT = 131072
MAGIC_NO_CHECK_CDF = 262144
DETECT_TYPE = MAGIC_NONE
DETECT_MIME = MAGIC_NONE | MAGIC_MIME
DETECT_ENC = MAGIC_NONE | MAGIC_MIME | MAGIC_MIME_ENCODING
def file_type(location):
""""
Return the detected filetype for file at `location` or an empty string if
nothing found or an error occurred.
"""
try:
return _detect(location, DETECT_TYPE)
    except Exception:
# TODO: log errors
return ''
def mime_type(location):
""""
Return the detected mimetype for file at `location` or an empty string if
nothing found or an error occurred.
"""
try:
return _detect(location, DETECT_MIME)
    except Exception:
# TODO: log errors
return ''
def encoding(location):
""""
Return the detected encoding for file at `location` or an empty string.
Raise an exception on errors.
"""
return _detect(location, DETECT_ENC)
def _detect(location, flags):
""""
Return the detected type using `flags` of file at `location` or an empty
string. Raise an exception on errors.
"""
try:
detector = detectors[flags]
except KeyError:
detector = Detector(flags=flags)
detectors[flags] = detector
val = detector.get(location)
val = val or ''
val = val.decode('ascii', 'ignore').strip()
return ' '.join(val.split())
class MagicException(Exception):
pass
class Detector(object):
def __init__(self, flags, magic_file=magic_db):
"""
Create a new libmagic detector.
flags - the libmagic flags
magic_file - use a mime database other than the vendored default
"""
self.flags = flags
self.cookie = _magic_open(self.flags)
_magic_load(self.cookie, magic_file)
def get(self, location):
"""
Return the magic type info from a file at `location`. The value
returned depends on the flags passed to the object. If this fails
attempt to get it using a UTF-encoded location or from loading the
first 16K of the file. Raise a MagicException on error.
"""
assert location
try:
# first use the path as is
return _magic_file(self.cookie, location)
        except Exception:
# then try to get a utf-8 encoded path: Rationale:
# https://docs.python.org/2/library/ctypes.html#ctypes.set_conversion_mode ctypes
# encode strings to byte as ASCII or MBCS depending on the OS The
# location string may therefore be mangled and the file not accessible
# anymore by libmagic in some cases.
try:
uloc = location.encode('utf-8')
return _magic_file(self.cookie, uloc)
            except Exception:
# if all fails, read the start of the file instead
with open(location) as fd:
buf = fd.read(16384)
return _magic_buffer(self.cookie, buf, len(buf))
def __del__(self):
"""
During shutdown magic_close may have been cleared already so make sure
it exists before using it.
"""
if self.cookie and _magic_close:
_magic_close(self.cookie)
def load_lib():
"""
Return the loaded libmagic shared library object from vendored paths.
"""
root_dir = command.get_base_dirs(bin_dir)[0]
_bin_dir, lib_dir = command.get_bin_lib_dirs(root_dir)
magic_so = os.path.join(lib_dir, 'libmagic' + system.lib_ext)
# add lib path to the front of the PATH env var
new_path = os.pathsep.join([lib_dir, os.environ['PATH']])
os.environ['PATH'] = new_path
if os.path.exists(magic_so):
lib = ctypes.CDLL(magic_so)
if lib and lib._name:
return lib
raise ImportError('Failed to load libmagic from %(magic_so)r' % locals())
# Main ctypes proxy
libmagic = load_lib()
def check_error(result, func, args): # @UnusedVariable
"""
ctypes error handler/checker: Check for errors and raise an exception or
return the result otherwise.
"""
if result is None or result < 0 or str(result).startswith('cannot open'):
err = _magic_error(args[0])
raise MagicException(err)
else:
return result
# ctypes functions aliases.
_magic_open = libmagic.magic_open
_magic_open.restype = ctypes.c_void_p
_magic_open.argtypes = [ctypes.c_int]
_magic_close = libmagic.magic_close
_magic_close.restype = None
_magic_close.argtypes = [ctypes.c_void_p]
_magic_error = libmagic.magic_error
_magic_error.restype = ctypes.c_char_p
_magic_error.argtypes = [ctypes.c_void_p]
_magic_file = libmagic.magic_file
_magic_file.restype = ctypes.c_char_p
_magic_file.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
_magic_file.errcheck = check_error
_magic_buffer = libmagic.magic_buffer
_magic_buffer.restype = ctypes.c_char_p
_magic_buffer.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t]
_magic_buffer.errcheck = check_error
_magic_load = libmagic.magic_load
_magic_load.restype = ctypes.c_int
_magic_load.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
_magic_load.errcheck = check_error
| yasharmaster/scancode-toolkit | src/typecode/magic2.py | Python | apache-2.0 | 8,508 | ["VisIt"] | 552bcb74ffb24f46463d4161b0b206f47daf9fdb0947ba56cdd82d3128727c1c |
try:
from __builtin__ import unicode as str
except ImportError:
pass
import os
import sys
import time as timer
import math
import traceback
try:
import Queue as queue
except ImportError:
import queue
import logging
import multiprocessing
import numpy as np
import netCDF4
from paegan.location4d import Location4D
from paegan.transport.utils.asatransport import AsaTransport
from paegan.transport.shoreline import Shoreline
from paegan.transport.bathymetry import Bathymetry
from paegan.transport.exceptions import CachingDataControllerError
from paegan.cdm.dataset import CommonDataset
from paegan.transport.forcers import BaseForcer
from paegan.logger import logger
class Consumer(multiprocessing.Process):
def __init__(self, task_queue, result_queue, n_run, nproc_lock, active=True, get_data=None, **kwargs):
"""
This is the process class that does all the handling of queued tasks
"""
multiprocessing.Process.__init__(self, **kwargs)
self.task_queue = task_queue
self.result_queue = result_queue
self.n_run = n_run
self.nproc_lock = nproc_lock
self.active = active
self.get_data = get_data
def run(self):
while True:
try:
next_task = self.task_queue.get(True, 10)
except queue.Empty:
logger.info("No tasks left to complete, closing %s" % self.name)
break
else:
answer = (None, None)
try:
answer = (1, next_task(self.active))
except Exception:
logger.exception("Disabling Error")
if isinstance(next_task, CachingDataController):
answer = (-2, "CachingDataController")
# Tell the particles that the CachingDataController is releasing file
self.get_data.value = False
# The data controller has died, so don't process any more tasks
self.active.value = False
elif isinstance(next_task, BaseForcer):
answer = (-1, next_task.particle)
else:
logger.warn("Strange task raised an exception: %s" % str(next_task.__class__))
answer = (None, None)
finally:
self.result_queue.put(answer)
self.nproc_lock.acquire()
self.n_run.value = self.n_run.value - 1
self.nproc_lock.release()
self.task_queue.task_done()
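# A minimal wiring sketch for Consumer (hypothetical names; the real setup
# lives in the model controller):
#
#   tasks = multiprocessing.JoinableQueue()
#   results = multiprocessing.Queue()
#   n_run = multiprocessing.Value('i', nproc)
#   nproc_lock = multiprocessing.Lock()
#   active = multiprocessing.Value('b', True)
#   workers = [Consumer(tasks, results, n_run, nproc_lock, active=active)
#              for _ in range(nproc)]
#   for w in workers:
#       w.start()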
class CachingDataController(object):
def __init__(self, hydrodataset, common_variables, n_run, get_data, write_lock, has_write_lock, read_lock, read_count,
time_chunk, horiz_chunk, times, start_time, point_get, start, **kwargs):
"""
The data controller controls the updating of the
local netcdf data cache
"""
assert "cache_path" in kwargs
self.cache_path = kwargs["cache_path"]
self.caching = kwargs.get("caching", True)
self.hydrodataset = hydrodataset
if self.cache_path == self.hydrodataset and self.caching is True:
raise CachingDataControllerError("Caching is set to True but the cache path and data path are the same. Refusing to overwrite the data path.")
self.n_run = n_run
self.get_data = get_data
self.write_lock = write_lock
self.has_write_lock = has_write_lock
self.read_lock = read_lock
self.read_count = read_count
self.inds = None # np.arange(init_size+1)
self.time_size = time_chunk
self.horiz_size = horiz_chunk
self.point_get = point_get
self.start_time = start_time
self.times = times
self.start = start
# Set common variable names
self.uname = common_variables.get("u", None)
self.vname = common_variables.get("v", None)
self.wname = common_variables.get("w", None)
self.temp_name = common_variables.get("temp", None)
self.salt_name = common_variables.get("salt", None)
self.xname = common_variables.get("x", None)
self.yname = common_variables.get("y", None)
self.zname = common_variables.get("z", None)
self.tname = common_variables.get("time", None)
def get_remote_data(self, localvars, remotevars, inds, shape):
"""
Method that does the updating of local netcdf cache
with remote data
"""
# If user specifies 'all' then entire xy domain is
# grabbed, default is 4, specified in the model controller
if self.horiz_size == 'all':
y, y_1 = 0, shape[-2]
x, x_1 = 0, shape[-1]
else:
r = self.horiz_size
x, x_1 = self.point_get.value[2]-r, self.point_get.value[2]+r+1
y, y_1 = self.point_get.value[1]-r, self.point_get.value[1]+r+1
x, x_1 = x[0], x_1[0]
y, y_1 = y[0], y_1[0]
if y < 0:
y = 0
if x < 0:
x = 0
if y_1 > shape[-2]:
y_1 = shape[-2]
if x_1 > shape[-1]:
x_1 = shape[-1]
# Update domain variable for where we will add data
domain = self.local.variables['domain']
if len(shape) == 4:
domain[inds[0]:inds[-1]+1, 0:shape[1], y:y_1, x:x_1] = np.ones((inds[-1]+1-inds[0], shape[1], y_1-y, x_1-x))
elif len(shape) == 3:
domain[inds[0]:inds[-1]+1, y:y_1, x:x_1] = np.ones((inds[-1]+1-inds[0], y_1-y, x_1-x))
# Update the local variables with remote data
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Filling cache with: Time - %s:%s, Lat - %s:%s, Lon - %s:%s" % (str(inds[0]), str(inds[-1]+1), str(y), str(y_1), str(x), str(x_1)))
for local, remote in zip(localvars, remotevars):
if len(shape) == 4:
local[inds[0]:inds[-1]+1, 0:shape[1], y:y_1, x:x_1] = remote[inds[0]:inds[-1]+1, 0:shape[1], y:y_1, x:x_1]
else:
local[inds[0]:inds[-1]+1, y:y_1, x:x_1] = remote[inds[0]:inds[-1]+1, y:y_1, x:x_1]
def __call__(self, active):
c = 0
self.dataset = CommonDataset.open(self.hydrodataset)
self.remote = self.dataset.nc
# Calculate the datetimes of the model timesteps like
# the particle objects do, so we can figure out unique
# time indices
modelTimestep, newtimes = AsaTransport.get_time_objects_from_model_timesteps(self.times, start=self.start_time)
timevar = self.dataset.gettimevar(self.uname)
# Don't need to grab the last datetime, as it is not needed for forcing, only
# for setting the time of the final particle forcing
time_indexs = timevar.nearest_index(newtimes[0:-1], select='before')
# Have to make sure that we get the plus 1 for the
# linear interpolation of u,v,w,temp,salt
self.inds = np.unique(time_indexs)
self.inds = np.append(self.inds, self.inds.max()+1)
# While there is at least 1 particle still running,
# stay alive, if not break
while self.n_run.value > 1:
if self.caching is False:
logger.debug("Caching is False, not doing much. Just hanging out until all of the particles finish.")
timer.sleep(10)
continue
# If particle asks for data, do the following
if self.get_data.value is True:
logger.debug("Particle asked for data!")
# Wait for particles to get out
while True:
self.read_lock.acquire()
logger.debug("Read count: %d" % self.read_count.value)
if self.read_count.value > 0:
logger.debug("Waiting for write lock on cache file (particles must stop reading)...")
self.read_lock.release()
timer.sleep(2)
else:
break
# Get write lock on the file. Already have read lock.
self.write_lock.acquire()
self.has_write_lock.value = os.getpid()
if c == 0:
logger.debug("Creating cache file")
try:
# Open local cache for writing, overwrites
# existing file with same name
self.local = netCDF4.Dataset(self.cache_path, 'w')
indices = self.dataset.get_indices(self.uname, timeinds=[np.asarray([0])], point=self.start)
self.point_get.value = [self.inds[0], indices[-2], indices[-1]]
# Create dimensions for u and v variables
self.local.createDimension('time', None)
self.local.createDimension('level', None)
self.local.createDimension('x', None)
self.local.createDimension('y', None)
# Create 3d or 4d u and v variables
if self.remote.variables[self.uname].ndim == 4:
self.ndim = 4
dimensions = ('time', 'level', 'y', 'x')
coordinates = "time z lon lat"
elif self.remote.variables[self.uname].ndim == 3:
self.ndim = 3
dimensions = ('time', 'y', 'x')
coordinates = "time lon lat"
shape = self.remote.variables[self.uname].shape
# If there is no FillValue defined in the dataset, use np.nan.
# Sometimes it will work out correctly and other times we will
# have a huge cache file.
try:
fill = self.remote.variables[self.uname].missing_value
except Exception:
fill = np.nan
# Create domain variable that specifies
# where there is data geographically/by time
# and where there is not data,
# Used for testing if particle needs to
# ask cache to update
domain = self.local.createVariable('domain', 'i', dimensions, zlib=False, fill_value=0)
domain.coordinates = coordinates
# Create local u and v variables
u = self.local.createVariable('u', 'f', dimensions, zlib=False, fill_value=fill)
v = self.local.createVariable('v', 'f', dimensions, zlib=False, fill_value=fill)
v.coordinates = coordinates
u.coordinates = coordinates
localvars = [u, v, ]
remotevars = [self.remote.variables[self.uname], self.remote.variables[self.vname]]
# Create local w variable
if self.wname is not None:
w = self.local.createVariable('w', 'f', dimensions, zlib=False, fill_value=fill)
w.coordinates = coordinates
localvars.append(w)
remotevars.append(self.remote.variables[self.wname])
if self.temp_name is not None and self.salt_name is not None:
# Create local temp and salt vars
temp = self.local.createVariable('temp', 'f', dimensions, zlib=False, fill_value=fill)
salt = self.local.createVariable('salt', 'f', dimensions, zlib=False, fill_value=fill)
temp.coordinates = coordinates
salt.coordinates = coordinates
localvars.append(temp)
localvars.append(salt)
remotevars.append(self.remote.variables[self.temp_name])
remotevars.append(self.remote.variables[self.salt_name])
# Create local lat/lon coordinate variables
if self.remote.variables[self.xname].ndim == 2:
lon = self.local.createVariable('lon', 'f', ("y", "x"), zlib=False)
lon[:] = self.remote.variables[self.xname][:, :]
lat = self.local.createVariable('lat', 'f', ("y", "x"), zlib=False)
lat[:] = self.remote.variables[self.yname][:, :]
if self.remote.variables[self.xname].ndim == 1:
lon = self.local.createVariable('lon', 'f', ("x"), zlib=False)
lon[:] = self.remote.variables[self.xname][:]
lat = self.local.createVariable('lat', 'f', ("y"), zlib=False)
lat[:] = self.remote.variables[self.yname][:]
# Create local z variable
if self.zname is not None:
if self.remote.variables[self.zname].ndim == 4:
z = self.local.createVariable('z', 'f', ("time", "level", "y", "x"), zlib=False)
remotez = self.remote.variables[self.zname]
localvars.append(z)
remotevars.append(remotez)
elif self.remote.variables[self.zname].ndim == 3:
z = self.local.createVariable('z', 'f', ("level", "y", "x"), zlib=False)
z[:] = self.remote.variables[self.zname][:, :, :]
elif self.remote.variables[self.zname].ndim == 1:
z = self.local.createVariable('z', 'f', ("level",), zlib=False)
z[:] = self.remote.variables[self.zname][:]
# Create local time variable
time = self.local.createVariable('time', 'f8', ("time",), zlib=False)
if self.tname is not None:
time[:] = self.remote.variables[self.tname][self.inds]
if self.point_get.value[0]+self.time_size > np.max(self.inds):
current_inds = np.arange(self.point_get.value[0], np.max(self.inds)+1)
else:
current_inds = np.arange(self.point_get.value[0], self.point_get.value[0] + self.time_size)
# Get data from remote dataset and add
# to local cache.
# Try 20 times on the first attempt
current_attempt = 1
max_attempts = 20
while True:
try:
assert current_attempt <= max_attempts
self.get_remote_data(localvars, remotevars, current_inds, shape)
except AssertionError:
raise
                            except Exception:
logger.warn("CachingDataController failed to get remote data. Trying again in 20 seconds. %s attempts left." % str(max_attempts-current_attempt))
logger.exception("Data Access Error")
timer.sleep(20)
current_attempt += 1
else:
break
c += 1
except (Exception, AssertionError):
logger.error("CachingDataController failed to get data (first request)")
raise
finally:
self.local.sync()
self.local.close()
self.has_write_lock.value = -1
self.write_lock.release()
self.get_data.value = False
self.read_lock.release()
logger.debug("Done updating cache file, closing file, and releasing locks")
else:
logger.debug("Updating cache file")
try:
# Open local cache dataset for appending
self.local = netCDF4.Dataset(self.cache_path, 'a')
# Create local and remote variable objects
# for the variables of interest
u = self.local.variables['u']
v = self.local.variables['v']
time = self.local.variables['time']
remoteu = self.remote.variables[self.uname]
remotev = self.remote.variables[self.vname]
# Create lists of variable objects for
# the data updater
localvars = [u, v, ]
remotevars = [remoteu, remotev, ]
if self.salt_name is not None and self.temp_name is not None:
salt = self.local.variables['salt']
temp = self.local.variables['temp']
remotesalt = self.remote.variables[self.salt_name]
remotetemp = self.remote.variables[self.temp_name]
localvars.append(salt)
localvars.append(temp)
remotevars.append(remotesalt)
remotevars.append(remotetemp)
if self.wname is not None:
w = self.local.variables['w']
remotew = self.remote.variables[self.wname]
localvars.append(w)
remotevars.append(remotew)
if self.zname is not None:
remotez = self.remote.variables[self.zname]
if remotez.ndim == 4:
z = self.local.variables['z']
localvars.append(z)
remotevars.append(remotez)
                        if self.tname is not None:
                            remotetime = self.remote.variables[self.tname]
                            time[self.inds] = remotetime[self.inds]
if self.point_get.value[0]+self.time_size > np.max(self.inds):
current_inds = np.arange(self.point_get.value[0], np.max(self.inds)+1)
else:
current_inds = np.arange(self.point_get.value[0], self.point_get.value[0] + self.time_size)
# Get data from remote dataset and add
# to local cache
while True:
try:
self.get_remote_data(localvars, remotevars, current_inds, shape)
                            except Exception:
logger.warn("CachingDataController failed to get remote data. Trying again in 30 seconds")
timer.sleep(30)
else:
break
c += 1
except Exception:
logger.error("CachingDataController failed to get data (not first request)")
raise
finally:
self.local.sync()
self.local.close()
self.has_write_lock.value = -1
self.write_lock.release()
self.get_data.value = False
self.read_lock.release()
logger.debug("Done updating cache file, closing file, and releasing locks")
else:
logger.debug("Particles are still running, waiting for them to request data...")
timer.sleep(2)
self.dataset.closenc()
return "CachingDataController"
| axiom-data-science/paegan-transport | paegan/transport/parallel_manager.py | Python | gpl-3.0 | 20,674 | ["NetCDF"] | a04a970dca281cd2e6b3e5352f0e70f3a70240af95d77d647890240a4e375446 |
# coding: utf-8
from __future__ import unicode_literals
import base64
import binascii
import collections
import ctypes
import email
import getpass
import io
import itertools
import optparse
import os
import platform
import re
import shlex
import shutil
import socket
import struct
import subprocess
import sys
import xml.etree.ElementTree
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
import urllib as compat_urllib_parse
try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
try:
import urllib.parse as compat_urlparse
except ImportError: # Python 2
import urlparse as compat_urlparse
try:
import urllib.response as compat_urllib_response
except ImportError: # Python 2
import urllib as compat_urllib_response
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
import http.cookies as compat_cookies
except ImportError: # Python 2
import Cookie as compat_cookies
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
try: # Python >= 3.3
compat_html_entities_html5 = compat_html_entities.html5
except AttributeError:
# Copied from CPython 3.5.1 html/entities.py
compat_html_entities_html5 = {
'Aacute': '\xc1',
'aacute': '\xe1',
'Aacute;': '\xc1',
'aacute;': '\xe1',
'Abreve;': '\u0102',
'abreve;': '\u0103',
'ac;': '\u223e',
'acd;': '\u223f',
'acE;': '\u223e\u0333',
'Acirc': '\xc2',
'acirc': '\xe2',
'Acirc;': '\xc2',
'acirc;': '\xe2',
'acute': '\xb4',
'acute;': '\xb4',
'Acy;': '\u0410',
'acy;': '\u0430',
'AElig': '\xc6',
'aelig': '\xe6',
'AElig;': '\xc6',
'aelig;': '\xe6',
'af;': '\u2061',
'Afr;': '\U0001d504',
'afr;': '\U0001d51e',
'Agrave': '\xc0',
'agrave': '\xe0',
'Agrave;': '\xc0',
'agrave;': '\xe0',
'alefsym;': '\u2135',
'aleph;': '\u2135',
'Alpha;': '\u0391',
'alpha;': '\u03b1',
'Amacr;': '\u0100',
'amacr;': '\u0101',
'amalg;': '\u2a3f',
'AMP': '&',
'amp': '&',
'AMP;': '&',
'amp;': '&',
'And;': '\u2a53',
'and;': '\u2227',
'andand;': '\u2a55',
'andd;': '\u2a5c',
'andslope;': '\u2a58',
'andv;': '\u2a5a',
'ang;': '\u2220',
'ange;': '\u29a4',
'angle;': '\u2220',
'angmsd;': '\u2221',
'angmsdaa;': '\u29a8',
'angmsdab;': '\u29a9',
'angmsdac;': '\u29aa',
'angmsdad;': '\u29ab',
'angmsdae;': '\u29ac',
'angmsdaf;': '\u29ad',
'angmsdag;': '\u29ae',
'angmsdah;': '\u29af',
'angrt;': '\u221f',
'angrtvb;': '\u22be',
'angrtvbd;': '\u299d',
'angsph;': '\u2222',
'angst;': '\xc5',
'angzarr;': '\u237c',
'Aogon;': '\u0104',
'aogon;': '\u0105',
'Aopf;': '\U0001d538',
'aopf;': '\U0001d552',
'ap;': '\u2248',
'apacir;': '\u2a6f',
'apE;': '\u2a70',
'ape;': '\u224a',
'apid;': '\u224b',
'apos;': "'",
'ApplyFunction;': '\u2061',
'approx;': '\u2248',
'approxeq;': '\u224a',
'Aring': '\xc5',
'aring': '\xe5',
'Aring;': '\xc5',
'aring;': '\xe5',
'Ascr;': '\U0001d49c',
'ascr;': '\U0001d4b6',
'Assign;': '\u2254',
'ast;': '*',
'asymp;': '\u2248',
'asympeq;': '\u224d',
'Atilde': '\xc3',
'atilde': '\xe3',
'Atilde;': '\xc3',
'atilde;': '\xe3',
'Auml': '\xc4',
'auml': '\xe4',
'Auml;': '\xc4',
'auml;': '\xe4',
'awconint;': '\u2233',
'awint;': '\u2a11',
'backcong;': '\u224c',
'backepsilon;': '\u03f6',
'backprime;': '\u2035',
'backsim;': '\u223d',
'backsimeq;': '\u22cd',
'Backslash;': '\u2216',
'Barv;': '\u2ae7',
'barvee;': '\u22bd',
'Barwed;': '\u2306',
'barwed;': '\u2305',
'barwedge;': '\u2305',
'bbrk;': '\u23b5',
'bbrktbrk;': '\u23b6',
'bcong;': '\u224c',
'Bcy;': '\u0411',
'bcy;': '\u0431',
'bdquo;': '\u201e',
'becaus;': '\u2235',
'Because;': '\u2235',
'because;': '\u2235',
'bemptyv;': '\u29b0',
'bepsi;': '\u03f6',
'bernou;': '\u212c',
'Bernoullis;': '\u212c',
'Beta;': '\u0392',
'beta;': '\u03b2',
'beth;': '\u2136',
'between;': '\u226c',
'Bfr;': '\U0001d505',
'bfr;': '\U0001d51f',
'bigcap;': '\u22c2',
'bigcirc;': '\u25ef',
'bigcup;': '\u22c3',
'bigodot;': '\u2a00',
'bigoplus;': '\u2a01',
'bigotimes;': '\u2a02',
'bigsqcup;': '\u2a06',
'bigstar;': '\u2605',
'bigtriangledown;': '\u25bd',
'bigtriangleup;': '\u25b3',
'biguplus;': '\u2a04',
'bigvee;': '\u22c1',
'bigwedge;': '\u22c0',
'bkarow;': '\u290d',
'blacklozenge;': '\u29eb',
'blacksquare;': '\u25aa',
'blacktriangle;': '\u25b4',
'blacktriangledown;': '\u25be',
'blacktriangleleft;': '\u25c2',
'blacktriangleright;': '\u25b8',
'blank;': '\u2423',
'blk12;': '\u2592',
'blk14;': '\u2591',
'blk34;': '\u2593',
'block;': '\u2588',
'bne;': '=\u20e5',
'bnequiv;': '\u2261\u20e5',
'bNot;': '\u2aed',
'bnot;': '\u2310',
'Bopf;': '\U0001d539',
'bopf;': '\U0001d553',
'bot;': '\u22a5',
'bottom;': '\u22a5',
'bowtie;': '\u22c8',
'boxbox;': '\u29c9',
'boxDL;': '\u2557',
'boxDl;': '\u2556',
'boxdL;': '\u2555',
'boxdl;': '\u2510',
'boxDR;': '\u2554',
'boxDr;': '\u2553',
'boxdR;': '\u2552',
'boxdr;': '\u250c',
'boxH;': '\u2550',
'boxh;': '\u2500',
'boxHD;': '\u2566',
'boxHd;': '\u2564',
'boxhD;': '\u2565',
'boxhd;': '\u252c',
'boxHU;': '\u2569',
'boxHu;': '\u2567',
'boxhU;': '\u2568',
'boxhu;': '\u2534',
'boxminus;': '\u229f',
'boxplus;': '\u229e',
'boxtimes;': '\u22a0',
'boxUL;': '\u255d',
'boxUl;': '\u255c',
'boxuL;': '\u255b',
'boxul;': '\u2518',
'boxUR;': '\u255a',
'boxUr;': '\u2559',
'boxuR;': '\u2558',
'boxur;': '\u2514',
'boxV;': '\u2551',
'boxv;': '\u2502',
'boxVH;': '\u256c',
'boxVh;': '\u256b',
'boxvH;': '\u256a',
'boxvh;': '\u253c',
'boxVL;': '\u2563',
'boxVl;': '\u2562',
'boxvL;': '\u2561',
'boxvl;': '\u2524',
'boxVR;': '\u2560',
'boxVr;': '\u255f',
'boxvR;': '\u255e',
'boxvr;': '\u251c',
'bprime;': '\u2035',
'Breve;': '\u02d8',
'breve;': '\u02d8',
'brvbar': '\xa6',
'brvbar;': '\xa6',
'Bscr;': '\u212c',
'bscr;': '\U0001d4b7',
'bsemi;': '\u204f',
'bsim;': '\u223d',
'bsime;': '\u22cd',
'bsol;': '\\',
'bsolb;': '\u29c5',
'bsolhsub;': '\u27c8',
'bull;': '\u2022',
'bullet;': '\u2022',
'bump;': '\u224e',
'bumpE;': '\u2aae',
'bumpe;': '\u224f',
'Bumpeq;': '\u224e',
'bumpeq;': '\u224f',
'Cacute;': '\u0106',
'cacute;': '\u0107',
'Cap;': '\u22d2',
'cap;': '\u2229',
'capand;': '\u2a44',
'capbrcup;': '\u2a49',
'capcap;': '\u2a4b',
'capcup;': '\u2a47',
'capdot;': '\u2a40',
'CapitalDifferentialD;': '\u2145',
'caps;': '\u2229\ufe00',
'caret;': '\u2041',
'caron;': '\u02c7',
'Cayleys;': '\u212d',
'ccaps;': '\u2a4d',
'Ccaron;': '\u010c',
'ccaron;': '\u010d',
'Ccedil': '\xc7',
'ccedil': '\xe7',
'Ccedil;': '\xc7',
'ccedil;': '\xe7',
'Ccirc;': '\u0108',
'ccirc;': '\u0109',
'Cconint;': '\u2230',
'ccups;': '\u2a4c',
'ccupssm;': '\u2a50',
'Cdot;': '\u010a',
'cdot;': '\u010b',
'cedil': '\xb8',
'cedil;': '\xb8',
'Cedilla;': '\xb8',
'cemptyv;': '\u29b2',
'cent': '\xa2',
'cent;': '\xa2',
'CenterDot;': '\xb7',
'centerdot;': '\xb7',
'Cfr;': '\u212d',
'cfr;': '\U0001d520',
'CHcy;': '\u0427',
'chcy;': '\u0447',
'check;': '\u2713',
'checkmark;': '\u2713',
'Chi;': '\u03a7',
'chi;': '\u03c7',
'cir;': '\u25cb',
'circ;': '\u02c6',
'circeq;': '\u2257',
'circlearrowleft;': '\u21ba',
'circlearrowright;': '\u21bb',
'circledast;': '\u229b',
'circledcirc;': '\u229a',
'circleddash;': '\u229d',
'CircleDot;': '\u2299',
'circledR;': '\xae',
'circledS;': '\u24c8',
'CircleMinus;': '\u2296',
'CirclePlus;': '\u2295',
'CircleTimes;': '\u2297',
'cirE;': '\u29c3',
'cire;': '\u2257',
'cirfnint;': '\u2a10',
'cirmid;': '\u2aef',
'cirscir;': '\u29c2',
'ClockwiseContourIntegral;': '\u2232',
'CloseCurlyDoubleQuote;': '\u201d',
'CloseCurlyQuote;': '\u2019',
'clubs;': '\u2663',
'clubsuit;': '\u2663',
'Colon;': '\u2237',
'colon;': ':',
'Colone;': '\u2a74',
'colone;': '\u2254',
'coloneq;': '\u2254',
'comma;': ',',
'commat;': '@',
'comp;': '\u2201',
'compfn;': '\u2218',
'complement;': '\u2201',
'complexes;': '\u2102',
'cong;': '\u2245',
'congdot;': '\u2a6d',
'Congruent;': '\u2261',
'Conint;': '\u222f',
'conint;': '\u222e',
'ContourIntegral;': '\u222e',
'Copf;': '\u2102',
'copf;': '\U0001d554',
'coprod;': '\u2210',
'Coproduct;': '\u2210',
'COPY': '\xa9',
'copy': '\xa9',
'COPY;': '\xa9',
'copy;': '\xa9',
'copysr;': '\u2117',
'CounterClockwiseContourIntegral;': '\u2233',
'crarr;': '\u21b5',
'Cross;': '\u2a2f',
'cross;': '\u2717',
'Cscr;': '\U0001d49e',
'cscr;': '\U0001d4b8',
'csub;': '\u2acf',
'csube;': '\u2ad1',
'csup;': '\u2ad0',
'csupe;': '\u2ad2',
'ctdot;': '\u22ef',
'cudarrl;': '\u2938',
'cudarrr;': '\u2935',
'cuepr;': '\u22de',
'cuesc;': '\u22df',
'cularr;': '\u21b6',
'cularrp;': '\u293d',
'Cup;': '\u22d3',
'cup;': '\u222a',
'cupbrcap;': '\u2a48',
'CupCap;': '\u224d',
'cupcap;': '\u2a46',
'cupcup;': '\u2a4a',
'cupdot;': '\u228d',
'cupor;': '\u2a45',
'cups;': '\u222a\ufe00',
'curarr;': '\u21b7',
'curarrm;': '\u293c',
'curlyeqprec;': '\u22de',
'curlyeqsucc;': '\u22df',
'curlyvee;': '\u22ce',
'curlywedge;': '\u22cf',
'curren': '\xa4',
'curren;': '\xa4',
'curvearrowleft;': '\u21b6',
'curvearrowright;': '\u21b7',
'cuvee;': '\u22ce',
'cuwed;': '\u22cf',
'cwconint;': '\u2232',
'cwint;': '\u2231',
'cylcty;': '\u232d',
'Dagger;': '\u2021',
'dagger;': '\u2020',
'daleth;': '\u2138',
'Darr;': '\u21a1',
'dArr;': '\u21d3',
'darr;': '\u2193',
'dash;': '\u2010',
'Dashv;': '\u2ae4',
'dashv;': '\u22a3',
'dbkarow;': '\u290f',
'dblac;': '\u02dd',
'Dcaron;': '\u010e',
'dcaron;': '\u010f',
'Dcy;': '\u0414',
'dcy;': '\u0434',
'DD;': '\u2145',
'dd;': '\u2146',
'ddagger;': '\u2021',
'ddarr;': '\u21ca',
'DDotrahd;': '\u2911',
'ddotseq;': '\u2a77',
'deg': '\xb0',
'deg;': '\xb0',
'Del;': '\u2207',
'Delta;': '\u0394',
'delta;': '\u03b4',
'demptyv;': '\u29b1',
'dfisht;': '\u297f',
'Dfr;': '\U0001d507',
'dfr;': '\U0001d521',
'dHar;': '\u2965',
'dharl;': '\u21c3',
'dharr;': '\u21c2',
'DiacriticalAcute;': '\xb4',
'DiacriticalDot;': '\u02d9',
'DiacriticalDoubleAcute;': '\u02dd',
'DiacriticalGrave;': '`',
'DiacriticalTilde;': '\u02dc',
'diam;': '\u22c4',
'Diamond;': '\u22c4',
'diamond;': '\u22c4',
'diamondsuit;': '\u2666',
'diams;': '\u2666',
'die;': '\xa8',
'DifferentialD;': '\u2146',
'digamma;': '\u03dd',
'disin;': '\u22f2',
'div;': '\xf7',
'divide': '\xf7',
'divide;': '\xf7',
'divideontimes;': '\u22c7',
'divonx;': '\u22c7',
'DJcy;': '\u0402',
'djcy;': '\u0452',
'dlcorn;': '\u231e',
'dlcrop;': '\u230d',
'dollar;': '$',
'Dopf;': '\U0001d53b',
'dopf;': '\U0001d555',
'Dot;': '\xa8',
'dot;': '\u02d9',
'DotDot;': '\u20dc',
'doteq;': '\u2250',
'doteqdot;': '\u2251',
'DotEqual;': '\u2250',
'dotminus;': '\u2238',
'dotplus;': '\u2214',
'dotsquare;': '\u22a1',
'doublebarwedge;': '\u2306',
'DoubleContourIntegral;': '\u222f',
'DoubleDot;': '\xa8',
'DoubleDownArrow;': '\u21d3',
'DoubleLeftArrow;': '\u21d0',
'DoubleLeftRightArrow;': '\u21d4',
'DoubleLeftTee;': '\u2ae4',
'DoubleLongLeftArrow;': '\u27f8',
'DoubleLongLeftRightArrow;': '\u27fa',
'DoubleLongRightArrow;': '\u27f9',
'DoubleRightArrow;': '\u21d2',
'DoubleRightTee;': '\u22a8',
'DoubleUpArrow;': '\u21d1',
'DoubleUpDownArrow;': '\u21d5',
'DoubleVerticalBar;': '\u2225',
'DownArrow;': '\u2193',
'Downarrow;': '\u21d3',
'downarrow;': '\u2193',
'DownArrowBar;': '\u2913',
'DownArrowUpArrow;': '\u21f5',
'DownBreve;': '\u0311',
'downdownarrows;': '\u21ca',
'downharpoonleft;': '\u21c3',
'downharpoonright;': '\u21c2',
'DownLeftRightVector;': '\u2950',
'DownLeftTeeVector;': '\u295e',
'DownLeftVector;': '\u21bd',
'DownLeftVectorBar;': '\u2956',
'DownRightTeeVector;': '\u295f',
'DownRightVector;': '\u21c1',
'DownRightVectorBar;': '\u2957',
'DownTee;': '\u22a4',
'DownTeeArrow;': '\u21a7',
'drbkarow;': '\u2910',
'drcorn;': '\u231f',
'drcrop;': '\u230c',
'Dscr;': '\U0001d49f',
'dscr;': '\U0001d4b9',
'DScy;': '\u0405',
'dscy;': '\u0455',
'dsol;': '\u29f6',
'Dstrok;': '\u0110',
'dstrok;': '\u0111',
'dtdot;': '\u22f1',
'dtri;': '\u25bf',
'dtrif;': '\u25be',
'duarr;': '\u21f5',
'duhar;': '\u296f',
'dwangle;': '\u29a6',
'DZcy;': '\u040f',
'dzcy;': '\u045f',
'dzigrarr;': '\u27ff',
'Eacute': '\xc9',
'eacute': '\xe9',
'Eacute;': '\xc9',
'eacute;': '\xe9',
'easter;': '\u2a6e',
'Ecaron;': '\u011a',
'ecaron;': '\u011b',
'ecir;': '\u2256',
'Ecirc': '\xca',
'ecirc': '\xea',
'Ecirc;': '\xca',
'ecirc;': '\xea',
'ecolon;': '\u2255',
'Ecy;': '\u042d',
'ecy;': '\u044d',
'eDDot;': '\u2a77',
'Edot;': '\u0116',
'eDot;': '\u2251',
'edot;': '\u0117',
'ee;': '\u2147',
'efDot;': '\u2252',
'Efr;': '\U0001d508',
'efr;': '\U0001d522',
'eg;': '\u2a9a',
'Egrave': '\xc8',
'egrave': '\xe8',
'Egrave;': '\xc8',
'egrave;': '\xe8',
'egs;': '\u2a96',
'egsdot;': '\u2a98',
'el;': '\u2a99',
'Element;': '\u2208',
'elinters;': '\u23e7',
'ell;': '\u2113',
'els;': '\u2a95',
'elsdot;': '\u2a97',
'Emacr;': '\u0112',
'emacr;': '\u0113',
'empty;': '\u2205',
'emptyset;': '\u2205',
'EmptySmallSquare;': '\u25fb',
'emptyv;': '\u2205',
'EmptyVerySmallSquare;': '\u25ab',
'emsp13;': '\u2004',
'emsp14;': '\u2005',
'emsp;': '\u2003',
'ENG;': '\u014a',
'eng;': '\u014b',
'ensp;': '\u2002',
'Eogon;': '\u0118',
'eogon;': '\u0119',
'Eopf;': '\U0001d53c',
'eopf;': '\U0001d556',
'epar;': '\u22d5',
'eparsl;': '\u29e3',
'eplus;': '\u2a71',
'epsi;': '\u03b5',
'Epsilon;': '\u0395',
'epsilon;': '\u03b5',
'epsiv;': '\u03f5',
'eqcirc;': '\u2256',
'eqcolon;': '\u2255',
'eqsim;': '\u2242',
'eqslantgtr;': '\u2a96',
'eqslantless;': '\u2a95',
'Equal;': '\u2a75',
'equals;': '=',
'EqualTilde;': '\u2242',
'equest;': '\u225f',
'Equilibrium;': '\u21cc',
'equiv;': '\u2261',
'equivDD;': '\u2a78',
'eqvparsl;': '\u29e5',
'erarr;': '\u2971',
'erDot;': '\u2253',
'Escr;': '\u2130',
'escr;': '\u212f',
'esdot;': '\u2250',
'Esim;': '\u2a73',
'esim;': '\u2242',
'Eta;': '\u0397',
'eta;': '\u03b7',
'ETH': '\xd0',
'eth': '\xf0',
'ETH;': '\xd0',
'eth;': '\xf0',
'Euml': '\xcb',
'euml': '\xeb',
'Euml;': '\xcb',
'euml;': '\xeb',
'euro;': '\u20ac',
'excl;': '!',
'exist;': '\u2203',
'Exists;': '\u2203',
'expectation;': '\u2130',
'ExponentialE;': '\u2147',
'exponentiale;': '\u2147',
'fallingdotseq;': '\u2252',
'Fcy;': '\u0424',
'fcy;': '\u0444',
'female;': '\u2640',
'ffilig;': '\ufb03',
'fflig;': '\ufb00',
'ffllig;': '\ufb04',
'Ffr;': '\U0001d509',
'ffr;': '\U0001d523',
'filig;': '\ufb01',
'FilledSmallSquare;': '\u25fc',
'FilledVerySmallSquare;': '\u25aa',
'fjlig;': 'fj',
'flat;': '\u266d',
'fllig;': '\ufb02',
'fltns;': '\u25b1',
'fnof;': '\u0192',
'Fopf;': '\U0001d53d',
'fopf;': '\U0001d557',
'ForAll;': '\u2200',
'forall;': '\u2200',
'fork;': '\u22d4',
'forkv;': '\u2ad9',
'Fouriertrf;': '\u2131',
'fpartint;': '\u2a0d',
'frac12': '\xbd',
'frac12;': '\xbd',
'frac13;': '\u2153',
'frac14': '\xbc',
'frac14;': '\xbc',
'frac15;': '\u2155',
'frac16;': '\u2159',
'frac18;': '\u215b',
'frac23;': '\u2154',
'frac25;': '\u2156',
'frac34': '\xbe',
'frac34;': '\xbe',
'frac35;': '\u2157',
'frac38;': '\u215c',
'frac45;': '\u2158',
'frac56;': '\u215a',
'frac58;': '\u215d',
'frac78;': '\u215e',
'frasl;': '\u2044',
'frown;': '\u2322',
'Fscr;': '\u2131',
'fscr;': '\U0001d4bb',
'gacute;': '\u01f5',
'Gamma;': '\u0393',
'gamma;': '\u03b3',
'Gammad;': '\u03dc',
'gammad;': '\u03dd',
'gap;': '\u2a86',
'Gbreve;': '\u011e',
'gbreve;': '\u011f',
'Gcedil;': '\u0122',
'Gcirc;': '\u011c',
'gcirc;': '\u011d',
'Gcy;': '\u0413',
'gcy;': '\u0433',
'Gdot;': '\u0120',
'gdot;': '\u0121',
'gE;': '\u2267',
'ge;': '\u2265',
'gEl;': '\u2a8c',
'gel;': '\u22db',
'geq;': '\u2265',
'geqq;': '\u2267',
'geqslant;': '\u2a7e',
'ges;': '\u2a7e',
'gescc;': '\u2aa9',
'gesdot;': '\u2a80',
'gesdoto;': '\u2a82',
'gesdotol;': '\u2a84',
'gesl;': '\u22db\ufe00',
'gesles;': '\u2a94',
'Gfr;': '\U0001d50a',
'gfr;': '\U0001d524',
'Gg;': '\u22d9',
'gg;': '\u226b',
'ggg;': '\u22d9',
'gimel;': '\u2137',
'GJcy;': '\u0403',
'gjcy;': '\u0453',
'gl;': '\u2277',
'gla;': '\u2aa5',
'glE;': '\u2a92',
'glj;': '\u2aa4',
'gnap;': '\u2a8a',
'gnapprox;': '\u2a8a',
'gnE;': '\u2269',
'gne;': '\u2a88',
'gneq;': '\u2a88',
'gneqq;': '\u2269',
'gnsim;': '\u22e7',
'Gopf;': '\U0001d53e',
'gopf;': '\U0001d558',
'grave;': '`',
'GreaterEqual;': '\u2265',
'GreaterEqualLess;': '\u22db',
'GreaterFullEqual;': '\u2267',
'GreaterGreater;': '\u2aa2',
'GreaterLess;': '\u2277',
'GreaterSlantEqual;': '\u2a7e',
'GreaterTilde;': '\u2273',
'Gscr;': '\U0001d4a2',
'gscr;': '\u210a',
'gsim;': '\u2273',
'gsime;': '\u2a8e',
'gsiml;': '\u2a90',
'GT': '>',
'gt': '>',
'GT;': '>',
'Gt;': '\u226b',
'gt;': '>',
'gtcc;': '\u2aa7',
'gtcir;': '\u2a7a',
'gtdot;': '\u22d7',
'gtlPar;': '\u2995',
'gtquest;': '\u2a7c',
'gtrapprox;': '\u2a86',
'gtrarr;': '\u2978',
'gtrdot;': '\u22d7',
'gtreqless;': '\u22db',
'gtreqqless;': '\u2a8c',
'gtrless;': '\u2277',
'gtrsim;': '\u2273',
'gvertneqq;': '\u2269\ufe00',
'gvnE;': '\u2269\ufe00',
'Hacek;': '\u02c7',
'hairsp;': '\u200a',
'half;': '\xbd',
'hamilt;': '\u210b',
'HARDcy;': '\u042a',
'hardcy;': '\u044a',
'hArr;': '\u21d4',
'harr;': '\u2194',
'harrcir;': '\u2948',
'harrw;': '\u21ad',
'Hat;': '^',
'hbar;': '\u210f',
'Hcirc;': '\u0124',
'hcirc;': '\u0125',
'hearts;': '\u2665',
'heartsuit;': '\u2665',
'hellip;': '\u2026',
'hercon;': '\u22b9',
'Hfr;': '\u210c',
'hfr;': '\U0001d525',
'HilbertSpace;': '\u210b',
'hksearow;': '\u2925',
'hkswarow;': '\u2926',
'hoarr;': '\u21ff',
'homtht;': '\u223b',
'hookleftarrow;': '\u21a9',
'hookrightarrow;': '\u21aa',
'Hopf;': '\u210d',
'hopf;': '\U0001d559',
'horbar;': '\u2015',
'HorizontalLine;': '\u2500',
'Hscr;': '\u210b',
'hscr;': '\U0001d4bd',
'hslash;': '\u210f',
'Hstrok;': '\u0126',
'hstrok;': '\u0127',
'HumpDownHump;': '\u224e',
'HumpEqual;': '\u224f',
'hybull;': '\u2043',
'hyphen;': '\u2010',
'Iacute': '\xcd',
'iacute': '\xed',
'Iacute;': '\xcd',
'iacute;': '\xed',
'ic;': '\u2063',
'Icirc': '\xce',
'icirc': '\xee',
'Icirc;': '\xce',
'icirc;': '\xee',
'Icy;': '\u0418',
'icy;': '\u0438',
'Idot;': '\u0130',
'IEcy;': '\u0415',
'iecy;': '\u0435',
'iexcl': '\xa1',
'iexcl;': '\xa1',
'iff;': '\u21d4',
'Ifr;': '\u2111',
'ifr;': '\U0001d526',
'Igrave': '\xcc',
'igrave': '\xec',
'Igrave;': '\xcc',
'igrave;': '\xec',
'ii;': '\u2148',
'iiiint;': '\u2a0c',
'iiint;': '\u222d',
'iinfin;': '\u29dc',
'iiota;': '\u2129',
'IJlig;': '\u0132',
'ijlig;': '\u0133',
'Im;': '\u2111',
'Imacr;': '\u012a',
'imacr;': '\u012b',
'image;': '\u2111',
'ImaginaryI;': '\u2148',
'imagline;': '\u2110',
'imagpart;': '\u2111',
'imath;': '\u0131',
'imof;': '\u22b7',
'imped;': '\u01b5',
'Implies;': '\u21d2',
'in;': '\u2208',
'incare;': '\u2105',
'infin;': '\u221e',
'infintie;': '\u29dd',
'inodot;': '\u0131',
'Int;': '\u222c',
'int;': '\u222b',
'intcal;': '\u22ba',
'integers;': '\u2124',
'Integral;': '\u222b',
'intercal;': '\u22ba',
'Intersection;': '\u22c2',
'intlarhk;': '\u2a17',
'intprod;': '\u2a3c',
'InvisibleComma;': '\u2063',
'InvisibleTimes;': '\u2062',
'IOcy;': '\u0401',
'iocy;': '\u0451',
'Iogon;': '\u012e',
'iogon;': '\u012f',
'Iopf;': '\U0001d540',
'iopf;': '\U0001d55a',
'Iota;': '\u0399',
'iota;': '\u03b9',
'iprod;': '\u2a3c',
'iquest': '\xbf',
'iquest;': '\xbf',
'Iscr;': '\u2110',
'iscr;': '\U0001d4be',
'isin;': '\u2208',
'isindot;': '\u22f5',
'isinE;': '\u22f9',
'isins;': '\u22f4',
'isinsv;': '\u22f3',
'isinv;': '\u2208',
'it;': '\u2062',
'Itilde;': '\u0128',
'itilde;': '\u0129',
'Iukcy;': '\u0406',
'iukcy;': '\u0456',
'Iuml': '\xcf',
'iuml': '\xef',
'Iuml;': '\xcf',
'iuml;': '\xef',
'Jcirc;': '\u0134',
'jcirc;': '\u0135',
'Jcy;': '\u0419',
'jcy;': '\u0439',
'Jfr;': '\U0001d50d',
'jfr;': '\U0001d527',
'jmath;': '\u0237',
'Jopf;': '\U0001d541',
'jopf;': '\U0001d55b',
'Jscr;': '\U0001d4a5',
'jscr;': '\U0001d4bf',
'Jsercy;': '\u0408',
'jsercy;': '\u0458',
'Jukcy;': '\u0404',
'jukcy;': '\u0454',
'Kappa;': '\u039a',
'kappa;': '\u03ba',
'kappav;': '\u03f0',
'Kcedil;': '\u0136',
'kcedil;': '\u0137',
'Kcy;': '\u041a',
'kcy;': '\u043a',
'Kfr;': '\U0001d50e',
'kfr;': '\U0001d528',
'kgreen;': '\u0138',
'KHcy;': '\u0425',
'khcy;': '\u0445',
'KJcy;': '\u040c',
'kjcy;': '\u045c',
'Kopf;': '\U0001d542',
'kopf;': '\U0001d55c',
'Kscr;': '\U0001d4a6',
'kscr;': '\U0001d4c0',
'lAarr;': '\u21da',
'Lacute;': '\u0139',
'lacute;': '\u013a',
'laemptyv;': '\u29b4',
'lagran;': '\u2112',
'Lambda;': '\u039b',
'lambda;': '\u03bb',
'Lang;': '\u27ea',
'lang;': '\u27e8',
'langd;': '\u2991',
'langle;': '\u27e8',
'lap;': '\u2a85',
'Laplacetrf;': '\u2112',
'laquo': '\xab',
'laquo;': '\xab',
'Larr;': '\u219e',
'lArr;': '\u21d0',
'larr;': '\u2190',
'larrb;': '\u21e4',
'larrbfs;': '\u291f',
'larrfs;': '\u291d',
'larrhk;': '\u21a9',
'larrlp;': '\u21ab',
'larrpl;': '\u2939',
'larrsim;': '\u2973',
'larrtl;': '\u21a2',
'lat;': '\u2aab',
'lAtail;': '\u291b',
'latail;': '\u2919',
'late;': '\u2aad',
'lates;': '\u2aad\ufe00',
'lBarr;': '\u290e',
'lbarr;': '\u290c',
'lbbrk;': '\u2772',
'lbrace;': '{',
'lbrack;': '[',
'lbrke;': '\u298b',
'lbrksld;': '\u298f',
'lbrkslu;': '\u298d',
'Lcaron;': '\u013d',
'lcaron;': '\u013e',
'Lcedil;': '\u013b',
'lcedil;': '\u013c',
'lceil;': '\u2308',
'lcub;': '{',
'Lcy;': '\u041b',
'lcy;': '\u043b',
'ldca;': '\u2936',
'ldquo;': '\u201c',
'ldquor;': '\u201e',
'ldrdhar;': '\u2967',
'ldrushar;': '\u294b',
'ldsh;': '\u21b2',
'lE;': '\u2266',
'le;': '\u2264',
'LeftAngleBracket;': '\u27e8',
'LeftArrow;': '\u2190',
'Leftarrow;': '\u21d0',
'leftarrow;': '\u2190',
'LeftArrowBar;': '\u21e4',
'LeftArrowRightArrow;': '\u21c6',
'leftarrowtail;': '\u21a2',
'LeftCeiling;': '\u2308',
'LeftDoubleBracket;': '\u27e6',
'LeftDownTeeVector;': '\u2961',
'LeftDownVector;': '\u21c3',
'LeftDownVectorBar;': '\u2959',
'LeftFloor;': '\u230a',
'leftharpoondown;': '\u21bd',
'leftharpoonup;': '\u21bc',
'leftleftarrows;': '\u21c7',
'LeftRightArrow;': '\u2194',
'Leftrightarrow;': '\u21d4',
'leftrightarrow;': '\u2194',
'leftrightarrows;': '\u21c6',
'leftrightharpoons;': '\u21cb',
'leftrightsquigarrow;': '\u21ad',
'LeftRightVector;': '\u294e',
'LeftTee;': '\u22a3',
'LeftTeeArrow;': '\u21a4',
'LeftTeeVector;': '\u295a',
'leftthreetimes;': '\u22cb',
'LeftTriangle;': '\u22b2',
'LeftTriangleBar;': '\u29cf',
'LeftTriangleEqual;': '\u22b4',
'LeftUpDownVector;': '\u2951',
'LeftUpTeeVector;': '\u2960',
'LeftUpVector;': '\u21bf',
'LeftUpVectorBar;': '\u2958',
'LeftVector;': '\u21bc',
'LeftVectorBar;': '\u2952',
'lEg;': '\u2a8b',
'leg;': '\u22da',
'leq;': '\u2264',
'leqq;': '\u2266',
'leqslant;': '\u2a7d',
'les;': '\u2a7d',
'lescc;': '\u2aa8',
'lesdot;': '\u2a7f',
'lesdoto;': '\u2a81',
'lesdotor;': '\u2a83',
'lesg;': '\u22da\ufe00',
'lesges;': '\u2a93',
'lessapprox;': '\u2a85',
'lessdot;': '\u22d6',
'lesseqgtr;': '\u22da',
'lesseqqgtr;': '\u2a8b',
'LessEqualGreater;': '\u22da',
'LessFullEqual;': '\u2266',
'LessGreater;': '\u2276',
'lessgtr;': '\u2276',
'LessLess;': '\u2aa1',
'lesssim;': '\u2272',
'LessSlantEqual;': '\u2a7d',
'LessTilde;': '\u2272',
'lfisht;': '\u297c',
'lfloor;': '\u230a',
'Lfr;': '\U0001d50f',
'lfr;': '\U0001d529',
'lg;': '\u2276',
'lgE;': '\u2a91',
'lHar;': '\u2962',
'lhard;': '\u21bd',
'lharu;': '\u21bc',
'lharul;': '\u296a',
'lhblk;': '\u2584',
'LJcy;': '\u0409',
'ljcy;': '\u0459',
'Ll;': '\u22d8',
'll;': '\u226a',
'llarr;': '\u21c7',
'llcorner;': '\u231e',
'Lleftarrow;': '\u21da',
'llhard;': '\u296b',
'lltri;': '\u25fa',
'Lmidot;': '\u013f',
'lmidot;': '\u0140',
'lmoust;': '\u23b0',
'lmoustache;': '\u23b0',
'lnap;': '\u2a89',
'lnapprox;': '\u2a89',
'lnE;': '\u2268',
'lne;': '\u2a87',
'lneq;': '\u2a87',
'lneqq;': '\u2268',
'lnsim;': '\u22e6',
'loang;': '\u27ec',
'loarr;': '\u21fd',
'lobrk;': '\u27e6',
'LongLeftArrow;': '\u27f5',
'Longleftarrow;': '\u27f8',
'longleftarrow;': '\u27f5',
'LongLeftRightArrow;': '\u27f7',
'Longleftrightarrow;': '\u27fa',
'longleftrightarrow;': '\u27f7',
'longmapsto;': '\u27fc',
'LongRightArrow;': '\u27f6',
'Longrightarrow;': '\u27f9',
'longrightarrow;': '\u27f6',
'looparrowleft;': '\u21ab',
'looparrowright;': '\u21ac',
'lopar;': '\u2985',
'Lopf;': '\U0001d543',
'lopf;': '\U0001d55d',
'loplus;': '\u2a2d',
'lotimes;': '\u2a34',
'lowast;': '\u2217',
'lowbar;': '_',
'LowerLeftArrow;': '\u2199',
'LowerRightArrow;': '\u2198',
'loz;': '\u25ca',
'lozenge;': '\u25ca',
'lozf;': '\u29eb',
'lpar;': '(',
'lparlt;': '\u2993',
'lrarr;': '\u21c6',
'lrcorner;': '\u231f',
'lrhar;': '\u21cb',
'lrhard;': '\u296d',
'lrm;': '\u200e',
'lrtri;': '\u22bf',
'lsaquo;': '\u2039',
'Lscr;': '\u2112',
'lscr;': '\U0001d4c1',
'Lsh;': '\u21b0',
'lsh;': '\u21b0',
'lsim;': '\u2272',
'lsime;': '\u2a8d',
'lsimg;': '\u2a8f',
'lsqb;': '[',
'lsquo;': '\u2018',
'lsquor;': '\u201a',
'Lstrok;': '\u0141',
'lstrok;': '\u0142',
'LT': '<',
'lt': '<',
'LT;': '<',
'Lt;': '\u226a',
'lt;': '<',
'ltcc;': '\u2aa6',
'ltcir;': '\u2a79',
'ltdot;': '\u22d6',
'lthree;': '\u22cb',
'ltimes;': '\u22c9',
'ltlarr;': '\u2976',
'ltquest;': '\u2a7b',
'ltri;': '\u25c3',
'ltrie;': '\u22b4',
'ltrif;': '\u25c2',
'ltrPar;': '\u2996',
'lurdshar;': '\u294a',
'luruhar;': '\u2966',
'lvertneqq;': '\u2268\ufe00',
'lvnE;': '\u2268\ufe00',
'macr': '\xaf',
'macr;': '\xaf',
'male;': '\u2642',
'malt;': '\u2720',
'maltese;': '\u2720',
'Map;': '\u2905',
'map;': '\u21a6',
'mapsto;': '\u21a6',
'mapstodown;': '\u21a7',
'mapstoleft;': '\u21a4',
'mapstoup;': '\u21a5',
'marker;': '\u25ae',
'mcomma;': '\u2a29',
'Mcy;': '\u041c',
'mcy;': '\u043c',
'mdash;': '\u2014',
'mDDot;': '\u223a',
'measuredangle;': '\u2221',
'MediumSpace;': '\u205f',
'Mellintrf;': '\u2133',
'Mfr;': '\U0001d510',
'mfr;': '\U0001d52a',
'mho;': '\u2127',
'micro': '\xb5',
'micro;': '\xb5',
'mid;': '\u2223',
'midast;': '*',
'midcir;': '\u2af0',
'middot': '\xb7',
'middot;': '\xb7',
'minus;': '\u2212',
'minusb;': '\u229f',
'minusd;': '\u2238',
'minusdu;': '\u2a2a',
'MinusPlus;': '\u2213',
'mlcp;': '\u2adb',
'mldr;': '\u2026',
'mnplus;': '\u2213',
'models;': '\u22a7',
'Mopf;': '\U0001d544',
'mopf;': '\U0001d55e',
'mp;': '\u2213',
'Mscr;': '\u2133',
'mscr;': '\U0001d4c2',
'mstpos;': '\u223e',
'Mu;': '\u039c',
'mu;': '\u03bc',
'multimap;': '\u22b8',
'mumap;': '\u22b8',
'nabla;': '\u2207',
'Nacute;': '\u0143',
'nacute;': '\u0144',
'nang;': '\u2220\u20d2',
'nap;': '\u2249',
'napE;': '\u2a70\u0338',
'napid;': '\u224b\u0338',
'napos;': '\u0149',
'napprox;': '\u2249',
'natur;': '\u266e',
'natural;': '\u266e',
'naturals;': '\u2115',
'nbsp': '\xa0',
'nbsp;': '\xa0',
'nbump;': '\u224e\u0338',
'nbumpe;': '\u224f\u0338',
'ncap;': '\u2a43',
'Ncaron;': '\u0147',
'ncaron;': '\u0148',
'Ncedil;': '\u0145',
'ncedil;': '\u0146',
'ncong;': '\u2247',
'ncongdot;': '\u2a6d\u0338',
'ncup;': '\u2a42',
'Ncy;': '\u041d',
'ncy;': '\u043d',
'ndash;': '\u2013',
'ne;': '\u2260',
'nearhk;': '\u2924',
'neArr;': '\u21d7',
'nearr;': '\u2197',
'nearrow;': '\u2197',
'nedot;': '\u2250\u0338',
'NegativeMediumSpace;': '\u200b',
'NegativeThickSpace;': '\u200b',
'NegativeThinSpace;': '\u200b',
'NegativeVeryThinSpace;': '\u200b',
'nequiv;': '\u2262',
'nesear;': '\u2928',
'nesim;': '\u2242\u0338',
'NestedGreaterGreater;': '\u226b',
'NestedLessLess;': '\u226a',
'NewLine;': '\n',
'nexist;': '\u2204',
'nexists;': '\u2204',
'Nfr;': '\U0001d511',
'nfr;': '\U0001d52b',
'ngE;': '\u2267\u0338',
'nge;': '\u2271',
'ngeq;': '\u2271',
'ngeqq;': '\u2267\u0338',
'ngeqslant;': '\u2a7e\u0338',
'nges;': '\u2a7e\u0338',
'nGg;': '\u22d9\u0338',
'ngsim;': '\u2275',
'nGt;': '\u226b\u20d2',
'ngt;': '\u226f',
'ngtr;': '\u226f',
'nGtv;': '\u226b\u0338',
'nhArr;': '\u21ce',
'nharr;': '\u21ae',
'nhpar;': '\u2af2',
'ni;': '\u220b',
'nis;': '\u22fc',
'nisd;': '\u22fa',
'niv;': '\u220b',
'NJcy;': '\u040a',
'njcy;': '\u045a',
'nlArr;': '\u21cd',
'nlarr;': '\u219a',
'nldr;': '\u2025',
'nlE;': '\u2266\u0338',
'nle;': '\u2270',
'nLeftarrow;': '\u21cd',
'nleftarrow;': '\u219a',
'nLeftrightarrow;': '\u21ce',
'nleftrightarrow;': '\u21ae',
'nleq;': '\u2270',
'nleqq;': '\u2266\u0338',
'nleqslant;': '\u2a7d\u0338',
'nles;': '\u2a7d\u0338',
'nless;': '\u226e',
'nLl;': '\u22d8\u0338',
'nlsim;': '\u2274',
'nLt;': '\u226a\u20d2',
'nlt;': '\u226e',
'nltri;': '\u22ea',
'nltrie;': '\u22ec',
'nLtv;': '\u226a\u0338',
'nmid;': '\u2224',
'NoBreak;': '\u2060',
'NonBreakingSpace;': '\xa0',
'Nopf;': '\u2115',
'nopf;': '\U0001d55f',
'not': '\xac',
'Not;': '\u2aec',
'not;': '\xac',
'NotCongruent;': '\u2262',
'NotCupCap;': '\u226d',
'NotDoubleVerticalBar;': '\u2226',
'NotElement;': '\u2209',
'NotEqual;': '\u2260',
'NotEqualTilde;': '\u2242\u0338',
'NotExists;': '\u2204',
'NotGreater;': '\u226f',
'NotGreaterEqual;': '\u2271',
'NotGreaterFullEqual;': '\u2267\u0338',
'NotGreaterGreater;': '\u226b\u0338',
'NotGreaterLess;': '\u2279',
'NotGreaterSlantEqual;': '\u2a7e\u0338',
'NotGreaterTilde;': '\u2275',
'NotHumpDownHump;': '\u224e\u0338',
'NotHumpEqual;': '\u224f\u0338',
'notin;': '\u2209',
'notindot;': '\u22f5\u0338',
'notinE;': '\u22f9\u0338',
'notinva;': '\u2209',
'notinvb;': '\u22f7',
'notinvc;': '\u22f6',
'NotLeftTriangle;': '\u22ea',
'NotLeftTriangleBar;': '\u29cf\u0338',
'NotLeftTriangleEqual;': '\u22ec',
'NotLess;': '\u226e',
'NotLessEqual;': '\u2270',
'NotLessGreater;': '\u2278',
'NotLessLess;': '\u226a\u0338',
'NotLessSlantEqual;': '\u2a7d\u0338',
'NotLessTilde;': '\u2274',
'NotNestedGreaterGreater;': '\u2aa2\u0338',
'NotNestedLessLess;': '\u2aa1\u0338',
'notni;': '\u220c',
'notniva;': '\u220c',
'notnivb;': '\u22fe',
'notnivc;': '\u22fd',
'NotPrecedes;': '\u2280',
'NotPrecedesEqual;': '\u2aaf\u0338',
'NotPrecedesSlantEqual;': '\u22e0',
'NotReverseElement;': '\u220c',
'NotRightTriangle;': '\u22eb',
'NotRightTriangleBar;': '\u29d0\u0338',
'NotRightTriangleEqual;': '\u22ed',
'NotSquareSubset;': '\u228f\u0338',
'NotSquareSubsetEqual;': '\u22e2',
'NotSquareSuperset;': '\u2290\u0338',
'NotSquareSupersetEqual;': '\u22e3',
'NotSubset;': '\u2282\u20d2',
'NotSubsetEqual;': '\u2288',
'NotSucceeds;': '\u2281',
'NotSucceedsEqual;': '\u2ab0\u0338',
'NotSucceedsSlantEqual;': '\u22e1',
'NotSucceedsTilde;': '\u227f\u0338',
'NotSuperset;': '\u2283\u20d2',
'NotSupersetEqual;': '\u2289',
'NotTilde;': '\u2241',
'NotTildeEqual;': '\u2244',
'NotTildeFullEqual;': '\u2247',
'NotTildeTilde;': '\u2249',
'NotVerticalBar;': '\u2224',
'npar;': '\u2226',
'nparallel;': '\u2226',
'nparsl;': '\u2afd\u20e5',
'npart;': '\u2202\u0338',
'npolint;': '\u2a14',
'npr;': '\u2280',
'nprcue;': '\u22e0',
'npre;': '\u2aaf\u0338',
'nprec;': '\u2280',
'npreceq;': '\u2aaf\u0338',
'nrArr;': '\u21cf',
'nrarr;': '\u219b',
'nrarrc;': '\u2933\u0338',
'nrarrw;': '\u219d\u0338',
'nRightarrow;': '\u21cf',
'nrightarrow;': '\u219b',
'nrtri;': '\u22eb',
'nrtrie;': '\u22ed',
'nsc;': '\u2281',
'nsccue;': '\u22e1',
'nsce;': '\u2ab0\u0338',
'Nscr;': '\U0001d4a9',
'nscr;': '\U0001d4c3',
'nshortmid;': '\u2224',
'nshortparallel;': '\u2226',
'nsim;': '\u2241',
'nsime;': '\u2244',
'nsimeq;': '\u2244',
'nsmid;': '\u2224',
'nspar;': '\u2226',
'nsqsube;': '\u22e2',
'nsqsupe;': '\u22e3',
'nsub;': '\u2284',
'nsubE;': '\u2ac5\u0338',
'nsube;': '\u2288',
'nsubset;': '\u2282\u20d2',
'nsubseteq;': '\u2288',
'nsubseteqq;': '\u2ac5\u0338',
'nsucc;': '\u2281',
'nsucceq;': '\u2ab0\u0338',
'nsup;': '\u2285',
'nsupE;': '\u2ac6\u0338',
'nsupe;': '\u2289',
'nsupset;': '\u2283\u20d2',
'nsupseteq;': '\u2289',
'nsupseteqq;': '\u2ac6\u0338',
'ntgl;': '\u2279',
'Ntilde': '\xd1',
'ntilde': '\xf1',
'Ntilde;': '\xd1',
'ntilde;': '\xf1',
'ntlg;': '\u2278',
'ntriangleleft;': '\u22ea',
'ntrianglelefteq;': '\u22ec',
'ntriangleright;': '\u22eb',
'ntrianglerighteq;': '\u22ed',
'Nu;': '\u039d',
'nu;': '\u03bd',
'num;': '#',
'numero;': '\u2116',
'numsp;': '\u2007',
'nvap;': '\u224d\u20d2',
'nVDash;': '\u22af',
'nVdash;': '\u22ae',
'nvDash;': '\u22ad',
'nvdash;': '\u22ac',
'nvge;': '\u2265\u20d2',
'nvgt;': '>\u20d2',
'nvHarr;': '\u2904',
'nvinfin;': '\u29de',
'nvlArr;': '\u2902',
'nvle;': '\u2264\u20d2',
'nvlt;': '<\u20d2',
'nvltrie;': '\u22b4\u20d2',
'nvrArr;': '\u2903',
'nvrtrie;': '\u22b5\u20d2',
'nvsim;': '\u223c\u20d2',
'nwarhk;': '\u2923',
'nwArr;': '\u21d6',
'nwarr;': '\u2196',
'nwarrow;': '\u2196',
'nwnear;': '\u2927',
'Oacute': '\xd3',
'oacute': '\xf3',
'Oacute;': '\xd3',
'oacute;': '\xf3',
'oast;': '\u229b',
'ocir;': '\u229a',
'Ocirc': '\xd4',
'ocirc': '\xf4',
'Ocirc;': '\xd4',
'ocirc;': '\xf4',
'Ocy;': '\u041e',
'ocy;': '\u043e',
'odash;': '\u229d',
'Odblac;': '\u0150',
'odblac;': '\u0151',
'odiv;': '\u2a38',
'odot;': '\u2299',
'odsold;': '\u29bc',
'OElig;': '\u0152',
'oelig;': '\u0153',
'ofcir;': '\u29bf',
'Ofr;': '\U0001d512',
'ofr;': '\U0001d52c',
'ogon;': '\u02db',
'Ograve': '\xd2',
'ograve': '\xf2',
'Ograve;': '\xd2',
'ograve;': '\xf2',
'ogt;': '\u29c1',
'ohbar;': '\u29b5',
'ohm;': '\u03a9',
'oint;': '\u222e',
'olarr;': '\u21ba',
'olcir;': '\u29be',
'olcross;': '\u29bb',
'oline;': '\u203e',
'olt;': '\u29c0',
'Omacr;': '\u014c',
'omacr;': '\u014d',
'Omega;': '\u03a9',
'omega;': '\u03c9',
'Omicron;': '\u039f',
'omicron;': '\u03bf',
'omid;': '\u29b6',
'ominus;': '\u2296',
'Oopf;': '\U0001d546',
'oopf;': '\U0001d560',
'opar;': '\u29b7',
'OpenCurlyDoubleQuote;': '\u201c',
'OpenCurlyQuote;': '\u2018',
'operp;': '\u29b9',
'oplus;': '\u2295',
'Or;': '\u2a54',
'or;': '\u2228',
'orarr;': '\u21bb',
'ord;': '\u2a5d',
'order;': '\u2134',
'orderof;': '\u2134',
'ordf': '\xaa',
'ordf;': '\xaa',
'ordm': '\xba',
'ordm;': '\xba',
'origof;': '\u22b6',
'oror;': '\u2a56',
'orslope;': '\u2a57',
'orv;': '\u2a5b',
'oS;': '\u24c8',
'Oscr;': '\U0001d4aa',
'oscr;': '\u2134',
'Oslash': '\xd8',
'oslash': '\xf8',
'Oslash;': '\xd8',
'oslash;': '\xf8',
'osol;': '\u2298',
'Otilde': '\xd5',
'otilde': '\xf5',
'Otilde;': '\xd5',
'otilde;': '\xf5',
'Otimes;': '\u2a37',
'otimes;': '\u2297',
'otimesas;': '\u2a36',
'Ouml': '\xd6',
'ouml': '\xf6',
'Ouml;': '\xd6',
'ouml;': '\xf6',
'ovbar;': '\u233d',
'OverBar;': '\u203e',
'OverBrace;': '\u23de',
'OverBracket;': '\u23b4',
'OverParenthesis;': '\u23dc',
'par;': '\u2225',
'para': '\xb6',
'para;': '\xb6',
'parallel;': '\u2225',
'parsim;': '\u2af3',
'parsl;': '\u2afd',
'part;': '\u2202',
'PartialD;': '\u2202',
'Pcy;': '\u041f',
'pcy;': '\u043f',
'percnt;': '%',
'period;': '.',
'permil;': '\u2030',
'perp;': '\u22a5',
'pertenk;': '\u2031',
'Pfr;': '\U0001d513',
'pfr;': '\U0001d52d',
'Phi;': '\u03a6',
'phi;': '\u03c6',
'phiv;': '\u03d5',
'phmmat;': '\u2133',
'phone;': '\u260e',
'Pi;': '\u03a0',
'pi;': '\u03c0',
'pitchfork;': '\u22d4',
'piv;': '\u03d6',
'planck;': '\u210f',
'planckh;': '\u210e',
'plankv;': '\u210f',
'plus;': '+',
'plusacir;': '\u2a23',
'plusb;': '\u229e',
'pluscir;': '\u2a22',
'plusdo;': '\u2214',
'plusdu;': '\u2a25',
'pluse;': '\u2a72',
'PlusMinus;': '\xb1',
'plusmn': '\xb1',
'plusmn;': '\xb1',
'plussim;': '\u2a26',
'plustwo;': '\u2a27',
'pm;': '\xb1',
'Poincareplane;': '\u210c',
'pointint;': '\u2a15',
'Popf;': '\u2119',
'popf;': '\U0001d561',
'pound': '\xa3',
'pound;': '\xa3',
'Pr;': '\u2abb',
'pr;': '\u227a',
'prap;': '\u2ab7',
'prcue;': '\u227c',
'prE;': '\u2ab3',
'pre;': '\u2aaf',
'prec;': '\u227a',
'precapprox;': '\u2ab7',
'preccurlyeq;': '\u227c',
'Precedes;': '\u227a',
'PrecedesEqual;': '\u2aaf',
'PrecedesSlantEqual;': '\u227c',
'PrecedesTilde;': '\u227e',
'preceq;': '\u2aaf',
'precnapprox;': '\u2ab9',
'precneqq;': '\u2ab5',
'precnsim;': '\u22e8',
'precsim;': '\u227e',
'Prime;': '\u2033',
'prime;': '\u2032',
'primes;': '\u2119',
'prnap;': '\u2ab9',
'prnE;': '\u2ab5',
'prnsim;': '\u22e8',
'prod;': '\u220f',
'Product;': '\u220f',
'profalar;': '\u232e',
'profline;': '\u2312',
'profsurf;': '\u2313',
'prop;': '\u221d',
'Proportion;': '\u2237',
'Proportional;': '\u221d',
'propto;': '\u221d',
'prsim;': '\u227e',
'prurel;': '\u22b0',
'Pscr;': '\U0001d4ab',
'pscr;': '\U0001d4c5',
'Psi;': '\u03a8',
'psi;': '\u03c8',
'puncsp;': '\u2008',
'Qfr;': '\U0001d514',
'qfr;': '\U0001d52e',
'qint;': '\u2a0c',
'Qopf;': '\u211a',
'qopf;': '\U0001d562',
'qprime;': '\u2057',
'Qscr;': '\U0001d4ac',
'qscr;': '\U0001d4c6',
'quaternions;': '\u210d',
'quatint;': '\u2a16',
'quest;': '?',
'questeq;': '\u225f',
'QUOT': '"',
'quot': '"',
'QUOT;': '"',
'quot;': '"',
'rAarr;': '\u21db',
'race;': '\u223d\u0331',
'Racute;': '\u0154',
'racute;': '\u0155',
'radic;': '\u221a',
'raemptyv;': '\u29b3',
'Rang;': '\u27eb',
'rang;': '\u27e9',
'rangd;': '\u2992',
'range;': '\u29a5',
'rangle;': '\u27e9',
'raquo': '\xbb',
'raquo;': '\xbb',
'Rarr;': '\u21a0',
'rArr;': '\u21d2',
'rarr;': '\u2192',
'rarrap;': '\u2975',
'rarrb;': '\u21e5',
'rarrbfs;': '\u2920',
'rarrc;': '\u2933',
'rarrfs;': '\u291e',
'rarrhk;': '\u21aa',
'rarrlp;': '\u21ac',
'rarrpl;': '\u2945',
'rarrsim;': '\u2974',
'Rarrtl;': '\u2916',
'rarrtl;': '\u21a3',
'rarrw;': '\u219d',
'rAtail;': '\u291c',
'ratail;': '\u291a',
'ratio;': '\u2236',
'rationals;': '\u211a',
'RBarr;': '\u2910',
'rBarr;': '\u290f',
'rbarr;': '\u290d',
'rbbrk;': '\u2773',
'rbrace;': '}',
'rbrack;': ']',
'rbrke;': '\u298c',
'rbrksld;': '\u298e',
'rbrkslu;': '\u2990',
'Rcaron;': '\u0158',
'rcaron;': '\u0159',
'Rcedil;': '\u0156',
'rcedil;': '\u0157',
'rceil;': '\u2309',
'rcub;': '}',
'Rcy;': '\u0420',
'rcy;': '\u0440',
'rdca;': '\u2937',
'rdldhar;': '\u2969',
'rdquo;': '\u201d',
'rdquor;': '\u201d',
'rdsh;': '\u21b3',
'Re;': '\u211c',
'real;': '\u211c',
'realine;': '\u211b',
'realpart;': '\u211c',
'reals;': '\u211d',
'rect;': '\u25ad',
'REG': '\xae',
'reg': '\xae',
'REG;': '\xae',
'reg;': '\xae',
'ReverseElement;': '\u220b',
'ReverseEquilibrium;': '\u21cb',
'ReverseUpEquilibrium;': '\u296f',
'rfisht;': '\u297d',
'rfloor;': '\u230b',
'Rfr;': '\u211c',
'rfr;': '\U0001d52f',
'rHar;': '\u2964',
'rhard;': '\u21c1',
'rharu;': '\u21c0',
'rharul;': '\u296c',
'Rho;': '\u03a1',
'rho;': '\u03c1',
'rhov;': '\u03f1',
'RightAngleBracket;': '\u27e9',
'RightArrow;': '\u2192',
'Rightarrow;': '\u21d2',
'rightarrow;': '\u2192',
'RightArrowBar;': '\u21e5',
'RightArrowLeftArrow;': '\u21c4',
'rightarrowtail;': '\u21a3',
'RightCeiling;': '\u2309',
'RightDoubleBracket;': '\u27e7',
'RightDownTeeVector;': '\u295d',
'RightDownVector;': '\u21c2',
'RightDownVectorBar;': '\u2955',
'RightFloor;': '\u230b',
'rightharpoondown;': '\u21c1',
'rightharpoonup;': '\u21c0',
'rightleftarrows;': '\u21c4',
'rightleftharpoons;': '\u21cc',
'rightrightarrows;': '\u21c9',
'rightsquigarrow;': '\u219d',
'RightTee;': '\u22a2',
'RightTeeArrow;': '\u21a6',
'RightTeeVector;': '\u295b',
'rightthreetimes;': '\u22cc',
'RightTriangle;': '\u22b3',
'RightTriangleBar;': '\u29d0',
'RightTriangleEqual;': '\u22b5',
'RightUpDownVector;': '\u294f',
'RightUpTeeVector;': '\u295c',
'RightUpVector;': '\u21be',
'RightUpVectorBar;': '\u2954',
'RightVector;': '\u21c0',
'RightVectorBar;': '\u2953',
'ring;': '\u02da',
'risingdotseq;': '\u2253',
'rlarr;': '\u21c4',
'rlhar;': '\u21cc',
'rlm;': '\u200f',
'rmoust;': '\u23b1',
'rmoustache;': '\u23b1',
'rnmid;': '\u2aee',
'roang;': '\u27ed',
'roarr;': '\u21fe',
'robrk;': '\u27e7',
'ropar;': '\u2986',
'Ropf;': '\u211d',
'ropf;': '\U0001d563',
'roplus;': '\u2a2e',
'rotimes;': '\u2a35',
'RoundImplies;': '\u2970',
'rpar;': ')',
'rpargt;': '\u2994',
'rppolint;': '\u2a12',
'rrarr;': '\u21c9',
'Rrightarrow;': '\u21db',
'rsaquo;': '\u203a',
'Rscr;': '\u211b',
'rscr;': '\U0001d4c7',
'Rsh;': '\u21b1',
'rsh;': '\u21b1',
'rsqb;': ']',
'rsquo;': '\u2019',
'rsquor;': '\u2019',
'rthree;': '\u22cc',
'rtimes;': '\u22ca',
'rtri;': '\u25b9',
'rtrie;': '\u22b5',
'rtrif;': '\u25b8',
'rtriltri;': '\u29ce',
'RuleDelayed;': '\u29f4',
'ruluhar;': '\u2968',
'rx;': '\u211e',
'Sacute;': '\u015a',
'sacute;': '\u015b',
'sbquo;': '\u201a',
'Sc;': '\u2abc',
'sc;': '\u227b',
'scap;': '\u2ab8',
'Scaron;': '\u0160',
'scaron;': '\u0161',
'sccue;': '\u227d',
'scE;': '\u2ab4',
'sce;': '\u2ab0',
'Scedil;': '\u015e',
'scedil;': '\u015f',
'Scirc;': '\u015c',
'scirc;': '\u015d',
'scnap;': '\u2aba',
'scnE;': '\u2ab6',
'scnsim;': '\u22e9',
'scpolint;': '\u2a13',
'scsim;': '\u227f',
'Scy;': '\u0421',
'scy;': '\u0441',
'sdot;': '\u22c5',
'sdotb;': '\u22a1',
'sdote;': '\u2a66',
'searhk;': '\u2925',
'seArr;': '\u21d8',
'searr;': '\u2198',
'searrow;': '\u2198',
'sect': '\xa7',
'sect;': '\xa7',
'semi;': ';',
'seswar;': '\u2929',
'setminus;': '\u2216',
'setmn;': '\u2216',
'sext;': '\u2736',
'Sfr;': '\U0001d516',
'sfr;': '\U0001d530',
'sfrown;': '\u2322',
'sharp;': '\u266f',
'SHCHcy;': '\u0429',
'shchcy;': '\u0449',
'SHcy;': '\u0428',
'shcy;': '\u0448',
'ShortDownArrow;': '\u2193',
'ShortLeftArrow;': '\u2190',
'shortmid;': '\u2223',
'shortparallel;': '\u2225',
'ShortRightArrow;': '\u2192',
'ShortUpArrow;': '\u2191',
'shy': '\xad',
'shy;': '\xad',
'Sigma;': '\u03a3',
'sigma;': '\u03c3',
'sigmaf;': '\u03c2',
'sigmav;': '\u03c2',
'sim;': '\u223c',
'simdot;': '\u2a6a',
'sime;': '\u2243',
'simeq;': '\u2243',
'simg;': '\u2a9e',
'simgE;': '\u2aa0',
'siml;': '\u2a9d',
'simlE;': '\u2a9f',
'simne;': '\u2246',
'simplus;': '\u2a24',
'simrarr;': '\u2972',
'slarr;': '\u2190',
'SmallCircle;': '\u2218',
'smallsetminus;': '\u2216',
'smashp;': '\u2a33',
'smeparsl;': '\u29e4',
'smid;': '\u2223',
'smile;': '\u2323',
'smt;': '\u2aaa',
'smte;': '\u2aac',
'smtes;': '\u2aac\ufe00',
'SOFTcy;': '\u042c',
'softcy;': '\u044c',
'sol;': '/',
'solb;': '\u29c4',
'solbar;': '\u233f',
'Sopf;': '\U0001d54a',
'sopf;': '\U0001d564',
'spades;': '\u2660',
'spadesuit;': '\u2660',
'spar;': '\u2225',
'sqcap;': '\u2293',
'sqcaps;': '\u2293\ufe00',
'sqcup;': '\u2294',
'sqcups;': '\u2294\ufe00',
'Sqrt;': '\u221a',
'sqsub;': '\u228f',
'sqsube;': '\u2291',
'sqsubset;': '\u228f',
'sqsubseteq;': '\u2291',
'sqsup;': '\u2290',
'sqsupe;': '\u2292',
'sqsupset;': '\u2290',
'sqsupseteq;': '\u2292',
'squ;': '\u25a1',
'Square;': '\u25a1',
'square;': '\u25a1',
'SquareIntersection;': '\u2293',
'SquareSubset;': '\u228f',
'SquareSubsetEqual;': '\u2291',
'SquareSuperset;': '\u2290',
'SquareSupersetEqual;': '\u2292',
'SquareUnion;': '\u2294',
'squarf;': '\u25aa',
'squf;': '\u25aa',
'srarr;': '\u2192',
'Sscr;': '\U0001d4ae',
'sscr;': '\U0001d4c8',
'ssetmn;': '\u2216',
'ssmile;': '\u2323',
'sstarf;': '\u22c6',
'Star;': '\u22c6',
'star;': '\u2606',
'starf;': '\u2605',
'straightepsilon;': '\u03f5',
'straightphi;': '\u03d5',
'strns;': '\xaf',
'Sub;': '\u22d0',
'sub;': '\u2282',
'subdot;': '\u2abd',
'subE;': '\u2ac5',
'sube;': '\u2286',
'subedot;': '\u2ac3',
'submult;': '\u2ac1',
'subnE;': '\u2acb',
'subne;': '\u228a',
'subplus;': '\u2abf',
'subrarr;': '\u2979',
'Subset;': '\u22d0',
'subset;': '\u2282',
'subseteq;': '\u2286',
'subseteqq;': '\u2ac5',
'SubsetEqual;': '\u2286',
'subsetneq;': '\u228a',
'subsetneqq;': '\u2acb',
'subsim;': '\u2ac7',
'subsub;': '\u2ad5',
'subsup;': '\u2ad3',
'succ;': '\u227b',
'succapprox;': '\u2ab8',
'succcurlyeq;': '\u227d',
'Succeeds;': '\u227b',
'SucceedsEqual;': '\u2ab0',
'SucceedsSlantEqual;': '\u227d',
'SucceedsTilde;': '\u227f',
'succeq;': '\u2ab0',
'succnapprox;': '\u2aba',
'succneqq;': '\u2ab6',
'succnsim;': '\u22e9',
'succsim;': '\u227f',
'SuchThat;': '\u220b',
'Sum;': '\u2211',
'sum;': '\u2211',
'sung;': '\u266a',
'sup1': '\xb9',
'sup1;': '\xb9',
'sup2': '\xb2',
'sup2;': '\xb2',
'sup3': '\xb3',
'sup3;': '\xb3',
'Sup;': '\u22d1',
'sup;': '\u2283',
'supdot;': '\u2abe',
'supdsub;': '\u2ad8',
'supE;': '\u2ac6',
'supe;': '\u2287',
'supedot;': '\u2ac4',
'Superset;': '\u2283',
'SupersetEqual;': '\u2287',
'suphsol;': '\u27c9',
'suphsub;': '\u2ad7',
'suplarr;': '\u297b',
'supmult;': '\u2ac2',
'supnE;': '\u2acc',
'supne;': '\u228b',
'supplus;': '\u2ac0',
'Supset;': '\u22d1',
'supset;': '\u2283',
'supseteq;': '\u2287',
'supseteqq;': '\u2ac6',
'supsetneq;': '\u228b',
'supsetneqq;': '\u2acc',
'supsim;': '\u2ac8',
'supsub;': '\u2ad4',
'supsup;': '\u2ad6',
'swarhk;': '\u2926',
'swArr;': '\u21d9',
'swarr;': '\u2199',
'swarrow;': '\u2199',
'swnwar;': '\u292a',
'szlig': '\xdf',
'szlig;': '\xdf',
'Tab;': '\t',
'target;': '\u2316',
'Tau;': '\u03a4',
'tau;': '\u03c4',
'tbrk;': '\u23b4',
'Tcaron;': '\u0164',
'tcaron;': '\u0165',
'Tcedil;': '\u0162',
'tcedil;': '\u0163',
'Tcy;': '\u0422',
'tcy;': '\u0442',
'tdot;': '\u20db',
'telrec;': '\u2315',
'Tfr;': '\U0001d517',
'tfr;': '\U0001d531',
'there4;': '\u2234',
'Therefore;': '\u2234',
'therefore;': '\u2234',
'Theta;': '\u0398',
'theta;': '\u03b8',
'thetasym;': '\u03d1',
'thetav;': '\u03d1',
'thickapprox;': '\u2248',
'thicksim;': '\u223c',
'ThickSpace;': '\u205f\u200a',
'thinsp;': '\u2009',
'ThinSpace;': '\u2009',
'thkap;': '\u2248',
'thksim;': '\u223c',
'THORN': '\xde',
'thorn': '\xfe',
'THORN;': '\xde',
'thorn;': '\xfe',
'Tilde;': '\u223c',
'tilde;': '\u02dc',
'TildeEqual;': '\u2243',
'TildeFullEqual;': '\u2245',
'TildeTilde;': '\u2248',
'times': '\xd7',
'times;': '\xd7',
'timesb;': '\u22a0',
'timesbar;': '\u2a31',
'timesd;': '\u2a30',
'tint;': '\u222d',
'toea;': '\u2928',
'top;': '\u22a4',
'topbot;': '\u2336',
'topcir;': '\u2af1',
'Topf;': '\U0001d54b',
'topf;': '\U0001d565',
'topfork;': '\u2ada',
'tosa;': '\u2929',
'tprime;': '\u2034',
'TRADE;': '\u2122',
'trade;': '\u2122',
'triangle;': '\u25b5',
'triangledown;': '\u25bf',
'triangleleft;': '\u25c3',
'trianglelefteq;': '\u22b4',
'triangleq;': '\u225c',
'triangleright;': '\u25b9',
'trianglerighteq;': '\u22b5',
'tridot;': '\u25ec',
'trie;': '\u225c',
'triminus;': '\u2a3a',
'TripleDot;': '\u20db',
'triplus;': '\u2a39',
'trisb;': '\u29cd',
'tritime;': '\u2a3b',
'trpezium;': '\u23e2',
'Tscr;': '\U0001d4af',
'tscr;': '\U0001d4c9',
'TScy;': '\u0426',
'tscy;': '\u0446',
'TSHcy;': '\u040b',
'tshcy;': '\u045b',
'Tstrok;': '\u0166',
'tstrok;': '\u0167',
'twixt;': '\u226c',
'twoheadleftarrow;': '\u219e',
'twoheadrightarrow;': '\u21a0',
'Uacute': '\xda',
'uacute': '\xfa',
'Uacute;': '\xda',
'uacute;': '\xfa',
'Uarr;': '\u219f',
'uArr;': '\u21d1',
'uarr;': '\u2191',
'Uarrocir;': '\u2949',
'Ubrcy;': '\u040e',
'ubrcy;': '\u045e',
'Ubreve;': '\u016c',
'ubreve;': '\u016d',
'Ucirc': '\xdb',
'ucirc': '\xfb',
'Ucirc;': '\xdb',
'ucirc;': '\xfb',
'Ucy;': '\u0423',
'ucy;': '\u0443',
'udarr;': '\u21c5',
'Udblac;': '\u0170',
'udblac;': '\u0171',
'udhar;': '\u296e',
'ufisht;': '\u297e',
'Ufr;': '\U0001d518',
'ufr;': '\U0001d532',
'Ugrave': '\xd9',
'ugrave': '\xf9',
'Ugrave;': '\xd9',
'ugrave;': '\xf9',
'uHar;': '\u2963',
'uharl;': '\u21bf',
'uharr;': '\u21be',
'uhblk;': '\u2580',
'ulcorn;': '\u231c',
'ulcorner;': '\u231c',
'ulcrop;': '\u230f',
'ultri;': '\u25f8',
'Umacr;': '\u016a',
'umacr;': '\u016b',
'uml': '\xa8',
'uml;': '\xa8',
'UnderBar;': '_',
'UnderBrace;': '\u23df',
'UnderBracket;': '\u23b5',
'UnderParenthesis;': '\u23dd',
'Union;': '\u22c3',
'UnionPlus;': '\u228e',
'Uogon;': '\u0172',
'uogon;': '\u0173',
'Uopf;': '\U0001d54c',
'uopf;': '\U0001d566',
'UpArrow;': '\u2191',
'Uparrow;': '\u21d1',
'uparrow;': '\u2191',
'UpArrowBar;': '\u2912',
'UpArrowDownArrow;': '\u21c5',
'UpDownArrow;': '\u2195',
'Updownarrow;': '\u21d5',
'updownarrow;': '\u2195',
'UpEquilibrium;': '\u296e',
'upharpoonleft;': '\u21bf',
'upharpoonright;': '\u21be',
'uplus;': '\u228e',
'UpperLeftArrow;': '\u2196',
'UpperRightArrow;': '\u2197',
'Upsi;': '\u03d2',
'upsi;': '\u03c5',
'upsih;': '\u03d2',
'Upsilon;': '\u03a5',
'upsilon;': '\u03c5',
'UpTee;': '\u22a5',
'UpTeeArrow;': '\u21a5',
'upuparrows;': '\u21c8',
'urcorn;': '\u231d',
'urcorner;': '\u231d',
'urcrop;': '\u230e',
'Uring;': '\u016e',
'uring;': '\u016f',
'urtri;': '\u25f9',
'Uscr;': '\U0001d4b0',
'uscr;': '\U0001d4ca',
'utdot;': '\u22f0',
'Utilde;': '\u0168',
'utilde;': '\u0169',
'utri;': '\u25b5',
'utrif;': '\u25b4',
'uuarr;': '\u21c8',
'Uuml': '\xdc',
'uuml': '\xfc',
'Uuml;': '\xdc',
'uuml;': '\xfc',
'uwangle;': '\u29a7',
'vangrt;': '\u299c',
'varepsilon;': '\u03f5',
'varkappa;': '\u03f0',
'varnothing;': '\u2205',
'varphi;': '\u03d5',
'varpi;': '\u03d6',
'varpropto;': '\u221d',
'vArr;': '\u21d5',
'varr;': '\u2195',
'varrho;': '\u03f1',
'varsigma;': '\u03c2',
'varsubsetneq;': '\u228a\ufe00',
'varsubsetneqq;': '\u2acb\ufe00',
'varsupsetneq;': '\u228b\ufe00',
'varsupsetneqq;': '\u2acc\ufe00',
'vartheta;': '\u03d1',
'vartriangleleft;': '\u22b2',
'vartriangleright;': '\u22b3',
'Vbar;': '\u2aeb',
'vBar;': '\u2ae8',
'vBarv;': '\u2ae9',
'Vcy;': '\u0412',
'vcy;': '\u0432',
'VDash;': '\u22ab',
'Vdash;': '\u22a9',
'vDash;': '\u22a8',
'vdash;': '\u22a2',
'Vdashl;': '\u2ae6',
'Vee;': '\u22c1',
'vee;': '\u2228',
'veebar;': '\u22bb',
'veeeq;': '\u225a',
'vellip;': '\u22ee',
'Verbar;': '\u2016',
'verbar;': '|',
'Vert;': '\u2016',
'vert;': '|',
'VerticalBar;': '\u2223',
'VerticalLine;': '|',
'VerticalSeparator;': '\u2758',
'VerticalTilde;': '\u2240',
'VeryThinSpace;': '\u200a',
'Vfr;': '\U0001d519',
'vfr;': '\U0001d533',
'vltri;': '\u22b2',
'vnsub;': '\u2282\u20d2',
'vnsup;': '\u2283\u20d2',
'Vopf;': '\U0001d54d',
'vopf;': '\U0001d567',
'vprop;': '\u221d',
'vrtri;': '\u22b3',
'Vscr;': '\U0001d4b1',
'vscr;': '\U0001d4cb',
'vsubnE;': '\u2acb\ufe00',
'vsubne;': '\u228a\ufe00',
'vsupnE;': '\u2acc\ufe00',
'vsupne;': '\u228b\ufe00',
'Vvdash;': '\u22aa',
'vzigzag;': '\u299a',
'Wcirc;': '\u0174',
'wcirc;': '\u0175',
'wedbar;': '\u2a5f',
'Wedge;': '\u22c0',
'wedge;': '\u2227',
'wedgeq;': '\u2259',
'weierp;': '\u2118',
'Wfr;': '\U0001d51a',
'wfr;': '\U0001d534',
'Wopf;': '\U0001d54e',
'wopf;': '\U0001d568',
'wp;': '\u2118',
'wr;': '\u2240',
'wreath;': '\u2240',
'Wscr;': '\U0001d4b2',
'wscr;': '\U0001d4cc',
'xcap;': '\u22c2',
'xcirc;': '\u25ef',
'xcup;': '\u22c3',
'xdtri;': '\u25bd',
'Xfr;': '\U0001d51b',
'xfr;': '\U0001d535',
'xhArr;': '\u27fa',
'xharr;': '\u27f7',
'Xi;': '\u039e',
'xi;': '\u03be',
'xlArr;': '\u27f8',
'xlarr;': '\u27f5',
'xmap;': '\u27fc',
'xnis;': '\u22fb',
'xodot;': '\u2a00',
'Xopf;': '\U0001d54f',
'xopf;': '\U0001d569',
'xoplus;': '\u2a01',
'xotime;': '\u2a02',
'xrArr;': '\u27f9',
'xrarr;': '\u27f6',
'Xscr;': '\U0001d4b3',
'xscr;': '\U0001d4cd',
'xsqcup;': '\u2a06',
'xuplus;': '\u2a04',
'xutri;': '\u25b3',
'xvee;': '\u22c1',
'xwedge;': '\u22c0',
'Yacute': '\xdd',
'yacute': '\xfd',
'Yacute;': '\xdd',
'yacute;': '\xfd',
'YAcy;': '\u042f',
'yacy;': '\u044f',
'Ycirc;': '\u0176',
'ycirc;': '\u0177',
'Ycy;': '\u042b',
'ycy;': '\u044b',
'yen': '\xa5',
'yen;': '\xa5',
'Yfr;': '\U0001d51c',
'yfr;': '\U0001d536',
'YIcy;': '\u0407',
'yicy;': '\u0457',
'Yopf;': '\U0001d550',
'yopf;': '\U0001d56a',
'Yscr;': '\U0001d4b4',
'yscr;': '\U0001d4ce',
'YUcy;': '\u042e',
'yucy;': '\u044e',
'yuml': '\xff',
'Yuml;': '\u0178',
'yuml;': '\xff',
'Zacute;': '\u0179',
'zacute;': '\u017a',
'Zcaron;': '\u017d',
'zcaron;': '\u017e',
'Zcy;': '\u0417',
'zcy;': '\u0437',
'Zdot;': '\u017b',
'zdot;': '\u017c',
'zeetrf;': '\u2128',
'ZeroWidthSpace;': '\u200b',
'Zeta;': '\u0396',
'zeta;': '\u03b6',
'Zfr;': '\u2128',
'zfr;': '\U0001d537',
'ZHcy;': '\u0416',
'zhcy;': '\u0436',
'zigrarr;': '\u21dd',
'Zopf;': '\u2124',
'zopf;': '\U0001d56b',
'Zscr;': '\U0001d4b5',
'zscr;': '\U0001d4cf',
'zwj;': '\u200d',
'zwnj;': '\u200c',
}
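# Illustrative lookup only (this table is the HTML5 named-entity map exported
# below as compat_html_entities_html5; note that some legacy names are valid
# both with and without the trailing semicolon):
#   >>> compat_html_entities_html5['zwnj;']
#   '\u200c'
#   >>> compat_html_entities_html5['yen'] == compat_html_entities_html5['yen;'] == '\xa5'
#   True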
try:
import http.client as compat_http_client
except ImportError: # Python 2
import httplib as compat_http_client
try:
from urllib.error import HTTPError as compat_HTTPError
except ImportError: # Python 2
from urllib2 import HTTPError as compat_HTTPError
try:
from urllib.request import urlretrieve as compat_urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve as compat_urlretrieve
try:
from html.parser import HTMLParser as compat_HTMLParser
except ImportError: # Python 2
from HTMLParser import HTMLParser as compat_HTMLParser
try: # Python 2
from HTMLParser import HTMLParseError as compat_HTMLParseError
except ImportError: # Python <3.4
try:
from html.parser import HTMLParseError as compat_HTMLParseError
except ImportError: # Python >3.4
# HTMLParseError was deprecated in Python 3.3 and removed in
# Python 3.5. Introduce a dummy exception for Python >= 3.5 to allow
# compatible and uniform cross-version exception handling
class compat_HTMLParseError(Exception):
pass
try:
from subprocess import DEVNULL
compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
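# Usage sketch (illustrative): discard a child process's output portably,
# whether or not subprocess.DEVNULL exists on this interpreter.
#   >>> import subprocess
#   >>> subprocess.call(['true'], stdout=compat_subprocess_get_DEVNULL())
#   0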
try:
import http.server as compat_http_server
except ImportError:
import BaseHTTPServer as compat_http_server
try:
compat_str = unicode # Python 2
except NameError:
compat_str = str
try:
from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes
from urllib.parse import unquote as compat_urllib_parse_unquote
from urllib.parse import unquote_plus as compat_urllib_parse_unquote_plus
except ImportError: # Python 2
_asciire = (compat_urllib_parse._asciire if hasattr(compat_urllib_parse, '_asciire')
else re.compile(r'([\x00-\x7f]+)'))
# HACK: The following are the correct unquote_to_bytes, unquote and unquote_plus
# implementations from cpython 3.4.3's stdlib. Python 2's version
# is apparently broken (see https://github.com/ytdl-org/youtube-dl/pull/6244)
def compat_urllib_parse_unquote_to_bytes(string):
"""unquote_to_bytes('abc%20def') -> b'abc def'."""
# Note: strings are encoded as UTF-8. This is only an issue if it contains
# unescaped non-ASCII characters, which URIs should not.
if not string:
# Is it a string-like object?
string.split
return b''
if isinstance(string, compat_str):
string = string.encode('utf-8')
bits = string.split(b'%')
if len(bits) == 1:
return string
res = [bits[0]]
append = res.append
for item in bits[1:]:
try:
append(compat_urllib_parse._hextochr[item[:2]])
append(item[2:])
except KeyError:
append(b'%')
append(item)
return b''.join(res)
def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
"""Replace %xx escapes by their single-character equivalent. The optional
encoding and errors parameters specify how to decode percent-encoded
sequences into Unicode characters, as accepted by the bytes.decode()
method.
By default, percent-encoded sequences are decoded with UTF-8, and invalid
sequences are replaced by a placeholder character.
unquote('abc%20def') -> 'abc def'.
"""
if '%' not in string:
string.split
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
bits = _asciire.split(string)
res = [bits[0]]
append = res.append
for i in range(1, len(bits), 2):
append(compat_urllib_parse_unquote_to_bytes(bits[i]).decode(encoding, errors))
append(bits[i + 1])
return ''.join(res)
def compat_urllib_parse_unquote_plus(string, encoding='utf-8', errors='replace'):
"""Like unquote(), but also replace plus signs by spaces, as required for
unquoting HTML form values.
unquote_plus('%7e/abc+def') -> '~/abc def'
"""
string = string.replace('+', ' ')
return compat_urllib_parse_unquote(string, encoding, errors)
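# Doctest-style sketch of the behaviour these backports guarantee on both
# Python 2 and 3 (examples taken from the docstrings above; reprs shown for
# Python 3):
#   >>> compat_urllib_parse_unquote_to_bytes('abc%20def')
#   b'abc def'
#   >>> compat_urllib_parse_unquote('abc%20def')
#   'abc def'
#   >>> compat_urllib_parse_unquote_plus('%7e/abc+def')
#   '~/abc def'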
try:
from urllib.parse import urlencode as compat_urllib_parse_urlencode
except ImportError: # Python 2
# Python 2's urlencode chokes on a mixture of byte and unicode strings.
# Possible solutions are to either port it from Python 3 with all its
# dependencies or to manually ensure the input query contains only byte
# strings. We stick with the latter and recursively encode the whole query.
def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'):
def encode_elem(e):
if isinstance(e, dict):
e = encode_dict(e)
elif isinstance(e, (list, tuple,)):
list_e = encode_list(e)
e = tuple(list_e) if isinstance(e, tuple) else list_e
elif isinstance(e, compat_str):
e = e.encode(encoding)
return e
def encode_dict(d):
return dict((encode_elem(k), encode_elem(v)) for k, v in d.items())
def encode_list(l):
return [encode_elem(e) for e in l]
return compat_urllib_parse.urlencode(encode_elem(query), doseq=doseq)
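# Illustrative call: on Python 2 the recursive encoder above lets a query that
# mixes text and byte strings be urlencoded without a UnicodeDecodeError
# (parameter order may vary with dict iteration order):
#   >>> compat_urllib_parse_urlencode({b'n': b'1', 'q': u'caf\xe9'})
#   'n=1&q=caf%C3%A9'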
try:
from urllib.request import DataHandler as compat_urllib_request_DataHandler
except ImportError: # Python < 3.4
# Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py
class compat_urllib_request_DataHandler(compat_urllib_request.BaseHandler):
def data_open(self, req):
# data URLs as specified in RFC 2397.
#
# ignores POSTed data
#
# syntax:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
url = req.get_full_url()
scheme, data = url.split(':', 1)
mediatype, data = data.split(',', 1)
# even base64 encoded data URLs might be quoted so unquote in any case:
data = compat_urllib_parse_unquote_to_bytes(data)
if mediatype.endswith(';base64'):
data = binascii.a2b_base64(data)
mediatype = mediatype[:-7]
if not mediatype:
mediatype = 'text/plain;charset=US-ASCII'
headers = email.message_from_string(
'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data)))
return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
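# Usage sketch (illustrative): with this handler installed, RFC 2397 data:
# URLs can be opened like any other URL.
#   >>> opener = compat_urllib_request.build_opener(compat_urllib_request_DataHandler())
#   >>> opener.open('data:text/plain;base64,aGVsbG8=').read()
#   b'hello'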
try:
compat_basestring = basestring # Python 2
except NameError:
compat_basestring = str
try:
compat_chr = unichr # Python 2
except NameError:
compat_chr = chr
try:
from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xml_parse_error
etree = xml.etree.ElementTree
class _TreeBuilder(etree.TreeBuilder):
def doctype(self, name, pubid, system):
pass
try:
# xml.etree.ElementTree.Element is a method in Python <=2.6 and
# the following will crash with:
# TypeError: isinstance() arg 2 must be a class, type, or tuple of classes and types
isinstance(None, xml.etree.ElementTree.Element)
from xml.etree.ElementTree import Element as compat_etree_Element
except TypeError: # Python <=2.6
from xml.etree.ElementTree import _ElementInterface as compat_etree_Element
if sys.version_info[0] >= 3:
def compat_etree_fromstring(text):
return etree.XML(text, parser=etree.XMLParser(target=_TreeBuilder()))
else:
# python 2.x tries to encode unicode strings with ascii (see the
# XMLParser._fixtext method)
try:
_etree_iter = etree.Element.iter
except AttributeError: # Python <=2.6
def _etree_iter(root):
for el in root.findall('*'):
yield el
for sub in _etree_iter(el):
yield sub
# On 2.6, etree.XML doesn't accept a parser argument; function copied from the
# CPython 2.7 source
def _XML(text, parser=None):
if not parser:
parser = etree.XMLParser(target=_TreeBuilder())
parser.feed(text)
return parser.close()
def _element_factory(*args, **kwargs):
el = etree.Element(*args, **kwargs)
for k, v in el.items():
if isinstance(v, bytes):
el.set(k, v.decode('utf-8'))
return el
def compat_etree_fromstring(text):
doc = _XML(text, parser=etree.XMLParser(target=_TreeBuilder(element_factory=_element_factory)))
for el in _etree_iter(doc):
if el.text is not None and isinstance(el.text, bytes):
el.text = el.text.decode('utf-8')
return doc
if hasattr(etree, 'register_namespace'):
compat_etree_register_namespace = etree.register_namespace
else:
def compat_etree_register_namespace(prefix, uri):
"""Register a namespace prefix.
The registry is global, and any existing mapping for either the
given prefix or the namespace URI will be removed.
*prefix* is the namespace prefix, *uri* is a namespace uri. Tags and
attributes in this namespace will be serialized with prefix if possible.
ValueError is raised if prefix is reserved or is invalid.
"""
if re.match(r"ns\d+$", prefix):
raise ValueError("Prefix format reserved for internal use")
for k, v in list(etree._namespace_map.items()):
if k == uri or v == prefix:
del etree._namespace_map[k]
etree._namespace_map[uri] = prefix
if sys.version_info < (2, 7):
# Here comes the crazy part: In 2.6, if the xpath is a unicode,
# .//node does not match if a node is a direct child of . !
def compat_xpath(xpath):
if isinstance(xpath, compat_str):
xpath = xpath.encode('ascii')
return xpath
else:
compat_xpath = lambda xpath: xpath
try:
from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
# HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
# Python 2's version is apparently totally broken
def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
qs, _coerce_result = qs, compat_str
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError('bad query field: %r' % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = compat_urllib_parse_unquote(
name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = compat_urllib_parse_unquote(
value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
parsed_result = {}
pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
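# Doctest-style sketch: as in the Python 3 original, repeated keys accumulate
# into lists and blank values are dropped unless keep_blank_values is set.
#   >>> compat_parse_qs('a=1&a=2&b=')
#   {'a': ['1', '2']}
#   >>> compat_parse_qs('a=1&a=2&b=', keep_blank_values=True)
#   {'a': ['1', '2'], 'b': ['']}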
compat_os_name = os._name if os.name == 'java' else os.name
if compat_os_name == 'nt':
def compat_shlex_quote(s):
return s if re.match(r'^[-_\w./]+$', s) else '"%s"' % s.replace('"', '\\"')
else:
try:
from shlex import quote as compat_shlex_quote
except ImportError: # Python < 3.3
def compat_shlex_quote(s):
if re.match(r'^[-_\w./]+$', s):
return s
else:
return "'" + s.replace("'", "'\"'\"'") + "'"
try:
args = shlex.split('中文')
assert (isinstance(args, list)
and isinstance(args[0], compat_str)
and args[0] == '中文')
compat_shlex_split = shlex.split
except (AssertionError, UnicodeEncodeError):
# Working around shlex issue with unicode strings on some python 2
# versions (see http://bugs.python.org/issue1548891)
def compat_shlex_split(s, comments=False, posix=True):
if isinstance(s, compat_str):
s = s.encode('utf-8')
return list(map(lambda s: s.decode('utf-8'), shlex.split(s, comments, posix)))
def compat_ord(c):
if type(c) is int:
return c
else:
return ord(c)
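# Illustrative: indexing a byte string yields a one-character str on Python 2
# but an int on Python 3; compat_ord normalises both to the integer value.
#   >>> compat_ord(b'abc'[0])
#   97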
if sys.version_info >= (3, 0):
compat_getenv = os.getenv
compat_expanduser = os.path.expanduser
def compat_setenv(key, value, env=os.environ):
env[key] = value
else:
# Environment variables should be decoded with filesystem encoding.
# Otherwise it will fail if any non-ASCII characters are present (see #3854 #3217 #2918)
def compat_getenv(key, default=None):
from .utils import get_filesystem_encoding
env = os.getenv(key, default)
if env:
env = env.decode(get_filesystem_encoding())
return env
def compat_setenv(key, value, env=os.environ):
def encode(v):
from .utils import get_filesystem_encoding
return v.encode(get_filesystem_encoding()) if isinstance(v, compat_str) else v
env[encode(key)] = encode(value)
# HACK: The default implementations of os.path.expanduser from cpython do not decode
# environment variables with filesystem encoding. We will work around this by
# providing adjusted implementations.
# The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
# for different platforms with correct environment variables decoding.
if compat_os_name == 'posix':
def compat_expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if not path.startswith('~'):
return path
i = path.find('/', 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
userhome = pwd.getpwuid(os.getuid()).pw_dir
else:
userhome = compat_getenv('HOME')
else:
import pwd
try:
pwent = pwd.getpwnam(path[1:i])
except KeyError:
return path
userhome = pwent.pw_dir
userhome = userhome.rstrip('/')
return (userhome + path[i:]) or '/'
elif compat_os_name in ('nt', 'ce'):
def compat_expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = compat_getenv('HOME')
elif 'USERPROFILE' in os.environ:
userhome = compat_getenv('USERPROFILE')
elif 'HOMEPATH' not in os.environ:
return path
else:
try:
drive = compat_getenv('HOMEDRIVE')
except KeyError:
drive = ''
userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
if i != 1: # ~user
userhome = os.path.join(os.path.dirname(userhome), path[1:i])
return userhome + path[i:]
else:
compat_expanduser = os.path.expanduser
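# Illustrative (assuming a POSIX home directory of /home/user):
#   >>> compat_expanduser('~/.config')
#   '/home/user/.config'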
if compat_os_name == 'nt' and sys.version_info < (3, 8):
# os.path.realpath on Windows does not follow symbolic links
# prior to Python 3.8 (see https://bugs.python.org/issue9949)
def compat_realpath(path):
while os.path.islink(path):
path = os.path.abspath(os.readlink(path))
return path
else:
compat_realpath = os.path.realpath
if sys.version_info < (3, 0):
def compat_print(s):
from .utils import preferredencoding
print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
def compat_print(s):
assert isinstance(s, compat_str)
print(s)
if sys.version_info < (3, 0) and sys.platform == 'win32':
def compat_getpass(prompt, *args, **kwargs):
if isinstance(prompt, compat_str):
from .utils import preferredencoding
prompt = prompt.encode(preferredencoding())
return getpass.getpass(prompt, *args, **kwargs)
else:
compat_getpass = getpass.getpass
try:
compat_input = raw_input
except NameError: # Python 3
compat_input = input
# Python < 2.6.5 requires kwargs to be bytes
try:
def _testfunc(x):
pass
_testfunc(**{'x': 0})
except TypeError:
def compat_kwargs(kwargs):
return dict((bytes(k), v) for k, v in kwargs.items())
else:
compat_kwargs = lambda kwargs: kwargs
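# Usage sketch (illustrative): wrap keyword dicts before **-expansion so calls
# also work on Python < 2.6.5, where keyword names must be byte strings.
#   >>> def f(timeout=None): return timeout
#   >>> f(**compat_kwargs({'timeout': 5}))
#   5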
try:
compat_numeric_types = (int, float, long, complex)
except NameError: # Python 3
compat_numeric_types = (int, float, complex)
try:
compat_integer_types = (int, long)
except NameError: # Python 3
compat_integer_types = (int, )
if sys.version_info < (2, 7):
def compat_socket_create_connection(address, timeout, source_address=None):
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error('getaddrinfo returns an empty list')
else:
compat_socket_create_connection = socket.create_connection
# Fix https://github.com/ytdl-org/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
op = optparse.OptionParser()
og = optparse.OptionGroup(op, 'foo')
try:
og.add_option('-t')
except TypeError:
real_add_option = optparse.OptionGroup.add_option
def _compat_add_option(self, *args, **kwargs):
enc = lambda v: (
v.encode('ascii', 'replace') if isinstance(v, compat_str)
else v)
bargs = [enc(a) for a in args]
bkwargs = dict(
(k, enc(v)) for k, v in kwargs.items())
return real_add_option(self, *bargs, **bkwargs)
optparse.OptionGroup.add_option = _compat_add_option
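# Usage sketch (illustrative): call this once at startup, before constructing
# the real option parser, so unicode help strings do not crash optparse on the
# affected Python versions.
#   >>> workaround_optparse_bug9161()
#   >>> parser = optparse.OptionParser(description=u'\u4e2d\u6587')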
if hasattr(shutil, 'get_terminal_size'): # Python >= 3.3
compat_get_terminal_size = shutil.get_terminal_size
else:
_terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])
def compat_get_terminal_size(fallback=(80, 24)):
columns = compat_getenv('COLUMNS')
if columns:
columns = int(columns)
else:
columns = None
lines = compat_getenv('LINES')
if lines:
lines = int(lines)
else:
lines = None
if columns is None or lines is None or columns <= 0 or lines <= 0:
try:
sp = subprocess.Popen(
['stty', 'size'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = sp.communicate()
_lines, _columns = map(int, out.split())
except Exception:
_columns, _lines = _terminal_size(*fallback)
if columns is None or columns <= 0:
columns = _columns
if lines is None or lines <= 0:
lines = _lines
return _terminal_size(columns, lines)
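# Illustrative call: returns a namedtuple matching Python 3's
# shutil.get_terminal_size(), falling back to the given (columns, lines) pair
# when the size cannot be determined.
#   >>> size = compat_get_terminal_size(fallback=(80, 24))
#   >>> (size.columns, size.lines)   # e.g. (80, 24)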
try:
itertools.count(start=0, step=1)
compat_itertools_count = itertools.count
except TypeError: # Python 2.6
def compat_itertools_count(start=0, step=1):
n = start
while True:
yield n
n += step
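# Illustrative: identical to itertools.count on modern Pythons, including the
# keyword arguments that Python 2.6 did not accept.
#   >>> c = compat_itertools_count(start=10, step=5)
#   >>> next(c), next(c)
#   (10, 15)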
if sys.version_info >= (3, 0):
from tokenize import tokenize as compat_tokenize_tokenize
else:
from tokenize import generate_tokens as compat_tokenize_tokenize
try:
struct.pack('!I', 0)
except TypeError:
# In Python 2.6 and 2.7.x < 2.7.7, struct requires a bytes argument
# See https://bugs.python.org/issue19099
def compat_struct_pack(spec, *args):
if isinstance(spec, compat_str):
spec = spec.encode('ascii')
return struct.pack(spec, *args)
def compat_struct_unpack(spec, *args):
if isinstance(spec, compat_str):
spec = spec.encode('ascii')
return struct.unpack(spec, *args)
class compat_Struct(struct.Struct):
def __init__(self, fmt):
if isinstance(fmt, compat_str):
fmt = fmt.encode('ascii')
super(compat_Struct, self).__init__(fmt)
else:
compat_struct_pack = struct.pack
compat_struct_unpack = struct.unpack
if platform.python_implementation() == 'IronPython' and sys.version_info < (2, 7, 8):
class compat_Struct(struct.Struct):
def unpack(self, string):
if not isinstance(string, buffer): # noqa: F821
string = buffer(string) # noqa: F821
return super(compat_Struct, self).unpack(string)
else:
compat_Struct = struct.Struct
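# Illustrative: a text (unicode) format spec works on every supported version.
#   >>> compat_struct_pack('!I', 1)
#   b'\x00\x00\x00\x01'
#   >>> compat_struct_unpack('!I', b'\x00\x00\x00\x01')
#   (1,)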
try:
from future_builtins import zip as compat_zip
except ImportError: # not 2.6+ or is 3.x
try:
from itertools import izip as compat_zip # < 2.5 or 3.x
except ImportError:
compat_zip = zip
if sys.version_info < (3, 3):
def compat_b64decode(s, *args, **kwargs):
if isinstance(s, compat_str):
s = s.encode('ascii')
return base64.b64decode(s, *args, **kwargs)
else:
compat_b64decode = base64.b64decode
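# Illustrative: str input is accepted on all supported versions, matching the
# Python >= 3.3 behaviour of base64.b64decode.
#   >>> compat_b64decode('aGVsbG8=')
#   b'hello'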
if platform.python_implementation() == 'PyPy' and sys.pypy_version_info < (5, 4, 0):
# PyPy2 prior to version 5.4.0 expects byte strings as Windows function
# names, see the original PyPy issue [1] and the youtube-dl one [2].
# 1. https://bitbucket.org/pypy/pypy/issues/2360/windows-ctypescdll-typeerror-function-name
# 2. https://github.com/ytdl-org/youtube-dl/pull/4392
def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
real = ctypes.WINFUNCTYPE(*args, **kwargs)
def resf(tpl, *args, **kwargs):
funcname, dll = tpl
return real((str(funcname), dll), *args, **kwargs)
return resf
else:
def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
return ctypes.WINFUNCTYPE(*args, **kwargs)
__all__ = [
'compat_HTMLParseError',
'compat_HTMLParser',
'compat_HTTPError',
'compat_Struct',
'compat_b64decode',
'compat_basestring',
'compat_chr',
'compat_cookiejar',
'compat_cookies',
'compat_ctypes_WINFUNCTYPE',
'compat_etree_Element',
'compat_etree_fromstring',
'compat_etree_register_namespace',
'compat_expanduser',
'compat_get_terminal_size',
'compat_getenv',
'compat_getpass',
'compat_html_entities',
'compat_html_entities_html5',
'compat_http_client',
'compat_http_server',
'compat_input',
'compat_integer_types',
'compat_itertools_count',
'compat_kwargs',
'compat_numeric_types',
'compat_ord',
'compat_os_name',
'compat_parse_qs',
'compat_print',
'compat_realpath',
'compat_setenv',
'compat_shlex_quote',
'compat_shlex_split',
'compat_socket_create_connection',
'compat_str',
'compat_struct_pack',
'compat_struct_unpack',
'compat_subprocess_get_DEVNULL',
'compat_tokenize_tokenize',
'compat_urllib_error',
'compat_urllib_parse',
'compat_urllib_parse_unquote',
'compat_urllib_parse_unquote_plus',
'compat_urllib_parse_unquote_to_bytes',
'compat_urllib_parse_urlencode',
'compat_urllib_parse_urlparse',
'compat_urllib_request',
'compat_urllib_request_DataHandler',
'compat_urllib_response',
'compat_urlparse',
'compat_urlretrieve',
'compat_xml_parse_error',
'compat_xpath',
'compat_zip',
'workaround_optparse_bug9161',
]
|
remitamine/youtube-dl
|
youtube_dl/compat.py
|
Python
|
unlicense
| 93,360
|
[
"Bowtie"
] |
8fdb4c807e592bc91cbd7e4a2fde2cf38646d382770b80d9dfb83c6de99f0f47
|
#!/usr/bin/env python
from setuptools import setup, find_packages
exec(open('yeadon/version.py').read())
setup(
name='yeadon',
version=__version__,
author='Chris Dembia',
author_email='chris530d@gmail.com',
url="https://github.com/chrisdembia/yeadon/",
description='Estimates the inertial properties of a human.',
long_description=open('README.rst').read(),
keywords="human inertia yeadon sports biomechanics gymnastics",
license='LICENSE.txt',
packages=find_packages(),
install_requires=['numpy>=1.6.1',
'pyyaml>=3.10'],
extras_require={'gui': ['mayavi>=4.0'],
'doc': ['sphinx', 'numpydoc']},
tests_require=['nose'],
test_suite='nose.collector',
include_package_data=True,
scripts=['bin/yeadon'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering :: Physics',
],
)
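# Illustrative install commands (assumption: a standard pip/setuptools
# workflow; the extra names come from extras_require above):
#   pip install yeadon          # core package
#   pip install yeadon[gui]     # with the optional Mayavi GUI
#   python setup.py test        # runs the nose collector declared above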
|
chrisdembia/yeadon
|
setup.py
|
Python
|
bsd-3-clause
| 1,238
|
[
"Mayavi"
] |
1298ffa7ddbaf5c863b6811fe9c100e5996e79fa03196e38db08da837fd4fc0d
|
# coding: utf-8
from __future__ import unicode_literals, division, print_function
import os
import datetime
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinit import events
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files', "abinit")
def ref_file(filename):
return os.path.join(_test_dir, filename)
def ref_files(*filenames):
return list(map(ref_file, filenames))
class EventsParserTest(PymatgenTest):
def test_mgb2_outputs(self):
"""Testing MgB2 output files."""
# Analyze scf log
parser = events.EventsParser()
report = parser.parse(ref_file("mgb2_scf.log"), verbose=1)
self.assertMSONable(report)
print(report)
assert (report.num_errors, report.num_warnings, report.num_comments) == (0, 0, 0)
assert report.run_completed
fmt = "%a %b %d %H:%M:%S %Y"
assert report.start_datetime == datetime.datetime.strptime("Fri Mar 13 20:08:51 2015", fmt)
assert report.end_datetime == datetime.datetime.strptime("Fri Mar 13 20:08:57 2015", fmt)
# Analyze nscf log
report = events.EventsParser().parse(ref_file("mgb2_nscf.log"), verbose=0)
assert (report.num_errors, report.num_warnings, report.num_comments) == (0, 2, 0)
self.assertMSONable(report)
#d = report.as_dict()
#print(d)
#assert 0
for i, warning in enumerate(report.warnings):
print(warning)
assert warning == report[i]
# MSONable conflicts with YAMLObject
#self.assertMSONable(warning, check_inst=False)
report = parser.report_exception(ref_file("mgb2_scf.log"), "exception")
assert len(report.errors) == 1
def test_parse_bad_yaml_doc(self):
"""Parsing Abinit log file with wrong YAML document."""
parser = events.EventsParser()
report = parser.parse(ref_file("badyaml.log"), verbose=1)
print(report)
assert not report.run_completed
assert (report.num_errors, report.num_warnings, report.num_comments) == (1, 1, 0)
# The event parser should have registered an AbinitYamlWarning and an AbinitYamlError
assert len(report.get_events_of_type(events.AbinitYamlWarning)) == 1
assert len(report.get_events_of_type(events.AbinitYamlError)) == 1
#assert 0
class EventHandlersTest(PymatgenTest):
def test_events(self):
# Autodoc
events.autodoc_event_handlers()
for cls in events.get_event_handler_classes():
# Test pickle
handler = cls()
self.serialize_with_pickle(handler, test_eq=False)
assert events.as_event_class(events.AbinitWarning) == events.AbinitWarning
assert events.as_event_class('!WARNING') == events.AbinitWarning
if __name__ == '__main__':
import unittest
unittest.main()
|
migueldiascosta/pymatgen
|
pymatgen/io/abinit/tests/test_events.py
|
Python
|
mit
| 2,940
|
[
"ABINIT",
"pymatgen"
] |
5767b7a7cb7e30c89e512ef026459e65848fd4d64660852a45efe2cd63f45283
|
from __future__ import absolute_import
from typing import Any, Dict, List, Set, Tuple, TypeVar, \
Union, Optional, Sequence, AbstractSet
from typing.re import Match
from zerver.lib.str_utils import NonBinaryStr
from django.db import models
from django.db.models.query import QuerySet
from django.db.models import Manager
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, UserManager, \
PermissionsMixin
import django.contrib.auth
from django.dispatch import receiver
from zerver.lib.cache import cache_with_key, flush_user_profile, flush_realm, \
user_profile_by_id_cache_key, user_profile_by_email_cache_key, \
generic_bulk_cached_fetch, cache_set, flush_stream, \
display_recipient_cache_key, cache_delete, \
get_stream_cache_key, active_user_dicts_in_realm_cache_key, \
active_bot_dicts_in_realm_cache_key, active_user_dict_fields, \
active_bot_dict_fields, flush_message
from zerver.lib.utils import make_safe_digest, generate_random_token
from zerver.lib.str_utils import ModelReprMixin
from django.db import transaction
from zerver.lib.camo import get_camo_url
from django.utils import timezone
from django.contrib.sessions.models import Session
from zerver.lib.timestamp import datetime_to_timestamp
from django.db.models.signals import pre_save, post_save, post_delete
from django.core.validators import MinLengthValidator, RegexValidator
from django.utils.translation import ugettext_lazy as _
from bitfield import BitField
from bitfield.types import BitHandler
from collections import defaultdict
from datetime import timedelta
import pylibmc
import re
import logging
from six import text_type
import time
import datetime
MAX_SUBJECT_LENGTH = 60
MAX_MESSAGE_LENGTH = 10000
MAX_LANGUAGE_ID_LENGTH = 50 # type: int
STREAM_NAMES = TypeVar('STREAM_NAMES', Sequence[text_type], AbstractSet[text_type])
# Doing 1000 remote cache requests to get_display_recipient is quite slow,
# so add a local cache in addition to the remote cache.
per_request_display_recipient_cache = {} # type: Dict[int, List[Dict[str, Any]]]
def get_display_recipient_by_id(recipient_id, recipient_type, recipient_type_id):
# type: (int, int, int) -> Union[text_type, List[Dict[str, Any]]]
if recipient_id not in per_request_display_recipient_cache:
result = get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id)
per_request_display_recipient_cache[recipient_id] = result
return per_request_display_recipient_cache[recipient_id]
def get_display_recipient(recipient):
# type: (Recipient) -> Union[text_type, List[Dict[str, Any]]]
return get_display_recipient_by_id(
recipient.id,
recipient.type,
recipient.type_id
)
def flush_per_request_caches():
# type: () -> None
global per_request_display_recipient_cache
per_request_display_recipient_cache = {}
global per_request_realm_filters_cache
per_request_realm_filters_cache = {}
@cache_with_key(lambda *args: display_recipient_cache_key(args[0]),
timeout=3600*24*7)
def get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id):
# type: (int, int, int) -> Union[text_type, List[Dict[str, Any]]]
"""
returns: an appropriate object describing the recipient. For a
stream this will be the stream name as a string. For a huddle or
personal, it will be an array of dicts about each recipient.
"""
if recipient_type == Recipient.STREAM:
stream = Stream.objects.get(id=recipient_type_id)
return stream.name
# We don't really care what the ordering is, just that it's deterministic.
user_profile_list = (UserProfile.objects.filter(subscription__recipient_id=recipient_id)
.select_related()
.order_by('email'))
return [{'email': user_profile.email,
'domain': user_profile.realm.domain,
'full_name': user_profile.full_name,
'short_name': user_profile.short_name,
'id': user_profile.id,
'is_mirror_dummy': user_profile.is_mirror_dummy,} for user_profile in user_profile_list]
def get_realm_emoji_cache_key(realm):
# type: (Realm) -> text_type
return u'realm_emoji:%s' % (realm.id,)
class Realm(ModelReprMixin, models.Model):
# domain is a domain in the Internet sense. It must be structured like a
# valid email domain. We use it to restrict access, identify bots, etc.
domain = models.CharField(max_length=40, db_index=True, unique=True) # type: text_type
# name is the user-visible identifier for the realm. It has no required
# structure.
AUTHENTICATION_FLAGS = [u'Google', u'Email', u'GitHub', u'LDAP', u'Dev', u'RemoteUser']
name = models.CharField(max_length=40, null=True) # type: Optional[text_type]
string_id = models.CharField(max_length=40, unique=True) # type: text_type
restricted_to_domain = models.BooleanField(default=False) # type: bool
invite_required = models.BooleanField(default=True) # type: bool
invite_by_admins_only = models.BooleanField(default=False) # type: bool
create_stream_by_admins_only = models.BooleanField(default=False) # type: bool
mandatory_topics = models.BooleanField(default=False) # type: bool
show_digest_email = models.BooleanField(default=True) # type: bool
name_changes_disabled = models.BooleanField(default=False) # type: bool
allow_message_editing = models.BooleanField(default=True) # type: bool
DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS = 600 # if changed, also change in admin.js
message_content_edit_limit_seconds = models.IntegerField(default=DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS) # type: int
message_retention_days = models.IntegerField(null=True) # type: Optional[int]
# Valid org_types are {CORPORATE, COMMUNITY}
CORPORATE = 1
COMMUNITY = 2
org_type = models.PositiveSmallIntegerField(default=COMMUNITY) # type: int
date_created = models.DateTimeField(default=timezone.now) # type: datetime.datetime
notifications_stream = models.ForeignKey('Stream', related_name='+', null=True, blank=True) # type: Optional[Stream]
deactivated = models.BooleanField(default=False) # type: bool
default_language = models.CharField(default=u'en', max_length=MAX_LANGUAGE_ID_LENGTH) # type: text_type
authentication_methods = BitField(flags=AUTHENTICATION_FLAGS,
default=2**31 - 1) # type: BitHandler
DEFAULT_NOTIFICATION_STREAM_NAME = u'announce'
def authentication_methods_dict(self):
# type: () -> Dict[text_type, bool]
"""Returns the a mapping from authentication flags to their status,
showing only those authentication flags that are supported on
the current server (i.e. if EmailAuthBackend is not configured
on the server, this will not return an entry for "Email")."""
# This mapping needs to be imported from here due to the cyclic
# dependency.
from zproject.backends import AUTH_BACKEND_NAME_MAP
ret = {} # type: Dict[text_type, bool]
supported_backends = {backend.__class__ for backend in django.contrib.auth.get_backends()}
for k, v in self.authentication_methods.iteritems():
backend = AUTH_BACKEND_NAME_MAP[k]
if backend in supported_backends:
ret[k] = v
return ret
def __unicode__(self):
# type: () -> text_type
return u"<Realm: %s %s>" % (self.domain, self.id)
@cache_with_key(get_realm_emoji_cache_key, timeout=3600*24*7)
def get_emoji(self):
# type: () -> Dict[text_type, Dict[str, text_type]]
return get_realm_emoji_uncached(self)
@property
def deployment(self):
# type: () -> Any # returns a Deployment from zilencer.models
# see https://github.com/zulip/zulip/issues/1845 before you
# attempt to add test coverage for this method, as we may
# be revisiting the deployments model soon
try:
return self._deployments.all()[0]
except IndexError:
return None
@deployment.setter # type: ignore # https://github.com/python/mypy/issues/220
def set_deployments(self, value):
# type: (Any) -> None
self._deployments = [value] # type: Any
def get_admin_users(self):
# type: () -> Sequence[UserProfile]
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_realm_admin=True,
is_active=True).select_related()
def get_active_users(self):
# type: () -> Sequence[UserProfile]
# TODO: Change return type to QuerySet[UserProfile]
return UserProfile.objects.filter(realm=self, is_active=True).select_related()
@property
def subdomain(self):
# type: () -> text_type
if settings.REALMS_HAVE_SUBDOMAINS:
return self.string_id
return None
@property
def uri(self):
# type: () -> str
if settings.REALMS_HAVE_SUBDOMAINS and self.subdomain is not None:
return '%s%s.%s' % (settings.EXTERNAL_URI_SCHEME,
self.subdomain, settings.EXTERNAL_HOST)
return settings.SERVER_URI
@property
def host(self):
# type: () -> str
if settings.REALMS_HAVE_SUBDOMAINS and self.subdomain is not None:
return "%s.%s" % (self.subdomain, settings.EXTERNAL_HOST)
return settings.EXTERNAL_HOST
@property
def is_zephyr_mirror_realm(self):
# type: () -> bool
return self.domain == "mit.edu"
@property
def webathena_enabled(self):
# type: () -> bool
return self.is_zephyr_mirror_realm
@property
def presence_disabled(self):
# type: () -> bool
return self.is_zephyr_mirror_realm
class Meta(object):
permissions = (
('administer', "Administer a realm"),
('api_super_user', "Can send messages as other users for mirroring"),
)
post_save.connect(flush_realm, sender=Realm)
def get_realm(domain):
# type: (text_type) -> Optional[Realm]
if not domain:
return None
try:
return Realm.objects.get(domain__iexact=domain.strip())
except Realm.DoesNotExist:
return None
# Added to assist with the domain to string_id transition. Will eventually
# be renamed and replace get_realm.
def get_realm_by_string_id(string_id):
# type: (text_type) -> Optional[Realm]
if not string_id:
return None
try:
return Realm.objects.get(string_id=string_id)
except Realm.DoesNotExist:
return None
def completely_open(domain):
# type: (text_type) -> bool
# This domain is completely open to everyone on the internet to
# join. E-mail addresses do not need to match the domain and
# an invite from an existing user is not required.
realm = get_realm(domain)
if not realm:
return False
return not realm.invite_required and not realm.restricted_to_domain
def get_unique_open_realm():
# type: () -> Optional[Realm]
"""We only return a realm if there is a unique non-system-only realm,
it is completely open, and there are no subdomains."""
if settings.REALMS_HAVE_SUBDOMAINS:
return None
realms = Realm.objects.filter(deactivated=False)
# On production installations, the (usually "zulip.com") system
# realm is an empty realm just used for system bots, so don't
# include it in this accounting.
realms = realms.exclude(domain__in=settings.SYSTEM_ONLY_REALMS)
if len(realms) != 1:
return None
realm = realms[0]
if realm.invite_required or realm.restricted_to_domain:
return None
return realm
def name_changes_disabled(realm):
# type: (Optional[Realm]) -> bool
if realm is None:
return settings.NAME_CHANGES_DISABLED
return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled
class RealmAlias(models.Model):
realm = models.ForeignKey(Realm, null=True) # type: Optional[Realm]
# should always be stored lowercase
domain = models.CharField(max_length=80, db_index=True) # type: text_type
def can_add_alias(domain):
# type: (text_type) -> bool
if settings.REALMS_HAVE_SUBDOMAINS:
return True
if RealmAlias.objects.filter(domain=domain).exists():
return False
return True
# These functions should only be used on email addresses that have
# been validated via django.core.validators.validate_email
#
# Note that we need to use some care, since you can have multiple @-signs; e.g.
#     "tabbott@test"@zulip.com
# is a valid email address.
def email_to_username(email):
# type: (text_type) -> text_type
return "@".join(email.split("@")[:-1]).lower()
# Returns the raw domain portion of the desired email address
def email_to_domain(email):
# type: (text_type) -> text_type
return email.split("@")[-1].lower()
class GetRealmByDomainException(Exception):
pass
def get_realm_by_email_domain(email):
# type: (text_type) -> Optional[Realm]
if settings.REALMS_HAVE_SUBDOMAINS:
raise GetRealmByDomainException(
"Cannot get realm from email domain when settings.REALMS_HAVE_SUBDOMAINS = True")
try:
alias = RealmAlias.objects.select_related('realm').get(domain = email_to_domain(email))
return alias.realm
except RealmAlias.DoesNotExist:
return None
# Is a user with the given email address allowed to be in the given realm?
# (This function does not check whether the user has been invited to the realm.
# So for invite-only realms, this is the test for whether a user can be invited,
# not whether the user can sign up currently.)
def email_allowed_for_realm(email, realm):
# type: (text_type, Realm) -> bool
if not realm.restricted_to_domain:
return True
domain = email_to_domain(email)
return RealmAlias.objects.filter(realm = realm, domain = domain).exists()
def list_of_domains_for_realm(realm):
# type: (Realm) -> List[text_type]
return list(RealmAlias.objects.filter(realm = realm).values_list('domain', flat=True))
class RealmEmoji(ModelReprMixin, models.Model):
realm = models.ForeignKey(Realm) # type: Realm
# Second part of the regex (negative lookbehind) disallows names ending with one of the punctuation characters
name = models.TextField(validators=[MinLengthValidator(1),
RegexValidator(regex=r'^[0-9a-zA-Z.\-_]+(?<![.\-_])$',
message=_("Invalid characters in Emoji name"))]) # type: text_type
    # URLs longer than about 2000 characters start having browser
    # compatibility problems, so 1000 seems like a safe limit.
img_url = models.URLField(max_length=1000) # type: text_type
class Meta(object):
unique_together = ("realm", "name")
def __unicode__(self):
# type: () -> text_type
return u"<RealmEmoji(%s): %s %s>" % (self.realm.domain, self.name, self.img_url)
def get_realm_emoji_uncached(realm):
# type: (Realm) -> Dict[text_type, Dict[str, text_type]]
d = {}
for row in RealmEmoji.objects.filter(realm=realm):
d[row.name] = dict(source_url=row.img_url,
display_url=get_camo_url(row.img_url))
return d
def flush_realm_emoji(sender, **kwargs):
# type: (Any, **Any) -> None
realm = kwargs['instance'].realm
cache_set(get_realm_emoji_cache_key(realm),
get_realm_emoji_uncached(realm),
timeout=3600*24*7)
post_save.connect(flush_realm_emoji, sender=RealmEmoji)
post_delete.connect(flush_realm_emoji, sender=RealmEmoji)
class RealmFilter(ModelReprMixin, models.Model):
realm = models.ForeignKey(Realm) # type: Realm
pattern = models.TextField() # type: text_type
url_format_string = models.TextField() # type: text_type
class Meta(object):
unique_together = ("realm", "pattern")
def __unicode__(self):
# type: () -> text_type
return u"<RealmFilter(%s): %s %s>" % (self.realm.domain, self.pattern, self.url_format_string)
def get_realm_filters_cache_key(domain):
# type: (text_type) -> text_type
return u'all_realm_filters:%s' % (domain,)
# We have a per-process cache to avoid doing 1000 remote cache queries during page load
per_request_realm_filters_cache = {} # type: Dict[text_type, List[Tuple[text_type, text_type]]]
def domain_in_local_realm_filters_cache(domain):
# type: (text_type) -> bool
return domain in per_request_realm_filters_cache
def realm_filters_for_domain(domain):
# type: (text_type) -> List[Tuple[text_type, text_type]]
domain = domain.lower()
if not domain_in_local_realm_filters_cache(domain):
per_request_realm_filters_cache[domain] = realm_filters_for_domain_remote_cache(domain)
return per_request_realm_filters_cache[domain]
@cache_with_key(get_realm_filters_cache_key, timeout=3600*24*7)
def realm_filters_for_domain_remote_cache(domain):
# type: (text_type) -> List[Tuple[text_type, text_type]]
filters = []
for realm_filter in RealmFilter.objects.filter(realm=get_realm(domain)):
filters.append((realm_filter.pattern, realm_filter.url_format_string))
return filters
def all_realm_filters():
# type: () -> Dict[text_type, List[Tuple[text_type, text_type]]]
filters = defaultdict(list) # type: Dict[text_type, List[Tuple[text_type, text_type]]]
for realm_filter in RealmFilter.objects.all():
filters[realm_filter.realm.domain].append((realm_filter.pattern, realm_filter.url_format_string))
return filters
def flush_realm_filter(sender, **kwargs):
# type: (Any, **Any) -> None
realm = kwargs['instance'].realm
cache_delete(get_realm_filters_cache_key(realm.domain))
try:
per_request_realm_filters_cache.pop(realm.domain.lower())
except KeyError:
pass
post_save.connect(flush_realm_filter, sender=RealmFilter)
post_delete.connect(flush_realm_filter, sender=RealmFilter)
class UserProfile(ModelReprMixin, AbstractBaseUser, PermissionsMixin):
DEFAULT_BOT = 1
"""
Incoming webhook bots are limited to only sending messages via webhooks.
Thus, it is less of a security risk to expose their API keys to third-party services,
since they can't be used to read messages.
"""
INCOMING_WEBHOOK_BOT = 2
# Fields from models.AbstractUser minus last_name and first_name,
# which we don't use; email is modified to make it indexed and unique.
email = models.EmailField(blank=False, db_index=True, unique=True) # type: text_type
is_staff = models.BooleanField(default=False) # type: bool
is_active = models.BooleanField(default=True, db_index=True) # type: bool
is_realm_admin = models.BooleanField(default=False, db_index=True) # type: bool
is_bot = models.BooleanField(default=False, db_index=True) # type: bool
bot_type = models.PositiveSmallIntegerField(null=True, db_index=True) # type: Optional[int]
is_api_super_user = models.BooleanField(default=False, db_index=True) # type: bool
date_joined = models.DateTimeField(default=timezone.now) # type: datetime.datetime
is_mirror_dummy = models.BooleanField(default=False) # type: bool
bot_owner = models.ForeignKey('self', null=True, on_delete=models.SET_NULL) # type: Optional[UserProfile]
USERNAME_FIELD = 'email'
MAX_NAME_LENGTH = 100
# Our custom site-specific fields
full_name = models.CharField(max_length=MAX_NAME_LENGTH) # type: text_type
short_name = models.CharField(max_length=MAX_NAME_LENGTH) # type: text_type
# pointer points to Message.id, NOT UserMessage.id.
pointer = models.IntegerField() # type: int
last_pointer_updater = models.CharField(max_length=64) # type: text_type
realm = models.ForeignKey(Realm) # type: Realm
api_key = models.CharField(max_length=32) # type: text_type
tos_version = models.CharField(null=True, max_length=10) # type: text_type
### Notifications settings. ###
# Stream notifications.
enable_stream_desktop_notifications = models.BooleanField(default=False) # type: bool
enable_stream_sounds = models.BooleanField(default=False) # type: bool
# PM + @-mention notifications.
enable_desktop_notifications = models.BooleanField(default=True) # type: bool
enable_sounds = models.BooleanField(default=True) # type: bool
enable_offline_email_notifications = models.BooleanField(default=True) # type: bool
enable_offline_push_notifications = models.BooleanField(default=True) # type: bool
enable_online_push_notifications = models.BooleanField(default=False) # type: bool
enable_digest_emails = models.BooleanField(default=True) # type: bool
# Old notification field superseded by existence of stream notification
# settings.
default_desktop_notifications = models.BooleanField(default=True) # type: bool
###
last_reminder = models.DateTimeField(default=timezone.now, null=True) # type: Optional[datetime.datetime]
rate_limits = models.CharField(default=u"", max_length=100) # type: text_type # comma-separated list of range:max pairs
# Default streams
default_sending_stream = models.ForeignKey('zerver.Stream', null=True, related_name='+') # type: Optional[Stream]
default_events_register_stream = models.ForeignKey('zerver.Stream', null=True, related_name='+') # type: Optional[Stream]
default_all_public_streams = models.BooleanField(default=False) # type: bool
# UI vars
enter_sends = models.NullBooleanField(default=True) # type: Optional[bool]
autoscroll_forever = models.BooleanField(default=False) # type: bool
left_side_userlist = models.BooleanField(default=False) # type: bool
# display settings
twenty_four_hour_time = models.BooleanField(default=False) # type: bool
default_language = models.CharField(default=u'en', max_length=MAX_LANGUAGE_ID_LENGTH) # type: text_type
# Hours to wait before sending another email to a user
EMAIL_REMINDER_WAITPERIOD = 24
# Minutes to wait before warning a bot owner that her bot sent a message
# to a nonexistent stream
BOT_OWNER_STREAM_ALERT_WAITPERIOD = 1
AVATAR_FROM_GRAVATAR = u'G'
AVATAR_FROM_USER = u'U'
AVATAR_SOURCES = (
(AVATAR_FROM_GRAVATAR, 'Hosted by Gravatar'),
(AVATAR_FROM_USER, 'Uploaded by user'),
)
avatar_source = models.CharField(default=AVATAR_FROM_GRAVATAR, choices=AVATAR_SOURCES, max_length=1) # type: text_type
TUTORIAL_WAITING = u'W'
TUTORIAL_STARTED = u'S'
TUTORIAL_FINISHED = u'F'
TUTORIAL_STATES = ((TUTORIAL_WAITING, "Waiting"),
(TUTORIAL_STARTED, "Started"),
(TUTORIAL_FINISHED, "Finished"))
tutorial_status = models.CharField(default=TUTORIAL_WAITING, choices=TUTORIAL_STATES, max_length=1) # type: text_type
# Contains serialized JSON of the form:
# [("step 1", true), ("step 2", false)]
# where the second element of each tuple is if the step has been
# completed.
onboarding_steps = models.TextField(default=u'[]') # type: text_type
invites_granted = models.IntegerField(default=0) # type: int
invites_used = models.IntegerField(default=0) # type: int
alert_words = models.TextField(default=u'[]') # type: text_type # json-serialized list of strings
# Contains serialized JSON of the form:
# [["social", "mit"], ["devel", "ios"]]
muted_topics = models.TextField(default=u'[]') # type: text_type
objects = UserManager() # type: UserManager
def can_admin_user(self, target_user):
# type: (UserProfile) -> bool
"""Returns whether this user has permission to modify target_user"""
if target_user.bot_owner == self:
return True
elif self.is_realm_admin and self.realm == target_user.realm:
return True
else:
return False
def __unicode__(self):
# type: () -> text_type
return u"<UserProfile: %s %s>" % (self.email, self.realm)
@property
def is_incoming_webhook(self):
# type: () -> bool
return self.bot_type == UserProfile.INCOMING_WEBHOOK_BOT
@staticmethod
def emails_from_ids(user_ids):
# type: (Sequence[int]) -> Dict[int, text_type]
rows = UserProfile.objects.filter(id__in=user_ids).values('id', 'email')
return {row['id']: row['email'] for row in rows}
def can_create_streams(self):
# type: () -> bool
if self.is_realm_admin or not self.realm.create_stream_by_admins_only:
return True
else:
return False
def major_tos_version(self):
# type: () -> int
if self.tos_version is not None:
return int(self.tos_version.split('.')[0])
else:
return -1
def receives_offline_notifications(user_profile):
# type: (UserProfile) -> bool
return ((user_profile.enable_offline_email_notifications or
user_profile.enable_offline_push_notifications) and
not user_profile.is_bot)
def receives_online_notifications(user_profile):
# type: (UserProfile) -> bool
return (user_profile.enable_online_push_notifications and
not user_profile.is_bot)
def remote_user_to_email(remote_user):
# type: (text_type) -> text_type
if settings.SSO_APPEND_DOMAIN is not None:
remote_user += "@" + settings.SSO_APPEND_DOMAIN
return remote_user
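# Example (illustrative): with SSO_APPEND_DOMAIN = 'example.com', a
# REMOTE_USER of u'alice' maps to u'alice@example.com'; with
# SSO_APPEND_DOMAIN = None the value is passed through unchanged.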
# Make sure we flush the UserProfile object from our remote cache
# whenever we save it.
post_save.connect(flush_user_profile, sender=UserProfile)
class PreregistrationUser(models.Model):
email = models.EmailField() # type: text_type
    referred_by = models.ForeignKey(UserProfile, null=True) # type: Optional[UserProfile]
streams = models.ManyToManyField('Stream') # type: Manager
invited_at = models.DateTimeField(auto_now=True) # type: datetime.datetime
    realm_creation = models.BooleanField(default=False) # type: bool
# status: whether an object has been confirmed.
# if confirmed, set to confirmation.settings.STATUS_ACTIVE
status = models.IntegerField(default=0) # type: int
realm = models.ForeignKey(Realm, null=True) # type: Optional[Realm]
class PushDeviceToken(models.Model):
APNS = 1
GCM = 2
KINDS = (
(APNS, 'apns'),
(GCM, 'gcm'),
)
kind = models.PositiveSmallIntegerField(choices=KINDS) # type: int
# The token is a unique device-specific token that is
# sent to us from each device:
# - APNS token if kind == APNS
# - GCM registration id if kind == GCM
token = models.CharField(max_length=4096, unique=True) # type: text_type
last_updated = models.DateTimeField(auto_now=True) # type: datetime.datetime
    # The user whose device this is
user = models.ForeignKey(UserProfile, db_index=True) # type: UserProfile
# [optional] Contains the app id of the device if it is an iOS device
ios_app_id = models.TextField(null=True) # type: Optional[text_type]
def generate_email_token_for_stream():
# type: () -> text_type
return generate_random_token(32)
class Stream(ModelReprMixin, models.Model):
MAX_NAME_LENGTH = 60
name = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True) # type: text_type
realm = models.ForeignKey(Realm, db_index=True) # type: Realm
invite_only = models.NullBooleanField(default=False) # type: Optional[bool]
    # Used by the e-mail forwarder. The e-mail RFC specifies a maximum
    # e-mail length of 254, and our max stream name length is 60, so we
    # have plenty of room for the token.
email_token = models.CharField(
max_length=32, default=generate_email_token_for_stream) # type: text_type
description = models.CharField(max_length=1024, default=u'') # type: text_type
date_created = models.DateTimeField(default=timezone.now) # type: datetime.datetime
deactivated = models.BooleanField(default=False) # type: bool
def __unicode__(self):
# type: () -> text_type
return u"<Stream: %s>" % (self.name,)
def is_public(self):
# type: () -> bool
# All streams are private in Zephyr mirroring realms.
return not self.invite_only and not self.realm.is_zephyr_mirror_realm
class Meta(object):
unique_together = ("name", "realm")
def num_subscribers(self):
# type: () -> int
return Subscription.objects.filter(
recipient__type=Recipient.STREAM,
recipient__type_id=self.id,
user_profile__is_active=True,
active=True
).count()
# This is stream information that is sent to clients
def to_dict(self):
# type: () -> Dict[str, Any]
return dict(name=self.name,
stream_id=self.id,
description=self.description,
invite_only=self.invite_only)
post_save.connect(flush_stream, sender=Stream)
post_delete.connect(flush_stream, sender=Stream)
def valid_stream_name(name):
# type: (text_type) -> bool
return name != ""
# The Recipient table is used to map Messages to the set of users who
# received the message. It is implemented as a set of triples (id,
# type_id, type). We have 3 types of recipients: Huddles (for group
# private messages), UserProfiles (for 1:1 private messages), and
# Streams. The recipient table maps a globally unique recipient id
# (used by the Message table) to the type-specific unique id (the
# stream id, user_profile id, or huddle id).
class Recipient(ModelReprMixin, models.Model):
type_id = models.IntegerField(db_index=True) # type: int
type = models.PositiveSmallIntegerField(db_index=True) # type: int
# Valid types are {personal, stream, huddle}
PERSONAL = 1
STREAM = 2
HUDDLE = 3
class Meta(object):
unique_together = ("type", "type_id")
# N.B. If we used Django's choice=... we would get this for free (kinda)
_type_names = {
PERSONAL: 'personal',
STREAM: 'stream',
HUDDLE: 'huddle' }
def type_name(self):
# type: () -> str
# Raises KeyError if invalid
return self._type_names[self.type]
def __unicode__(self):
# type: () -> text_type
display_recipient = get_display_recipient(self)
return u"<Recipient: %s (%d, %s)>" % (display_recipient, self.type_id, self.type)
class Client(ModelReprMixin, models.Model):
name = models.CharField(max_length=30, db_index=True, unique=True) # type: text_type
def __unicode__(self):
# type: () -> text_type
return u"<Client: %s>" % (self.name,)
get_client_cache = {} # type: Dict[text_type, Client]
def get_client(name):
# type: (text_type) -> Client
if name not in get_client_cache:
result = get_client_remote_cache(name)
get_client_cache[name] = result
return get_client_cache[name]
def get_client_cache_key(name):
# type: (text_type) -> text_type
return u'get_client:%s' % (make_safe_digest(name),)
@cache_with_key(get_client_cache_key, timeout=3600*24*7)
def get_client_remote_cache(name):
# type: (text_type) -> Client
(client, _) = Client.objects.get_or_create(name=name)
return client
# get_stream_backend takes a stream name and a Realm object
@cache_with_key(get_stream_cache_key, timeout=3600*24*7)
def get_stream_backend(stream_name, realm):
# type: (text_type, Realm) -> Stream
return Stream.objects.select_related("realm").get(
name__iexact=stream_name.strip(), realm_id=realm.id)
def get_active_streams(realm):
# type: (Realm) -> QuerySet
"""
Return all streams (including invite-only streams) that have not been deactivated.
"""
return Stream.objects.filter(realm=realm, deactivated=False)
def get_stream(stream_name, realm):
# type: (text_type, Realm) -> Optional[Stream]
try:
return get_stream_backend(stream_name, realm)
except Stream.DoesNotExist:
return None
def bulk_get_streams(realm, stream_names):
# type: (Realm, STREAM_NAMES) -> Dict[text_type, Any]
def fetch_streams_by_name(stream_names):
# type: (List[text_type]) -> Sequence[Stream]
#
# This should be just
#
# Stream.objects.select_related("realm").filter(name__iexact__in=stream_names,
# realm_id=realm_id)
#
# But chaining __in and __iexact doesn't work with Django's
# ORM, so we have the following hack to construct the relevant where clause
if len(stream_names) == 0:
return []
upper_list = ", ".join(["UPPER(%s)"] * len(stream_names))
where_clause = "UPPER(zerver_stream.name::text) IN (%s)" % (upper_list,)
        return get_active_streams(realm).select_related("realm").extra(
where=[where_clause],
params=stream_names)
return generic_bulk_cached_fetch(lambda stream_name: get_stream_cache_key(stream_name, realm),
fetch_streams_by_name,
[stream_name.lower() for stream_name in stream_names],
id_fetcher=lambda stream: stream.name.lower())
def get_recipient_cache_key(type, type_id):
# type: (int, int) -> text_type
return u"get_recipient:%s:%s" % (type, type_id,)
@cache_with_key(get_recipient_cache_key, timeout=3600*24*7)
def get_recipient(type, type_id):
# type: (int, int) -> Recipient
return Recipient.objects.get(type_id=type_id, type=type)
def bulk_get_recipients(type, type_ids):
# type: (int, List[int]) -> Dict[int, Any]
def cache_key_function(type_id):
# type: (int) -> text_type
return get_recipient_cache_key(type, type_id)
def query_function(type_ids):
# type: (List[int]) -> Sequence[Recipient]
# TODO: Change return type to QuerySet[Recipient]
return Recipient.objects.filter(type=type, type_id__in=type_ids)
return generic_bulk_cached_fetch(cache_key_function, query_function, type_ids,
id_fetcher=lambda recipient: recipient.type_id)
class Message(ModelReprMixin, models.Model):
sender = models.ForeignKey(UserProfile) # type: UserProfile
recipient = models.ForeignKey(Recipient) # type: Recipient
subject = models.CharField(max_length=MAX_SUBJECT_LENGTH, db_index=True) # type: text_type
content = models.TextField() # type: text_type
rendered_content = models.TextField(null=True) # type: Optional[text_type]
rendered_content_version = models.IntegerField(null=True) # type: Optional[int]
pub_date = models.DateTimeField('date published', db_index=True) # type: datetime.datetime
sending_client = models.ForeignKey(Client) # type: Client
last_edit_time = models.DateTimeField(null=True) # type: Optional[datetime.datetime]
edit_history = models.TextField(null=True) # type: Optional[text_type]
has_attachment = models.BooleanField(default=False, db_index=True) # type: bool
has_image = models.BooleanField(default=False, db_index=True) # type: bool
has_link = models.BooleanField(default=False, db_index=True) # type: bool
def topic_name(self):
# type: () -> text_type
"""
Please start using this helper to facilitate an
eventual switch over to a separate topic table.
"""
return self.subject
def __unicode__(self):
# type: () -> text_type
display_recipient = get_display_recipient(self.recipient)
return u"<Message: %s / %s / %r>" % (display_recipient, self.subject, self.sender)
def get_realm(self):
# type: () -> Realm
return self.sender.realm
def save_rendered_content(self):
# type: () -> None
self.save(update_fields=["rendered_content", "rendered_content_version"])
@staticmethod
def need_to_render_content(rendered_content, rendered_content_version, bugdown_version):
# type: (Optional[text_type], int, int) -> bool
return rendered_content is None or rendered_content_version < bugdown_version
def to_log_dict(self):
# type: () -> Dict[str, Any]
return dict(
id = self.id,
sender_id = self.sender.id,
sender_email = self.sender.email,
sender_domain = self.sender.realm.domain,
sender_full_name = self.sender.full_name,
sender_short_name = self.sender.short_name,
sending_client = self.sending_client.name,
type = self.recipient.type_name(),
recipient = get_display_recipient(self.recipient),
subject = self.topic_name(),
content = self.content,
timestamp = datetime_to_timestamp(self.pub_date))
@staticmethod
def get_raw_db_rows(needed_ids):
# type: (List[int]) -> List[Dict[str, Any]]
# This is a special purpose function optimized for
# callers like get_old_messages_backend().
fields = [
'id',
'subject',
'pub_date',
'last_edit_time',
'edit_history',
'content',
'rendered_content',
'rendered_content_version',
'recipient_id',
'recipient__type',
'recipient__type_id',
'sender_id',
'sending_client__name',
'sender__email',
'sender__full_name',
'sender__short_name',
'sender__realm__id',
'sender__realm__domain',
'sender__avatar_source',
'sender__is_mirror_dummy',
]
return Message.objects.filter(id__in=needed_ids).values(*fields)
def sent_by_human(self):
# type: () -> bool
sending_client = self.sending_client.name.lower()
return (sending_client in ('zulipandroid', 'zulipios', 'zulipdesktop',
'website', 'ios', 'android')) or \
('desktop app' in sending_client)
@staticmethod
def content_has_attachment(content):
        # type: (text_type) -> Optional[Match]
return re.search(r'[/\-]user[\-_]uploads[/\.-]', content)
@staticmethod
def content_has_image(content):
# type: (text_type) -> bool
return bool(re.search(r'[/\-]user[\-_]uploads[/\.-]\S+\.(bmp|gif|jpg|jpeg|png|webp)', content, re.IGNORECASE))
@staticmethod
def content_has_link(content):
# type: (text_type) -> bool
return ('http://' in content or
'https://' in content or
'/user_uploads' in content or
(settings.ENABLE_FILE_LINKS and 'file:///' in content))
@staticmethod
def is_status_message(content, rendered_content):
# type: (text_type, text_type) -> bool
"""
Returns True if content and rendered_content are from 'me_message'
"""
if content.startswith('/me ') and '\n' not in content:
if rendered_content.startswith('<p>') and rendered_content.endswith('</p>'):
return True
return False
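    # Example (illustrative):
    #
    #     Message.is_status_message(u'/me waves', u'<p>waves</p>')      # -> True
    #     Message.is_status_message(u'hello /me', u'<p>hello /me</p>')  # -> False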
def update_calculated_fields(self):
# type: () -> None
# TODO: rendered_content could also be considered a calculated field
content = self.content
self.has_attachment = bool(Message.content_has_attachment(content))
self.has_image = bool(Message.content_has_image(content))
self.has_link = bool(Message.content_has_link(content))
@receiver(pre_save, sender=Message)
def pre_save_message(sender, **kwargs):
# type: (Any, **Any) -> None
if kwargs['update_fields'] is None or "content" in kwargs['update_fields']:
message = kwargs['instance']
message.update_calculated_fields()
def get_context_for_message(message):
# type: (Message) -> Sequence[Message]
# TODO: Change return type to QuerySet[Message]
return Message.objects.filter(
recipient_id=message.recipient_id,
subject=message.subject,
id__lt=message.id,
pub_date__gt=message.pub_date - timedelta(minutes=15),
).order_by('-id')[:10]
post_save.connect(flush_message, sender=Message)
# Whenever a message is sent, for each user currently subscribed to the
# corresponding Recipient object, we add a row to the UserMessage
# table, which has columns (id, user profile id, message id,
# flags) indicating which messages each user has received. This table
# allows us to quickly query any user's last 1000 messages to generate
# the home view.
#
# Additionally, the flags field stores metadata like whether the user
# has read the message, starred it, collapsed it, or was mentioned
# in it, etc.
#
# UserMessage is the largest table in a Zulip installation, even
# though each row is only 4 integers.
class UserMessage(ModelReprMixin, models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
message = models.ForeignKey(Message) # type: Message
# We're not using the archived field for now, but create it anyway
# since this table will be an unpleasant one to do schema changes
# on later
ALL_FLAGS = ['read', 'starred', 'collapsed', 'mentioned', 'wildcard_mentioned',
'summarize_in_home', 'summarize_in_stream', 'force_expand', 'force_collapse',
'has_alert_word', "historical", 'is_me_message']
flags = BitField(flags=ALL_FLAGS, default=0) # type: BitHandler
class Meta(object):
unique_together = ("user_profile", "message")
def __unicode__(self):
# type: () -> text_type
display_recipient = get_display_recipient(self.message.recipient)
return u"<UserMessage: %s / %s (%s)>" % (display_recipient, self.user_profile.email, self.flags_list())
def flags_list(self):
# type: () -> List[str]
return [flag for flag in self.flags.keys() if getattr(self.flags, flag).is_set]
def parse_usermessage_flags(val):
# type: (int) -> List[str]
flags = []
mask = 1
for flag in UserMessage.ALL_FLAGS:
if val & mask:
flags.append(flag)
mask <<= 1
return flags
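# Worked example (illustrative): the low bit is 'read' and the third bit is
# 'collapsed', so a flags value of 5 (binary 101) decodes as:
#
#     parse_usermessage_flags(5)  # -> ['read', 'collapsed']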
class Attachment(ModelReprMixin, models.Model):
MAX_FILENAME_LENGTH = 100
file_name = models.CharField(max_length=MAX_FILENAME_LENGTH, db_index=True) # type: text_type
# path_id is a storage location agnostic representation of the path of the file.
# If the path of a file is http://localhost:9991/user_uploads/a/b/abc/temp_file.py
# then its path_id will be a/b/abc/temp_file.py.
path_id = models.TextField(db_index=True) # type: text_type
owner = models.ForeignKey(UserProfile) # type: UserProfile
realm = models.ForeignKey(Realm, blank=True, null=True) # type: Realm
is_realm_public = models.BooleanField(default=False) # type: bool
messages = models.ManyToManyField(Message) # type: Manager
create_time = models.DateTimeField(default=timezone.now, db_index=True) # type: datetime.datetime
def __unicode__(self):
# type: () -> text_type
return u"<Attachment: %s>" % (self.file_name,)
def is_claimed(self):
# type: () -> bool
return self.messages.count() > 0
def get_old_unclaimed_attachments(weeks_ago):
# type: (int) -> Sequence[Attachment]
# TODO: Change return type to QuerySet[Attachment]
delta_weeks_ago = timezone.now() - datetime.timedelta(weeks=weeks_ago)
old_attachments = Attachment.objects.filter(messages=None, create_time__lt=delta_weeks_ago)
return old_attachments
class Subscription(ModelReprMixin, models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
recipient = models.ForeignKey(Recipient) # type: Recipient
active = models.BooleanField(default=True) # type: bool
in_home_view = models.NullBooleanField(default=True) # type: Optional[bool]
DEFAULT_STREAM_COLOR = u"#c2c2c2"
color = models.CharField(max_length=10, default=DEFAULT_STREAM_COLOR) # type: text_type
pin_to_top = models.BooleanField(default=False) # type: bool
desktop_notifications = models.BooleanField(default=True) # type: bool
audible_notifications = models.BooleanField(default=True) # type: bool
# Combination desktop + audible notifications superseded by the
# above.
notifications = models.BooleanField(default=False) # type: bool
class Meta(object):
unique_together = ("user_profile", "recipient")
def __unicode__(self):
# type: () -> text_type
return u"<Subscription: %r -> %s>" % (self.user_profile, self.recipient)
@cache_with_key(user_profile_by_id_cache_key, timeout=3600*24*7)
def get_user_profile_by_id(uid):
# type: (int) -> UserProfile
return UserProfile.objects.select_related().get(id=uid)
@cache_with_key(user_profile_by_email_cache_key, timeout=3600*24*7)
def get_user_profile_by_email(email):
# type: (text_type) -> UserProfile
return UserProfile.objects.select_related().get(email__iexact=email.strip())
@cache_with_key(active_user_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_active_user_dicts_in_realm(realm):
# type: (Realm) -> List[Dict[str, Any]]
return UserProfile.objects.filter(realm=realm, is_active=True) \
.values(*active_user_dict_fields)
@cache_with_key(active_bot_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_active_bot_dicts_in_realm(realm):
# type: (Realm) -> List[Dict[str, Any]]
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=True) \
.values(*active_bot_dict_fields)
def get_owned_bot_dicts(user_profile, include_all_realm_bots_if_admin=True):
# type: (UserProfile, bool) -> List[Dict[str, Any]]
if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
result = get_active_bot_dicts_in_realm(user_profile.realm)
else:
result = UserProfile.objects.filter(realm=user_profile.realm, is_active=True, is_bot=True,
bot_owner=user_profile).values(*active_bot_dict_fields)
# TODO: Remove this import cycle
from zerver.lib.avatar import get_avatar_url
return [{'email': botdict['email'],
'user_id': botdict['id'],
'full_name': botdict['full_name'],
'api_key': botdict['api_key'],
'default_sending_stream': botdict['default_sending_stream__name'],
'default_events_register_stream': botdict['default_events_register_stream__name'],
'default_all_public_streams': botdict['default_all_public_streams'],
'owner': botdict['bot_owner__email'],
'avatar_url': get_avatar_url(botdict['avatar_source'], botdict['email']),
}
for botdict in result]
def get_prereg_user_by_email(email):
# type: (text_type) -> PreregistrationUser
# A user can be invited many times, so only return the result of the latest
# invite.
return PreregistrationUser.objects.filter(email__iexact=email.strip()).latest("invited_at")
def get_cross_realm_emails():
# type: () -> Set[text_type]
return set(settings.CROSS_REALM_BOT_EMAILS)
# The Huddle class represents a group of individuals who have had a
# Group Private Message conversation together. The actual membership
# of the Huddle is stored in the Subscription table just like with
# Streams, and a hash of that list is stored in the huddle_hash field
# below, to support efficiently mapping from a set of users to the
# corresponding Huddle object.
class Huddle(models.Model):
# TODO: We should consider whether using
# CommaSeparatedIntegerField would be better.
huddle_hash = models.CharField(max_length=40, db_index=True, unique=True) # type: text_type
def get_huddle_hash(id_list):
# type: (List[int]) -> text_type
id_list = sorted(set(id_list))
hash_key = ",".join(str(x) for x in id_list)
return make_safe_digest(hash_key)
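# Since the id list is de-duplicated and sorted first, the hash is
# order-insensitive (illustrative):
#
#     get_huddle_hash([3, 1, 2]) == get_huddle_hash([1, 2, 2, 3])  # -> True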
def huddle_hash_cache_key(huddle_hash):
# type: (text_type) -> text_type
return u"huddle_by_hash:%s" % (huddle_hash,)
def get_huddle(id_list):
# type: (List[int]) -> Huddle
huddle_hash = get_huddle_hash(id_list)
return get_huddle_backend(huddle_hash, id_list)
@cache_with_key(lambda huddle_hash, id_list: huddle_hash_cache_key(huddle_hash), timeout=3600*24*7)
def get_huddle_backend(huddle_hash, id_list):
# type: (text_type, List[int]) -> Huddle
(huddle, created) = Huddle.objects.get_or_create(huddle_hash=huddle_hash)
if created:
with transaction.atomic():
recipient = Recipient.objects.create(type_id=huddle.id,
type=Recipient.HUDDLE)
subs_to_create = [Subscription(recipient=recipient,
user_profile=get_user_profile_by_id(user_profile_id))
for user_profile_id in id_list]
Subscription.objects.bulk_create(subs_to_create)
return huddle
def clear_database():
# type: () -> None
pylibmc.Client(['127.0.0.1']).flush_all()
model = None # type: Any
for model in [Message, Stream, UserProfile, Recipient,
Realm, Subscription, Huddle, UserMessage, Client,
DefaultStream]:
model.objects.all().delete()
Session.objects.all().delete()
class UserActivity(models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
client = models.ForeignKey(Client) # type: Client
query = models.CharField(max_length=50, db_index=True) # type: text_type
count = models.IntegerField() # type: int
last_visit = models.DateTimeField('last visit') # type: datetime.datetime
class Meta(object):
unique_together = ("user_profile", "client", "query")
class UserActivityInterval(models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
start = models.DateTimeField('start time', db_index=True) # type: datetime.datetime
end = models.DateTimeField('end time', db_index=True) # type: datetime.datetime
class UserPresence(models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
client = models.ForeignKey(Client) # type: Client
# Valid statuses
ACTIVE = 1
IDLE = 2
timestamp = models.DateTimeField('presence changed') # type: datetime.datetime
status = models.PositiveSmallIntegerField(default=ACTIVE) # type: int
@staticmethod
def status_to_string(status):
# type: (int) -> str
if status == UserPresence.ACTIVE:
return 'active'
elif status == UserPresence.IDLE:
return 'idle'
@staticmethod
def get_status_dict_by_realm(realm_id):
# type: (int) -> defaultdict[Any, Dict[Any, Any]]
user_statuses = defaultdict(dict) # type: defaultdict[Any, Dict[Any, Any]]
query = UserPresence.objects.filter(
user_profile__realm_id=realm_id,
user_profile__is_active=True,
user_profile__is_bot=False
).values(
'client__name',
'status',
'timestamp',
'user_profile__email',
'user_profile__id',
'user_profile__enable_offline_push_notifications',
'user_profile__is_mirror_dummy',
)
mobile_user_ids = [row['user'] for row in PushDeviceToken.objects.filter(
            user__realm_id=realm_id,
user__is_active=True,
user__is_bot=False,
).distinct("user").values("user")]
for row in query:
info = UserPresence.to_presence_dict(
client_name=row['client__name'],
status=row['status'],
dt=row['timestamp'],
push_enabled=row['user_profile__enable_offline_push_notifications'],
has_push_devices=row['user_profile__id'] in mobile_user_ids,
is_mirror_dummy=row['user_profile__is_mirror_dummy'],
)
user_statuses[row['user_profile__email']][row['client__name']] = info
return user_statuses
@staticmethod
def to_presence_dict(client_name=None, status=None, dt=None, push_enabled=None,
has_push_devices=None, is_mirror_dummy=None):
# type: (Optional[text_type], Optional[int], Optional[datetime.datetime], Optional[bool], Optional[bool], Optional[bool]) -> Dict[str, Any]
presence_val = UserPresence.status_to_string(status)
timestamp = datetime_to_timestamp(dt)
return dict(
client=client_name,
status=presence_val,
timestamp=timestamp,
pushable=(push_enabled and has_push_devices),
)
def to_dict(self):
# type: () -> Dict[str, Any]
return UserPresence.to_presence_dict(
client_name=self.client.name,
status=self.status,
dt=self.timestamp
)
@staticmethod
def status_from_string(status):
# type: (NonBinaryStr) -> Optional[int]
if status == 'active':
status_val = UserPresence.ACTIVE
elif status == 'idle':
status_val = UserPresence.IDLE
else:
status_val = None
return status_val
class Meta(object):
unique_together = ("user_profile", "client")
class DefaultStream(models.Model):
realm = models.ForeignKey(Realm) # type: Realm
stream = models.ForeignKey(Stream) # type: Stream
class Meta(object):
unique_together = ("realm", "stream")
class Referral(models.Model):
user_profile = models.ForeignKey(UserProfile) # type: UserProfile
email = models.EmailField(blank=False, null=False) # type: text_type
timestamp = models.DateTimeField(auto_now_add=True, null=False) # type: datetime.datetime
# This table only gets used on Zulip Voyager instances
# For reasons of deliverability (and sending from multiple email addresses),
# we will still send from mandrill when we send things from the (staging.)zulip.com install
class ScheduledJob(models.Model):
scheduled_timestamp = models.DateTimeField(auto_now_add=False, null=False) # type: datetime.datetime
type = models.PositiveSmallIntegerField() # type: int
# Valid types are {email}
# for EMAIL, filter_string is recipient_email
EMAIL = 1
# JSON representation of the job's data. Be careful, as we are not relying on Django to do validation
data = models.TextField() # type: text_type
    # Kind of like a ForeignKey, but the table is determined by type.
filter_id = models.IntegerField(null=True) # type: Optional[int]
filter_string = models.CharField(max_length=100) # type: text_type
| paxapy/zulip | zerver/models.py | Python | apache-2.0 | 55,239 | ["VisIt"] | ddf6e02b3e7adfea41cf3410e3782263a302009bc27cc2799def50da64ab8c4a |
import math
import numpy as np
from PyQt5.QtCore import Qt, QPoint, QRect, pyqtSignal
from PyQt5.QtGui import QPainter, QImage, QPen, QColor, QBrush
from PyQt5.QtWidgets import QWidget
# FIXME[todo]: improve the display of the activations: check that the
# space is used not only in a good, but in an optimal way. Check that
# the aspect ratio is correct. Make it configurable to allow for
# explicitly setting different aspects of display.
# FIXME[todo]: we may display positive and negative activation in a
# two-color scheme.
class QActivationView(QWidget):
"""A widget to diplay the activations of a given layer in a
network. Currently there are two types of layers that are
supported: (two-dimensional) convolutional layers and dense
(=fully connected) layers.
    The QActivationView widget allows selecting an individual unit in
the network layer by a single mouse click (this will either select
a single unit in a dense layer, or a channel in a convolutional
layer). The selection can be moved with the cursor keys and the
unit can be deselected by hitting escape. The widget will signal
such a (de)selection by emitting the "selected" signal.
The QActivationView will try to make good use of the available
space by arranging and scaling the units. However, the current
implementation is still suboptimal and may be improved to allow
for further configuration.
"""
activation : np.ndarray = None
"""The activation values to be displayed in this activation view. None
means that no activation is assigned to this QActivationView and
will result in an empty widget.
"""
padding : int = 2
"""Padding between the individual units in this QActivationView.
"""
selectedUnit : int = None
"""The currently selected unit. The value None means that no unit is
currently selected.
"""
_isConvolution : bool = False
"""A flag indicating if the current QActivationView is currently in
convolution mode (True) or not (False).
"""
selected = pyqtSignal(object)
"""A signal emitted whenever a unit is (de)selected in this
QActivationView. This will be an int (the index of the selected
unit) or None (if no unit is selected). We have to use object not
int here to allow for None values.
"""
def __init__(self, parent : QWidget = None):
        '''Initialization of the QActivationView.
Arguments
---------
parent
The parent argument is sent to the QWidget constructor.
'''
super().__init__(parent)
        self.selectedUnit = None
        self._toolTipActive = False
# By default, a QWidget does not accept the keyboard focus, so
# we need to enable it explicitly: Qt.StrongFocus means to
# get focus by "Tab" key as well as by mouse click.
self.setFocusPolicy(Qt.StrongFocus)
def setActivation(self, activation : np.ndarray) -> None:
"""Set the activations to be displayed in this QActivationView.
Currently there are two possible types of activations that are
supported by this widget: 1D, and 2D convolutional.
Arguments
---------
activation:
Either a 1D or a 3D array. The latter one will be
displayed in the convolutional mode. The activation values
are expected to be float values. For display they
will be scaled and converted to 8-bit integers.
"""
old_shape = None if self.activation is None else self.activation.shape
self.activation = activation
if self.activation is not None:
self._isConvolution = (len(self.activation.shape)>2)
# normalization (values should be between 0 and 1)
min_value = self.activation.min()
max_value = self.activation.max()
value_range = max_value - min_value
self.activation = (self.activation - min_value)
if value_range > 0:
self.activation = self.activation/value_range
# check the shape
if self._isConvolution:
                # for convolution we want activation to be of shape
# (output_channels, width, height)
if len(self.activation.shape) == 4:
# activation may include one axis for batches, i.e.,
# the axis of _activation are:
# (batch_size, width, height, output_channels)
# we do not need it - just take the first
# element from the batch
self.activation = self.activation.squeeze(axis=0)
# (width, height, output_channels)
# to (output_channels, width, height)
self.activation = self.activation.transpose([2,0,1])
#self.activation = np.swapaxes(self.activation,0,3)
else:
if len(self.activation.shape) == 2:
# activation may include one axis for batches, i.e.,
# we do not need it - just take the first
# element from the batch
self.activation = self.activation[0]
# change dtype to uint8
self.activation = np.ascontiguousarray(self.activation*255, np.uint8)
## unset selected entry if shape changed
if self.activation is None or old_shape != self.activation.shape:
self.selectUnit()
else:
self.selected.emit(self.selectedUnit)
self._computeGeometry()
self.update()
def selectUnit(self, unit : int = None):
"""(De)select a unit in this QActivationView.
Arguments
=========
unit:
"""
if self.activation is None:
unit = None
elif unit is not None and (unit < 0 or unit >= self.activation.shape[0]):
unit = None
if self.selectedUnit != unit:
self.selectedUnit = unit
self.selected.emit(self.selectedUnit)
self.update()
def getUnitActivation(self, unit = None) -> np.ndarray:
"""Get the activation mask for a given unit.
"""
if unit is None:
unit = self.selectedUnit
if self.activation is None or unit is None or not self._isConvolution:
return None
return self.activation[unit]
def _computeGeometry(self):
if self.activation is None:
self.rows = None
self.columns = None
self.unitWidth = None
self.unitHeight = None
else:
# In case of a convolutional layer, the axes of activation are:
# (batch_size, width, height, output_channels)
# For fully connected (i.e., dense) layers, the axes are:
# (batch_size, units)
# In both cases, batch_size should be 1!
self._isConvolution = (len(self.activation.shape)>2)
n = self.activation.shape[0]
if self._isConvolution:
# unitRatio = width/height
unitRatio = self.activation.shape[1]/self.activation.shape[2]
else:
unitRatio = 1
# FIXME: implement better computation!
# - allow for rectangular (i.e. non-quadratic) widget size
# - allow for rectangular (i.e. non-quadratic) convolution filters
# and maintain aspect ratio ...
# unitRatio = w/h
# unitSize = w*h
unitSize = (self.width() * self.height()) / n
unitHeight = math.floor(math.sqrt(unitSize/unitRatio))
self.rows = math.ceil(self.height()/unitHeight)
self.unitHeight = math.floor(self.height()/self.rows)
unitWidth = math.floor(unitRatio * self.unitHeight)
self.columns = math.ceil(self.width()/unitWidth)
self.unitWidth = math.floor(self.width()/self.columns)
self.update()
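        # Worked example (illustrative): for a 200x100 widget showing n=8
        # square convolution maps (unitRatio=1): unitSize = 20000/8 = 2500,
        # unitHeight = floor(sqrt(2500)) = 50, rows = ceil(100/50) = 2,
        # columns = ceil(200/50) = 4, giving a 4x2 grid of 50x50 cells.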
def paintEvent(self, event):
'''Process the paint event by repainting this Widget.
Arguments
---------
event : QPaintEvent
'''
qp = QPainter()
qp.begin(self)
if self.activation is None:
self._drawNone(qp)
elif self._isConvolution:
self._drawConvolution(qp)
else:
self._drawDense(qp)
if self.selectedUnit is not None:
self._drawSelection(qp)
qp.end()
def _getUnitRect(self, unit : int, padding : int = None):
'''Get the rectangle (screen position and size) occupied by the given
unit.
Arguments
---------
unit : index of the unit of interest
padding: padding of the unit.
If None is given, standard padding value of this QActivationView
will be use.
'''
if padding is None:
padding = self.padding
return QRect(self.unitWidth * (unit % self.columns) + padding,
self.unitHeight * (unit // self.columns) + padding,
self.unitWidth - 2*padding,
self.unitHeight - 2*padding)
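        # Worked example (illustrative): with unitWidth = unitHeight = 50,
        # columns = 4 and padding = 2, unit 5 sits in row 1, column 1, so
        # this returns QRect(52, 52, 46, 46).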
def _unitAtPosition(self, position : QPoint):
'''Compute the entry corresponding to some point in this widget.
Arguments
---------
position
The position of the point in question (in Widget coordinates).
Returns
-------
        The unit occupying that position, or None
if no entry corresponds to that position.
'''
if self.activation is None:
return None
unit = ((position.y() // self.unitHeight) * self.columns +
(position.x() // self.unitWidth))
if unit >= self.activation.shape[0]:
unit = None
return unit
def _drawConvolution(self, qp):
'''Draw activation values for a convolutional layer.
Arguments
---------
qp : QPainter
'''
# image size: filter size (or a single pixel per neuron)
map_width, map_height = self.activation.shape[1:3]
for unit in range(self.activation.shape[0]):
image = QImage(self.activation[unit],
map_width, map_height,
map_width,
QImage.Format_Grayscale8)
qp.drawImage(self._getUnitRect(unit), image)
def _drawDense(self, qp):
'''Draw activation values for a dense layer.
Arguments
---------
qp : QPainter
'''
for unit, value in enumerate(self.activation):
qp.fillRect(self._getUnitRect(unit),
QBrush(QColor(value,value,value)))
def _drawSelection(self, qp):
'''Mark the currently selected unit in the painter.
Arguments
---------
qp : QPainter
'''
pen_width = 4
pen_color = Qt.red
pen = QPen(pen_color)
pen.setWidth(pen_width)
qp.setPen(pen)
qp.drawRect(self._getUnitRect(self.selectedUnit,0))
def _drawNone(self, qp):
'''Draw a view when no activation values are available.
Arguments
---------
qp : QPainter
'''
qp.drawText(self.rect(), Qt.AlignCenter, "No data!")
def resizeEvent(self, event):
        '''Adapt to a change in size. The behavior depends on the zoom
policy.
Arguments
---------
event : QResizeEvent
'''
        # This event handler is called after the widget has been resized,
        # providing the new .size() and the old .oldSize().
self._computeGeometry()
def mousePressEvent(self, event):
'''Process mouse event.
Arguments
---------
event : QMouseEvent
'''
self.selectUnit(self._unitAtPosition(event.pos()))
def mouseReleaseEvent(self, event):
'''Process mouse event.
Arguments
---------
event : QMouseEvent
'''
# As we implement .mouseDoubleClickEvent(), we
# also provide stubs for the other mouse events to not confuse
# other widgets.
pass
def mouseDoubleClickEvent(self, event):
        '''Process a double click. We use a double click to select a
        unit.
Arguments
---------
event : QMouseEvent
'''
self.selectUnit(self._unitAtPosition(event.pos()))
def keyPressEvent(self, event):
'''Process special keys for this widget. Allow moving selected entry
using the cursor keys. Deselect unit using the Escape key.
Arguments
---------
event : QKeyEvent
'''
key = event.key()
        # Space toggles the display of tooltips
        if key == Qt.Key_Space:
            self._toolTipActive = not self._toolTipActive
            tip = 'Arrow keys move the selection, Escape deselects.'
            self.setToolTip(tip if self._toolTipActive else '')
        # Arrow keys will move the selected entry
        elif self.selectedUnit is not None:
            row = self.selectedUnit // self.columns
            col = self.selectedUnit % self.columns
if key == Qt.Key_Left:
self.selectUnit(self.selectedUnit-1)
elif key == Qt.Key_Up:
self.selectUnit(self.selectedUnit-self.columns)
elif key == Qt.Key_Right:
self.selectUnit(self.selectedUnit+1)
elif key == Qt.Key_Down:
self.selectUnit(self.selectedUnit+self.columns)
elif key == Qt.Key_Escape:
self.selectUnit(None)
else:
event.ignore()
else:
event.ignore()
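# Minimal usage sketch (illustrative, not part of the original module): feed
# the view a random stack of 16x16 "activation" maps; the names and shapes
# below are assumptions chosen only for this demo.
if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication

    app = QApplication(sys.argv)
    view = QActivationView()
    # (width, height, output_channels); setActivation transposes this to
    # (output_channels, width, height) internally.
    view.setActivation(np.random.rand(16, 16, 8).astype(np.float32))
    view.selected.connect(print)  # report (de)selections on stdout
    view.show()
    sys.exit(app.exec_())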
| JarnoRFB/qtpyvis | qtgui/widgets/activationview.py | Python | mit | 13,724 | ["NEURON"] | a53f9bdbaf1f7854b70e9e2583d0a00a501f5df1803b569c99a09ecbc3a88a8b |
""" FreeDiskSpacePolicy
FreeDiskSpacePolicy.__bases__:
DIRAC.ResourceStatusSystem.PolicySystem.PolicyBase.PolicyBase
"""
__RCSID__ = '$Id$'
from DIRAC import S_OK
from DIRAC.ResourceStatusSystem.PolicySystem.PolicyBase import PolicyBase
class FreeDiskSpacePolicy(PolicyBase):
"""
The FreeDiskSpacePolicy class is a policy class satisfied when a SE has a
low occupancy.
FreeDiskSpacePolicy, given the space left at the element, proposes a new status.
"""
@staticmethod
def _evaluate(commandResult):
"""
Evaluate policy on SE occupancy: Use FreeDiskSpaceCommand
:Parameters:
**commandResult** - S_OK / S_ERROR
        result of the command. If S_OK, a dictionary like
        { 'Total' : .., 'Free' : .. } is expected as its value.
    :return:
       {
         'Status' : Error | Unknown | Banned | Degraded | Active,
         'Reason' : a short human-readable explanation of the status
       }
"""
result = {}
if not commandResult['OK']:
result['Status'] = 'Error'
result['Reason'] = commandResult['Message']
return S_OK(result)
commandResult = commandResult['Value']
if not commandResult:
result['Status'] = 'Unknown'
result['Reason'] = 'No values to take a decision'
return S_OK(result)
for key in ['Total', 'Free']:
if key not in commandResult:
result['Status'] = 'Error'
result['Reason'] = 'Key %s missing' % key
return S_OK(result)
free = float(commandResult['Free'])
# Units (TB, GB, MB) may change,
# depending on the configuration of the command in Configurations.py
if free < 0.1:
result['Status'] = 'Banned'
result['Reason'] = 'Too little free space'
elif free < 5:
result['Status'] = 'Degraded'
result['Reason'] = 'Little free space'
else:
result['Status'] = 'Active'
result['Reason'] = 'Enough free space'
return S_OK(result)
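  # Example (illustrative): evaluating a command result by hand:
  #
  #     FreeDiskSpacePolicy._evaluate(S_OK({'Total': 100, 'Free': 2}))['Value']
  #     # -> {'Status': 'Degraded', 'Reason': 'Little free space'}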
#...............................................................................
# EOF
| chaen/DIRAC | ResourceStatusSystem/Policy/FreeDiskSpacePolicy.py | Python | gpl-3.0 | 2,000 | ["DIRAC"] | 0cea9324f49111fd63dfe0c471d4bdea9d7982f74c39a034e7ceebe952977a77 |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Visualize particle dumbbells in the NpT ensemble (constant temperature,
constant pressure, variable volume).
"""
import numpy as np
from threading import Thread
import espressomd
import espressomd.interactions
import espressomd.visualization_opengl
required_features = ["NPT", "LENNARD_JONES"]
espressomd.assert_features(required_features)
system = espressomd.System(box_l=3 * [10])
np.random.seed(seed=42)
visualizer = espressomd.visualization_opengl.openGLLive(
system, background_color=[1, 1, 1], bond_type_radius=[0.2])
system.time_step = 0.0005
system.cell_system.skin = 0.1
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=2, sigma=1, cutoff=3, shift="auto")
system.bonded_inter[0] = espressomd.interactions.HarmonicBond(k=5.0, r_0=1.0)
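# The harmonic bond potential is V(r) = (k/2) * (r - r_0)**2, so k=5.0 and
# r_0=1.0 give dumbbells with a preferred bond length of 1.0.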
n_part = 200
for i in range(0, n_part - 1, 2):
part1 = system.part.add(pos=np.random.random(3) * system.box_l)
part2 = system.part.add(pos=np.random.random(3) * system.box_l)
part1.add_bond((system.bonded_inter[0], part2.id))
print("E before minimization:", system.analysis.energy()["total"])
system.integrator.set_steepest_descent(f_max=0.0, gamma=30.0,
max_displacement=0.1)
system.integrator.run(10000)
print("E after minimization:", system.analysis.energy()["total"])
system.thermostat.set_npt(kT=2.0, gamma0=1.0, gammav=0.01, seed=42)
system.integrator.set_isotropic_npt(ext_pressure=1.0, piston=0.01)
def main():
cnt = 0
P = 0
while True:
system.integrator.run(1)
P += system.analysis.pressure()['total']
if cnt > 10000:
print("Pressure:", P / cnt, "Box:", system.box_l)
cnt = 0
P = 0
visualizer.update()
cnt += 1
# Start simulation in separate thread
t = Thread(target=main)
t.daemon = True
t.start()
visualizer.start()
| espressomd/espresso | samples/visualization_npt.py | Python | gpl-3.0 | 2,563 | ["ESPResSo"] | 3d09a1e4e9669dd69653a3d02a292f7f1b60f3230ac370bcf70b403619f7a9f3 |
# Sample module in the public domain. Feel free to use this as a template
# for your modules (and you can remove this header and take complete credit
# and liability)
#
# Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# Simple file-level ingest module for Autopsy.
# Used as part of Python tutorials from Basis Technology - July 2015
# http://www.basistech.com/python-autopsy-module-tutorial-1-the-file-ingest-module/
#
# Looks for big files that are a multiple of 4096 and makes artifacts
import jarray
import inspect
from java.lang import System
from java.util.logging import Level
from org.sleuthkit.datamodel import SleuthkitCase
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import ReadContentInputStream
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import TskData
from org.sleuthkit.autopsy.ingest import IngestModule
from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
from org.sleuthkit.autopsy.ingest import DataSourceIngestModule
from org.sleuthkit.autopsy.ingest import FileIngestModule
from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter
from org.sleuthkit.autopsy.ingest import IngestMessage
from org.sleuthkit.autopsy.ingest import IngestServices
from org.sleuthkit.autopsy.ingest import ModuleDataEvent
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule.services import Services
from org.sleuthkit.autopsy.casemodule.services import FileManager
from org.sleuthkit.autopsy.casemodule.services import Blackboard
# Factory that defines the name and details of the module and allows Autopsy
# to create instances of the modules that will do the analysis.
class FindBigRoundFilesIngestModuleFactory(IngestModuleFactoryAdapter):
moduleName = "Big and Round File Finder"
def getModuleDisplayName(self):
return self.moduleName
def getModuleDescription(self):
return "Sample module that files large files that are a multiple of 4096."
def getModuleVersionNumber(self):
return "1.0"
# Return true if module wants to get called for each file
def isFileIngestModuleFactory(self):
return True
    # Can return None if isFileIngestModuleFactory returns False
def createFileIngestModule(self, ingestOptions):
return FindBigRoundFilesIngestModule()
# File-level ingest module. One gets created per thread.
class FindBigRoundFilesIngestModule(FileIngestModule):
_logger = Logger.getLogger(FindBigRoundFilesIngestModuleFactory.moduleName)
def log(self, level, msg):
self._logger.logp(level, self.__class__.__name__, inspect.stack()[1][3], msg)
# Where any setup and configuration is done
# 'context' is an instance of org.sleuthkit.autopsy.ingest.IngestJobContext.
# See: http://sleuthkit.org/autopsy/docs/api-docs/4.4/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_ingest_job_context.html
# TODO: Add any setup code that you need here.
def startUp(self, context):
self.filesFound = 0
# Throw an IngestModule.IngestModuleException exception if there was a problem setting up
# raise IngestModuleException("Oh No!")
pass
# Where the analysis is done. Each file will be passed into here.
# The 'file' object being passed in is of type org.sleuthkit.datamodel.AbstractFile.
# See: http://www.sleuthkit.org/sleuthkit/docs/jni-docs/4.4/classorg_1_1sleuthkit_1_1datamodel_1_1_abstract_file.html
def process(self, file):
# Use blackboard class to index blackboard artifacts for keyword search
blackboard = Case.getCurrentCase().getServices().getBlackboard()
# Skip non-files
        if ((file.getType() == TskData.TSK_DB_FILES_TYPE_ENUM.UNALLOC_BLOCKS) or
            (file.getType() == TskData.TSK_DB_FILES_TYPE_ENUM.UNUSED_BLOCKS) or
            (not file.isFile())):
return IngestModule.ProcessResult.OK
# Look for files bigger than 10MB that are a multiple of 4096
if ((file.getSize() > 10485760) and ((file.getSize() % 4096) == 0)):
# Make an artifact on the blackboard. TSK_INTERESTING_FILE_HIT is a generic type of
# artifact. Refer to the developer docs for other examples.
art = file.newArtifact(BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT)
att = BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID(),
FindBigRoundFilesIngestModuleFactory.moduleName, "Big and Round Files")
art.addAttribute(att)
try:
# index the artifact for keyword search
blackboard.indexArtifact(art)
except Blackboard.BlackboardException as e:
self.log(Level.SEVERE, "Error indexing artifact " + art.getDisplayName())
# Fire an event to notify the UI and others that there is a new artifact
IngestServices.getInstance().fireModuleDataEvent(
ModuleDataEvent(FindBigRoundFilesIngestModuleFactory.moduleName,
BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT, None))
return IngestModule.ProcessResult.OK
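    # A minimal sketch (hypothetical, not part of the original tutorial): the
    # magic numbers in process() above could be lifted into named constants:
    #
    #   MIN_SIZE_BYTES = 10 * 1024 * 1024   # 10 MB
    #   BLOCK_SIZE = 4096
    #   big_and_round = (file.getSize() > MIN_SIZE_BYTES and
    #                    file.getSize() % BLOCK_SIZE == 0)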
# Where any shutdown code is run and resources are freed.
# TODO: Add any shutdown code that you need here.
    def shutDown(self):
        pass
|
APriestman/autopsy
|
pythonExamples/July2015FileTutorial_BigRound/FindBigRoundFiles.py
|
Python
|
apache-2.0
| 6,704
|
[
"Brian"
] |
57b4fcd652c3596476d431ecbc3d8ccf165d2e14f33dc06a3f85ef711f5ce377
|
#!/usr/bin/env python
"""
Upwind discretization of advection equation
@author Alexander Pletzer
"""
import pnumpy
import numpy
import sys
from mpi4py import MPI
import operator
import saveVTK
class Upwind:
def __init__(self, velocity, lengths, numCells):
self.rk = MPI.COMM_WORLD.Get_rank()
self.sz = MPI.COMM_WORLD.Get_size()
# decomposition
self.dc = pnumpy.CubeDecomp(self.sz, numCells)
if not self.dc.getDecomp():
print('*** No uniform decomposition could be found for {0} processes'.format(self.sz))
print('*** Please adjust the number of cells {0}'.format(numCells))
sys.exit(1)
# begin/end indices of local sub-domain
self.localSlices = self.dc.getSlab(self.rk)
self.iBeg = numpy.array([s.start for s in self.localSlices])
self.iEnd = numpy.array([s.stop for s in self.localSlices])
self.nsLocal = numpy.array([s.stop - s.start for s in self.localSlices])
print('[{0}] local number of cells: {1}'.format(self.rk, self.nsLocal))
# global number of cells
self.numCells = numCells
self.ndims = len(velocity)
self.deltas = numpy.zeros( (self.ndims,), numpy.float64 )
self.upDirection = numpy.zeros( (self.ndims,), numpy.int )
self.v = velocity
self.lengths = lengths
# number of local field values
self.ntot = 1
for j in range(self.ndims):
self.upDirection[j] = -1
if velocity[j] < 0.: self.upDirection[j] = +1
self.deltas[j] = lengths[j] / numCells[j]
self.ntot *= self.nsLocal[j]
self.coeff = self.v * self.upDirection / self.deltas
# initializing the field
self.f = pnumpy.gdaZeros( self.nsLocal, numpy.float64, numGhosts=1 )
self.fOld = pnumpy.gdaZeros( self.nsLocal, numpy.float64, numGhosts=1 )
# initialize lower corner to one
if self.rk == 0:
self.f[0, 0, 0] = 1
# get the neighboring ranks
self.neighbSide = [[] for i in range(self.ndims)]
direction = numpy.array([0] * self.ndims)
self.neighbRk = numpy.array([0] * self.ndims)
periodic = [True for i in range(self.ndims)]
for i in range(self.ndims):
direction[i] = self.upDirection[i]
self.neighbRk[i] = self.dc.getNeighborProc(self.rk, direction, periodic=periodic)
self.neighbSide[i] = tuple(-direction)
direction[i] = 0
def advect(self, deltaTime):
"""
Advance the field by one time step
"""
self.fOld[:] = self.f
c = deltaTime * numpy.sum(self.coeff)
# handle all local computations first
self.f += c*self.fOld
self.f[1:, :, :] -= deltaTime*self.coeff[0]*self.fOld[:-1, :, :]
self.f[:, 1:, :] -= deltaTime*self.coeff[1]*self.fOld[:, :-1, :]
self.f[:, :, 1:] -= deltaTime*self.coeff[2]*self.fOld[:, :, :-1]
# fetch neighboring data. This is where communication takes place
self.f[:1, :, :] -= deltaTime*self.coeff[0]* \
self.fOld.getData(self.neighbRk[0], self.neighbSide[0])
self.f[:, :1, :] -= deltaTime*self.coeff[1]* \
self.fOld.getData(self.neighbRk[1], self.neighbSide[1])
self.f[:, :, :1] -= deltaTime*self.coeff[2]* \
self.fOld.getData(self.neighbRk[2], self.neighbSide[2])
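        # The updates above implement the first-order upwind scheme: for a
        # positive velocity v in direction x,
        #     f_i^{n+1} = f_i^n - dt * (v/dx) * (f_i^n - f_{i-1}^n),
        # applied dimension by dimension. getData() fetches the upwind
        # neighbour's ghost layer, so the stencil closes across rank
        # boundaries; this is the only place MPI communication happens.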
def checksum(self):
return self.f.reduce(operator.add, 0.0, rootPe=0)
def printOut(self):
for i in range(len(self.f)):
print('{0} {1}'.format(i, self.f[i]))
def __del__(self):
self.f.free()
self.fOld.free()
def gatherRoot(self):
"""
Gather the data on process root
@return array on rank 0, None on other ranks
"""
res = None
if self.rk == 0:
res = numpy.zeros(self.numCells, numpy.float64)
fRoot = MPI.COMM_WORLD.gather(self.f, root=0)
if self.rk == 0:
for rk in range(self.sz):
slab = self.dc.getSlab(rk)
res[slab] = fRoot[rk]
return res
############################################################################################################
def main():
    if len(sys.argv) <= 1:
        print("must specify number of cells in each direction.")
        sys.exit(1)
ndims = 3
numCells = [int(sys.argv[1])] * 3
numTimeSteps = 100
if len(sys.argv) > 2:
numTimeSteps = int(sys.argv[2])
doVtk = False
if len(sys.argv) > 3 and sys.argv[3] == 'vtk':
doVtk = True
velocity = numpy.ones( (ndims,), numpy.float64 )
lengths = numpy.ones( (ndims,), numpy.float64 )
# compute dt
courant = 0.1
dt = float('inf')
for j in range(ndims):
dx = lengths[j]/ float(numCells[j])
dt = min(courant * dx / velocity[j], dt)
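    # CFL stability for explicit upwind: dt <= courant * dx / v along every
    # axis, hence the min() above. Illustration: 64 cells on a unit box with
    # v = 1 gives dx = 1/64 and dt = 0.1/64 ~= 1.6e-3.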
up = Upwind(velocity, lengths, numCells)
if up.rk == 0:
print("number of cells: {0}".format(numCells))
    tic, toc = 0, 0
if up.rk == 0:
tic = MPI.Wtime()
# time iterations
for i in range(numTimeSteps):
up.advect(dt)
if up.rk == 0:
toc = MPI.Wtime()
print('Wall clock time spent in advection loop: {0} [sec]'.format(toc - tic))
chksum = up.checksum()
if up.rk == 0:
print("check sum: {0}".format(chksum))
if doVtk:
data = up.gatherRoot()
if up.rk == 0:
xAxis = numpy.array([0.0 + i*up.deltas[0] for i in range(numCells[0] + 1)])
yAxis = numpy.array([0.0 + j*up.deltas[1] for j in range(numCells[1] + 1)])
        zAxis = numpy.array([0.0 + k*up.deltas[2] for k in range(numCells[2] + 1)])
saveVTK.rectilinear('upMPI.vtk', xAxis, yAxis, zAxis, data)
if __name__ == '__main__': main()
|
pletzer/fidibench
|
upwind/python/upwindMPI.py
|
Python
|
mit
| 5,436
|
[
"VTK"
] |
b46bef49878b02169b345491da6b95b532045321dcc1e40705195678b5cc219c
|
"""
Some generic utility routines for number handling and
calculating (specific) variances
"""
import logging
import itertools
import numpy
from tkp.utility import containers
from tkp.utility.memoize import Memoize
from tkp.sourcefinder import utils
from tkp.sourcefinder import stats
from tkp.sourcefinder import extract
try:
import ndimage
except ImportError:
from scipy import ndimage
logger = logging.getLogger(__name__)
#
# Hard-coded configuration parameters; not user settable.
#
INTERPOLATE_ORDER = 1 # Spline order for grid interpolation
MEDIAN_FILTER = 0 # If non-zero, apply a median filter of size
# MEDIAN_FILTER to the background and RMS grids prior
# to interpolating.
MF_THRESHOLD = 0 # If MEDIAN_FILTER is non-zero, only use the filtered
# grid when the (absolute) difference between the raw
# and filtered grids is larger than MF_THRESHOLD.
DEBLEND_MINCONT = 0.005 # Min. fraction of island flux in deblended subisland
STRUCTURING_ELEMENT = [[0,1,0], [1,1,1], [0,1,0]] # Island connectivity
class ImageData(object):
"""Encapsulates an image in terms of a numpy array + meta/headerdata.
    This is your primary contact point for interaction with images: it
    includes facilities for source extraction and measurement, etc.
"""
def __init__(self, data, beam, wcs, margin=0, radius=0, back_size_x=32,
back_size_y=32, residuals=True
):
"""Sets up an ImageData object.
*Args:*
- data (2D numpy.ndarray): actual image data
- wcs (utility.coordinates.wcs): world coordinate system
specification
- beam (3-tuple): beam shape specification as
(semimajor, semiminor, theta)
"""
# Do data, wcs and beam need deepcopy?
# Probably not (memory overhead, in particular for data),
# but then the user shouldn't change them outside ImageData in the
# mean time
self.rawdata = data # a 2D numpy array
self.wcs = wcs # a utility.coordinates.wcs instance
self.beam = beam # tuple of (semimaj, semimin, theta)
self.clip = {}
self.labels = {}
self.freq_low = 1
self.freq_high = 1
self.back_size_x = back_size_x
        self.back_size_y = back_size_y
self.margin = margin
self.radius = radius
self.residuals = residuals
###########################################################################
# #
# Properties and attributes. #
# #
# Properties are attributes managed by methods; rather than calling the #
# method directly, the attribute automatically invokes it. We can use #
# this to do cunning transparent caching ("memoizing") etc; see the #
# Memoize class. #
# #
# clearcache() clears all the memoized data, which can get quite large. #
# It may be wise to call this, for example, in an exception handler #
# dealing with MemoryErrors. #
# #
###########################################################################
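    # Sketch of the pattern used below:
    #
    #     @Memoize
    #     def _attr(self):
    #         ...                       # expensive computation, cached
    #     attr = property(fget=_attr, fdel=_attr.delete)
    #
    # so reading self.attr computes the value once, and "del self.attr"
    # clears the cached copy.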
@Memoize
def _grids(self):
"""Gridded RMS and background data for interpolating"""
return self.__grids()
grids = property(fget=_grids, fdel=_grids.delete)
@Memoize
def _backmap(self):
"""Background map"""
if not hasattr(self, "_user_backmap"):
return self._interpolate(self.grids['bg'])
else:
return self._user_backmap
def _set_backmap(self, bgmap):
self._user_backmap = bgmap
del(self.backmap)
del(self.data_bgsubbed)
backmap = property(fget=_backmap, fdel=_backmap.delete, fset=_set_backmap)
@Memoize
def _get_rm(self):
"""RMS map"""
if not hasattr(self, "_user_noisemap"):
return self._interpolate(self.grids['rms'], roundup=True)
else:
return self._user_noisemap
def _set_rm(self, noisemap):
self._user_noisemap = noisemap
del(self.rmsmap)
rmsmap = property(fget=_get_rm, fdel=_get_rm.delete, fset=_set_rm)
@Memoize
def _get_data(self):
"""Masked image data"""
# We will ignore all the data which is masked for the rest of the
# sourcefinding process. We build up the mask by stacking ("or-ing
# together") a number of different effects:
#
# * A margin from the edge of the image;
# * Any data outside a given radius from the centre of the image;
# * Data which is "obviously" bad (equal to 0 or NaN).
mask = numpy.zeros((self.xdim, self.ydim))
if self.margin:
margin_mask = numpy.ones((self.xdim, self.ydim))
margin_mask[self.margin:-self.margin, self.margin:-self.margin] = 0
mask = numpy.logical_or(mask, margin_mask)
if self.radius:
radius_mask = utils.circular_mask(self.xdim, self.ydim, self.radius)
mask = numpy.logical_or(mask, radius_mask)
mask = numpy.logical_or(mask, numpy.where(self.rawdata == 0, 1, 0))
mask = numpy.logical_or(mask, numpy.isnan(self.rawdata))
return numpy.ma.array(self.rawdata, mask=mask)
data = property(fget=_get_data, fdel=_get_data.delete)
@Memoize
def _get_data_bgsubbed(self):
"""Background subtracted masked image data"""
return self.data - self.backmap
data_bgsubbed = property(fget=_get_data_bgsubbed,
fdel=_get_data_bgsubbed.delete)
@property
def xdim(self):
"""X pixel dimension of (unmasked) data"""
return self.rawdata.shape[0]
@property
def ydim(self):
"""Y pixel dimension of (unmasked) data"""
return self.rawdata.shape[1]
@property
def pixmax(self):
"""Maximum pixel value (pre-background subtraction)"""
return self.data.max()
@property
def pixmin(self):
"""Minimum pixel value (pre-background subtraction)"""
return self.data.min()
def clearcache(self):
"""Zap any calculated data stored in this object.
Clear the background and rms maps, labels, clip, and any locally held
data. All of these can be reconstructed from the data accessor.
Note that this *must* be run to pick up any new settings.
"""
self.labels.clear()
self.clip.clear()
del(self.backmap)
del(self.rmsmap)
del(self.data)
del(self.data_bgsubbed)
del(self.grids)
if hasattr(self, 'residuals_from_gauss_fitting'):
del(self.residuals_from_gauss_fitting)
if hasattr(self, 'residuals_from_deblending'):
del(self.residuals_from_deblending)
###########################################################################
# #
# General purpose image handling. #
# #
# Routines for saving and trimming data, and calculating background/RMS #
    # maps (in conjunction with the properties above).                       #
# #
###########################################################################
# Private "support" methods
def __grids(self):
"""Calculate background and RMS grids of this image.
These grids can be interpolated up to make maps of the original image
dimensions: see _interpolate().
This is called automatically when ImageData.backmap,
ImageData.rmsmap or ImageData.fdrmap is first accessed.
"""
# there's no point in working with the whole of the data array
# if it's masked.
useful_chunk = ndimage.find_objects(numpy.where(self.data.mask, 0, 1))
assert(len(useful_chunk) == 1)
useful_data = self.data[useful_chunk[0]]
my_xdim, my_ydim = useful_data.shape
rmsgrid, bggrid = [], []
for startx in xrange(0, my_xdim, self.back_size_x):
rmsrow, bgrow = [], []
for starty in xrange(0, my_ydim, self.back_size_y):
chunk = useful_data[
startx:startx + self.back_size_x,
starty:starty + self.back_size_y
].ravel()
if not chunk.any():
rmsrow.append(False)
bgrow.append(False)
continue
chunk, sigma, median, num_clip_its = stats.sigma_clip(
chunk, self.beam)
if len(chunk) == 0 or not chunk.any():
rmsrow.append(False)
bgrow.append(False)
else:
mean = numpy.mean(chunk)
rmsrow.append(sigma)
# In the case of a crowded field, the distribution will be
# skewed and we take the median as the background level.
# Otherwise, we take 2.5 * median - 1.5 * mean. This is the
# same as SExtractor: see discussion at
# <http://terapix.iap.fr/forum/showthread.php?tid=267>.
# (mean - median) / sigma is a quick n' dirty skewness
# estimator devised by Karl Pearson.
if numpy.fabs(mean - median) / sigma >= 0.3:
logger.debug(
'bg skewed, %f clipping iterations', num_clip_its)
bgrow.append(median)
else:
logger.debug(
'bg not skewed, %f clipping iterations', num_clip_its)
bgrow.append(2.5 * median - 1.5 * mean)
rmsgrid.append(rmsrow)
bggrid.append(bgrow)
rmsgrid = numpy.ma.array(
rmsgrid, mask=numpy.where(numpy.array(rmsgrid) == False, 1, 0))
bggrid = numpy.ma.array(
bggrid, mask=numpy.where(numpy.array(bggrid) == False, 1, 0))
return {'rms': rmsgrid, 'bg': bggrid}
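    # Worked example of the background estimator above: a clipped chunk with
    # mean = 10.0, median = 9.0 and sigma = 2.0 gives |mean - median| / sigma
    # = 0.5 >= 0.3, so the distribution is treated as skewed and the median
    # (9.0) is used; with mean = 9.1 the skewness is only 0.05, and the
    # SExtractor-style mode estimate 2.5 * 9.0 - 1.5 * 9.1 = 8.85 is used.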
def _interpolate(self, grid, roundup=False):
"""
Interpolate a grid to produce a map of the dimensions of the image.
Args:
grid (numpy.ma.MaskedArray)
Kwargs:
roundup (bool)
Returns:
(numpy.ma.MaskedArray)
Used to transform the RMS, background or FDR grids produced by
L{_grids()} to a map we can compare with the image data.
If roundup is true, values of the resultant map which are lower than
the input grid are trimmed.
"""
# there's no point in working with the whole of the data array if it's
# masked.
useful_chunk = ndimage.find_objects(numpy.where(self.data.mask, 0, 1))
assert(len(useful_chunk) == 1)
my_xdim, my_ydim = self.data[useful_chunk[0]].shape
if MEDIAN_FILTER:
f_grid = ndimage.median_filter(grid, MEDIAN_FILTER)
if MF_THRESHOLD:
grid = numpy.where(
numpy.fabs(f_grid - grid) > MF_THRESHOLD, f_grid, grid
)
else:
grid = f_grid
# Bicubic spline interpolation
xratio = float(my_xdim)/self.back_size_x
yratio = float(my_ydim)/self.back_size_y
# First arg: starting point. Second arg: ending point. Third arg:
# 1j * number of points. (Why is this complex? Sometimes, NumPy has an
# utterly baffling API...)
slicex = slice(-0.5, -0.5+xratio, 1j*my_xdim)
slicey = slice(-0.5, -0.5+yratio, 1j*my_ydim)
my_map = numpy.ma.MaskedArray(numpy.zeros(self.data.shape),
mask = self.data.mask)
my_map[useful_chunk[0]] = ndimage.map_coordinates(
grid, numpy.mgrid[slicex, slicey],
mode='nearest', order=INTERPOLATE_ORDER)
# If the input grid was entirely masked, then the output map must
# also be masked: there's no useful data here. We don't search for
# sources on a masked background/RMS, so this data will be cleanly
# skipped by the rest of the sourcefinder
if numpy.ma.getmask(grid).all():
my_map.mask = True
elif roundup:
# In some cases, the spline interpolation may produce values
# lower than the minimum value in the map. If required, these
# can be trimmed off. No point doing this if the map is already
# fully masked, though.
my_map = numpy.ma.MaskedArray(
data = numpy.where(
my_map >= numpy.min(grid), my_map, numpy.min(grid)),
mask = my_map.mask
)
return my_map
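    # Aside on the complex slice steps above: numpy.mgrid interprets an
    # imaginary step as a point count with inclusive endpoints, e.g.
    # numpy.mgrid[0:1:5j] -> array([0., 0.25, 0.5, 0.75, 1.]).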
###########################################################################
# #
# Source extraction. #
# #
# Provides for both traditional (islands-above-RMS) and FDR source #
# extraction systems. #
# #
###########################################################################
def extract(self, det, anl, noisemap=None, bgmap=None, labelled_data=None,
labels=None, deblend_nthresh=0, force_beam=False):
"""
Kick off conventional (ie, RMS island finding) source extraction.
Kwargs:
det (float): detection threshold, as a multiple of the RMS
noise. At least one pixel in a source must exceed this
for it to be regarded as significant.
anl (float): analysis threshold, as a multiple of the RMS
noise. All the pixels within the island that exceed
this will be used when fitting the source.
noisemap (numpy.ndarray):
bgmap (numpy.ndarray):
deblend_nthresh (int): number of subthresholds to use for
deblending. Set to 0 to disable.
force_beam (bool): force all extractions to have major/minor axes
equal to the restoring beam
Returns:
:class:`tkp.utility.containers.ExtractionResults`
"""
if anl > det:
logger.warn(
"Analysis threshold is higher than detection threshold"
)
if (type(bgmap).__name__ == 'ndarray' or
type(bgmap).__name__ == 'MaskedArray'):
if bgmap.shape != self.backmap.shape:
raise IndexError("Background map has wrong shape")
else:
self.backmap = bgmap
if (type(noisemap).__name__ == 'ndarray' or
type(noisemap).__name__ == 'MaskedArray'):
if noisemap.shape != self.rmsmap.shape:
raise IndexError("Noisemap has wrong shape")
if noisemap.min() < 0:
raise ValueError("RMS noise cannot be negative")
else:
self.rmsmap = noisemap
if labelled_data is not None and labelled_data.shape != self.data.shape:
raise ValueError("Labelled map is wrong shape")
return self._pyse(
det * self.rmsmap, anl * self.rmsmap, deblend_nthresh, force_beam,
labelled_data=labelled_data, labels=labels
)
def reverse_se(self, det):
"""Run source extraction on the negative of this image.
Obviously, there should be no sources in the negative image, so this
tells you about the false positive rate.
        We need to clear cached data -- background map, cached clips, etc --
before & after doing this, as they'll interfere with the normal
extraction process. If this is regularly used, we'll want to
implement a separate cache.
"""
self.labels.clear()
self.clip.clear()
self.data_bgsubbed *= -1
results = self.extract(det=det)
self.data_bgsubbed *= -1
self.labels.clear()
self.clip.clear()
return results
def fd_extract(self, alpha, anl=None, noisemap=None,
bgmap=None, deblend_nthresh=0, force_beam=False
):
"""False Detection Rate based source extraction.
The FDR procedure guarantees that <FDR> < alpha.
See `Hopkins et al., AJ, 123, 1086 (2002)
<http://adsabs.harvard.edu/abs/2002AJ....123.1086H>`_.
"""
# The correlation length in config.py is used not only for the
# calculation of error bars with the Condon formulae, but also for
# calculating the number of independent pixels.
corlengthlong, corlengthshort = utils.calculate_correlation_lengths(
self.beam[0], self.beam[1])
C_n = (1.0 / numpy.arange(
round(0.25 * numpy.pi * corlengthlong *
corlengthshort + 1))[1:]).sum()
# Calculate the FDR threshold
# Things will go terribly wrong in the line below if the interpolated
# noise values get very close or below zero. Use INTERPOLATE_ORDER=1
# or the roundup option.
if (type(bgmap).__name__ == 'ndarray' or
type(bgmap).__name__ == 'MaskedArray'):
if bgmap.shape != self.backmap.shape:
raise IndexError("Background map has wrong shape")
else:
self.backmap = bgmap
if (type(noisemap).__name__ == 'ndarray' or
type(noisemap).__name__ == 'MaskedArray'):
if noisemap.shape != self.rmsmap.shape:
raise IndexError("Noisemap has wrong shape")
if noisemap.min()<0:
raise ValueError("RMS noise cannot be negative")
else:
self.rmsmap = noisemap
normalized_data = self.data_bgsubbed/self.rmsmap
n1 = numpy.sqrt(2 * numpy.pi)
prob = numpy.sort(numpy.ravel(numpy.exp(-0.5 * normalized_data**2)/n1))
lengthprob = float(len(prob))
compare = (alpha / C_n) * numpy.arange(lengthprob+1)[1:] / lengthprob
# Find the last undercrossing, see, e.g., fig. 9 in Miller et al., AJ
# 122, 3492 (2001). Searchsorted is not used because the array is not
# sorted.
try:
index = (numpy.where(prob-compare < 0.)[0]).max()
except ValueError:
# Everything below threshold
return containers.ExtractionResults()
fdr_threshold = numpy.sqrt(-2.0 * numpy.log(n1 * prob[index]))
        # By default we require that all source pixels are above the threshold,
# not only the peak pixel. This gives a better guarantee that indeed
# the fraction of false positives is less than fdr_alpha in config.py.
# See, e.g., Hopkins et al., AJ 123, 1086 (2002).
if not anl:
anl = fdr_threshold
return self._pyse(fdr_threshold * self.rmsmap, anl * self.rmsmap,
deblend_nthresh, force_beam)
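    # Note (added commentary): C_n above is the harmonic sum 1 + 1/2 + ... +
    # 1/N over the number of independent resolution elements, N ~= 0.25 * pi
    # * corlengthlong * corlengthshort, i.e. the correction factor for
    # correlated tests in the FDR procedure of Hopkins et al. (2002).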
def flux_at_pixel(self, x, y, numpix=1):
"""Return the background-subtracted flux at a certain position
in the map"""
# numpix is the number of pixels to look around the target.
# e.g. numpix = 1 means a total of 9 pixels, 1 in each direction.
return self.data_bgsubbed[y-numpix:y+numpix+1,
x-numpix:x+numpix+1].max()
@staticmethod
def box_slice_about_pixel(x,y,box_radius):
"""
Returns a slice centred about (x,y), of width = 2*int(box_radius) + 1
"""
ibr = int(box_radius)
return (slice(x - ibr, x + ibr + 1),
slice(y - ibr, y + ibr + 1))
def fit_to_point(self, x, y, boxsize, threshold, fixed):
"""Fit an elliptical Gaussian to a specified point on the image.
The fit is carried on a square section of the image, of length
*boxsize* & centred at pixel coordinates *x*, *y*. Any data
below *threshold* * rmsmap is not used for fitting. If *fixed*
is set to ``position``, then the pixel coordinates are fixed
in the fit.
Returns an instance of :class:`tkp.sourcefinder.extract.Detection`.
"""
if ((
# Recent NumPy
hasattr(numpy.ma.core, "MaskedConstant") and
isinstance(self.rmsmap, numpy.ma.core.MaskedConstant)
) or (
# Old NumPy
numpy.ma.is_masked(self.rmsmap[x, y])
)):
logger.error("Background is masked: cannot fit")
return None
chunk = ImageData.box_slice_about_pixel(x, y, boxsize/2.0)
if threshold is not None:
# We'll mask out anything below threshold*self.rmsmap from the fit.
labels, num = self.labels.setdefault( #Dictionary mapping threshold -> islands map
threshold,
ndimage.label(
self.clip.setdefault( #Dictionary mapping threshold -> mask
threshold,
numpy.where(
self.data_bgsubbed > threshold * self.rmsmap, 1, 0
)
)
)
)
mylabel = labels[x, y]
if mylabel == 0: # 'Background'
raise ValueError("Fit region is below specified threshold, fit aborted.")
mask = numpy.where(labels[chunk] == mylabel, 0, 1)
fitme = numpy.ma.array(self.data_bgsubbed[chunk], mask=mask)
if len(fitme.compressed()) < 1:
raise IndexError("Fit region too close to edge or too small")
else:
fitme = self.data_bgsubbed[chunk]
if fitme.size < 1:
raise IndexError("Fit region too close to edge or too small")
if not len(fitme.compressed()):
logger.error("All data is masked: cannot fit")
return None
# set argument for fixed parameters based on input string
if fixed == 'position':
fixed = {'xbar': boxsize/2.0, 'ybar': boxsize/2.0}
elif fixed == 'position+shape':
fixed = {'xbar': boxsize/2.0, 'ybar': boxsize/2.0,
'semimajor': self.beam[0],
'semiminor': self.beam[1],
'theta': self.beam[2]}
        elif fixed is None:
            fixed = {}
        else:
            raise TypeError("Unknown fixed parameter")
if threshold is not None:
threshold_at_pixel = threshold * self.rmsmap[x, y]
else:
threshold_at_pixel = None
try:
measurement, residuals = extract.source_profile_and_errors(
fitme,
threshold_at_pixel,
self.rmsmap[x, y],
self.beam,
fixed=fixed
)
except ValueError:
# Fit failed to converge
# Moments are not applicable when holding parameters fixed
logger.error("Gaussian fit failed at %f, %f", x, y)
return None
try:
assert(abs(measurement['xbar']) < boxsize)
assert(abs(measurement['ybar']) < boxsize)
except AssertionError:
logger.warn('Fit falls outside of box.')
measurement['xbar'] += x-boxsize/2.0
measurement['ybar'] += y-boxsize/2.0
measurement.sig = (fitme / self.rmsmap[chunk]).max()
return extract.Detection(measurement, self)
def fit_fixed_positions(self, positions, boxsize, threshold=None,
fixed='position+shape',
ids=None):
"""
Convenience function to fit a list of sources at the given positions
This function wraps around fit_to_point().
Args:
positions (list): list of (RA, Dec) tuples. Positions to be fit,
in decimal degrees.
boxsize: See :py:func:`fit_to_point`
threshold: as above.
fixed: as above.
ids (list): A list of identifiers. If not None, then must match
the length and order of the ``requested_fits``. Any
successfully fit positions will be returned in a tuple
along with the matching id. As these are simply passed back to
calling code they can be a string, tuple or whatever.
In particular, boxsize is in pixel coordinates as in
fit_to_point, not in sky coordinates.
Returns:
list: A list of successful fits.
If ``ids`` is None, returns a single list of
:class:`tkp.sourcefinder.extract.Detection` s.
Otherwise, returns a tuple of two matched lists:
([detections], [matching_ids]).
"""
if ids is not None:
assert len(ids)==len(positions)
successful_fits = []
successful_ids = []
for idx, posn in enumerate(positions):
try:
x, y, = self.wcs.s2p((posn[0], posn[1]))
except RuntimeError, e:
if (str(e).startswith("wcsp2s error: 8:") or
str(e).startswith("wcsp2s error: 9:")):
logger.warning("Input coordinates (%.2f, %.2f) invalid: ",
posn[0], posn[1])
else:
raise
else:
try:
fit_results = self.fit_to_point(x, y,
boxsize=boxsize,
threshold=threshold,
fixed=fixed)
if not fit_results:
# We were unable to get a good fit
continue
if ( fit_results.ra.error == float('inf') or
fit_results.dec.error == float('inf')):
logging.warning("position errors extend outside image")
else:
successful_fits.append(fit_results)
if ids:
successful_ids.append(ids[idx])
except IndexError as e:
logger.warning("Input pixel coordinates (%.2f, %.2f) "
"could not be fit because: " + e.message,
posn[0], posn[1])
if ids:
return successful_fits, successful_ids
return successful_fits
def label_islands(self, detectionthresholdmap, analysisthresholdmap):
"""
        Return a labelled array of pixels for fitting.
Args:
detectionthresholdmap (numpy.ndarray):
analysisthresholdmap (numpy.ndarray):
Returns:
list of valid islands (list of int)
labelled islands (numpy.ndarray)
"""
# If there is no usable data, we return an empty set of islands.
if not len(self.rmsmap.compressed()):
logging.warning("RMS map masked; sourcefinding skipped")
return [], numpy.zeros(self.data_bgsubbed.shape, dtype=numpy.int)
# At this point, we select all the data which is eligible for
# sourcefitting. We are actually using three separate filters, which
# exclude:
#
# 1. Anything which has been masked before we reach this point;
# 2. Any pixels which fall below the analysis threshold at that pixel
# position;
# 3. Any pixels corresponding to a position where the RMS noise is
# less than RMS_FILTER (default 0.001) times the median RMS across
# the whole image.
#
# The third filter attempts to exclude those regions of the image
# which contain no usable data; for example, the parts of the image
# falling outside the circular region produced by awimager.
RMS_FILTER = 0.001
clipped_data = numpy.ma.where(
(self.data_bgsubbed > analysisthresholdmap) &
(self.rmsmap >= (RMS_FILTER * numpy.ma.median(self.rmsmap))),
1, 0
).filled(fill_value=0)
labelled_data, num_labels = ndimage.label(clipped_data, STRUCTURING_ELEMENT)
labels_below_det_thr, labels_above_det_thr = [], []
if num_labels > 0:
# Select the labels of the islands above the analysis threshold
            # that have maximum values above the detection threshold.
# Like above we make sure not to select anything where either
# the data or the noise map are masked.
# We fill these pixels in above_det_thr with -1 to make sure
# its labels will not be in labels_above_det_thr.
# NB data_bgsubbed, and hence above_det_thr, is a masked array;
            # filled() sets all masked values equal to -1.
above_det_thr = (
self.data_bgsubbed - detectionthresholdmap
).filled(fill_value=-1)
# Note that we avoid label 0 (the background).
maximum_values = ndimage.maximum(
above_det_thr, labelled_data, numpy.arange(1, num_labels + 1)
)
# If there's only one island, ndimage.maximum will return a float,
# rather than a list. The rest of this function assumes that it's
# always a list, so we need to convert it.
if isinstance(maximum_values, float):
maximum_values = [maximum_values]
# We'll filter out the insignificant islands
for i, x in enumerate(maximum_values, 1):
if x < 0:
labels_below_det_thr.append(i)
else:
labels_above_det_thr.append(i)
# Set to zero all labelled islands that are below det_thr:
labelled_data = numpy.where(
numpy.in1d(labelled_data.ravel(), labels_above_det_thr).reshape(labelled_data.shape),
labelled_data, 0
)
return labels_above_det_thr, labelled_data
def _pyse(
self, detectionthresholdmap, analysisthresholdmap,
deblend_nthresh, force_beam, labelled_data=None, labels=[]
):
"""
Run Python-based source extraction on this image.
Args:
detectionthresholdmap (numpy.ndarray):
analysisthresholdmap (numpy.ndarray):
deblend_nthresh (int): number of subthresholds for deblending. 0
disables.
force_beam (bool): force all extractions to have major/minor axes
equal to the restoring beam
labelled_data (numpy.ndarray): labelled island map (output of
numpy.ndimage.label()). Will be calculated automatically if not
provided.
labels (list): list of labels in the island map to use for
fitting.
Returns:
(..utility.containers.ExtractionResults):
This is described in detail in the "Source Extraction System" document
by John Swinbank, available from TKP svn.
"""
# Map our chunks onto a list of islands.
island_list = []
if labelled_data is None:
labels, labelled_data = self.label_islands(
detectionthresholdmap, analysisthresholdmap
)
# Get a bounding box for each island:
# NB Slices ordered by label value (1...N,)
# 'None' returned for missing label indices.
slices = ndimage.find_objects(labelled_data)
for label in labels:
chunk = slices[label-1]
analysis_threshold = (analysisthresholdmap[chunk] /
self.rmsmap[chunk]).max()
# In selected_data only the pixels with the "correct"
# (see above) labels are retained. Other pixel values are
# set to -(bignum).
# In this way, disconnected pixels within (rectangular)
# slices around islands (particularly the large ones) do
# not affect the source measurements.
selected_data = numpy.ma.where(
labelled_data[chunk] == label,
self.data_bgsubbed[chunk].data, -extract.BIGNUM
).filled(fill_value=-extract.BIGNUM)
island_list.append(
extract.Island(
selected_data,
self.rmsmap[chunk],
chunk,
analysis_threshold,
detectionthresholdmap[chunk],
self.beam,
deblend_nthresh,
DEBLEND_MINCONT,
STRUCTURING_ELEMENT
)
)
# If required, we can save the 'left overs' from the deblending and
# fitting processes for later analysis. This needs setting up here:
if self.residuals:
self.residuals_from_gauss_fitting = numpy.zeros(self.data.shape)
self.residuals_from_deblending = numpy.zeros(self.data.shape)
for island in island_list:
self.residuals_from_deblending[island.chunk] += (
island.data.filled(fill_value=0.))
        # Deblend each of the islands into its constituent parts, if necessary
if deblend_nthresh:
deblended_list = map(lambda x: x.deblend(), island_list)
#deblended_list = [x.deblend() for x in island_list]
island_list = list(utils.flatten(deblended_list))
# Iterate over the list of islands and measure the source in each,
# appending it to the results list.
results = containers.ExtractionResults()
for island in island_list:
if force_beam:
fixed = {'semimajor': self.beam[0],
'semiminor': self.beam[1],
'theta': self.beam[2]}
else:
fixed = None
fit_results = island.fit(fixed=fixed)
if fit_results:
measurement, residual = fit_results
else:
# Failed to fit; drop this island and go to the next.
continue
try:
det = extract.Detection(measurement, self, chunk=island.chunk)
if (det.ra.error == float('inf') or
det.dec.error == float('inf')):
logger.warn('Bad fit from blind extraction at pixel coords:'
'%f %f - measurement discarded'
'(increase fitting margin?)', det.x, det.y )
else:
results.append(det)
if self.residuals:
self.residuals_from_deblending[island.chunk] -= (
island.data.filled(fill_value=0.))
self.residuals_from_gauss_fitting[island.chunk] += residual
except RuntimeError:
logger.warn("Island not processed; unphysical?")
raise
def is_usable(det):
# Check that both ends of each axis are usable; that is, that they
# fall within an unmasked part of the image.
# The axis will not likely fall exactly on a pixel number, so
# check all the surroundings.
def check_point(x, y):
x = (numpy.floor(x), numpy.ceil(x))
y = (numpy.floor(y), numpy.ceil(y))
for position in itertools.product(x, y):
try:
if self.data.mask[position[0], position[1]]:
# Point falls in mask
return False
except IndexError:
# Point falls completely outside image
return False
# Point is ok
return True
for point in (
(det.start_smaj_x, det.start_smaj_y),
(det.start_smin_x, det.start_smin_y),
(det.end_smaj_x, det.end_smaj_y),
(det.end_smin_x, det.end_smin_y)
):
if not check_point(*point):
logger.debug("Unphysical source at pixel %f, %f" % (det.x.value, det.y.value))
return False
return True
# Filter will return a list; ensure we return an ExtractionResults.
return containers.ExtractionResults(filter(is_usable, results))
|
bartscheers/tkp
|
tkp/sourcefinder/image.py
|
Python
|
bsd-2-clause
| 37,484
|
[
"Gaussian"
] |
6dcd0c5fc52cb04d904ad2310956cbd43066b2f196b191516c3160444f11963e
|
../../../../../../../share/pyshared/orca/scripts/apps/Eclipse/__init__.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/Eclipse/__init__.py
|
Python
|
gpl-3.0
| 73
|
[
"ORCA"
] |
3e474af4e6664ef5db25cb625e12f88146a29c48187afa5e4f8e2c1d99913aac
|
"""
Python dot expression completion using Pymacs.
This almost certainly needs work, but if you add
(require 'pycomplete)
to your init.el file and have Pymacs installed, when you hit M-TAB it will
try to complete the dot expression before point. For example, given this
import at the top of the file:
import time
typing "time.cl" then hitting M-TAB should complete "time.clock".
This is unlikely to be done the way Emacs completion ought to be done, but
it's a start. Perhaps someone with more Emacs mojo can take this stuff and
do it right.
See pycomplete.el for the Emacs Lisp side of things.
Most of the public functions in this module have the signature
(s, fname=None, imports=None)
where s is the symbol to complete, fname is the file path and imports
the list of import statements to use. The fname parameter is used as a
key to cache the global and local context and the symbols imported or
evaluated so far. The cache for an fname is cleared when its imports
are changed. When not passing a list of imports (or None), the currently
used imports are preserved. The caching should make subsequent operations
(e.g. another completion or signature lookup after a completion) less
expensive.
"""
# Original Author: Skip Montanaro <skip@pobox.com>
# Maintainer: Urs Fleisch <ufleisch@users.sourceforge.net>
# Created: Oct 2004
# Keywords: python pymacs emacs
# This software is provided as-is, without express or implied warranty.
# Permission to use, copy, modify, distribute or sell this software, without
# fee, for any purpose and by any individual or organization, is hereby
# granted, provided that the above copyright notice and this paragraph
# appear in all copies.
# Along with pycomplete.el this file allows programmers to complete Python
# symbols within the current buffer.
import sys
import types
import inspect
import keyword
import os
import pydoc
import ast
import re
if sys.version_info[0] >= 3: # Python 3
from io import StringIO
def is_num_or_str(obj):
return isinstance(obj, (int, float, str))
def is_class_type(obj):
return type(obj) == type
def get_unbound_function(unbound):
return unbound
def get_method_function(meth):
return meth.__func__
def get_function_code(func):
return func.__code__
def update_with_builtins(keys):
import builtins
keys.update(dir(builtins))
else: # Python 2
from StringIO import StringIO
def is_num_or_str(obj):
return isinstance(obj, (int, long, float, basestring))
def is_class_type(obj):
return type(obj) in (types.ClassType, types.TypeType)
def get_unbound_function(unbound):
return unbound.im_func
def get_method_function(meth):
return meth.im_func
def get_function_code(func):
return func.func_code
def update_with_builtins(keys):
import __builtin__
keys.update(dir(__builtin__))
try:
x = set
except NameError:
from sets import Set as set
else:
del x
class ImportExtractor(ast.NodeVisitor):
"""NodeVisitor to extract the top-level import statements from an AST.
To generate code containing all imports in try-except statements,
call get_import_code(node), where node is a parsed AST.
"""
def visit_FunctionDef(self, node):
# Ignore imports inside functions or methods.
pass
def visit_ClassDef(self, node):
# Ignore imports inside classes.
pass
def generic_visit(self, node):
# Store import statement nodes.
if isinstance(node, (ast.Import, ast.ImportFrom)):
self._import_nodes.append(node)
ast.NodeVisitor.generic_visit(self, node)
def get_import_code(self, node, fname='<string>'):
"""Get compiled code of all top-level import statements found in the
AST of node."""
self._import_nodes = []
self.visit(node)
body = []
for imp_node in self._import_nodes:
if isinstance(imp_node, ast.ImportFrom) and \
imp_node.module == '__future__':
# 'SyntaxError: from __future__ imports must occur at the
# beginning of the file' is raised if a 'from __future__ import'
# is wrapped in try-except, so use only the import statement.
body.append(imp_node)
else:
body.append(ast.TryExcept(body=[imp_node], handlers=[
ast.ExceptHandler(type=None, name=None, body=[ast.Pass()])],
orelse=[]))
node = ast.Module(body=body)
ast.fix_missing_locations(node)
code = compile(node, fname, 'exec')
return code
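# Typical use of ImportExtractor (sketch; mirrors what parse_source() below
# actually does):
#   node = ast.parse(source, fname)
#   import_code = ImportExtractor().get_import_code(node, fname)
#   exec(import_code, namespace)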
class CodeRemover(ast.NodeTransformer):
"""NodeTransformer which replaces function statements with 'pass'
and keeps only safe assignments, so that the resulting code can
be used for code completion.
To reduce the code from the node of a parsed AST, call
get_transformed_code(node).
"""
def visit_FunctionDef(self, node):
# Replace all function statements except doc string by 'pass'.
if node.body:
if isinstance(node.body[0], ast.Expr) and \
isinstance(node.body[0].value, ast.Str):
# Keep doc string.
first_stmt = node.body[1] if len(node.body) > 1 else node.body[0]
node.body = [node.body[0]]
else:
first_stmt = node.body[0]
node.body = []
node.body.append(ast.copy_location(ast.Pass(), first_stmt))
return node
return None
def visit_Expr(self, node):
# Remove all expressions except strings to keep doc strings.
if isinstance(node.value, ast.Str):
return node
return None
# Class name in CapCase, as suggested by PEP8 Python style guide
_classNameRe = re.compile(r'^_?[A-Z][A-Za-z0-9]+$')
@staticmethod
def replace_unsafe_value(node, replace_self=None):
"""Modify value from assignment if unsafe.
If replace_self is given, only assignments starting with 'self.' are
processed, the assignment node is returned with 'self.' replaced by
the value of replace_self (typically the class name).
For other assignments, None is returned."""
for i, target in enumerate(node.targets):
if not isinstance(target, (ast.Name, ast.Attribute)):
# Only process assignments to names and attributes,
# not tuples.
return None
if replace_self:
if isinstance(target, ast.Attribute) and \
isinstance(target.value, ast.Name) and \
target.value.id == 'self' and \
isinstance(target.value.ctx, ast.Load):
node.targets[i].value.id = replace_self
else:
return None
if isinstance(node.value, (ast.Str, ast.Num)):
pass
elif isinstance(node.value, (ast.List, ast.Tuple)):
node.value.elts = []
elif isinstance(node.value, ast.Dict):
node.value.keys = []
node.value.values = []
elif isinstance(node.value, ast.ListComp):
node.value = ast.copy_location(ast.List(elts=[], ctx=ast.Load()), node.value)
elif isinstance(node.value, ast.Call):
if isinstance(node.value.func, ast.Name):
name = node.value.func.id
if name == 'open':
if sys.version_info[0] >= 3: # Python 3
node.value = ast.copy_location(
ast.Attribute(value=ast.Name(id='io', ctx=ast.Load()),
attr='BufferedIOBase', ctx=ast.Load()),
node.value)
else: # Python 2
node.value = ast.copy_location(
ast.Name(id='file', ctx=ast.Load()), node.value)
# Wrap class lookup in try-except because it is not fail-safe.
node = ast.copy_location(ast.TryExcept(body=[node], handlers=[
ast.ExceptHandler(type=None, name=None, body=[ast.Pass()])],
orelse=[]), node)
ast.fix_missing_locations(node)
elif CodeRemover._classNameRe.match(name):
node.value = ast.copy_location(
ast.Name(id=name, ctx=ast.Load()), node.value)
# Wrap class lookup in try-except because it is not fail-safe.
node = ast.copy_location(ast.TryExcept(body=[node], handlers=[
ast.ExceptHandler(type=None, name=None, body=[ast.Pass()])],
orelse=[]), node)
ast.fix_missing_locations(node)
else:
node.value = ast.copy_location(
ast.Name(id='None', ctx=ast.Load()), node.value)
elif isinstance(node.value.func, ast.Attribute) and \
CodeRemover._classNameRe.match(node.value.func.attr):
node.value = node.value.func
# Wrap class lookup in try-except because it is not fail-safe.
node = ast.copy_location(ast.TryExcept(body=[node], handlers=[
ast.ExceptHandler(type=None, name=None, body=[ast.Pass()])],
orelse=[]), node)
ast.fix_missing_locations(node)
else:
node.value = ast.copy_location(
ast.Name(id='None', ctx=ast.Load()), node.value)
else:
node.value = ast.copy_location(ast.Name(id='None', ctx=ast.Load()), node.value)
return node
def visit_Assign(self, node):
        # Replace unsafe values of assignments by None.
return self.replace_unsafe_value(node)
def visit_Name(self, node):
# Pass names for bases in ClassDef.
return node
def visit_Attribute(self, node):
# Pass attributes for bases in ClassDef.
return node
def visit_ClassDef(self, node):
# Visit nodes of class.
# Store instance member assignments to be added later to generated code.
self_assignments = {}
for child in ast.walk(node):
if isinstance(child, ast.Assign):
new_child = self.replace_unsafe_value(child,
replace_self=node.name)
if new_child:
new_var = child.targets[0].attr
old_assign = self_assignments.get(new_var)
if not old_assign or (
isinstance(old_assign, ast.Assign) and
isinstance(old_assign.value, ast.Name) and
old_assign.value.id == 'None'):
self_assignments[new_var] = new_child
self._class_assignments.extend(list(self_assignments.values()))
return ast.NodeTransformer.generic_visit(self, node)
def visit_Module(self, node):
# Visit nodes of module
return ast.NodeTransformer.generic_visit(self, node)
def generic_visit(self, node):
# Remove everything which is not handled by the methods above
return None
def get_transformed_code(self, node, fname='<string>'):
"""Get compiled code containing only empty functions and methods
and safe assignments found in the AST of node."""
self._class_assignments = []
node = self.visit(node)
# The self members are added as attributes to the class objects
# rather than included as class variables inside the class definition
# so that names starting with '__' are not mangled.
node.body.extend(self._class_assignments)
code = compile(node, fname, 'exec')
return code
class PyCompleteDocument(object):
"""Completion data for Python source file."""
_helpout = StringIO
_stdout = sys.stdout
_instances = {}
def __init__(self, fname=None):
"""Constructor for internal use.
The factory method instance() shall be used instead.
"""
self._fname = fname
self._imports = None
self._globald = globals().copy()
self._symnames = []
self._symobjs = {}
self._parse_source_called = False
@classmethod
def instance(cls, fname):
"""Get PyCompleteDocument object for fname.
If no object for this file name exists, a new object is created and
registered.
"""
obj = cls._instances.get(fname)
if obj is None:
obj = PyCompleteDocument(fname)
cls._instances[fname] = obj
return obj
def _import_modules(self, imports):
"""Import modules using the statements in imports.
If the imports are the same as in the last call, the methods
immediately returns, also if imports is None.
"""
if imports is None and not self._parse_source_called:
self.parse_source()
if imports is None or imports == self._imports:
return
        # change to the directory containing the file
if self._fname:
os.chdir(os.path.dirname(self._fname))
self._globald = globals().copy()
self._symnames = []
self._symobjs = {}
for stmt in imports:
try:
exec(stmt, self._globald)
except TypeError:
raise TypeError('invalid type: %s' % stmt)
except Exception:
continue
self._imports = imports
def _collect_symbol_names(self):
"""Collect the global, local, builtin symbols in _symnames.
If _symnames is already set, the method immediately returns.
"""
if not self._symnames:
keys = set(keyword.kwlist)
keys.update(list(self._globald.keys()))
update_with_builtins(keys)
self._symnames = list(keys)
self._symnames.sort()
def _get_symbol_object(self, s):
"""Get a symbol by evaluating its name or importing a module
or submodule with the name s.
"""
sym = self._symobjs.get(s)
if sym is not None:
return sym
        # change to the directory containing the file
if self._fname:
os.chdir(os.path.dirname(self._fname))
try:
sym = eval(s, self._globald)
except NameError:
try:
sym = __import__(s, self._globald)
self._globald[s] = sym
except ImportError:
pass
except AttributeError:
try:
sym = __import__(s, self._globald)
except ImportError:
pass
except SyntaxError:
pass
if sym is not None:
self._symobjs[s] = sym
return sym
def _load_symbol(self, s, strict=False):
"""Get a symbol for a dotted expression.
Returns the last successfully found symbol object in the
dotted chain. If strict is set True, it returns True as
soon as a symbol is not found. Therefore strict=True can
be used to find exactly the symbol for s, otherwise a
symbol for a parent can be returned, which may be enough
if searching for help on symbol.
"""
sym = self._symobjs.get(s)
if sym is not None:
return sym
dots = s.split('.')
if not s or len(dots) == 1:
sym = self._get_symbol_object(s)
else:
for i in range(1, len(dots) + 1):
s = '.'.join(dots[:i])
if not s:
continue
sym_i = self._get_symbol_object(s)
if sym_i is not None:
sym = sym_i
elif strict:
return None
return sym
def _get_help(self, s, imports=None):
"""Return string printed by help function."""
if not s:
return ''
if s == 'pydoc.help':
# Prevent pydoc from going into interactive mode
s = 'pydoc.Helper'
obj = None
if not keyword.iskeyword(s):
try:
self._import_modules(imports)
obj = self._load_symbol(s, strict=False)
except Exception as ex:
return '%s' % ex
if not obj:
obj = str(s)
out = self._helpout()
try:
sys.stdout = out
pydoc.help(obj)
finally:
sys.stdout = self._stdout
return out.getvalue()
@staticmethod
def _find_constructor(class_ob):
"""Given a class object, return a function object used for the
constructor (ie, __init__() ) or None if we can't find one."""
try:
return get_unbound_function(class_ob.__init__)
except AttributeError:
for base in class_ob.__bases__:
rc = PyCompleteDocument._find_constructor(base)
if rc is not None:
return rc
return None
def get_all_completions(self, s, imports=None):
"""Return contextual completion of s (string of >= zero chars).
If given, imports is a list of import statements to be executed
first.
"""
self._import_modules(imports)
last_dot_pos = s.rfind('.')
if last_dot_pos == -1:
self._collect_symbol_names()
if s:
return [k for k in self._symnames if k.startswith(s)]
else:
return self._symnames
sym = self._load_symbol(s[:last_dot_pos], strict=True)
if sym is not None:
s = s[last_dot_pos + 1:]
return [k for k in dir(sym) if k.startswith(s)]
return []
def complete(self, s, imports=None):
"""Complete symbol if unique, else return list of completions."""
if not s:
return ''
completions = self.get_all_completions(s, imports)
if len(completions) == 0:
return None
else:
dots = s.split(".")
            prefix = os.path.commonprefix(completions)
if len(completions) == 1 or len(prefix) > len(dots[-1]):
return [prefix[len(dots[-1]):]]
return completions
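    # Worked example of the logic above: with s = 'time.cl' and completions
    # == ['clock'], the common prefix is 'clock' and ['ock'] (the remaining
    # characters) is returned; with completions == ['clock', 'close'] the
    # common prefix 'cl' is no longer than the typed fragment, so the full
    # list is returned for the user to choose from.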
def help(self, s, imports=None):
"""Return help on object."""
try:
return self._get_help(s, imports)
except Exception as ex:
return '%s' % ex
def get_docstring(self, s, imports=None):
"""Return docstring for symbol s."""
if s and not keyword.iskeyword(s):
try:
self._import_modules(imports)
obj = self._load_symbol(s, strict=True)
if obj and not is_num_or_str(obj):
doc = inspect.getdoc(obj)
if doc:
return doc
except:
pass
return ''
def get_signature(self, s, imports=None):
"""Return info about function parameters."""
if not s or keyword.iskeyword(s):
return ''
obj = None
sig = ""
try:
self._import_modules(imports)
obj = self._load_symbol(s, strict=False)
except Exception as ex:
return '%s' % ex
if is_class_type(obj):
# Look for the highest __init__ in the class chain.
ctr = self._find_constructor(obj)
if ctr is not None and type(ctr) in (
types.MethodType, types.FunctionType, types.LambdaType):
obj = ctr
elif type(obj) == types.MethodType:
# bit of a hack for methods - turn it into a function
# but we drop the "self" param.
obj = get_method_function(obj)
if type(obj) in [types.FunctionType, types.LambdaType]:
(args, varargs, varkw, defaults) = inspect.getargspec(obj)
sig = ('%s: %s' % (obj.__name__,
inspect.formatargspec(args, varargs, varkw,
defaults)))
doc = getattr(obj, '__doc__', '')
if doc and not sig:
doc = doc.lstrip()
pos = doc.find('\n')
if pos < 0 or pos > 70:
pos = 70
sig = doc[:pos]
return sig
def get_location(self, s, imports=None):
"""Return file path and line number of symbol, None if not found."""
if not s or keyword.iskeyword(s):
return None
try:
self._import_modules(imports)
obj = self._load_symbol(s, strict=False)
if obj is not None:
if is_class_type(obj):
obj = obj.__init__
if type(obj) == types.MethodType:
obj = get_method_function(obj)
if type(obj) in [types.FunctionType, types.LambdaType]:
code = get_function_code(obj)
return (os.path.abspath(code.co_filename), code.co_firstlineno)
# If not found, try using inspect.
return (inspect.getsourcefile(obj), inspect.getsourcelines(obj)[1])
except:
pass
return None
def parse_source(self, only_reload=False):
"""Parse source code to get imports and completions.
If this method is called, the imports parameter for the other methods
must be omitted (or None), so that the imports are taken from the
parsed source code. If only_reload is True, the source is only parsed
if it has been parsed before.
None is returned if OK, else a string describing the error.
"""
if only_reload and not self._parse_source_called:
return None
self._parse_source_called = True
if not self._fname:
return None
try:
with open(self._fname) as fh:
src = fh.read()
except IOError as ex:
return '%s' % ex
        # change to the directory containing the file
os.chdir(os.path.dirname(self._fname))
try:
node = ast.parse(src, self._fname)
import_code = ImportExtractor().get_import_code(node, self._fname)
except (SyntaxError, TypeError) as ex:
return '%s' % ex
old_globald = self._globald.copy()
self._globald = globals().copy()
try:
exec(import_code, self._globald)
except Exception as ex:
self._globald = old_globald
return '%s' % ex
self._symnames = []
self._symobjs = {}
reduced_code = CodeRemover().get_transformed_code(node, self._fname)
try:
exec(reduced_code, self._globald)
except Exception as ex:
return '%s' % ex
return None
def get_all_completions(s, fname=None, imports=None):
"""Get a list of possible completions for s.
The completions extend the expression s after the last dot.
"""
return PyCompleteDocument.instance(fname).get_all_completions(
s, imports)
def pycomplete(s, fname=None, imports=None):
"""Complete the Python expression s.
If multiple completions are found, a list of possible completions
(names after the last dot) is returned.
If one completion is found, a list with a string containing the
remaining characters is returned.
If no completion is found, None is returned.
"""
return PyCompleteDocument.instance(fname).complete(s, imports)
def pyhelp(s, fname=None, imports=None):
"""Return help on object s."""
return PyCompleteDocument.instance(fname).help(s, imports)
def pydocstring(s, fname=None, imports=None):
"""Return docstring of symbol."""
return PyCompleteDocument.instance(fname).get_docstring(s, imports)
def pysignature(s, fname=None, imports=None):
"""Return info about function parameters."""
return PyCompleteDocument.instance(fname).get_signature(s, imports)
def pylocation(s, fname=None, imports=None):
"""Return file path and line number of symbol, None if not found."""
return PyCompleteDocument.instance(fname).get_location(s, imports)
def parse_source(fname, only_reload=False):
"""Parse source code to get imports and completions.
If this function is called, the imports parameter for the other functions
must be omitted (or None), so that the imports are taken from the
parsed source code. If only_reload is True, the source is only parsed if
it has been parsed before.
"""
return PyCompleteDocument.instance(fname).parse_source(only_reload)
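# Example session (illustrative, mirroring the module docstring; exact
# results depend on the running Python version -- time.clock was removed in
# Python 3.8):
#   >>> pycomplete('time.cl', imports=['import time'])
#   ['ock']
#   >>> pydocstring('time.sleep', imports=['import time'])  # -> doc text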
# Local Variables :
# pymacs-auto-reload : t
# End :
| paulgriepentrog/python-mode | completion/pycomplete.py | Python | gpl-3.0 | 25,005 | ["VisIt"] | 3226c80ef437e3a552d7429d4948a5fdc79a8ff3fcb95d4c85dbc2e769a46688 |
'''
ustvnow XBMC Plugin
Copyright (C) 2015 t0mm0, Lunatixz
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from resources.lib import Addon, ustvnow
import sys
import urllib
import xbmc, xbmcgui, xbmcplugin
Addon.plugin_url = sys.argv[0]
Addon.plugin_handle = int(sys.argv[1])
Addon.plugin_queries = Addon.parse_query(sys.argv[2][1:])
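# Illustration with hypothetical values: launching the plugin via a URL such
# as plugin://plugin.video.ustvnow/?mode=play&name=ABC should leave
# Addon.plugin_queries as {'mode': 'play', 'name': 'ABC'}, assuming
# Addon.parse_query splits the query string into a dict.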
email = Addon.get_setting('email')
password = Addon.get_setting('password')
premium = Addon.get_setting('subscription') == "true"
dlg = xbmcgui.Dialog()
if not email:
dlg.ok("USTVnow", "Please visit www.ustvnow.com", "and register for your login credentials")
retval = dlg.input('Enter USTVnow Account Email', type=xbmcgui.INPUT_ALPHANUM)
if retval and len(retval) > 0:
Addon.set_setting('email', str(retval))
email = Addon.get_setting('email')
retval = dlg.input('Enter USTVnow Account Password', type=xbmcgui.INPUT_ALPHANUM)
if retval and len(retval) > 0:
Addon.set_setting('password', str(retval))
password = Addon.get_setting('password')
if dlg.yesno("USTVnow", 'Are you a premium subscriber?'):
Addon.set_setting('subscription', 'true')
else:
Addon.set_setting('subscription', 'false')
if not premium:
Addon.set_setting('quality', '0')
ustv = ustvnow.Ustvnow(email, password, premium)
Addon.log('plugin url: ' + Addon.plugin_url)
Addon.log('plugin queries: ' + str(Addon.plugin_queries))
Addon.log('plugin handle: ' + str(Addon.plugin_handle))
mode = Addon.plugin_queries['mode']
if mode == 'main':
Addon.log(mode)
Addon.add_directory({'mode': 'live'}, Addon.get_string(30001))
    if premium:
Addon.add_directory({'mode': 'recordings'}, Addon.get_string(30002))
elif mode == 'live':
Addon.log(mode)
stream_type = ['rtmp', 'rtsp'][int(Addon.get_setting('stream_type'))]
channels = ustv.get_channels(int(Addon.get_setting('quality')),
stream_type)
if channels:
for c in channels:
rURL = "plugin://plugin.video.ustvnow/?name="+c['name']+"&mode=play"
item = xbmcgui.ListItem(path=rURL)
Addon.add_video_item(rURL,
{'title': '%s - %s' % (c['name'],
c['now']['title']),
'plot': c['now']['plot']},
img=c['icon'])
elif mode == 'recordings':
Addon.log(mode)
stream_type = ['rtmp', 'rtsp'][int(Addon.get_setting('stream_type'))]
recordings = ustv.get_recordings(int(Addon.get_setting('quality')),
stream_type)
if recordings:
for r in recordings:
cm_del = (Addon.get_string(30003),
'XBMC.RunPlugin(%s/?mode=delete&del=%s)' %
(Addon.plugin_url, urllib.quote(r['del_url'])))
title = '%s (%s on %s)' % (r['title'], r['rec_date'], r['channel'])
Addon.add_video_item(r['stream_url'], {'title': title,
'plot': r['plot']},
img=r['icon'], cm=[cm_del], cm_replace=True)
elif mode == 'delete':
dialog = xbmcgui.Dialog()
ret = dialog.yesno(Addon.get_string(30000), Addon.get_string(30004),
Addon.get_string(30005))
if ret == 1:
ustv.delete_recording(Addon.plugin_queries['del'])
elif mode == 'play':
name = Addon.plugin_queries['name']
try:
stream_type = 'rtmp'
channels = []
quality = int(Addon.get_setting('quality'))
channels = ustv.get_channels(quality,stream_type)
for c in channels:
if c['name'] == name:
print "setResolvedUrl"
item = xbmcgui.ListItem(path=c['url'])
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
except:
pass
Addon.end_of_directory()
| aplicatii-romanesti/allinclusive-kodi-pi | .kodi/addons/plugin.video.ustvnow/default.py | Python | apache-2.0 | 4,591 | ["VisIt"] | 65936512dea7aeb8e66594e5a324eb8425e66a7982aa33132e01a96615490ab2 |
|
import numpy as np
from gpaw.lfc import BasisFunctions
from gpaw.utilities import unpack
from gpaw.utilities.tools import tri2full
from gpaw import debug
from gpaw.lcao.overlap import NewTwoCenterIntegrals as NewTCI
from gpaw.utilities.blas import gemm, gemmdot
from gpaw.wavefunctions.base import WaveFunctions
from gpaw.kpt_descriptor import KPointDescriptor
from gpaw.mpi import serial_comm
from gpaw.lfc import LocalizedFunctionsCollection as LFC
from gpaw.kpoint import KPoint
import warnings
def get_r_and_offsets(nl, spos_ac, cell_cv):
r_and_offset_aao = {}
def add(a1, a2, R_c, offset):
if not (a1, a2) in r_and_offset_aao:
r_and_offset_aao[(a1, a2)] = []
r_and_offset_aao[(a1, a2)].append((R_c, offset))
for a1, spos1_c in enumerate(spos_ac):
a2_a, offsets = nl.get_neighbors(a1)
for a2, offset in zip(a2_a, offsets):
spos2_c = spos_ac[a2] + offset
R_c = np.dot(spos2_c - spos1_c, cell_cv)
add(a1, a2, R_c, offset)
if a1 != a2 or offset.any():
add(a2, a1, -R_c, -offset)
return r_and_offset_aao
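# Shape of the returned mapping (illustration with hypothetical values):
#   r_and_offset_aao[(a1, a2)] == [(R_c, offset), ...]
# where R_c is the Cartesian displacement from atom a1 to atom a2 (possibly
# in a periodic image) and offset is that image's integer cell offset.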
def add_paw_correction_to_overlap(setups, P_aqMi, S_qMM, Mstart=0,
Mstop=None):
if Mstop is None:
Mstop = setups.nao
for a, P_qMi in P_aqMi.items():
dO_ii = np.asarray(setups[a].dO_ii, S_qMM.dtype)
for S_MM, P_Mi in zip(S_qMM, P_qMi):
dOP_iM = np.zeros((dO_ii.shape[1], setups.nao),
P_Mi.dtype)
# (ATLAS can't handle uninitialized output array)
gemm(1.0, P_Mi, dO_ii, 0.0, dOP_iM, 'c')
gemm(1.0, dOP_iM, P_Mi[Mstart:Mstop],
1.0, S_MM, 'n')
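# Hedged NumPy-only sketch of what the two gemm calls above compute for one
# atom and one q-point (dense, serial; not used by GPAW itself):
#   def _paw_overlap_correction_dense(S_MM, P_Mi, dO_ii, Mstart, Mstop):
#       # S_MM += P_Mi[Mstart:Mstop] . dO_ii . conj(P_Mi).T
#       dOP_iM = np.dot(dO_ii, P_Mi.T.conj())
#       S_MM += np.dot(P_Mi[Mstart:Mstop], dOP_iM)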
class LCAOWaveFunctions(WaveFunctions):
def __init__(self, ksl, gd, nvalence, setups, bd,
dtype, world, kd, timer=None):
WaveFunctions.__init__(self, gd, nvalence, setups, bd,
dtype, world, kd, timer)
self.ksl = ksl
self.S_qMM = None
self.T_qMM = None
self.P_aqMi = None
self.timer.start('TCI: Evaluate splines')
self.tci = NewTCI(gd.cell_cv, gd.pbc_c, setups, kd.ibzk_qc, kd.gamma)
self.timer.stop('TCI: Evaluate splines')
self.basis_functions = BasisFunctions(gd,
[setup.phit_j
for setup in setups],
kd,
cut=True)
def empty(self, n=(), global_array=False, realspace=False):
if realspace:
return self.gd.empty(n, self.dtype, global_array)
else:
if isinstance(n, int):
n = (n,)
nao = self.setups.nao
return np.empty(n + (nao,), self.dtype)
def summary(self, fd):
fd.write('Wave functions: LCAO\n')
def set_eigensolver(self, eigensolver):
WaveFunctions.set_eigensolver(self, eigensolver)
eigensolver.initialize(self.gd, self.dtype, self.setups.nao, self.ksl)
def set_positions(self, spos_ac):
self.timer.start('Basic WFS set positions')
WaveFunctions.set_positions(self, spos_ac)
self.timer.stop('Basic WFS set positions')
self.timer.start('Basis functions set positions')
self.basis_functions.set_positions(spos_ac)
self.timer.stop('Basis functions set positions')
if self.ksl is not None:
self.basis_functions.set_matrix_distribution(self.ksl.Mstart,
self.ksl.Mstop)
nq = len(self.kd.ibzk_qc)
nao = self.setups.nao
mynbands = self.bd.mynbands
Mstop = self.ksl.Mstop
Mstart = self.ksl.Mstart
mynao = Mstop - Mstart
if self.ksl.using_blacs: # XXX
# S and T have been distributed to a layout with blacs, so
# discard them to force reallocation from scratch.
#
# TODO: evaluate S and T when they *are* distributed, thus saving
# memory and avoiding this problem
self.S_qMM = None
self.T_qMM = None
S_qMM = self.S_qMM
T_qMM = self.T_qMM
if S_qMM is None: # XXX
# First time:
assert T_qMM is None
if self.ksl.using_blacs: # XXX
self.tci.set_matrix_distribution(Mstart, mynao)
S_qMM = np.empty((nq, mynao, nao), self.dtype)
T_qMM = np.empty((nq, mynao, nao), self.dtype)
for kpt in self.kpt_u:
if kpt.C_nM is None:
kpt.C_nM = np.empty((mynbands, nao), self.dtype)
self.allocate_arrays_for_projections(
self.basis_functions.my_atom_indices)
self.P_aqMi = {}
for a in self.basis_functions.my_atom_indices:
ni = self.setups[a].ni
self.P_aqMi[a] = np.empty((nq, nao, ni), self.dtype)
for kpt in self.kpt_u:
q = kpt.q
kpt.P_aMi = dict([(a, P_qMi[q])
for a, P_qMi in self.P_aqMi.items()])
self.timer.start('TCI: Calculate S, T, P')
# Calculate lower triangle of S and T matrices:
self.tci.calculate(spos_ac, S_qMM, T_qMM, self.P_aqMi)
add_paw_correction_to_overlap(self.setups, self.P_aqMi, S_qMM,
self.ksl.Mstart, self.ksl.Mstop)
self.timer.stop('TCI: Calculate S, T, P')
S_MM = None # allow garbage collection of old S_qMM after redist
S_qMM = self.ksl.distribute_overlap_matrix(S_qMM)
T_qMM = self.ksl.distribute_overlap_matrix(T_qMM)
for kpt in self.kpt_u:
q = kpt.q
kpt.S_MM = S_qMM[q]
kpt.T_MM = T_qMM[q]
if (debug and self.band_comm.size == 1 and self.gd.comm.rank == 0 and
nao > 0 and not self.ksl.using_blacs):
# S and T are summed only on comm master, so check only there
from numpy.linalg import eigvalsh
self.timer.start('Check positive definiteness')
for S_MM in S_qMM:
tri2full(S_MM, UL='L')
smin = eigvalsh(S_MM).real.min()
if smin < 0:
raise RuntimeError('Overlap matrix has negative '
'eigenvalue: %e' % smin)
self.timer.stop('Check positive definiteness')
self.positions_set = True
self.S_qMM = S_qMM
self.T_qMM = T_qMM
def initialize(self, density, hamiltonian, spos_ac):
if density.nt_sG is None:
if self.kpt_u[0].f_n is None or self.kpt_u[0].C_nM is None:
density.initialize_from_atomic_densities(self.basis_functions)
# Initialize GLLB-potential from basis function orbitals
if hamiltonian.xc.type == 'GLLB':
hamiltonian.xc.initialize_from_atomic_orbitals(self.basis_functions)
else:
# We have the info we need for a density matrix, so initialize
# from that instead of from scratch. This will be the case
# after set_positions() during a relaxation
density.initialize_from_wavefunctions(self)
else:
# After a restart, nt_sg doesn't exist yet, so we'll have to
# make sure it does. Of course, this should have been taken care
# of already by this time, so we should improve the code elsewhere
density.calculate_normalized_charges_and_mix()
hamiltonian.update(density)
def initialize_wave_functions_from_lcao(self):
"""
        Fill the calc.wfs.kpt_[u].psit_nG arrays with useful data.
Normally psit_nG is NOT used in lcao mode, but some extensions
(like ase.dft.wannier) want to have it.
This code is adapted from fd.py / initialize_from_lcao_coefficients()
and fills psit_nG with data constructed from the current lcao
coefficients (kpt.C_nM).
(This may or may not work in band-parallel case!)
"""
#print('initialize_wave_functions_from_lcao')
bfs = self.basis_functions
for kpt in self.kpt_u:
#print("kpt: {0}".format(kpt))
kpt.psit_nG = self.gd.zeros(self.bd.nbands, self.dtype)
bfs.lcao_to_grid(kpt.C_nM, kpt.psit_nG[:self.bd.mynbands], kpt.q)
# kpt.C_nM = None
#
def initialize_wave_functions_from_restart_file(self):
"""Dummy function to ensure compatibility to fd mode"""
self.initialize_wave_functions_from_lcao()
#
def calculate_density_matrix(self, f_n, C_nM, rho_MM=None):
# ATLAS can't handle uninitialized output array:
#rho_MM.fill(42)
self.timer.start('Calculate density matrix')
rho_MM = self.ksl.calculate_density_matrix(f_n, C_nM, rho_MM)
self.timer.stop('Calculate density matrix')
return rho_MM
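        # NOTE: the code below this return statement is unreachable; it reads
        # as a dense reference implementation of rho = C^dagger f C (plus an
        # untested r2k alternative) kept for documentation.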
# ----------------------------
if 1:
# XXX Should not conjugate, but call gemm(..., 'c')
# Although that requires knowing C_Mn and not C_nM.
# that also conforms better to the usual conventions in literature
Cf_Mn = C_nM.T.conj() * f_n
self.timer.start('gemm')
gemm(1.0, C_nM, Cf_Mn, 0.0, rho_MM, 'n')
self.timer.stop('gemm')
self.timer.start('band comm sum')
self.bd.comm.sum(rho_MM)
self.timer.stop('band comm sum')
else:
# Alternative suggestion. Might be faster. Someone should test this
from gpaw.utilities.blas import r2k
C_Mn = C_nM.T.copy()
r2k(0.5, C_Mn, f_n * C_Mn, 0.0, rho_MM)
tri2full(rho_MM)
def calculate_density_matrix_delta(self, d_nn, C_nM, rho_MM=None):
# ATLAS can't handle uninitialized output array:
#rho_MM.fill(42)
self.timer.start('Calculate density matrix')
rho_MM = self.ksl.calculate_density_matrix_delta(d_nn, C_nM, rho_MM)
self.timer.stop('Calculate density matrix')
return rho_MM
def add_to_density_from_k_point_with_occupation(self, nt_sG, kpt, f_n):
"""Add contribution to pseudo electron-density. Do not use the standard
occupation numbers, but ones given with argument f_n."""
# Custom occupations are used in calculation of response potential
# with GLLB-potential
if kpt.rho_MM is None:
rho_MM = self.calculate_density_matrix(f_n, kpt.C_nM)
if hasattr(kpt, 'c_on'):
assert self.bd.comm.size == 1
d_nn = np.zeros((self.bd.mynbands, self.bd.mynbands),
dtype=kpt.C_nM.dtype)
for ne, c_n in zip(kpt.ne_o, kpt.c_on):
assert abs(c_n.imag).max() < 1e-14
d_nn += ne * np.outer(c_n.conj(), c_n).real
rho_MM += self.calculate_density_matrix_delta(d_nn, kpt.C_nM)
else:
rho_MM = kpt.rho_MM
self.timer.start('Construct density')
self.basis_functions.construct_density(rho_MM, nt_sG[kpt.s], kpt.q)
self.timer.stop('Construct density')
def add_to_kinetic_density_from_k_point(self, taut_G, kpt):
raise NotImplementedError('Kinetic density calculation for LCAO '
'wavefunctions is not implemented.')
def calculate_forces(self, hamiltonian, F_av):
self.timer.start('LCAO forces')
spos_ac = self.tci.atoms.get_scaled_positions() % 1.0
ksl = self.ksl
nao = ksl.nao
mynao = ksl.mynao
nq = len(self.kd.ibzk_qc)
dtype = self.dtype
tci = self.tci
gd = self.gd
bfs = self.basis_functions
Mstart = ksl.Mstart
Mstop = ksl.Mstop
from gpaw.kohnsham_layouts import BlacsOrbitalLayouts
isblacs = isinstance(ksl, BlacsOrbitalLayouts) # XXX
if not isblacs:
self.timer.start('TCI derivative')
dThetadR_qvMM = np.empty((nq, 3, mynao, nao), dtype)
dTdR_qvMM = np.empty((nq, 3, mynao, nao), dtype)
dPdR_aqvMi = {}
for a in self.basis_functions.my_atom_indices:
ni = self.setups[a].ni
dPdR_aqvMi[a] = np.empty((nq, 3, nao, ni), dtype)
tci.calculate_derivative(spos_ac, dThetadR_qvMM, dTdR_qvMM,
dPdR_aqvMi)
gd.comm.sum(dThetadR_qvMM)
gd.comm.sum(dTdR_qvMM)
self.timer.stop('TCI derivative')
my_atom_indices = bfs.my_atom_indices
atom_indices = bfs.atom_indices
def _slices(indices):
for a in indices:
M1 = bfs.M_a[a] - Mstart
M2 = M1 + self.setups[a].nao
if M2 > 0:
yield a, max(0, M1), M2
def slices():
return _slices(atom_indices)
def my_slices():
return _slices(my_atom_indices)
        #
        # Energy-weighted density matrix:
        #
        #   E_{mu nu} = sum_{x z} S^-1_{mu x} H_{x z} rho_{z nu}
        #             = sum_n c*_{n mu} eps_n f_n c_{n nu}
        #
# We use the transpose of that matrix. The first form is used
# if rho is given, otherwise the coefficients are used.
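        # Dense serial equivalent of the first form (assuming full S_MM,
        # H_MM and rho_MM are available):
        #   ET_MM = np.linalg.solve(S_MM, H_MM.dot(rho_MM)).T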
self.timer.start('Initial')
rhoT_uMM = []
ET_uMM = []
if not isblacs:
if self.kpt_u[0].rho_MM is None:
self.timer.start('Get density matrix')
for kpt in self.kpt_u:
rhoT_MM = ksl.get_transposed_density_matrix(kpt.f_n,
kpt.C_nM)
rhoT_uMM.append(rhoT_MM)
ET_MM = ksl.get_transposed_density_matrix(kpt.f_n
* kpt.eps_n,
kpt.C_nM)
ET_uMM.append(ET_MM)
if hasattr(kpt, 'c_on'):
# XXX does this work with BLACS/non-BLACS/etc.?
assert self.bd.comm.size == 1
d_nn = np.zeros((self.bd.mynbands, self.bd.mynbands), dtype=kpt.C_nM.dtype)
for ne, c_n in zip(kpt.ne_o, kpt.c_on):
d_nn += ne * np.outer(c_n.conj(), c_n)
rhoT_MM += ksl.get_transposed_density_matrix_delta(d_nn, kpt.C_nM)
ET_MM += ksl.get_transposed_density_matrix_delta(d_nn * kpt.eps_n, kpt.C_nM)
self.timer.stop('Get density matrix')
else:
rhoT_uMM = []
ET_uMM = []
for kpt in self.kpt_u:
H_MM = self.eigensolver.calculate_hamiltonian_matrix(hamiltonian, self, kpt)
tri2full(H_MM)
S_MM = kpt.S_MM.copy()
tri2full(S_MM)
ET_MM = np.linalg.solve(S_MM, gemmdot(H_MM,
kpt.rho_MM)).T.copy()
del S_MM, H_MM
rhoT_MM = kpt.rho_MM.T.copy()
rhoT_uMM.append(rhoT_MM)
ET_uMM.append(ET_MM)
self.timer.stop('Initial')
if isblacs: # XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
from gpaw.blacs import BlacsGrid, Redistributor
def get_density_matrix(f_n, C_nM, redistributor):
rho1_mm = ksl.calculate_blocked_density_matrix(f_n,
C_nM).conj()
rho_mm = redistributor.redistribute(rho1_mm)
return rho_mm
pcutoff_a = [max([pt.get_cutoff() for pt in setup.pt_j])
for setup in self.setups]
phicutoff_a = [max([phit.get_cutoff() for phit in setup.phit_j])
for setup in self.setups]
# XXX should probably use bdsize x gdsize instead
# That would be consistent with some existing grids
grid = BlacsGrid(ksl.block_comm, self.gd.comm.size,
self.bd.comm.size)
blocksize1 = -(-nao // grid.nprow)
blocksize2 = -(-nao // grid.npcol)
# XXX what are rows and columns actually?
desc = grid.new_descriptor(nao, nao, blocksize1, blocksize2)
rhoT_umm = []
ET_umm = []
redistributor = Redistributor(grid.comm, ksl.mmdescriptor, desc)
Fpot_av = np.zeros_like(F_av)
for u, kpt in enumerate(self.kpt_u):
self.timer.start('Get density matrix')
rhoT_mm = get_density_matrix(kpt.f_n, kpt.C_nM, redistributor)
rhoT_umm.append(rhoT_mm)
self.timer.stop('Get density matrix')
self.timer.start('Potential')
rhoT_mM = ksl.distribute_to_columns(rhoT_mm, desc)
vt_G = hamiltonian.vt_sG[kpt.s]
Fpot_av += bfs.calculate_force_contribution(vt_G, rhoT_mM,
kpt.q)
del rhoT_mM
self.timer.stop('Potential')
self.timer.start('Get density matrix')
for kpt in self.kpt_u:
ET_mm = get_density_matrix(kpt.f_n * kpt.eps_n, kpt.C_nM,
redistributor)
ET_umm.append(ET_mm)
self.timer.stop('Get density matrix')
M1start = blocksize1 * grid.myrow
M2start = blocksize2 * grid.mycol
M1stop = min(M1start + blocksize1, nao)
M2stop = min(M2start + blocksize2, nao)
m1max = M1stop - M1start
m2max = M2stop - M2start
if not isblacs:
            # Kinetic energy contribution:
            #
            #   F^a += 2 Re sum_{mu in a; nu} dT_{mu nu}/dR_mu rho_{nu mu}
            #
Fkin_av = np.zeros_like(F_av)
for u, kpt in enumerate(self.kpt_u):
dEdTrhoT_vMM = (dTdR_qvMM[kpt.q]
* rhoT_uMM[u][np.newaxis]).real
for a, M1, M2 in my_slices():
Fkin_av[a, :] += 2.0 * dEdTrhoT_vMM[:, M1:M2].sum(-1).sum(-1)
del dEdTrhoT_vMM
            # Density matrix contribution due to basis overlap:
            #
            #   F^a += -2 Re sum_{mu in a; nu} dTheta_{mu nu}/dR_mu E_{nu mu}
            #
Ftheta_av = np.zeros_like(F_av)
for u, kpt in enumerate(self.kpt_u):
dThetadRE_vMM = (dThetadR_qvMM[kpt.q]
* ET_uMM[u][np.newaxis]).real
for a, M1, M2 in my_slices():
Ftheta_av[a, :] += -2.0 * dThetadRE_vMM[:, M1:M2].sum(-1).sum(-1)
del dThetadRE_vMM
if isblacs:
from gpaw.lcao.overlap import TwoCenterIntegralCalculator
self.timer.start('Prepare TCI loop')
M_a = bfs.M_a
Fkin2_av = np.zeros_like(F_av)
Ftheta2_av = np.zeros_like(F_av)
cell_cv = tci.atoms.cell
spos_ac = tci.atoms.get_scaled_positions() % 1.0
overlapcalc = TwoCenterIntegralCalculator(self.kd.ibzk_qc,
derivative=False)
def get_phases(offset):
return overlapcalc.phaseclass(overlapcalc.ibzk_qc, offset)
# XXX this is not parallel *AT ALL*.
self.timer.start('Get neighbors')
nl = tci.atompairs.pairs.neighbors
r_and_offset_aao = get_r_and_offsets(nl, spos_ac, cell_cv)
atompairs = r_and_offset_aao.keys()
atompairs.sort()
self.timer.stop('Get neighbors')
T_expansions = tci.T_expansions
Theta_expansions = tci.Theta_expansions
P_expansions = tci.P_expansions
nq = len(self.ibzk_qc)
dH_asp = hamiltonian.dH_asp
self.timer.start('broadcast dH')
alldH_asp = {}
for a in range(len(self.setups)):
gdrank = bfs.sphere_a[a].rank
if gdrank == gd.rank:
dH_sp = dH_asp[a]
else:
ni = self.setups[a].ni
dH_sp = np.empty((self.nspins, ni * (ni + 1) // 2))
gd.comm.broadcast(dH_sp, gdrank)
# okay, now everyone gets copies of dH_sp
alldH_asp[a] = dH_sp
self.timer.stop('broadcast dH')
            # This will get sort of hairy. We need to account for some
            # three-center overlaps, such as:
            #
            #   < dPhi^a1_mu/dR | ptilde^a3 > dH^a3 < ptilde^a3 | Phi^a2 > rho^{a2,a1}
            #
# To this end we will loop over all pairs of atoms (a1, a3),
# and then a sub-loop over (a3, a2).
from gpaw.lcao.overlap import DerivativeAtomicDisplacement
class Displacement(DerivativeAtomicDisplacement):
def __init__(self, a1, a2, R_c, offset):
phases = overlapcalc.phaseclass(overlapcalc.ibzk_qc,
offset)
DerivativeAtomicDisplacement.__init__(self, None, a1, a2,
R_c, offset, phases)
# Cache of Displacement objects with spherical harmonics with
# evaluated spherical harmonics.
disp_aao = {}
def get_displacements(a1, a2, maxdistance):
# XXX the way maxdistance is handled it can lead to
# bad caching when different maxdistances are passed
# to subsequent calls with same pair of atoms
disp_o = disp_aao.get((a1, a2))
if disp_o is None:
disp_o = []
for r, offset in r_and_offset_aao[(a1, a2)]:
if np.linalg.norm(r) > maxdistance:
continue
disp = Displacement(a1, a2, r, offset)
disp_o.append(disp)
disp_aao[(a1, a2)] = disp_o
return [disp for disp in disp_o if disp.r < maxdistance]
self.timer.stop('Prepare TCI loop')
self.timer.start('Not so complicated loop')
for (a1, a2) in atompairs:
if a1 >= a2:
# Actually this leads to bad load balance.
# We should take a1 > a2 or a1 < a2 equally many times.
                    # Maybe decide which of these choices
                    # depending on whether a2 % 2 == 0
continue
m1start = M_a[a1] - M1start
m2start = M_a[a2] - M2start
if m1start >= blocksize1 or m2start >= blocksize2:
continue
T_expansion = T_expansions.get(a1, a2)
Theta_expansion = Theta_expansions.get(a1, a2)
P_expansion = P_expansions.get(a1, a2)
nm1, nm2 = T_expansion.shape
m1stop = min(m1start + nm1, m1max)
m2stop = min(m2start + nm2, m2max)
if m1stop <= 0 or m2stop <= 0:
continue
m1start = max(m1start, 0)
m2start = max(m2start, 0)
J1start = max(0, M1start - M_a[a1])
J2start = max(0, M2start - M_a[a2])
M1stop = J1start + m1stop - m1start
J2stop = J2start + m2stop - m2start
dTdR_qvmm = T_expansion.zeros((nq, 3), dtype=dtype)
dThetadR_qvmm = Theta_expansion.zeros((nq, 3), dtype=dtype)
disp_o = get_displacements(a1, a2,
phicutoff_a[a1] + phicutoff_a[a2])
for disp in disp_o:
disp.evaluate_overlap(T_expansion, dTdR_qvmm)
disp.evaluate_overlap(Theta_expansion, dThetadR_qvmm)
for u, kpt in enumerate(self.kpt_u):
rhoT_mm = rhoT_umm[u][m1start:m1stop, m2start:m2stop]
ET_mm = ET_umm[u][m1start:m1stop, m2start:m2stop]
Fkin_v = 2.0 * (dTdR_qvmm[kpt.q][:, J1start:M1stop,
J2start:J2stop]
* rhoT_mm[np.newaxis]).real.sum(-1).sum(-1)
Ftheta_v = 2.0 * (dThetadR_qvmm[kpt.q][:, J1start:M1stop,
J2start:J2stop]
* ET_mm[np.newaxis]).real.sum(-1).sum(-1)
Fkin2_av[a1] += Fkin_v
Fkin2_av[a2] -= Fkin_v
Ftheta2_av[a1] -= Ftheta_v
Ftheta2_av[a2] += Ftheta_v
Fkin_av = Fkin2_av
Ftheta_av = Ftheta2_av
self.timer.stop('Not so complicated loop')
dHP_and_dSP_aauim = {}
a2values = {}
for (a2, a3) in atompairs:
if not a3 in a2values:
a2values[a3] = []
a2values[a3].append(a2)
Fatom_av = np.zeros_like(F_av)
Frho_av = np.zeros_like(F_av)
self.timer.start('Complicated loop')
for a1, a3 in atompairs:
if a1 == a3:
continue
m1start = M_a[a1] - M1start
if m1start >= blocksize1:
continue
P_expansion = P_expansions.get(a1, a3)
nm1 = P_expansion.shape[0]
m1stop = min(m1start + nm1, m1max)
if m1stop <= 0:
continue
m1start = max(m1start, 0)
J1start = max(0, M1start - M_a[a1])
J1stop = J1start + m1stop - m1start
disp_o = get_displacements(a1, a3,
phicutoff_a[a1] + pcutoff_a[a3])
if len(disp_o) == 0:
continue
dPdR_qvmi = P_expansion.zeros((nq, 3), dtype=dtype)
for disp in disp_o:
disp.evaluate_overlap(P_expansion, dPdR_qvmi)
dPdR_qvmi = dPdR_qvmi[:, :, J1start:J1stop, :].copy()
for a2 in a2values[a3]:
m2start = M_a[a2] - M2start
if m2start >= blocksize2:
continue
P_expansion2 = P_expansions.get(a2, a3)
nm2 = P_expansion2.shape[0]
m2stop = min(m2start + nm2, m2max)
if m2stop <= 0:
continue
disp_o = get_displacements(a2, a3,
phicutoff_a[a2] + pcutoff_a[a3])
if len(disp_o) == 0:
continue
m2start = max(m2start, 0)
J2start = max(0, M2start - M_a[a2])
J2stop = J2start + m2stop - m2start
if (a2, a3) in dHP_and_dSP_aauim:
dHP_uim, dSP_uim = dHP_and_dSP_aauim[(a2, a3)]
else:
P_qmi = P_expansion2.zeros((nq,), dtype=dtype)
for disp in disp_o:
disp.evaluate_direct(P_expansion2, P_qmi)
P_qmi = P_qmi[:, J2start:J2stop].copy()
dH_sp = alldH_asp[a3]
dS_ii = self.setups[a3].dO_ii
dHP_uim = []
dSP_uim = []
for u, kpt in enumerate(self.kpt_u):
dH_ii = unpack(dH_sp[kpt.s])
dHP_im = np.dot(P_qmi[kpt.q], dH_ii).T.conj()
# XXX only need nq of these
dSP_im = np.dot(P_qmi[kpt.q], dS_ii).T.conj()
dHP_uim.append(dHP_im)
dSP_uim.append(dSP_im)
dHP_and_dSP_aauim[(a2, a3)] = dHP_uim, dSP_uim
for u, kpt in enumerate(self.kpt_u):
rhoT_mm = rhoT_umm[u][m1start:m1stop, m2start:m2stop]
ET_mm = ET_umm[u][m1start:m1stop, m2start:m2stop]
dPdRdHP_vmm = np.dot(dPdR_qvmi[kpt.q], dHP_uim[u])
dPdRdSP_vmm = np.dot(dPdR_qvmi[kpt.q], dSP_uim[u])
Fatom_c = 2.0 * (dPdRdHP_vmm
* rhoT_mm).real.sum(-1).sum(-1)
Frho_c = 2.0 * (dPdRdSP_vmm
* ET_mm).real.sum(-1).sum(-1)
Fatom_av[a1] += Fatom_c
Fatom_av[a3] -= Fatom_c
Frho_av[a1] -= Frho_c
Frho_av[a3] += Frho_c
self.timer.stop('Complicated loop')
if not isblacs:
            # Potential contribution:
            #
            #   F^a += -2 Re sum_{mu in a; nu} [ int dPhi_mu(r)/dR_a vtilde(r) Phi_nu(r) dr ] rho_{nu mu}
            #
self.timer.start('Potential')
Fpot_av = np.zeros_like(F_av)
for u, kpt in enumerate(self.kpt_u):
vt_G = hamiltonian.vt_sG[kpt.s]
Fpot_av += bfs.calculate_force_contribution(vt_G, rhoT_uMM[u],
kpt.q)
self.timer.stop('Potential')
            # Density matrix contribution from PAW correction:
            #
            #   F^a += 2 Re sum_{mu nu} Z^a_{mu nu} E_{nu mu}
            #        - 2 Re sum_{b; mu in a; nu} Z^b_{mu nu} E_{nu mu}
            #
            # with
            #
            #   Z^b_{mu nu} = sum_{ij} dP^b*_{i mu}/dR^b_mu dS^b_{ij} P^b_{j nu}
            #
self.timer.start('Paw correction')
Frho_av = np.zeros_like(F_av)
for u, kpt in enumerate(self.kpt_u):
work_MM = np.zeros((mynao, nao), dtype)
ZE_MM = None
for b in my_atom_indices:
setup = self.setups[b]
dO_ii = np.asarray(setup.dO_ii, dtype)
dOP_iM = np.zeros((setup.ni, nao), dtype)
gemm(1.0, self.P_aqMi[b][kpt.q], dO_ii, 0.0, dOP_iM, 'c')
for v in range(3):
gemm(1.0, dOP_iM, dPdR_aqvMi[b][kpt.q][v][Mstart:Mstop],
0.0, work_MM, 'n')
ZE_MM = (work_MM * ET_uMM[u]).real
for a, M1, M2 in slices():
dE = 2 * ZE_MM[M1:M2].sum()
Frho_av[a, v] -= dE # the "b; mu in a; nu" term
Frho_av[b, v] += dE # the "mu nu" term
del work_MM, ZE_MM
self.timer.stop('Paw correction')
            # Atomic density contribution:
            #
            #   F^a += -2 Re sum_{mu nu} A^a_{mu nu} rho_{nu mu}
            #        + 2 Re sum_{b; mu in a; nu} A^b_{mu nu} rho_{nu mu}
            #
            # with
            #
            #   A^b_{mu nu} = sum_{ij} dP^b*_{i mu}/dR^b_mu dH^b_{ij} P^b_{j nu}
            #
self.timer.start('Atomic Hamiltonian force')
Fatom_av = np.zeros_like(F_av)
for u, kpt in enumerate(self.kpt_u):
for b in my_atom_indices:
H_ii = np.asarray(unpack(hamiltonian.dH_asp[b][kpt.s]), dtype)
HP_iM = gemmdot(H_ii,
np.ascontiguousarray(self.P_aqMi[b][kpt.q].T.conj()))
for v in range(3):
dPdR_Mi = dPdR_aqvMi[b][kpt.q][v][Mstart:Mstop]
ArhoT_MM = (gemmdot(dPdR_Mi, HP_iM) * rhoT_uMM[u]).real
for a, M1, M2 in slices():
dE = 2 * ArhoT_MM[M1:M2].sum()
Fatom_av[a, v] += dE # the "b; mu in a; nu" term
Fatom_av[b, v] -= dE # the "mu nu" term
self.timer.stop('Atomic Hamiltonian force')
F_av += Fkin_av + Fpot_av + Ftheta_av + Frho_av + Fatom_av
self.timer.start('Wait for sum')
ksl.orbital_comm.sum(F_av)
if self.bd.comm.rank == 0:
self.kpt_comm.sum(F_av, 0)
self.timer.stop('Wait for sum')
self.timer.stop('LCAO forces')
def _get_wave_function_array(self, u, n, realspace=True):
kpt = self.kpt_u[u]
if kpt.C_nM is None:
# Hack to make sure things are available after restart
self.lazyloader.load(self)
C_M = kpt.C_nM[n]
if realspace:
psit_G = self.gd.zeros(dtype=self.dtype)
self.basis_functions.lcao_to_grid(C_M, psit_G, kpt.q)
return psit_G
else:
return C_M
def load_lazily(self, hamiltonian, spos_ac):
"""Horrible hack to recalculate lcao coefficients after restart."""
self.basis_functions.set_positions(spos_ac)
class LazyLoader:
def __init__(self, hamiltonian, spos_ac):
self.spos_ac = spos_ac
def load(self, wfs):
wfs.set_positions(self.spos_ac) # this sets rank_a
                # Now we need to pass wfs.rank_a for things to work
# XXX WTF why does one have to fiddle with rank_a???
hamiltonian.set_positions(self.spos_ac, wfs.rank_a)
wfs.eigensolver.iterate(hamiltonian, wfs)
del wfs.lazyloader
self.lazyloader = LazyLoader(hamiltonian, spos_ac)
def write(self, writer, write_wave_functions=False):
writer['Mode'] = 'lcao'
if not write_wave_functions:
return
writer.dimension('nbasis', self.setups.nao)
writer.add('WaveFunctionCoefficients',
('nspins', 'nibzkpts', 'nbands', 'nbasis'),
dtype=self.dtype)
for s in range(self.nspins):
for k in range(self.nibzkpts):
C_nM = self.collect_array('C_nM', k, s)
writer.fill(C_nM, s, k)
def read_coefficients(self, reader):
for kpt in self.kpt_u:
kpt.C_nM = self.bd.empty(self.setups.nao, dtype=self.dtype)
for myn, C_M in enumerate(kpt.C_nM):
n = self.bd.global_index(myn)
C_M[:] = reader.get('WaveFunctionCoefficients',
kpt.s, kpt.k, n)
def estimate_memory(self, mem):
nq = len(self.kd.ibzk_qc)
nao = self.setups.nao
ni_total = sum([setup.ni for setup in self.setups])
itemsize = mem.itemsize[self.dtype]
mem.subnode('C [qnM]', nq * self.bd.mynbands * nao * itemsize)
nM1, nM2 = self.ksl.get_overlap_matrix_shape()
mem.subnode('S, T [2 x qmm]', 2 * nq * nM1 * nM2 * itemsize)
mem.subnode('P [aqMi]', nq * nao * ni_total // self.gd.comm.size)
self.tci.estimate_memory(mem.subnode('TCI'))
self.basis_functions.estimate_memory(mem.subnode('BasisFunctions'))
self.eigensolver.estimate_memory(mem.subnode('Eigensolver'),
self.dtype)
| robwarm/gpaw-symm | gpaw/wavefunctions/lcao.py | Python | gpl-3.0 | 37,969 | ["ASE", "GPAW"] | 7f59300d22de5fa69f646c411db1085034057d399646a81c1dd4dba3f17b9077 |
|
# -*- test-case-name: buildbot.broken_test.test_buildreq -*-
from twisted.trial import unittest
from twisted.internet import defer
from buildbot import sourcestamp
from buildbot.buildrequest import BuildRequest
from buildbot.status.builder import SUCCESS, FAILURE
from buildbot.changes.changes import Change
from buildbot.process.properties import Properties
from buildbot.broken_test.runutils import MasterMixin, StallMixin
from buildbot.util.eventual import fireEventually, flushEventualQueue
class Request(unittest.TestCase):
def testMerge(self):
R = BuildRequest
S = sourcestamp.SourceStamp
N = 'test_builder'
b1 = R("why", S("branch1", None, None, None), N)
b1r1 = R("why2", S("branch1", "rev1", None, None), N)
b1r1a = R("why not", S("branch1", "rev1", None, None), N)
b1r2 = R("why3", S("branch1", "rev2", None, None), N)
b2r2 = R("why4", S("branch2", "rev2", None, None), N)
b1r1p1 = R("why5", S("branch1", "rev1", (3, "diff"), None), N)
c1 = Change("alice", [], "changed stuff", branch="branch1")
c2 = Change("alice", [], "changed stuff", branch="branch1")
c3 = Change("alice", [], "changed stuff", branch="branch1")
c4 = Change("alice", [], "changed stuff", branch="branch1")
c5 = Change("alice", [], "changed stuff", branch="branch1")
c6 = Change("alice", [], "changed stuff", branch="branch1")
b1c1 = R("changes", S("branch1", None, None, [c1,c2,c3]), N)
b1c2 = R("changes", S("branch1", None, None, [c4,c5,c6]), N)
self.failUnless(b1.canBeMergedWith(b1))
self.failIf(b1.canBeMergedWith(b1r1))
self.failIf(b1.canBeMergedWith(b2r2))
self.failIf(b1.canBeMergedWith(b1r1p1))
self.failIf(b1.canBeMergedWith(b1c1))
self.failIf(b1r1.canBeMergedWith(b1))
self.failUnless(b1r1.canBeMergedWith(b1r1))
self.failIf(b1r1.canBeMergedWith(b2r2))
self.failIf(b1r1.canBeMergedWith(b1r1p1))
self.failIf(b1r1.canBeMergedWith(b1c1))
self.failIf(b1r2.canBeMergedWith(b1))
self.failIf(b1r2.canBeMergedWith(b1r1))
self.failUnless(b1r2.canBeMergedWith(b1r2))
self.failIf(b1r2.canBeMergedWith(b2r2))
self.failIf(b1r2.canBeMergedWith(b1r1p1))
self.failIf(b1r1p1.canBeMergedWith(b1))
self.failIf(b1r1p1.canBeMergedWith(b1r1))
self.failIf(b1r1p1.canBeMergedWith(b1r2))
self.failIf(b1r1p1.canBeMergedWith(b2r2))
self.failIf(b1r1p1.canBeMergedWith(b1c1))
self.failIf(b1c1.canBeMergedWith(b1))
self.failIf(b1c1.canBeMergedWith(b1r1))
self.failIf(b1c1.canBeMergedWith(b1r2))
self.failIf(b1c1.canBeMergedWith(b2r2))
self.failIf(b1c1.canBeMergedWith(b1r1p1))
self.failUnless(b1c1.canBeMergedWith(b1c1))
self.failUnless(b1c1.canBeMergedWith(b1c2))
sm = b1.mergeWith([])
self.failUnlessEqual(sm.branch, "branch1")
self.failUnlessEqual(sm.revision, None)
self.failUnlessEqual(sm.patch, None)
self.failUnlessEqual(sm.changes, ())
ss = b1r1.mergeWith([b1r1])
self.failUnlessEqual(ss, S("branch1", "rev1", None, None))
why = b1r1.mergeReasons([b1r1])
self.failUnlessEqual(why, "why2")
why = b1r1.mergeReasons([b1r1a])
self.failUnlessEqual(why, "why2, why not")
ss = b1c1.mergeWith([b1c2])
self.failUnlessEqual(ss, S("branch1", None, None, [c1,c2,c3,c4,c5,c6]))
why = b1c1.mergeReasons([b1c2])
self.failUnlessEqual(why, "changes")
# replaces test_buildreq.Set.testBuildSet
# exercises db.DBConnector:
# def create_buildset(self, ssid, reason, properties, builderNames, t,
# def get_buildrequestids_for_buildset(self, bsid):
# def examine_buildset(self, bsid):
# also uses:
# def get_unclaimed_buildrequests(self, buildername, old, master_name,
# def claim_buildrequests(self, now, master_name, master_incarnation, brids,
# def retire_buildrequests(self, brids, results):
class BuildSet(MasterMixin, StallMixin, unittest.TestCase):
def test_basic(self):
self.basedir = "buildset/BuildSet/basic"
self.create_master()
db = self.master.db
run = db.runInteractionNow
# we must create all the things that go into a buildset
#c1 = Change(who="brian", files=["foo.c", "subdir/bar.c"],
# comments="first change",
# revision="1234")
#db.addChangeToDatabase(c1)
ss = sourcestamp.SourceStamp(branch="branchy")
ssid = run(lambda t: db.get_sourcestampid(ss,t))
props = Properties()
props.setProperty("pname", "pvalue", "psource")
bsid = run(lambda t:
db.create_buildset(ssid, "reason", props, ["bn1", "bn2"], t))
mn, mi = "mastername", "incarnation"
reqs = run(lambda t:
db.get_unclaimed_buildrequests("bn1", 1, mn, mi, t))
self.failUnlessEqual(len(reqs), 1)
self.failUnlessEqual(reqs[0].reason, "reason")
self.failUnlessEqual(reqs[0].builderName, "bn1")
#print reqs[0].properties
#self.failUnlessEqual(reqs[0].properties["pname"], "pvalue") #BROKEN
brids = db.get_buildrequestids_for_buildset(bsid)
bn1_brid = brids["bn1"]
bn2_brid = brids["bn2"]
self.failUnlessEqual(bn1_brid, reqs[0].id)
reqs2 = run(lambda t:
db.get_unclaimed_buildrequests("bn2", 1, mn, mi, t))
self.failUnlessEqual(bn2_brid, reqs2[0].id)
(successful, finished) = db.examine_buildset(bsid)
self.failUnlessEqual(successful, None)
self.failUnlessEqual(finished, False)
bs = self.master.status.getBuildSets()
self.failUnlessEqual(len(bs), 1)
brs = bs[0].getBuilderNamesAndBuildRequests()
self.failUnlessEqual(sorted(brs.keys()), ["bn1", "bn2"])
self.failUnlessEqual(sorted(bs[0].getBuilderNames()), ["bn1", "bn2"])
self.failUnlessEqual(len(bs[0].getBuildRequests()), 2)
ss2 = bs[0].getSourceStamp()
self.failUnlessEqual(ss2.branch, "branchy")
self.failUnlessEqual(bs[0].getReason(), "reason")
self.failUnlessEqual(bs[0].isFinished(), False)
self.failUnlessEqual(bs[0].getResults(), None)
db.retire_buildrequests([bn1_brid], SUCCESS)
self.failUnlessEqual(db.examine_buildset(bsid), (None, False))
db.retire_buildrequests([bn2_brid], SUCCESS)
self.failUnlessEqual(db.examine_buildset(bsid), (True, True))
bsid2 = run(lambda t:
db.create_buildset(ssid, "reason", props, ["bn1","bn2"], t))
brids2 = db.get_buildrequestids_for_buildset(bsid2)
self.failUnlessEqual(db.examine_buildset(bsid2), (None, False))
db.retire_buildrequests([brids2["bn1"]], SUCCESS)
self.failUnlessEqual(db.examine_buildset(bsid2), (None, False))
db.retire_buildrequests([brids2["bn2"]], FAILURE)
self.failUnlessEqual(db.examine_buildset(bsid2), (False, True))
bsid3 = run(lambda t:
db.create_buildset(ssid, "reason", props, ["bn1","bn2"], t))
brids3 = db.get_buildrequestids_for_buildset(bsid3)
self.failUnlessEqual(db.examine_buildset(bsid3), (None, False))
db.retire_buildrequests([brids3["bn1"]], FAILURE)
self.failUnlessEqual(db.examine_buildset(bsid3), (False, False))
db.retire_buildrequests([brids3["bn2"]], SUCCESS)
self.failUnlessEqual(db.examine_buildset(bsid3), (False, True))
def test_subscribe(self):
self.basedir = "buildset/BuildSet/subscribe"
self.create_master()
db = self.master.db
run = db.runInteractionNow
ss = sourcestamp.SourceStamp(branch="branchy")
ssid = run(lambda t: db.get_sourcestampid(ss,t))
d = defer.succeed(None)
d.addCallback(self._setup_subscribe, ssid)
d.addCallback(self._subscribe_test1)
d.addCallback(self._setup_subscribe, ssid)
d.addCallback(self._subscribe_test2)
d.addCallback(self._setup_subscribe, ssid)
d.addCallback(self._subscribe_test3)
return d
def _setup_subscribe(self, ign, ssid):
db = self.master.db
run = db.runInteractionNow
props = Properties()
bsid = run(lambda t:
db.create_buildset(ssid, "reason", props,
["bn1", "bn2"], t))
mn, mi = "mastername", "incarnation"
brids = db.get_buildrequestids_for_buildset(bsid)
bss = self.master.status.getBuildSets()[0]
success_events = []
bss.waitUntilSuccess().addCallback(success_events.append)
finished_events = []
bss.waitUntilFinished().addCallback(finished_events.append)
return brids, bss, success_events, finished_events
def _subscribe_test1(self, res):
db = self.master.db
(brids, bss, success_events, finished_events) = res
d = fireEventually()
def _check1(ign):
self.failUnlessEqual(len(success_events), 0)
self.failUnlessEqual(len(finished_events), 0)
db.retire_buildrequests([brids["bn1"]], SUCCESS)
return flushEventualQueue()
d.addCallback(_check1)
def _check2(ign):
self.failUnlessEqual(len(success_events), 0)
self.failUnlessEqual(len(finished_events), 0)
db.retire_buildrequests([brids["bn2"]], SUCCESS)
return flushEventualQueue()
d.addCallback(_check2)
def _check3(ign):
self.failUnlessEqual(len(success_events), 1)
self.failUnlessEqual(len(finished_events), 1)
self.failUnlessIdentical(bss.__class__,
success_events[0].__class__)
self.failUnlessEqual(success_events[0].isFinished(), True)
self.failUnlessEqual(success_events[0].getResults(), SUCCESS)
self.failUnlessEqual(finished_events[0].getResults(), SUCCESS)
return flushEventualQueue()
d.addCallback(_check3)
return d
def _subscribe_test2(self, res):
db = self.master.db
(brids, bss, success_events, finished_events) = res
d = fireEventually()
def _check1(ign):
self.failUnlessEqual(len(success_events), 0)
self.failUnlessEqual(len(finished_events), 0)
db.retire_buildrequests([brids["bn1"]], SUCCESS)
return flushEventualQueue()
d.addCallback(_check1)
def _check2(ign):
self.failUnlessEqual(len(success_events), 0)
self.failUnlessEqual(len(finished_events), 0)
db.retire_buildrequests([brids["bn2"]], FAILURE)
return flushEventualQueue()
d.addCallback(_check2)
def _check3(ign):
self.failUnlessEqual(len(success_events), 1)
self.failUnlessEqual(len(finished_events), 1)
self.failUnlessIdentical(bss.__class__,
success_events[0].__class__)
self.failUnlessEqual(success_events[0].isFinished(), True)
self.failUnlessEqual(success_events[0].getResults(), FAILURE)
self.failUnlessEqual(finished_events[0].getResults(), FAILURE)
return flushEventualQueue()
d.addCallback(_check3)
return d
def _subscribe_test3(self, res):
db = self.master.db
(brids, bss, success_events, finished_events) = res
d = fireEventually()
def _check1(ign):
self.failUnlessEqual(len(success_events), 0)
self.failUnlessEqual(len(finished_events), 0)
db.retire_buildrequests([brids["bn1"]], FAILURE)
return flushEventualQueue()
d.addCallback(_check1)
def _check2(ign):
self.failUnlessEqual(len(success_events), 1)
self.failUnlessEqual(len(finished_events), 0)
self.failUnlessEqual(success_events[0].isFinished(), False)
self.failUnlessEqual(success_events[0].getResults(), None)
db.retire_buildrequests([brids["bn2"]], SUCCESS)
return flushEventualQueue()
d.addCallback(_check2)
def _check3(ign):
self.failUnlessEqual(len(success_events), 1)
self.failUnlessEqual(len(finished_events), 1)
self.failUnlessIdentical(bss.__class__,
success_events[0].__class__)
self.failUnlessEqual(success_events[0].isFinished(), True)
self.failUnlessEqual(success_events[0].getResults(), FAILURE)
self.failUnlessEqual(finished_events[0].getResults(), FAILURE)
return flushEventualQueue()
d.addCallback(_check3)
return d
| centrumholdings/buildbot | buildbot/broken_test/unit/test_buildreq.py | Python | gpl-2.0 | 12,862 | ["Brian"] | bdbd6ccf7833ac6ee3f46174eb2aed71b8a562428adb519d842edcd6cdee873f |
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Tests for Vowpal Wabbit LDA wrapper.
Will not be run unless the environment variable 'VOWPAL_WABBIT_PATH' is set
and points to the `vw` executable.
"""
import logging
import unittest
import os
import os.path
import tempfile
from collections import defaultdict
import six
from gensim.corpora import Dictionary
import gensim.models.wrappers.ldavowpalwabbit as ldavowpalwabbit
from gensim.models.wrappers.ldavowpalwabbit import LdaVowpalWabbit
from gensim.test.utils import datapath
# set up vars used in testing ("Deerwester" from the web tutorial)
TOPIC_WORDS = [
'cat lion leopard mouse jaguar lynx cheetah tiger kitten puppy'.split(),
'engine car wheel brakes tyre motor suspension cylinder exhaust clutch'.split(),
'alice bob robert tim sue rachel dave harry alex jim'.split(),
'c cplusplus go python haskell scala java ruby csharp erlang'.split(),
'eggs ham mushrooms cereal coffee beans tea juice sausages bacon'.split()
]
def get_corpus():
text_path = datapath('ldavowpalwabbit.txt')
dict_path = datapath('ldavowpalwabbit.dict.txt')
dictionary = Dictionary.load_from_text(dict_path)
with open(text_path) as fhandle:
corpus = [dictionary.doc2bow(l.strip().split()) for l in fhandle]
return corpus, dictionary
class TestLdaVowpalWabbit(unittest.TestCase):
def setUp(self):
vw_path = os.environ.get('VOWPAL_WABBIT_PATH', None)
if not vw_path:
msg = "Environment variable 'VOWPAL_WABBIT_PATH' not specified, skipping tests"
try:
raise unittest.SkipTest(msg)
except AttributeError:
# couldn't find a way of skipping tests in python 2.6
self.vw_path = None
corpus, dictionary = get_corpus()
self.vw_path = vw_path
self.corpus = corpus
self.dictionary = dictionary
def test_save_load(self):
"""Test loading/saving LdaVowpalWabbit model."""
if not self.vw_path: # for python 2.6
return
lda = LdaVowpalWabbit(
self.vw_path, corpus=self.corpus, passes=10, chunksize=256,
id2word=self.dictionary, cleanup_files=True, alpha=0.1,
eta=0.1, num_topics=len(TOPIC_WORDS), random_seed=1
)
with tempfile.NamedTemporaryFile() as fhandle:
lda.save(fhandle.name)
lda2 = LdaVowpalWabbit.load(fhandle.name)
# ensure public fields are saved/loaded correctly
saved_fields = [
lda.alpha, lda.chunksize, lda.cleanup_files,
lda.decay, lda.eta, lda.gamma_threshold,
lda.id2word, lda.num_terms, lda.num_topics,
lda.passes, lda.random_seed, lda.vw_path
]
loaded_fields = [
lda2.alpha, lda2.chunksize, lda2.cleanup_files,
lda2.decay, lda2.eta, lda2.gamma_threshold,
lda2.id2word, lda2.num_terms, lda2.num_topics,
lda2.passes, lda2.random_seed, lda2.vw_path
]
self.assertEqual(saved_fields, loaded_fields)
# ensure topic matrices are saved/loaded correctly
saved_topics = lda.show_topics(num_topics=5, num_words=10)
loaded_topics = lda2.show_topics(num_topics=5, num_words=10)
self.assertEqual(loaded_topics, saved_topics)
def test_model_update(self):
"""Test updating existing LdaVowpalWabbit model."""
if not self.vw_path: # for python 2.6
return
lda = LdaVowpalWabbit(
self.vw_path, corpus=[self.corpus[0]], passes=10, chunksize=256,
id2word=self.dictionary, cleanup_files=True, alpha=0.1,
eta=0.1, num_topics=len(TOPIC_WORDS), random_seed=1
)
lda.update(self.corpus[1:])
result = lda.log_perplexity(self.corpus)
self.assertTrue(result < -1)
self.assertTrue(result > -5)
def test_perplexity(self):
"""Test LdaVowpalWabbit perplexity is within expected range."""
if not self.vw_path: # for python 2.6
return
lda = LdaVowpalWabbit(
self.vw_path, corpus=self.corpus, passes=10, chunksize=256,
id2word=self.dictionary, cleanup_files=True, alpha=0.1,
eta=0.1, num_topics=len(TOPIC_WORDS), random_seed=1)
# varies, but should be between -1 and -5
result = lda.log_perplexity(self.corpus)
self.assertTrue(result < -1)
self.assertTrue(result > -5)
def test_topic_coherence(self):
"""Test LdaVowpalWabbit topic coherence."""
if not self.vw_path: # for python 2.6
return
corpus, dictionary = get_corpus()
lda = LdaVowpalWabbit(
self.vw_path, corpus=corpus, passes=10, chunksize=256,
id2word=dictionary, cleanup_files=True, alpha=0.1,
eta=0.1, num_topics=len(TOPIC_WORDS), random_seed=1
)
lda.print_topics(5, 10)
# map words in known topic to an ID
topic_map = {}
for i, words in enumerate(TOPIC_WORDS):
topic_map[frozenset(words)] = i
n_coherent = 0
for topic_id in range(lda.num_topics):
topic = lda.show_topic(topic_id, topn=20)
# get all words from LDA topic
topic_words = [w[1] for w in topic]
# get list of original topics that each word actually belongs to
ids = []
for word in topic_words:
for src_topic_words, src_topic_id in six.iteritems(topic_map):
if word in src_topic_words:
ids.append(src_topic_id)
# count the number of times each original topic appears
counts = defaultdict(int)
for found_topic_id in ids:
counts[found_topic_id] += 1
# if at least 6/10 words assigned to same topic, consider it coherent
max_count = 0
for count in six.itervalues(counts):
max_count = max(max_count, count)
if max_count >= 6:
n_coherent += 1
# not 100% deterministic, but should always get 3+ coherent topics
self.assertTrue(n_coherent >= 3)
def test_corpus_to_vw(self):
"""Test corpus to Vowpal Wabbit format conversion."""
if not self.vw_path: # for python 2.6
return
corpus = [
[(0, 5), (7, 1), (5, 3), (0, 2)],
[(7, 2), (2, 1), (3, 11)],
[(1, 1)],
[],
[(5, 2), (0, 1)]
]
expected = """
| 0:5 7:1 5:3 0:2
| 7:2 2:1 3:11
| 1:1
|
| 5:2 0:1
""".strip()
result = '\n'.join(ldavowpalwabbit.corpus_to_vw(corpus))
self.assertEqual(result, expected)
def testvwmodel2ldamodel(self):
"""Test copying of VWModel to LdaModel"""
if not self.vw_path:
return
tm1 = LdaVowpalWabbit(vw_path=self.vw_path, corpus=self.corpus, num_topics=2, id2word=self.dictionary)
tm2 = ldavowpalwabbit.vwmodel2ldamodel(tm1)
for document in self.corpus:
element1_1, element1_2 = tm1[document][0]
element2_1, element2_2 = tm2[document][0]
self.assertAlmostEqual(element1_1, element2_1)
self.assertAlmostEqual(element1_2, element2_2, 5)
logging.debug('%d %d', element1_1, element2_1)
logging.debug('%d %d', element1_2, element2_2)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| markroxor/gensim | gensim/test/test_ldavowpalwabbit_wrapper.py | Python | lgpl-2.1 | 7,796 | ["Jaguar"] | 71d1402cd8952fafd4951c2512b6c835ff068075116b61dcbe47379f7fa5d43f |
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module is used to estimate the cost of various compounds. Costs are taken
from a CostDB instance, for example a CSV file via CostDBCSV.
For compounds with no cost listed, a Phase Diagram style convex hull
optimization is performed to determine a set of compositions that can be mixed
to give the desired compound with lowest total cost.
"""
import abc
import csv
import itertools
import os
from collections import defaultdict
import scipy.constants as const
from monty.design_patterns import singleton
from monty.string import unicode2str
from pymatgen.analysis.phase_diagram import PDEntry, PhaseDiagram
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.util.provenance import is_valid_bibtex
__author__ = "Anubhav Jain"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Anubhav Jain"
__email__ = "ajain@lbl.gov"
__date__ = "Aug 27, 2013"
module_dir = os.path.dirname(os.path.abspath(__file__))
class CostEntry(PDEntry):
"""
    Extends PDEntry to include a BibTeX reference and language about
    cost.
"""
def __init__(self, composition, cost, name, reference):
"""
Args:
composition:
Composition as a pymatgen.core.structure.Composition
cost:
Cost (per mol, NOT per kg) of the full Composition
name:
Optional parameter to name the entry. Defaults to the reduced
chemical formula as in PDEntry.
reference:
Reference data as BiBTeX string
"""
super().__init__(composition, cost, name)
if reference and not is_valid_bibtex(reference):
raise ValueError("Invalid format for cost reference! Should be BibTeX string.")
self.reference = reference
def __repr__(self):
return f"CostEntry : {self.composition} with cost = {self.energy:.4f}"
class CostDB(metaclass=abc.ABCMeta):
"""
Abstract class for representing a Cost database.
Can be extended, e.g. for file-based or REST-based databases
"""
@abc.abstractmethod
def get_entries(self, chemsys):
"""
For a given chemical system, return an array of CostEntries
Args:
chemsys:
array of Elements defining the chemical system.
Returns:
array of CostEntries
"""
return
class CostDBCSV(CostDB):
"""
Read a CSV file to get costs
Format is formula,cost_per_kg,name,BibTeX
"""
def __init__(self, filename):
"""
Args:
filename (str): Filename of cost database.
"""
# read in data from file
self._chemsys_entries = defaultdict(list)
filename = os.path.join(os.path.dirname(__file__), filename)
with open(filename) as f:
reader = csv.reader(f, quotechar=unicode2str("|"))
for row in reader:
comp = Composition(row[0])
cost_per_mol = float(row[1]) * comp.weight.to("kg") * const.N_A
pde = CostEntry(comp.formula, cost_per_mol, row[2], row[3])
chemsys = "-".join(sorted(el.symbol for el in pde.composition.elements))
self._chemsys_entries[chemsys].append(pde)
def get_entries(self, chemsys):
"""
For a given chemical system, return an array of CostEntries
Args:
chemsys:
array of Elements defining the chemical system.
Returns:
array of CostEntries
"""
chemsys = "-".join(sorted(el.symbol for el in chemsys))
return self._chemsys_entries[chemsys]
@singleton
class CostDBElements(CostDBCSV):
"""
Singleton object that provides the cost data for elements
"""
def __init__(self):
"""
Init
"""
CostDBCSV.__init__(self, os.path.join(module_dir, "costdb_elements.csv"))
class CostAnalyzer:
"""
Given a CostDB, figures out the minimum cost solutions via convex hull
"""
def __init__(self, costdb):
"""
Args:
costdb (): Cost database.
"""
self.costdb = costdb
def get_lowest_decomposition(self, composition):
"""
Get the decomposition leading to lowest cost
Args:
composition:
Composition as a pymatgen.core.structure.Composition
Returns:
Decomposition as a dict of {Entry: amount}
"""
entries_list = []
elements = [e.symbol for e in composition.elements]
for i in range(len(elements)):
for combi in itertools.combinations(elements, i + 1):
chemsys = [Element(e) for e in combi]
x = self.costdb.get_entries(chemsys)
entries_list.extend(x)
try:
pd = PhaseDiagram(entries_list)
return pd.get_decomposition(composition)
except IndexError:
raise ValueError("Error during PD building; most likely, cost data does not exist!")
def get_cost_per_mol(self, comp):
"""
Get best estimate of minimum cost/mol based on known data
Args:
comp:
Composition as a pymatgen.core.structure.Composition
Returns:
float of cost/mol
"""
comp = comp if isinstance(comp, Composition) else Composition(comp)
decomp = self.get_lowest_decomposition(comp)
return sum(k.energy_per_atom * v * comp.num_atoms for k, v in decomp.items())
def get_cost_per_kg(self, comp):
"""
Get best estimate of minimum cost/kg based on known data
Args:
comp:
Composition as a pymatgen.core.structure.Composition
Returns:
float of cost/kg
"""
comp = comp if isinstance(comp, Composition) else Composition(comp)
return self.get_cost_per_mol(comp) / (comp.weight.to("kg") * const.N_A)
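# Hedged usage sketch: with the elemental cost data shipped in
# costdb_elements.csv, a minimum-cost estimate for a compound could be
# obtained as follows (the printed value depends on the CSV contents):
#   analyzer = CostAnalyzer(CostDBElements())
#   print(analyzer.get_cost_per_kg("LiFePO4"))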
| materialsproject/pymatgen | pymatgen/analysis/cost.py | Python | mit | 6,163 | ["pymatgen"] | 2febcf27999386bbf20905755a01b26c00a28178278c4de88c7db3dc6966a10a |
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import os
import tempfile
import shutil
from pymatgen.util.testing import PymatgenTest
from monty.functools import lazy_property
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.io.abinit import *
from pymatgen.io.abinit.flows import *
from pymatgen.io.abinit.works import *
from pymatgen.io.abinit.tasks import *
from pymatgen.io.abinit.pseudos import Pseudo
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files', "abinit")
def ref_file(filename):
return os.path.join(_test_dir, filename)
class FakeAbinitInput(object):
"""Emulate an Abinit input."""
@lazy_property
def pseudos(self):
return [Pseudo.as_pseudo(ref_file("14si.pspnc"))]
@lazy_property
def structure(self):
coords = []
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
return Structure(lattice, ["Si", "Si"], coords)
def get(self, key, default=None):
"""The real AbinitInput is a dict-like object."""
if default is not None: return default
return key
class FlowUnitTest(PymatgenTest):
"""Provides helper function for testing Abinit flows."""
MANAGER = """\
policy:
autoparal: 1
qadapters:
- &batch
priority: 1
queue:
qtype: slurm
qname: Oban
qparams:
mail_user: nobody@nowhere
limits:
timelimit: 0:20:00
min_cores: 4
max_cores: 12
#condition: {"$eq": {omp_threads: 2}}
hardware:
num_nodes: 10
sockets_per_node: 1
cores_per_socket: 2
mem_per_node: 4 Gb
job:
modules:
- intel/compilerpro/13.0.1.117
- fftw3/intel/3.3
shell_env:
PATH: /home/user/tmp_intel13/src/98_main/:/home/user//NAPS/intel13/bin:$PATH
LD_LIBRARY_PATH: /home/user/NAPS/intel13/lib:$LD_LIBRARY_PATH
mpi_runner: mpirun
# Connection to the MongoDb database (optional)
db_connector:
database: abinit
collection: test
#host: 0.0.0.0
#port: 8080
#user: gmatteo
#password: helloworld
batch_adapter: *batch
"""
def setUp(self):
"""Initialization phase."""
super(FlowUnitTest, self).setUp()
# Temporary directory for the flow.
self.workdir = tempfile.mkdtemp()
# Create the TaskManager.
self.manager = TaskManager.from_string(self.MANAGER)
# Fake input file
self.fake_input = FakeAbinitInput()
def tearDown(self):
"""Delete workdir"""
shutil.rmtree(self.workdir)
class FlowTest(FlowUnitTest):
def test_base(self):
"""Testing Flow..."""
aequal, atrue, afalse = self.assertEqual, self.assertTrue, self.assertFalse
flow = Flow(workdir=self.workdir, manager=self.manager)
assert flow.isinstance(Flow)
assert not flow.isinstance(None)
assert not flow.has_scheduler
# Build a work with a task
work = flow.register_task(self.fake_input)
assert work.is_work
task0_w0 = work[0]
atrue(task0_w0.is_task)
print(task0_w0.status.colored)
atrue(len(flow) == 1)
aequal(flow.num_tasks, 1)
atrue(flow.has_db)
#print(task0_w0.input_structure)
print(task0_w0.make_input)
# Task history
assert len(task0_w0.history) == 0
task0_w0.history.info("Hello %s", "world")
assert len(task0_w0.history) == 1
print(task0_w0.history)
record = task0_w0.history.pop()
print(record, repr(record))
assert record.get_message(asctime=False) == "Hello world"
assert len(task0_w0.history) == 0
assert flow.select_tasks(nids=task0_w0.node_id)[0] == task0_w0
assert flow.select_tasks(wslice=slice(0,1,1)) == [task0_w0]
assert flow.select_tasks(task_class="DfptTask") == []
assert flow.get_task_scfcycles() == []
# Build a workflow containing two tasks depending on task0_w0
work = Work()
atrue(work.is_work)
work.register(self.fake_input)
work.register(self.fake_input)
aequal(len(work), 2)
flow.register_work(work, deps={task0_w0: "WFK"})
atrue(flow.is_flow)
aequal(len(flow), 2)
# Add another work without dependencies.
task0_w2 = flow.register_task(self.fake_input)[0]
atrue(len(flow) == 3)
afalse(flow.is_work)
# Allocate internal tables
flow.allocate()
        # Check dependencies.
atrue(flow[1].depends_on(task0_w0))
atrue(flow[1][0].depends_on(task0_w0))
atrue(flow[1][0] in task0_w0.get_children())
atrue(task0_w0 in flow[1][0].get_parents())
afalse(flow[2][0].depends_on(task0_w0))
afalse(flow[2][0] in task0_w0.get_children())
afalse(task0_w0 in flow[2][0].get_parents())
aequal(flow[1].pos, 1)
aequal(flow[1][0].pos, (1, 0))
aequal(flow[2][0].pos, (2, 0))
afalse(flow.all_ok)
aequal(flow.num_tasks, 4)
aequal(flow.ncores_used, 0)
# API for iterations
aequal(len(list(flow.iflat_tasks(status="Initialized"))), sum(len(work) for work in flow))
aequal(list(flow.iflat_tasks(nids=task0_w0.node_id)), [task0_w0])
aequal([task0_w0], flow.tasks_from_nids(task0_w0.node_id))
aequal([(0, 0)], flow.wti_from_nids(task0_w0.node_id))
aequal([task0_w2], flow.tasks_from_nids([task0_w2.node_id]))
aequal([(2, 0)], flow.wti_from_nids([task0_w2.node_id]))
# Check for deadlocks
flow.check_dependencies()
# Save the flow in pickle format.
flow.build_and_pickle_dump()
# Find the pickle file in workdir and recreate the flow.
same_flow = Flow.pickle_load(self.workdir)
aequal(same_flow, flow)
# to/from string
# FIXME This does not work with py3k
#s = flow.pickle_dumps(protocol=0)
#same_flow = Flow.pickle_loads(s)
#aequal(same_flow, flow)
self.assertMSONable(flow)
flow.show_info()
flow.show_summary()
flow.show_inputs()
flow.show_inputs(varnames="znucl")
# Test show_status
flow.show_status()
flow.show_tricky_tasks()
flow.show_event_handlers()
def test_workdir(self):
"""Testing if one can use workdir=None in flow.__init__ and then flow.allocate(workdir)."""
flow = Flow(workdir=None, manager=self.manager)
flow.register_task(self.fake_input)
#flow.register_work(work)
work = Work()
work.register_scf_task(self.fake_input)
flow.register_work(work)
        # If flow.workdir is None, one must call flow.allocate(workdir) explicitly.
with self.assertRaises(RuntimeError): flow.allocate()
tmpdir = tempfile.mkdtemp()
flow.allocate(workdir=tmpdir)
print(flow)
assert len(flow) == 2
flow.build()
for i, work in enumerate(flow):
assert work.workdir == os.path.join(tmpdir, "w%d" % i)
for t, task in enumerate(work):
assert task.workdir == os.path.join(work.workdir, "t%d" % t)
class TestFlowInSpectatorMode(FlowUnitTest):
def test_spectator(self):
flow = Flow(workdir=self.workdir, manager=self.manager)
work0 = Work()
gs_task = work0.register_scf_task(self.fake_input)
assert gs_task.isinstance(ScfTask)
assert gs_task.isinstance("ScfTask")
task = work0.register_scf_task(self.fake_input)
assert task.is_abinit_task
assert not task.is_optic_task
assert not task.is_anaddb_task
work1 = Work()
work1.register_scf_task(self.fake_input)
flow.register_work(work0)
flow.register_work(work1)
flow.disconnect_signals()
flow.disconnect_signals()
flow.connect_signals()
flow.connect_signals()
for mode in [False, True]:
flow.set_spectator_mode(mode=mode)
assert flow.in_spectator_mode == mode
for node in flow.iflat_nodes():
assert node.in_spectator_mode == mode
assert len(list(flow.iflat_nodes())) == 1 + len(flow.works) + sum(len(work) for work in flow)
assert flow.node_from_nid(flow.node_id) == flow
flow.set_spectator_mode(mode=False)
flow.build_and_pickle_dump()
        # pickle_load always returns a flow in spectator mode.
flow = Flow.pickle_load(flow.workdir)
assert flow.in_spectator_mode
#with self.assertRaises(flow.SpectatorError): flow.pickle_dump()
#with self.assertRaises(flow.SpectatorError): flow.make_scheduler().start()
work = flow[0]
assert work.send_signal(work.S_OK) is None
#with self.assertRaises(work.SpectatorError): work.on_ok()
#with self.assertRaises(work.SpectatorError): work.on_all_ok()
task = work[0]
assert task.send_signal(task.S_OK) is None
#with self.assertRaises(task.SpectatorError): task._on_done()
#with self.assertRaises(task.SpectatorError): task.on_ok()
#with self.assertRaises(task.SpectatorError): task._on_ok()
class TestBatchLauncher(FlowUnitTest):
def test_batchlauncher(self):
"""Testing BatchLauncher methods."""
# Create the TaskManager.
manager = TaskManager.from_string(self.MANAGER)
print("batch_adapter", manager.batch_adapter)
assert manager.batch_adapter is not None
def build_flow_with_name(name):
"""Build a flow with workdir None and the given name."""
flow = Flow(workdir=None, manager=self.manager)
flow.set_name(name)
flow.register_task(self.fake_input)
work = Work()
work.register_scf_task(self.fake_input)
flow.register_work(work)
return flow
from pymatgen.io.abinit.launcher import BatchLauncher
tmpdir = tempfile.mkdtemp()
batch = BatchLauncher(workdir=tmpdir, manager=manager)
print(batch)
flow0 = build_flow_with_name("flow0")
flow1 = build_flow_with_name("flow1")
flow2_same_name = build_flow_with_name("flow1")
batch.add_flow(flow0)
# Cannot add the same flow twice.
with self.assertRaises(batch.Error):
batch.add_flow(flow0)
batch.add_flow(flow1)
# Cannot add two flows with the same name.
with self.assertRaises(batch.Error):
batch.add_flow(flow2_same_name)
batch.submit(dry_run=True)
for i, flow in enumerate([flow0, flow1]):
assert flow.workdir == os.path.join(batch.workdir, "flow%d" % i)
batch.pickle_dump()
batch_from_pickle = BatchLauncher.pickle_load(batch.workdir)
assert all(f1 == f2 for f1, f2 in zip(batch.flows, batch_from_pickle.flows))
if __name__ == '__main__':
import unittest
unittest.main()
|
johnson1228/pymatgen
|
pymatgen/io/abinit/tests/test_flows.py
|
Python
|
mit
| 11,432
|
[
"ABINIT",
"pymatgen"
] |
f5db368c834f6f2516ed9059bb0b4f5eadc6c173b504b5e236b17d949f821981
|
import re
import sys
import os
###############################################################################
# lexer
###############################################################################
# opaque? unsigned? void?
_keywords = frozenset(["typedef", "namespace", "void", "unsigned", "enum",
"struct", "union", "switch", "case", "default"])
_tokens = ["identifier",
"int",
"obrack",
"cbrack",
"semi",
"ocurl",
"ccurl",
"eq",
"comma",
"carrets",
"star",
"opar",
"cpar",
"colon",
"eof"] \
+ list(_keywords)
_int_types = frozenset(["int", "hyper", "uint_64_t", "u_int32_t"])
_native_types = _int_types | frozenset(["string", "void", "opaque", "bool"])
class _TokHelper:
"""A helper class that emulates an enum for all the tokens in the lexer"""
def __init__(self, tokens):
self._tokens = list(tokens)
for (idx, v) in enumerate(self._tokens):
setattr(self, v.upper(), idx)
def to_string(self, name):
return self._tokens[name]
def of_string(self, name):
return getattr(self, name.upper())
Token = _TokHelper(_tokens)
def ident(scanner, token):
"""Takes the input a string that is either a scanner or a """
if token in _keywords:
return Token.of_string(token), None
return Token.IDENTIFIER, token
scanner = re.Scanner([
(r"\s+", None),
(r"/\*(\n|.)*?\*/", None), # comments
(r"%.*\n", None), # c ignore files
(r"//.*\n", None), # comments
(r"[0-9]+", lambda scanner, token: (Token.INT, int(token))),
(r"[A-Za-z_][A-Za-z_0-9]*", ident),
(r"\[", lambda scanner, token: (Token.OBRACK, None)),
(r"\]", lambda scanner, token: (Token.CBRACK, None)),
(r";", lambda scanner, token: (Token.SEMI, None)),
(r"{", lambda scanner, token: (Token.OCURL, None)),
(r"}", lambda scanner, token: (Token.CCURL, None)),
(r"=", lambda scanner, token: (Token.EQ, None)),
(r",", lambda scanner, token: (Token.COMMA, None)),
(r"<>", lambda scanner, token: (Token.CARRETS, None)),
(r"\*", lambda scanner, token: (Token.STAR, None)),
(r"\(", lambda scanner, token: (Token.OPAR, None)),
(r"\)", lambda scanner, token: (Token.CPAR, None)),
(r":", lambda scanner, token: (Token.COLON, None))
])
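# Illustrative sketch (added; not part of the original tool): how the scanner
# above tokenizes a tiny declaration. The function is defined but never
# called, so module behaviour is unchanged.
def _demo_scan():
    results, remainder = scanner.scan("typedef unsigned hyper uid_t;")
    assert remainder == ""
    kinds = [tok for tok, _ in results]
    assert kinds == [Token.TYPEDEF, Token.UNSIGNED,
                     Token.IDENTIFIER, Token.IDENTIFIER, Token.SEMI]
    return results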
class PeekableIterator:
""" An adaptor around an iterator that adds a lookahead of one element"""
class Done(Exception):
"""Tried to peek when all the values had already been read"""
pass
def __init__(self, it):
self._done = False
self._it = it
self._reload()
def _reload(self):
if self._done:
return
try:
self._next = self._it.next()
except StopIteration:
self._next = None
self._done = True
def next(self):
if self._done:
raise StopIteration
res = self._next
self._reload()
return res
def peek(self):
if self._done:
raise Done
return self._next
def done(self):
return self._done
def __iter__(self):
return self
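# Illustrative sketch (added): the one-token lookahead in action. Defined but
# never called.
def _demo_peek():
    it = PeekableIterator(iter([1, 2, 3]))
    assert it.peek() == 1  # peek does not consume the value
    assert it.next() == 1  # next returns the peeked value and advances
    assert it.peek() == 2
    assert list(it) == [2, 3]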
def scan(file):
with open(file, "r") as fd:
data = fd.read()
results, remainder = scanner.scan(data)
assert remainder == ""
results.append((Token.EOF, None))
return PeekableIterator(iter(results))
###############################################################################
class LeafType:
def iter_children(self):
return
yield None
class TypeStatement(LeafType):
def __init__(self):
self.parents = []
class TypeModifier:
def get_base(self):
if isinstance(self.elt, TypeModifier):
return self.elt.get_base()
return self.elt
class Tyref:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def iter_children(self):
assert False
class NativeType(LeafType):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class List(TypeModifier):
def __init__(self, elt):
self.elt = elt
def __str__(self):
return "[%s]" % (self.elt)
class Option(TypeModifier):
def __init__(self, elt):
self.elt = elt
def __str__(self):
return "*%s" % (self.elt)
class TypeAlias(TypeStatement):
def __init__(self, name, target):
TypeStatement.__init__(self)
self.name = name
self.target = target
def __str__(self):
return "%s : %s" % (self.name, self.target)
def iter_children(self):
assert False
class Void(LeafType):
def __str__(self):
return "void"
Void = Void()
class Enum(TypeStatement):
def __init__(self, name, cases):
TypeStatement.__init__(self)
self.name = name
self.cases = []
self._lut = {}
curr_val = 0
for case_name, val in cases:
if val is not None:
curr_val = val
self.cases.append((case_name, curr_val))
            self._lut[case_name] = curr_val  # store the resolved value, not the raw (possibly None) one
curr_val += 1
def __str__(self):
res = "enum %s {\n" % self.name
for name, val in self.cases:
res += " %s = %i\n" % (name, val)
res += "}"
return res
    def __getitem__(self, v):
        return self._lut[v]
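# Illustrative sketch (added): implicit enum values continue counting from the
# last explicit one, mirroring C/XDR enum semantics. Defined but never called.
def _demo_enum_numbering():
    e = Enum("color", [("RED", None), ("GREEN", 5), ("BLUE", None)])
    assert e.cases == [("RED", 0), ("GREEN", 5), ("BLUE", 6)]
    assert e["BLUE"] == 6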
class Struct(TypeStatement):
def __init__(self, name, fields):
TypeStatement.__init__(self)
self.name = name
self.fields = fields
def __str__(self):
res = "struct %s {\n" % self.name
for f in self.fields:
res += " " + str(f) + "\n"
res += "}"
return res
def iter_children(self):
for bdg in self.fields:
yield bdg.target
class Union(TypeStatement):
def __init__(self, name, bdg, fields):
TypeStatement.__init__(self)
self.binding = bdg
self.name = name
self.default = None
self.fields = []
acc = []
for k, v in fields:
if k is None:
assert v is not None
self.default = v
else:
acc.append(k)
if v is not None:
self.fields.append((acc, v))
acc = []
assert acc == []
def __str__(self):
res = "union %s {\n" % self.name
for k, v in self.fields:
res += "case " + ", ".join(k) + ":\n " + str(v) + "\n"
if self.default is not None:
res += "default:\n " + str(self.default) + "\n"
res += "}"
return res
def iter_children(self):
yield self.binding.target
for k, bdg in self.fields:
if bdg is not Void:
yield bdg.target
        if self.default is not None and self.default is not Void:
            yield self.default.target
###############################################################################
def consume(scanner, *args):
for expected in args:
tok, _ = scanner.next()
assert tok in expected, "parse error got: %s" % Token.to_string(tok)
def ident(scanner):
tok, val = scanner.next()
assert tok == Token.IDENTIFIER, \
"Got %s, %s" % (Token.to_string(tok), str(val))
return val
def number(scanner):
tok, val = scanner.next()
assert tok == Token.INT
return val
def get_list(scanner, f, end, sep=None):
while True:
yield f(scanner)
token, _ = scanner.peek()
if token == end:
scanner.next()
return
if sep is not None:
assert token == sep
scanner.next()
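# Illustrative note (added): get_list(scanner, enum_case, Token.CCURL,
# Token.COMMA) yields enum cases until the closing brace, consuming both the
# separating commas and the terminator itself.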
class Dispatcher:
"""
A helper class that takes a list of functions (one per token type) and
dispatches to the call to the right class. It also offers a singleton
pattern to call the dispatch method implicitly.
"""
def dispatch(self, scanner, **kwargs):
"""Visit a node."""
token, val = scanner.next()
name = Token.to_string(token)
f = getattr(self, name, None)
assert f is not None, \
"Missing method: %s.%s" % (self.__class__.__name__, name)
if val is None:
return f(scanner, **kwargs)
else:
return f(scanner, val, **kwargs)
@classmethod
def f(cls, scanner, **kwargs):
return cls().dispatch(scanner, **kwargs)
# typedef, field in a struct or case in a union. Consumes the scanner until
# the trailing semi-colon.
class TypeBinding(Dispatcher):
def unsigned(self, scanner, end=Token.SEMI):
next_tok, next_val = scanner.peek()
assert next_tok == Token.IDENTIFIER
if next_val in _int_types:
scanner.next()
return self.identifier(scanner, "hyper", end)
def identifier(self, scanner, tgt, end=Token.SEMI):
if tgt in _int_types:
tgt = "int"
star = False
if scanner.peek()[0] == Token.STAR:
star = True
scanner.next()
name = ident(scanner)
if tgt in _native_types:
ty = NativeType(tgt)
else:
ty = Tyref(tgt)
tok, val = scanner.next()
if star:
ty = Option(ty)
assert tok == end
elif tok == end:
pass
elif tok == Token.CARRETS:
if tgt != "string":
ty = List(ty)
consume(scanner, [end])
elif tok == Token.OBRACK:
if tgt != "opaque":
ty = List(ty)
consume(scanner,
[Token.INT, Token.IDENTIFIER],
[Token.CBRACK],
[end])
else:
assert False
return TypeAlias(name, ty)
class VoidableTypeBinding(TypeBinding):
    def void(self, scanner, end=Token.SEMI):
consume(scanner, [end])
return Void
def enum_case(scanner):
name = ident(scanner)
num = None
if scanner.peek()[0] == Token.EQ:
scanner.next()
num = number(scanner)
return name, num
class UnionCase(Dispatcher):
def case(self, scanner):
name = ident(scanner)
consume(scanner, [Token.COLON])
if scanner.peek()[0] in [Token.CASE, Token.DEFAULT]:
return name, None
# TODO: peek to make sure there's a binding
binding = VoidableTypeBinding.f(scanner)
return name, binding
def default(self, scanner):
consume(scanner, [Token.COLON])
return None, VoidableTypeBinding.f(scanner)
class TopLevel(Dispatcher):
def typedef(self, scanner):
return TypeBinding.f(scanner, end=Token.SEMI)
def enum(self, scanner):
name = ident(scanner)
consume(scanner, [Token.OCURL])
cases = list(get_list(scanner, enum_case, Token.CCURL, Token.COMMA))
consume(scanner, [Token.SEMI])
return Enum(name, cases)
def struct(self, scanner):
name = ident(scanner)
consume(scanner, [Token.OCURL])
fields = list(get_list(scanner, TypeBinding.f, Token.CCURL))
consume(scanner, [Token.SEMI])
return Struct(name, fields)
def union(self, scanner):
name = ident(scanner)
consume(scanner, [Token.SWITCH], [Token.OPAR])
bdg = TypeBinding.f(scanner, end=Token.CPAR)
consume(scanner, [Token.OCURL])
cases = list(get_list(scanner, UnionCase.f, Token.CCURL))
consume(scanner, [Token.SEMI])
return Union(name, bdg, cases)
def namespace(self, scanner):
consume(scanner, [Token.IDENTIFIER], [Token.OCURL])
level = 1
while level > 0:
tok, _ = scanner.next()
if tok == Token.OCURL:
level += 1
elif tok == Token.CCURL:
level -= 1
consume(scanner, [Token.SEMI])
# We discard namespaces here...
return Void
def eof(self, scanner):
return None
def parse_prot_file(filename):
"""Parses a prot file, the result is a iterator over the definitions in
that file."""
scanner = scan(filename)
while True:
v = TopLevel.f(scanner)
if v is None:
return
elif v is not Void:
yield v
###############################################################################
class StubEmitter:
""" Please document me... """
def get_base(self, ty):
while isinstance(ty, TypeModifier):
ty = ty.elt
return ty
def unwind(self, ty, parent):
if isinstance(ty, TypeAlias):
ty = self.unwind(ty.target, parent)
elif isinstance(ty, TypeModifier):
ty.elt = self.unwind(ty.elt, parent)
elif isinstance(ty, Tyref):
ty = self.unwind(self._defs[ty.name], parent)
elif isinstance(ty, TypeStatement):
ty.parents.append(parent)
return ty
def _simplify_field(self, parent, fld):
fld.target = self.unwind(fld.target, parent)
def __init__(self, prot_file):
self._defs = {} # the definitions in the prot file that we parsed
self._filename = prot_file
for x in parse_prot_file(prot_file):
self._defs[x.name] = x
        # We flatten all the typerefs and complete all the unions (replacing
        # default values with explicit cases)...
for k in self._defs:
v = self._defs[k]
if isinstance(v, Struct):
for b in v.fields:
self._simplify_field(v, b)
elif isinstance(v, Union):
if v.default is not None:
tags_ty = self._defs[v.binding.target.name]
                    remaining_tags = set(k for k, _ in tags_ty.cases)
                    for keys, _ in v.fields:
                        for k in keys:
                            remaining_tags.remove(k)
                    v.fields.append((list(remaining_tags), v.default))
v.default = None
for _, b in v.fields:
if b is not Void:
self._simplify_field(v, b)
self._simplify_field(v, v.binding)
elif isinstance(v, TypeAlias):
pass
else:
assert isinstance(v, Enum)
self._emited = set()
def mk_fld_name(self, v):
if v == "lambda":
return "body"
return v
def mk_enum_name(self, v, prefix):
if prefix is not None:
v = v[len(prefix):]
return v.upper()
def find_enum_prefix(self, v):
""" Takes all the cases of an enum and finds the prefix that is common
to all of them and should be stripped out."""
if len(v.cases) < 2:
return None
elts = [x.split("_") for x, _ in v.cases]
minlen = min(len(e) for e in elts)
res = []
for i in range(0, minlen - 1):
common = elts[0][i]
all_same = all(x[i] == common for x in elts[1:])
if all_same:
res.append(common)
else:
break
return None if res == [] else "_".join(res)+"_"
def cleanup_ty_name(self, v):
v = v.lower()
elts = v.split("_")
if elts[-1] == "t":
elts = elts[:-1]
if elts[0] == "xpub3":
elts = elts[1:]
return elts
def mk_class_name(self, v):
return "".join([s.capitalize() for s in self.cleanup_ty_name(v)])
def mk_fn_name(self, v):
return "mk_"\
+ "_".join([s for s in self.cleanup_ty_name(v)])
def class_name_to_fnname(self, v):
# camel case to underscore separated
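        # e.g. "ExprList" -> "expr_list" -> "mk_expr_list" (illustrative, added)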
underscore = re.sub('(?!^)([A-Z]+)', r'_\1', v).lower()
return "mk_" + underscore
def build_val(self, ty, src):
base = self.get_base(ty)
if isinstance(base, NativeType):
return src
if isinstance(ty, List):
return "[%s for e in %s]" % (self.build_val(ty.elt, "e"), src)
elif isinstance(ty, Option):
return "%s if %s != [] else None" %\
(self.build_val(ty.elt, "%s[0]" % src), src)
elif isinstance(ty, Struct) or isinstance(base, Enum):
return "%s(%s)" % (self.mk_class_name(ty.name), src)
elif isinstance(ty, Union):
return "%s(%s)" % (self.mk_fn_name(ty.name), src)
else:
assert False
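    # Illustrative example (added): for a field "foos" holding a list of
    # struct foo_t, build_val returns the source string
    #   [Foo(e) for e in json_val["foos"]]
    # while fields whose base type is native pass through unchanged as
    # json_val["foos"].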
def get_accessor(self, bdg, name="json_val"):
src = "%s[\"%s\"]" % (name, bdg.name)
return self.build_val(bdg.target, src)
    def emit(self, name):
        assert name in self._defs, "Missing definition for %s" % name
        tgt = self._defs[name]
print '''
"""
This file was automatically generated by ast_gen from %s. It contains all the
class required to represent the type `%s` in python and helper functions to
create them from a `json` representation.
"""
from enum import Enum #enum34
class Node:
_fields = []
pass
''' % (os.path.basename(self._filename), name)
self.emit_ty(tgt)
def emit_deps(self, ty):
for c in ty.iter_children():
ty = self.get_base(c)
if isinstance(ty, NativeType) or ty is Void:
pass
else:
self.emit_ty(ty)
def emit_ty(self, ty, classname=None, parentname=None):
if classname is None:
classname = self.mk_class_name(ty.name)
if ty.name in self._emited:
return
self._emited.add(ty.name)
if parentname is None:
parentname = "Node"
method = 'emit_' + ty.__class__.__name__.lower()
f = getattr(self, method, None)
assert f is not None, "Missing emitter %s" % method
f(ty, classname, parentname)
def emit_struct(self, ty, classname, parentname):
print "class %s(%s):" % (classname, parentname)
fields = ['"%s"' % self.mk_fld_name(fld.name) for fld in ty.fields]
print " _fields = [%s]" % ", ".join(fields)
print " __slots__ = [%s]" % ", ".join(fields)
print
print " def __init__(self, json_val):"
for fld in ty.fields:
fld_name = self.mk_fld_name(fld.name)
print " self.%s = %s" % (fld_name, self.get_accessor(fld))
print
print "def %s(json_val):" % self.class_name_to_fnname(classname)
print " return %s(json_val)" % classname
print
self.emit_deps(ty)
def emit_void(self, tgt, classname, parentname):
print "class %s(%s):" % (classname, parentname)
print " __slots__ = []"
print
print "def mk_%s(json_val):" % classname.lower()
print " return %s()" % classname
print
def emit_union(self, ty, classname, parentname):
print "class %s(%s):" % (classname, parentname)
# Assert that this Enum is only referenced in this union
assert ty.binding.target.parents == [ty]
assert isinstance(ty.binding.target, Enum)
assert ty.default is None
print " __slots__ = []"
enum_prefix = self.find_enum_prefix(ty.binding.target)
for k, v in ty.binding.target.cases:
print " %s = %i" % (self.mk_enum_name(k, enum_prefix), v)
print
print "def %s(json_val):" % self.class_name_to_fnname(classname)
print " tag = json_val['%s']" % ty.binding.name
first = True
for k, bdg in ty.fields:
for v in k:
cond = "if" if first else "elif"
first = False
enum_name = self.mk_enum_name(v, enum_prefix)
print " %s tag == %s.%s:" % (cond, classname, enum_name)
subclass_name = self.mk_class_name(v)
if bdg is Void:
print " return %s()" % subclass_name
else:
print " return %s(json_val['%s'])" %\
(subclass_name, bdg.name)
assert not first
print " else:"
print " assert False, 'Unknown tag %i' % tag"
print
for k, bdg in ty.fields:
subtype = Void if bdg is Void else bdg.target
for v in k:
subclass = self.mk_class_name(v)
self.emit_ty(subtype, subclass, classname)
def emit_enum(self, tgt, classname, parentname):
print "class %s(%s, Enum):" % (classname, parentname)
prefix = self.find_enum_prefix(tgt)
for k, v in tgt.cases:
print " %s = %i" % (self.mk_enum_name(k, prefix), v)
print
print "def %s(json_val):" % self.class_name_to_fnname(classname)
print " return %s(json_val)" % classname
#print tgt
def emit_typealias(self, tgt, classname, parentname):
assert False
def main():
v = StubEmitter(sys.argv[1])
v.emit(sys.argv[2])
main()
|
OkCupid/okws
|
tools/ast_gen.py
|
Python
|
gpl-2.0
| 21,231
|
[
"VisIt"
] |
734aad806bbef031d7f6edd32dda6ee5d4b14054f931997e6b6c8e8f9aa2cc62
|
# GromacsWrapper plugin: ls.py
# Copyright (c) 2010 Oliver Beckstein <orbeckst@gmail.com>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
"""
ls plugin
=========
This simply lists the files on disk. It is useful for testing the
plugin architecture.
Plugin class
------------
.. autoclass:: Ls
:members: worker_class
:undoc-members:
Worker class
------------
The worker class performs the analysis.
.. autoclass:: _Ls
:members:
"""
from __future__ import with_statement
__docformat__ = "restructuredtext en"
import os.path
import warnings
import gromacs
from gromacs.utilities import AttributeDict
from gromacs.analysis.core import Worker, Plugin
import logging
logger = logging.getLogger('gromacs.analysis.plugins.ls')
# Worker classes that are registered via Plugins (see below)
# ----------------------------------------------------------
# These must be defined before the plugins.
class _Ls(Worker):
"""ls worker class."""
def __init__(self,**kwargs):
"""
:Arguments:
*kwargs*
           keyword arguments; passed through to the Worker base class
"""
# super class init: do this before doing anything else
# (also sets up self.parameters and self.results)
super(_Ls, self).__init__(**kwargs)
# process specific parameters now and set instance variables
# ....
# self.parameters.filenames = { 'xxx': 'yyy', ....}
# ....
# self.simulation might have been set by the super class
# already; just leave this snippet at the end. Do all
# initialization that requires the simulation class in the
# _register_hook() method.
        if self.simulation is not None:
self._register_hook()
def _register_hook(self, **kwargs):
"""Run when registering; requires simulation."""
super(_Ls, self)._register_hook(**kwargs)
        assert self.simulation is not None
# override 'API' methods of base class
def run(self, *args, **kwargs):
"""List the contents of the simulation directory.
"""
from subprocess import call
lscmd = ['ls', '-la'] + list(args)
cmd = lscmd + [self.simulation.tpr, self.simulation.xtc]
with rulify("TPR and XTC"):
rc = call(cmd) # just print to screen
adir = self.simulation.analysis_dir
cmd = lscmd + [adir]
with rulify("Analysis dir %(adir)s" % vars()):
rc = call(cmd) # just print to screen
def analyze(self,**kwargs):
pass
def plot(self, **kwargs):
pass
from contextlib import contextmanager
@contextmanager
def rulify(header, ncol=79):
toprule = ncol * '='
midrule = ncol * '-'
botrule = toprule
print toprule
print header
print midrule
try:
yield None
finally:
print botrule
print
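# Illustrative usage (added): rulify prints a ruled header before the wrapped
# block and a closing rule after it, e.g.
#   with rulify("Files"):
#       call(['ls'])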
# Public classes that register the worker classes
#------------------------------------------------
class Ls(Plugin):
"""*ls* plugin.
This simply lists the files on disk. It is useful for testing the
plugin architecture.
.. class:: Ls([name[, simulation]])
:Arguments:
*name* : string
plugin name (used to access it)
*simulation* : instance
The :class:`gromacs.analysis.Simulation` instance that owns the plugin.
"""
worker_class = _Ls
|
pslacerda/GromacsWrapper
|
gromacs/analysis/plugins/ls.py
|
Python
|
gpl-3.0
| 3,435
|
[
"Gromacs"
] |
9ab65c44bfb04a5bcb6e86cc8df3f308b84a45353f99964aca48d29551c4368b
|
#!/usr/bin/env python2.7
#Ryan G. Coleman, Brian K. Shoichet Lab
#reads in file containing terminal rotatable hydrogen definitions.
import string, sys
import math
import geometry_basic
import combinatorics
from collections import defaultdict
class Hydrogens(object):
'''holds parameters that determine what a rotatable terminal hydrogen is
rules have the format (1, Atom, bond, Atom, bond, Atom, Degrees) which is:
Atom - Atom name like "C.ar" or "C" which matches all starting with C
bond - "1", "2", "3" or "ar" or "*" which means any
Degrees - "120,240" or "180" or "-" (or something else crazy) "-" means don't
rotate this hydrogen
OR the format (2, bond, bond, Atom, bond, Atom, Degrees) which is:
where the two bonds at front are both applied to the same Atom (the first)
rules are processed in order so you can exclude certain things with -
all things not specified are - (no rotation)
'''
rulesDefault = [(1, "C.ar", "1", "S", "1", "H", "180"),
(1, "C.ar", "1", "O", "1", "H", "180"),
(1, "C.1", "1", "S", "1", "H", "-"),
(1, "C.1", "1", "O", "1", "H", "-"),
(1, "C", "1", "S", "1", "H", "120,240"),
(1, "C", "1", "O", "1", "H", "120,240"),
(2, "2", "2", "N", "1", "H", "-"),
(2, "1", "2", "N", "1", "H", "180")]
def __init__(self, parameterFileName=None):
'''constructs from defaults or reads from file'''
if parameterFileName is not None:
parameterFile = open(parameterFileName, 'r')
self.rules = []
try:
for line in parameterFile:
tokens = string.split(line)
self.rules.append((int(tokens[0]), tokens[1], tokens[2], tokens[3], \
tokens[4], tokens[5], tokens[6]))
except StopIteration:
pass #EOF
else: #no parameter file, use defaults
self.rules = self.rulesDefault
def printParameters(self):
'''prints to standard out the parameters used in a readable format'''
for rule in self.rules:
for part in rule: #don't print out the distance squared term
print part,
print "" #force newline
def findTerminalHydrogens(self, mol2data):
'''takes a Mol2 class, finds all atoms that meet the rules. called first.'''
atomNums = mol2data.atomNum
atomNums.sort()
mol2data.hydrogenRotAngles = []
mol2data.hydrogensToRotate = 0
mol2data.dihedrals = None #set in findDihedrals later
for count, atomNum in enumerate(atomNums):
atomType = mol2data.atomType[count]
result = "-"
for rule in self.rules:
if 1 == rule[0]: #type 1 rule (1, "C.1", "1", "O", "1", "H", "-"),
if -1 != atomType.find(rule[5]): #-1 means not found
if mol2data.bondedTo(atomNum, rule[3], 1, rule[4]):
if mol2data.bondedTo(atomNum, rule[1], 2, rule[2]):
result = rule[6]
break #quit this, don't look at the rest of the rules
elif 2 == rule[0]: #type 2 rule
if -1 != atomType.find(rule[5]): #-1 means not found
if mol2data.bondedTo(atomNum, rule[3], 1, rule[4]):
if mol2data.bondedTo(atomNum, "", 2, rule[2]):
if mol2data.bondedTo(atomNum, "", 2, rule[2]):
result = rule[6]
break #quit this, don't look at the rest of the rules
#print atomNum, atomType, result
mol2data.hydrogenRotAngles.append(result)
if result != "-":
mol2data.hydrogensToRotate += 1
def _findDihedrals(self, mol2data):
'''private function called from both rotate and reset that finds the atom
numbers to use for dihedral rotations for any non-"-" hydrogen'''
if mol2data.dihedrals is None:
mol2data.dihedrals = {} #maps atom number to dihedral atom numbers
mol2data.rotAngles = {}
#these are 4 atoms that are all bonded in series. last is hydrogen
for count, angles in enumerate(mol2data.hydrogenRotAngles):
if angles != "-": #don't care about the ones that can't get rotate/reset
dihedral = [-1, -1, -1, -1]
hydrogenNum = mol2data.atomNum[count]
dihedral[3] = hydrogenNum
dihedral[2] = mol2data.bondedTo(hydrogenNum, "", 1, None, True)[1]
dihedral[1] = mol2data.bondedTo(hydrogenNum, "", 2, None, True)[1]
dihedral[0] = mol2data.bondedTo(hydrogenNum, "", 3, None, True)[1]
mol2data.dihedrals[hydrogenNum] = dihedral
mol2data.rotAngles[hydrogenNum] = []
for tempAngle in string.split(angles, ","):
mol2data.rotAngles[hydrogenNum].append(float(tempAngle))
def _getCurDihedral(self, atomNum, xyzCount, mol2data):
'''for a given atomNum, get the dihedral in the mol2'''
curDihedralNums = mol2data.dihedrals[atomNum]
curXyz = []
for curDihedralNum in curDihedralNums:
curXyz.append(mol2data.getXyz(xyzCount, curDihedralNum))
dihedral1 = geometry_basic.getDihedralUnited(tuple(curXyz))
return curXyz, dihedral1
def resetHydrogens(self, mol2data):
'''runs after findTerminalHydrogens, resets the rotatable hydrogens to 0
instead of the potentially bad angle they were set at.'''
self._findDihedrals(mol2data)
for xyzCount in xrange(mol2data.xyzCount): #iterate over conformations
for atomIndex, atomNum in enumerate(mol2data.atomNum):
if atomNum in mol2data.dihedrals.keys() and \
180. in mol2data.rotAngles[atomNum]: #only planar get reset
curXyz, dihedral1 = self._getCurDihedral(atomNum, xyzCount, mol2data)
#want to find a new rotation angle, 0, and reset
newTheta = 0 - dihedral1 #radians!!
#print newTheta
newHxyz = geometry_basic.rotateAboutLine(curXyz[1], curXyz[2], \
curXyz[3], newTheta)
#print curXyz[3], newHxyz
mol2data.atomXyz[xyzCount][atomIndex] = newHxyz
mol2data.inputHydrogens[xyzCount] = 1 #means reset
#print curDihedralNums, dihedral1, dihedral1*(180./math.pi)
#following code checks to make sure everything worked as expected
#curXyz = []
#for curDihedralNum in curDihedralNums:
# curXyz.append(mol2data.getXyz(xyzCount, curDihedralNum))
#dihedral2 = geometry_basic.getDihedralUnited(tuple(curXyz))
#print dihedral1, dihedral2, math.degrees(dihedral2)
def rotateHydrogens(self, mol2data):
'''runs after findTerminalHydrogens, rotates the rotatable hydrogens to
the angles specified by the rules. differs in that it copies everything
and makes all combinations of hydrogen rotations'''
self._findDihedrals(mol2data) #first call in case reset was not done
rotatedMol2data = mol2data.copy() #copy this one, put all new in rotated
rotatedMol2data.atomXyz = [] #clear
rotatedMol2data.inputEnergy = [] #clear
rotatedMol2data.inputHydrogens = [] #clear
rotatedMol2data.origXyzCount = mol2data.xyzCount
for xyzCount in xrange(mol2data.xyzCount): #iterate over orig conformations
indexToCoords = [] #list of lists
originals = []
for atomIndex, atomNum in enumerate(mol2data.atomNum):
if atomNum in mol2data.dihedrals.keys():
thisCoords = []
curXyz, dihedral1 = self._getCurDihedral(atomNum, xyzCount, mol2data)
thisCoords.append(curXyz[3]) #original saved here
originals.append(curXyz[3]) #also save orig here
for newAngle in mol2data.rotAngles[atomNum]:
#want to find a new rotation angle and move to that
newTheta = math.radians(newAngle) #convert to radians!!
#print dihedral1, newAngle, newTheta,
newHxyz = geometry_basic.rotateAboutLine(curXyz[1], curXyz[2], \
curXyz[3], newTheta)
thisCoords.append(newHxyz) #save new ones here
indexToCoords.append(thisCoords)
#have to make all possible combinations
combinations = combinatorics.allCombinations(indexToCoords)
for aCombo in combinations:
if aCombo != originals: #don't need to do anything for the original set
#need to copy inputEnergy and atomXyz, then replace atomXyz
atomXyzCopy = mol2data.atomXyz[xyzCount][:]
#replace hydrogen positions here
replaceIndex = 0
for atomIndex, atomNum in enumerate(mol2data.atomNum):
if atomNum in mol2data.dihedrals.keys():
atomXyzCopy[atomIndex] = aCombo[replaceIndex] #replace coord
replaceIndex += 1 #move this counter forward
rotatedMol2data.atomXyz.append(atomXyzCopy)
rotatedMol2data.inputEnergy.append(mol2data.inputEnergy[xyzCount]) #copy
rotatedMol2data.inputHydrogens.append(2) #means rotate
else: #aCombo is the original set
atomXyzCopy = mol2data.atomXyz[xyzCount][:]
rotatedMol2data.atomXyz.append(atomXyzCopy)
rotatedMol2data.inputEnergy.append(mol2data.inputEnergy[xyzCount])
rotatedMol2data.inputHydrogens.append( \
mol2data.inputHydrogens[xyzCount])
rotatedMol2data.xyzCount = len(rotatedMol2data.atomXyz) #finally set this correctly
return rotatedMol2data
def findAngles(self, mol2data):
'''runs after findTerminalHydrogens, find the current angles of the
terminal hydrogens'''
self._findDihedrals(mol2data) #first call in case reset was not done
for xyzCount in xrange(mol2data.xyzCount): #iterate over orig conformations
for atomIndex, atomNum in enumerate(mol2data.atomNum):
if atomNum in mol2data.dihedrals.keys():
curXyz, dihedral1 = self._getCurDihedral(atomNum, xyzCount, mol2data)
print "%+5.10f" % dihedral1
if -1 != string.find(sys.argv[0], "hydrogens.py"):
#if program is called from the command line, assume user wants a copy of the
  #default parameter file written to standard out. this is the only command-line use.
#usually this will be imported and run from somewhere else.
if len(sys.argv) > 1:
Hydrogens(sys.argv[1]).printParameters()
else:
Hydrogens().printParameters()
|
ryancoleman/mol2db2
|
hydrogens.py
|
Python
|
gpl-2.0
| 10,279
|
[
"Brian"
] |
ae7019f85e975bf423db944228a7cc28c4107ec400d8ebf765aa07dc8634f3ad
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for rnn_layers."""
import types
from absl.testing import parameterized
import lingvo.compat as tf
from lingvo.core import attention
from lingvo.core import base_layer
from lingvo.core import cluster_factory
from lingvo.core import py_utils
from lingvo.core import rnn_cell
from lingvo.core import rnn_layers
from lingvo.core import test_utils
import numpy as np
from tensorflow.python.ops import inplace_ops
FLAGS = tf.flags.FLAGS
class TimestepAccumulator(base_layer.Accumulator):
"""Simple accumulator for counting timesteps."""
def DefaultValue(self):
return tf.convert_to_tensor(0.0)
def Increment(self):
self.SetValue(self.GetValue() + 1.0)
def AddTimestepAccumulator(layer):
orig_fprop = layer.FProp
def WrappedFProp(*args, **kwargs):
layer.accumulators.ts_count.Increment()
return orig_fprop(*args, **kwargs)
layer.FProp = WrappedFProp
layer.RegisterAccumulator('ts_count', TimestepAccumulator())
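# Note (added): AddTimestepAccumulator monkey-patches the layer's FProp so the
# 'ts_count' accumulator is incremented on every forward call; tests attach it
# to a cell to count how many times the cell body executes inside the
# recurrent loop.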
class LayersTestBase(test_utils.TestCase):
def _testStackedFRNNHelper(self,
cls,
dtype,
trailing_pad_len=0,
keep_prob=1.0,
bi_directional=False,
input_dim=-1,
output_dim=-1):
batch = 3
dims = 16
slen = 10 + trailing_pad_len
num_layers = 4
params = rnn_cell.LSTMCellSimple.Params()
params.name = 'lstm'
params.output_nonlinearity = True
params.dtype = dtype
params.params_init = py_utils.WeightInit.Uniform(1.24, 429891685)
params.vn.global_vn = True
params.vn.per_step_vn = False
params.vn.seed = 2938482
params.vn.scale = 0.1
    # StackedFRNNParallel executes cells in a different order, so the
    # deterministic random numbers do not match; we therefore set
    # deterministic = False for this comparison.
params.vn.deterministic = False
params.num_input_nodes = dims
params.num_output_nodes = dims // 2 if bi_directional else dims
sfrnn_params = cls.Params()
sfrnn_params.name = 'sfrnn'
sfrnn_params.dtype = dtype
sfrnn_params.random_seed = 123456
sfrnn_params.cell_tpl = params
sfrnn_params.num_layers = num_layers
sfrnn_params.skip_start = 2
sfrnn_params.dropout.keep_prob = keep_prob
sfrnn_params.num_input_nodes = input_dim
sfrnn_params.num_output_nodes = output_dim
np.random.seed(12345)
input_dim = input_dim if input_dim > 0 else dims
output_dim = output_dim if output_dim > 0 else dims
inputs = np.random.uniform(size=(slen, batch, input_dim))
paddings = np.zeros([slen, batch, 1])
if trailing_pad_len > 0:
paddings[-trailing_pad_len:, :] = 1.0
paddings[-trailing_pad_len - 3:-trailing_pad_len - 1, :] = 1.0
with self.session(use_gpu=True, graph=tf.Graph()):
sfrnn = sfrnn_params.Instantiate()
self.evaluate(tf.global_variables_initializer())
if bi_directional:
sfrnn_outputs = sfrnn.FPropFullSequence(sfrnn.theta,
tf.constant(inputs, dtype),
tf.constant(paddings, dtype))
sfrnn_outputs = py_utils.HasShape(sfrnn_outputs,
[slen, batch, output_dim])
return self.evaluate(sfrnn_outputs)
else:
sfrnn_outputs, sfrnn_final = sfrnn.FPropDefaultTheta(
tf.constant(inputs, dtype), tf.constant(paddings, dtype))
sfrnn_outputs = py_utils.HasShape(sfrnn_outputs,
[slen, batch, output_dim])
return self.evaluate([sfrnn_outputs, sfrnn_final])
def _testStackedFRNNGradHelper(self, cls, bi_directional=False):
trailing_pad_len = 2
dtype = tf.float64
batch = 3
dims = 16
slen = 10 + trailing_pad_len
num_layers = 4
with self.session(use_gpu=True, graph=tf.Graph()) as sess:
params = rnn_cell.LSTMCellSimple.Params()
params.name = 'lstm'
params.output_nonlinearity = True
params.dtype = dtype
params.params_init = py_utils.WeightInit.Uniform(1.24, 429891685)
params.vn.global_vn = False
params.vn.per_step_vn = False
params.num_input_nodes = dims
params.num_output_nodes = dims // 2 if bi_directional else dims
sfrnn_params = cls.Params()
sfrnn_params.name = 'sfrnn'
sfrnn_params.dtype = dtype
sfrnn_params.cell_tpl = params
sfrnn_params.num_layers = num_layers
sfrnn_params.skip_start = 2
with tf.name_scope('sfrnn'):
sfrnn = sfrnn_params.Instantiate()
np.random.seed(12345)
inputs = tf.constant(np.random.uniform(size=(slen, batch, dims)), dtype)
paddings = np.zeros([slen, batch, 1])
paddings[-trailing_pad_len:, :] = 1.0
paddings[-trailing_pad_len - 3:-trailing_pad_len - 1, :] = 1.0
paddings = tf.constant(paddings, dtype)
if bi_directional:
sfrnn_outputs = sfrnn.FPropDefaultTheta(inputs, paddings)
loss = tf.reduce_sum(sfrnn_outputs)
else:
sfrnn_outputs, sfrnn_final = sfrnn.FPropDefaultTheta(inputs, paddings)
loss = tf.reduce_sum(sfrnn_outputs)
for fin in sfrnn_final.rnn:
loss += tf.reduce_sum(fin.m) + tf.reduce_sum(fin.c)
xs = sfrnn.vars.Flatten() + [inputs]
dxs = tf.gradients(loss, xs)
# Compares the sym grad against the numeric grads.
self.evaluate(tf.global_variables_initializer())
grad_step = 17
sym_grads = self.evaluate(dxs)
sym_grads = [test_utils.PickEveryN(_, grad_step) for _ in sym_grads]
num_grads = [
test_utils.PickEveryN(
test_utils.ComputeNumericGradient(
sess, loss, v, delta=1e-4, step=grad_step), grad_step)
for v in xs
]
for (sym, num) in zip(sym_grads, num_grads):
self.assertFalse(np.any(np.isnan(sym)))
self.assertFalse(np.any(np.isnan(num)))
print('max = ', np.max(np.abs(sym)))
self.assertAllClose(sym, num)
class LayersTest(LayersTestBase, parameterized.TestCase):
def testIdentitySeqLayer(self):
with self.session(use_gpu=False):
rnn_params = rnn_layers.IdentitySeqLayer.Params()
rnn_params.name = 'no_op'
rnn = rnn_params.Instantiate()
np.random.seed(12345)
inputs_sequence = []
paddings_sequence = []
for _ in range(5):
inputs_sequence.append(
tf.constant(np.random.uniform(size=(3, 2)), tf.float32))
paddings_sequence.append(tf.zeros([3, 1]))
paddings_sequence[-1] = tf.constant([[1.0], [1.0], [1.0]])
paddings_sequence[-2] = tf.constant([[1.0], [1.0], [1.0]])
inputs, paddings = tf.stack(inputs_sequence), tf.stack(paddings_sequence)
outputs = rnn.FPropFullSequence(rnn.theta, inputs, paddings)
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
inputs_v, outputs_v = self.evaluate([inputs, outputs])
self.assertAllEqual(inputs_v, outputs_v)
def testRNN(self):
with self.session(use_gpu=False):
params = rnn_cell.LSTMCellSimple.Params()
params.name = 'lstm'
params.output_nonlinearity = True
params.params_init = py_utils.WeightInit.Uniform(1.24, 429891685)
params.num_input_nodes = 2
params.num_output_nodes = 2
rnn_params = rnn_layers.RNN.Params()
rnn_params.name = 'rnn'
rnn_params.vn.global_vn = True
rnn_params.vn.per_step_vn = False
rnn_params.vn.seed = 2938482
rnn_params.vn.scale = 0.1
rnn_params.cell = params
rnn_params.sequence_length = 10
rnn = rnn_layers.RNN(rnn_params)
np.random.seed(12345)
inputs_sequence = []
paddings_sequence = []
for _ in range(rnn_params.sequence_length):
inputs_sequence.append(
tf.constant(np.random.uniform(size=(3, 2)), tf.float32))
paddings_sequence.append(tf.zeros([3, 1]))
paddings_sequence[-1] = tf.constant([[1.0], [1.0], [1.0]])
paddings_sequence[-2] = tf.constant([[1.0], [1.0], [1.0]])
inputs, paddings = tf.stack(inputs_sequence), tf.stack(paddings_sequence)
outputs, final = rnn.FPropDefaultTheta(inputs, paddings)
outputs *= paddings
sum_outputs = tf.reduce_sum(outputs, axis=0)
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(py_utils.NestedMap(sum=sum_outputs, **final))
sum_expected = [[-0.396654, 0.69955], [-0.400491, 0.475449],
[-0.390183, 0.612227]]
m_expected = [[-0.198327, 0.349775], [-0.200246, 0.237725],
[-0.195092, 0.306113]]
c_expected = [[-0.54442, 0.62336], [-0.346681, 0.595387],
[-0.350726, 0.691779]]
self.assertAllClose(sum_expected, actual.sum)
self.assertAllClose(m_expected, actual.m)
self.assertAllClose(c_expected, actual.c)
def testRNNGradientChecker(self):
with self.session(use_gpu=False) as sess:
params = rnn_cell.LSTMCellSimple.Params()
params.name = 'lstm'
params.output_nonlinearity = True
params.params_init = py_utils.WeightInit.Uniform(1.24, 429891685)
params.num_input_nodes = 2
params.num_output_nodes = 2
rnn_params = rnn_layers.RNN.Params()
rnn_params.name = 'rnn'
rnn_params.dtype = tf.float64
rnn_params.vn.global_vn = False
rnn_params.vn.per_step_vn = False
rnn_params.cell = params
rnn_params.sequence_length = 10
rnn = rnn_layers.RNN(rnn_params)
np.random.seed(12345)
inputs_sequence = []
paddings_sequence = []
for _ in range(rnn_params.sequence_length):
inputs_sequence.append(
tf.constant(np.random.uniform(size=(3, 2)), tf.float64))
paddings_sequence.append(tf.zeros([3, 1], dtype=tf.float64))
paddings_sequence[-1] = tf.constant(
[[1.0], [1.0], [1.0]], dtype=tf.float64)
paddings_sequence[-2] = tf.constant(
[[1.0], [1.0], [1.0]], dtype=tf.float64)
inputs, paddings = tf.stack(inputs_sequence), tf.stack(paddings_sequence)
outputs, final = rnn.FPropDefaultTheta(inputs, paddings)
outputs *= paddings
sum_outputs = tf.reduce_sum(outputs, axis=0)
loss = tf.reduce_sum(sum_outputs) + tf.reduce_sum(final.m + final.c)
all_vars = py_utils.Flatten(rnn.vars)
assert len(all_vars) == 2
grads = tf.gradients(loss, all_vars)
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
symbolic_grads = [gd.eval() for gd in grads]
numerical_grads = []
for v in all_vars:
numerical_grads.append(test_utils.ComputeNumericGradient(sess, loss, v))
for x, y in zip(symbolic_grads, numerical_grads):
self.assertAllClose(x, y)
def testRNNReversed(self):
"""Test an RNN layer with reverse=true.
This should yield the same output as feeding reversed input into
the same RNN with reverse=false (except the output is in reversed order).
"""
timesteps = 10
padding_steps = 2
batch_size = 2
depth = 3
with self.session(use_gpu=True):
lstm_params = rnn_cell.LSTMCellSimple.Params()
lstm_params.output_nonlinearity = True
lstm_params.num_input_nodes = depth
lstm_params.num_output_nodes = depth
rnn_params = rnn_layers.RNN.Params()
rnn_params.vn.global_vn = False
rnn_params.vn.per_step_vn = False
rnn_params.cell = lstm_params
rnn_params.sequence_length = timesteps
fwd_rnn_params = rnn_params.Copy()
fwd_rnn_params.name = 'fwd'
fwd_rnn_params.cell.name = 'fwd_lstm'
fwd_rnn = rnn_layers.RNN(fwd_rnn_params)
bak_rnn_params = rnn_params.Copy()
bak_rnn_params.name = 'bak'
bak_rnn_params.cell.name = 'bak_lstm'
bak_rnn_params.reverse = True
bak_rnn = rnn_layers.RNN(bak_rnn_params)
# Create 8 timesteps of random input, 2 timesteps of zeros, and paddings
# to match.
fwd_inputs = tf.constant(
np.concatenate(
(np.random.uniform(
size=(timesteps - padding_steps, batch_size, depth)),
np.zeros(shape=(padding_steps, batch_size, depth))),
axis=0), tf.float32)
fwd_paddings = tf.concat(
(tf.zeros(shape=(timesteps - padding_steps, batch_size, depth)),
tf.ones(shape=(padding_steps, batch_size, depth))),
axis=0)
bak_inputs = tf.reverse(fwd_inputs, [0])
bak_paddings = tf.reverse(fwd_paddings, [0])
# Run the forward rnn with reversed inputs
reversed_outputs, _ = fwd_rnn.FProp(fwd_rnn.theta, bak_inputs,
bak_paddings)
reversed_outputs = tf.reverse(reversed_outputs, [0])
# Run the backward rnn with forward inputs. Note we reuse the fwd_rnn
# theta so the results should match
bak_outputs, _ = bak_rnn.FProp(fwd_rnn.theta, fwd_inputs, fwd_paddings)
self.evaluate(tf.global_variables_initializer())
actual_reversed_outputs, actual_bak_outputs = self.evaluate(
[reversed_outputs, bak_outputs])
self.assertAllClose(actual_reversed_outputs, actual_bak_outputs)
def testRNNWithConvLSTMCell(self):
with self.session(use_gpu=False):
params = rnn_cell.ConvLSTMCell.Params()
params.name = 'conv_lstm'
params.output_nonlinearity = True
params.params_init = py_utils.WeightInit.Uniform(1.24, 429891685)
params.inputs_shape = [None, 4, 2, 3]
params.cell_shape = [None, 4, 2, 2]
params.filter_shape = [3, 2]
rnn_params = rnn_layers.RNN.Params()
rnn_params.name = 'rnn'
rnn_params.vn.global_vn = True
rnn_params.vn.per_step_vn = False
rnn_params.vn.seed = 2938482
rnn_params.vn.scale = 0.1
rnn_params.cell = params
rnn_params.sequence_length = 10
rnn = rnn_layers.RNN(rnn_params)
np.random.seed(12345)
inputs_sequence = []
paddings_sequence = []
for _ in range(rnn_params.sequence_length):
inputs_sequence.append(
tf.constant(np.random.uniform(size=(3, 4, 2, 3)), tf.float32))
paddings_sequence.append(tf.zeros([3, 1]))
paddings_sequence[-1] = tf.constant([[1.0], [1.0], [1.0]])
paddings_sequence[-2] = tf.constant([[1.0], [1.0], [1.0]])
inputs, paddings = tf.stack(inputs_sequence), tf.stack(paddings_sequence)
outputs, final = rnn.FPropDefaultTheta(inputs, paddings)
outputs *= tf.reshape(1.0 - paddings,
[rnn_params.sequence_length, -1, 1, 1, 1])
sum_outputs = tf.reduce_sum(tf.reduce_sum(outputs, axis=0), [1, 2, 3])
sum_final_m = tf.reduce_sum(final.m, [1, 2, 3])
sum_final_c = tf.reduce_sum(final.c, [1, 2, 3])
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(
py_utils.NestedMap(sum=sum_outputs, m=sum_final_m, c=sum_final_c))
print('sum_outputs', np.array_repr(actual.sum))
print('final_m', np.array_repr(actual.m))
print('final_c', np.array_repr(actual.c))
sum_expected = [3.84135, 2.37388, 0.045646]
m_expected = [0.618991, 0.647515, 0.450933]
c_expected = [2.374451, 1.865975, 1.714276]
self.assertAllClose(sum_expected, actual.sum)
self.assertAllClose(m_expected, actual.m)
self.assertAllClose(c_expected, actual.c)
def testFRNNWithConvLSTMCell(self):
params = rnn_cell.ConvLSTMCell.Params()
params.name = 'conv_lstm'
params.output_nonlinearity = True
params.params_init = py_utils.WeightInit.Uniform(1.24, 429891685)
params.inputs_shape = [None, 4, 2, 3]
params.cell_shape = [None, 4, 2, 2]
params.filter_shape = [3, 2]
rnn_params = rnn_layers.RNN.Params()
rnn_params.name = 'rnn'
rnn_params.vn.global_vn = True
rnn_params.vn.per_step_vn = False
rnn_params.vn.seed = 2938482
rnn_params.vn.scale = 0.1
rnn_params.cell = params
rnn_params.sequence_length = 10
frnn_params = rnn_layers.FRNN.Params()
frnn_params.name = 'frnn'
frnn_params.cell = params
frnn_params.vn = rnn_params.vn
np.random.seed(12345)
inputs_sequence = []
paddings_sequence = []
for _ in range(rnn_params.sequence_length):
inputs_sequence.append(
np.random.uniform(size=(3, 4, 2, 3)).astype(np.float32))
paddings_sequence.append(np.zeros([3, 1], dtype=np.float32))
paddings_sequence[-1] = [[1.0], [1.0], [1.0]]
paddings_sequence[-2] = [[1.0], [1.0], [1.0]]
with self.session(graph=tf.Graph(), use_gpu=True):
rnn = rnn_params.Instantiate()
self.evaluate(tf.global_variables_initializer())
inputs, paddings = tf.stack(inputs_sequence), tf.stack(paddings_sequence)
outputs, final = rnn.FPropDefaultTheta(inputs, paddings)
outputs *= tf.reshape(1.0 - paddings,
[rnn_params.sequence_length, -1, 1, 1, 1])
rnn_outputs_v, rnn_final_v = self.evaluate([outputs, final])
with self.session(graph=tf.Graph(), use_gpu=True):
frnn = frnn_params.Instantiate()
self.evaluate(tf.global_variables_initializer())
frnn_outputs, frnn_final = frnn.FPropDefaultTheta(
tf.stack(inputs_sequence), tf.stack(paddings_sequence))
paddings = tf.stack(paddings_sequence)
frnn_outputs *= tf.reshape(1.0 - paddings,
tf.concat([tf.shape(paddings), [1, 1]], 0))
frnn_outputs_v, frnn_final_v = self.evaluate([frnn_outputs, frnn_final])
self.assertAllClose(rnn_outputs_v, frnn_outputs_v)
self.assertAllClose(rnn_final_v.m, frnn_final_v.m)
self.assertAllClose(rnn_final_v.c, frnn_final_v.c)
def testRNNWithConvLSTMCellGradientChecker(self):
with self.session(use_gpu=True) as sess:
params = rnn_cell.ConvLSTMCell.Params()
params.name = 'conv_lstm'
params.output_nonlinearity = True
params.params_init = py_utils.WeightInit.Uniform(1.24, 429891685)
params.inputs_shape = [None, 4, 2, 3]
params.cell_shape = [None, 4, 2, 2]
params.filter_shape = [3, 2]
rnn_params = rnn_layers.RNN.Params()
rnn_params.name = 'rnn'
rnn_params.vn.global_vn = False
rnn_params.vn.per_step_vn = False
rnn_params.cell = params
rnn_params.sequence_length = 10
rnn = rnn_layers.RNN(rnn_params)
np.random.seed(12345)
inputs_sequence = []
paddings_sequence = []
for _ in range(rnn_params.sequence_length):
inputs_sequence.append(
tf.constant(np.random.uniform(size=(3, 4, 2, 3)), tf.float32))
paddings_sequence.append(tf.zeros([3, 1]))
paddings_sequence[-1] = tf.constant([[1.0], [1.0], [1.0]])
paddings_sequence[-2] = tf.constant([[1.0], [1.0], [1.0]])
inputs, paddings = tf.stack(inputs_sequence), tf.stack(paddings_sequence)
outputs, final = rnn.FPropDefaultTheta(inputs, paddings)
outputs *= tf.reshape(1.0 - paddings,
[rnn_params.sequence_length, -1, 1, 1, 1])
loss = tf.reduce_sum(tf.reduce_sum(
outputs, axis=0)) + tf.reduce_sum(final.m + final.c)
all_vars = tf.trainable_variables()
assert len(all_vars) == 2
grads = tf.gradients(loss, all_vars)
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
symbolic_grads = [gd.eval() for gd in grads]
numerical_grads = []
for v in all_vars:
numerical_grads.append(test_utils.ComputeNumericGradient(sess, loss, v))
for x, y in zip(symbolic_grads, numerical_grads):
self.assertAllClose(x, y, rtol=0.1, atol=0.1)
def _testFRNNWithConvLSTMCellGradientChecker(self):
with self.session(use_gpu=True) as sess:
params = rnn_cell.ConvLSTMCell.Params()
params.name = 'conv_lstm'
params.output_nonlinearity = True
params.params_init = py_utils.WeightInit.Uniform(1.24, 429891685)
params.inputs_shape = [None, 4, 2, 3]
params.cell_shape = [None, 4, 2, 2]
params.filter_shape = [3, 2]
frnn_params = rnn_layers.FRNN.Params()
frnn_params.name = 'rnn'
frnn_params.vn.global_vn = False
frnn_params.vn.per_step_vn = False
frnn_params.cell = params
frnn = rnn_layers.FRNN(frnn_params)
np.random.seed(12345)
inputs_sequence = tf.constant(
np.random.uniform(size=(10, 3, 4, 2, 3)), tf.float32)
paddings = inplace_ops.inplace_update(
tf.zeros([10, 3, 1]), 1, [[1.0], [0.0], [1.0]])
outputs, final = frnn.FPropDefaultTheta(inputs_sequence, paddings)
outputs *= tf.reshape(paddings, [10, 3, 1, 1, 1])
loss = tf.reduce_sum(outputs) + tf.reduce_sum(final.m + final.c)
all_vars = tf.trainable_variables()
assert len(all_vars) == 2
grads = tf.gradients(loss, all_vars)
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
symbolic_grads = [gd.eval() for gd in grads]
numerical_grads = []
for v in all_vars:
numerical_grads.append(test_utils.ComputeNumericGradient(sess, loss, v))
for x, y in zip(symbolic_grads, numerical_grads):
self.assertAllClose(x, y, rtol=0.1, atol=0.1)
def testFRNNWithConvLSTMCellGradientChecker(self):
self._testFRNNWithConvLSTMCellGradientChecker()
def testFRNNWithLSTMCellSimpleGradientChecker(self):
with self.session(use_gpu=True) as sess:
params = rnn_cell.LSTMCellSimple.Params()
params.name = 'conv_lstm'
params.output_nonlinearity = True
params.params_init = py_utils.WeightInit.Uniform(1.24, 429891685)
params.deterministic = True
params.zo_prob = 0.25
params.num_input_nodes = 4
params.num_output_nodes = 6
params.dtype = tf.float64
frnn_params = rnn_layers.FRNN.Params()
frnn_params.dtype = tf.float64
frnn_params.name = 'rnn'
frnn_params.cell = params
old_enable_asserts = FLAGS.enable_asserts
FLAGS.enable_asserts = False
frnn = rnn_layers.FRNN(frnn_params)
FLAGS.enable_asserts = old_enable_asserts
np.random.seed(12345)
inputs_sequence = tf.constant(
np.random.uniform(size=(10, 3, 4)), tf.float64)
paddings = inplace_ops.inplace_update(
tf.zeros([10, 3, 1], tf.float64), 1, [[1.0], [0.0], [1.0]])
outputs, _ = frnn.FPropDefaultTheta(inputs_sequence, paddings)
outputs *= (1.0 - tf.reshape(paddings, [10, 3, 1]))
loss = tf.reduce_sum(outputs)
all_vars = tf.trainable_variables()
assert len(all_vars) == 2
grads = tf.gradients(loss, all_vars)
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
symbolic_grads = [gd.eval() for gd in grads]
numerical_grads = []
for v in all_vars:
numerical_grads.append(test_utils.ComputeNumericGradient(sess, loss, v))
for x, y in zip(symbolic_grads, numerical_grads):
self.assertAllClose(x, y, rtol=0.00001, atol=0.00001)
@parameterized.parameters((True, True, True), (True, False, True),
(True, False, False), (False, True, True),
(False, False, True), (False, False, False))
def testFRNN(self, inline, per_step_vn, deterministic):
config = py_utils.SessionConfig(inline=inline)
dtype = tf.float32
batch = 3
dims = 16
slen = 10
params = rnn_cell.LSTMCellSimple.Params()
params.name = 'lstm'
params.output_nonlinearity = True
params.dtype = dtype
params.params_init = py_utils.WeightInit.Uniform(1.24, 429891685)
params.vn.global_vn = True
params.vn.per_step_vn = per_step_vn
params.vn.seed = 2938482
params.vn.scale = 0.1
params.vn.deterministic = deterministic
params.num_input_nodes = dims
params.num_output_nodes = dims
frnn_params = rnn_layers.FRNN.Params()
frnn_params.name = 'frnn'
frnn_params.dtype = dtype
frnn_params.cell = params
rnn_params = rnn_layers.RNN.Params()
rnn_params.name = 'rnn'
rnn_params.dtype = dtype
rnn_params.sequence_length = slen
rnn_params.cell = params
np.random.seed(12345)
inputs = np.random.uniform(size=(slen, batch, dims)).astype(np.float32)
paddings = np.zeros([slen, batch, 1], dtype=np.float32)
paddings[-3:-1, :] = 1.0
with self.session(graph=tf.Graph(), use_gpu=True, config=config):
frnn = frnn_params.Instantiate()
self.evaluate(tf.global_variables_initializer())
AddTimestepAccumulator(frnn.cell)
frnn_out, frnn_final = self.evaluate(
frnn.FPropDefaultTheta(tf.constant(inputs), tf.constant(paddings)))
with self.session(graph=tf.Graph(), use_gpu=True, config=config):
rnn = rnn_params.Instantiate()
self.evaluate(tf.global_variables_initializer())
rnn_out, rnn_final = self.evaluate(
rnn.FPropDefaultTheta(tf.unstack(inputs), tf.unstack(paddings)))
self.assertAllClose(frnn_out, rnn_out)
self.assertAllClose(frnn_final.m, rnn_final.m)
self.assertAllClose(frnn_final.c, rnn_final.c)
def _testFRNNGradHelper(self, config):
dtype = tf.float64 # More stable using float64.
batch = 3
dims = 16
slen = 10
with self.session(use_gpu=True, config=config) as sess:
params = rnn_cell.LSTMCellSimple.Params()
params.name = 'lstm'
params.output_nonlinearity = True
params.params_init = py_utils.WeightInit.Uniform(0.02, 429891685)
params.vn.global_vn = False
params.vn.per_step_vn = False
params.num_input_nodes = dims
params.num_output_nodes = dims
params.dtype = dtype
frnn_params = rnn_layers.FRNN.Params()
frnn_params.name = 'frnn'
frnn_params.dtype = dtype
frnn_params.cell = params
frnn = rnn_layers.FRNN(frnn_params)
AddTimestepAccumulator(frnn.cell)
w, b = frnn.theta.cell.wm, frnn.theta.cell.b
np.random.seed(12345)
inputs = tf.constant(
np.random.uniform(-0.02, 0.02, size=(slen, batch, dims)), dtype)
paddings = np.zeros([slen, batch, 1])
paddings[-3:-1, :] = 1.0
paddings = tf.constant(paddings, dtype)
frnn_outputs, frnn_final = frnn.FPropDefaultTheta(inputs, paddings)
loss = tf.reduce_sum(frnn_outputs) + tf.reduce_sum(
frnn_final.m) + tf.reduce_sum(frnn_final.c)
dw, db, dinputs = tf.gradients(loss, [w, b, inputs])
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
grad_step = 7
sym_grads = self.evaluate([db, dw, dinputs])
sym_grads = [test_utils.PickEveryN(_, grad_step) for _ in sym_grads]
num_grads = [
test_utils.PickEveryN(
test_utils.ComputeNumericGradient(
sess, loss, v, delta=1e-4, step=grad_step), grad_step)
for v in [b, w, inputs]
]
for (sym, num) in zip(sym_grads, num_grads):
self.assertFalse(np.any(np.isnan(sym)))
self.assertFalse(np.any(np.isnan(num)))
print('max = ', np.max(np.abs(sym)))
self.assertAllClose(sym, num)
def testFRNNGradNoInline(self):
self._testFRNNGradHelper(py_utils.SessionConfig(inline=False))
def testFRNNGradInline(self):
self._testFRNNGradHelper(py_utils.SessionConfig(inline=True))
def testStackedFRNNDropout(self):
v1_out, _ = self._testStackedFRNNHelper(
rnn_layers.StackedFRNNLayerByLayer,
tf.float32,
trailing_pad_len=0,
keep_prob=0.5)
if tf.test.is_gpu_available():
rtol = 1e-5
else:
rtol = 1e-6
self.assertAllClose([175.9741], [np.sum(v1_out * v1_out)], rtol=rtol)
def testStackedFRNNInputOutputDims(self):
v1_out, _ = self._testStackedFRNNHelper(
rnn_layers.StackedFRNNLayerByLayer,
tf.float32,
trailing_pad_len=0,
keep_prob=0.5,
input_dim=5,
output_dim=7)
if tf.test.is_gpu_available():
rtol = 1e-5
else:
rtol = 1e-6
self.assertAllClose([32.74327], [np.sum(v1_out * v1_out)], rtol=rtol)
def testStackedFRNNLayerByLayerGrad(self):
self._testStackedFRNNGradHelper(rnn_layers.StackedFRNNLayerByLayer)
def testStackedFRNNPackedInput(self):
tf.random.set_seed(123456)
batch = 2
dims = 4
slen = 5
num_layers = 4
dtype = tf.float32
with self.session() as sess:
params = rnn_cell.LSTMCellSimple.Params()
params.name = 'lstm'
params.output_nonlinearity = True
params.dtype = dtype
params.params_init = py_utils.WeightInit.Uniform(1.24, 429891685)
params.vn.global_vn = False
params.vn.per_step_vn = False
params.num_input_nodes = dims
params.num_output_nodes = dims
sfrnn_params = rnn_layers.StackedFRNNLayerByLayer.Params()
sfrnn_params.name = 'sfrnn'
sfrnn_params.dtype = dtype
sfrnn_params.random_seed = 123456
sfrnn_params.cell_tpl = params
sfrnn_params.num_layers = num_layers
sfrnn_params.skip_start = 2
sfrnn_params.num_input_nodes = dims
sfrnn_params.num_output_nodes = dims
sfrnn_params.packed_input = True
with tf.name_scope('sfrnn'):
sfrnn = sfrnn_params.Instantiate()
np.random.seed(12345)
inputs = tf.constant(np.random.uniform(size=(slen, batch, dims)), dtype)
paddings = tf.constant(np.zeros([slen, batch, 1]), dtype)
segment_id = tf.constant(np.ones([slen, batch, 1]), dtype)
self.evaluate(tf.global_variables_initializer())
# Output with unpacked inputs.
sfrnn_outputs, _ = sfrnn.FPropDefaultTheta(
inputs, paddings, segment_id=segment_id)
sfrnn_outputs = py_utils.HasShape(sfrnn_outputs, [slen, batch, dims])
def _Pack(x):
# [batch, slen, ...].
x = tf.transpose(x, [1, 0, 2])
# [batch * slen, 1, ...].
return tf.reshape(x, [batch * slen, 1, -1])
# Output with packed inputs.
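      # Packing flattens the whole batch into a single sequence of length
      # batch * slen (batch size 1). tf.cumsum over the all-ones segment_id
      # along the batch axis assigns a distinct id in [1, batch] to each
      # original sequence, so sequences remain separated after packing.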
packed_inputs = _Pack(inputs)
packed_paddings = _Pack(paddings)
packed_segment_id = _Pack(tf.cumsum(segment_id, axis=1))
packed_outputs, _ = sfrnn.FPropDefaultTheta(
packed_inputs, packed_paddings, segment_id=packed_segment_id)
packed_outputs = tf.reshape(packed_outputs, [batch, slen, dims])
packed_outputs = tf.transpose(packed_outputs, [1, 0, 2])
# Check that the outputs are equal.
sfrnn_outputs, packed_outputs = sess.run([sfrnn_outputs, packed_outputs])
self.assertAllClose(sfrnn_outputs, packed_outputs)
def testStackedBiFRNNDropout(self):
v1_out = self._testStackedFRNNHelper(
rnn_layers.StackedBiFRNNLayerByLayer,
tf.float32,
trailing_pad_len=0,
keep_prob=0.5,
bi_directional=True)
if tf.test.is_gpu_available():
rtol = 1e-5
else:
rtol = 1e-6
self.assertAllClose([305.77435], [np.sum(v1_out * v1_out)], rtol=rtol)
def testStackedBiFRNNInputOutputDims(self):
v1_out = self._testStackedFRNNHelper(
rnn_layers.StackedBiFRNNLayerByLayer,
tf.float32,
trailing_pad_len=0,
keep_prob=0.5,
bi_directional=True,
input_dim=5,
output_dim=8)
if tf.test.is_gpu_available():
rtol = 1e-5
else:
rtol = 1e-6
self.assertAllClose([8.116007], [np.sum(v1_out * v1_out)], rtol=rtol)
def testStackedBiFRNNLayerByLayerGrad(self):
self._testStackedFRNNGradHelper(
rnn_layers.StackedBiFRNNLayerByLayer, bi_directional=True)
def _testBidirectionalFRNNHelper(self,
trailing_pad_len=0,
cluster_params=None):
batch = 3
dims = 16
slen = 10 + trailing_pad_len
params = rnn_cell.LSTMCellSimple.Params()
params.name = 'lstm_forward'
params.output_nonlinearity = True
params.params_init = py_utils.WeightInit.Uniform(0.02, 429891685)
params.vn.global_vn = True
params.vn.per_step_vn = False
params.vn.seed = 2938482
params.vn.scale = 0.1
params.num_input_nodes = dims
params.num_output_nodes = dims
lstm_forward = params.Copy()
params.name = 'lstm_backward'
params.params_init = py_utils.WeightInit.Uniform(0.02, 83820209838)
lstm_backward = params.Copy()
frnn_params = rnn_layers.BidirectionalFRNN.Params()
frnn_params.name = 'bifrnn'
frnn_params.fwd = lstm_forward.Copy()
frnn_params.bak = lstm_backward.Copy()
rnn_params = rnn_layers.BidirectionalRNN.Params()
rnn_params.name = 'rnn'
rnn_params.fwd = lstm_forward.Copy()
rnn_params.bak = lstm_backward.Copy()
rnn_params.sequence_length = slen
np.random.seed(12345)
inputs = np.random.uniform(size=(slen, batch, dims)).astype(np.float32)
    paddings = np.zeros([slen, batch, 1], dtype=np.float32)
    if trailing_pad_len > 0:
      # paddings[-0:] would select (and pad) the whole sequence, so only pad
      # the tail when there is actual trailing padding.
      paddings[-trailing_pad_len:, :] = 1.0
    paddings[-trailing_pad_len - 3:-trailing_pad_len - 1, :] = 1.0
with cluster_factory.Cluster(
cluster_params) if cluster_params else cluster_factory.Current():
with self.session(
graph=tf.Graph(),
use_gpu=True,
config=tf.config_pb2.ConfigProto(allow_soft_placement=True)):
frnn = frnn_params.Instantiate()
self.evaluate(tf.global_variables_initializer())
frnn_outputs = self.evaluate(
frnn.FPropDefaultTheta(tf.constant(inputs), tf.constant(paddings)))
with self.session(
graph=tf.Graph(),
use_gpu=True,
config=tf.config_pb2.ConfigProto(allow_soft_placement=True)):
rnn = rnn_params.Instantiate()
self.evaluate(tf.global_variables_initializer())
rnn_outputs = self.evaluate(
rnn.FPropDefaultTheta(tf.unstack(inputs), tf.unstack(paddings)))
    # x[:-0] would be an empty slice, so only trim when trailing_pad_len > 0.
    trim = -trailing_pad_len if trailing_pad_len else None
    rnn_outputs_val, frnn_outputs_val = [
        x[:trim] for x in [rnn_outputs, frnn_outputs]
    ]
    self.assertAllClose(rnn_outputs_val, frnn_outputs_val)
def testBidirectionalFRNN(self):
self._testBidirectionalFRNNHelper()
def testBidirectionalFRNNTrailingPadding(self):
self._testBidirectionalFRNNHelper(trailing_pad_len=2)
def testBidirectionalFRNNSplit(self):
cluster_params = cluster_factory.Current().params.Copy()
cluster_params.worker.Set(
gpus_per_replica=2, devices_per_split=2, name='/job:localhost')
self._testBidirectionalFRNNHelper(cluster_params=cluster_params)
def testBidirectionalFRNNGrad(self):
dtype = tf.float64 # More stable using float64.
batch = 3
dims = 16
slen = 10
with self.session(use_gpu=True) as sess:
params = rnn_cell.LSTMCellSimple.Params()
params.name = 'lstm_forward'
params.output_nonlinearity = True
params.params_init = py_utils.WeightInit.Uniform(0.02, 429891685)
params.vn.global_vn = False
params.vn.per_step_vn = False
params.dtype = dtype
params.num_input_nodes = dims
params.num_output_nodes = dims
lstm_forward = params.Copy()
params.name = 'lstm_backward'
params.params_init = py_utils.WeightInit.Uniform(0.02, 83820209838)
params.dtype = dtype
lstm_backward = params.Copy()
frnn_params = rnn_layers.BidirectionalFRNN.Params()
frnn_params.name = 'bifrnn'
frnn_params.dtype = dtype
frnn_params.fwd = lstm_forward.Copy()
frnn_params.bak = lstm_backward.Copy()
frnn = rnn_layers.BidirectionalFRNN(frnn_params)
w0, b0 = (frnn.theta.fwd_rnn.cell.wm, frnn.theta.fwd_rnn.cell.b)
w1, b1 = (frnn.theta.bak_rnn.cell.wm, frnn.theta.bak_rnn.cell.b)
np.random.seed(12345)
inputs = tf.constant(np.random.uniform(size=(slen, batch, dims)), dtype)
paddings = np.zeros([slen, batch, 1])
paddings[-3:-1, :] = 1.0
paddings = tf.constant(paddings, dtype)
frnn_outputs = frnn.FPropDefaultTheta(inputs, paddings)
loss = tf.reduce_sum(frnn_outputs)
dw0, db0, dw1, db1, dinputs = tf.gradients(loss, [w0, b0, w1, b1, inputs])
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
grad_step = 13
sym_grads = self.evaluate([dw0, db0, dw1, db1, dinputs])
sym_grads = [test_utils.PickEveryN(_, grad_step) for _ in sym_grads]
num_grads = [
test_utils.PickEveryN(
test_utils.ComputeNumericGradient(
sess, loss, v, delta=1e-4, step=grad_step), grad_step)
for v in [w0, b0, w1, b1, inputs]
]
for (sym, num) in zip(sym_grads, num_grads):
self.assertFalse(np.any(np.isnan(sym)))
self.assertFalse(np.any(np.isnan(num)))
print('max = ', np.max(np.abs(sym)))
self.assertAllClose(sym, num)
def _MultiSourceFRNNWithAttentionInputs(self,
single_source=False,
single_source_length=True,
dtype=tf.float32):
np.random.seed(12345)
if single_source:
src_names = ['en']
slens = [10]
sdepths = [4]
elif single_source_length:
src_names = ['en1', 'en2', 'de']
slens = [11, 10, 9]
sdepths = [4, 4, 4]
else:
src_names = ['en1', 'en2', 'de']
slens = [11, 10, 9]
sdepths = [4, 4, 3]
sbatch = 3
tlen = 7
tbatch = 6
dims = 4
src_encs = py_utils.NestedMap()
src_paddings = py_utils.NestedMap()
for sdepth, slen, sname in zip(sdepths, slens, src_names):
src_encs[sname] = tf.constant(
np.random.uniform(size=[slen, sbatch, sdepth]), dtype)
src_paddings[sname] = tf.constant(np.zeros([slen, sbatch]), dtype)
inputs = tf.constant(np.random.uniform(size=(tlen, tbatch, dims)), dtype)
paddings = tf.constant(np.zeros([tlen, tbatch, 1]), dtype)
return (src_encs, src_paddings, inputs, paddings)
def _MultiSourceFRNNWithAttentionParams(self,
single_source=False,
single_source_length=True,
dtype=tf.float32):
dims = 4
alt_depth = 3
if single_source:
src_names = ['en']
else:
src_names = ['en1', 'en2', 'de']
p = rnn_cell.LSTMCellSimple.Params()
p.name = 'lstm'
p.dtype = dtype
p.output_nonlinearity = True
p.params_init = py_utils.WeightInit.Uniform(0.02, 429891685)
p.vn.global_vn = False
p.vn.per_step_vn = False
p.num_input_nodes = dims * 2
p.num_output_nodes = dims
lstm_params = p
p = attention.AdditiveAttention.Params()
p.name = 'atten'
p.dtype = dtype
p.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)
p.source_dim = dims
p.query_dim = dims
p.hidden_dim = dims
p.vn.global_vn = False
p.vn.per_step_vn = False
attention_tpl = p
p = attention.MergerLayer.Params()
p.name = 'merger'
p.dtype = dtype
p.merger_op = ('mean' if single_source else 'atten')
p.source_dim = dims
p.query_dim = dims
p.hidden_dim = dims
merger_tpl = p
p = rnn_layers.MultiSourceFRNNWithAttention.Params()
p.name = 'msrc_frnn_with_atten'
p.dtype = dtype
p.cell = lstm_params
p.attention_tpl = attention_tpl
p.atten_merger = merger_tpl
p.source_names = src_names
if not single_source_length:
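      # The 'de' source has depth alt_depth (3) instead of dims (4), so it
      # gets its own attention params, and the merger pre-projects all three
      # sources to a common dims-wide space before the attention merge.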
de_atten = attention_tpl.Copy()
de_atten.source_dim = alt_depth
p.source_name_to_attention_params = {'de': de_atten}
merger_tpl.pre_proj_input_dims = [dims, dims, alt_depth]
merger_tpl.pre_proj_output_dims = [dims, dims, dims]
merger_tpl.proj_tpl.batch_norm = False
merger_tpl.proj_tpl.weight_norm = True
return p
def testMultiSourceFRNNWithAttention(self):
with self.session(use_gpu=True):
p = self._MultiSourceFRNNWithAttentionParams()
msrc_frnn = p.Instantiate()
(src_encs, src_paddings, inputs,
paddings) = self._MultiSourceFRNNWithAttentionInputs()
a, m = msrc_frnn.FPropDefaultTheta(src_encs, src_paddings, inputs,
paddings)
msrc_frnn_out = tf.concat([a, m], 2)
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
ys = self.evaluate([msrc_frnn_out])[0]
self.assertEqual(ys.shape, (7, 6, 8))
print(np.sum(ys, axis=(1, 2)), np.sum(ys, axis=(0, 1)),
np.sum(ys, axis=(0, 2)))
# pyformat: disable
# pylint: disable=bad-whitespace
self.assertAllClose(
np.sum(ys, axis=(1, 2)), [
11.87568951, 11.8436203 , 11.80368233, 11.80167198,
11.82034779, 11.80246162, 11.80818748
])
self.assertAllClose(
np.sum(ys, axis=(0, 1)), [
21.41802788, 20.86244965, 21.48164749, 19.95701981,
-0.54706949, 0.07046284, -0.50449395, 0.0176318
])
self.assertAllClose(
np.sum(ys, axis=(0, 2)), [
13.29822254, 14.01552773, 14.04851151, 13.28098106,
14.05391502, 14.0585041
])
# pyformat: enable
# pylint: enable=bad-whitespace
def testMultiSourceFRNNWithAttentionMultiDepth(self):
with self.session(use_gpu=True):
p = self._MultiSourceFRNNWithAttentionParams(single_source_length=False)
msrc_frnn = p.Instantiate()
(src_encs, src_paddings, inputs, paddings
) = self._MultiSourceFRNNWithAttentionInputs(single_source_length=False)
a, m = msrc_frnn.FPropDefaultTheta(src_encs, src_paddings, inputs,
paddings)
msrc_frnn_out = tf.concat([a, m], 2)
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
ys = self.evaluate([msrc_frnn_out])[0]
self.assertEqual(ys.shape, (7, 6, 8))
print(np.sum(ys, axis=(1, 2)), np.sum(ys, axis=(0, 1)),
np.sum(ys, axis=(0, 2)))
# pyformat: disable
# pylint: disable=bad-whitespace
self.assertAllClose(
np.sum(ys, axis=(1, 2)), [
5.976197, 5.932313, 5.917447, 5.907898, 5.907385, 5.90272 ,
5.890248
])
self.assertAllClose(
np.sum(ys, axis=(0, 1)), [
2.635296e+01, 3.177989e+00, 1.024462e+01, 2.403777e+00,
-4.908564e-01, 1.006475e-01, -3.303704e-01, -2.455414e-02
])
self.assertAllClose(
np.sum(ys, axis=(0, 2)), [
6.610287, 6.657996, 7.452699, 6.626875, 6.60216 , 7.484191
])
# pyformat: enable
# pylint: enable=bad-whitespace
def testMultiSourceFRNNWithAttentionSingleSource(self, dtype=tf.float32):
with self.session(
use_gpu=True, config=py_utils.SessionConfig(inline=False)):
p = self._MultiSourceFRNNWithAttentionParams(
single_source=True, dtype=dtype)
frnn = p.Instantiate()
(src_encs, src_paddings, inputs,
paddings) = self._MultiSourceFRNNWithAttentionInputs(
single_source=True, dtype=dtype)
a, m = frnn.FPropDefaultTheta(src_encs, src_paddings, inputs, paddings)
frnn_out = tf.concat([a, m], 2)
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
ys, = self.evaluate([frnn_out])
self.assertEqual(ys.shape, (7, 6, 8))
print(np.sum(ys, axis=(1, 2)), np.sum(ys, axis=(0, 1)),
np.sum(ys, axis=(0, 2)))
      # These values are identical to those of FRNNWithAttention.
expected_sum12 = [
13.07380962, 13.03321552, 12.99956226, 13.00612164, 13.01202011,
12.99347878, 12.98680687
]
expected_sum01 = [
2.41238327e+01, 2.11899853e+01, 2.45926647e+01, 2.22827835e+01,
-5.62886238e-01, 2.42760777e-02, -5.79716980e-01, 3.40666063e-02
]
expected_sum02 = [
12.74695969, 16.13114548, 16.66101837, 12.74922562, 16.16581345,
16.65085411
]
self.assertAllClose(np.sum(ys, axis=(1, 2)), expected_sum12)
self.assertAllClose(np.sum(ys, axis=(0, 1)), expected_sum01)
self.assertAllClose(np.sum(ys, axis=(0, 2)), expected_sum02)
def testMultiSourceFRNNWithAttentionGradSingleSource(self, dtype=tf.float64):
with self.session(
use_gpu=True, config=py_utils.SessionConfig(inline=False)) as sess:
p = self._MultiSourceFRNNWithAttentionParams(
single_source=True, dtype=dtype)
frnn = p.Instantiate()
(src_encs, src_paddings, inputs,
paddings) = self._MultiSourceFRNNWithAttentionInputs(
single_source=True, dtype=dtype)
# Fetch all the parameters.
w0, b0 = (frnn.theta.cell.wm, frnn.theta.cell.b)
att0h, att0q, att0s = (frnn.theta.attentions[0].hidden_var,
frnn.theta.attentions[0].query_var,
frnn.theta.attentions[0].source_var)
out, _ = frnn.FPropDefaultTheta(src_encs, src_paddings, inputs, paddings)
loss = tf.reduce_sum(out)
parameters = [w0, b0, inputs, att0h, att0q, att0s]
grads = tf.gradients(loss, parameters)
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
sym_grads = self.evaluate(grads)
num_grads = [
test_utils.ComputeNumericGradient(sess, loss, v, delta=1e-5)
for v in parameters
]
for i, (sym, num) in enumerate(zip(sym_grads, num_grads)):
print([
i, sym.shape, num.shape,
np.max(np.abs(sym)),
np.max(np.abs(sym - num)),
np.max(np.abs(sym - num) / np.abs(sym))
])
def Compare(name, sym, num, rtol=1e-5):
print(['name = ', name])
self.assertFalse(np.any(np.isnan(sym)))
self.assertFalse(np.any(np.isnan(num)))
self.assertAllClose(sym, num, rtol=rtol, atol=1e-8)
for i, (sym, num) in enumerate(zip(sym_grads, num_grads)):
Compare(parameters[i].name, sym, num)
def testMultiSourceFRNNWithAttentionGrad(self, dtype=tf.float64):
with self.session(
use_gpu=True, config=py_utils.SessionConfig(inline=False)) as sess:
p = self._MultiSourceFRNNWithAttentionParams(dtype=dtype)
frnn = p.Instantiate()
# Fetch all the parameters.
w0, b0 = (frnn.theta.cell.wm, frnn.theta.cell.b)
mh, mq, ms = (frnn.theta.atten_merger.atten.hidden_var,
frnn.theta.atten_merger.atten.query_var,
frnn.theta.atten_merger.atten.source_var)
att0h, att0q, att0s = (frnn.theta.attentions[0].hidden_var,
frnn.theta.attentions[0].query_var,
frnn.theta.attentions[0].source_var)
att1h, att1q, att1s = (frnn.theta.attentions[1].hidden_var,
frnn.theta.attentions[1].query_var,
frnn.theta.attentions[1].source_var)
att2h, att2q, att2s = (frnn.theta.attentions[2].hidden_var,
frnn.theta.attentions[2].query_var,
frnn.theta.attentions[2].source_var)
(src_encs, src_paddings, inputs,
paddings) = self._MultiSourceFRNNWithAttentionInputs(dtype=dtype)
out, _ = frnn.FPropDefaultTheta(src_encs, src_paddings, inputs, paddings)
loss = tf.reduce_sum(out)
parameters = [
w0, b0, inputs, mh, mq, ms, att0h, att0q, att0s, att1h, att1q, att1s,
att2h, att2q, att2s
]
grads = tf.gradients(loss, parameters)
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
sym_grads = self.evaluate(grads)
num_grads = [
test_utils.ComputeNumericGradient(sess, loss, v, delta=1e-5)
for v in parameters
]
for i, (sym, num) in enumerate(zip(sym_grads, num_grads)):
print([
i, sym.shape, num.shape,
np.max(np.abs(sym)),
np.max(np.abs(sym - num)),
np.max(np.abs(sym - num) / np.abs(sym))
])
def Compare(name, sym, num, rtol=1e-5):
print(['name = ', name])
self.assertFalse(np.any(np.isnan(sym)))
self.assertFalse(np.any(np.isnan(num)))
self.assertAllClose(sym, num, rtol=rtol, atol=1e-8)
for i, (sym, num) in enumerate(zip(sym_grads, num_grads)):
Compare(parameters[i].name, sym, num)
def testMultiSourceFRNNWithAttentionGradMultiDepth(self, dtype=tf.float64):
with self.session(
use_gpu=True, config=py_utils.SessionConfig(inline=False)) as sess:
p = self._MultiSourceFRNNWithAttentionParams(
single_source_length=False, dtype=dtype)
frnn = p.Instantiate()
# Fetch all the parameters.
w0, b0 = (frnn.theta.cell.wm, frnn.theta.cell.b)
mh, mq, ms, mw0, mw1, mw2 = (frnn.theta.atten_merger.atten.hidden_var,
frnn.theta.atten_merger.atten.query_var,
frnn.theta.atten_merger.atten.source_var,
frnn.theta.atten_merger.pre_proj[0].w,
frnn.theta.atten_merger.pre_proj[1].w,
frnn.theta.atten_merger.pre_proj[2].w)
att0h, att0q, att0s = (frnn.theta.attentions[0].hidden_var,
frnn.theta.attentions[0].query_var,
frnn.theta.attentions[0].source_var)
att1h, att1q, att1s = (frnn.theta.attentions[1].hidden_var,
frnn.theta.attentions[1].query_var,
frnn.theta.attentions[1].source_var)
att2h, att2q, att2s = (frnn.theta.attentions[2].hidden_var,
frnn.theta.attentions[2].query_var,
frnn.theta.attentions[2].source_var)
(src_encs, src_paddings, inputs,
paddings) = self._MultiSourceFRNNWithAttentionInputs(
single_source_length=False, dtype=dtype)
out, _ = frnn.FPropDefaultTheta(src_encs, src_paddings, inputs, paddings)
loss = tf.reduce_sum(out)
parameters = [
w0, b0, inputs, mh, mq, ms, att0h, att0q, att0s, att1h, att1q, att1s,
att2h, att2q, att2s, mw0, mw1, mw2
]
grads = tf.gradients(loss, parameters)
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
sym_grads = self.evaluate(grads)
num_grads = [
test_utils.ComputeNumericGradient(sess, loss, v, delta=1e-5)
for v in parameters
]
for i, (sym, num) in enumerate(zip(sym_grads, num_grads)):
print([
i, sym.shape, num.shape,
np.max(np.abs(sym)),
np.max(np.abs(sym - num)),
np.max(np.abs(sym - num) / np.abs(sym))
])
def Compare(name, sym, num, rtol=1e-5):
print(['name = ', name])
self.assertFalse(np.any(np.isnan(sym)))
self.assertFalse(np.any(np.isnan(num)))
self.assertAllClose(sym, num, rtol=rtol, atol=1e-8)
for i, (sym, num) in enumerate(zip(sym_grads, num_grads)):
Compare(parameters[i].name, sym, num)
def _CreateFRNNWithAttentionParams(self,
dtype,
dims,
slen,
sbatch,
tlen,
tbatch,
input_prev_atten_ctx=True,
output_prev_atten_ctx=False):
# Create RNN Layer.
p = rnn_cell.LSTMCellSimple.Params()
p.name = 'lstm'
p.dtype = dtype
p.output_nonlinearity = True
p.params_init = py_utils.WeightInit.Uniform(0.02, 429891685)
p.vn.global_vn = False
p.vn.per_step_vn = False
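    # When the previous attention context is fed back as input
    # (input_prev_atten_ctx), it is concatenated with the regular input,
    # doubling the cell's input width.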
p.num_input_nodes = dims * 2 if input_prev_atten_ctx else dims
p.num_output_nodes = dims
lstm_params = p
# Create Attention Layer.
p = attention.AdditiveAttention.Params()
p.name = 'atten'
p.dtype = dtype
p.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)
p.source_dim = dims
p.query_dim = dims
p.hidden_dim = dims
p.vn.global_vn = False
p.vn.per_step_vn = False
atten = p
p = rnn_layers.FRNNWithAttention.Params()
p.name = 'frnn_with_atten'
p.dtype = dtype
p.cell = lstm_params
p.attention = atten
p.input_prev_atten_ctx = input_prev_atten_ctx
p.output_prev_atten_ctx = output_prev_atten_ctx
return p
@parameterized.parameters((False, False), (False, True), (True, False),
(True, True))
def testFRNNWithAttentionSeparateSourceContextIdenticalToSourceEnc(
self, input_prev_atten_ctx, output_prev_atten_ctx):
dtype = tf.float32
dims = 4
slen = 10
sbatch = 3
tlen = 7
tbatch = 6
with self.session(use_gpu=True, config=py_utils.SessionConfig(inline=True)):
np.random.seed(12345)
p = self._CreateFRNNWithAttentionParams(
dtype=dtype,
dims=dims,
slen=slen,
sbatch=sbatch,
tlen=tlen,
tbatch=tbatch,
input_prev_atten_ctx=input_prev_atten_ctx,
output_prev_atten_ctx=output_prev_atten_ctx)
frnn = p.Instantiate()
src_encs = tf.constant(
np.random.uniform(size=[slen, sbatch, dims]), dtype)
src_paddings = tf.constant(np.zeros([slen, sbatch]), dtype)
inputs = tf.constant(np.random.uniform(size=(tlen, tbatch, dims)), dtype)
paddings = tf.constant(np.zeros([tlen, tbatch, 1]), dtype)
# Run without specifying source context vectors.
atten_ctx, rnn_out, atten_prob, _ = frnn.FPropDefaultTheta(
src_encs, src_paddings, inputs, paddings)
frnn_out = tf.concat([atten_ctx, rnn_out, atten_prob], 2)
      # Running again with separate source context vectors set equal to
      # src_encs should produce the same outputs.
(atten_ctx_src_ctx, rnn_out_src_ctx, atten_prob_src_ctx,
_) = frnn.FPropDefaultTheta(
src_encs, src_paddings, inputs, paddings, src_contexts=src_encs)
frnn_out_src_ctx = tf.concat(
[atten_ctx_src_ctx, rnn_out_src_ctx, atten_prob_src_ctx], 2)
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
frnn_out_v, frnn_out_src_ctx_v = self.evaluate(
[frnn_out, frnn_out_src_ctx])
      # The expected last dimensions of atten_ctx_src_ctx, rnn_out_src_ctx and
      # atten_prob_src_ctx are dims, dims and slen, respectively.
self.assertEqual(frnn_out_v.shape, (tlen, tbatch, 2 * dims + slen))
self.assertEqual(frnn_out_src_ctx_v.shape, frnn_out_v.shape)
self.assertAllClose(frnn_out_v, frnn_out_src_ctx_v)
@parameterized.parameters((False, False), (False, True), (True, False),
(True, True))
def testFRNNWithAttentionSeparateSourceContextDifferentFromSourceEnc(
self, input_prev_atten_ctx, output_prev_atten_ctx):
dtype = tf.float32
dims = 4
slen = 10
sbatch = 3
tlen = 7
tbatch = 6
with self.session(use_gpu=True, config=py_utils.SessionConfig(inline=True)):
np.random.seed(12345)
p = self._CreateFRNNWithAttentionParams(
dtype=dtype,
dims=dims,
slen=slen,
sbatch=sbatch,
tlen=tlen,
tbatch=tbatch,
input_prev_atten_ctx=input_prev_atten_ctx,
output_prev_atten_ctx=output_prev_atten_ctx)
frnn = p.Instantiate()
src_encs = tf.constant(
np.random.uniform(size=[slen, sbatch, dims]), dtype)
src_paddings = tf.constant(np.zeros([slen, sbatch]), dtype)
      # Create src_contexts with the even feature dimensions (0 and 2) set to
      # zero; the remaining dimensions are random.
src_contexts = np.random.uniform(size=[slen, sbatch, dims])
src_contexts[:, :, 0:dims:2] = 0.0
src_contexts = tf.constant(src_contexts, dtype=dtype)
inputs = tf.constant(np.random.uniform(size=(tlen, tbatch, dims)), dtype)
paddings = tf.constant(np.zeros([tlen, tbatch, 1]), dtype)
      # Run with separate source context vectors that differ from src_encs;
      # the attention context is then computed from src_contexts.
atten_ctx, _, _, _ = frnn.FPropDefaultTheta(
src_encs, src_paddings, inputs, paddings, src_contexts=src_contexts)
# Initialize all the variables, and then run one step.
self.evaluate(tf.global_variables_initializer())
atten_ctx_v = self.evaluate(atten_ctx)
self.assertEqual(atten_ctx_v.shape, (tlen, tbatch, dims))
      # Verify that the output also has zeros in the locations where the
      # source context has zeros.
self.assertAllClose(
np.zeros(shape=(tlen, tbatch, dims // 2)),
atten_ctx_v[:, :, 0:dims:2])
def _testFRNNWithAttentionUseZeroAttenState(self, zero_atten_state_fn):
dtype = tf.float32
dims = 5
slen = 4
tlen = 3
sbatch = 2
tbatch = 6
with self.session(use_gpu=True):
p = self._CreateFRNNWithAttentionParams(
dtype=dtype,
dims=dims,
slen=slen,
sbatch=sbatch,
tlen=tlen,
tbatch=tbatch)
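      # With use_zero_atten_state the attention starts from an all-zero
      # context, so the context dimension must be given explicitly below.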
p.use_zero_atten_state = True
p.atten_context_dim = dims
frnn = p.Instantiate()
# Override the ZeroAttentionState to have the desired output type
frnn.atten.ZeroAttentionState = types.MethodType(zero_atten_state_fn,
frnn.atten)
src_encs = tf.constant(
np.random.uniform(size=[slen, sbatch, dims]), dtype)
src_paddings = tf.constant(np.zeros([slen, sbatch]), dtype)
inputs = tf.constant(np.random.uniform(size=(tlen, tbatch, dims)), dtype)
paddings = tf.constant(np.zeros([tlen, tbatch, 1]), dtype)
atten_ctx, rnn_out, atten_prob, _ = frnn.FPropDefaultTheta(
src_encs, src_paddings, inputs, paddings)
self.evaluate(tf.global_variables_initializer())
atten_ctx, rnn_out, atten_prob = self.evaluate(
[atten_ctx, rnn_out, atten_prob])
# Check shapes
self.assertEqual(atten_ctx.shape, (tlen, tbatch, dims))
self.assertEqual(rnn_out.shape, (tlen, tbatch, dims))
self.assertEqual(atten_prob.shape, (tlen, tbatch, slen))
def testFRNNWithAttentionUseZeroAttenStateTensor(self):
def _TensorZeroAttenState(self, source_seq_length, decoder_batch_size):
del source_seq_length
p = self.params
zs = tf.zeros([decoder_batch_size, 1], dtype=py_utils.FPropDtype(p))
return zs
self._testFRNNWithAttentionUseZeroAttenState(_TensorZeroAttenState)
def testFRNNWithAttentionUseZeroAttenStateNestedMap(self):
def _NestedMapZeroAttenState(self, source_seq_length, decoder_batch_size):
del source_seq_length
p = self.params
zs = tf.zeros([decoder_batch_size, 1], dtype=py_utils.FPropDtype(p))
return py_utils.NestedMap(z=zs)
self._testFRNNWithAttentionUseZeroAttenState(_NestedMapZeroAttenState)
if __name__ == '__main__':
tf.test.main()
| tensorflow/lingvo | lingvo/core/rnn_layers_test.py | Python | apache-2.0 | 60274 | ["Gaussian"] | c5bc4d373b3589b604fae506726ffb16b5533d620f2cfb9b1299c7103d67a476 |
# -*- coding: utf-8 -*-
"""
A real simple app for using webapp2 with auth and session.
It just covers the basics. Creating a user, login, logout
and a decorator for protecting certain handlers.
Routes are setup in routes.py and added in main.py
"""
# standard library imports
import logging
import random
import re
import json
# related third party imports
import webapp2
import httpagentparser
from webapp2_extras import security
from webapp2_extras.auth import InvalidAuthIdError, InvalidPasswordError
from webapp2_extras.i18n import gettext as _
from webapp2_extras.appengine.auth.models import Unique
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.api.datastore_errors import BadValueError
from google.appengine.runtime import apiproxy_errors
from github import github
from linkedin import linkedin
# local application/library specific imports
import models
import forms as forms
from lib import utils, captcha, twitter
from lib.basehandler import BaseHandler
from lib.basehandler import user_required
from lib import facebook
from baymodels import models as bmodels
class AbTestHandler(BaseHandler):
"""
    A/B testing experiments are commonly used with landing pages, but are not
    limited to them.
    If the rendered page contains a form (e.g. a newsletter subscription),
    handle the POST request in a separate handler.
    For complex A/B tests, you can use two distinct templates instead of one.
    By default only one template is used, as abtest_b.html is a soft link to
    abtest_a.html.
"""
def get(self):
a = True
template = 'abtest_a.html'
        if random.randint(0, 1):
            a = False
            template = 'abtest_b.html'
        params = {'a': a, 'b': not a}
return self.render_template(template, **params)
class LoginRequiredHandler(BaseHandler):
def get(self):
        continue_url, = self.request.get('continue', allow_multiple=True)
self.redirect(users.create_login_url(dest_url=continue_url))
class RegisterBaseHandler(BaseHandler):
"""
Base class for handlers with registration and login forms.
"""
@webapp2.cached_property
def form(self):
if self.is_mobile:
return forms.RegisterMobileForm(self)
else:
return forms.RegisterForm(self)
class SendEmailHandler(BaseHandler):
"""
Core Handler for sending Emails
Use with TaskQueue
"""
def post(self):
from google.appengine.api import mail, app_identity
to = self.request.get("to")
subject = self.request.get("subject")
body = self.request.get("body")
sender = self.request.get("sender")
        # Fall back to the configured sender when none was provided or the
        # provided address is invalid.
        if sender == '' or not utils.is_email_valid(sender):
if utils.is_email_valid(self.app.config.get('contact_sender')):
sender = self.app.config.get('contact_sender')
else:
app_id = app_identity.get_application_id()
sender = "%s <no-reply@%s.appspotmail.com>" % (app_id, app_id)
if self.app.config['log_email']:
try:
logEmail = models.LogEmail(
sender = sender,
to = to,
subject = subject,
body = body,
when = utils.get_date_time("datetimeProperty")
)
logEmail.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Email Log in datastore")
message = mail.EmailMessage()
message.sender = sender
message.to = to
message.subject = subject
message.html = body
message.send()
class LoginHandler(BaseHandler):
"""
Handler for authentication
"""
def get(self):
""" Returns a simple HTML form for login """
if self.user:
self.redirect_to('home')
params = {}
return self.render_template('login.html', **params)
def post(self):
"""
username: Get the username from POST dict
password: Get the password from POST dict
"""
if not self.form.validate():
return self.get()
username = self.form.username.data.lower()
continue_url = self.request.get('continue_url').encode('ascii', 'ignore')
try:
if utils.is_email_valid(username):
user = models.User.get_by_email(username)
if user:
auth_id = user.auth_ids[0]
else:
raise InvalidAuthIdError
else:
auth_id = "own:%s" % username
user = models.User.get_by_auth_id(auth_id)
password = self.form.password.data.strip()
            remember_me = self.request.POST.get('remember_me') == 'on'
# Password to SHA512
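            # The same utils.hashing(password, salt) transform is applied at
            # registration time (see RegisterHandler.post), so the stored
            # credential and this login attempt are hashed identically.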
password = utils.hashing(password, self.app.config.get('salt'))
# Try to login user with password
# Raises InvalidAuthIdError if user is not found
# Raises InvalidPasswordError if provided password
# doesn't match with specified user
self.auth.get_user_by_password(
auth_id, password, remember=remember_me)
# if user account is not activated, logout and redirect to home
            if not user.activated:
# logout
self.auth.unset_session()
# redirect to home with error message
resend_email_uri = self.uri_for('resend-account-activation', user_id=user.get_id(),
token=models.User.create_resend_token(user.get_id()))
message = _('Your account has not yet been activated. Please check your email to activate it or') +\
' <a href="'+resend_email_uri+'">' + _('click here') + '</a> ' + _('to resend the email.')
self.add_message(message, 'error')
return self.redirect_to('home')
# check twitter association in session
twitter_helper = twitter.TwitterAuth(self)
twitter_association_data = twitter_helper.get_association_data()
if twitter_association_data is not None:
if models.SocialUser.check_unique(user.key, 'twitter', str(twitter_association_data['id'])):
social_user = models.SocialUser(
user = user.key,
provider = 'twitter',
uid = str(twitter_association_data['id']),
extra_data = twitter_association_data
)
social_user.put()
# check facebook association
fb_data = None
try:
fb_data = json.loads(self.session['facebook'])
            except (KeyError, TypeError, ValueError):
pass
if fb_data is not None:
if models.SocialUser.check_unique(user.key, 'facebook', str(fb_data['id'])):
social_user = models.SocialUser(
user = user.key,
provider = 'facebook',
uid = str(fb_data['id']),
extra_data = fb_data
)
social_user.put()
# check linkedin association
li_data = None
try:
li_data = json.loads(self.session['linkedin'])
            except (KeyError, TypeError, ValueError):
pass
if li_data is not None:
if models.SocialUser.check_unique(user.key, 'linkedin', str(li_data['id'])):
social_user = models.SocialUser(
user = user.key,
provider = 'linkedin',
uid = str(li_data['id']),
extra_data = li_data
)
social_user.put()
# end linkedin
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
except (InvalidAuthIdError, InvalidPasswordError), e:
# Returns error message to self.response.write in
# the BaseHandler.dispatcher
message = _("Your username or password is incorrect. "
"Please try again (make sure your caps lock is off)")
self.add_message(message, 'error')
self.redirect_to('login', continue_url=continue_url) if continue_url else self.redirect_to('login')
@webapp2.cached_property
def form(self):
return forms.LoginForm(self)
class SocialLoginHandler(BaseHandler):
"""
Handler for Social authentication
"""
def get(self, provider_name):
provider = self.provider_info[provider_name]
if not self.app.config.get('enable_federated_login'):
message = _('Federated login is disabled.')
self.add_message(message, 'warning')
return self.redirect_to('login')
callback_url = "%s/social_login/%s/complete" % (self.request.host_url, provider_name)
if provider_name == "twitter":
twitter_helper = twitter.TwitterAuth(self, redirect_uri=callback_url)
self.redirect(twitter_helper.auth_url())
elif provider_name == "facebook":
self.session['linkedin'] = None
perms = ['email', 'publish_stream']
self.redirect(facebook.auth_url(self.app.config.get('fb_api_key'), callback_url, perms))
elif provider_name == 'linkedin':
self.session['facebook'] = None
link = linkedin.LinkedIn(self.app.config.get('linkedin_api'), self.app.config.get('linkedin_secret'), callback_url)
if link.request_token():
self.session['request_token']=link._request_token
self.session['request_token_secret']=link._request_token_secret
self.redirect(link.get_authorize_url())
elif provider_name == "github":
scope = 'gist'
github_helper = github.GithubAuth(self.app.config.get('github_server'), self.app.config.get('github_client_id'), \
self.app.config.get('github_client_secret'), self.app.config.get('github_redirect_uri'), scope)
self.redirect( github_helper.get_authorize_url() )
elif provider_name in models.SocialUser.open_id_providers():
continue_url = self.request.get('continue_url')
if continue_url:
dest_url=self.uri_for('social-login-complete', provider_name=provider_name, continue_url=continue_url)
else:
dest_url=self.uri_for('social-login-complete', provider_name=provider_name)
try:
login_url = users.create_login_url(federated_identity=provider['uri'], dest_url=dest_url)
self.redirect(login_url)
except users.NotAllowedError:
                self.add_message('You must enable Federated Login for this application first.<br> '
'<a href="http://appengine.google.com" target="_blank">Google App Engine Control Panel</a> -> '
'Administration -> Application Settings -> Authentication Options', 'error')
self.redirect_to('login')
else:
message = _('%s authentication is not yet implemented.' % provider.get('label'))
self.add_message(message, 'warning')
self.redirect_to('login')
class CallbackSocialLoginHandler(BaseHandler):
"""
Callback (Save Information) for Social Authentication
"""
def get(self, provider_name):
if not self.app.config.get('enable_federated_login'):
message = _('Federated login is disabled.')
self.add_message(message, 'warning')
return self.redirect_to('login')
continue_url = self.request.get('continue_url')
if provider_name == "twitter":
oauth_token = self.request.get('oauth_token')
oauth_verifier = self.request.get('oauth_verifier')
twitter_helper = twitter.TwitterAuth(self)
user_data = twitter_helper.auth_complete(oauth_token,
oauth_verifier)
logging.info('twitter user_data: ' + str(user_data))
if self.user:
# new association with twitter
user_info = models.User.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, 'twitter', str(user_data['user_id'])):
social_user = models.SocialUser(
user = user_info.key,
provider = 'twitter',
uid = str(user_data['user_id']),
extra_data = user_data
)
social_user.put()
message = _('Twitter association added.')
self.add_message(message, 'success')
else:
message = _('This Twitter account is already in use.')
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
else:
# login with twitter
social_user = models.SocialUser.get_by_provider_and_uid('twitter',
str(user_data['user_id']))
if social_user:
# Social user exists. Need authenticate related site account
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
else:
uid = str(user_data['user_id'])
email = str(user_data.get('email'))
self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
# github association
elif provider_name == "github":
# get our request code back from the social login handler above
code = self.request.get('code')
# create our github auth object
scope = 'gist'
github_helper = github.GithubAuth(self.app.config.get('github_server'), self.app.config.get('github_client_id'), \
self.app.config.get('github_client_secret'), self.app.config.get('github_redirect_uri'), scope)
# retrieve the access token using the code and auth object
access_token = github_helper.get_access_token(code)
user_data = github_helper.get_user_info(access_token)
logging.info('github user_data: ' + str(user_data))
if self.user:
                # user is already logged in so we set a new association with github
user_info = models.User.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, 'github', str(user_data['login'])):
social_user = models.SocialUser(
user = user_info.key,
provider = 'github',
uid = str(user_data['login']),
extra_data = user_data
)
social_user.put()
message = _('Github association added.')
self.add_message(message, 'success')
else:
message = _('This Github account is already in use.')
self.add_message(message, 'error')
self.redirect_to('edit-profile')
else:
# user is not logged in, but is trying to log in via github
social_user = models.SocialUser.get_by_provider_and_uid('github', str(user_data['login']))
if social_user:
# Social user exists. Need authenticate related site account
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
self.redirect_to('home')
else:
uid = str(user_data['id'])
email = str(user_data.get('email'))
self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
#end github
# facebook association
elif provider_name == "facebook":
code = self.request.get('code')
callback_url = "%s/social_login/%s/complete" % (self.request.host_url, provider_name)
token = facebook.get_access_token_from_code(code, callback_url, self.app.config.get('fb_api_key'), self.app.config.get('fb_secret'))
access_token = token['access_token']
fb = facebook.GraphAPI(access_token)
user_data = fb.get_object('me')
logging.info('facebook user_data: ' + str(user_data))
if self.user:
# new association with facebook
user_info = models.User.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, 'facebook', str(user_data['id'])):
social_user = models.SocialUser(
user = user_info.key,
provider = 'facebook',
uid = str(user_data['id']),
extra_data = user_data
)
social_user.put()
message = _('Facebook association added!')
self.add_message(message,'success')
else:
message = _('This Facebook account is already in use!')
self.add_message(message,'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
else:
# login with Facebook
social_user = models.SocialUser.get_by_provider_and_uid('facebook',
str(user_data['id']))
if social_user:
# Social user exists. Need authenticate related site account
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
else:
uid = str(user_data['id'])
email = str(user_data.get('email'))
self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
# end facebook
# association with linkedin
elif provider_name == "linkedin":
callback_url = "%s/social_login/%s/complete" % (self.request.host_url, provider_name)
link = linkedin.LinkedIn(self.app.config.get('linkedin_api'), self.app.config.get('linkedin_secret'), callback_url)
request_token = self.session['request_token']
request_token_secret= self.session['request_token_secret']
link._request_token = request_token
link._request_token_secret = request_token_secret
verifier = self.request.get('oauth_verifier')
#~ print 'test'
#~ print 'request_token= %s ; request_token_secret= %s ;verifier = %s ' % (request_token, request_token_secret, verifier)
link.access_token(verifier=verifier)
u_data = link.get_profile()
user_key = re.search(r'key=(\d+)', u_data.private_url).group(1)
user_data={'first_name':u_data.first_name, 'last_name':u_data.last_name ,'id':user_key}
self.session['linkedin'] = json.dumps(user_data)
logging.info('linkedin user_data: ' + str(user_data))
if self.user:
# new association with linkedin
user_info = models.User.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, 'linkedin', str(user_data['id'])):
social_user = models.SocialUser(
user = user_info.key,
provider = 'linkedin',
uid = str(user_data['id']),
extra_data = user_data
)
social_user.put()
message = _('Linkedin association added!')
self.add_message(message,'success')
else:
message = _('This Linkedin account is already in use!')
self.add_message(message,'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
else:
# login with Linkedin
social_user = models.SocialUser.get_by_provider_and_uid('linkedin',
str(user_data['id']))
if social_user:
# Social user exists. Need authenticate related site account
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
else:
uid = str(user_data['id'])
email = str(user_data.get('email'))
self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
#end linkedin
# google, myopenid, yahoo OpenID Providers
elif provider_name in models.SocialUser.open_id_providers():
provider_display_name = models.SocialUser.PROVIDERS_INFO[provider_name]['label']
# get info passed from OpenId Provider
from google.appengine.api import users
current_user = users.get_current_user()
if current_user:
if current_user.federated_identity():
uid = current_user.federated_identity()
else:
uid = current_user.user_id()
email = current_user.email()
else:
message = _('No user authentication information received from %s. '
'Please ensure you are logging in from an authorized OpenID Provider (OP).'
% provider_display_name)
self.add_message(message, 'error')
return self.redirect_to('login', continue_url=continue_url) if continue_url else self.redirect_to('login')
if self.user:
# add social account to user
user_info = models.User.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, provider_name, uid):
social_user = models.SocialUser(
user = user_info.key,
provider = provider_name,
uid = uid
)
social_user.put()
message = _('%s association successfully added.' % provider_display_name)
self.add_message(message, 'success')
else:
message = _('This %s account is already in use.' % provider_display_name)
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
else:
# login with OpenId Provider
social_user = models.SocialUser.get_by_provider_and_uid(provider_name, uid)
if social_user:
# Social user found. Authenticate the user
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
else:
self.create_account_from_social_provider(provider_name, uid, email, continue_url)
else:
message = _('This authentication method is not yet implemented.')
self.add_message(message, 'warning')
self.redirect_to('login', continue_url=continue_url) if continue_url else self.redirect_to('login')
def create_account_from_social_provider(self, provider_name, uid, email=None, continue_url=None, user_data=None):
"""Social user does not exist yet so create it with the federated identity provided (uid)
and create prerequisite user and log the user account in
"""
provider_display_name = models.SocialUser.PROVIDERS_INFO[provider_name]['label']
if models.SocialUser.check_unique_uid(provider_name, uid):
            # Create the user. create_user returns a tuple whose first value
            # is a bool: True means a new user was created, False means it
            # was not.
            # Assume the provider has already verified the email address, so
            # if an email is provided, set activated to True.
auth_id = "%s:%s" % (provider_name, uid)
if email:
unique_properties = ['email']
user_info = self.auth.store.user_model.create_user(
auth_id, unique_properties, email=email,
activated=True
)
else:
user_info = self.auth.store.user_model.create_user(
auth_id, activated=True
)
            if not user_info[0]:  # user_info is a (bool, user) tuple
message = _('The account %s is already in use.' % provider_display_name)
self.add_message(message, 'error')
return self.redirect_to('register')
user = user_info[1]
# create social user and associate with user
social_user = models.SocialUser(
user = user.key,
provider = provider_name,
uid = uid,
)
if user_data:
social_user.extra_data = user_data
self.session[provider_name] = json.dumps(user_data) # TODO is this needed?
social_user.put()
# authenticate user
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
message = _('Welcome! You have been registered as a new user through %s and logged in.' % provider_display_name)
self.add_message(message, 'success')
else:
message = _('This %s account is already in use.' % provider_display_name)
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
class DeleteSocialProviderHandler(BaseHandler):
"""
Delete Social association with an account
"""
@user_required
def post(self, provider_name):
if self.user:
user_info = models.User.get_by_id(long(self.user_id))
if len(user_info.get_social_providers_info()['used']) > 1 or (user_info.password is not None):
social_user = models.SocialUser.get_by_user_and_provider(user_info.key, provider_name)
if social_user:
social_user.key.delete()
message = _('%s successfully disassociated.' % provider_name)
self.add_message(message, 'success')
else:
message = _('Social account on %s not found for this user.' % provider_name)
self.add_message(message, 'error')
else:
                message = _('Social account on %s cannot be deleted for this user. '
                            'Please create a username and password to delete the social account.' % provider_name)
self.add_message(message, 'error')
self.redirect_to('edit-profile')
class LogoutHandler(BaseHandler):
"""
Destroy user session and redirect to login
"""
def get(self):
if self.user:
message = _("You've signed out successfully. Warning: Please clear all cookies and logout "
"of OpenId providers too if you logged in on a public computer.")
self.add_message(message, 'info')
self.auth.unset_session()
# User is logged out, let's try redirecting to login page
try:
self.redirect(self.auth_config['login_url'])
except (AttributeError, KeyError), e:
logging.error("Error logging out: %s" % e)
message = _("User is logged out, but there was an error on the redirection.")
self.add_message(message, 'error')
return self.redirect_to('home')
class RegisterHandler(RegisterBaseHandler):
"""
Handler for Sign Up Users
"""
def get(self):
""" Returns a simple HTML form for create a new user """
if self.user:
self.redirect_to('home')
params = {}
return self.render_template('register.html', **params)
def post(self):
""" Get fields from POST dict """
if not self.form.validate():
return self.get()
username = self.form.username.data.lower()
name = self.form.name.data.strip()
last_name = self.form.last_name.data.strip()
email = self.form.email.data.lower()
password = self.form.password.data.strip()
country = self.form.country.data
# Password to SHA512
password = utils.hashing(password, self.app.config.get('salt'))
# Passing password_raw=password so password will be hashed
# Returns a tuple, where first value is BOOL.
# If True ok, If False no new user is created
unique_properties = ['username', 'email']
auth_id = "own:%s" % username
user = self.auth.store.user_model.create_user(
auth_id, unique_properties, password_raw=password,
username=username, name=name, last_name=last_name, email=email,
ip=self.request.remote_addr, country=country
)
        if not user[0]:  # user is a (bool, user_info) tuple
            if "username" in str(user[1]):
                message = _('Sorry, the username %s is already registered.' % '<strong>{0:>s}</strong>'.format(username))
            elif "email" in str(user[1]):
                message = _('Sorry, the email %s is already registered.' % '<strong>{0:>s}</strong>'.format(email))
            else:
                message = _('Sorry, the user is already registered.')
self.add_message(message, 'error')
return self.redirect_to('register')
else:
            # User registered successfully.
            # If the user registered using the form, they must check their
            # email to activate the account.
try:
user_info = user[1]
                if not user_info.activated:
# send email
subject = _("%s Account Verification" % self.app.config.get('app_name'))
confirmation_url = self.uri_for("account-activation",
user_id=user_info.get_id(),
token = models.User.create_auth_token(user_info.get_id()),
_full = True)
# load email's template
template_val = {
"app_name": self.app.config.get('app_name'),
"username": username,
"confirmation_url": confirmation_url,
"support_url": self.uri_for("contact", _full=True)
}
body_path = "emails/account_activation.txt"
body = self.jinja2.render_template(body_path, **template_val)
email_url = self.uri_for('taskqueue-send-email')
taskqueue.add(url = email_url, params={
'to': str(email),
'subject' : subject,
'body' : body,
})
                    #### Richard: creates a BasicSettings entity so that when the user creates a
                    #### contractor profile before configuring Basic Settings, the handler finds
                    #### an entity with properties like display_full_name (instead of reading a
                    #### property from a nonexistent object).
a = bmodels.BasicSettings()
a.user = user_info.key
a.put()
message = _('You were successfully registered. '
'Please check your email to activate your account.')
self.add_message(message, 'success')
return self.redirect_to('home')
                # The account was created already activated (not via the plain
                # registration form), so log the user in directly.
db_user = self.auth.get_user_by_password(user[1].auth_ids[0], password)
# Check twitter association in session
twitter_helper = twitter.TwitterAuth(self)
twitter_association_data = twitter_helper.get_association_data()
if twitter_association_data is not None:
if models.SocialUser.check_unique(user[1].key, 'twitter', str(twitter_association_data['id'])):
social_user = models.SocialUser(
user = user[1].key,
provider = 'twitter',
uid = str(twitter_association_data['id']),
extra_data = twitter_association_data
)
social_user.put()
                # check facebook association
                fb_data = None
                try:
                    fb_data = json.loads(self.session['facebook'])
                except (KeyError, TypeError, ValueError):
                    pass
                if fb_data is not None:
                    if models.SocialUser.check_unique(user[1].key, 'facebook', str(fb_data['id'])):
                        social_user = models.SocialUser(
                            user = user[1].key,
                            provider = 'facebook',
                            uid = str(fb_data['id']),
                            extra_data = fb_data
                        )
                        social_user.put()
                # check linkedin association
                li_data = None
                try:
                    li_data = json.loads(self.session['linkedin'])
                except (KeyError, TypeError, ValueError):
                    pass
                if li_data is not None:
                    if models.SocialUser.check_unique(user[1].key, 'linkedin', str(li_data['id'])):
                        social_user = models.SocialUser(
                            user = user[1].key,
                            provider = 'linkedin',
                            uid = str(li_data['id']),
                            extra_data = li_data
                        )
                        social_user.put()
message = _('Welcome %s, you are now logged in.' % '<strong>{0:>s}</strong>'.format(username) )
self.add_message(message, 'success')
return self.redirect_to('home')
except (AttributeError, KeyError), e:
logging.error('Unexpected error creating the user %s: %s' % (username, e ))
message = _('Unexpected error creating the user %s' % username )
self.add_message(message, 'error')
return self.redirect_to('home')
class AccountActivationHandler(BaseHandler):
"""
Handler for account activation
"""
def get(self, user_id, token):
try:
if not models.User.validate_auth_token(user_id, token):
message = _('The link is invalid.')
self.add_message(message, 'error')
return self.redirect_to('home')
user = models.User.get_by_id(long(user_id))
# activate the user's account
user.activated = True
user.put()
# Login User
self.auth.get_user_by_token(int(user_id), token)
# Delete token
models.User.delete_auth_token(user_id, token)
            message = _('Congratulations, your account %s has been successfully activated.'
                        % '<strong>{0:>s}</strong>'.format(user.username))
self.add_message(message, 'success')
self.redirect_to('home')
except (AttributeError, KeyError, InvalidAuthIdError, NameError), e:
logging.error("Error activating an account: %s" % e)
            message = _('Sorry, some error occurred.')
self.add_message(message, 'error')
return self.redirect_to('home')
class ResendActivationEmailHandler(BaseHandler):
"""
Handler to resend activation email
"""
def get(self, user_id, token):
try:
if not models.User.validate_resend_token(user_id, token):
message = _('The link is invalid.')
self.add_message(message, 'error')
return self.redirect_to('home')
user = models.User.get_by_id(long(user_id))
email = user.email
            if not user.activated:
# send email
subject = _("%s Account Verification" % self.app.config.get('app_name'))
confirmation_url = self.uri_for("account-activation",
user_id = user.get_id(),
token = models.User.create_auth_token(user.get_id()),
_full = True)
# load email's template
template_val = {
"app_name": self.app.config.get('app_name'),
"username": user.username,
"confirmation_url": confirmation_url,
"support_url": self.uri_for("contact", _full=True)
}
body_path = "emails/account_activation.txt"
body = self.jinja2.render_template(body_path, **template_val)
email_url = self.uri_for('taskqueue-send-email')
taskqueue.add(url = email_url, params={
'to': str(email),
'subject' : subject,
'body' : body,
})
models.User.delete_resend_token(user_id, token)
message = _('The verification email has been resent to %s. '
'Please check your email to activate your account.' % email)
self.add_message(message, 'success')
return self.redirect_to('home')
else:
message = _('Your account has been activated. Please <a href="/login/">sign in</a> to your account.')
self.add_message(message, 'warning')
return self.redirect_to('home')
except (KeyError, AttributeError), e:
logging.error("Error resending activation email: %s" % e)
            message = _('Sorry, some error occurred.')
self.add_message(message, 'error')
return self.redirect_to('home')
class ContactHandler(BaseHandler):
"""
Handler for Contact Form
"""
def get(self):
""" Returns a simple HTML for contact form """
if self.user:
user_info = models.User.get_by_id(long(self.user_id))
if user_info.name or user_info.last_name:
# guard against either field being None before concatenating
self.form.name.data = ' '.join(n for n in (user_info.name, user_info.last_name) if n)
if user_info.email:
self.form.email.data = user_info.email
params = {
"exception" : self.request.get('exception')
}
return self.render_template('contact.html', **params)
def post(self):
""" validate contact form """
if not self.form.validate():
return self.get()
remoteip = self.request.remote_addr
user_agent = self.request.user_agent
exception = self.request.POST.get('exception')
name = self.form.name.data.strip()
email = self.form.email.data.lower()
message = self.form.message.data.strip()
try:
# parse the user agent and pick the OS key:
# Windows reports under 'os', other platforms under 'flavor'
logging.info(user_agent)
ua = httpagentparser.detect(user_agent)
os_key = 'flavor' if 'flavor' in ua else 'os'
operating_system_full_name = str(ua[os_key]['name'])
if 'version' in ua[os_key]:
operating_system_full_name += ' ' + str(ua[os_key]['version'])
if 'dist' in ua:
operating_system_full_name += ' ' + str(ua['dist'])
template_val = {
"name": name,
"email": email,
"browser": str(ua['browser']['name']),
"browser_version": str(ua['browser']['version']),
"operating_system": operating_system_full_name,
"ip": remoteip,
"message": message
}
except Exception as e:
logging.error("error getting user agent info: %s" % e)
# fall back to minimal values so the email template can still render
template_val = {"name": name, "email": email, "message": message}
try:
subject = _("Contact")
# exceptions for error pages that redirect to contact
if exception != "":
subject = subject + " (Exception error: %s)" % exception
body_path = "emails/contact.txt"
body = self.jinja2.render_template(body_path, **template_val)
email_url = self.uri_for('taskqueue-send-email')
taskqueue.add(url = email_url, params={
'to': self.app.config.get('contact_recipient'),
'subject' : subject,
'body' : body,
'sender' : self.app.config.get('contact_sender'),
})
message = _('Your message was sent successfully.')
self.add_message(message, 'success')
return self.redirect_to('contact')
except (AttributeError, KeyError), e:
logging.error('Error sending contact form: %s' % e)
message = _('Error sending the message. Please try again later.')
self.add_message(message, 'error')
return self.redirect_to('contact')
@webapp2.cached_property
def form(self):
return forms.ContactForm(self)
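# --- Editor's illustrative sketch (not part of the original handlers) ---
# httpagentparser.detect() returns a nested dict whose OS info lives under
# 'os' on Windows and under 'flavor' elsewhere, which is why ContactHandler
# picks the key dynamically. A minimal sketch of that selection:
def _demo_os_key_selection(user_agent='Mozilla/5.0 (X11; Ubuntu; Linux x86_64)'):
    ua = httpagentparser.detect(user_agent)
    os_key = 'flavor' if 'flavor' in ua else 'os'
    return ua.get(os_key, {}).get('name')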
class EditProfileHandler(BaseHandler):
"""
Handler for Edit User Profile
"""
@user_required
def get(self):
""" Returns a simple HTML form for edit profile """
params = {}
if self.user:
user_info = models.User.get_by_id(long(self.user_id))
# self.form.username.data = user_info.username
self.form.name.data = user_info.name
self.form.last_name.data = user_info.last_name
self.form.country.data = user_info.country
providers_info = user_info.get_social_providers_info()
if not user_info.password:
params['local_account'] = False
else:
params['local_account'] = True
params['used_providers'] = providers_info['used']
params['unused_providers'] = providers_info['unused']
params['country'] = user_info.country
return self.render_template('edit_profile.html', **params)
def post(self):
""" Get fields from POST dict """
if not self.form.validate():
return self.get()
# username = self.form.username.data.lower()
name = self.form.name.data.strip()
last_name = self.form.last_name.data.strip()
country = self.form.country.data
try:
user_info = models.User.get_by_id(long(self.user_id))
try:
message=''
# update username if it has changed and it isn't already taken
"""
if username != user_info.username:
user_info.unique_properties = ['username','email']
uniques = [
'User.username:%s' % username,
'User.auth_id:own:%s' % username,
]
# Create the unique username and auth_id.
success, existing = Unique.create_multi(uniques)
if success:
# free old uniques
Unique.delete_multi(['User.username:%s' % user_info.username, 'User.auth_id:own:%s' % user_info.username])
# The unique values were created, so we can save the user.
user_info.username=username
user_info.auth_ids[0]='own:%s' % username
message+= _('Your new username is %s.' % '<strong>{0:>s}</strong>'.format(username) )
else:
message+= _('The username %s is already taken. Please choose another.'
% '<strong>{0:>s}</strong>'.format(username) )
# At least one of the values is not unique.
self.add_message(message, 'error')
return self.get()
"""
user_info.name=name
user_info.last_name=last_name
user_info.country=country
user_info.put()
message = (message + ' ' if message else '') + _('Settings have been successfully saved.')
self.add_message(message, 'success')
return self.get()
except (AttributeError, KeyError, ValueError), e:
logging.error('Error updating profile: %s' % e)
message = _('Unable to update profile. Please try again later.')
self.add_message(message, 'error')
return self.get()
except (AttributeError, TypeError), e:
login_error_message = _('Sorry, you are not logged in.')
self.add_message(login_error_message, 'error')
self.redirect_to('login')
@webapp2.cached_property
def form(self):
return forms.EditProfileForm(self)
class EditPasswordHandler(BaseHandler):
"""
Handler for Edit User Password
"""
@user_required
def get(self):
""" Returns a simple HTML form for editing password """
params = {}
return self.render_template('edit_password.html', **params)
def post(self):
""" Get fields from POST dict """
if not self.form.validate():
return self.get()
current_password = self.form.current_password.data.strip()
password = self.form.password.data.strip()
try:
user_info = models.User.get_by_id(long(self.user_id))
auth_id = "own:%s" % user_info.username
# Password to SHA512
current_password = utils.hashing(current_password, self.app.config.get('salt'))
try:
user = models.User.get_by_auth_password(auth_id, current_password)
# Password to SHA512
password = utils.hashing(password, self.app.config.get('salt'))
user.password = security.generate_password_hash(password, length=12)
user.put()
# send email
subject = self.app.config.get('app_name') + " Account Password Changed"
# load email's template
template_val = {
"app_name": self.app.config.get('app_name'),
"first_name": user.name,
"username": user.username,
"email": user.email,
"reset_password_url": self.uri_for("password-reset", _full=True)
}
email_body_path = "emails/password_changed.txt"
email_body = self.jinja2.render_template(email_body_path, **template_val)
email_url = self.uri_for('taskqueue-send-email')
taskqueue.add(url = email_url, params={
'to': user.email,
'subject' : subject,
'body' : email_body,
'sender' : self.app.config.get('contact_sender'),
})
#Login User
self.auth.get_user_by_password(user.auth_ids[0], password)
self.add_message(_('Password changed successfully.'), 'success')
return self.redirect_to('edit-profile')
except (InvalidAuthIdError, InvalidPasswordError), e:
# Returns error message to self.response.write in
# the BaseHandler.dispatcher
message = _("Incorrect password! Please enter your current password to change your account settings.")
self.add_message(message, 'error')
return self.redirect_to('edit-password')
except (AttributeError,TypeError), e:
login_error_message = _('Sorry, you are not logged in.')
self.add_message(login_error_message, 'error')
self.redirect_to('login')
@webapp2.cached_property
def form(self):
if self.is_mobile:
return forms.EditPasswordMobileForm(self)
else:
return forms.EditPasswordForm(self)
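# --- Editor's sketch (illustrative, not part of the original handlers) ---
# EditPasswordHandler and EditEmailHandler share a two-step scheme: the raw
# password is first keyed with the app-wide salt (utils.hashing, a SHA512
# digest in this boilerplate) and only then passed through webapp2's
# security.generate_password_hash for storage and verification.
def _hash_for_storage(raw_password, salt):
    keyed = utils.hashing(raw_password, salt)  # salted SHA512, as used above
    return security.generate_password_hash(keyed, length=12)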
class EditEmailHandler(BaseHandler):
"""
Handler for Edit User's Email
"""
@user_required
def get(self):
""" Returns a simple HTML form for edit email """
params = {}
if self.user:
user_info = models.User.get_by_id(long(self.user_id))
params['current_email'] = user_info.email
return self.render_template('edit_email.html', **params)
def post(self):
""" Get fields from POST dict """
if not self.form.validate():
return self.get()
new_email = self.form.new_email.data.strip()
password = self.form.password.data.strip()
try:
user_info = models.User.get_by_id(long(self.user_id))
auth_id = "own:%s" % user_info.username
# Password to SHA512
password = utils.hashing(password, self.app.config.get('salt'))
try:
# authenticate user by its password
user = models.User.get_by_auth_password(auth_id, password)
# if the user changed his/her email address
if new_email != user.email:
# check whether the new email has been used by another user
aUser = models.User.get_by_email(new_email)
if aUser is not None:
message = _("The email %s is already registered." % new_email)
self.add_message(message, 'error')
return self.redirect_to("edit-email")
# send email
subject = _("%s Email Changed Notification" % self.app.config.get('app_name'))
user_token = models.User.create_auth_token(self.user_id)
confirmation_url = self.uri_for("email-changed-check",
user_id = user_info.get_id(),
encoded_email = utils.encode(new_email),
token = user_token,
_full = True)
# load email's template
template_val = {
"app_name": self.app.config.get('app_name'),
"first_name": user.name,
"username": user.username,
"new_email": new_email,
"confirmation_url": confirmation_url,
"support_url": self.uri_for("contact", _full=True)
}
old_body_path = "emails/email_changed_notification_old.txt"
old_body = self.jinja2.render_template(old_body_path, **template_val)
new_body_path = "emails/email_changed_notification_new.txt"
new_body = self.jinja2.render_template(new_body_path, **template_val)
email_url = self.uri_for('taskqueue-send-email')
taskqueue.add(url = email_url, params={
'to': user.email,
'subject' : subject,
'body' : old_body,
})
taskqueue.add(url = email_url, params={
'to': new_email,
'subject' : subject,
'body' : new_body,
})
# display successful message
msg = _("Please check your new email for confirmation. Your email will be updated after confirmation.")
self.add_message(msg, 'success')
return self.redirect_to('edit-profile')
else:
self.add_message(_("You didn't change your email."), "warning")
return self.redirect_to("edit-email")
except (InvalidAuthIdError, InvalidPasswordError), e:
# Returns error message to self.response.write in
# the BaseHandler.dispatcher
message = _("Incorrect password! Please enter your current password to change your account settings.")
self.add_message(message, 'error')
return self.redirect_to('edit-email')
except (AttributeError,TypeError), e:
login_error_message = _('Sorry, you are not logged in.')
self.add_message(login_error_message,'error')
self.redirect_to('login')
@webapp2.cached_property
def form(self):
return forms.EditEmailForm(self)
class PasswordResetHandler(BaseHandler):
"""
Password Reset Handler with Captcha
"""
def get(self):
chtml = captcha.displayhtml(
public_key=self.app.config.get('captcha_public_key'),
use_ssl=(self.request.scheme == 'https'),
error = None)
if self.app.config.get('captcha_public_key') == "PUT_YOUR_RECAPTCHA_PUBLIC_KEY_HERE" or \
self.app.config.get('captcha_private_key') == "PUT_YOUR_RECAPTCHA_PRIVATE_KEY_HERE":
chtml = '<div class="alert alert-error"><strong>Error</strong>: You have to ' \
'<a href="http://www.google.com/recaptcha/whyrecaptcha" target="_blank">sign up ' \
'for API keys</a> in order to use reCAPTCHA.</div>' \
'<input type="hidden" name="recaptcha_challenge_field" value="manual_challenge" />' \
'<input type="hidden" name="recaptcha_response_field" value="manual_challenge" />'
params = {
'captchahtml': chtml,
'public_key': self.app.config.get('captcha_public_key'),
}
return self.render_template('password_reset.html', **params)
def post(self):
# check captcha
challenge = self.request.POST.get('recaptcha_challenge_field')
response = self.request.POST.get('recaptcha_response_field')
remoteip = self.request.remote_addr
cResponse = captcha.submit(
challenge,
response,
self.app.config.get('captcha_private_key'),
remoteip)
if cResponse.is_valid:
# captcha was valid... carry on..nothing to see here
pass
else:
_message = _('Wrong image verification code. Please try again.')
self.add_message(_message, 'error')
return self.redirect_to('password-reset')
#check if we got an email or username
email_or_username = str(self.request.POST.get('email_or_username')).lower().strip()
if utils.is_email_valid(email_or_username):
user = models.User.get_by_email(email_or_username)
_message = _("If the e-mail address you entered") + " (<strong>%s</strong>) " % email_or_username
else:
auth_id = "own:%s" % email_or_username
user = models.User.get_by_auth_id(auth_id)
_message = _("If the username you entered") + " (<strong>%s</strong>) " % email_or_username
_message = _message + _("is associated with an account in our records, you will receive "
"an e-mail from us with instructions for resetting your password. "
"<br>If you don't receive instructions within a minute or two, "
"check your email's spam and junk filters, or ") +\
'<a href="' + self.uri_for('contact') + '">' + _('contact us') + '</a> ' + _("for further assistance.")
if user is not None:
user_id = user.get_id()
token = models.User.create_auth_token(user_id)
email_url = self.uri_for('taskqueue-send-email')
reset_url = self.uri_for('password-reset-check', user_id=user_id, token=token, _full=True)
subject = _("%s Password Assistance" % self.app.config.get('app_name'))
# load email's template
template_val = {
"username": user.username,
"email": user.email,
"reset_password_url": reset_url,
"support_url": self.uri_for("contact", _full=True),
"app_name": self.app.config.get('app_name'),
}
body_path = "emails/reset_password.txt"
body = self.jinja2.render_template(body_path, **template_val)
taskqueue.add(url = email_url, params={
'to': user.email,
'subject' : subject,
'body' : body,
'sender' : self.app.config.get('contact_sender'),
})
self.add_message(_message, 'warning')
return self.redirect_to('login')
class PasswordResetCompleteHandler(BaseHandler):
"""
Handler to process the password-reset link the user received
"""
def get(self, user_id, token):
verify = models.User.get_by_auth_token(int(user_id), token)
params = {}
if verify[0] is None:
message = _('The URL you tried to use is either incorrect or no longer valid. '
'Enter your details again below to get a new one.')
self.add_message(message, 'warning')
return self.redirect_to('password-reset')
else:
return self.render_template('password_reset_complete.html', **params)
def post(self, user_id, token):
verify = models.User.get_by_auth_token(int(user_id), token)
user = verify[0]
password = self.form.password.data.strip()
if user and self.form.validate():
# Password to SHA512
password = utils.hashing(password, self.app.config.get('salt'))
user.password = security.generate_password_hash(password, length=12)
user.put()
# Delete token
models.User.delete_auth_token(int(user_id), token)
# Login User
self.auth.get_user_by_password(user.auth_ids[0], password)
self.add_message(_('Password changed successfully.'), 'success')
return self.redirect_to('home')
else:
self.add_message(_('The two passwords must match.'), 'error')
return self.redirect_to('password-reset-check', user_id=user_id, token=token)
@webapp2.cached_property
def form(self):
if self.is_mobile:
return forms.PasswordResetCompleteMobileForm(self)
else:
return forms.PasswordResetCompleteForm(self)
class EmailChangedCompleteHandler(BaseHandler):
"""
Handler for completed email change
Called when the user clicks the confirmation link from the email
"""
def get(self, user_id, encoded_email, token):
verify = models.User.get_by_auth_token(int(user_id), token)
email = utils.decode(encoded_email)
if verify[0] is None:
message = _('The URL you tried to use is either incorrect or no longer valid.')
self.add_message(message, 'warning')
self.redirect_to('home')
else:
# save new email
user = verify[0]
user.email = email
user.put()
# delete token
models.User.delete_auth_token(int(user_id), token)
# add successful message and redirect
message = _('Your email has been successfully updated.')
self.add_message(message, 'success')
self.redirect_to('edit-profile')
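# --- Editor's note (assumption, illustrative) ---
# utils.encode/utils.decode are assumed here to be a reversible, URL-safe
# transform of the address, since the encoded value travels inside the
# confirmation URL and is decoded back in the handler above. Any such
# transform satisfies that contract; base64 is shown purely as an example.
def _demo_email_roundtrip(email='user@example.com'):
    import base64
    encoded = base64.urlsafe_b64encode(email)  # hypothetical stand-in
    return base64.urlsafe_b64decode(encoded) == email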
class HomeRequestHandler(RegisterBaseHandler):
"""
Handler to show the home page
"""
def get(self):
""" Returns a simple HTML form for home """
params = {}
return self.render_template('home.html', **params)
|
rchaber/publishbay
|
boilerplate/handlers.py
|
Python
|
lgpl-3.0
| 65,778
|
[
"VisIt"
] |
c3cbb35e14e8fcd432f652eb84d4ed6e607610293c41cfcc923c46c6d080e18d
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import numbers
def is_integral(x):
"""Any integer value"""
try:
return isinstance(int(x), numbers.Integral) and not isinstance(x, bool)
except (ValueError, TypeError):
return False
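# --- Editor's sanity sketch for the validator above (illustrative) ---
def _is_integral_examples():
    # anything coercible to an int passes; bools and non-numeric values fail
    assert is_integral('1024') and is_integral(8192)
    assert not is_integral(True) and not is_integral('many')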
class Netcdf(AutotoolsPackage):
"""NetCDF is a set of software libraries and self-describing,
machine-independent data formats that support the creation, access,
and sharing of array-oriented scientific data."""
homepage = "http://www.unidata.ucar.edu/software/netcdf"
url = "http://www.gfd-dennou.org/arch/netcdf/unidata-mirror/netcdf-4.3.3.tar.gz"
# Version 4.4.1.1 is having problems in tests
# https://github.com/Unidata/netcdf-c/issues/343
version('4.4.1.1', '503a2d6b6035d116ed53b1d80c811bda')
# netcdf@4.4.1 can crash on you (in real life and in tests). See:
# https://github.com/Unidata/netcdf-c/issues/282
version('4.4.1', '7843e35b661c99e1d49e60791d5072d8')
version('4.4.0', 'cffda0cbd97fdb3a06e9274f7aef438e')
version('4.3.3.1', '5c9dad3705a3408d27f696e5b31fb88c')
version('4.3.3', '5fbd0e108a54bd82cb5702a73f56d2ae')
variant('mpi', default=True, description='Enables MPI parallelism')
variant('hdf4', default=False, description='Enable HDF4 support')
variant('shared', default=True, description='Enable shared library')
variant('parallel-netcdf', default=False, description='Enable PnetCDF support')
variant('dap', default=False, description='Enable DAP support')
variant('cdmremote', default=False, description='Enable CDM Remote support')
# These variants control the number of dimensions (i.e. coordinates and
# attributes) and variables (e.g. time, entity ID, number of coordinates)
# that can be used in any particular NetCDF file.
variant(
'maxdims',
default=1024,
description='Defines the maximum number of dimensions in NetCDF files.',
values=is_integral
)
variant(
'maxvars',
default=8192,
description='Defines the maximum number of variables in NetCDF files.',
values=is_integral
)
depends_on("m4", type='build')
depends_on("hdf", when='+hdf4')
depends_on("curl@7.18.0:", when='+dap')
depends_on("curl@7.18.0:", when='+cdmremote')
depends_on('parallel-netcdf', when='@4.2.1.1:+parallel-netcdf')
# Required for NetCDF-4 support
depends_on("zlib@1.2.5:")
depends_on('hdf5')
# NetCDF 4.4.0 and prior have compatibility issues with HDF5 1.10 and later
# https://github.com/Unidata/netcdf-c/issues/250
depends_on('hdf5@:1.8', when='@:4.4.0')
def patch(self):
try:
max_dims = int(self.spec.variants['maxdims'].value)
max_vars = int(self.spec.variants['maxvars'].value)
except (ValueError, TypeError):
raise TypeError('NetCDF variant values max[dims|vars] must be '
'integer values.')
ff = FileFilter(join_path('include', 'netcdf.h'))
ff.filter(r'^(#define\s+NC_MAX_DIMS\s+)\d+(.*)$',
r'\1{0}\2'.format(max_dims))
ff.filter(r'^(#define\s+NC_MAX_VARS\s+)\d+(.*)$',
r'\1{0}\2'.format(max_vars))
def configure_args(self):
spec = self.spec
# Workaround until variant forwarding works properly
if '+mpi' in spec and spec.satisfies('^hdf5~mpi'):
raise RuntimeError('Invalid spec. Package netcdf requires '
'hdf5+mpi, but spec asked for hdf5~mpi.')
# Environment variables
CFLAGS = []
CPPFLAGS = []
LDFLAGS = []
LIBS = []
config_args = [
"--enable-fsync",
"--enable-v2",
"--enable-utilities",
"--enable-static",
"--enable-largefile",
# necessary for HDF5 support
"--enable-netcdf-4",
"--enable-dynamic-loading",
]
if '+shared' in spec:
config_args.append('--enable-shared')
else:
config_args.append('--disable-shared')
# We don't have shared libraries but we still want it to be
# possible to use this library in shared builds
CFLAGS.append(self.compiler.pic_flag)
if '+dap' in spec:
config_args.append('--enable-dap')
else:
config_args.append('--disable-dap')
if '+cdmremote' in spec:
config_args.append('--enable-cdmremote')
else:
config_args.append('--disable-cdmremote')
if '+dap' in spec or '+cdmremote' in spec:
# Make sure Netcdf links against Spack's curl, otherwise it may
# pick up system's curl, which can give link errors, e.g.:
# undefined reference to `SSL_CTX_use_certificate_chain_file`
LIBS.append("-lcurl")
CPPFLAGS.append("-I%s" % spec['curl'].prefix.include)
LDFLAGS.append("-L%s" % spec['curl'].prefix.lib)
if '+mpi' in spec:
config_args.append('--enable-parallel4')
config_args.append('CC=%s' % spec['mpi'].mpicc)
CPPFLAGS.append("-I%s/include" % spec['hdf5'].prefix)
LDFLAGS.append("-L%s/lib" % spec['hdf5'].prefix)
# HDF4 support
# As of NetCDF 4.1.3, "--with-hdf4=..." is no longer a valid option
# You must use the environment variables CPPFLAGS and LDFLAGS
if '+hdf4' in spec:
config_args.append("--enable-hdf4")
CPPFLAGS.append("-I%s/include" % spec['hdf'].prefix)
LDFLAGS.append("-L%s/lib" % spec['hdf'].prefix)
LIBS.append("-l%s" % "jpeg")
if '+szip' in spec:
CPPFLAGS.append("-I%s/include" % spec['szip'].prefix)
LDFLAGS.append("-L%s/lib" % spec['szip'].prefix)
LIBS.append("-l%s" % "sz")
# PnetCDF support
if '+parallel-netcdf' in spec:
config_args.append('--enable-pnetcdf')
config_args.append('CC=%s' % spec['mpi'].mpicc)
CPPFLAGS.append("-I%s/include" % spec['parallel-netcdf'].prefix)
LDFLAGS.append("-L%s/lib" % spec['parallel-netcdf'].prefix)
# Fortran support
# In version 4.2+, NetCDF-C and NetCDF-Fortran have split.
# Use the netcdf-fortran package to install Fortran support.
config_args.append('CFLAGS=%s' % ' '.join(CFLAGS))
config_args.append('CPPFLAGS=%s' % ' '.join(CPPFLAGS))
config_args.append('LDFLAGS=%s' % ' '.join(LDFLAGS))
config_args.append('LIBS=%s' % ' '.join(LIBS))
return config_args
def check(self):
# h5_test fails when run in parallel
make('check', parallel=False)
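# --- Editor's usage note (illustrative, not part of the package) ---
# With the variants above one could, for example, run
#     spack install netcdf+mpi+dap maxdims=2048 maxvars=16384
# and patch() would rewrite NC_MAX_DIMS / NC_MAX_VARS in include/netcdf.h
# before configure_args() assembles the autotools command line.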
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/netcdf/package.py
|
Python
|
lgpl-2.1
| 8,015
|
[
"NetCDF"
] |
e44e89a8ddfe99648e9bdf5e506af250e3ce1f64e794c6548e192b73fe54d19e
|
# This file is part of Merlin.
# Merlin is the Copyright (C)2008,2009,2010 of Robin K. Hansen, Elliot Rosemarine, Andreas Jacobsen.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from sqlalchemy.sql.functions import count
from Core.db import session
from Core.maps import Galaxy, Planet, Alliance, Intel
from Core.loadable import loadable, route
class bumchums(loadable):
"""Pies"""
usage = " <alliance> [number]"
@route(r"(\S+)(?:\s+(\d+))?", access = "member")
def execute(self, message, user, params):
alliance = Alliance.load(params.group(1))
if alliance is None:
message.reply("No alliance matching '%s' found"%(params.group(1),))
return
bums = int(params.group(2) or 2)
Q = session.query(Galaxy.x, Galaxy.y, count())
Q = Q.join(Galaxy.planets)
Q = Q.join(Planet.intel)
Q = Q.filter(Galaxy.active == True)
Q = Q.filter(Planet.active == True)
Q = Q.filter(Intel.alliance==alliance)
Q = Q.group_by(Galaxy.x, Galaxy.y)
Q = Q.having(count() >= bums)
result = Q.all()
if len(result) < 1:
message.reply("No galaxies with at least %s bumchums from %s"%(bums,alliance.name,))
return
prev=[]
for x, y, chums in result:
prev.append("%s:%s (%s)"%(x, y, chums))
reply="Galaxies with at least %s bums from %s: "%(bums,alliance.name)+ ' | '.join(prev)
message.reply(reply)
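# --- Editor's sketch (hypothetical names, not part of Merlin) ---
# Generic shape of the aggregate used above: group rows on a key and keep
# only groups with at least n members via having(count() >= n).
def _groups_with_min_members(session, key_col, n):
    q = session.query(key_col, count()).group_by(key_col)
    return q.having(count() >= n).all()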
|
ellonweb/merlin
|
Hooks/intel/bumchums.py
|
Python
|
gpl-2.0
| 2,299
|
[
"Galaxy"
] |
badf135dee3da231841c02821570ac8f64c3ab5dfe0aeabc7b73a0130a24495c
|
#!/usr/bin/env python
"""
Created on Mon Feb 01 15:03:56 2016
@author: Nablaquabla
"""
import h5py
import os
import numpy as np
import easyfit as ef
import sys
# ============================================================================
# Run program
# ============================================================================
def main(args):
mainDir = args[1]
run = args[2]
# Declare main and run dirs
runDir = mainDir + run
# Get all days in given run folder
daysInRun = [x.split('.')[0] for x in os.listdir(runDir)]
speCharges = {'Time': []}
# For each day in the run folder read the HDF5 file and fit SPEQ spectra
for day in daysInRun:
h5In = h5py.File(runDir + '/' + day + '.h5', 'r+')
# Get SPE charge fits for the current day
speCharges['Time'] = h5In['/SPEQ/vanilla/Times'][...]
# For both signal and background window calculate the number of PE from charge for both gaussian and polya spe dists
for wd in ['S','B']:
# Get charge data and timestamps from the hdf5 file
times = h5In['/%s/timestamp'%wd][...]
# Variables for getting the correct SPEQ from fits based on the timestamp
qIdx = 0
qSize = len(speCharges['Time']) - 1
qIdxUpdate = True
speQIdxArray = []
if qSize == 0:
qIdxUpdate = False
# For each event get the timestamp and charge. Get the correct SPEQ and convert the charge to NPE
for t in times:
if qIdxUpdate:
if t >= speCharges['Time'][qIdx+1]:
qIdx += 1
if qIdx >= qSize:
qIdxUpdate = False
speQIdxArray.append(qIdx)
h5key = '/%s/speQindex'%wd
if h5key in h5In:
del h5In[h5key]
h5In.create_dataset(h5key, data=speQIdxArray)
h5In.close()
# ============================================================================
# Run program
# ============================================================================
if __name__ == '__main__':
main(sys.argv)
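# --- Editor's note (illustrative): the per-event loop in main() has a
# compact vectorized equivalent, assuming charge_times is sorted ascending
# (as the loop itself assumes) ---
def _spe_indices_vectorized(event_times, charge_times):
    # index of the last charge_times entry <= each event time, clipped to
    # the valid range -- matching the qIdx bookkeeping above
    idx = np.searchsorted(charge_times, event_times, side='right') - 1
    return np.clip(idx, 0, len(charge_times) - 1)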
|
Nablaquabla/csi-analysis
|
_getSPEQIndex.py
|
Python
|
gpl-3.0
| 2,267
|
[
"Gaussian"
] |
2229d86c4f6daeb47145daa3a515473346b9182d11e488c89b1a482a692c72a9
|
## Automatically adapted for numpy.oldnumeric Mar 26, 2007 by alter_code1.py
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
## last $Author: graik $
## last $Date: 2007-06-22 17:34:10 +0200 (Fri, 22 Jun 2007) $
## $Revision: 459 $
"""
Fetch a PDBModel from the remote or a local NCBI PDB database.
@see L{PDBModel}
@see L{PDBParserFactory}
"""
import numpy.oldnumeric as N
import urllib, re, tempfile, os
import gzip, subprocess, cStringIO
import Biskit.tools as T
import Biskit.settings as settings
import Biskit as B
from PDBParser import PDBParser, PDBParserError
from PDBParseModel import PDBParseModel
class PDBParseNCBI( PDBParseModel ):
ex_resolution = re.compile(\
'REMARK 2 RESOLUTION\. *([0-9\.]+|NOT APPLICABLE)' )
## resolution assigned to NMR structures
NMR_RESOLUTION = 3.5
@staticmethod
def supports( source ):
"""
The method is static and can thus be called directly with the parser
class rather than with an instance::
>>> if ParsePDBModel.supports( model ):
>>> ...
@return: True if the given source is supported by this parser
@rtype: bool
"""
r = isinstance( source, str )
return r and len(source) == 4
@staticmethod
def description():
"""
The method is static and can thus be called directly with the parser
class rather than with an instance::
>>> if ParsePDBModel.description():
>>> ...
@return: short free text description of the supported format
@rtype: str
"""
return 'fetch PDB entry from NCBI'
def getLocalPDBHandle( self, id, db_path=settings.pdb_path ):
"""
Get the coordinate file from a local pdb database.
@param id: pdb code, 4 characters
@type id: str
@param db_path: path to local pdb database
(default: L{settings.pdb_path})
@type db_path: str
@return: the requested pdb file as a file handle
@rtype: open file handle
@raise PDBParserError: if couldn't find PDB file
"""
id = str.lower( id )
filenames = [os.path.join( db_path, '%s.pdb' % id),
db_path + '/pdb%s.ent' % id,
db_path + '/%s/pdb%s.ent.Z' %( id[1:3], id ) ]
for f in filenames:
if os.path.exists( f ):
## gzipped pdb file
if f[-3:]=='.gz':
return gzip.open(f)
## the gzip module doesn't handle .Z files
## doesn't return open file handle
elif f[-2:]=='.Z':
p = subprocess.Popen( [ 'gunzip', '-c', f ],
stdout=subprocess.PIPE )
return p.communicate()[0]
## uncompressed
else:
return open(f)
raise PDBParserError( "Couldn't find PDB file locally.")
def getRemotePDBHandle( self, id, rcsb_url=settings.rcsb_url ):
"""
Get the coordinate file remotely from the RCSB.
@param id: pdb code, 4 characters
@type id: str
@param rcsb_url: template url for pdb download
(default: L{settings.rcsb_url})
@type rcsb_url: str
@return: the requested pdb file as a file handle
@rtype: open file handle
@raise PDBParserError: if couldn't retrieve PDB file
"""
try:
from Bio import File
except ImportError:
raise PDBParserError('Could not find Biopython - ' + \
'remote fetching of PDBs is not supported.')
handle = urllib.urlopen( rcsb_url% (id,id) )
uhandle = File.UndoHandle(handle)
if not uhandle.peekline():
raise PDBParserError( "Couldn't retrieve ", rcsb_url )
return uhandle
def parsePdbFromHandle(self, handle, first_model_only=True ):
"""
Parse PDB from file/socket or string handle into memory.
@param handle: fresh open file/socket handle to PDB ressource or string
@type handle: open file-like object or str
@param first_model_only: only take first of many NMR models [True]
@type first_model_only: bool
@return: pdb file as list of strings, dictionary with resolution
@rtype: [str], {'resolution':float }
@raise PDBParserError: if passed in string is too short
"""
lines = []
res_match = None
infos = {}
if type( handle ) is str:
if len(handle) < 5000:
raise PDBParserError( "Couldn't extract PDB Info." )
handle = cStringIO.StringIO( handle )
## if handle.peekline()[:6] != 'TITLE':
## raise PDBParserError, 'Ressource does not seem to be a PDB:\n%r' %\
## handle.peekline()
for l in handle:
lines += [ l ]
res_match = res_match or self.ex_resolution.search( l )
if first_model_only and l[:6] == 'ENDMDL':
break
if len(lines) < 10 and '<div>' in lines[0]:
raise PDBParserError, 'No PDB found with this ID.'
if res_match:
if res_match.groups()[0] == 'NOT APPLICABLE':
infos['resolution'] = self.NMR_RESOLUTION
else:
infos['resolution'] = float( res_match.groups()[0] )
else:
raise PDBParserError, 'No resolution record found in PDB.'
return lines, infos
def fetchPDB( self, id ):
try:
h = self.getLocalPDBHandle( id )
except:
h = self.getRemotePDBHandle( id )
fname = tempfile.mktemp( '.pdb', 'ncbiparser_' )
lines, infos = self.parsePdbFromHandle( h, first_model_only=1 )
## close if it is a handle
try: h.close()
except:
pass
f = open( fname, 'w', 1 )
f.writelines( lines )
f.close()
m = B.PDBModel( fname )
m.disconnect()
m.pdbCode = id
m.info.update( infos )
T.tryRemove( fname )
return m
def update( self, model, source, skipRes=None, updateMissing=0, force=0,
headPatterns=[]):
"""
Update empty or missing fields of model from the source.
Profiles that are taken from the source are labeled 'changed'=0.
The same holds for coordinates (xyzChanged=0).
However, existing profiles or coordinates or fields remain untouched.
@param model: existing model
@type model: PDBModel
@param source: PDB code
@type source: str
@param skipRes: list residue names that should not be parsed
@type skipRes: [ str ]
@param updateMissing: check source for additional profiles [0]
@type updateMissing: 1|0
"""
try:
if force or updateMissing or self.needsUpdate( model ):
s = self.fetchPDB( source )
super( PDBParseNCBI, self ).update(
model, s, skipRes=skipRes, updateMissing=updateMissing,
force=force )
except Exception, why:
raise PDBParserError, "Cannot fetch PDB from %s, "\
% str(source) + "Reason:\n" + str(why)
## override source set by PDBParseModel
model.source = source
#############
## TESTING
#############
import Biskit.test as BT
class Test(BT.BiskitTest):
"""Test"""
def test_PDBParseNCBI( self ):
"""PDBParseNCBI test"""
## loading output file from X-plor
if self.local:
print 'Loading pdb file ..'
self.p = PDBParseNCBI()
self.m = self.p.parse2new( '1A2P')
self.assert_( len(self.m) == 3042 )
def test_PDBParseNCBI_fail(self):
"""PDBParseNCBI wrong ID test"""
## loading output file from X-plor
if self.local:
print 'Requesting non-existing ID ..'
self.p = PDBParseNCBI()
with self.assertRaises(PDBParserError):
self.p.parse2new('liv5')
if __name__ == '__main__':
BT.localTest()
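## --- Editor's usage sketch (illustrative; the remote path needs network
## access and Biopython) ---
## p = PDBParseNCBI()
## if PDBParseNCBI.supports( '1A2P' ):
##     m = p.parse2new( '1A2P' )   ## PDBModel with m.info['resolution'] set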
|
ostrokach/biskit
|
Biskit/PDBParseNCBI.py
|
Python
|
gpl-3.0
| 9,024
|
[
"Biopython"
] |
1f5a991dab9e26456be6bd0cb87651c36968813a967730c84851c83b8bad1282
|
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from . import MinCovDet
from ..utils.validation import check_is_fitted
from ..metrics import accuracy_score
from ..base import OutlierMixin
class EllipticEnvelope(OutlierMixin, MinCovDet):
"""An object for detecting outliers in a Gaussian distributed dataset.
Read more in the :ref:`User Guide <outlier_detection>`.
Parameters
----------
store_precision : bool, default=True
Specify if the estimated precision is stored.
assume_centered : bool, default=False
If True, the support of robust location and covariance estimates
is computed, and a covariance estimate is recomputed from it,
without centering the data.
Useful when working with data whose mean is close to, but not
exactly, zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, default=None
The proportion of points to be included in the support of the raw
MCD estimate. If None, the minimum value of support_fraction will
be used within the algorithm: `[n_samples + n_features + 1] / 2`.
Range is (0, 1).
contamination : float, default=0.1
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. Range is (0, 0.5].
random_state : int, RandomState instance or None, default=None
Determines the pseudo random number generator for shuffling
the data. Pass an int for reproducible results across multiple function
calls. See :term:`Glossary <random_state>`.
Attributes
----------
location_ : ndarray of shape (n_features,)
Estimated robust location.
covariance_ : ndarray of shape (n_features, n_features)
Estimated robust covariance matrix.
precision_ : ndarray of shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : ndarray of shape (n_samples,)
A mask of the observations that have been used to compute the
robust estimates of location and shape.
offset_ : float
Offset used to define the decision function from the raw scores.
We have the relation: ``decision_function = score_samples - offset_``.
The offset depends on the contamination parameter and is defined in
such a way we obtain the expected number of outliers (samples with
decision function < 0) in training.
.. versionadded:: 0.20
raw_location_ : ndarray of shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : ndarray of shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : ndarray of shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
dist_ : ndarray of shape (n_samples,)
Mahalanobis distances of the training set (on which :meth:`fit` is
called) observations.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
EmpiricalCovariance : Maximum likelihood covariance estimator.
GraphicalLasso : Sparse inverse covariance estimation
with an l1-penalized estimator.
LedoitWolf : LedoitWolf Estimator.
MinCovDet : Minimum Covariance Determinant
(robust estimator of covariance).
OAS : Oracle Approximating Shrinkage Estimator.
ShrunkCovariance : Covariance estimator with shrinkage.
Notes
-----
Outlier detection from covariance estimation may break or not
perform well in high-dimensional settings. In particular, one should
always ensure that ``n_samples > n_features ** 2``.
References
----------
.. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the
minimum covariance determinant estimator" Technometrics 41(3), 212
(1999)
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import EllipticEnvelope
>>> true_cov = np.array([[.8, .3],
... [.3, .4]])
>>> X = np.random.RandomState(0).multivariate_normal(mean=[0, 0],
... cov=true_cov,
... size=500)
>>> cov = EllipticEnvelope(random_state=0).fit(X)
>>> # predict returns 1 for an inlier and -1 for an outlier
>>> cov.predict([[0, 0],
... [3, 3]])
array([ 1, -1])
>>> cov.covariance_
array([[0.7411..., 0.2535...],
[0.2535..., 0.3053...]])
>>> cov.location_
array([0.0813... , 0.0427...])
"""
def __init__(
self,
*,
store_precision=True,
assume_centered=False,
support_fraction=None,
contamination=0.1,
random_state=None,
):
super().__init__(
store_precision=store_precision,
assume_centered=assume_centered,
support_fraction=support_fraction,
random_state=random_state,
)
self.contamination = contamination
def fit(self, X, y=None):
"""Fit the EllipticEnvelope model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
if self.contamination != "auto":
if not (0.0 < self.contamination <= 0.5):
raise ValueError(
"contamination must be in (0, 0.5], got: %f" % self.contamination
)
super().fit(X)
self.offset_ = np.percentile(-self.dist_, 100.0 * self.contamination)
return self
def decision_function(self, X):
"""Compute the decision function of the given observations.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
decision : ndarray of shape (n_samples,)
Decision function of the samples.
It is equal to the shifted Mahalanobis distances.
The threshold for being an outlier is 0, which ensures a
compatibility with other outlier detection algorithms.
"""
check_is_fitted(self)
negative_mahal_dist = self.score_samples(X)
return negative_mahal_dist - self.offset_
def score_samples(self, X):
"""Compute the negative Mahalanobis distances.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
negative_mahal_distances : array-like of shape (n_samples,)
Opposite of the Mahalanobis distances.
"""
check_is_fitted(self)
return -self.mahalanobis(X)
def predict(self, X):
"""
Predict labels (1 inlier, -1 outlier) of X according to fitted model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
values = self.decision_function(X)
is_inlier = np.full(values.shape[0], -1, dtype=int)
is_inlier[values >= 0] = 1
return is_inlier
def score(self, X, y, sample_weight=None):
"""Return the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) w.r.t. y.
"""
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
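# --- Editor's sketch (illustrative, not part of scikit-learn) ---
# How offset_ ties contamination to flagged outliers: offset_ is the
# contamination-quantile of the negative training distances, so roughly that
# fraction of training samples end up with decision_function < 0.
def _offset_demo():
    dist_ = np.array([1.0, 2.0, 3.0, 4.0, 5.0])  # toy Mahalanobis distances
    contamination = 0.2
    offset_ = np.percentile(-dist_, 100.0 * contamination)
    decision = -dist_ - offset_
    return float((decision < 0).mean())  # -> 0.2, i.e. the contamination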
|
manhhomienbienthuy/scikit-learn
|
sklearn/covariance/_elliptic_envelope.py
|
Python
|
bsd-3-clause
| 8,996
|
[
"Gaussian"
] |
6f7f6b48697aad3c00e4b0251cec12e77dcde8c8209cd6f3d6b75957208de226
|
"""
========================================================
Time-frequency on simulated data (Multitaper vs. Morlet)
========================================================
This examples demonstrates on simulated data the different time-frequency
estimation methods. It shows the time-frequency resolution trade-off
and the problem of estimation variance.
"""
# Authors: Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from mne import create_info, EpochsArray
from mne.time_frequency import tfr_multitaper, tfr_stockwell, tfr_morlet
print(__doc__)
###############################################################################
# Simulate data
sfreq = 1000.0
ch_names = ['SIM0001', 'SIM0002']
ch_types = ['grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
n_times = int(sfreq) # 1 second long epochs
n_epochs = 40
seed = 42
rng = np.random.RandomState(seed)
noise = rng.randn(n_epochs, len(ch_names), n_times)
# Add a 50 Hz sinusoidal burst to the noise and ramp it.
t = np.arange(n_times, dtype=float) / sfreq
signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum()) # Ramping
data = noise + signal
reject = dict(grad=4000)
events = np.empty((n_epochs, 3), dtype=int)
first_event_sample = 100
event_id = dict(sin50hz=1)
for k in range(n_epochs):
events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
epochs = EpochsArray(data=data, info=info, events=events, event_id=event_id,
reject=reject)
###############################################################################
# Consider different parameter possibilities for multitaper convolution
freqs = np.arange(5., 100., 3.)
# You can trade time resolution or frequency resolution or both
# in order to get a reduction in variance
# (1) Least smoothing (most variance/background fluctuations).
n_cycles = freqs / 2.
time_bandwidth = 2.0 # Least possible frequency-smoothing (1 taper)
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=-1., vmax=3.,
title='Sim: Least smoothing, most variance')
# (2) Less frequency smoothing, more time smoothing.
n_cycles = freqs # Increase time-window length to 1 second.
time_bandwidth = 4.0 # Same frequency-smoothing as (1) 3 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=-1., vmax=3.,
title='Sim: Less frequency smoothing, more time smoothing')
# (3) Less time smoothing, more frequency smoothing.
n_cycles = freqs / 2.
time_bandwidth = 8.0 # Same time-smoothing as (1), 7 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=-1., vmax=3.,
title='Sim: Less time smoothing, more frequency smoothing')
# #############################################################################
# Stockwell (S) transform
# S uses a Gaussian window to balance temporal and spectral resolution
# Importantly, frequency bands are phase-normalized, hence strictly comparable
# with regard to timing, and the input signal can be recovered from the
# transform in a lossless way if we disregard numerical errors.
fmin, fmax = freqs[[0, -1]]
for width in (0.7, 3.0):
power = tfr_stockwell(epochs, fmin=fmin, fmax=fmax, width=width)
power.plot([0], baseline=(0., 0.1), mode='mean',
title='Sim: Using S transform, width '
'= {:0.1f}'.format(width), show=True)
# #############################################################################
# Finally, compare to morlet wavelet
n_cycles = freqs / 2.
power = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, return_itc=False)
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=-1., vmax=3.,
title='Sim: Using Morlet wavelet')
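# --- Editor's note (illustrative) ---
# For DPSS multitapers the usable taper count is time_bandwidth - 1, which is
# why time_bandwidth=2.0, 4.0 and 8.0 above correspond to 1, 3 and 7 tapers.
def _n_tapers(time_bandwidth):
    return int(time_bandwidth) - 1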
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
python-packages/mne-python-0.10/examples/time_frequency/plot_time_frequency_simulated.py
|
Python
|
bsd-3-clause
| 4,504
|
[
"Gaussian"
] |
7eb9de660e9ef0716cc32c88a5513da7460ddbb6d9e22058aa22a777fdfa8e59
|
# Orca
#
# Copyright 2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Exposes Orca as a DBus service for testing and watchdog purposes."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2008 Sun Microsystems Inc."
__license__ = "LGPL"
import dbus
import dbus.service
import debug
import settings
# Handlers for logging speech and braille output.
#
loggingFileHandlers = {}
loggingStreamHandlers = {}
# pylint: disable-msg=R0923
# Server: Interface not implemented
bus = None
name = None
obj = None
class Server(dbus.service.Object):
def __init__(self, object_path, bus_name):
dbus.service.Object.__init__(self, None, object_path, bus_name)
@dbus.service.method(dbus_interface='org.gnome.Orca.Logging',
in_signature='si', out_signature='')
def setDebug(self, debugFile, debugLevel):
"""Sets the file to send detailed debug information."""
if not settings.enableRemoteLogging:
return
debug.println(debug.LEVEL_FINEST,
"DBus Logging.setDebug(%s, %d)" \
% (debugFile, debugLevel))
if debug.debugFile:
debug.debugFile.close()
debug.debugFile = None
if debugFile and len(debugFile):
debug.debugFile = open('%s.debug' % debugFile, 'w', 0)
debug.debugLevel = debugLevel
@dbus.service.method(dbus_interface='org.gnome.Orca.Logging',
in_signature='s', out_signature='')
def setLogFile(self, logFile):
"""Sets the file to send speech and braille logging information."""
if not settings.enableRemoteLogging:
return
import logging
debug.println(debug.LEVEL_FINEST,
"DBus Logging.setLogFile(%s)" % logFile)
for logger in ['braille', 'speech']:
log = logging.getLogger(logger)
formatter = logging.Formatter('%(message)s')
try:
loggingFileHandlers[logger].flush()
loggingFileHandlers[logger].close()
log.removeHandler(loggingFileHandlers[logger])
except:
pass
if logFile and len(logFile):
loggingFileHandlers[logger] = logging.FileHandler(
'%s.%s' % (logFile, logger), 'w')
loggingFileHandlers[logger].setFormatter(formatter)
log.addHandler(loggingFileHandlers[logger])
log.setLevel(logging.INFO)
@dbus.service.method(dbus_interface='org.gnome.Orca.Logging',
in_signature='', out_signature='')
def startRecording(self):
"""Tells Orca to start logging speech and braille output."""
if not settings.enableRemoteLogging:
return
debug.println(debug.LEVEL_FINEST, "DBus Logging.startRecording")
import logging
import StringIO
for logger in ['braille', 'speech']:
log = logging.getLogger(logger)
try:
[stringIO, handler] = loggingStreamHandlers[logger]
handler.close()
log.removeHandler(handler)
stringIO.close()
except:
pass
formatter = logging.Formatter('%(message)s')
stringIO = StringIO.StringIO()
handler = logging.StreamHandler(stringIO)
handler.setFormatter(formatter)
log.addHandler(handler)
loggingStreamHandlers[logger] = [stringIO, handler]
log.setLevel(logging.INFO)
@dbus.service.method(dbus_interface='org.gnome.Orca.Logging',
in_signature='', out_signature='s')
def stopRecording(self):
"""Tells Orca to stop logging speech and braille output and
to return whatever was recorded since the last call to
startRecording."""
if not settings.enableRemoteLogging:
return ""
debug.println(debug.LEVEL_FINEST, "DBus Logging.stopRecording")
import logging
import StringIO
result = ''
for logger in ['braille', 'speech']:
log = logging.getLogger(logger)
try:
[stringIO, handler] = loggingStreamHandlers[logger]
handler.flush()
handler.close()
log.removeHandler(handler)
result += stringIO.getvalue()
stringIO.close()
except:
debug.printException(debug.LEVEL_OFF)
stringIO = StringIO.StringIO()
return result
def init():
"""Sets up the Orca DBus service. This will only take effect once
the Orca main loop starts."""
global bus
global name
global obj
if obj or bus or name:
return
try:
bus = dbus.SessionBus()
name = dbus.service.BusName('org.gnome.Orca',
bus=bus,
allow_replacement=False,
replace_existing=False)
obj = Server('/', name)
except:
debug.println(debug.LEVEL_WARNING,
"dbusserver.py: Could not initialize DBus server")
debug.printException(debug.LEVEL_WARNING)
def shutdown():
pass
def main():
import pyatspi
init()
pyatspi.Registry.start()
if __name__ == "__main__":
main()
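# --- Editor's client-side sketch (illustrative; assumes a running Orca
# exposing org.gnome.Orca on the session bus) ---
def _demo_client():
    client_bus = dbus.SessionBus()
    orca_obj = client_bus.get_object('org.gnome.Orca', '/')
    log_iface = dbus.Interface(orca_obj, dbus_interface='org.gnome.Orca.Logging')
    log_iface.startRecording()
    # ... exercise Orca here ...
    return log_iface.stopRecording()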
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/orca/dbusserver.py
|
Python
|
gpl-3.0
| 6,167
|
[
"ORCA"
] |
05695ae56b15464e6d802c56d22f8e210e3b3410381b75c7b66a827502e25680
|
#!/usr/bin/env python3
import argparse
import razer.client
import sys
def _clamp_u8(value):
if value > 255:
return 255
elif value < 0:
return 0
else:
return value
def _print_table(header_list, rows):
column_lengths = [len(header_item) for header_item in header_list]
for row in rows:
for index, column in enumerate(row):
column = str(column)
cell_length = len(column)
try:
if column_lengths[index] < cell_length:
column_lengths[index] = cell_length
except IndexError:
pass
# spaces in between columns + total column length
max_line_length = ((len(column_lengths) - 1) * 4) + sum(column_lengths)
# Got maximum column widths
result = ''
for index, header_item in enumerate(header_list):
pad = ' ' * (column_lengths[index] - len(header_item))
result += '{0}{1} '.format(header_item, pad)
# Remove trailing space, add newline
result += (' ' * (max_line_length - len(result))) + '\n'
# Add ----- separator and newline
result += ('-' * max_line_length) + '\n'
for row in rows:
line = ''
for index, column in enumerate(row):
column = str(column)
pad = ' ' * (column_lengths[index] - len(column))
line += '{0}{1} '.format(column, pad)
line += (' ' * (max_line_length - len(line))) + '\n'
result += line
print(result)
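# --- Editor's sketch: exercising the table helper above (illustrative) ---
def _print_table_demo():
    # prints aligned columns, a dashed separator line, then one row per device
    _print_table(['ID', 'Device Name'], [[0, 'Razer BlackWidow Chroma']])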
def _get_devices() -> list:
"""
Gets devices ordered by serial
:return: List of devices ordered by serial
:rtype: list
"""
device_manager = razer.client.DeviceManager()
devices = sorted(device_manager.devices, key=lambda x: (str(x.serial), str(x.type)))
return devices
def _get_devices_by_serial() -> dict:
device_manager = razer.client.DeviceManager()
devices = {device.serial: device for device in device_manager.devices}
return devices
def _get_devices_by_type() -> dict:
device_manager = razer.client.DeviceManager()
devices = {}
for device in device_manager.devices:
dev_type = device.type
try:
devices[dev_type].append(device)
except KeyError:
devices[dev_type] = [device]
# Sort devices
for key in devices:
devices[key] = sorted(devices[key], key=lambda x: str(x.serial))
return devices
def _get_device_from_filter(args):
if args.serial is not None:
device = _get_devices_by_serial().get(args.serial, None)
elif '--keyboard' in sys.argv:
if args.keyboard is None:
args.keyboard = 0
try:
device = _get_devices_by_type().get('keyboard', [])[args.keyboard]
except IndexError:
device = None
elif '--mouse' in sys.argv:
if args.mouse is None:
args.mouse = 0
try:
device = _get_devices_by_type().get('mouse', [])[args.mouse]
except IndexError:
device = None
elif '--firefly' in sys.argv:
if args.firefly is None:
args.firefly = 0
try:
device = _get_devices_by_type().get('firefly', [])[args.firefly]
except IndexError:
device = None
elif '--tartarus' in sys.argv:
if args.tartarus is None:
args.tartarus = 0
try:
device = _get_devices_by_type().get('tartarus', [])[args.tartarus]
except IndexError:
device = None
else:
# Theoretically impossible to land here
device = None
if device is None:
print("Could not find device")
sys.exit(1)
else:
return device
def list_devices(args):
"""
Subcommand to list all devices
:param args: Argparse arguments
"""
devices = _get_devices()
headers = ['ID', 'Device Name', 'Device Type', 'Serial']
rows = []
for index, device in enumerate(devices):
rows.append([
index,
device.name,
device.type.title(),
device.serial
])
_print_table(headers, rows)
def brightness_func(args):
device = _get_device_from_filter(args)
if args.set is None:
# Get brightness
if args.raw:
print(str(device.brightness))
else:
print("Brightness: {0}%".format(device.brightness))
else:
brightness_value = float(_clamp_u8(args.set))
if not args.raw:
print("Setting brightness to {0}%".format(brightness_value))
device.brightness = brightness_value
def parse_args():
def add_filter_group(sub):
group = sub.add_mutually_exclusive_group(required=True)
group.add_argument('--serial', help='Select device via its serial')
group.add_argument('--keyboard', nargs='?', default=0, type=int, help='Select keyboard, if ID is omitted the first is used')
group.add_argument('--mouse', nargs='?', default=0, type=int, help='Select mouse, if ID is omitted the first is used')
group.add_argument('--firefly', nargs='?', default=0, type=int, help='Select Firefly, if ID is omitted the first is used')
group.add_argument('--tartarus', nargs='?', default=0, type=int, help='Select Tartarus, if ID is omitted the first is used')
def add_raw(sub):
sub.add_argument('--raw', action='store_true', help="Raw output")
parser = argparse.ArgumentParser()
subparser = parser.add_subparsers(dest='command', help='commands')
subparser.required = True
help_parser = subparser.add_parser('help', help='The help command will display help, running "help <command>" will display more detailed help')
help_parser.add_argument('help', nargs='?', metavar='COMMAND', default=None, type=str)
# No need to assign to a var as it has no args
subparser.add_parser('list', help='Lists Razer Devices')
# Brightness
brightness_parser = subparser.add_parser('brightness', help='Get or set the brightness')
add_filter_group(brightness_parser)
add_raw(brightness_parser)
brightness_parser.add_argument('--set', metavar='BRIGHTNESS', type=float, default=None, help='Gets brightness if omitted')
# Macro
macro_parser = subparser.add_parser('macro', help='Manage macros')
add_filter_group(macro_parser)
macro_exclusive_group = macro_parser.add_mutually_exclusive_group(required=True)
macro_exclusive_group.add_argument('--list', action='store_true', help="List active macros")
macro_exclusive_group.add_argument('--add-script', nargs=2, type=str, metavar=('BIND_KEY', 'SCRIPT_PATH'), help="Bind the given script to the given macro key. If you require script arguments either create a wrapper or use the API directly.")
macro_exclusive_group.add_argument('--add-url', nargs=2, type=str, metavar=('BIND_KEY', 'URL'), help="Bind the given URL to the given macro key, so that xdg-open will open a tab.")
macro_exclusive_group.add_argument('--add-keys', nargs='+', type=str, metavar=('BIND_KEY', 'KEYS'), help="Bind the given key string to the given macro key.")
args = parser.parse_args()
if args.command == 'help':
if args.help == 'brightness':
brightness_parser.print_help()
elif args.help == 'macro':
macro_parser.print_help()
else:
parser.print_help()
sys.exit(0)
return args
CMD_MAP = {
'list': list_devices,
'brightness': brightness_func
}
def run():
args = parse_args()
if args.command in CMD_MAP:
CMD_MAP[args.command](args)
else:
print('Someone forgot to add a mapping for command "{0}"'.format(args.command))
print()
if __name__ == '__main__':
run()
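# Hedged usage sketch; the script name and device serial below are assumptions:
#
#   python controller.py list
#   python controller.py brightness --keyboard --set 50
#   python controller.py brightness --serial XX0123456789 --raw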
|
z3ntu/razer-drivers
|
scripts/daemon/controller.py
|
Python
|
gpl-2.0
| 7,782
|
[
"Firefly"
] |
db5ed729e7eafb0b12eb5dd0a6f74065103dc8fc30ca298c1dbd2a20e559a933
|
#!/usr/bin/env python
"""
compliance_checker/protocols/cdl.py
"""
import os
def is_cdl(filename):
"""
Quick check for .cdl ascii file
Example:
netcdf sample_file {
dimensions:
name_strlen = 7 ;
time = 96 ;
variables:
float lat ;
lat:units = "degrees_north" ;
lat:standard_name = "latitude" ;
lat:long_name = "station latitude" ;
etc...
:param str filename: Absolute path of the file to check; the first
chunk of the file is read internally to inspect its contents
"""
if os.path.splitext(filename)[-1] != ".cdl":
return False
with open(filename, "rb") as f:
data = f.read(32)
if data.startswith(b"netcdf") or b"dimensions" in data:
return True
return False
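# Minimal usage sketch (the paths below are hypothetical):
#
#   >>> is_cdl('/data/sample_file.cdl')   # True if the header looks like CDL
#   True
#   >>> is_cdl('/data/sample_file.nc')    # wrong extension, rejected early
#   False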
|
ioos/compliance-checker
|
compliance_checker/protocols/cdl.py
|
Python
|
apache-2.0
| 822
|
[
"NetCDF"
] |
7ccaa0f44c95d97eb93ae16e9acfd7fea16117c37fd66d405058e6f925947d5e
|
try: paraview.simple
except: from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
D = 126.0
downstreamPosition = [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0]
lineDir = '../lineData'
time = 1050.0
controlDict_foam = OpenFOAMReader( FileName='/scratch/mchurchf/ALM_comparison_JHU/run.Smag_Cs08_eps10/system/controlDict.foam', TimestepValues=[1800.0], CaseType='Decomposed Case', MeshRegions='internalMesh',CellArrays=['UMean','uuMean'])
controlDict_foam.CellArrays = ['cellDist', 'flm', 'fmm', 'nuSgs', 'p', 'U']
controlDict_foam.MeshRegions = ['internalMesh']
AnimationScene2 = GetAnimationScene()
controlDict_foam.CellArrays = []
controlDict_foam.CaseType = 'Decomposed Case'
controlDict_foam.MeshRegions = ['internalMesh']
AnimationScene2.AnimationTime = time
controlDict_foam.CellArrays = ['bodyForce', 'flm_0', 'fmm_0', 'kMean', 'omega', 'pMean', 'ppMean', 'Q', 'RMean', 'U_0', 'UMean', 'upMean', 'uuMean', 'uuRTotal']
controlDict_foam.MeshRegions = ['internalMesh']
AnimationScene2.AnimationTime = time
AnimationScene2.PlayMode = 'Snap To TimeSteps'
AnimationScene2.EndTime = time
RenderView2 = GetRenderView()
DataRepresentation1 = Show()
DataRepresentation1.ScalarOpacityUnitDistance = 11.382656425624814
DataRepresentation1.Representation = 'Outline'
DataRepresentation1.EdgeColor = [0.0, 0.0, 0.5000076295109483]
RenderView2.CenterOfRotation = [1323.0, 0.0, 0.0]
AnimationScene2.AnimationTime = time
RenderView2.CameraPosition = [1323.0, 0.0, 6889.069671254006]
RenderView2.ViewTime = time
RenderView2.CameraFocalPoint = [1323.0, 0.0, 0.0]
RenderView2.CameraClippingRange = [6067.958974541466, 7939.295716322816]
RenderView2.CameraParallelScale = 1783.0224339586982
controlDict_foam.CellArrays = ['UMean', 'uuMean']
controlDict_foam.MeshRegions = ['internalMesh']
for i in range(len(downstreamPosition)):
for j in range(2):
SetActiveSource(controlDict_foam)
PlotOverLine1 = PlotOverLine( Source="High Resolution Line Source" )
PlotOverLine1.Source.Resolution = 600
if (j == 1):
print('Processing line ' + str(i+1) + 'y of ' + str(len(downstreamPosition)) + '...')
PlotOverLine1.Source.Point1 = [downstreamPosition[i]*D, -378.0, 0.0]
PlotOverLine1.Source.Point2 = [downstreamPosition[i]*D, 378.0, 0.0]
else:
print('Processing line ' + str(i+1) + 'z of ' + str(len(downstreamPosition)) + '...')
PlotOverLine1.Source.Point1 = [downstreamPosition[i]*D, 0.0, -378.0]
PlotOverLine1.Source.Point2 = [downstreamPosition[i]*D, 0.0, 378.0]
XYChartView1 = CreateXYPlotView()
XYChartView1.ViewTime = time
DataRepresentation2 = Show()
DataRepresentation2.XArrayName = 'arc_length'
DataRepresentation2.SeriesVisibility = ['UMean (0)', '0', 'UMean (1)', '0', 'UMean (2)', '0', 'uuMean (0)', '0', 'uuMean (1)', '0', 'uuMean (2)', '0', 'uuMean (3)', '0', 'uuMean (4)', '0', 'uuMean (5)', '0', 'vtkValidPointMask', '0', 'arc_length', '0', 'Points (0)', '0', 'Points (1)', '0', 'Points (2)', '0', 'Points (Magnitude)', '0', 'vtkOriginalIndices', '0']
DataRepresentation2.UseIndexForXAxis = 0
AnimationScene2.ViewModules = [ RenderView2, XYChartView1 ]
RenderView2.CameraClippingRange = [6067.958974541466, 7939.295716322816]
DataRepresentation2.SeriesColor = ['uuMean (Magnitude)', '0.976471', '0.513725', '0.141176']
DataRepresentation2.SeriesVisibility = ['UMean (0)', '0', 'UMean (1)', '0', 'UMean (2)', '0', 'uuMean (0)', '0', 'uuMean (1)', '0', 'uuMean (2)', '0', 'uuMean (3)', '0', 'uuMean (4)', '0', 'uuMean (5)', '0', 'vtkValidPointMask', '0', 'arc_length', '0', 'Points (0)', '0', 'Points (1)', '0', 'Points (2)', '0', 'Points (Magnitude)', '0', 'vtkOriginalIndices', '0', 'uuMean (Magnitude)', '1']
Render()
if (j == 1):
fileName = lineDir + '/line_y_' + str(int(downstreamPosition[i])) + '.csv'
else:
fileName = lineDir + '/line_z_' + str(int(downstreamPosition[i])) + '.csv'
writer = CreateWriter(fileName, PlotOverLine1)
writer.FieldAssociation = "Points"
writer.UpdatePipeline()
del writer
del PlotOverLine1
del DataRepresentation2
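# This script drives the ParaView Python API and is meant for batch use; the
# exact invocation below is an assumption about the local setup:
#
#   pvbatch sampleLine.py
#
# Each loop iteration samples one downstream station along y or z and writes
# a CSV of UMean/uuMean into lineDir.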
|
gzt200361/SOWFA
|
tutorials/turbine/NREL5MWRef.uniformFlow/system/sampleLine.py
|
Python
|
gpl-3.0
| 4,368
|
[
"ParaView"
] |
51f918ac90d8b06553ce0cb67df739cc108b227b7352394bf02655cdfd3dd69c
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.dllib.utils.common import JavaValue
from bigdl.dllib.utils.file_utils import callZooFunc
from bigdl.dllib.keras.engine.topology import KerasNet
from bigdl.dllib.nn.layer import Layer
import warnings
class InferenceModel(JavaValue):
"""
Model for thread-safe inference.
To do inference, you need to first initiate an InferenceModel instance, then call
load|load_caffe|load_openvino to load a pre-trained model, and finally call predict.
# Arguments
supported_concurrent_num: Int. How many concurrent threads to invoke. Default is 1.
"""
def __init__(self, supported_concurrent_num=1, bigdl_type="float"):
super(InferenceModel, self).__init__(None, bigdl_type, supported_concurrent_num)
def load_bigdl(self, model_path, weight_path=None):
"""
Load a pre-trained Analytics Zoo or BigDL model.
:param model_path: String. The file path to the model.
:param weight_path: String. The file path to the weights if any. Default is None.
"""
callZooFunc(self.bigdl_type, "inferenceModelLoadBigDL",
self.value, model_path, weight_path)
# deprecated in "0.8.0"
def load(self, model_path, weight_path=None):
"""
Load a pre-trained Analytics Zoo or BigDL model.
:param model_path: String. The file path to the model.
:param weight_path: String. The file path to the weights if any. Default is None.
"""
warnings.warn("deprecated in 0.8.0")
callZooFunc(self.bigdl_type, "inferenceModelLoad",
self.value, model_path, weight_path)
def load_caffe(self, model_path, weight_path):
"""
Load a pre-trained Caffe model.
:param model_path: String. The file path to the prototxt file.
:param weight_path: String. The file path to the Caffe model.
"""
callZooFunc(self.bigdl_type, "inferenceModelLoadCaffe",
self.value, model_path, weight_path)
def load_openvino(self, model_path, weight_path, batch_size=0):
"""
Load an OpenVINO IR.
:param model_path: String. The file path to the OpenVINO IR xml file.
:param weight_path: String. The file path to the OpenVINO IR bin file.
:param batch_size: Int. Set batch Size, default is 0 (use default batch size).
"""
callZooFunc(self.bigdl_type, "inferenceModelLoadOpenVINO",
self.value, model_path, weight_path, batch_size)
def load_openvino_ng(self, model_path, weight_path, batch_size=0):
"""
Load an OpenVINO IR.
:param model_path: String. The file path to the OpenVINO IR xml file.
:param weight_path: String. The file path to the OpenVINO IR bin file.
:param batch_size: Int. Set batch Size, default is 0 (use default batch size).
"""
callZooFunc(self.bigdl_type, "inferenceModelLoadOpenVINONg",
self.value, model_path, weight_path, batch_size)
def load_tensorflow(self, model_path, model_type="frozenModel", intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1, use_per_session_threads=True):
"""
Load a TensorFlow model using tensorflow.
:param model_path: String. The file path to the TensorFlow model.
:param model_type: String. The type of the tensorflow model file. Default is "frozenModel"
:param intra_op_parallelism_threads: Int. The number of intraOpParallelismThreads.
Default is 1.
:param inter_op_parallelism_threads: Int. The number of interOpParallelismThreads.
Default is 1.
:param use_per_session_threads: Boolean. Whether to use perSessionThreads. Default is True.
"""
callZooFunc(self.bigdl_type, "inferenceModelLoadTensorFlow",
self.value, model_path, model_type, intra_op_parallelism_threads,
inter_op_parallelism_threads, use_per_session_threads)
# Note: this second definition shadows the load_tensorflow defined above,
# so the variant taking explicit inputs/outputs is the one that is bound.
def load_tensorflow(self, model_path, model_type,
inputs, outputs, intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1, use_per_session_threads=True):
"""
Load a TensorFlow model using tensorflow.
:param model_path: String. The file path to the TensorFlow model.
:param model_type: String. The type of the tensorflow model file: "frozenModel" or
"savedModel".
:param inputs: Array[String]. the inputs of the model.
:param outputs: Array[String]. The outputs of the model.
:param intra_op_parallelism_threads: Int. The number of intraOpParallelismThreads.
Default is 1.
:param inter_op_parallelism_threads: Int. The number of interOpParallelismThreads.
Default is 1.
:param use_per_session_threads: Boolean. Whether to use perSessionThreads. Default is True.
"""
callZooFunc(self.bigdl_type, "inferenceModelLoadTensorFlow",
self.value, model_path, model_type,
inputs, outputs, intra_op_parallelism_threads,
inter_op_parallelism_threads, use_per_session_threads)
def load_torch(self, model_path):
"""
Load a pytorch model.
:param model_path: the path of saved pytorch model
"""
assert isinstance(model_path, str)
import os
import io
import torch
from bigdl.orca.torch import zoo_pickle_module
model = torch.load(model_path, pickle_module=zoo_pickle_module)
bys = io.BytesIO()
torch.save(model, bys, pickle_module=zoo_pickle_module)
callZooFunc(self.bigdl_type, "inferenceModelLoadPytorch",
self.value, bys.getvalue())
def predict(self, inputs):
"""
Do prediction on inputs.
:param inputs: A numpy array or a list of numpy arrays or JTensor or a list of JTensors.
"""
jinputs, input_is_table = Layer.check_input(inputs)
output = callZooFunc(self.bigdl_type,
"inferenceModelPredict",
self.value,
jinputs,
input_is_table)
return KerasNet.convert_output(output)
def distributed_predict(self, inputs, sc):
data_type = inputs.map(lambda x: x.__class__.__name__).first()
input_is_table = False
if data_type == "list":
input_is_table = True
jinputs = inputs.map(lambda x: Layer.check_input(x)[0])
output = callZooFunc(self.bigdl_type,
"inferenceModelDistriPredict",
self.value,
sc,
jinputs,
input_is_table)
return output.map(lambda x: KerasNet.convert_output(x))
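# Hedged usage sketch; the model paths and input shape are hypothetical:
#
#   import numpy as np
#   model = InferenceModel(supported_concurrent_num=4)
#   model.load_openvino('/models/net.xml', '/models/net.bin')
#   result = model.predict(np.random.rand(1, 3, 224, 224).astype('float32'))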
|
intel-analytics/BigDL
|
python/orca/src/bigdl/orca/inference/inference_model.py
|
Python
|
apache-2.0
| 7,656
|
[
"ORCA"
] |
a392baf1b0b67434b201fc0fbda2a11d4bd9d2b52541c76afd2b2224a03b20cf
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_consistent_length
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
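# Worked illustration of the pairing above: for three samples the function
# returns the three upper-triangle pairs, e.g.
#
#   X = np.array([[0.], [1.], [3.]])
#   D, ij = l1_cross_distances(X)
#   # ij -> [[0, 1], [0, 2], [1, 2]]
#   # D  -> [[1.], [3.], [2.]]   (componentwise |x_i - x_j|)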
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X = check_array(X)
y = np.asarray(y)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
check_consistent_length(X, y)
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
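# Hedged sketch of calling predict on a fitted model (data names here are
# placeholders, not part of this module):
#
#   gp = GaussianProcess(theta0=0.1).fit(X, y)
#   y_pred, mse = gp.predict(X_new, eval_MSE=True)
#   sigma = np.sqrt(mse)   # pointwise predictive standard deviation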
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
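# Sketch of the criterion computed above, in the names of this method:
#
#   reduced_likelihood = - sigma2.sum() * det(R) ** (1. / n_samples)
#
# where sigma2 = (rho ** 2).sum(axis=0) / n_samples and det(R) comes from
# the squared product of the diagonal of the Cholesky factor C.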
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = np.log10(self.thetaL) \
+ self.random_state.rand(self.theta0.size).reshape(
self.theta0.shape) * np.log10(self.thetaU
/ self.thetaL)
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = check_array(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = check_array(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = check_array(self.thetaL)
self.thetaU = check_array(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
|
RPGOne/Skynet
|
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/gaussian_process/gaussian_process.py
|
Python
|
bsd-3-clause
| 34,397
|
[
"Gaussian"
] |
e3d86093b4e2ed630f9ba5a030737011f56196b23c05985d06c6dd3887a154dd
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# =============================================================================
from __future__ import division
import math
import unittest
import numpy as np
from singa import tensor
from singa import singa_wrap as singa_api
from singa import autograd
from singa.proto import core_pb2
from cuda_helper import gpu_dev, cpu_dev
class TestTensorMethods(unittest.TestCase):
def setUp(self):
self.shape = (2, 3)
self.t = tensor.Tensor(self.shape)
self.s = tensor.Tensor(self.shape)
self.t.set_value(0)
self.s.set_value(0)
def test_tensor_fields(self):
t = self.t
shape = self.shape
self.assertTupleEqual(t.shape, shape)
self.assertEqual(t.shape[0], shape[0])
self.assertEqual(t.shape[1], shape[1])
self.assertEqual(tensor.product(shape), 2 * 3)
self.assertEqual(t.ndim(), 2)
self.assertEqual(t.size(), 2 * 3)
self.assertEqual(t.memsize(), 2 * 3 * tensor.sizeof(core_pb2.kFloat32))
self.assertFalse(t.is_transpose())
def test_unary_operators(self):
t = self.t
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 0.0)
t += 1.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
t -= 0.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23 - 0.23)
t *= 2.5
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], (1.23 - 0.23) * 2.5)
t /= 2
self.assertAlmostEqual(
tensor.to_numpy(t)[0, 0], (1.23 - 0.23) * 2.5 / 2)
def test_binary_operators(self):
t = self.t
t += 3.2
s = self.s
s += 2.1
a = t + s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 + 2.1, 5)
a = t - s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 - 2.1, 5)
a = t * s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 * 2.1, 5)
''' not implemented yet
a = t / s
self.assertAlmostEqual(tensor.to_numpy(a)[0,0], 3.2/2.1, 5)
'''
def test_comparison_operators(self):
t = self.t
t += 3.45
a = t < 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = t <= 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = t > 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = t >= 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.lt(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = tensor.le(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.gt(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = tensor.ge(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
def test_tensor_copy(self):
t = tensor.Tensor((2, 3))
t += 1.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
tc = t.copy()
tdc = t.deepcopy()
self.assertAlmostEqual(tensor.to_numpy(tc)[0, 0], 1.23)
self.assertAlmostEqual(tensor.to_numpy(tdc)[0, 0], 1.23)
t += 1.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 2.46)
self.assertAlmostEqual(tensor.to_numpy(tc)[0, 0], 2.46)
self.assertAlmostEqual(tensor.to_numpy(tdc)[0, 0], 1.23)
def test_copy_data(self):
t = self.t
t += 1.23
s = self.s
s += 5.43
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
tensor.copy_data_to_from(t, s, 2)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 5.43, 5)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 1], 5.43, 5)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 2], 1.23)
def test_global_method(self):
t = self.t
t += 12.34
a = tensor.log(t)
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], math.log(12.34))
def test_random(self):
x = tensor.Tensor((1000,))
x.gaussian(1, 0.01)
self.assertAlmostEqual(tensor.average(x), 1, 3)
def test_radd(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 1 + x
self.assertEqual(tensor.average(y), 2.)
def test_rsub(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 1 - x
self.assertEqual(tensor.average(y), 0.)
def test_rmul(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 2 * x
self.assertEqual(tensor.average(y), 2.)
def test_rdiv(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 2 / x
self.assertEqual(tensor.average(y), 2.)
def test_tensor_inplace_api(self):
""" tensor inplace methods alter internal state and also return self
"""
x = tensor.Tensor((3,))
y = x.set_value(1)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.uniform(1, 2)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.bernoulli(1)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.gaussian(1, 2)
self.assertTrue(y is x)
def test_numpy_convert(self):
a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.int)
t = tensor.from_numpy(a)
b = tensor.to_numpy(t)
self.assertEqual(np.sum(a - b), 0)
a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.float32)
t = tensor.from_numpy(a)
b = tensor.to_numpy(t)
self.assertEqual(np.sum(a - b), 0.)
def test_transpose(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
A1 = np.transpose(a)
tA1 = tensor.transpose(ta)
TA1 = tensor.to_numpy(tA1)
A2 = np.transpose(a, [0, 2, 1])
tA2 = tensor.transpose(ta, [0, 2, 1])
TA2 = tensor.to_numpy(tA2)
np.testing.assert_array_almost_equal(TA1, A1)
np.testing.assert_array_almost_equal(TA2, A2)
def test_einsum(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
res1 = np.einsum('kij,kij->kij', a, a)
tres1 = tensor.einsum('kij,kij->kij', ta, ta)
Tres1 = tensor.to_numpy(tres1)
res2 = np.einsum('kij,kih->kjh', a, a)
tres2 = tensor.einsum('kij,kih->kjh', ta, ta)
Tres2 = tensor.to_numpy(tres2)
self.assertAlmostEqual(np.sum(Tres1 - res1), 0., places=3)
self.assertAlmostEqual(np.sum(Tres2 - res2), 0., places=3)
def test_repeat(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
ta_repeat1 = tensor.repeat(ta, 2, axis=None)
a_repeat1 = np.repeat(a, 2, axis=None)
Ta_repeat1 = tensor.to_numpy(ta_repeat1)
ta_repeat2 = tensor.repeat(ta, 4, axis=1)
a_repeat2 = np.repeat(a, 4, axis=1)
Ta_repeat2 = tensor.to_numpy(ta_repeat2)
self.assertAlmostEqual(np.sum(Ta_repeat1 - a_repeat1), 0., places=3)
self.assertAlmostEqual(np.sum(Ta_repeat2 - a_repeat2), 0., places=3)
def test_sum(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
a_sum0 = np.sum(a)
ta_sum0 = tensor.sum(ta)
Ta_sum0 = tensor.to_numpy(ta_sum0)
a_sum1 = np.sum(a, axis=1)
ta_sum1 = tensor.sum(ta, axis=1)
Ta_sum1 = tensor.to_numpy(ta_sum1)
a_sum2 = np.sum(a, axis=2)
ta_sum2 = tensor.sum(ta, axis=2)
Ta_sum2 = tensor.to_numpy(ta_sum2)
self.assertAlmostEqual(np.sum(a_sum0 - Ta_sum0), 0., places=3)
self.assertAlmostEqual(np.sum(a_sum1 - Ta_sum1), 0., places=3)
self.assertAlmostEqual(np.sum(a_sum2 - Ta_sum2), 0., places=3)
def test_tensordot(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
res1 = np.tensordot(a, a, axes=1)
tres1 = tensor.tensordot(ta, ta, axes=1)
Tres1 = tensor.to_numpy(tres1)
self.assertAlmostEqual(np.sum(Tres1 - res1), 0., places=3)
np.testing.assert_array_almost_equal(Tres1, res1)
res2 = np.tensordot(a, a, axes=([0, 1], [2, 1]))
tres2 = tensor.tensordot(ta, ta, axes=([0, 1], [2, 1]))
np.testing.assert_array_almost_equal(tensor.to_numpy(tres2), res2)
def test_reshape(self):
a = np.array([[[1.1, 1.1, 1.4], [1.1, 1.1, 1.1]],
[[1.1, 1.1, 1.3], [1.6, 1.1, 1.2]]])
ta = tensor.from_numpy(a)
tb = tensor.reshape(ta, [2, 6])
self.assertAlmostEqual(tb.shape[0], 2., places=3)
self.assertAlmostEqual(tb.shape[1], 6., places=3)
np.testing.assert_array_almost_equal(tensor.to_numpy(tb),
a.reshape((2, 6)))
def test_transpose_then_reshape(self):
a = np.array([[[1.1, 1.1], [1.1, 1.1], [1.4, 1.3]],
[[1.1, 1.6], [1.1, 1.1], [1.1, 1.2]]])
TRANSPOSE_AXES = (2, 0, 1)
RESHAPE_DIMS = (2, 6)
ta = tensor.from_numpy(a)
ta = ta.transpose(TRANSPOSE_AXES)
ta = ta.reshape(RESHAPE_DIMS)
np.testing.assert_array_almost_equal(
tensor.to_numpy(ta),
np.reshape(a.transpose(TRANSPOSE_AXES), RESHAPE_DIMS))
def _concatenate_helper(self, dev):
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np2 = np.random.random([5, 6, 7, 1]).astype(np.float32)
np3 = np.concatenate((np1, np2), axis=3)
t1 = tensor.Tensor(device=dev, data=np1)
t2 = tensor.Tensor(device=dev, data=np2)
t3 = tensor.concatenate((t1, t2), 3)
np.testing.assert_array_almost_equal(tensor.to_numpy(t3), np3)
def test_concatenate_cpu(self):
self._concatenate_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_concatenate_gpu(self):
self._concatenate_helper(gpu_dev)
def _subscription_helper(self, dev):
np1 = np.random.random((5, 5, 5, 5)).astype(np.float32)
sg_tensor = tensor.Tensor(device=dev, data=np1)
sg_tensor_ret = sg_tensor[1:3, :, 1:, :-1]
np.testing.assert_array_almost_equal((tensor.to_numpy(sg_tensor_ret)),
np1[1:3, :, 1:, :-1])
def test_subscription_cpu(self):
self._subscription_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_subscription_gpu(self):
self._subscription_helper(gpu_dev)
def _ceil_helper(self, dev):
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np1 = np1 * 10
np2 = np.ceil(np1)
t1 = tensor.Tensor(device=dev, data=np1)
t2 = tensor.ceil(t1)
np.testing.assert_array_almost_equal(tensor.to_numpy(t2), np2)
def test_ceil_cpu(self):
self._ceil_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_ceil_gpu(self):
self._ceil_helper(gpu_dev)
def _astype_helper(self, dev):
shape1 = [2, 3]
shape2 = [3, 2]
np_flt = np.random.random(shape1).astype(np.float32)
np_flt = np_flt * 10 - 5
np_int = np_flt.astype(np.int32)
np_flt2 = np_int.astype(np.float32)
t2 = tensor.Tensor(device=dev, data=np_flt)
t2 = t2.as_type('int')
np.testing.assert_array_almost_equal(tensor.to_numpy(t2), np_int)
t1 = t2.reshape(shape2)
np.testing.assert_array_almost_equal(tensor.to_numpy(t1),
np_int.reshape(shape2))
t1 = t1.as_type('float')
np.testing.assert_array_almost_equal(tensor.to_numpy(t1),
np_flt2.reshape(shape2))
def test_astype_cpu(self):
self._astype_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_astype_gpu(self):
self._astype_helper(gpu_dev)
def _3d_matmul_helper(self, dev):
np_x1 = np.random.randn(2, 3, 4).astype(np.float32)
np_x2 = np.random.randn(2, 4, 3).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
np_x1 = np.random.randn(2, 3, 4).astype(np.float32)
np_x2 = np.random.randn(2, 4, 5).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
def test_3d_matmul_cpu(self):
self._3d_matmul_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_3d_matmul_gpu(self):
self._3d_matmul_helper(gpu_dev)
def _4d_matmul_helper(self, dev):
np_x1 = np.random.randn(2, 12, 256, 64).astype(np.float32)
np_x2 = np.random.randn(2, 12, 64, 256).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
np_x1 = np.random.randn(2, 12, 256, 64).astype(np.float32)
np_x2 = np.random.randn(2, 12, 64, 1024).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
def test_4d_matmul_cpu(self):
self._4d_matmul_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_4d_matmul_gpu(self):
self._4d_matmul_helper(gpu_dev)
def _matmul_transpose_helper(self, dev):
X = np.random.random((1, 256, 12, 64)).astype(np.float32)
x = tensor.from_numpy(X)
x.to_device(dev)
W = np.random.random((1, 256, 12, 64)).astype(np.float32)
w = tensor.from_numpy(W)
w.to_device(dev)
X = np.transpose(X, (0, 2, 1, 3))
W = np.transpose(W, (0, 2, 1, 3))
W = np.transpose(W, (0, 1, 3, 2))
Y = np.matmul(X, W)
x = autograd.transpose(x, (0, 2, 1, 3))
w = autograd.transpose(w, (0, 2, 1, 3))
w = autograd.transpose(w, (0, 1, 3, 2))
y = autograd.matmul(x, w)
np.testing.assert_array_almost_equal(tensor.to_numpy(x), X)
np.testing.assert_array_almost_equal(tensor.to_numpy(w), W)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), Y)
def test_matmul_transpose_cpu(self):
self._matmul_transpose_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_matmul_transpose_gpu(self):
self._matmul_transpose_helper(gpu_dev)
if __name__ == '__main__':
unittest.main()
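# The suite runs directly with the standard unittest entry point; the path
# below is the file's own repository location, used here for illustration:
#
#   python test/python/test_tensor.py
#
# GPU cases are skipped automatically when singa_api.USE_CUDA is False.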
|
nudles/incubator-singa
|
test/python/test_tensor.py
|
Python
|
apache-2.0
| 16,386
|
[
"Gaussian"
] |
b516c39cc914ad7bd108b7ed1990da63a5981b56276ab040d1e75c0891fd40bc
|
#!/usr/bin/env python
# Copyright 2008, 2009
# CAMd (see accompanying license files for details).
import os
from optparse import OptionParser, SUPPRESS_HELP
import ase.gui.i18n
from gettext import gettext as _
# Grrr, older versions (pre-python2.7) of optparse have a bug
# which prevents non-ascii descriptions. How do we circumvent this?
# For now, we'll have to use English in the command line options then.
def build_parser():
parser = OptionParser(usage='%prog [options] [file[, file2, ...]]',
version='%prog 0.1',
description='See the online manual ' +
'(https://wiki.fysik.dtu.dk/ase/ase/gui.html) ' +
'for more information.')
parser.add_option('-n', '--image-number',
default=':', metavar='NUMBER',
help='Pick image(s) from trajectory. NUMBER can be a '
'single number (use a negative number to count from '
'the back) or a range: start:stop:step, where the '
'":step" part can be left out - default values are '
'0:nimages:1.')
parser.add_option('-u', '--show-unit-cell', type='int',
default=1, metavar='I',
help="0: Don't show unit cell. 1: Show unit cell. "
'2: Show all of unit cell.')
parser.add_option('-r', '--repeat',
default='1',
help='Repeat unit cell. Use "-r 2" or "-r 2,3,1".')
parser.add_option('-R', '--rotations', default='',
help='Examples: "-R -90x", "-R 90z,-30x".')
parser.add_option('-o', '--output', metavar='FILE',
help='Write configurations to FILE.')
parser.add_option('-g', '--graph',
# TRANSLATORS: EXPR abbreviates 'expression'
metavar='EXPR',
help='Plot x,y1,y2,... graph from configurations or '
'write data to stdout in terminal mode. Use the '
'symbols: i, s, d, fmax, e, ekin, A, R, E and F. See '
'https://wiki.fysik.dtu.dk/ase/ase/gui.html'
'#plotting-data for more details.')
parser.add_option('-t', '--terminal',
action='store_true',
default=False,
help='Run in terminal window - no GUI.')
parser.add_option('--aneb',
action='store_true',
default=False,
help='Read ANEB data.')
parser.add_option('--interpolate',
type='int', metavar='N',
help='Interpolate N images between 2 given images.')
parser.add_option('-b', '--bonds',
action='store_true',
default=False,
help='Draw bonds between atoms.')
parser.add_option('-s', '--scale', dest='radii_scale', metavar='FLOAT',
default=None, type=float,
help='Scale covalent radii.')
return parser
def main():
parser = build_parser()
opt, args = parser.parse_args()
try:
import ase
except ImportError:
import sys
from os.path import dirname, join, pardir
sys.path.append(join(dirname(__file__), pardir))
from ase.gui.images import Images
from ase.atoms import Atoms
def run(opt, args):
images = Images()
if opt.aneb:
opt.image_number = '-1'
if len(args) > 0:
from ase.io import string2index
images.read(args, string2index(opt.image_number))
else:
images.initialize([Atoms()])
if opt.interpolate:
images.interpolate(opt.interpolate)
if opt.aneb:
images.aneb()
if opt.repeat != '1':
r = opt.repeat.split(',')
if len(r) == 1:
r = 3 * r
images.repeat_images([int(c) for c in r])
if opt.radii_scale:
images.set_radii(opt.radii_scale)
if opt.output is not None:
images.write(opt.output, rotations=opt.rotations,
show_unit_cell=opt.show_unit_cell)
opt.terminal = True
if opt.terminal:
if opt.graph is not None:
data = images.graph(opt.graph)
for line in data.T:
for x in line:
print x,
print
else:
from ase.gui.gui import GUI
import ase.gui.gtkexcepthook
gui = GUI(images, opt.rotations, opt.show_unit_cell, opt.bonds)
gui.run(opt.graph)
import traceback
try:
run(opt, args)
except KeyboardInterrupt:
pass
except Exception:
traceback.print_exc()
print(_("""
An exception occurred! Please report the issue to
ase-developers@listserv.fysik.dtu.dk - thanks! Please also report this if
it was a user error, so that a better error message can be provided
next time."""))
|
JConwayAWT/PGSS14CC
|
lib/python/multimetallics/ase/gui/ag.py
|
Python
|
gpl-2.0
| 5,192
|
[
"ASE"
] |
98c68b17fceed590e5a54455efab281f8707899ceaa35638e5991efc56e918af
|
#!/usr/bin/env python
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
##
## last $Author$
## $Date$
## $Revision$
import Biskit.Mod.modUtils as modUtils
from Biskit.Mod import *
import Biskit.tools as tools
from Biskit import EHandler
from Biskit import LogFile
import Biskit.Mod.settings as settings
import sys, os.path, os
def _use( o ):
print """
Syntax: search_templates.py [-q |target.fasta| -o |outFolder| -log |logFile|
-db |database| -e |e-value-cutoff| -limit |max_clusters|
-aln |n_alignments| -psi
-... additional options for blastall (see SequenceSearcher.py) ]
Result:
Options:
-q fasta file with query sequence (default: ./target.fasta)
-o output folder for results (default: .)
-log log file (default: STDOUT)
-db sequence data base
-limit Largest number of clusters allowed
-e E-value cutoff for sequence search
-aln number of alignments to be returned
-simcut similarity threshold for blastclust (score < 3 or % identity)
-simlen length threshold for clustering
-ncpu number of CPUs for clustering
-psi use PSI Blast instead, experimental!!
Default options:
"""
for key, value in o.items():
print "\t-",key, "\t",value
sys.exit(0)
def defaultOptions():
return {'q':None,
'o':'.',
'db' : settings.db_pdbaa,
'log': None,
'e':0.001,
'limit':20,
'aln':200,
'simcut':1.75,
'simlen':0.9,
'ncpu':1
}
def blastOptions( options ):
"""
identify options that have to be passed on to blastall
"""
result = {}
def_keys = defaultOptions().keys() + ['psi']
for k, v in options.items():
if not k in def_keys:
result[ k ] = v
return result
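# Illustrative example: with options = {'q': 'target.fasta', 'o': '.', 'F': 'F'},
# blastOptions(options) returns {'F': 'F'} -- keys consumed by this script
# (those in defaultOptions() plus 'psi') are stripped, and only the remaining
# options are forwarded to blastall.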
### MAIN ###
options = tools.cmdDict( defaultOptions() )
outFolder = tools.absfile( options['o'] )
f_target = tools.absfile( options['q'] )
f_target = f_target or outFolder + SequenceSearcher.F_FASTA_TARGET
if not (f_target and os.path.exists( f_target ) ):
_use( defaultOptions() )
if '?' in options or 'help' in options:
_use( defaultOptions() )
tmp_db = options['db']
e = float( options['e'] )
clustLim = int( options['limit'])
aln = int( options['aln'])
simCut = float( options['simcut'] )
simLen = float( options['simlen'] )
nCpu = int( options['ncpu'] )
log = None
if options['log']:
log = LogFile( outFolder + '/' + options['log'], 'a' )
ext_options = blastOptions( options )
###################
## TemplateSearcher
##
## Find modelling templates, blasting the target sequence against "tmp_db"
## Cluster the sequences and download the pdbs to templates/all
## input: target.fasta
##
## output: templates/blast.out
## templates/all.fasta
## templates/cluster_result.out
## templates/nr.fasta (input for Aligner)
## templates/all/*.pdb
## templates/nr/chain_index.txt (input for TemplateCleaner)
## /*.pdb ( " " " )
searcher = TemplateSearcher( outFolder, verbose=1,
clusterLimit=clustLim, log=log )
## if it looks like local Blast is installed
if os.environ.has_key('BLASTDB') and settings.blast_bin:
tools.flushPrint('Performing local blast search\n')
searcher.localBlast( f_target, tmp_db, 'blastp', alignments=aln, e=e )
## try remote Blast
else:
tools.flushPrint('Performing remote blast search\n')
searcher.remoteBlast( f_target, tmp_db, 'blastp', alignments=aln, e=e )
searcher.retrievePDBs()
## expects all.fasta
searcher.clusterFastaIterative( simCut=simCut, lenCut=simLen, ncpu=nCpu )
searcher.writeFastaClustered()
fn = searcher.saveClustered()
|
ostrokach/biskit
|
scripts/Mod/search_templates.py
|
Python
|
gpl-3.0
| 4,647
|
[
"BLAST"
] |
a5737262273482bc2e4d887a0437034db236613d663ddc19abfac646677e6b1d
|
#!/usr/bin/env python
##########################################################################
#
# Copyright 2011 Jose Fonseca
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
import json
import optparse
import re
import sys
def strip_object_hook(obj):
if '__class__' in obj:
return None
for name in obj.keys():
if name.startswith('__') and name.endswith('__'):
del obj[name]
return obj
class Visitor:
def visit(self, node, *args, **kwargs):
if isinstance(node, dict):
return self.visitObject(node, *args, **kwargs)
elif isinstance(node, list):
return self.visitArray(node, *args, **kwargs)
else:
return self.visitValue(node, *args, **kwargs)
def visitObject(self, node, *args, **kwargs):
pass
def visitArray(self, node, *args, **kwargs):
pass
def visitValue(self, node, *args, **kwargs):
pass
class Dumper(Visitor):
def __init__(self, stream = sys.stdout):
self.stream = stream
self.level = 0
def _write(self, s):
self.stream.write(s)
def _indent(self):
self._write(' '*self.level)
def _newline(self):
self._write('\n')
def visitObject(self, node):
self.enter_object()
members = node.keys()
members.sort()
for i in range(len(members)):
name = members[i]
value = node[name]
self.enter_member(name)
self.visit(value)
self.leave_member(i == len(members) - 1)
self.leave_object()
def enter_object(self):
self._write('{')
self._newline()
self.level += 1
def enter_member(self, name):
self._indent()
self._write('%s: ' % name)
def leave_member(self, last):
if not last:
self._write(',')
self._newline()
def leave_object(self):
self.level -= 1
self._indent()
self._write('}')
if self.level <= 0:
self._newline()
def visitArray(self, node):
self.enter_array()
for i in range(len(node)):
value = node[i]
self._indent()
self.visit(value)
if i != len(node) - 1:
self._write(',')
self._newline()
self.leave_array()
def enter_array(self):
self._write('[')
self._newline()
self.level += 1
def leave_array(self):
self.level -= 1
self._indent()
self._write(']')
def visitValue(self, node):
self._write(json.dumps(node))
class Comparer(Visitor):
def __init__(self, ignore_added = False, tolerance = 2.0 ** -24):
self.ignore_added = ignore_added
self.tolerance = tolerance
def visitObject(self, a, b):
if not isinstance(b, dict):
return False
if len(a) != len(b) and not self.ignore_added:
return False
ak = a.keys()
bk = b.keys()
ak.sort()
bk.sort()
if ak != bk and not self.ignore_added:
return False
for k in ak:
ae = a[k]
try:
be = b[k]
except KeyError:
return False
if not self.visit(ae, be):
return False
return True
def visitArray(self, a, b):
if not isinstance(b, list):
return False
if len(a) != len(b):
return False
for ae, be in zip(a, b):
if not self.visit(ae, be):
return False
return True
def visitValue(self, a, b):
if isinstance(a, float) or isinstance(b, float):
if a == 0:
return abs(b) < self.tolerance
else:
return abs((b - a)/a) < self.tolerance
else:
return a == b
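# Illustrative: with the default tolerance of 2.0 ** -24 (about 6e-8),
# Comparer().visitValue(1.0, 1.0 + 1e-9) is True (relative error 1e-9), while
# Comparer().visitValue(0.0, 1e-3) is False (absolute check when a == 0).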
class Differ(Visitor):
def __init__(self, stream = sys.stdout, ignore_added = False):
self.dumper = Dumper(stream)
self.comparer = Comparer(ignore_added = ignore_added)
def visit(self, a, b):
if self.comparer.visit(a, b):
return
Visitor.visit(self, a, b)
def visitObject(self, a, b):
if not isinstance(b, dict):
self.replace(a, b)
else:
self.dumper.enter_object()
names = set(a.keys())
if not self.comparer.ignore_added:
names.update(b.keys())
names = list(names)
names.sort()
for i in range(len(names)):
name = names[i]
ae = a.get(name, None)
be = b.get(name, None)
if not self.comparer.visit(ae, be):
self.dumper.enter_member(name)
self.visit(ae, be)
self.dumper.leave_member(i == len(names) - 1)
self.dumper.leave_object()
def visitArray(self, a, b):
if not isinstance(b, list):
self.replace(a, b)
else:
self.dumper.enter_array()
max_len = max(len(a), len(b))
for i in range(max_len):
try:
ae = a[i]
except IndexError:
ae = None
try:
be = b[i]
except IndexError:
be = None
self.dumper._indent()
if self.comparer.visit(ae, be):
self.dumper.visit(ae)
else:
self.visit(ae, be)
if i != max_len - 1:
self.dumper._write(',')
self.dumper._newline()
self.dumper.leave_array()
def visitValue(self, a, b):
if a != b:
self.replace(a, b)
def replace(self, a, b):
self.dumper.visit(a)
self.dumper._write(' -> ')
self.dumper.visit(b)
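# Illustrative: Differ().visit({'a': 1, 'b': 2}, {'a': 1, 'b': 3}) prints only
# the differing member, rendered inside braces as "b: 2 -> 3".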
#
# Unfortunately the JSON standard does not include comments, but they are a
# quite useful feature to have in regression tests
#
_token_res = [
r'//[^\r\n]*', # comment
r'"[^"\\]*(\\.[^"\\]*)*"', # string
]
_tokens_re = re.compile(r'|'.join(['(' + token_re + ')' for token_re in _token_res]), re.DOTALL)
def _strip_comment(mo):
if mo.group(1):
return ''
else:
return mo.group(0)
def _strip_comments(data):
'''Strip (non-standard) JSON comments.'''
return _tokens_re.sub(_strip_comment, data)
assert _strip_comments('''// a comment
"// a comment in a string
"''') == '''
"// a comment in a string
"'''
def load(stream, strip_images = True, strip_comments = True):
if strip_images:
object_hook = strip_object_hook
else:
object_hook = None
if strip_comments:
data = stream.read()
data = _strip_comments(data)
return json.loads(data, strict=False, object_hook = object_hook)
else:
return json.load(stream, strict=False, object_hook = object_hook)
def main():
optparser = optparse.OptionParser(
usage="\n\t%prog [options] <ref_json> <src_json>")
optparser.add_option(
'--keep-images',
action="store_false", dest="strip_images", default=True,
help="compare images")
(options, args) = optparser.parse_args(sys.argv[1:])
if len(args) != 2:
optparser.error('incorrect number of arguments')
a = load(open(args[0], 'rt'), options.strip_images)
b = load(open(args[1], 'rt'), options.strip_images)
if False:
dumper = Dumper()
dumper.visit(a)
differ = Differ()
differ.visit(a, b)
if __name__ == '__main__':
main()
|
PeterLValve/apitrace
|
scripts/jsondiff.py
|
Python
|
mit
| 8,804
|
[
"VisIt"
] |
ef83ceaf62e7f56babc9c6af09b3c399ef1865c9bae1329ced57546743507101
|
# Settings for Zulip Voyager
### MANDATORY SETTINGS
#
# These settings MUST be set in production. In a development environment,
# sensible default values will be used.
# The user-accessible Zulip hostname for this installation, e.g.
# zulip.example.com
EXTERNAL_HOST = 'zulip.example.com'
# The email address for the person or team who maintain the Zulip
# Voyager installation. Will also get support emails. (e.g. zulip-admin@example.com)
ZULIP_ADMINISTRATOR = 'zulip-admin@example.com'
# The domain for your organization, e.g. example.com
ADMIN_DOMAIN = 'example.com'
# Enable at least one of the following authentication backends.
AUTHENTICATION_BACKENDS = (
# 'zproject.backends.EmailAuthBackend', # Email and password; see SMTP setup below
# 'zproject.backends.ZulipRemoteUserBackend', # Local SSO
# 'zproject.backends.GoogleMobileOauth2Backend', # Google Apps, setup below
# 'zproject.backends.ZulipLDAPAuthBackend', # LDAP, setup below
)
# Google OAuth requires a bit of configuration; you will need to
# do the following:
#
# (1) Visit https://console.developers.google.com, setup an
# Oauth2 client ID that allows redirects to
# e.g. https://zulip.example.com/accounts/login/google/done/.
#
# (2) Then click into the APIs and Auth section (in the sidebar on the
# left side of the page), APIs, then under "Social APIs" click on
# "Google+ API" and click the button to enable the API.
#
# (3) put your client secret as "google_oauth2_client_secret" in
# zulip-secrets.conf, and your client ID right here:
# GOOGLE_OAUTH2_CLIENT_ID=<your client ID from Google>
# If you are using the ZulipRemoteUserBackend authentication backend,
# set this to your domain (e.g. if REMOTE_USER is "username" and the
# corresponding email address is "username@example.com", set
# SSO_APPEND_DOMAIN = "example.com")
SSO_APPEND_DOMAIN = None
# Configure the outgoing SMTP server below. The default configuration
# is prepopulated for GMail servers. Change EMAIL_HOST for other
# hosts, or leave it unset or empty to skip sending email. Note if
# you are using a GMail account to send outgoing email, you will
# likely need to configure that account as "less secure" here:
# https://support.google.com/accounts/answer/6010255.
#
# With the exception of reading EMAIL_HOST_PASSWORD from the Zulip
# secrets file, Zulip uses Django's standard EmailBackend, so if
# you're having issues, you may want to search for documentation on
# using your email provider with Django.
#
# A common problem you may encounter when trying to get this working
# is that some hosting providers block outgoing SMTP traffic.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = ''
# If you're using password auth, you will need to put the password in
# /etc/zulip/zulip-secrets.conf as email_password.
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# The email From address to be used for automatically generated emails
DEFAULT_FROM_EMAIL = "Zulip <zulip@example.com>"
# The noreply address to be used as Reply-To for certain generated emails.
# Messages sent to this address should not be delivered anywhere.
NOREPLY_EMAIL_ADDRESS = "noreply@example.com"
# A list of strings representing the host/domain names that this
# Django site can serve. You should reset it to be a list of
# domains/IP addresses for your site. This is a security measure to
# prevent an attacker from poisoning caches and triggering password
# reset emails with links to malicious hosts by submitting requests
# with a fake HTTP Host header.
ALLOWED_HOSTS = ['*']
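# Example (illustrative values): ALLOWED_HOSTS = ['zulip.example.com', '192.0.2.10']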
### OPTIONAL SETTINGS
# Controls whether session cookies expire when the browser closes
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# Session cookie expiry in seconds after the last page load
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # 2 weeks
# Controls whether or not there is a feedback button in the UI.
ENABLE_FEEDBACK = False
# By default, the feedback button will submit feedback to the Zulip
# developers. If you set FEEDBACK_EMAIL to be an email address
# (e.g. ZULIP_ADMINISTRATOR), feedback sent by your users will instead
# be sent to that email address.
FEEDBACK_EMAIL = ZULIP_ADMINISTRATOR
# Controls whether or not error reports are sent to Zulip. Error
# reports are used to improve the quality of the product and do not
# include message contents; please contact Zulip support with any
# questions.
ERROR_REPORTING = True
# Controls whether or not Zulip will provide inline image preview when
# a link to an image is referenced in a message.
INLINE_IMAGE_PREVIEW = True
# By default, files uploaded by users and user avatars are stored
# directly on the Zulip server. If file storage in Amazon S3 is
# desired, you can configure that by setting s3_key and s3_secret_key
# in /etc/zulip/zulip-secrets.conf to be the S3 access and secret keys
# that you want to use, and setting the S3_AUTH_UPLOADS_BUCKET and
# S3_AVATAR_BUCKET to be the S3 buckets you've created to store file
# uploads and user avatars, respectively.
LOCAL_UPLOADS_DIR = "/home/zulip/uploads"
# Controls whether name changes are completely disabled for this installation
# This is useful in settings where you're syncing names from an integrated LDAP/Active Directory
NAME_CHANGES_DISABLED = False
# Controls whether users who have not uploaded an avatar will receive an avatar
# from gravatar.com.
ENABLE_GRAVATAR = True
# To override the default avatar image if ENABLE_GRAVATAR is False, place your
# custom default avatar image at /home/zulip/local-static/default-avatar.png
# and uncomment the following line.
#DEFAULT_AVATAR_URI = '/local-static/default-avatar.png'
### TWITTER INTEGRATION
# Zulip supports showing inline Tweet previews when a tweet is linked
# to in a message. To support this, Zulip must have access to the
# Twitter API via OAuth. To obtain the various access tokens needed
# below, you must register a new application under your Twitter
# account by doing the following:
#
# 1. Log in to http://dev.twitter.com.
# 2. In the menu under your username, click My Applications. From this page, create a new application.
# 3. Click on the application you created and click "create my access token".
# 4. Fill in the values for twitter_consumer_key, twitter_consumer_secret, twitter_access_token_key,
# and twitter_access_token_secret in /etc/zulip/zulip-secrets.conf.
### EMAIL GATEWAY INTEGRATION
# The email gateway provides, for each stream, an email address that
# you can send email to in order to have the email's content be posted
# to that stream. Emails received at the per-stream email address
# will be converted into a Zulip message.
# There are two ways to make use of local email mirroring:
# 1. Local delivery: An MTA runs locally and passes mail directly to Zulip
# 2. Polling: Checks an IMAP inbox every minute for new messages.
# A Puppet manifest for local delivery via Postfix is available in
# puppet/zulip/manifests/postfix_localmail.pp. To use the manifest, add it to
# puppet_classes in /etc/zulip/zulip.conf. This manifest assumes you'll receive
# mail addressed to the hostname of your Zulip server.
#
# Users of other mail servers will need to configure it to pass mail to the
# email mirror; see `python manage.py email-mirror --help` for details.
# The email address pattern to use for auto-generated stream emails
# The %s will be replaced with a unique token, and the resulting email
# must be delivered to the EMAIL_GATEWAY_IMAP_FOLDER of the
# EMAIL_GATEWAY_LOGIN account below, or piped in to the email-mirror management
# command as indicated above.
#
# Example: zulip+%s@example.com
EMAIL_GATEWAY_PATTERN = ""
# The following options are relevant if you're using mail polling.
#
# A sample cron job for mail polling is available at puppet/zulip/files/cron.d/email-mirror
#
# The Zulip username of the bot that the email pattern should post as.
# Example: emailgateway@example.com
EMAIL_GATEWAY_BOT = ""
# Configuration of the email mirror mailbox
# The IMAP login and password
EMAIL_GATEWAY_LOGIN = ""
EMAIL_GATEWAY_PASSWORD = ""
# The IMAP server & port to connect to
EMAIL_GATEWAY_IMAP_SERVER = ""
EMAIL_GATEWAY_IMAP_PORT = 993
# The IMAP folder name to check for emails. All emails sent to EMAIL_GATEWAY_PATTERN above
# must be delivered to this folder
EMAIL_GATEWAY_IMAP_FOLDER = "INBOX"
### LDAP integration configuration
# Zulip supports retrieving information about users via LDAP, and
# optionally using LDAP as an authentication mechanism. For using
# LDAP authentication, you will need to enable the
# zproject.backends.ZulipLDAPAuthBackend auth backend in
# AUTHENTICATION_BACKENDS above.
import ldap
from django_auth_ldap.config import LDAPSearch, GroupOfNamesType
# URI of your LDAP server. If set, LDAP is used to prepopulate a user's name in
# Zulip. Example: "ldaps://ldap.example.com"
AUTH_LDAP_SERVER_URI = ""
# This DN and password will be used to bind to your server. If unset, anonymous
# binds are performed.
AUTH_LDAP_BIND_DN = ""
AUTH_LDAP_BIND_PASSWORD = ""
# Specify the search base and the property to filter on that corresponds to the
# username.
AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=users,dc=example,dc=com",
ldap.SCOPE_SUBTREE, "(uid=%(user)s)")
# If the value of a user's "uid" (or similar) property is not their email
# address, specify the domain to append here.
LDAP_APPEND_DOMAIN = ADMIN_DOMAIN
# This map defines how to populate attributes of a Zulip user from LDAP.
AUTH_LDAP_USER_ATTR_MAP = {
# Populate the Django user's name from the LDAP directory.
"full_name": "cn",
}
CAMO_URI = ''
|
zachallaun/zulip
|
zproject/local_settings_template.py
|
Python
|
apache-2.0
| 9,628
|
[
"VisIt"
] |
ac3d3fa82130cbf02ff10f884c6905ad601582c2b41bd3680c40e97aa2962778
|
# $Id$
#
# Copyright (C) 2001-2008 greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""utility functions for clustering
"""
def GetNodeList(cluster):
"""returns an ordered list of all nodes below cluster
the ordering is done using the lengths of the child nodes
**Arguments**
- cluster: the cluster in question
**Returns**
- a list of the leaves below this cluster
"""
if len(cluster) == 1:
return [cluster]
else:
children = cluster.GetChildren()
children.sort(key=lambda x: len(x), reverse=True)
res = []
for child in children:
res += GetNodeList(child)
res += [cluster]
return res
def GetNodesDownToCentroids(cluster, above=1):
"""returns an ordered list of all nodes below cluster
"""
if hasattr(cluster, '_isCentroid'):
cluster._aboveCentroid = 0
above = -1
else:
cluster._aboveCentroid = above
if len(cluster) == 1:
return [cluster]
else:
res = []
children = cluster.GetChildren()
children.sort(key=lambda x: len(x), reverse=True)
# children.sort(lambda x, y: cmp(len(y), len(x)))
for child in children:
res = res + GetNodesDownToCentroids(child, above)
res = res + [cluster]
return res
def FindClusterCentroidFromDists(cluster, dists):
""" find the point in a cluster which has the smallest summed
Euclidean distance to all others
**Arguments**
- cluster: the cluster to work with
- dists: the distance matrix to use for the points
**Returns**
- the index of the centroid point
"""
children = cluster.GetPoints()
pts = [x.GetData() for x in children]
best = 1e24
bestIdx = -1
for pt in pts:
dAccum = 0.0
# loop over others and add'em up
for other in pts:
if other != pt:
if other > pt:
row, col = pt, other
else:
row, col = other, pt
dAccum += dists[col * (col - 1) // 2 + row]
if dAccum >= best:
# minor efficiency hack
break
if dAccum < best:
best = dAccum
bestIdx = pt
for i in range(len(pts)):
pt = pts[i]
if pt != bestIdx:
if pt > bestIdx:
row, col = bestIdx, pt
else:
row, col = pt, bestIdx
children[i]._distToCenter = dists[col * (col - 1) // 2 + row]
else:
children[i]._distToCenter = 0.0
children[i]._clustCenter = bestIdx
cluster._clustCenter = bestIdx
cluster._distToCenter = 0.0
return bestIdx
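# Worked example of the condensed-matrix index used above (illustrative only):
# for 4 points the lower-triangle distances are stored in the order
# (0,1), (0,2), (1,2), (0,3), (1,3), (2,3), so the distance between points
# row=1 and col=3 lives at dists[3 * (3 - 1) // 2 + 1] == dists[4].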
def _BreadthFirstSplit(cluster, n):
""" *Internal Use Only*
"""
if len(cluster) < n:
raise ValueError('Cannot split cluster of length %d into %d pieces' % (len(cluster), n))
if len(cluster) == n:
return cluster.GetPoints()
clusters = [cluster]
nxtIdx = 0
for _ in range(n - 1):
while nxtIdx < len(clusters) and len(clusters[nxtIdx]) == 1:
nxtIdx += 1
assert nxtIdx < len(clusters)
children = clusters[nxtIdx].GetChildren()
children.sort(key=lambda x: x.GetMetric(), reverse=True)
for child in children:
clusters.append(child)
del clusters[nxtIdx]
return clusters
def _HeightFirstSplit(cluster, n):
""" *Internal Use Only*
"""
if len(cluster) < n:
raise ValueError('Cannot split cluster of length %d into %d pieces' % (len(cluster), n))
if len(cluster) == n:
return cluster.GetPoints()
clusters = [cluster]
for _ in range(n - 1):
nxtIdx = 0
while nxtIdx < len(clusters) and len(clusters[nxtIdx]) == 1:
nxtIdx += 1
assert nxtIdx < len(clusters)
children = clusters[nxtIdx].GetChildren()
for child in children:
clusters.append(child)
del clusters[nxtIdx]
clusters.sort(key=lambda x: x.GetMetric(), reverse=True)
return clusters
def SplitIntoNClusters(cluster, n, breadthFirst=True):
""" splits a cluster tree into a set of branches
**Arguments**
- cluster: the root of the cluster tree
- n: the number of clusters to include in the split
- breadthFirst: toggles breadth first (vs depth first) cleavage
of the cluster tree.
**Returns**
- a list of sub clusters
"""
if breadthFirst:
return _BreadthFirstSplit(cluster, n)
else:
return _HeightFirstSplit(cluster, n)
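# Illustrative note: SplitIntoNClusters(root, n) opens internal nodes until n
# branches remain. The breadth-first variant walks the working list left to
# right (children are appended at the end), while the height-first variant
# re-sorts the list by GetMetric() after every split and so always opens the
# largest-metric node next.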
|
ptosco/rdkit
|
rdkit/ML/Cluster/ClusterUtils.py
|
Python
|
bsd-3-clause
| 4,413
|
[
"RDKit"
] |
feadea0bbee542253218bfbc41f5516bd3ced24cbe43dab5b0da9928255f229e
|
"""
.. _point_spread:
Corrupt known signal with point spread
======================================
The aim of this tutorial is to demonstrate how to put a known signal at a
desired location(s) in a :class:`mne.SourceEstimate` and then corrupt the
signal with point-spread by applying a forward and inverse solution.
"""
import os.path as op
import numpy as np
from mayavi import mlab
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.simulation import simulate_stc, simulate_evoked
###############################################################################
# First, we set some parameters.
seed = 42
# parameters for inverse method
method = 'sLORETA'
snr = 3.
lambda2 = 1.0 / snr ** 2
# signal simulation parameters
# do not add extra noise to the known signals
evoked_snr = np.inf
T = 100
times = np.linspace(0, 1, T)
dt = times[1] - times[0]
# Paths to MEG data
data_path = sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-oct-6-fwd.fif')
fname_inv = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-oct-6-meg-fixed-inv.fif')
fname_evoked = op.join(data_path, 'MEG', 'sample',
'sample_audvis-ave.fif')
###############################################################################
# Load the MEG data
# -----------------
fwd = mne.read_forward_solution(fname_fwd, force_fixed=True,
surf_ori=True)
fwd['info']['bads'] = []
inv_op = read_inverse_operator(fname_inv)
raw = mne.io.RawFIF(op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw.fif'))
events = mne.find_events(raw)
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2}
epochs = mne.Epochs(raw, events, event_id, baseline=(None, 0), preload=True)
epochs.info['bads'] = []
evoked = epochs.average()
labels = mne.read_labels_from_annot('sample', subjects_dir=subjects_dir)
label_names = [l.name for l in labels]
n_labels = len(labels)
###############################################################################
# Estimate the background noise covariance from the baseline period
# -----------------------------------------------------------------
cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)
###############################################################################
# Generate sinusoids in two spatially distant labels
# --------------------------------------------------
# The known signal is all zeros outside of the two labels of interest
signal = np.zeros((n_labels, T))
idx = label_names.index('inferiorparietal-lh')
signal[idx, :] = 1e-7 * np.sin(5 * 2 * np.pi * times)
idx = label_names.index('rostralmiddlefrontal-rh')
signal[idx, :] = 1e-7 * np.sin(7 * 2 * np.pi * times)
###############################################################################
# Find the center vertices in source space of each label
# ------------------------------------------------------
#
# We want the known signal in each label to only be active at the center. We
# create a mask for each label that is 1 at the center vertex and 0 at all
# other vertices in the label. This mask is then used when simulating
# source-space data.
hemi_to_ind = {'lh': 0, 'rh': 1}
for i, label in enumerate(labels):
# The `center_of_mass` function needs labels to have values.
labels[i].values.fill(1.)
# Restrict the eligible vertices to be those on the surface under
# consideration and within the label.
surf_vertices = fwd['src'][hemi_to_ind[label.hemi]]['vertno']
restrict_verts = np.intersect1d(surf_vertices, label.vertices)
com = labels[i].center_of_mass(subject='sample',
subjects_dir=subjects_dir,
restrict_vertices=restrict_verts,
surf='white')
# Convert the center-of-mass vertex index from the surface vertex list to
# the Label's vertex list.
cent_idx = np.where(label.vertices == com)[0][0]
# Create a mask with 1 at center vertex and zeros elsewhere.
labels[i].values.fill(0.)
labels[i].values[cent_idx] = 1.
###############################################################################
# Create source-space data with known signals
# -------------------------------------------
#
# Put known signals onto surface vertices using the array of signals and
# the label masks (stored in labels[i].values).
stc_gen = simulate_stc(fwd['src'], labels, signal, times[0], dt,
value_fun=lambda x: x)
###############################################################################
# Plot original signals
# ---------------------
#
# Note that the original signals are highly concentrated (point) sources.
#
kwargs = dict(subjects_dir=subjects_dir, hemi='split', smoothing_steps=4,
time_unit='s', initial_time=0.05, size=1200,
views=['lat', 'med'])
clim = dict(kind='value', pos_lims=[1e-9, 1e-8, 1e-7])
figs = [mlab.figure(1), mlab.figure(2), mlab.figure(3), mlab.figure(4)]
brain_gen = stc_gen.plot(clim=clim, figure=figs, **kwargs)
###############################################################################
# Simulate sensor-space signals
# -----------------------------
#
# Use the forward solution and add Gaussian noise to simulate sensor-space
# (evoked) data from the known source-space signals. The amount of noise is
# controlled by `evoked_snr` (higher values imply less noise).
#
evoked_gen = simulate_evoked(fwd, stc_gen, evoked.info, cov, evoked_snr,
tmin=0., tmax=1., random_state=seed)
# Map the simulated sensor-space data to source-space using the inverse
# operator.
stc_inv = apply_inverse(evoked_gen, inv_op, lambda2, method=method)
###############################################################################
# Plot the point-spread of corrupted signal
# -----------------------------------------
#
# Notice that after applying the forward and inverse operators to the known
# point sources, the point sources have spread across the source space. This
# spread is due to the minimum norm solution, which leaks the signal to nearby
# vertices with similar orientations, so the signal ends up crossing the
# sulci and gyri.
figs = [mlab.figure(5), mlab.figure(6), mlab.figure(7), mlab.figure(8)]
brain_inv = stc_inv.plot(figure=figs, **kwargs)
###############################################################################
# Exercises
# ---------
# - Change the `method` parameter to either `dSPM` or `MNE` to explore the
# effect of the inverse method.
# - Try setting `evoked_snr` to a small, finite value, e.g. 3., to see the
# effect of noise.
|
nicproulx/mne-python
|
tutorials/plot_point_spread.py
|
Python
|
bsd-3-clause
| 6,797
|
[
"Gaussian",
"Mayavi"
] |
b7eb4454ab07b2fa293585fa7f48945ca7f09f326b909c943ab97b31c416f624
|
## Absolute location where all raw files are
RAWDATA_DIR = '/home/cmb-06/as/skchoudh/dna/Oct_10_2016_HuR_Human_Mouse_Liver/rna-seq/Penalva_L_08182016/human'
## Output directory
OUT_DIR = '/staging/as/skchoudh/Oct_10_2016_HuR_Human_Mouse_Liver/RNA-Seq_human'
## Absolute location to 're-ribo/scripts' directory
SRC_DIR = '/home/cmb-panasas2/skchoudh/github_projects/re-ribo/scripts'
## Genome fasta location
GENOME_FASTA = '/home/cmb-panasas2/skchoudh/genomes/hg38/fasta/hg38.fa'
## Chromosome sizes location
CHROM_SIZES = '/home/cmb-panasas2/skchoudh/genomes/hg38/fasta/hg38.chrom.sizes'
## Path to STAR index (will be generated if does not exist)
STAR_INDEX = '/home/cmb-panasas2/skchoudh/genomes/hg38/star_annotated'
## GTF path
GTF = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.annotation.without_rRNA_tRNA.gtf'
## GenePred bed downloaded from UCSC
## (this is only used for inferring the type of experiment, i.e. stranded
## vs. non-stranded, and hence is not strictly required)
GENE_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v24.genes.bed'
## Path to bed file with start codon coordinates
START_CODON_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.start_codon.bed'
## Path to bed file with stop codon coordinates
STOP_CODON_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.stop_codon.bed'
## Path to bed file containing CDS coordinates
CDS_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.cds.bed'
# We don't have these so just use CDs bed to get the pipeline running
UTR5_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.UTR5.bed'
UTR3_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.UTR3.bed'
## Name of python2 environment
## The following package needs to be installed in that environment
## numpy scipy matplotlib seaborn pysam pybedtools htseq
## you can do: conda create -n python2 python=2 && source activate python2 && conda install numpy scipy matplotlib seaborn pysam pybedtools htseq
PYTHON2ENV = 'python2'
############################################Do Not Edit#############################################
HTSEQ_STRANDED = 'yes'
FEATURECOUNTS_S = '-s 1'
FEATURECOUNTS_T = 'CDS'
HTSEQ_MODE = 'intersection-strict'
|
saketkc/ribo-seq-snakemake
|
configs/Oct_10_2016_HuR_Human_rna.py
|
Python
|
bsd-3-clause
| 2,343
|
[
"HTSeq",
"pysam"
] |
b348557fe16a6b8833f944f40f9fdb32374b165727381e1dbd860ea3dea1d0ca
|
# pylint: disable=missing-docstring, invalid-name
import unittest
import numpy as np
from obsoper.exceptions import StepNotFound
from obsoper import (cell,
cursors,
walk)
class TestWalk(unittest.TestCase):
def setUp(self):
longitudes, latitudes = np.meshgrid([100, 104, 106, 107],
[80, 83, 84],
indexing="ij")
self.fixture = walk.Walk.from_lonlats(longitudes,
latitudes)
def test_query_given_multiple_locations_and_starting_indices(self):
longitudes, latitudes = np.array([100, 106.1]), np.array([80, 83])
i, j = np.array([1, 1]), np.array([1, 1])
result = self.fixture.query(longitudes, latitudes, i, j)
expect = np.array([0, 2]), np.array([0, 1])
np.testing.assert_array_equal(expect, result)
def test_grid_walk_given_point_in_maximum_grid_cell(self):
self.check_walk((106.9, 83.9), i=0, j=0, expect=(2, 1))
def test_grid_walk_given_point_one_grid_cell_north(self):
self.check_walk((100.1, 83.9), i=0, j=0, expect=(0, 1))
def test_grid_walk_given_point_two_grid_cells_east(self):
self.check_walk((106.9, 80), i=0, j=0, expect=(2, 0))
def test_grid_walk_starting_at_north_east_corner_travelling_south(self):
self.check_walk((100, 80), i=2, j=1, expect=(0, 0))
def test_grid_walk_given_i_maximum_performs_search(self):
self.check_walk((100, 80), i=3, j=0, expect=(0, 0))
def test_grid_walk_given_j_maximum_performs_search(self):
self.check_walk((100, 80), i=0, j=2, expect=(0, 0))
def check_walk(self, point, i, j, expect):
result = self.fixture.query_one(point, i, j)
np.testing.assert_array_almost_equal(expect, result)
def test_walk_across_northfold(self):
# North fold along 100th meridian
lon0, dlon = 100, 0.5
lat0, lat1, lat2 = 80, 81, 82
# Neighbouring grid boxes either side of fold
longitudes = np.array([[lon0 + dlon, lon0],
[lon0 + dlon, lon0],
[lon0 - dlon, lon0],
[lon0 - dlon, lon0],
[lon0 - dlon, lon0]])
latitudes = np.array([[lat1, lat1],
[lat0, lat0],
[lat0, lat0],
[lat1, lat1],
[lat2, lat2]])
# Point in center of second grid box
point = (lon0 - dlon / 2., (lat1 + lat0) / 2.)
fixture = walk.Walk.tripolar(longitudes, latitudes, fold_index=1)
result = fixture.query_one(point, i=0, j=0)
expect = (2, 0)
self.assertEqual(expect, result)
def test_walk_across_dateline(self):
"""Stepping algorithms should walk in direction of point"""
longitudes, latitudes = np.meshgrid([178, 179, -179, -178],
[10, 20, 30, 40],
indexing="ij")
fixture = walk.Walk.from_lonlats(longitudes,
latitudes)
point = (-178.5, 25)
fixture = walk.Walk.from_lonlats(longitudes, latitudes)
result = fixture.query_one(point, i=0, j=1)
expect = (2, 1)
self.assertEqual(expect, result)
def test_diagonal_stepping_edge_case(self):
"""Extremely rare edge case consisting of diagonal neighbours
neither of which contain the point. And whose displacement vectors
produce (di, dj) pairs both of which are greater than 1.
.. note:: case taken directly from orca grid and real observation
"""
longitudes = np.array([[82.96250916, 78.00720978, 73.],
[83.98399353, 78.52962494, 73.],
[85.23661041, 79.1739502, 73.]])
latitudes = np.array([[88.83627319, 88.84931946, 88.85367584],
[88.94418335, 88.95856476, 88.96337891],
[89.05162811, 89.06764984, 89.07302094]])
point = (78.830612, 88.958031)
fixture = walk.Walk.from_lonlats(longitudes, latitudes)
result = fixture.query_one(point, i=0, j=0)
expect = (1, 0)
self.assertEqual(expect, result)
def test_point_above_line_edge_case(self):
"""Simple edge case taken from real data that produces an infinite loop
.. note:: case taken directly from orca grid and real observation
"""
longitudes = np.array([[-122.912, -121.936, -120.955, -119.968],
[-122.601, -121.643, -120.679, -119.711],
[-122.301, -121.360, -120.413, -119.462],
[-122.011, -121.086, -120.156, -119.222]])
latitudes = np.array([[84.358, 84.387, 84.414, 84.439],
[84.258, 84.286, 84.312, 84.337],
[84.158, 84.185, 84.211, 84.235],
[84.057, 84.085, 84.110, 84.134]])
point = (-121.27652, 84.300476)
fixture = walk.Walk.from_lonlats(longitudes, latitudes)
result = fixture.query_one(point, i=2, j=1)
expect = (0, 1)
self.assertEqual(expect, result)
def test_point_inside_great_circle_and_lonlat_line(self):
"""Great circle segments are not straight lines in longitude/latitude
space.
.. note:: Case taken directly from orca grid and real observation.
The effect is sensitive to the precision of the numbers.
"""
longitudes = np.array([[-125.14778137, -123.9805603],
[-124.73101807, -123.58721924],
[-124.33137512, -123.21022034]])
latitudes = np.array([[85.28964233, 85.32194519],
[85.18976593, 85.22135925],
[85.08974457, 85.12065887]])
point = (-124.52118, 85.195602)
fixture = walk.Walk.from_lonlats(longitudes, latitudes)
result = fixture.query_one(point, i=2, j=0)
expect = (2, 0)
self.assertEqual(expect, result)
class TestWalkStep(unittest.TestCase):
def setUp(self):
ni, nj = 4, 3
longitudes, latitudes = np.meshgrid([100, 104, 106, 107],
[80, 83, 84],
indexing="ij")
self.cells = cell.Collection.from_lonlats(longitudes, latitudes)
self.cursor = cursors.Cursor(ni, nj)
self.fixture = walk.Walk(self.cells, self.cursor)
def test_query_one_given_point_in_cell_0_0(self):
self.check_query_one((100, 80), (0, 0))
def test_query_one_given_point_in_cell_1_0(self):
self.check_query_one((104.1, 80), (1, 0))
def test_query_one_given_point_in_cell_0_1(self):
self.check_query_one((100, 83.1), (0, 1))
def test_query_one_given_point_in_northernmost_cell(self):
self.check_query_one((100, 84), (0, 1))
def test_query_one_given_point_in_easternmost_cell(self):
self.check_query_one((107, 80), (2, 0))
def check_query_one(self, point, expect):
result = self.fixture.query_one(point)
self.assertEqual(expect, result)
def test_direction_given_east_point(self):
self.check_direction(0, 0, (107, 80), (1, 0))
def test_direction_given_north_point(self):
self.check_direction(0, 0, (100, 84), (0, 1))
def test_direction_given_west_point(self):
self.check_direction(1, 0, (100, 80), (-1, 0))
def test_direction_given_south_point(self):
self.check_direction(1, 1, (104, 80), (0, -1))
def check_direction(self, i, j, point, expect):
result = self.fixture.direction(self.cells[i, j], point)
self.assertEqual(expect, result)
def test_direction_given_parallelogram_cell_intersecting_0E(self):
longitudes = np.array([[-0.12521362, 0.27581787],
[0.95932007, 1.37954712]])
latitudes = np.array([[84.45360565, 84.56639099],
[84.41712189, 84.52923584]])
i, j = 0, 0
point = (0.90654248, 84.620514)
cells = cell.Collection.from_lonlats(longitudes, latitudes)
cursor = cursors.Cursor(ni=2, nj=2)
fixture = walk.Walk(cells, cursor)
result = fixture.direction(cells[i, j], point)
expect = (0, 1)
self.assertEqual(expect, result)
class TestNextStep(unittest.TestCase):
def setUp(self):
self.vertices = np.array([(0., 0.),
(1., 0.),
(1., 1.),
(0., 1.)],
dtype=np.double)
self.center = (0.5, 0.5)
self.due_north = (0.1, 1.1)
self.due_south = (0.1, -0.1)
self.due_east = (1.1, 0.1)
self.due_west = (-0.1, 0.1)
self.north = (0, 1)
self.south = (0, -1)
self.east = (1, 0)
self.west = (-1, 0)
self.inside_arc = (0.5, 1.00001)
self.inside_box = (0.5, 0.5)
def test_next_step_given_point_north_returns_north(self):
self.check_next_step(self.due_north, self.north)
def test_next_step_given_point_south_returns_south(self):
self.check_next_step(self.due_south, self.south)
def test_next_step_given_point_east_returns_east(self):
self.check_next_step(self.due_east, self.east)
def test_next_step_given_point_west_returns_west(self):
self.check_next_step(self.due_west, self.west)
def test_next_step_given_point_inside_great_circle_raises_exception(self):
with self.assertRaises(StepNotFound):
walk.next_step(self.vertices, self.center, self.inside_arc)
def test_next_step_given_point_inside_box_raises_exception(self):
with self.assertRaises(StepNotFound):
walk.next_step(self.vertices, self.center, self.inside_box)
def check_next_step(self, position, expect):
result = walk.next_step(self.vertices, self.center, position)
self.assertEqual(expect, result)
|
met-office-ocean/obsoper
|
obsoper/test/test_walk.py
|
Python
|
bsd-3-clause
| 10,332
|
[
"ORCA"
] |
c18cfe94bfd948ad4c8a42d686fe0b946ee1d822a07546e61e38bf089999630b
|
#!/usr/bin/env python
#from sumatra.projects import load_project
#from sumatra.parameters import build_parameters
#from sumatra.decorators import capture
from ruffus import *
import sys
import os
import time
import datetime
import drmaa
from omics_pipe.utils import *
from omics_pipe.parameters.default_parameters import default_parameters
from omics_pipe.modules.fastqc import fastqc
from omics_pipe.modules.bwa import bwa_mem
from omics_pipe.modules.picard_mark_duplicates import picard_mark_duplicates
from omics_pipe.modules.GATK_preprocessing_WGS import GATK_preprocessing_WGS
from omics_pipe.modules.GATK_variant_discovery_group import GATK_variant_discovery_group
from omics_pipe.modules.GATK_variant_joint_analysis import GATK_variant_joint_analysis
from omics_pipe.modules.GATK_variant_filtering import GATK_variant_filtering_group
p = Bunch(default_parameters)
os.chdir(p.WORKING_DIR)
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d %H:%M")
print p
for step in p.STEPS:
vars()['inputList_' + step] = []
for sample in p.SAMPLE_LIST:
vars()['inputList_' + step].append([sample, "%s/%s_%s_completed.flag" % (p.FLAG_PATH, step, sample)])
print vars()['inputList_' + step]
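# Illustrative: with p.SAMPLE_LIST = ['s1', 's2'] and step 'fastqc', the loop
# above builds inputList_fastqc == [['s1', '<FLAG_PATH>/fastqc_s1_completed.flag'],
# ['s2', '<FLAG_PATH>/fastqc_s2_completed.flag']], which @parallel then feeds
# pairwise to run_fastqc(sample, fastqc_flag).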
#FASTQC
@parallel(inputList_fastqc)
@check_if_uptodate(check_file_exists)
def run_fastqc(sample, fastqc_flag):
fastqc(sample, fastqc_flag)
return
#BWA
@parallel(inputList_bwa_mem)
@check_if_uptodate(check_file_exists)
def run_bwa_mem(sample, bwa_mem_flag):
bwa_mem(sample, bwa_mem_flag)
return
#picard_mark_duplicates
@parallel(inputList_picard_mark_duplicates)
@check_if_uptodate(check_file_exists)
@follows(run_bwa_mem)
def run_picard_mark_duplicates(sample, picard_mark_duplicates_flag):
picard_mark_duplicates(sample, picard_mark_duplicates_flag)
return
#GATK_preprocessing
@parallel(inputList_GATK_preprocessing_WGS)
@check_if_uptodate(check_file_exists)
@follows(run_picard_mark_duplicates)
def run_GATK_preprocessing_WGS(sample, GATK_preprocessing_WGS_flag):
GATK_preprocessing_WGS(sample, GATK_preprocessing_WGS_flag)
return
#GATK group variant_discovery
@parallel(inputList_GATK_variant_discovery_group)
@check_if_uptodate(check_file_exists)
@follows(run_GATK_preprocessing_WGS)
def run_GATK_variant_discovery_group(sample, GATK_variant_discovery_group_flag):
GATK_variant_discovery_group(sample, GATK_variant_discovery_group_flag)
return
#GATK variant joint analysis
@parallel([["combined", "%s/GATK_variant_joint_analysis_flag" % (p.FLAG_PATH)]])
@check_if_uptodate(check_file_exists)
@follows(run_GATK_variant_discovery_group)
def run_GATK_variant_joint_analysis(sample, GATK_variant_joint_analysis_flag):
GATK_variant_joint_analysis(sample, GATK_variant_joint_analysis_flag)
return
#GATK_filter_variants
@parallel([["combined", "%s/GATK_variant_filtering_group_flag" % (p.FLAG_PATH)]])
@check_if_uptodate(check_file_exists)
@follows(run_GATK_variant_joint_analysis)
def run_GATK_variant_filtering_group(sample, GATK_variant_filtering_group_flag):
GATK_variant_filtering_group(sample, GATK_variant_filtering_group_flag)
return
@parallel([["combined", "%s/last_function_flag" % (p.FLAG_PATH)]])
@check_if_uptodate(check_file_exists)
@follows(run_GATK_variant_filtering_group, run_fastqc)
def last_function(sample, last_function_flag):
print "PIPELINE HAS FINISHED SUCCESSFULLY!!! YAY!"
pipeline_graph_output = p.FLAG_PATH + "/pipeline_" + sample + "_" + str(date) + ".pdf"
#pipeline_printout_graph (pipeline_graph_output,'pdf', step, no_key_legend=False)
stage = "last_function"
flag_file = "%s/%s_%s_completed.flag" % (p.FLAG_PATH, stage, sample)
open(flag_file, 'w').close()
return
if __name__ == '__main__':
pipeline_run(p.STEP, multiprocess = p.PIPE_MULTIPROCESS, verbose = p.PIPE_VERBOSE, gnu_make_maximal_rebuild_mode = p.PIPE_REBUILD)
|
adammaikai/OmicsPipe2.0
|
omics_pipe/WGS_GATK_group_calling.py
|
Python
|
mit
| 3,992
|
[
"BWA"
] |
0e222f0e4da5ee562b34049249244efdd95a4fdb9bed44bbbdc4aa6242a6f3ed
|
""" The MJF utility calculates the amount of wall clock time
left for a given batch system slot or VM. This is essential for the
'Filling Mode' where several jobs may be executed in the same slot.
Machine Job/Features are used following HSF-TN-2016-02 if available.
Otherwise values are filled in using the batch system and CS
information.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import ssl
import time
from urllib.request import urlopen
import DIRAC
from DIRAC import gLogger, gConfig
__RCSID__ = "$Id$"
class MJF(object):
"""Machine/Job Features methods"""
mjfKeys = {
"MACHINEFEATURES": ["total_cpu", "hs06", "shutdowntime", "grace_secs"],
"JOBFEATURES": [
"allocated_cpu",
"hs06_job",
"shutdowntime_job",
"grace_secs_job",
"jobstart_secs",
"job_id",
"wall_limit_secs",
"cpu_limit_secs",
"max_rss_bytes",
"max_swap_bytes",
"scratch_limit_bytes",
],
}
#############################################################################
def __init__(self):
"""Standard constructor"""
self.log = gLogger.getSubLogger("MJF")
capath = DIRAC.Core.Security.Locations.getCAsLocation()
if not capath:
raise Exception("Unable to find CA files location! Not in /etc/grid-security/certificates/ etc.")
# Used by urllib when talking to HTTPS web servers
self.context = ssl.create_default_context(capath=capath)
def updateConfig(self, pilotStartTime=None):
"""Populate /LocalSite/MACHINEFEATURES and /LocalSite/JOBFEATURES with MJF values
This is run early in the job to update the configuration file that subsequent DIRAC
scripts read when they start.
"""
if pilotStartTime:
gConfig.setOptionValue("/LocalSite/JOBFEATURES/jobstart_secs", str(pilotStartTime))
for mORj in ["MACHINEFEATURES", "JOBFEATURES"]:
for key in self.mjfKeys[mORj]:
value = self.__fetchMachineJobFeature(mORj, key)
if value is not None:
gConfig.setOptionValue("/LocalSite/%s/%s" % (mORj, key), value)
def getMachineFeature(self, key):
"""Returns MACHINEFEATURES/key value saved in /LocalSite configuration by
updateConfigFile() unless MACHINEFEATURES/shutdowntime when we try to fetch
from the source URL itself again in case it changes.
"""
if key == "shutdowntime":
value = self.__fetchMachineJobFeature("MACHINEFEATURES", "shutdowntime")
# If unable to fetch shutdowntime, go back to any value in /LocalSite
# in case HTTP(S) server is down
if value is not None:
return value
return gConfig.getValue("/LocalSite/MACHINEFEATURES/" + key, None)
def getIntMachineFeature(self, key):
"""Returns MACHINEFEATURES/key as an int or None if not an int or not present"""
value = self.getMachineFeature(key)
try:
return int(value)
except ValueError:
return None
def getJobFeature(self, key):
"""Returns JOBFEATURES/key value saved in /LocalSite configuration by
updateConfigFile() unless JOBFEATURES/shutdowntime_job when we try to fetch
from the source URL itself again in case it changes.
"""
if key == "shutdowntime_job":
value = self.__fetchMachineJobFeature("JOBFEATURES", "shutdowntime_job")
# If unable to fetch shutdowntime_job, go back to any value in /LocalSite
# in case HTTP(S) server is down
if value is not None:
return value
return gConfig.getValue("/LocalSite/JOBFEATURES/" + key, None)
def getIntJobFeature(self, key):
"""Returns JOBFEATURES/key as an int or None if not an int or not present"""
value = self.getJobFeature(key)
try:
return int(value)
except ValueError:
return None
def __fetchMachineJobFeature(self, mORj, key):
"""Returns raw MJF value for a given key, perhaps by HTTP(S), perhaps from a local file
mORj must be MACHINEFEATURES or JOBFEATURES
If the value cannot be found, then return None. There are many legitimate ways for
a site not to provide some MJF values so we don't log errors, failures etc.
"""
if mORj != "MACHINEFEATURES" and mORj != "JOBFEATURES":
raise Exception("Must request MACHINEFEATURES or JOBFEATURES")
if mORj not in os.environ:
return None
url = os.environ[mORj] + "/" + key
# Simple if a file
if url[0] == "/":
try:
with open(url, "r") as fd:
return fd.read().strip()
except Exception:
return None
# Otherwise make sure it's an HTTP(S) URL
if not url.startswith("http://") and not url.startswith("https://"):
return None
# We could have used urlopen() for local files too, but we also
# need to check HTTP return code in case we get an HTML error page
# instead of a true key value.
try:
mjfUrl = urlopen(url=url, context=self.context)
# HTTP return codes other than 2xx mean failure
if int(mjfUrl.getcode() / 100) != 2:
return None
return mjfUrl.read().strip()
except Exception:
return None
finally:
try:
mjfUrl.close()
except UnboundLocalError:
pass
def getWallClockSecondsLeft(self):
"""Returns the number of seconds until either the wall clock limit
or the shutdowntime(_job) is reached.
"""
now = int(time.time())
secondsLeft = None
jobstartSecs = self.getIntJobFeature("jobstart_secs")
wallLimitSecs = self.getIntJobFeature("wall_limit_secs")
shutdowntimeJob = self.getIntJobFeature("shutdowntime_job")
shutdowntime = self.getIntMachineFeature("shutdowntime")
# look for local shutdown file
try:
with open("/var/run/shutdown_time", "r") as fd:
shutdowntimeLocal = int(fd.read().strip())
except (IOError, ValueError):
shutdowntimeLocal = None
if jobstartSecs is not None and wallLimitSecs is not None:
secondsLeft = jobstartSecs + wallLimitSecs - now
if shutdowntimeJob is not None:
if secondsLeft is None:
secondsLeft = shutdowntimeJob - now
elif shutdowntimeJob - now < secondsLeft:
secondsLeft = shutdowntimeJob - now
if shutdowntime is not None:
if secondsLeft is None:
secondsLeft = shutdowntime - now
elif shutdowntime - now < secondsLeft:
secondsLeft = shutdowntime - now
if shutdowntimeLocal is not None:
if secondsLeft is None:
secondsLeft = shutdowntimeLocal - now
elif shutdowntimeLocal - now < secondsLeft:
secondsLeft = shutdowntimeLocal - now
# Wall Clock time left or None if unknown
return secondsLeft
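if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module):
    # it assumes $MACHINEFEATURES/$JOBFEATURES are set on the worker node as
    # per HSF-TN-2016-02 and that grid CA certificates are installed locally.
    mjf = MJF()
    mjf.updateConfig(pilotStartTime=int(time.time()))
    print("Wall clock seconds left:", mjf.getWallClockSecondsLeft())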
|
ic-hep/DIRAC
|
src/DIRAC/Core/Utilities/MJF.py
|
Python
|
gpl-3.0
| 7,448
|
[
"DIRAC"
] |
5c992eb2d426f55bcdc41bb70fac25076d4514cd299f303b7649059fd099bdd0
|
"""
Traits View definition file.
The view trait of the parent class is extracted from the model definition
file. This file can either be exec()ed or imported. See
core/base.py:Base.trait_view() for what is currently used. Using exec()
allows view changes without needing to restart Mayavi, but is slower than
importing.
"""
# Authors: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Judah De Paula <judah@enthought.com>
# Copyright (c) 2005-2008, Enthought, Inc.
# License: BSD Style.
from traitsui.api import Item, Group, View, EnumEditor
from mayavi.core.module_manager import LUT_DATA_MODE_TYPES
view = View(Group(Item('scalar_lut_manager', style='custom'),
label='Scalar LUT', show_labels=False,
selected=True),
Group(Item('vector_lut_manager', style='custom'),
label='Vector LUT', show_labels=False),
Group(Item('lut_data_mode',
style='custom',
editor = EnumEditor(values=LUT_DATA_MODE_TYPES)),
label='ModuleManager',
selected=False),
)
|
dmsurti/mayavi
|
mayavi/core/ui/module_manager.py
|
Python
|
bsd-3-clause
| 1,127
|
[
"Mayavi"
] |
b2b88f550429f400ef3b8c754f63737c97a73c0aa9136db251745987b1dbf75d
|
"""Optimise the cache."""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _, logger
import os, sys
def _already_linked(a, b):
ai = os.stat(a)
bi = os.stat(b)
return (ai.st_dev, ai.st_ino) == (bi.st_dev, bi.st_ino)
def _byte_identical(a, b):
with open(a, 'rb') as af:
with open(b, 'rb') as bf:
while True:
adata = af.read(100)
bdata = bf.read(100)
if adata != bdata:
return False
if not adata:
return True
def _link(a, b, tmpfile):
"""Keep 'a', delete 'b' and hard-link to 'a'"""
if not _byte_identical(a, b):
logger.warning(_("Files should be identical, but they're not!\n%(file_a)s\n%(file_b)s"), {'file_a': a, 'file_b': b})
b_dir = os.path.dirname(b)
old_mode = os.lstat(b_dir).st_mode
os.chmod(b_dir, old_mode | 0o200) # Need write access briefly
try:
os.link(a, tmpfile)
try:
os.rename(tmpfile, b)
except:
os.unlink(tmpfile)
raise
finally:
os.chmod(b_dir, old_mode)
def optimise(impl_dir):
"""Scan an implementation cache directory for duplicate files, and
hard-link any duplicates together to save space.
@param impl_dir: a $cache/0install.net/implementations directory
@type impl_dir: str
@return: (unique bytes, duplicated bytes, already linked, manifest size)
@rtype: (int, int, int, int)"""
first_copy = {} # TypeDigest -> Path
dup_size = uniq_size = already_linked = man_size = 0
import random
from zeroinstall.zerostore import BadDigest, parse_algorithm_digest_pair
for x in range(10):
tmpfile = os.path.join(impl_dir, 'optimise-%d' % random.randint(0, 1000000))
if not os.path.exists(tmpfile):
break
else:
raise Exception(_("Can't generate unused tempfile name!"))
dirs = os.listdir(impl_dir)
total = len(dirs)
msg = ""
def clear():
print("\r" + (" " * len(msg)) + "\r", end='')
for i, impl in enumerate(dirs):
clear()
msg = _("[%(done)d / %(total)d] Reading manifests...") % {'done': i, 'total': total}
print(msg, end='')
sys.stdout.flush()
try:
alg, manifest_digest = parse_algorithm_digest_pair(impl)
except BadDigest:
logger.warning(_("Skipping non-implementation '%s'"), impl)
continue
manifest_path = os.path.join(impl_dir, impl, '.manifest')
try:
ms = open(manifest_path, 'rt')
except OSError as ex:
logger.warning(_("Failed to read manifest file '%(manifest_path)s': %(exception)s"), {'manifest': manifest_path, 'exception': str(ex)})
continue
if alg == 'sha1':
ms.close()
continue
man_size += os.path.getsize(manifest_path)
dir = ""
for line in ms:
if line[0] == 'D':
itype, path = line.split(' ', 1)
assert path.startswith('/')
dir = path[1:-1] # Strip slash and newline
continue
if line[0] == "S":
itype, digest, size, rest = line.split(' ', 3)
uniq_size += int(size)
continue
assert line[0] in "FX"
itype, digest, mtime, size, path = line.split(' ', 4)
path = path[:-1] # Strip newline
size = int(size)
key = (itype, digest, mtime, size)
loc_path = (impl, dir, path)
first_loc = first_copy.get(key, None)
if first_loc:
first_full = os.path.join(impl_dir, *first_loc)
new_full = os.path.join(impl_dir, *loc_path)
if _already_linked(first_full, new_full):
already_linked += size
else:
_link(first_full, new_full, tmpfile)
dup_size += size
else:
first_copy[key] = loc_path
uniq_size += size
ms.close()
clear()
return (uniq_size, dup_size, already_linked, man_size)
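if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module):
    # pass the path of a $cache/0install.net/implementations directory.
    uniq, dup, linked, man = optimise(sys.argv[1])
    print("Unique: %d bytes, duplicated: %d bytes, already linked: %d bytes,"
          " manifest overhead: %d bytes" % (uniq, dup, linked, man))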
|
dsqmoore/0install
|
zeroinstall/zerostore/optimise.py
|
Python
|
lgpl-2.1
| 3,568
|
[
"VisIt"
] |
bbc7e3ebe2a7dfa93526623502b81e557b1d0f10261d2f7c27d981ff22c22ef1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from urllib.parse import urlsplit, urlparse, parse_qs, urljoin
except:
from urlparse import urlsplit, urlparse, parse_qs, urljoin
import json
import os
import re
import time
import urllib
from base64 import b64decode
from core import httptools
from platformcode import config, logger
def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
rec = re.compile(regex, flags=flags)
match = rec.search(text)
if not match:
return False
return match.group(1)
class UnshortenIt(object):
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net|briskgram\.net|activetect\.net|baymaleti\.net|thouth\.net|uclaut.net|gloyah.net|xterca.net|larati.net'
_linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
_adfocus_regex = r'adfoc\.us'
_lnxlu_regex = r'lnx\.lu'
_shst_regex = r'sh\.st|festyy\.com|ceesty\.com'
_hrefli_regex = r'href\.li'
_anonymz_regex = r'anonymz\.com'
_shrink_service_regex = r'shrink-service\.it'
_rapidcrypt_regex = r'rapidcrypt\.net'
_cryptmango_regex = r'cryptmango'
_maxretries = 5
_this_dir, _this_filename = os.path.split(__file__)
_timeout = 10
def unshorten(self, uri, type=None):
domain = urlsplit(uri).netloc
if not domain:
return uri, "No domain found in URI!"
had_google_outbound, uri = self._clear_google_outbound_proxy(uri)
if re.search(self._adfly_regex, domain,
re.IGNORECASE) or type == 'adfly':
return self._unshorten_adfly(uri)
if re.search(self._adfocus_regex, domain,
re.IGNORECASE) or type == 'adfocus':
return self._unshorten_adfocus(uri)
if re.search(self._linkbucks_regex, domain,
re.IGNORECASE) or type == 'linkbucks':
return self._unshorten_linkbucks(uri)
if re.search(self._lnxlu_regex, domain,
re.IGNORECASE) or type == 'lnxlu':
return self._unshorten_lnxlu(uri)
if re.search(self._shrink_service_regex, domain, re.IGNORECASE):
return self._unshorten_shrink_service(uri)
if re.search(self._shst_regex, domain, re.IGNORECASE):
return self._unshorten_shst(uri)
if re.search(self._hrefli_regex, domain, re.IGNORECASE):
return self._unshorten_hrefli(uri)
if re.search(self._anonymz_regex, domain, re.IGNORECASE):
return self._unshorten_anonymz(uri)
if re.search(self._rapidcrypt_regex, domain, re.IGNORECASE):
return self._unshorten_rapidcrypt(uri)
if re.search(self._cryptmango_regex, uri, re.IGNORECASE):
return self._unshorten_cryptmango(uri)
return uri, 0
def unwrap_30x(self, uri, timeout=10):
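        # nested helper: follows HTTP redirects (up to _maxretries) for a single URI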
def unwrap_30x(uri, timeout=10):
domain = urlsplit(uri).netloc
self._timeout = timeout
try:
# headers stop t.co from working so omit headers if this is a t.co link
if domain == 't.co':
r = httptools.downloadpage(uri, timeout=self._timeout)
return r.url, r.code
# p.ost.im uses meta http refresh to redirect.
if domain == 'p.ost.im':
r = httptools.downloadpage(uri, timeout=self._timeout)
uri = re.findall(r'.*url\=(.*?)\"\.*', r.data)[0]
return uri, r.code
retries = 0
while True:
r = httptools.downloadpage(
uri,
timeout=self._timeout,
cookies=False,
follow_redirects=False)
                    if not r.sucess:  # 'sucess' (sic) is the attribute name exposed by core.httptools
return uri, -1
if '4snip' not in r.url and 'location' in r.headers and retries < self._maxretries:
r = httptools.downloadpage(
r.headers['location'],
cookies=False,
follow_redirects=False)
uri = r.url
retries += 1
else:
return r.url, r.code
except Exception as e:
return uri, str(e)
uri, code = unwrap_30x(uri, timeout)
if 'vcrypt' in uri and 'fastshield' in uri:
            # request twice because of cookies
httptools.downloadpage(
uri,
timeout=self._timeout,
post='go=go')
r = httptools.downloadpage(
uri,
timeout=self._timeout,
post='go=go')
return r.url, r.code
return uri, code
def _clear_google_outbound_proxy(self, url):
'''
So google proxies all their outbound links through a redirect so they can detect outbound links.
This call strips them out if they are present.
This is useful for doing things like parsing google search results, or if you're scraping google
docs, where google inserts hit-counters on all outbound links.
'''
# This is kind of hacky, because we need to check both the netloc AND
# part of the path. We could use urllib.parse.urlsplit, but it's
# easier and just as effective to use string checks.
if url.startswith("http://www.google.com/url?") or \
url.startswith("https://www.google.com/url?"):
qs = urlparse(url).query
query = parse_qs(qs)
if "q" in query: # Google doc outbound links (maybe blogspot, too)
return True, query["q"].pop()
elif "url" in query: # Outbound links from google searches
return True, query["url"].pop()
else:
raise ValueError(
"Google outbound proxy URL without a target url ('%s')?" %
url)
return False, url
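        # e.g. 'https://www.google.com/url?q=https://example.com/x' -> (True, 'https://example.com/x')
        #      'https://example.com/x'                              -> (False, 'https://example.com/x')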
def _unshorten_adfly(self, uri):
try:
r = httptools.downloadpage(
uri, timeout=self._timeout, cookies=False)
html = r.data
ysmm = re.findall(r"var ysmm =.*\;?", html)
if len(ysmm) > 0:
ysmm = re.sub(r'var ysmm \= \'|\'\;', '', ysmm[0])
left = ''
right = ''
for c in [ysmm[i:i + 2] for i in range(0, len(ysmm), 2)]:
left += c[0]
right = c[1] + right
# Additional digit arithmetic
encoded_uri = list(left + right)
numbers = ((i, n) for i, n in enumerate(encoded_uri) if str.isdigit(n))
for first, second in zip(numbers, numbers):
xor = int(first[1]) ^ int(second[1])
if xor < 10:
encoded_uri[first[0]] = str(xor)
decoded_uri = b64decode("".join(encoded_uri).encode())[16:-16].decode()
if re.search(r'go\.php\?u\=', decoded_uri):
decoded_uri = b64decode(re.sub(r'(.*?)u=', '', decoded_uri)).decode()
return decoded_uri, r.code
else:
return uri, 'No ysmm variable found'
except Exception as e:
return uri, str(e)
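    # ysmm decoding, worked through on a toy value (illustrative only):
    #   ysmm = 'A1B2C3D4'  ->  pairs ('A1', 'B2', 'C3', 'D4')
    #   left  = 'ABCD'  (first character of each pair)
    #   right = '4321'  (second characters, each prepended, so reversed)
    #   In 'ABCD4321', consecutive digits are then paired and the first digit of
    #   each pair is replaced by their XOR (when < 10); the result is
    #   base64-decoded and the 16-byte prefix/suffix of the payload are stripped.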
def _unshorten_linkbucks(self, uri):
'''
(Attempt) to decode linkbucks content. HEAVILY based on the OSS jDownloader codebase.
        This has necessitated a license change.
'''
if config.is_xbmc():
import xbmc
r = httptools.downloadpage(uri, timeout=self._timeout)
firstGet = time.time()
baseloc = r.url
if "/notfound/" in r.url or \
"(>Link Not Found<|>The link may have been deleted by the owner|To access the content, you must complete a quick survey\.)" in r.data:
return uri, 'Error: Link not found or requires a survey!'
link = None
content = r.data
regexes = [
r"<div id=\"lb_header\">.*?/a>.*?<a.*?href=\"(.*?)\".*?class=\"lb",
r"AdBriteInit\(\"(.*?)\"\)",
r"Linkbucks\.TargetUrl = '(.*?)';",
r"Lbjs\.TargetUrl = '(http://[^<>\"]*?)'",
r"src=\"http://static\.linkbucks\.com/tmpl/mint/img/lb\.gif\" /></a>.*?<a href=\"(.*?)\"",
r"id=\"content\" src=\"([^\"]*)",
]
for regex in regexes:
if self.inValidate(link):
link = find_in_text(regex, content)
if self.inValidate(link):
match = find_in_text(r"noresize=\"[0-9+]\" src=\"(http.*?)\"", content)
if match:
link = find_in_text(r"\"frame2\" frameborder.*?src=\"(.*?)\"", content)
if self.inValidate(link):
scripts = re.findall("(<script type=\"text/javascript\">[^<]+</script>)", content)
if not scripts:
return uri, "No script bodies found?"
js = False
for script in scripts:
# cleanup
script = re.sub(r"[\r\n\s]+\/\/\s*[^\r\n]+", "", script)
if re.search(r"\s*var\s*f\s*=\s*window\['init'\s*\+\s*'Lb'\s*\+\s*'js'\s*\+\s*''\];[\r\n\s]+", script):
js = script
if not js:
return uri, "Could not find correct script?"
token = find_in_text(r"Token\s*:\s*'([a-f0-9]{40})'", js)
if not token:
token = find_in_text(r"\?t=([a-f0-9]{40})", js)
assert token
authKeyMatchStr = r"A(?:'\s*\+\s*')?u(?:'\s*\+\s*')?t(?:'\s*\+\s*')?h(?:'\s*\+\s*')?K(?:'\s*\+\s*')?e(?:'\s*\+\s*')?y"
l1 = find_in_text(r"\s*params\['" + authKeyMatchStr + r"'\]\s*=\s*(\d+?);", js)
l2 = find_in_text(
r"\s*params\['" + authKeyMatchStr + r"'\]\s*=\s?params\['" + authKeyMatchStr + r"'\]\s*\+\s*(\d+?);",
js)
if any([not l1, not l2, not token]):
return uri, "Missing required tokens?"
authkey = int(l1) + int(l2)
p1_url = urljoin(baseloc, "/director/?t={tok}".format(tok=token))
r2 = httptools.downloadpage(p1_url, timeout=self._timeout)
p1_url = urljoin(baseloc, "/scripts/jquery.js?r={tok}&{key}".format(tok=token, key=l1))
r2 = httptools.downloadpage(p1_url, timeout=self._timeout)
time_left = 5.033 - (time.time() - firstGet)
if config.is_xbmc():
            xbmc.sleep(int(max(time_left, 0) * 1000))
else:
            time.sleep(5)
p3_url = urljoin(baseloc, "/intermission/loadTargetUrl?t={tok}&aK={key}&a_b=false".format(tok=token,
key=str(authkey)))
r3 = httptools.downloadpage(p3_url, timeout=self._timeout)
resp_json = json.loads(r3.data)
if "Url" in resp_json:
return resp_json['Url'], r3.code
return "Wat", "wat"
def inValidate(self, s):
# Original conditional:
# (s == null || s != null && (s.matches("[\r\n\t ]+") || s.equals("") || s.equalsIgnoreCase("about:blank")))
if not s:
return True
if re.search("[\r\n\t ]+", s) or s.lower() == "about:blank":
return True
else:
return False
def _unshorten_adfocus(self, uri):
orig_uri = uri
try:
r = httptools.downloadpage(uri, timeout=self._timeout)
html = r.data
adlink = re.findall("click_url =.*;", html)
if len(adlink) > 0:
uri = re.sub('^click_url = "|"\;$', '', adlink[0])
if re.search(r'http(s|)\://adfoc\.us/serve/skip/\?id\=', uri):
http_header = dict()
http_header["Host"] = "adfoc.us"
http_header["Referer"] = orig_uri
r = httptools.downloadpage(uri, headers=http_header, timeout=self._timeout)
uri = r.url
return uri, r.code
else:
return uri, 'No click_url variable found'
except Exception as e:
return uri, str(e)
def _unshorten_lnxlu(self, uri):
try:
r = httptools.downloadpage(uri, timeout=self._timeout)
html = r.data
code = re.findall('/\?click\=(.*)\."', html)
if len(code) > 0:
payload = {'click': code[0]}
r = httptools.downloadpage(
                    'http://lnx.lu?' + urlencode(payload),
timeout=self._timeout)
return r.url, r.code
else:
return uri, 'No click variable found'
except Exception as e:
return uri, str(e)
def _unshorten_shst(self, uri):
try:
r = httptools.downloadpage(uri, timeout=self._timeout)
html = r.data
session_id = re.findall(r'sessionId\:(.*?)\"\,', html)
if len(session_id) > 0:
session_id = re.sub(r'\s\"', '', session_id[0])
http_header = dict()
http_header["Content-Type"] = "application/x-www-form-urlencoded"
http_header["Host"] = "sh.st"
http_header["Referer"] = uri
http_header["Origin"] = "http://sh.st"
http_header["X-Requested-With"] = "XMLHttpRequest"
if config.is_xbmc():
import xbmc
xbmc.sleep(5 * 1000)
else:
                    time.sleep(5)
payload = {'adSessionId': session_id, 'callback': 'c'}
r = httptools.downloadpage(
'http://sh.st/shortest-url/end-adsession?' +
                    urlencode(payload),
headers=http_header,
timeout=self._timeout)
response = r.data[6:-2].decode('utf-8')
if r.code == 200:
resp_uri = json.loads(response)['destinationUrl']
if resp_uri is not None:
uri = resp_uri
else:
return uri, 'Error extracting url'
else:
return uri, 'Error extracting url'
return uri, r.code
except Exception as e:
return uri, str(e)
def _unshorten_hrefli(self, uri):
try:
# Extract url from query
parsed_uri = urlparse(uri)
extracted_uri = parsed_uri.query
if not extracted_uri:
return uri, 200
# Get url status code
r = httptools.downloadpage(
extracted_uri,
timeout=self._timeout,
follow_redirects=False)
return r.url, r.code
except Exception as e:
return uri, str(e)
def _unshorten_anonymz(self, uri):
# For the moment they use the same system as hrefli
return self._unshorten_hrefli(uri)
def _unshorten_shrink_service(self, uri):
try:
r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False)
html = r.data
uri = re.findall(r"<input type='hidden' name='\d+' id='\d+' value='([^']+)'>", html)[0]
from core import scrapertools
uri = scrapertools.decodeHtmlentities(uri)
uri = uri.replace("/", "/") \
.replace(":", ":") \
.replace(".", ".") \
.replace("!", "!") \
.replace("#", "#") \
.replace("?", "?") \
.replace("_", "_")
return uri, r.code
except Exception as e:
return uri, str(e)
def _unshorten_rapidcrypt(self, uri):
try:
r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False)
html = r.data
if 'embed' in uri:
uri = re.findall(r'<a class="play-btn" href=([^">]*)>', html)[0]
else:
uri = re.findall(r'<a class="push_button blue" href=([^>]+)>', html)[0]
return uri, r.code
except Exception as e:
            return uri, str(e)
def _unshorten_cryptmango(self, uri):
try:
r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False)
html = r.data
uri = re.findall(r'<iframe src="([^"]+)"[^>]+>', html)[0]
return uri, r.code
except Exception as e:
return uri, str(e)
def unwrap_30x_only(uri, timeout=10):
unshortener = UnshortenIt()
uri, status = unshortener.unwrap_30x(uri, timeout=timeout)
return uri, status
def unshorten_only(uri, type=None, timeout=10):
unshortener = UnshortenIt()
uri, status = unshortener.unshorten(uri, type=type)
return uri, status
def unshorten(uri, type=None, timeout=10):
unshortener = UnshortenIt()
uri, status = unshortener.unwrap_30x(uri, timeout=timeout)
uri, status = unshortener.unshorten(uri, type=type)
if status == 200:
uri, status = unshortener.unwrap_30x(uri, timeout=timeout)
return uri, status
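# Usage sketch (illustrative only; core.httptools is the Kodi stack this
# module runs against, and the short URL below is a made-up example):
#
#   resolved, status = unshorten('http://adf.ly/XXXXX')
#   if status == 200:
#       logger.info('resolved to %s' % resolved)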
|
alfa-jor/addon
|
plugin.video.alfa/lib/unshortenit.py
|
Python
|
gpl-3.0
| 17,786
|
[
"ADF"
] |
9079d165a7fe4f54123350ef8f04182101ad646aa2c2478e6bac763ce7df90a9
|
from flask_restful import Resource
class Status(Resource):
def get(self):
status = {
'serviceName': 'GO Analysis Service',
'description': 'Perform GO enrichment analysis with Bioconductor',
'apiVersion': 'v1',
'buildVersion': '0.1.0'
}
return status
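# A minimal sketch of mounting this resource in a Flask app (the wiring
# below is assumed, not part of this module):
#
#   from flask import Flask
#   from flask_restful import Api
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(Status, '/v1/status')
#   # GET /v1/status then returns the JSON document above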
|
cytoscape-ci/go-enrichment
|
api/status.py
|
Python
|
mit
| 331
|
[
"Bioconductor"
] |
0a50d1a46afb6a432c768deaeb1c05c56ca4f42fb98533ada48564dc35613fd1
|
from PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt, QVariant, QTimer
from PyQt5.QtWidgets import QWidget, QFrame, QMenu, QAction, \
QPushButton, QLabel, QVBoxLayout, QHBoxLayout, QLineEdit, QToolButton, \
QMessageBox, QGridLayout, QScrollArea, QLayout
from PyQt5.QtGui import QIcon, QCursor, QPixmap, QFont
from ui.xnova.xn_data import XNPlanet, XNCoords, XNPlanetBuildingItem, \
XNResourceBundle
from ui.xnova.xn_world import XNovaWorld_instance, XNovaWorld
from ui.xnova import xn_logger
from ui.customwidgets.collapsible_frame import CollapsibleFrame
from ui.customwidgets.input_string_dialog import input_string_dialog
from ui.customwidgets.build_progress_widget import BuildProgressWidget
from ui.widget_utils import number_format, time_seconds_to_str
logger = xn_logger.get(__name__, debug=True)
class Planet_BasicInfoPanel(QFrame):
requestOpenGalaxy = pyqtSignal(XNCoords)
requestRefreshPlanet = pyqtSignal()
requestRenamePlanet = pyqtSignal(int, str) # planet_id, new_planet_name
def __init__(self, parent: QWidget):
super(Planet_BasicInfoPanel, self).__init__(parent)
#
self._planet_pic_url = ''
self._pixmap = QPixmap()
self._planet = XNPlanet()
# setup frame
self.setFrameShape(QFrame.StyledPanel)
self.setFrameShadow(QFrame.Raised)
# bold font
font = self.font()
font.setWeight(QFont.Bold)
# resource pictures
self._pix_met = QPixmap(':/i/s_metall.png')
self._pix_cry = QPixmap(':/i/s_kristall.png')
self._pix_deit = QPixmap(':/i/s_deuterium.png')
self._pix_energy = QPixmap(':/i/s_energy.png')
# layout
self._layout = QHBoxLayout()
self._layout.setContentsMargins(5, 5, 5, 5)
self._layout.setSpacing(5)
self.setLayout(self._layout)
self._vlayout = QVBoxLayout()
self._hlayout_name_coords = QHBoxLayout()
self._hlayout_fields = QHBoxLayout()
self._hlayout_res = QHBoxLayout()
self._hlayout_resmax = QHBoxLayout()
self._hlayout_energy = QHBoxLayout()
# labels
self._lbl_img = QLabel(self)
self._lbl_name = QLabel(self)
self._lbl_coords = QLabel(self)
self._lbl_coords.linkActivated.connect(self.on_coords_link_activated)
self._lbl_fields = QLabel()
# resource labels
self._lbl_res_on_planet = QLabel(self.tr('Resources:'), self)
self._lbl_metal = QLabel(self)
self._lbl_crystal = QLabel(self)
self._lbl_deit = QLabel(self)
self._lbl_cur_met = QLabel(self)
self._lbl_cur_cry = QLabel(self)
self._lbl_cur_deit = QLabel(self)
self._lbl_cur_met.setFont(font)
self._lbl_cur_cry.setFont(font)
self._lbl_cur_deit.setFont(font)
self._lbl_metal.setPixmap(self._pix_met)
self._lbl_crystal.setPixmap(self._pix_cry)
self._lbl_deit.setPixmap(self._pix_deit)
# resource max
self._lbl_res_max = QLabel(self.tr('Capacity:'), self)
self._lbl_metal2 = QLabel(self)
self._lbl_crystal2 = QLabel(self)
self._lbl_deit2 = QLabel(self)
self._lbl_max_met = QLabel(self)
self._lbl_max_cry = QLabel(self)
self._lbl_max_deit = QLabel(self)
self._lbl_max_met.setFont(font)
self._lbl_max_cry.setFont(font)
self._lbl_max_deit.setFont(font)
self._lbl_metal2.setPixmap(self._pix_met)
self._lbl_crystal2.setPixmap(self._pix_cry)
self._lbl_deit2.setPixmap(self._pix_deit)
# energy labels
self._lbl_energy = QLabel(self.tr('Energy, charge:'), self)
self._lbl_energy_pix = QLabel(self)
self._lbl_energy_pix.setPixmap(self._pix_energy)
self._lbl_energy_stats = QLabel(self)
self._lbl_energy_stats.setFont(font)
# button
self._btn_refresh = QPushButton(self.tr('Refresh planet'), self)
self._btn_refresh.setIcon(QIcon(':/i/reload.png'))
self._btn_refresh.clicked.connect(self.on_btn_refresh_clicked)
self._btn_refresh.setMinimumHeight(25)
self._btn_tools = QToolButton(self)
self._btn_tools.setIcon(QIcon(':/i/tools_32.png'))
self._btn_tools.setText(self.tr('Actions...'))
self._btn_tools.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self._btn_tools.setPopupMode(QToolButton.InstantPopup)
self._btn_tools.setMinimumHeight(25)
self._actions_menu = QMenu(self)
self._action_renameplanet = QAction(self.tr('Rename planet'), self)
self._action_leaveplanet = QAction(self.tr('Leave planet'), self)
self._actions_menu.addAction(self._action_renameplanet)
self._actions_menu.addAction(self._action_leaveplanet)
self._btn_tools.setMenu(self._actions_menu)
self._action_renameplanet.triggered.connect(self.on_action_renameplanet)
self._action_leaveplanet.triggered.connect(self.on_action_leaveplanet)
# finalize layout
self._hlayout_name_coords.addWidget(self._lbl_name)
self._hlayout_name_coords.addWidget(self._lbl_coords)
self._hlayout_name_coords.addWidget(self._btn_refresh)
self._hlayout_name_coords.addWidget(self._btn_tools)
self._hlayout_name_coords.addStretch()
#
self._hlayout_fields.addWidget(self._lbl_fields)
self._hlayout_fields.addStretch()
#
self._hlayout_res.addWidget(self._lbl_res_on_planet)
self._hlayout_res.addWidget(self._lbl_metal)
self._hlayout_res.addWidget(self._lbl_cur_met)
self._hlayout_res.addWidget(self._lbl_crystal)
self._hlayout_res.addWidget(self._lbl_cur_cry)
self._hlayout_res.addWidget(self._lbl_deit)
self._hlayout_res.addWidget(self._lbl_cur_deit)
self._hlayout_res.addStretch()
#
self._hlayout_resmax.addWidget(self._lbl_res_max)
self._hlayout_resmax.addWidget(self._lbl_metal2)
self._hlayout_resmax.addWidget(self._lbl_max_met)
self._hlayout_resmax.addWidget(self._lbl_crystal2)
self._hlayout_resmax.addWidget(self._lbl_max_cry)
self._hlayout_resmax.addWidget(self._lbl_deit2)
self._hlayout_resmax.addWidget(self._lbl_max_deit)
self._hlayout_resmax.addStretch()
# minimum widths for res labels
lbl_width = 100
res_width = 120
self._lbl_res_on_planet.setMinimumWidth(lbl_width)
self._lbl_res_max.setMinimumWidth(lbl_width)
self._lbl_energy.setMinimumWidth(lbl_width)
self._lbl_cur_met.setMinimumWidth(res_width)
self._lbl_cur_cry.setMinimumWidth(res_width)
self._lbl_cur_deit.setMinimumWidth(res_width)
self._lbl_max_met.setMinimumWidth(res_width)
self._lbl_max_cry.setMinimumWidth(res_width)
self._lbl_max_deit.setMinimumWidth(res_width)
#
self._hlayout_energy.addWidget(self._lbl_energy)
self._hlayout_energy.addWidget(self._lbl_energy_pix)
self._hlayout_energy.addWidget(self._lbl_energy_stats)
self._hlayout_energy.addStretch()
#
self._vlayout.addLayout(self._hlayout_name_coords)
self._vlayout.addLayout(self._hlayout_fields)
self._vlayout.addLayout(self._hlayout_res)
self._vlayout.addLayout(self._hlayout_resmax)
self._vlayout.addLayout(self._hlayout_energy)
self._vlayout.addStretch()
#
self._layout.addWidget(self._lbl_img, 0, Qt.AlignTop | Qt.AlignHCenter)
self._layout.addLayout(self._vlayout)
self._layout.addStretch()
def setup_from_planet(self, planet: XNPlanet):
# store references
self._planet = planet
self._planet_pic_url = planet.pic_url
# deal with planet pic
file_name = './cache/img/{0}'.format(
self._planet_pic_url.replace('/', '_'))
self._pixmap = QPixmap(file_name)
self._lbl_img.setPixmap(self._pixmap)
# setup widget max height based on picture size and layout's margins
margins = self._layout.contentsMargins()
top_margin = margins.top()
bottom_margin = margins.bottom()
max_height = self._pixmap.height() + top_margin + bottom_margin
if max_height < 130:
max_height = 130
self.setMaximumHeight(max_height)
        # planet name, coords, fields
self._lbl_name.setText(planet.name)
self._lbl_coords.setText('<a href="{0}">{0}</a>'.format(
planet.coords.coords_str()))
fields_left_str = '{0}: {1}'.format(
self.tr('left'),
planet.fields_total - planet.fields_busy)
self._lbl_fields.setText(
self.tr('Fields:') +
' {0} / {1} ({2})'.format(planet.fields_busy,
planet.fields_total,
fields_left_str))
# resources
self.update_resources()
def update_resources(self):
# update planet resources
color_enough = '#008800'
color_exceed = '#AA0000'
# cur metal
color = color_enough
if self._planet.res_current.met > self._planet.res_max_silos.met:
color = color_exceed
self._lbl_cur_met.setText('<font color="{0}">{1}</font>'.format(
color, number_format(int(self._planet.res_current.met))))
# cur crystal
color = color_enough
if self._planet.res_current.cry > self._planet.res_max_silos.cry:
color = color_exceed
self._lbl_cur_cry.setText('<font color="{0}">{1}</font>'.format(
color, number_format(int(self._planet.res_current.cry))))
# cur deit
color = color_enough
if self._planet.res_current.deit > self._planet.res_max_silos.deit:
color = color_exceed
self._lbl_cur_deit.setText('<font color="{0}">{1}</font>'.format(
color, number_format(int(self._planet.res_current.deit))))
# update res max
self._lbl_max_met.setText(number_format(
int(self._planet.res_max_silos.met)))
self._lbl_max_cry.setText(number_format(
int(self._planet.res_max_silos.cry)))
self._lbl_max_deit.setText(number_format(
int(self._planet.res_max_silos.deit)))
# energy
self._lbl_energy_stats.setText('{0} / {1} ({2}%)'.format(
self._planet.energy.energy_left,
self._planet.energy.energy_total,
self._planet.energy.charge_percent))
@pyqtSlot(str)
def on_coords_link_activated(self, link: str):
coords = XNCoords()
coords.parse_str(link, raise_on_error=True)
self.requestOpenGalaxy.emit(coords)
@pyqtSlot()
def on_btn_refresh_clicked(self):
self.requestRefreshPlanet.emit()
@pyqtSlot()
def on_action_renameplanet(self):
new_name = input_string_dialog(
self,
self.tr('Rename planet'),
self.tr('Enter new planet name:'),
self._planet.name)
if (new_name is not None) and (new_name != self._planet.name):
self.requestRenamePlanet.emit(self._planet.planet_id, new_name)
@pyqtSlot()
def on_action_leaveplanet(self):
QMessageBox.warning(self, self.tr('Not done'),
self.tr('Leaving planet is not done!'))
class Planet_BuildItemWidget(QFrame):
# bitem, optional quantity for shipyard
requestBuildItem = pyqtSignal(XNPlanetBuildingItem, int)
# downgrade a building
requestDowngradeItem = pyqtSignal(XNPlanetBuildingItem)
def __init__(self, parent: QWidget):
super(Planet_BuildItemWidget, self).__init__(parent)
# data members
self._bitem = XNPlanetBuildingItem()
self._pix = QPixmap()
self._pix_met = QPixmap(':/i/s_metall.png')
self._pix_cry = QPixmap(':/i/s_kristall.png')
self._pix_deit = QPixmap(':/i/s_deuterium.png')
self._pix_energy = QPixmap(':/i/s_energy.png')
# setup frame
self.setFrameShape(QFrame.StyledPanel)
self.setFrameShadow(QFrame.Raised)
# font
font = self.font()
font.setWeight(QFont.Bold)
# layout
self._layout = QHBoxLayout()
self._layout_v = QVBoxLayout()
self.setLayout(self._layout)
# label with building image
self._lbl_pix = QLabel(self)
# labels for name and level
self._layout_nl = QHBoxLayout()
self._lbl_name = QLabel(self)
self._lbl_name.setFont(font)
self._lbl_lvl = QLabel(self)
# labels for time
self._layout_buildtime = QHBoxLayout()
self._lbl_time = QLabel(self.tr('Time:'), self)
self._lbl_timestr = QLabel(self)
# labels for price
self._layout_price1 = QHBoxLayout()
self._layout_price2 = QHBoxLayout()
self._layout_price3 = QHBoxLayout()
self._layout_price4 = QHBoxLayout()
self._lbl_price_met_ico = QLabel(self)
self._lbl_price_met_ico.setPixmap(self._pix_met)
self._lbl_price_met = QLabel()
self._lbl_price_cry_ico = QLabel()
self._lbl_price_cry_ico.setPixmap(self._pix_cry)
self._lbl_price_cry = QLabel()
self._lbl_price_deit_ico = QLabel()
self._lbl_price_deit_ico.setPixmap(self._pix_deit)
self._lbl_price_deit = QLabel()
self._lbl_price_energy_ico = QLabel()
self._lbl_price_energy_ico.setPixmap(self._pix_energy)
self._lbl_price_energy = QLabel()
# buttons
# upgrade
self._layout_buttons = QHBoxLayout()
self._btn_upgrade = QPushButton(self.tr('Upgrade'), self)
self._btn_upgrade.setIcon(QIcon(':/i/build.png'))
self._btn_upgrade.setMaximumHeight(25)
self._btn_upgrade.clicked.connect(self.on_upgrade_clicked)
# downgrade, hidden by default
self._btn_downgrade = QPushButton('', self)
self._btn_downgrade.setIcon(QIcon(':/i/arrow_down_red_16.png'))
self._btn_downgrade.setMaximumHeight(25)
self._btn_downgrade.clicked.connect(self.on_downgrade_clicked)
self._btn_downgrade.setToolTip(self.tr('Dismantle'))
self._btn_downgrade.hide()
# line edit for quantity for shipyard items, hidden by default
self._lineedit_quantity = QLineEdit(self)
self._lineedit_quantity.setMaximumWidth(50)
self._lineedit_quantity.hide()
# construct layout
# name, level
self._layout_nl.addWidget(self._lbl_name)
self._layout_nl.addWidget(self._lbl_lvl)
self._layout_nl.addStretch()
self._layout_v.addLayout(self._layout_nl)
# build time
self._layout_buildtime.addWidget(self._lbl_time)
self._layout_buildtime.addWidget(self._lbl_timestr)
self._layout_buildtime.addStretch()
self._layout_v.addLayout(self._layout_buildtime)
# price met
self._layout_price1.addWidget(self._lbl_price_met_ico)
self._layout_price1.addWidget(self._lbl_price_met)
self._layout_price1.addStretch()
# price cry
self._layout_price2.addWidget(self._lbl_price_cry_ico)
self._layout_price2.addWidget(self._lbl_price_cry)
self._layout_price2.addStretch()
# price deit
self._layout_price3.addWidget(self._lbl_price_deit_ico)
self._layout_price3.addWidget(self._lbl_price_deit)
self._layout_price3.addStretch()
# price energy
self._layout_price4.addWidget(self._lbl_price_energy_ico)
self._layout_price4.addWidget(self._lbl_price_energy)
self._layout_price4.addStretch()
self._layout_v.addLayout(self._layout_price1)
self._layout_v.addLayout(self._layout_price2)
self._layout_v.addLayout(self._layout_price3)
self._layout_v.addLayout(self._layout_price4)
# buttons
self._layout_buttons.addWidget(self._btn_upgrade)
self._layout_buttons.addWidget(self._btn_downgrade)
self._layout_buttons.addWidget(self._lineedit_quantity)
self._layout_buttons.addStretch()
self._layout_v.addLayout(self._layout_buttons)
#
self._layout.addWidget(self._lbl_pix, 0, Qt.AlignTop | Qt.AlignHCenter)
self._layout.addLayout(self._layout_v)
def set_building_item(self,
bitem: XNPlanetBuildingItem,
res_cur: XNResourceBundle,
energy_cur: int):
self._bitem = bitem
# load pixmap
pix_fn = 'ui/i/building_{0}.gif'.format(bitem.gid)
if not self._pix.load(pix_fn):
logger.warn('Failed to load pixmap from: [{0}]'.format(pix_fn))
else:
self._lbl_pix.setPixmap(self._pix.scaled(80, 80))
# self._lbl_pix.setPixmap(self._pix)
# name, level
self._lbl_name.setText(bitem.name)
if self._bitem.is_shipyard_item:
# shipyard items use quantity instead of level
self._lbl_lvl.setText(str(bitem.quantity))
else:
self._lbl_lvl.setText(str(bitem.level))
# time
if bitem.seconds_total != -1:
self._lbl_timestr.setText(time_seconds_to_str(bitem.seconds_total))
else:
self._lbl_timestr.setText('-')
# colors
color_enough = '#008800'
color_notenough = '#AA0000'
enough_met = True
enough_cry = True
enough_deit = True
enough_energy = True
# price met
if bitem.cost_met > 0:
setstr = number_format(bitem.cost_met)
color = color_enough
if res_cur.met < bitem.cost_met:
setstr += ' (-{0})'.format(number_format(
int(bitem.cost_met - res_cur.met)))
color = color_notenough
enough_met = False
self._lbl_price_met.setText('<font color="{0}">{1}</font>'.format(
color, setstr))
self._lbl_price_met_ico.show()
self._lbl_price_met.show()
else:
self._lbl_price_met_ico.hide()
self._lbl_price_met.hide()
# price cry
if bitem.cost_cry > 0:
setstr = number_format(bitem.cost_cry)
color = color_enough
if res_cur.cry < bitem.cost_cry:
setstr += ' (-{0})'.format(number_format(
int(bitem.cost_cry - res_cur.cry)))
color = color_notenough
enough_cry = False
self._lbl_price_cry.setText('<font color="{0}">{1}</font>'.format(
color, setstr))
self._lbl_price_cry_ico.show()
self._lbl_price_cry.show()
else:
self._lbl_price_cry_ico.hide()
self._lbl_price_cry.hide()
# price deit
if bitem.cost_deit > 0:
setstr = number_format(bitem.cost_deit)
color = color_enough
if res_cur.deit < bitem.cost_deit:
setstr += ' (-{0})'.format(number_format(
int(bitem.cost_deit - res_cur.deit)))
color = color_notenough
enough_deit = False
self._lbl_price_deit.setText('<font color="{0}">{1}</font>'.format(
color, setstr))
self._lbl_price_deit_ico.show()
self._lbl_price_deit.show()
else:
self._lbl_price_deit_ico.hide()
self._lbl_price_deit.hide()
# price energy
if bitem.cost_energy > 0:
setstr = number_format(bitem.cost_energy)
color = color_enough
if energy_cur < bitem.cost_energy:
setstr += ' (-{0})'.format(number_format(
int(bitem.cost_energy - energy_cur)))
color = color_notenough
enough_energy = False
self._lbl_price_energy.setText(
'<font color="{0}">{1}</font>'.format(color, setstr))
self._lbl_price_energy_ico.show()
self._lbl_price_energy.show()
else:
self._lbl_price_energy_ico.hide()
self._lbl_price_energy.hide()
#
# enable or disable buttons
if enough_met and enough_cry and enough_deit and \
enough_energy and (self._bitem.seconds_total > 0):
self._btn_upgrade.setEnabled(True)
else:
self._btn_upgrade.setEnabled(False)
# logger.debug('Disabling build {0}: {1} {2} {3} {4} {5}'.format(
# self._bitem.name, enough_met, enough_cry, enough_deit,
# enough_energy, self._bitem.seconds_total))
if self._bitem.is_building_item:
if self._bitem.level > 0:
self._btn_downgrade.setEnabled(True)
else: # nothing to downgrade
self._btn_downgrade.setEnabled(False)
# set button text
if self._bitem.is_shipyard_item:
self._btn_upgrade.setText(self.tr('Build'))
# calculate maximum available to build
counts = [0, 0, 0, 0]
# check metal cost
if self._bitem.cost_met > 0:
counts[0] = int(res_cur.met // self._bitem.cost_met)
# check crystal cost
if self._bitem.cost_cry > 0:
counts[1] = int(res_cur.cry // self._bitem.cost_cry)
# check deit cost
if self._bitem.cost_deit > 0:
counts[2] = int(res_cur.deit // self._bitem.cost_deit)
# check energy
if self._bitem.cost_energy > 0:
counts[3] = int(energy_cur // self._bitem.cost_energy)
maxb = 0
for cnt in counts: # find first count, which is > 0
if cnt > 0:
maxb = cnt
break
for cnt in counts: # find minimum count, which is > 0
if cnt > 0:
if cnt < maxb:
maxb = cnt
# set tip
self._lineedit_quantity.setPlaceholderText(str(maxb))
elif self._bitem.is_research_item or self._bitem.is_researchfleet_item:
self._btn_upgrade.setText(self.tr('Research'))
# show/hide additional buttons
if self._bitem.is_building_item:
self._btn_upgrade.show()
self._btn_downgrade.show()
self._lineedit_quantity.hide()
elif self._bitem.is_shipyard_item:
self._btn_upgrade.show()
self._btn_downgrade.hide()
self._lineedit_quantity.show()
elif self._bitem.is_research_item or self._bitem.is_researchfleet_item:
self._btn_upgrade.show()
self._btn_downgrade.hide()
self._lineedit_quantity.hide()
@pyqtSlot()
def on_upgrade_clicked(self):
if not self._bitem.is_shipyard_item:
# all buildings and researches, quantity is ignored (send 1)
self.requestBuildItem.emit(self._bitem, 1)
else:
# shipyard build item case, read also quantity
qtext = self._lineedit_quantity.text()
qint = -1
try:
qint = int(qtext)
except ValueError:
pass
if qint < 1: # invalid format
QMessageBox.warning(self,
self.tr('Invalid format!'),
self.tr('Please input positive integer value!'))
return
# clear lineedit text
self._lineedit_quantity.setText('')
# emit with qint as quantity
self.requestBuildItem.emit(self._bitem, qint)
@pyqtSlot()
def on_downgrade_clicked(self):
self.requestDowngradeItem.emit(self._bitem)
class Planet_BuildItemsPanel(QFrame):
TYPE_BUILDINGS = 'buildings'
TYPE_SHIPYARD = 'shipyard'
TYPE_RESEARCHES = 'researches'
# bitem, optional quantity (for shipyard)
requestBuildItem = pyqtSignal(XNPlanetBuildingItem, int)
# downgrade a building
requestDowngradeItem = pyqtSignal(XNPlanetBuildingItem)
def __init__(self, parent: QWidget):
super(Planet_BuildItemsPanel, self).__init__(parent)
# data members
self._type = ''
self._planet = XNPlanet()
# setup frame
self.setFrameShape(QFrame.NoFrame)
self.setFrameShadow(QFrame.Raised)
# layout
self._layout = QGridLayout()
self._layout_lastcol = 0
self._layout_lastrow = 0
self._layout.setContentsMargins(0, 0, 0, 0)
self._layout.setSpacing(3)
self._layout.setSizeConstraint(QLayout.SetMinimumSize)
self.setLayout(self._layout)
#
# build item widgets
self._biws = dict()
# "constants"
self.MAX_COLS = 3
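        # biw_for_gid() fills the grid left-to-right, wrapping to a new row after MAX_COLS widgets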
def get_type(self) -> str:
return self._type
def get_planet(self) -> XNPlanet:
return self._planet
def set_type(self, typ: str):
# cannot set type twice
if self._type != '':
raise ValueError('Planet_BuildItemsPanel: Cannot set type twice!')
if typ not in [self.TYPE_BUILDINGS, self.TYPE_SHIPYARD, \
self.TYPE_RESEARCHES]:
raise ValueError('Planet_BuildItemsPanel: '
'invalid type: [{0}]!'.format(typ))
self._type = typ
def set_planet(self, planet: XNPlanet):
self._planet = planet
# update info in widgets
if self._type == self.TYPE_BUILDINGS:
for bitem in self._planet.buildings_items:
biw = self.biw_for_gid(bitem.gid)
biw.set_building_item(bitem, self._planet.res_current,
self._planet.energy.energy_total)
biw.show()
elif self._type == self.TYPE_SHIPYARD:
for bitem in self._planet.shipyard_tems:
biw = self.biw_for_gid(bitem.gid)
biw.set_building_item(bitem, self._planet.res_current,
self._planet.energy.energy_total)
biw.show()
# also defense items
for bitem in self._planet.defense_items:
biw = self.biw_for_gid(bitem.gid)
biw.set_building_item(bitem, self._planet.res_current,
self._planet.energy.energy_total)
biw.show()
elif self._type == self.TYPE_RESEARCHES:
for bitem in self._planet.research_items:
biw = self.biw_for_gid(bitem.gid)
biw.set_building_item(bitem, self._planet.res_current,
self._planet.energy.energy_total)
biw.show()
# also research_fleet items
for bitem in self._planet.researchfleet_items:
biw = self.biw_for_gid(bitem.gid)
biw.set_building_item(bitem, self._planet.res_current,
self._planet.energy.energy_total)
biw.show()
def biw_for_gid(self, gid: int) -> Planet_BuildItemWidget:
"""
Gets existing child widget for build item, or creates it
:param gid: building id
:return: Planet_BuildItemWidget
"""
if gid not in self._biws:
biw = Planet_BuildItemWidget(self)
biw.hide()
biw.requestBuildItem.connect(self.on_request_build_item)
biw.requestDowngradeItem.connect(self.on_request_downgrade_item)
self._biws[gid] = biw
self._layout.addWidget(biw, self._layout_lastrow,
self._layout_lastcol)
self._layout_lastcol += 1
if self._layout_lastcol > (self.MAX_COLS -1):
self._layout_lastcol = 0
self._layout_lastrow += 1
else:
biw = self._biws[gid]
return biw
@pyqtSlot(XNPlanetBuildingItem, int)
def on_request_build_item(self, bitem: XNPlanetBuildingItem,
quantity: int):
self.requestBuildItem.emit(bitem, quantity)
@pyqtSlot(XNPlanetBuildingItem)
def on_request_downgrade_item(self, bitem: XNPlanetBuildingItem):
self.requestDowngradeItem.emit(bitem)
class PlanetWidget(QFrame):
"""
Provides view of galaxy/solarsystem contents as table widget
"""
requestOpenGalaxy = pyqtSignal(XNCoords)
def __init__(self, parent: QWidget):
super(PlanetWidget, self).__init__(parent)
#
self.world = XNovaWorld_instance()
self._planet = XNPlanet()
# setup frame
self.setFrameShape(QFrame.StyledPanel)
self.setFrameShadow(QFrame.Raised)
# layout
self._layout = QVBoxLayout()
self._layout.setContentsMargins(0, 0, 0, 0)
self._layout.setSpacing(3)
self.setLayout(self._layout)
# basic info panel
self._bipanel = Planet_BasicInfoPanel(self)
self._bipanel.requestOpenGalaxy.connect(self.on_request_open_galaxy)
self._bipanel.requestRefreshPlanet.connect(
self.on_request_refresh_planet)
self._bipanel.requestRenamePlanet.connect(self.on_request_rename_planet)
# build progress widgets
self._bpw_buildings = BuildProgressWidget(self)
self._bpw_buildings.hide()
self._bpw_buildings.hide_planet_name()
self._bpw_buildings.layout().setContentsMargins(5, 2, 5, 2)
self._bpw_shipyard = BuildProgressWidget(self)
self._bpw_shipyard.hide()
self._bpw_shipyard.hide_planet_name()
self._bpw_shipyard.layout().setContentsMargins(5, 2, 5, 2)
self._bpw_research = BuildProgressWidget(self)
self._bpw_research.hide()
self._bpw_research.hide_planet_name()
self._bpw_research.layout().setContentsMargins(5, 2, 5, 2)
# buildings
self._cf_buildings = CollapsibleFrame(self)
self._cf_buildings.setTitle(self.tr('Buildings'))
self._sa_buildings = QScrollArea(self._cf_buildings)
self._bip_buildings = Planet_BuildItemsPanel(self._sa_buildings)
self._bip_buildings.set_type(Planet_BuildItemsPanel.TYPE_BUILDINGS)
self._bip_buildings.show()
self._sa_buildings.setWidget(self._bip_buildings)
self._cf_buildings.addWidget(self._sa_buildings)
# shipyard
self._cf_shipyard = CollapsibleFrame(self)
self._cf_shipyard.setTitle(self.tr('Shipyard'))
self._sa_shipyard = QScrollArea(self._cf_shipyard)
self._bip_shipyard = Planet_BuildItemsPanel(self._cf_shipyard)
self._bip_shipyard.set_type(Planet_BuildItemsPanel.TYPE_SHIPYARD)
self._sa_shipyard.setWidget(self._bip_shipyard)
self._cf_shipyard.addWidget(self._sa_shipyard)
# research
self._cf_research = CollapsibleFrame(self)
self._cf_research.setTitle(self.tr('Research'))
self._sa_research = QScrollArea(self._cf_research)
self._bip_research = Planet_BuildItemsPanel(self._cf_research)
self._bip_research.set_type(Planet_BuildItemsPanel.TYPE_RESEARCHES)
self._sa_research.setWidget(self._bip_research)
self._cf_research.addWidget(self._sa_research)
# layout finalize
self._layout.addWidget(self._bipanel)
self._layout.addWidget(self._bpw_buildings)
self._layout.addWidget(self._bpw_shipyard)
self._layout.addWidget(self._bpw_research)
self._layout.addWidget(self._cf_buildings)
self._layout.addWidget(self._cf_shipyard)
self._layout.addWidget(self._cf_research)
# expand buildings frame by default
self._cf_buildings.expand()
#
# connect signals
self._cf_buildings.expanded.connect(self.on_frame_buildings_expanded)
self._cf_buildings.collapsed.connect(self.on_frame_buildings_collapsed)
self._cf_shipyard.expanded.connect(self.on_frame_shipyard_expanded)
self._cf_shipyard.collapsed.connect(self.on_frame_shipyard_collapsed)
self._cf_research.expanded.connect(self.on_frame_research_expanded)
self._cf_research.collapsed.connect(self.on_frame_research_collapsed)
#
self._bpw_buildings.requestCancelBuild.connect(
self.on_request_cancel_build)
self._bpw_research.requestCancelBuild.connect(
self.on_request_cancel_build)
#
self._bip_buildings.requestBuildItem.connect(self.on_request_build_item)
self._bip_buildings.requestDowngradeItem.connect(
self.on_request_downgrade_item)
self._bip_shipyard.requestBuildItem.connect(self.on_request_build_item)
self._bip_research.requestBuildItem.connect(self.on_request_build_item)
#
# create timer
self._timer = QTimer(self)
self._timer.timeout.connect(self.on_timer)
def get_tab_type(self) -> str:
return 'planet'
def setPlanet(self, planet: XNPlanet):
self._planet = planet
# setup basic info panel
self._bipanel.setup_from_planet(self._planet)
# setup build progress widgets
self._bpw_buildings.update_from_planet(planet, typ='')
self._bpw_shipyard.update_from_planet(planet,
typ=BuildProgressWidget.BPW_TYPE_SHIPYARD)
self._bpw_research.update_from_planet(planet,
typ=BuildProgressWidget.BPW_TYPE_RESEARCH)
# setup build items panels (in collapsible frames)
self._bip_buildings.set_planet(planet)
self._bip_shipyard.set_planet(planet)
self._bip_research.set_planet(planet)
#
# start/restart timer
self._timer.stop()
self._timer.setInterval(1000)
self._timer.setSingleShot(False)
self._timer.start()
def planet(self) -> XNPlanet:
return self._planet
@pyqtSlot()
def on_timer(self):
# update basic info panel - refresh resources
self._bipanel.update_resources()
# update build progress widgets - tick builds
self._bpw_buildings.update_from_planet(self._planet)
self._bpw_shipyard.update_from_planet(self._planet,
BuildProgressWidget.BPW_TYPE_SHIPYARD)
self._bpw_research.update_from_planet(self._planet,
BuildProgressWidget.BPW_TYPE_RESEARCH)
@pyqtSlot(XNCoords)
def on_request_open_galaxy(self, coords: XNCoords):
self.requestOpenGalaxy.emit(coords)
@pyqtSlot()
def on_request_refresh_planet(self):
self.world.signal(self.world.SIGNAL_RELOAD_PLANET,
planet_id=self._planet.planet_id)
@pyqtSlot(int, str)
def on_request_rename_planet(self, planet_id: int, planet_name: str):
self.world.signal(self.world.SIGNAL_RENAME_PLANET,
planet_id=planet_id, new_name=planet_name)
@pyqtSlot(XNPlanetBuildingItem)
def on_request_cancel_build(self, bitem: XNPlanetBuildingItem):
if bitem is None:
return
if (bitem.remove_link is None) or (bitem.remove_link == ''):
return
self.world.signal(XNovaWorld.SIGNAL_BUILD_CANCEL,
planet_id=self._planet.planet_id,
bitem=bitem)
@pyqtSlot(XNPlanetBuildingItem, int)
def on_request_build_item(self, bitem: XNPlanetBuildingItem,
quantity: int):
if bitem is None:
return
self.world.signal(XNovaWorld.SIGNAL_BUILD_ITEM,
planet_id=self._planet.planet_id,
bitem=bitem,
quantity=quantity)
@pyqtSlot(XNPlanetBuildingItem)
def on_request_downgrade_item(self, bitem: XNPlanetBuildingItem):
if bitem is None:
return
if not bitem.is_building_item:
logger.warn('Cannot dismantle item that is '
'not building: {0}'.format(bitem))
return
downgrade_price = '{0} {3}, {1} {4}, {2} {5}'.format(
self.tr('Metal'), self.tr('Crystal'), self.tr('Deit'),
int(bitem.cost_met//2),
int(bitem.cost_cry // 2),
int(bitem.cost_deit // 2))
btn = QMessageBox.question(self,
self.tr('Downgrade building'),
self.tr('Are you sure you want to downgrade this building?')
+ '\n' + '{0} {1} {2}\n{3}: {4}'.format(
bitem.name,
self.tr('lv.'),
bitem.level,
self.tr('Cost'),
downgrade_price),
QMessageBox.Yes | QMessageBox.No)
if btn == QMessageBox.Yes:
self.world.signal(XNovaWorld.SIGNAL_BUILD_DISMANTLE,
planet_id=self._planet.planet_id,
bitem=bitem)
@pyqtSlot()
def on_frame_buildings_collapsed(self):
pass
@pyqtSlot()
def on_frame_buildings_expanded(self):
# collapse other frames
self._cf_shipyard.collapse()
self._cf_research.collapse()
@pyqtSlot()
def on_frame_shipyard_collapsed(self):
pass
@pyqtSlot()
def on_frame_shipyard_expanded(self):
# collapse other frames
self._cf_buildings.collapse()
self._cf_research.collapse()
@pyqtSlot()
def on_frame_research_collapsed(self):
pass
@pyqtSlot()
def on_frame_research_expanded(self):
# collapse other frames
self._cf_buildings.collapse()
self._cf_shipyard.collapse()
|
minlexx/xnovacmd
|
ui/planet_widget.py
|
Python
|
gpl-2.0
| 37,758
|
[
"CRYSTAL",
"Galaxy"
] |
7963953c4684e8f33973a510385d68f782e263d8b33e852948e2b8e99e7afdb8
|
'''
This program identifies multiply-mapped reads whose alignments all fall on different isoforms of the same gene.
This program has three steps.
1. Load the relations between ENSTs and ENSGs.
2. Select the reads which map to different ENSTs of the same ENSG.
3. Dump them to the output file.
The Ensembl annotation lists exons from 5' to 3'. The start and end positions of each exon are given in positive-strand coordinates, even when the transcript is on the reverse strand; in that case only the DNA sequence in the cDNA file comes from the reverse strand.
Note that the reported alignment position in SAM format is always a 1-based coordinate on the reference strand. (Although we will usually assume the reference is the positive strand, that is not always true when the reference is a transcriptome.) Unlike BLAST/BLAT, which report aligned positions by the head of the query string: if the reversed query string aligns to the reference, BLAST reports the position of the end of the query string.
E.g., the query string is CTGAAGAAGT, and its complementary string aligns to the reference.
CGGCAGTAGGCAATAAACTTCTTCAGCTTGGCCAGGTCAATCTCGCCCTCCGCAGC
                ACTTCTTCAG
BLAST will report 26, and SAM will report 17.
If the gene is on the reverse strand, it becomes more complex.
CGGCAGTAGGCAATAAACTTCTTCAGCTTGGCCAGGTCAATCTCGCCCTCCGCAGC
                              ACTTCTTCAG
BLAST will report 31, and SAM will report 40.
'''
import re
from optparse import OptionParser
from array import *
from interval_tree import *
from map_and_tables import *
from sys import stdout
import subprocess
chr_boundary=dict(zip(chr_id,chr_length))
chr_id_name_map=dict(zip(chr_id,chr_name))
chr_name_id_map=dict(zip(chr_name,chr_id))
SAM_XA_parser = re.compile(r"([^:;,]+,[\+|\-][0-9]+,[^;]+,[^;]+);")
ENSG_parser = re.compile('gene_id \"([^\"]*)\";')
ENST_parser = re.compile('transcript_id \"([^\"]*)\";')
exon_number_parser = re.compile('exon_number \"([0-9]*)\";')
CIGAR_parser = re.compile(r'([0-9]+[M|I|D|N|S|H|P])')
Number_parser = re.compile(r'([0-9]+)')
Insertion_parser = re.compile(r'([0-9]+)I')
Deletion_parser = re.compile(r'([0-9]+)D')
Skip_parser = re.compile(r'([0-9]+)N')
SoftClip_parser = re.compile(r'([0-9]+)S')
def Is_SAM_header(line,extension=[]):
SAM_spec=["@HD","@SQ","@RG","@PG","@CO"]+extension
SAM_spec=set(SAM_spec)
if line[:3] in SAM_spec:
return True
return False
def extract_SAM_XA(line):
XA_pos=line.find("XA:Z:")
if XA_pos == -1:
return []
result=line[XA_pos+5:].strip(" \r\n\t").split(";")
j=len(result)
for i in xrange(len(result)):
'''
if 3!=result[i].count(","):
j=i
break
'''
if (result[i]=='' or (-1!=result[i].find(" ")) or (-1!=result[i].find("\t"))):
j=i
break
return result[:j]
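# e.g. a SAM optional field 'XA:Z:1,+100,50M,2;2,-300,50M,3;' yields
#   extract_SAM_XA(line) -> ['1,+100,50M,2', '2,-300,50M,3']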
def get_exon_width(exon_number,ENST):
# ENST is ENST_structures[ENST_id]
size=len(ENST[1])
if exon_number>=size:
return False
elif exon_number==0:
return ENST[1][0]
else:
return ENST[1][exon_number]-ENST[1][exon_number-1]
def extract_exon_info(line):
global ENSG_parser
global ENST_parser
global exon_number_parser
a=ENSG_parser.search(line)
b=ENST_parser.search(line)
c=exon_number_parser.search(line)
if a!=None:
a=a.group(1)
if b!=None:
b=b.group(1)
if c!=None:
c=c.group(1)
return (a,b,c)
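# e.g. a GTF attribute column containing
#   gene_id "ENSG00000000001"; transcript_id "ENST00000000001"; exon_number "1";
# yields ('ENSG00000000001', 'ENST00000000001', '1')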
def extend_ENST_info_records(gtf_file,ENST_structures):
global ENST_parser
f=open(gtf_file,"r")
for line in f:
li=line.split("\t")
if li[1]!="protein_coding":
continue
tmp=ENST_parser.findall(line)
if tmp == [] or len(tmp)>1:
return False
else:
tmp=tmp[0]
if tmp in ENST_structures:
ENST_structures[tmp]+=("C",)
f.close()
for ENST in ENST_structures.iterkeys():
if len(ENST_structures[ENST])<5:
ENST_structures[ENST]+=("NC",)
return True
def extract_gene_isoform_relation(gtf_file):
ENST2ENSG=dict()
fhr=open(gtf_file,"r")
for line in fhr:
line_tmp=line.strip(" \r\n\t")
line_tmp=line_tmp.split("\t")
# we only consider exons
if line_tmp[2]=="exon":
tmp_ENSG,tmp_ENST,tmp_exon_number=extract_exon_info(line)
if tmp_ENST not in ENST2ENSG:
ENST2ENSG[tmp_ENST]=tmp_ENSG
fhr.close()
return ENST2ENSG
def complement_sequence(seq):
seq=list(seq)
seq.reverse()
for i in xrange(len(seq)):
if seq[i]=="N":
continue
elif seq[i]=="A":
seq[i]="T"
elif seq[i]=="G":
seq[i]="C"
elif seq[i]=="C":
seq[i]="G"
elif seq[i]=="T":
seq[i]="A"
else:
print "Wrong Sequence Format"
exit(2)
return "".join(seq)
def reverse_CIGAR(CIGAR):
CIGAR=CIGAR_parser.findall(CIGAR)
CIGAR.reverse()
return "".join(CIGAR)
def extract_gene_structure_from_bed(annotation_file):
    '''
    BED format
    http://genome.ucsc.edu/FAQ/FAQformat#format1
    This format uses zero-based semi-open notation, i.e., [a,b).
    It means the b-th base is not included in the interval.
    The BED format is not defined very precisely; in particular, it does
    not define the order of exons for each isoform. Hence, this program
    is based on the output of the UCSC Genome Browser.
    http://genome.ucsc.edu/cgi-bin/hgTables (choose output format as BED)
    '''
global chr_id
chr_id_tmp=[]
for i in chr_id:
if i=="MT":
chr_id_tmp.append("chrM")
else:
chr_id_tmp.append("chr"+i)
mapping=dict(zip(chr_id_tmp,chr_id))
ENST_structures=dict()
fhr=open(annotation_file,"r")
for line in fhr:
line_tmp=line.strip(" \r\n\t")
line_tmp=line_tmp.split("\t")
chrom=line_tmp[0]
chromStart=int(line_tmp[1])
if chrom not in mapping:
continue
try:
ref_name=line_tmp[3]
strand=line_tmp[5]
exon_number=int(line_tmp[9])
exon_sizes=line_tmp[10].split(",")
exon_starts=line_tmp[11].split(",")
except IndexError:
print "IndexError A:",line
continue
if len(exon_sizes)==len(exon_starts):
for i in xrange(len(exon_sizes)):
if exon_sizes[i]=='':
del exon_sizes[i]
else:
exon_sizes[i]=int(exon_sizes[i])
if exon_starts[i]=='':
del exon_starts[i]
else:
exon_starts[i]=int(exon_starts[i])+chromStart+1
if strand=="-":
exon_sizes.reverse()
exon_starts.reverse()
else:
print "exon_sizes and exon_starts do not agreee"
continue
if len(exon_sizes)==exon_number:
if ref_name not in ENST_structures:
for i in xrange(1,exon_number):
exon_sizes[i]=exon_sizes[i]+exon_sizes[i-1]
ENST_structures[ref_name]=(array("L",exon_starts),array("L",exon_sizes),strand,mapping[chrom])
else:
print "Duplicate Transcript", line
continue
else:
print "exon numbers disagrees"
continue
return ENST_structures
def extract_gene_structure_from_gtf(annotation_file):
'''
GTF2.2: A Gene Annotation Format
http://mblab.wustl.edu/GTF22.html
'''
ENST_structures=dict()
prev_ENST=None
prev_ENST_strand=None
prev_ENST_chromosme=None
curr_block=dict()
fhr=open(annotation_file,"r")
for line in fhr:
line_tmp=line.strip(" \r\n\t")
line_tmp=line_tmp.split("\t")
# we only consider exons
if line_tmp[2]=="exon":
            # we only consider 25 chromosomes
if line_tmp[0] not in chr_name_id_map:
continue
tmp_ENSG,tmp_ENST,tmp_exon_number=extract_exon_info(line)
tmp_exon_number=int(tmp_exon_number)
if prev_ENST==tmp_ENST:
prev_ENST_strand=line_tmp[6]
prev_ENST_chromosme=line_tmp[0]
if tmp_exon_number not in curr_block:
curr_block[tmp_exon_number]=(int(line_tmp[3]),int(line_tmp[4]))
else:
print prev_ENST
print "Two the same exons !!!", line
print "tmp_ENST,tmp_ENSG,tmp_exon_number",tmp_ENST,tmp_ENSG,tmp_exon_number
print curr_block
exit(2)
else:
'''
Enter new ENST block of some gene.
'''
try:
#Dump the old one first.
tmp_len=len(curr_block)
exon_widths=array("L")
exon_starts=array("L")
if tmp_len>0:
for w in xrange(1,tmp_len+1):
exon_widths.append(curr_block[w][1]-curr_block[w][0]+1)
exon_starts.append(curr_block[w][0])
for w in xrange(1,tmp_len):
exon_widths[w]=exon_widths[w]+exon_widths[w-1]
if prev_ENST not in ENST_structures:
ENST_structures[prev_ENST]=(exon_starts,exon_widths,prev_ENST_strand,prev_ENST_chromosme)
else:
print "Two the same ENST !!!",line
exit(2)
# Initialization for new block
prev_ENST=tmp_ENST
prev_ENST_strand=line_tmp[6]
prev_ENST_chromosme=line_tmp[0]
curr_block=None
curr_block=dict()
curr_block[tmp_exon_number]=(int(line_tmp[3]),int(line_tmp[4]))
except KeyError:
print "KeyError:",line
print "tmp_ENST,tmp_ENSG,tmp_exon_number",tmp_ENST,tmp_ENSG,tmp_exon_number
print curr_block
exit(2)
fhr.close()
    # If the last block has not been dumped yet, dump it!
tmp_len=len(curr_block)
exon_widths=array("L")
exon_starts=array("L")
if tmp_len>0:
for w in xrange(1,tmp_len+1):
exon_widths.append(curr_block[w][1]-curr_block[w][0]+1)
exon_starts.append(curr_block[w][0])
for w in xrange(1,tmp_len):
exon_widths[w]=exon_widths[w]+exon_widths[w-1]
if prev_ENST not in ENST_structures:
ENST_structures[prev_ENST]=(exon_starts,exon_widths,prev_ENST_strand,prev_ENST_chromosme)
else:
print "Two the same ENST !!!",line
exit(2)
return ENST_structures
def extract_gene_structure(annotation_file,file_type="GTF"):
if file_type=="GTF":
return extract_gene_structure_from_gtf(annotation_file)
elif file_type=="BED":
return extract_gene_structure_from_bed(annotation_file)
else:
return False
def compact_CIGARs(old_CIGAR):
'''
This function is to compact the CIGAR strings, e.g.,
    it converts 3M30M500D20D7M1000D30M5M to 33M520D7M1000D35M.
'''
Cparser = re.compile(r'([+|-]*[0-9]+[M|I|D|N|S|H|P])')
old_CIGAR=Cparser.findall(old_CIGAR)
for i in xrange(1,len(old_CIGAR)):
first,second=old_CIGAR[i-1:i+1]
if first[-1]==second[-1]:
old_CIGAR[i]=str(int(first[:-1])+int(second[:-1]))+second[-1]
old_CIGAR[i-1]=""
return "".join(old_CIGAR)
def merge_CIGARs(transcript_CIGAR,genome_CIGAR):
'''
    If we align reads to the transcriptome, we get a transcript_CIGAR; however, the aligned region also has a genome_CIGAR according to the gene structure. Thus we need to merge these two CIGARs.
'''
global CIGAR_parser,Number_parser
# There is no given CIGAR string, thus we just return the CIGAR.
t_structure=CIGAR_parser.findall(transcript_CIGAR)
g_structure=CIGAR_parser.findall(genome_CIGAR)
#m_structure=[]
output_CIGAR=[]
pop_g_structure=True
pop_t_structure=True
g_type='X'
t_type='X'
tmp=0
try:
while(not (g_type=='' and t_type=='')):
if (pop_g_structure==True):
if len(g_structure)>0:
i=g_structure.pop(0)
g_type=i[-1:]
g_value=int(i[:-1])
pop_g_structure=False
else:
g_type=''
if (pop_t_structure==True):
if len(t_structure)>0:
j=t_structure.pop(0)
t_type=j[-1:]
t_value=int(j[:-1])
pop_t_structure=False
else:
t_type=''
'''
            This branch assumes the transcript_CIGAR was generated by an aligner that does not support long-gap alignment.
'''
if t_type=='M' and g_type=='M':
tmp=min(g_value,t_value)
output_CIGAR.append(str(tmp)+'M')
g_value-=tmp
t_value-=tmp
elif t_type=='I' and g_type=='M':
output_CIGAR.append(str(t_value)+'I')
t_value=0
elif t_type=='D' and g_type=='M':
tmp=min(g_value,t_value)
output_CIGAR.append(str(tmp)+'D')
t_value-=tmp
g_value-=tmp
elif t_type=='N' and g_type=='M':
tmp=min(g_value,t_value)
output_CIGAR.append(str(tmp)+'N')
t_value-=tmp
g_value-=tmp
elif t_type=='S' and g_type=='M':
output_CIGAR.append(str(t_value)+'S')
t_value=0
g_value-=t_value
elif t_type=='M' and g_type=='N':
output_CIGAR.append(str(g_value)+'N')
g_value=0
elif t_type=='I' and g_type=='N':
output_CIGAR.append(str(t_value)+'I')
output_CIGAR.append(str(g_value)+'N')
g_value=0
t_value=0
elif t_type=='D' and g_type=='N':
output_CIGAR.append(str(g_value)+'N')
g_value=0
elif t_type=='N' and g_type=='N':
output_CIGAR.append(str(g_value+t_value)+'N')
g_value=0
t_value=0
elif t_type=='S' and g_type=='N':
output_CIGAR.append(str(t_value)+'S')
t_value=0
g_value-=g_value
elif t_type=='' and g_type=='':
pass
elif t_type=='':
output_CIGAR.append(str(g_value)+g_type)
g_value=0
elif g_type=='':
output_CIGAR.append(str(t_value)+t_type)
t_value=0
else:
print "BUG:"+transcript_CIGAR
break
if g_value<=0:
pop_g_structure=True
if t_value<=0:
pop_t_structure=True
except TypeError:
print "TypeError:"
return "".join(output_CIGAR)
def build_CIGAR(pos_in_exon,exon_number,DNA_structure,read_length=75,align_sense="+"):
'''
Compute the CIGAR string based on DNA_structure (a.k.a ENST_structures[ENST])
Cf. http://samtools.sourceforge.net/SAM1.pdf
    Now we know this read starts in the i-th exon, and we know its starting
    position (1-based) within the i-th exon.
    We always presume that pos_in_exon is the position of the first base of the read when the gene is on the positive strand, and the position of the last base of the read when the gene is on the reverse strand.
'''
global CIGAR_parser,Number_parser
CIGAR=[]
tmp=0
tmp_bp=read_length
exon_count=len(DNA_structure[1])
if DNA_structure[2]=="+":
        # Compute the length of the matched subsequence in this exon.
if exon_number==0:
tmp=DNA_structure[1][0]-pos_in_exon+1
else:
tmp=(DNA_structure[1][exon_number]-DNA_structure[1][exon_number-1])-pos_in_exon+1
if tmp<=0:
# pos_in_exon is larger than the length of exon `exon_number`.
return False
for j in xrange(exon_number,exon_count):
if tmp_bp<=tmp:
CIGAR.append(str(tmp_bp)+"M")
break
else:
CIGAR.append(str(tmp)+"M")
tmp_bp=tmp_bp-tmp
if j+1>=exon_count:
return False
else:
# compute the length of the j-th intron.
if j==0:
tmp=DNA_structure[0][1]-(DNA_structure[0][0]+DNA_structure[1][0])
else:
tmp=DNA_structure[0][j+1]-(DNA_structure[0][j]+(DNA_structure[1][j]-DNA_structure[1][j-1]))
CIGAR.append(str(tmp)+"N")
# Compute the length of the next exon.
tmp=(DNA_structure[1][j+1]-DNA_structure[1][j])
else:
'''
        Note that, since we trace from the current exon down to the 0-th exon following positive-strand positions, the returned CIGAR is in positive-strand coordinates.
'''
        # Compute the length of the matched subsequence in this exon.
if exon_number==0:
tmp=DNA_structure[1][0]-pos_in_exon+1
else:
tmp=(DNA_structure[1][exon_number]-DNA_structure[1][exon_number-1])-pos_in_exon+1
if tmp<=0:
# pos_in_exon is larger than the length of exon `exon_number`.
return False
for j in xrange(exon_number,-1,-1):
#print tmp
if tmp_bp<=tmp:
CIGAR.append(str(tmp_bp)+"M")
break
else:
CIGAR.append(str(tmp)+"M")
tmp_bp=tmp_bp-tmp
if j-1<0:
return False
else:
# compute the length of the intron between j-th exon and (j-1)-th exon
if j==0:
# There is no intron between 0-th exon and (-1)-th exon
return False
else:
tmp=DNA_structure[0][j-1]-(DNA_structure[0][j]+(DNA_structure[1][j]-DNA_structure[1][j-1]))
CIGAR.append(str(tmp)+"N")
# Compute the length of the next exon.
if j-2>=0:
tmp=(DNA_structure[1][j-1]-DNA_structure[1][j-2])
else:
tmp=DNA_structure[1][j-1]
return "".join(CIGAR)
def genome_GPS(ENST,pos,ENST_structures,read_length=75,align_sense="+"):
'''
Given ENST name and position in ENST, this function will return the genomic position
Note that pos is 1-based coordinates.
'''
if ENST not in ENST_structures:
print ENST," not in Your annoation."
#tmp=ENST_structures.keys()
#print tmp[:100]
return False
else:
'''
ENST_structures: prev_ENST -> (exon_starts,exon_widths,prev_ENST_strand,prev_ENST_chromosme)
        Note that exon_widths is cumulative.
'''
if ENST_structures[ENST][2]=="-":
pos=pos+read_length-1
offset=0
tmp_len=len(ENST_structures[ENST][1])
for i in xrange(tmp_len):
if pos<=ENST_structures[ENST][1][i]:
'''
                Given a read ACTTCCCTTG, we say the rightmost base is the first base of this read,
                and the leftmost base is the last base of this read, i.e., A and G respectively.
                The SAM format will return the position of the first base in cDNA coordinates.
                However, when we align this read to the cDNA sequence of a gene on the reverse strand,
                we need to convert the position to the last base of this read to keep the notation
                consistent with the positive strand.
'''
if ENST_structures[ENST][2]=="+":
if i==0:
offset=pos-1
else:
offset=pos-ENST_structures[ENST][1][i-1]-1
else:
if i==0:
offset=ENST_structures[ENST][1][0]-pos
else:
offset=ENST_structures[ENST][1][i]-pos
pos_in_exon=offset+1
return [ENST_structures[ENST][3],ENST_structures[ENST][0][i]+(offset),build_CIGAR(pos_in_exon,i,ENST_structures[ENST],read_length)]
# This line is reached only if pos > the length of the whole transcript ENST.
return False
def verification_for_bowtie_PIPE(input_file,output_file,ENST_structures):
global options
sucessful_count=0
prev_alignment=None
curr_block=[]
fhr=subprocess.Popen(input_file, shell=True,stdout=subprocess.PIPE).stdout
fhw=open(output_file,"w+")
for line in fhr:
if Is_SAM_header(line):
continue
line_tmp=line.strip(" \r\n\t")
line_tmp=line_tmp.split("\t")
if line_tmp[2]=='*':
continue
if int(line_tmp[1]) & 4 ==4:
continue
'''
Since bowtie emits its output read by read, all alignments of a given read
appear together as a cluster (a block).
Thus we process the output one block at a time.
'''
if prev_alignment!=line_tmp[0]:
# Enter a new block
# Dump the previous block
tmp_len=len(curr_block)
pos_set=set()
if tmp_len>0:
read_name=set()
for alignment in curr_block:
read_name.add(alignment[0])
tmp_read_length=adjust_length_by_CIGAR(alignment[5],len(alignment[9]))
target=genome_GPS(alignment[2],int(alignment[3]),ENST_structures,tmp_read_length)
if target==False or target[1]==False or target[2]==False:
print "FAIL in genome_GPS:",alignment,target
read_name=[]
pos_set=[]
break
pos_set.add((target[0],target[1]))
if len(read_name)>1:
# If this program wrongly puts alignments of different reads into the same block, then we just exit!
print "Alignments of each read should be put together.\n",read_name,curr_block,pos_set
raise SystemExit
if len(pos_set)==1:
# All alignments have the same genomic position.
sucessful_count+=1
alignment=curr_block[0]
if ENST_structures[alignment[2]][2]=="-":
# Since we aligned against cDNA, convert the alignment back to the genomic strand according to the ENST.
alignment[5]=reverse_CIGAR(alignment[5])
if (int(alignment[1]) & 16)==16:
alignment[1]= str(int(alignment[1]) - 16)
else:
alignment[1]= str(int(alignment[1]) | 16)
alignment[9]=complement_sequence(alignment[9])
alignment[10]=alignment[10][::-1] # reverse the Phred qualities
alignment[5]=merge_CIGARs(alignment[5],target[2])
alignment[2]=ENST_structures[alignment[2]][3]
tmp=pos_set.pop()
alignment[3]=str(tmp[1])
fhw.write("\t".join(alignment)+"\n")
else:
print read_name,"::",str(len(curr_block)),"::",pos_set
# initialize for new block
prev_alignment=line_tmp[0]
curr_block=[line_tmp]
else:
curr_block.append(line_tmp)
#fhr.close()
# Dump the last block
tmp_len=len(curr_block)
pos_set=set()
if tmp_len>0:
read_name=set()
for alignment in curr_block:
read_name.add(alignment[0])
tmp_read_length=adjust_length_by_CIGAR(alignment[5],len(alignment[9]))
target=genome_GPS(alignment[2],int(alignment[3]),ENST_structures,tmp_read_length)
if target==False or target[1]==False or target[2]==False:
print alignment,target
continue
pos_set.add((target[0],target[1]))
if len(read_name)>1:
# If this program wrongly puts alignments of different reads into the same block, then we just exit!
print "Alignments of each read should be put together.\n",read_name,curr_block,pos_set
raise SystemExit
if len(pos_set)==1:
# All alignments have the same genomic position.
sucessful_count+=1
alignment=curr_block[0]
if ENST_structures[alignment[2]][2]=="-":
# Since we aligned against cDNA, convert the alignment back to the genomic strand according to the ENST.
alignment[5]=reverse_CIGAR(alignment[5])
if (int(alignment[1]) & 16)==16:
alignment[1]= str(int(alignment[1]) - 16)
else:
alignment[1]= str(int(alignment[1]) | 16)
alignment[9]=complement_sequence(alignment[9])
alignment[10]=alignment[10][::-1] # reverse the Phred qualities
alignment[5]=merge_CIGARs(alignment[5],target[2])
alignment[2]=ENST_structures[alignment[2]][3]
tmp=pos_set.pop()
alignment[3]=str(tmp[1])
fhw.write("\t".join(alignment)+"\n")
else:
print read_name,"::",str(len(curr_block)),"::",pos_set
fhw.close()
print sucessful_count
return True
def verification_for_bowtie(input_file,output_u_file,ENST_structures,output_m_file=stdout):
sucessful_count=0
prev_alignment=None
curr_block=[]
fhr=open(input_file,"r")
fhw=open(output_u_file,"w+")
if output_m_file!=stdout:
fhw2=open(output_m_file,"w+")
else:
fhw2=stdout
for line in fhr:
if Is_SAM_header(line):
continue
line_tmp=line.strip(" \r\n\t")
line_tmp=line_tmp.split("\t")
if line_tmp[2]=='*':
continue
if int(line_tmp[1]) & 4 ==4:
continue
'''
Since bowtie emits its output read by read, all alignments of a given read
appear together as a cluster (a block).
Thus we process the output one block at a time.
'''
if prev_alignment!=line_tmp[0]:
# Enter a new block
# Dump the previous block
tmp_len=len(curr_block)
pos_set=set()
if tmp_len>0:
read_name=set()
for alignment in curr_block:
read_name.add(alignment[0])
tmp_read_length=adjust_length_by_CIGAR(alignment[5],len(alignment[9]))
target=genome_GPS(alignment[2],int(alignment[3]),ENST_structures,tmp_read_length)
if target==False or target[1]==False or target[2]==False:
print "FAIL in genome_GPS:",alignment,target
read_name=[]
pos_set=[]
break
pos_set.add((target[0],target[1]))
if len(read_name)>1:
# If this program wrongly puts alignments of different reads into the same block, then we just exit!
print "Alignments of each read should be put together.\n",read_name,curr_block,pos_set
raise SystemExit
if len(pos_set)==1:
# All alignments have the same genomic position.
sucessful_count+=1
alignment=curr_block[0]
if ENST_structures[alignment[2]][2]=="-":
# Since we aligned against cDNA, convert the alignment back to the genomic strand according to the ENST.
alignment[5]=reverse_CIGAR(alignment[5])
if (int(alignment[1]) & 16)==16:
alignment[1]= str(int(alignment[1]) - 16)
else:
alignment[1]= str(int(alignment[1]) | 16)
alignment[9]=complement_sequence(alignment[9])
alignment[10]=alignment[10][::-1] # reverse the Phred qualities
alignment[5]=merge_CIGARs(alignment[5],target[2])
isofrom_strand=ENST_structures[alignment[2]][2]
alignment[2]=ENST_structures[alignment[2]][3]
tmp=pos_set.pop()
alignment[3]=str(tmp[1])
if -1!=alignment[5].find("N"):
alignment.append("XS:A:"+isofrom_strand)
fhw.write("\t".join(alignment)+"\n")
else:
print >>fhw2, read_name,"::",str(len(curr_block)),"::",pos_set
# initialize for new block
prev_alignment=line_tmp[0]
curr_block=[line_tmp]
else:
curr_block.append(line_tmp)
fhr.close()
# Dump the last block
tmp_len=len(curr_block)
pos_set=set()
if tmp_len>0:
read_name=set()
for alignment in curr_block:
read_name.add(alignment[0])
tmp_read_length=adjust_length_by_CIGAR(alignment[5],len(alignment[9]))
target=genome_GPS(alignment[2],int(alignment[3]),ENST_structures,tmp_read_length)
if target==False or target[1]==False or target[2]==False:
print alignment,target
continue
pos_set.add((target[0],target[1]))
if len(read_name)>1:
# If this program wrongly puts alignments of different reads into the same block, then we just exit!
print "Alignments of each read should be put together.\n",read_name,curr_block,pos_set
raise SystemExit
if len(pos_set)==1:
# All alignments have the same genomic position.
sucessful_count+=1
alignment=curr_block[0]
if ENST_structures[alignment[2]][2]=="-":
# Since we aligned against cDNA, convert the alignment back to the genomic strand according to the ENST.
alignment[5]=reverse_CIGAR(alignment[5])
if (int(alignment[1]) & 16)==16:
alignment[1]= str(int(alignment[1]) - 16)
else:
alignment[1]= str(int(alignment[1]) | 16)
alignment[9]=complement_sequence(alignment[9])
alignment[10]=alignment[10][::-1] # reverse the Phred qualities
alignment[5]=merge_CIGARs(alignment[5],target[2])
isofrom_strand=ENST_structures[alignment[2]][2]
alignment[2]=ENST_structures[alignment[2]][3]
tmp=pos_set.pop()
alignment[3]=str(tmp[1])
if -1!=alignment[5].find("N"):
alignment.append("XS:A:"+isofrom_strand)
fhw.write("\t".join(alignment)+"\n")
else:
print >>fhw2, read_name,"::",str(len(curr_block)),"::",pos_set
fhw.close()
if fhw2!=stdout:
fhw2.close()
print sucessful_count
return True
def adjust_alignment_for_strands(ENST_structures,alignment,target):
if ENST_structures[alignment[2]][2]=="-":
# Since we aligned against cDNA, convert the alignment back to the genomic strand according to the ENST.
alignment[5]=reverse_CIGAR(alignment[5])
if (int(alignment[1]) & 16)==16:
alignment[1]= str(int(alignment[1]) - 16)
else:
alignment[1]= str(int(alignment[1]) | 16)
alignment[9]=complement_sequence(alignment[9])
alignment[10]=alignment[10][::-1] # reverse the Phred qualities
alignment[5]=merge_CIGARs(alignment[5],target[2])
isofrom_strand=ENST_structures[alignment[2]][2]
alignment[2]=ENST_structures[alignment[2]][3]
alignment[3]=str(target[1])
if -1!=alignment[5].find("N"):
alignment.append("XS:A:"+isofrom_strand)
return "\t".join(alignment)
def adjust_length_by_CIGAR(CIGAR,read_length):
global Insertion_parser,Deletion_parser,Skip_parser,SoftClip_parser
# Insertions consume read bases but no reference, shortening the reference span
ip=Insertion_parser.findall(CIGAR)
tmp_read_length=read_length
if len(ip)!=0:
for i in ip:
tmp_read_length-=int(i)
# Soft-clipped bases are present in the read but not aligned, also shortening the reference span
ip=SoftClip_parser.findall(CIGAR)
if len(ip)!=0:
for i in ip:
tmp_read_length-=int(i)
# Deletions consume reference but no read bases, extending the reference span
ip=Deletion_parser.findall(CIGAR)
if len(ip)!=0:
for i in ip:
tmp_read_length+=int(i)
# Skips (introns) consume reference only, likewise extending the reference span
ip=Skip_parser.findall(CIGAR)
if len(ip)!=0:
for i in ip:
tmp_read_length+=int(i)
return tmp_read_length
def verification_for_BWA(input_file,output_file,ENST_structures,convert_unique=False):
global options,CIGAR_parser,SAM_XA_parser
sucessful_count=0
pattern=SAM_XA_parser
fhr=open(input_file,"r")
fhw=open(output_file,"w+")
for line in fhr:
if Is_SAM_header(line):
continue
line_tmp=line.strip(" \r\n\t")
line_tmp=line_tmp.split("\t")
if line_tmp[2]=='*':
continue
flag=int(line_tmp[1])
if (flag>>2)%2==1:
continue
MRNM=line_tmp[2]
MRNM_pos=int(line_tmp[3])
read_length=len(line_tmp[9])
if (-1==line.find("XA:Z")):
'''
There are two cases.
The first is that there are too many targets; we skip this line.
The second is that this is a unique alignment; since we assume the input
file contains only reads with multiple targets, we skip this line too.
'''
continue
else:
#m=pattern.findall(line)
m=extract_SAM_XA(line)
if len(m)==0:
print "WRONG FORMAT:",line
continue
else:
pos_set=set()
tmp_read_length=adjust_length_by_CIGAR(line_tmp[5],read_length)
major_target=genome_GPS(MRNM,abs(int(MRNM_pos)),ENST_structures,tmp_read_length)
if major_target==False or major_target[1]==False or major_target[2]==False:
print line_tmp,major_target
continue
pos_set.add(tuple(major_target[:2]))
for i in m:
i=i.split(",")
tmp_read_length=adjust_length_by_CIGAR(i[2],read_length)
tmp=genome_GPS(i[0],abs(int(i[1])),ENST_structures,tmp_read_length)
if tmp==False or tmp[1]==False or tmp[2]==False:
break
pos_set.add(tuple(tmp[:2]))
# Generate Output
if len(pos_set)==1:
sucessful_count=sucessful_count+1
if ENST_structures[MRNM][2]=="-":
# Since we aligned against cDNA, convert the alignment back to the genomic strand according to the ENST.
line_tmp[5]=CIGAR_parser.findall(line_tmp[5])
line_tmp[5].reverse()
line_tmp[5]="".join(line_tmp[5])
if (int(line_tmp[1]) & 16)==16:
line_tmp[1]= str(int(line_tmp[1]) - 16)
else:
line_tmp[1]= str(int(line_tmp[1]) | 16)
line_tmp[9]=list(line_tmp[9])
line_tmp[9].reverse()
for i in xrange(len(line_tmp[9])):
if line_tmp[9][i]=="N":
continue
elif line_tmp[9][i]=="A":
line_tmp[9][i]="T"
elif line_tmp[9][i]=="G":
line_tmp[9][i]="C"
elif line_tmp[9][i]=="C":
line_tmp[9][i]="G"
elif line_tmp[9][i]=="T":
line_tmp[9][i]="A"
else:
print "Wrong Sequence Format"
exit(2)
line_tmp[9]="".join(line_tmp[9])
line_tmp[10]=line_tmp[10][::-1] # reverse the Phred qualities
line_tmp[5]=merge_CIGARs(line_tmp[5],major_target[2])
line_tmp[2]=ENST_structures[line_tmp[2]][3]
line_tmp[3]=str(major_target[1])
fhw.write("\t".join(line_tmp)+"\n")
else:
if options.quite==False:
print MRNM,"::",len(m),"::",pos_set
fhw.close()
fhr.close()
print sucessful_count
return True
def build_G2T_structures(ENST_structures):
intervals=dict()
for ENST_id in ENST_structures.iterkeys():
ENST=ENST_structures[ENST_id]
if ENST[3] not in intervals:
intervals[ENST[3]]=[]
for i in xrange(len(ENST[0])):
if i==0:
intervals[ENST[3]].append(Interval(ENST[0][0],ENST[0][0]+ENST[1][0]-1,ENST_id+":0"))
else:
intervals[ENST[3]].append(Interval(ENST[0][i],ENST[0][i]+(ENST[1][i]-ENST[1][i-1])-1,ENST_id+":"+str(i)))
G2T_tree=dict()
for i in intervals.iterkeys():
G2T_tree[i]=IntervalTree(intervals[i],24)
del intervals
return G2T_tree
def exon_over_genome_position(pos,G2T_tree,ENST_structures,offset_to_pos,chr_name=""):
output=[]
if chr_name=="":
result=[]
for i in G2T_tree.iterkeys():
result+=G2T_tree[i].find(pos,pos+offset_to_pos)
else:
if chr_name not in G2T_tree:
return output
result=G2T_tree[chr_name].find(pos,pos+offset_to_pos)
for i in result:
output.append((i.id).split(":"))
return output
def convert_genome_position_to_transcript_position(pos,chr_name,G2T_tree,ENST_structures,read_end_offset=0):
'''
read_end_offset == (read_length-1)
'''
output=[]
result=G2T_tree[chr_name].find(pos,pos+read_end_offset)
for i in result:
tmp=(i.id).split(":")
j=int(tmp[1])
ENST=ENST_structures[tmp[0]]
if ENST[2]=="+":
output.append((tmp[0],ENST[1][j]-(i.stop-pos)))
if ENST[2]=="-":
output.append((tmp[0],ENST[1][j]-(pos+read_end_offset-i.start)))
return output
def test_G2T(ENST_structures):
from random import randint,choice
G2T_tree=build_G2T_structures(ENST_structures)
chr_set=G2T_tree.keys()
for i in xrange(10):
chr_name=choice(chr_set)
test_pos=[randint(1,1000000) for i in xrange(1000)]
for pos in test_pos:
tmp=convert_genome_position_to_transcript_position(pos,chr_name,G2T_tree,ENST_structures)
try:
for j in tmp:
tmp2=genome_GPS(j[0],j[1],ENST_structures)
if tmp2==False:
print "X",pos,j,ENST_structures[j[0]][2]
continue
# genome_GPS returns [chromosome, genomic_position, CIGAR]
chrom,g,c=tmp2
if c==False:
    print "X",pos,j,ENST_structures[j[0]][2],g,c
    continue
if g!=pos:
    print "X",pos,j,ENST_structures[j[0]][2],g
    return False
else:
    print pos,j,ENST_structures[j[0]][2],g
except TypeError:
print "XX",j
return True
if __name__ == '__main__':
usage = "%prog [options] input.sam gene_annoation.gtf output.sam"
parser = OptionParser(usage=usage)
parser.add_option("-s", "--source_type", dest="source_type", help="Specify the source of input file, 'BWA' or 'Bowtie'.", metavar="TYPE",default="Bowtie")
parser.add_option("-b", "--annotation_type", dest="annotation_type", help="Specify the format of annotation file, 'BED' or 'GTF'.", metavar="TYPE",default="GTF")
parser.add_option("-t", "--convert_unique", dest="convert_unique", action="store_true",help="Whether to convert positions of unique reads to genomic position. We assume alignment without 'XA:Z' as unique alignments, however it could be the case that a read has too much alignments.",default=False)
parser.add_option("-q", "--quite", dest="quite", action="store_true",help="Make the program not print non-unique algined reads to stdout.",default=False)
(options, args) = parser.parse_args()
output_file=None
filelist=[]
if len(args) < 3:
parser.error("missing required arguments.")
exit(2)
elif len(args)==3:
input_file = args[0]
gtf_file = args[1]
output_file = args[2]
else:
parser.error("Too much arguments.")
exit(2)
if options.annotation_type=="GTF":
ENST_structures=extract_gene_structure(gtf_file)
elif options.annotation_type=="BED":
ENST_structures=extract_gene_structure(gtf_file,"BED")
else:
parser.error("Wrong annotation format.")
exit(2)
print "Finished loading gene structure"
print "Source Type:",options.source_type
if options.source_type=="BWA":
verification_for_BWA(input_file,output_file,ENST_structures,options.convert_unique)
else:
verification_for_bowtie(input_file,output_file,ENST_structures)
|
claudejrogers/RNASEQR
|
selected_the_multiply_mapped_reads_of_the_same_genomic_position.py
|
Python
|
gpl-3.0
| 35,467
|
[
"BLAST",
"BWA",
"Bowtie"
] |
53cac7bc4d97da902543a4ae73166c40558763562d610dceb414967daad405d9
|
# classification.py
"""
Description: This file contains a set of python functions for conducting
machine learning classification on remote sensing data contained in an
Open Data Cube instance.
License: The code in this notebook is licensed under the Apache License,
Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0).
Contact: If you need assistance, please post a question on the Open Data
Cube Slack channel (http://slack.opendatacube.org/) or on the GIS Stack
Exchange (https://gis.stackexchange.com/questions/ask?tags=open-data-cube)
using the `open-data-cube` tag (you can view previously asked questions
here: https://gis.stackexchange.com/questions/tagged/open-data-cube).
If you would like to report an issue with this script, you can file one on
Github https://github.com/GeoscienceAustralia/dea-notebooks/issues
Last modified: May 2021
"""
import os
import sys
import joblib
import datacube
import rasterio
import numpy as np
import pandas as pd
import xarray as xr
import time
from tqdm.auto import tqdm
import dask.array as da
import geopandas as gpd
from copy import deepcopy
import multiprocessing as mp
import dask.distributed as dd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.utils import check_random_state
from abc import ABCMeta, abstractmethod
from datacube.utils import geometry
from sklearn.base import ClusterMixin
from dask.diagnostics import ProgressBar
from rasterio.features import rasterize
from dask_ml.wrappers import ParallelPostFit
from sklearn.mixture import GaussianMixture
from datacube.utils.geometry import assign_crs
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import KFold, ShuffleSplit
from sklearn.model_selection import BaseCrossValidator
import warnings
from dea_tools.spatial import xr_rasterize
def sklearn_flatten(input_xr):
"""
Reshape a DataArray or Dataset with spatial (and optionally
temporal) structure into an np.array with the spatial and temporal
dimensions flattened into one dimension.
This flattening procedure enables DataArrays and Datasets to be used
to train and predict
with sklearn models.
Last modified: September 2019
Parameters
----------
input_xr : xarray.DataArray or xarray.Dataset
Must have dimensions 'x' and 'y', may have dimension 'time'.
Dimensions other than 'x', 'y' and 'time' are unaffected by the
flattening.
Returns
----------
input_np : numpy.array
A numpy array corresponding to input_xr.data (or
input_xr.to_array().data), with dimensions 'x','y' and 'time'
flattened into a single dimension, which is the first axis of
the returned array. input_np contains no NaNs.
"""
# cast input Datasets to DataArray
if isinstance(input_xr, xr.Dataset):
input_xr = input_xr.to_array()
# stack across pixel dimensions, handling timeseries if necessary
if "time" in input_xr.dims:
stacked = input_xr.stack(z=["x", "y", "time"])
else:
stacked = input_xr.stack(z=["x", "y"])
# finding 'bands' dimensions in each pixel - these will not be
# flattened as their context is important for sklearn
pxdims = []
for dim in stacked.dims:
if dim != "z":
pxdims.append(dim)
# mask NaNs - we mask pixels with NaNs in *any* band, because
# sklearn cannot accept NaNs as input
mask = np.isnan(stacked)
if len(pxdims) != 0:
mask = mask.any(dim=pxdims)
# turn the mask into a numpy array (boolean indexing with xarrays
# acts weird)
mask = mask.data
# the dimension we are masking along ('z') needs to be the first
# dimension in the underlying np array for the boolean indexing to work
stacked = stacked.transpose("z", *pxdims)
input_np = stacked.data[~mask]
return input_np
def sklearn_unflatten(output_np, input_xr):
"""
Reshape a numpy array with no 'missing' elements (NaNs) and
'flattened' spatiotemporal structure into a DataArray matching the
spatiotemporal structure of the input DataArray.
This enables an sklearn model's prediction to be remapped to the
correct pixels in the input DataArray or Dataset.
Last modified: September 2019
Parameters
----------
output_np : numpy.array
The first dimension's length should correspond to the number of
valid (non-NaN) pixels in input_xr.
input_xr : xarray.DataArray or xarray.Dataset
Must have dimensions 'x' and 'y', may have dimension 'time'.
Dimensions other than 'x', 'y' and 'time' are unaffected by the
flattening.
Returns
----------
output_xr : xarray.DataArray
An xarray.DataArray with the same dimensions 'x', 'y' and 'time'
as input_xr, and the same valid (non-NaN) pixels. These pixels
are set to match the data in output_np.
"""
# the output of a sklearn model prediction should just be a numpy array
# with size matching x*y*time for the input DataArray/Dataset.
# cast input Datasets to DataArray
if isinstance(input_xr, xr.Dataset):
input_xr = input_xr.to_array()
# generate the same mask we used to create the input to the sklearn model
if "time" in input_xr.dims:
stacked = input_xr.stack(z=["x", "y", "time"])
else:
stacked = input_xr.stack(z=["x", "y"])
pxdims = []
for dim in stacked.dims:
if dim != "z":
pxdims.append(dim)
mask = np.isnan(stacked)
if len(pxdims) != 0:
mask = mask.any(dim=pxdims)
# handle multivariable output
output_px_shape = ()
if len(output_np.shape[1:]):
output_px_shape = output_np.shape[1:]
# use the mask to put the data in all the right places
output_ma = np.ma.empty((len(stacked.z), *output_px_shape))
output_ma[~mask] = output_np
output_ma[mask] = np.ma.masked
# set the stacked coordinate to match the input
output_xr = xr.DataArray(
output_ma,
coords={"z": stacked["z"]},
dims=["z", *["output_dim_" + str(idx) for idx in range(len(output_px_shape))]],
)
output_xr = output_xr.unstack()
return output_xr
def fit_xr(model, input_xr):
"""
Utilise our wrappers to fit a vanilla sklearn model.
Last modified: September 2019
Parameters
----------
model : scikit-learn model or compatible object
Must have a fit() method that takes numpy arrays.
input_xr : xarray.DataArray or xarray.Dataset.
Must have dimensions 'x' and 'y', may have dimension 'time'.
Returns
----------
model : a scikit-learn model which has been fitted to the data in
the pixels of input_xr.
"""
model = model.fit(sklearn_flatten(input_xr))
return model
def predict_xr(
model,
input_xr,
chunk_size=None,
persist=False,
proba=False,
clean=False,
return_input=False,
):
"""
Using dask-ml ParallelPostFit(), runs the parallel
predict and predict_proba methods of sklearn
estimators. Useful for running predictions
on larger-than-RAM datasets.
Last modified: September 2020
Parameters
----------
model : scikit-learn model or compatible object
Must have a .predict() method that takes numpy arrays.
input_xr : xarray.DataArray or xarray.Dataset.
Must have dimensions 'x' and 'y'
chunk_size : int
The dask chunk size to use on the flattened array. If this
is left as None, then the chunk size is inferred from the
.chunks attribute of `input_xr`
persist : bool
If True, and proba=True, then 'input_xr' data will be
loaded into distributed memory. This will ensure data
is not loaded twice for the prediction of probabilities,
but this will only work if the data is not larger than
distributed RAM.
proba : bool
If True, predict probabilities
clean : bool
If True, remove Infs and NaNs from input and output arrays
return_input : bool
If True, then the data variables in the 'input_xr' dataset will
be appended to the output xarray dataset.
Returns
----------
output_xr : xarray.Dataset
An xarray.Dataset containing the prediction output from model.
if proba=True then the dataset will also contain probabilities, and
if return_input=True then dataset will have the input feature layers.
Has the same spatiotemporal structure as input_xr.
"""
# if input_xr isn't dask, coerce it
dask = True
if not bool(input_xr.chunks):
dask = False
input_xr = input_xr.chunk({"x": len(input_xr.x), "y": len(input_xr.y)})
# set chunk size if not supplied
if chunk_size is None:
chunk_size = int(input_xr.chunks["x"][0]) * int(input_xr.chunks["y"][0])
def _predict_func(model, input_xr, persist, proba, clean, return_input):
x, y, crs = input_xr.x, input_xr.y, input_xr.geobox.crs
input_data = []
for var_name in input_xr.data_vars:
input_data.append(input_xr[var_name])
input_data_flattened = []
for arr in input_data:
data = arr.data.flatten().rechunk(chunk_size)
input_data_flattened.append(data)
# reshape for prediction
input_data_flattened = da.array(input_data_flattened).transpose()
if clean == True:
input_data_flattened = da.where(
da.isfinite(input_data_flattened), input_data_flattened, 0
)
if (proba == True) & (persist == True):
# persisting data so we don't require loading all the data twice
input_data_flattened = input_data_flattened.persist()
# apply the classification
print("predicting...")
out_class = model.predict(input_data_flattened)
# Mask out NaN or Inf values in results
if clean == True:
out_class = da.where(da.isfinite(out_class), out_class, 0)
# Reshape when writing out
out_class = out_class.reshape(len(y), len(x))
# stack back into xarray
output_xr = xr.DataArray(out_class, coords={"x": x, "y": y}, dims=["y", "x"])
output_xr = output_xr.to_dataset(name="Predictions")
if proba == True:
print(" probabilities...")
out_proba = model.predict_proba(input_data_flattened)
# convert to %
out_proba = da.max(out_proba, axis=1) * 100.0
if clean == True:
out_proba = da.where(da.isfinite(out_proba), out_proba, 0)
out_proba = out_proba.reshape(len(y), len(x))
out_proba = xr.DataArray(
out_proba, coords={"x": x, "y": y}, dims=["y", "x"]
)
output_xr["Probabilities"] = out_proba
if return_input == True:
print(" input features...")
# unflatten the input_data_flattened array and append
# to the output_xr containing the predictions
arr = input_xr.to_array()
stacked = arr.stack(z=["y", "x"])
# handle multivariable output
output_px_shape = ()
if len(input_data_flattened.shape[1:]):
output_px_shape = input_data_flattened.shape[1:]
output_features = input_data_flattened.reshape(
(len(stacked.z), *output_px_shape)
)
# set the stacked coordinate to match the input
output_features = xr.DataArray(
output_features,
coords={"z": stacked["z"]},
dims=[
"z",
*["output_dim_" + str(idx) for idx in range(len(output_px_shape))],
],
).unstack()
# convert to dataset and rename arrays
output_features = output_features.to_dataset(dim="output_dim_0")
data_vars = list(input_xr.data_vars)
output_features = output_features.rename(
{i: j for i, j in zip(output_features.data_vars, data_vars)}
)
# merge with predictions
output_xr = xr.merge([output_xr, output_features], compat="override")
return assign_crs(output_xr, str(crs))
if dask == True:
# convert model to dask predict
model = ParallelPostFit(model)
with joblib.parallel_backend("dask"):
output_xr = _predict_func(
model, input_xr, persist, proba, clean, return_input
)
else:
output_xr = _predict_func(
model, input_xr, persist, proba, clean, return_input
).compute()
return output_xr
class HiddenPrints:
"""
For concealing unwanted print statements called by other functions
"""
def __enter__(self):
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, "w")
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout.close()
sys.stdout = self._original_stdout
def _get_training_data_for_shp(
gdf,
index,
row,
out_arrs,
out_vars,
dc_query,
return_coords,
feature_func=None,
field=None,
zonal_stats=None,
):
"""
This is the core function that is triggered by `collect_training_data`.
The `collect_training_data` function loops through geometries in a geopandas
geodataframe and runs the code within `_get_training_data_for_shp`.
Parameters are inherited from `collect_training_data`.
See that function for information on the other params not listed below.
Parameters
----------
index, row : iterables inherited from geopandas object
out_arrs : list
An empty list into which the training data arrays are stored.
out_vars : list
An empty list into which the data variable names are stored.
Returns
--------
Two lists, a list of numpy.arrays containing classes and extracted data for
each pixel or polygon, and another containing the data variable names.
"""
# prevent function altering dictionary kwargs
dc_query = deepcopy(dc_query)
# remove dask chunks if supplied, since we use
# multiprocessing for parallelization
if "dask_chunks" in dc_query.keys():
dc_query.pop("dask_chunks", None)
# set up query based on polygon
geom = geometry.Geometry(geom=gdf.iloc[index].geometry, crs=gdf.crs)
q = {"geopolygon": geom}
# merge polygon query with user supplied query params
dc_query.update(q)
# Use input feature function
data = feature_func(dc_query)
# create polygon mask
mask = xr_rasterize(gdf.iloc[[index]], data)
data = data.where(mask)
# Check that feature_func has removed time
if "time" in data.dims:
t = data.dims["time"]
if t > 1:
raise ValueError(
"After running the feature_func, the dataset still has "
+ str(t)
+ " time-steps, dataset must only have"
+ " x and y dimensions."
)
if return_coords == True:
# turn coords into a variable in the ds
data["x_coord"] = data.x + 0 * data.y
data["y_coord"] = data.y + 0 * data.x
# append ID measurement to dataset for tracking failures
band = [m for m in data.data_vars][0]
_id = xr.zeros_like(data[band])
data["id"] = _id
data["id"] = data["id"] + gdf.iloc[index]["id"]
# If no zonal stats were requested then extract all pixel values
if zonal_stats is None:
flat_train = sklearn_flatten(data)
flat_val = np.repeat(row[field], flat_train.shape[0])
stacked = np.hstack((np.expand_dims(flat_val, axis=1), flat_train))
elif zonal_stats in ["mean", "median", "max", "min"]:
method_to_call = getattr(data, zonal_stats)
flat_train = method_to_call()
flat_train = flat_train.to_array()
stacked = np.hstack((row[field], flat_train))
else:
raise Exception(
zonal_stats
+ " is not one of the supported"
+ " reduce functions ('mean','median','max','min')"
)
out_arrs.append(stacked)
out_vars.append([field] + list(data.data_vars))
def _get_training_data_parallel(
gdf, dc_query, ncpus, return_coords, feature_func=None, field=None, zonal_stats=None
):
"""
Function passing the '_get_training_data_for_shp' function
to a multiprocessing.Pool.
Inherits variables from 'collect_training_data()'.
"""
# Check if dask-client is running
try:
zx = None
zx = dd.get_client()
except Exception:
pass
if zx is not None:
raise ValueError(
"You have a Dask Client running, which prevents \n"
"this function from multiprocessing. Close the client."
)
# instantiate lists that can be shared across processes
manager = mp.Manager()
results = manager.list()
column_names = manager.list()
# progress bar
pbar = tqdm(total=len(gdf))
def update(*a):
pbar.update()
with mp.Pool(ncpus) as pool:
for index, row in gdf.iterrows():
pool.apply_async(
_get_training_data_for_shp,
[
gdf,
index,
row,
results,
column_names,
dc_query,
return_coords,
feature_func,
field,
zonal_stats,
],
callback=update,
)
pool.close()
pool.join()
pbar.close()
return column_names, results
def collect_training_data(
gdf,
dc_query,
ncpus=1,
return_coords=False,
feature_func=None,
field=None,
zonal_stats=None,
clean=True,
fail_threshold=0.02,
fail_ratio=0.5,
max_retries=3,
):
"""
This function provides methods for gathering training data from the ODC over
geometries stored within a geopandas geodataframe. The function will return a
'model_input' array containing stacked training data arrays with all NaNs & Infs removed.
In the instance where ncpus > 1, a parallel version of the function will be run
(functions are passed to a mp.Pool()). This function can conduct zonal statistics if
the supplied shapefile contains polygons. The 'feature_func' parameter defines what
features to produce.
Parameters
----------
gdf : geopandas geodataframe
geometry data in the form of a geopandas geodataframe
dc_query : dictionary
Datacube query object, should not contain lat and long (x or y)
variables as these are supplied by the 'gdf' variable
ncpus : int
The number of cpus/processes over which to parallelize the gathering
of training data (only if ncpus is > 1). Use 'mp.cpu_count()' to determine the number of
cpus available on a machine. Defaults to 1.
return_coords : bool
If True, then the training data will contain two extra columns 'x_coord' and
'y_coord' corresponding to the x,y coordinate of each sample. This variable can
be useful for handling spatial autocorrelation between samples later in the ML workflow.
feature_func : function
A function for generating feature layers that is applied to the data within
the bounds of the input geometry. The 'feature_func' must accept a 'dc_query'
object, and return a single xarray.Dataset or xarray.DataArray containing
2D coordinates (i.e. x, y - no time dimension).
e.g.
def feature_function(query):
dc = datacube.Datacube(app='feature_layers')
ds = dc.load(**query)
ds = ds.mean('time')
return ds
field : str
Name of the column in the gdf that contains the class labels
zonal_stats : string, optional
An optional string giving the names of zonal statistics to calculate
for each polygon. Default is None (all pixel values are returned). Supported
values are 'mean', 'median', 'max', 'min'.
clean : bool
Whether or not to remove missing values in the training dataset. If True,
training labels with any NaNs or Infs in the feature layers will be dropped
from the dataset.
fail_threshold : float, default 0.02
Silent read fails on S3 can result in some rows of the returned data containing NaN values.
The 'fail_threshold' fraction specifies a % of acceptable fails.
e.g. Setting 'fail_threshold' to 0.05 means if >5% of the samples in the training dataset
fail then those samples will be returned to the multiprocessing queue. Below this fraction
the function will accept the failures and return the results.
fail_ratio: float
A float between 0 and 1 that defines if a given training sample has failed.
Default is 0.5, which means if 50% of the measurements in a given sample return null
values, and the number of total fails is more than the fail_threshold, the sample will be
passed to the retry queue.
max_retries: int, default 3
Maximum number of times to retry collecting samples. This number is invoked
if the 'fail_threshold' is not reached.
Returns
--------
Two lists, a list of numpy.arrays containing classes and extracted data for
each pixel or polygon, and another containing the data variable names.
"""
# check the dtype of the class field
if not np.issubdtype(gdf[field].dtype, np.integer):
raise ValueError(
'The "field" column of the input vector must contain integer dtypes'
)
# check for feature_func
if feature_func is None:
raise ValueError(
"Please supply a feature layer function through the "
+"parameter 'feature_func'"
)
if zonal_stats is not None:
print("Taking zonal statistic: " + zonal_stats)
# add unique id to gdf to help with indexing failed rows
# during multiprocessing
# if zonal_stats is not None:
gdf["id"] = range(0, len(gdf))
if ncpus == 1:
# progress indicator
print("Collecting training data in serial mode")
i = 0
# list to store results
results = []
column_names = []
# loop through polys and extract training data
for index, row in gdf.iterrows():
print(" Feature {:04}/{:04}\r".format(i + 1, len(gdf)), end="")
_get_training_data_for_shp(
gdf,
index,
row,
results,
column_names,
dc_query,
return_coords,
feature_func,
field,
zonal_stats,
)
i += 1
else:
print("Collecting training data in parallel mode")
column_names, results = _get_training_data_parallel(
gdf=gdf,
dc_query=dc_query,
ncpus=ncpus,
return_coords=return_coords,
feature_func=feature_func,
field=field,
zonal_stats=zonal_stats,
)
# column names are appended during each iteration
# but they are identical, grab only the first instance
column_names = column_names[0]
# Stack the extracted training data for each feature into a single array
model_input = np.vstack(results)
# this code block below iteratively retries failed rows
# up to max_retries or until fail_threshold is
# reached - whichever occurs first
if ncpus > 1:
i = 1
while i <= max_retries:
# Find % of fails (null values) in data. Use Pandas for simplicity
df = pd.DataFrame(data=model_input[:, 0:-1], index=model_input[:, -1])
# how many nan values per id?
num_nans = df.isnull().sum(axis=1)
num_nans = num_nans.groupby(num_nans.index).sum()
# how many valid values per id?
num_valid = df.notnull().sum(axis=1)
num_valid = num_valid.groupby(num_valid.index).sum()
# find fail rate
perc_fail = num_nans / (num_nans + num_valid)
fail_ids = perc_fail[perc_fail > fail_ratio]
fail_rate = len(fail_ids) / len(gdf)
print(
"Percentage of possible fails after run "
+ str(i)
+ " = "
+ str(round(fail_rate * 100, 2))
+ " %"
)
if fail_rate > fail_threshold:
print("Recollecting samples that failed")
fail_ids = list(fail_ids.index)
# keep only the ids in model_input object that didn't fail
model_input = model_input[~np.isin(model_input[:, -1], fail_ids)]
# index out the fail_ids from the original gdf
gdf_rerun = gdf.loc[gdf["id"].isin(fail_ids)]
gdf_rerun = gdf_rerun.reset_index(drop=True)
time.sleep(5) # sleep for 5s to rest api
# recollect failed rows
column_names_again, results_again = _get_training_data_parallel(
gdf=gdf_rerun,
dc_query=dc_query,
ncpus=ncpus,
return_coords=return_coords,
feature_func=feature_func,
field=field,
zonal_stats=zonal_stats,
)
# Stack the extracted training data for each feature into a single array
model_input_again = np.vstack(results_again)
# merge results of the re-run with original run
model_input = np.vstack((model_input, model_input_again))
i += 1
else:
break
# -----------------------------------------------
# remove id column
idx_var = column_names[0:-1]
model_col_indices = [column_names.index(var_name) for var_name in idx_var]
model_input = model_input[:, model_col_indices]
if clean == True:
num = np.count_nonzero(np.isnan(model_input).any(axis=1))
model_input = model_input[~np.isnan(model_input).any(axis=1)]
model_input = model_input[~np.isinf(model_input).any(axis=1)]
print("Removed " + str(num) + " rows wth NaNs &/or Infs")
print("Output shape: ", model_input.shape)
else:
print("Returning data without cleaning")
print("Output shape: ", model_input.shape)
return column_names[0:-1], model_input
class KMeans_tree(ClusterMixin):
"""
A hierarchical KMeans unsupervised clustering model. This class is
a clustering model, so it inherits scikit-learn's ClusterMixin
base class.
Parameters
----------
n_levels : integer, default 2
Number of levels in the tree of clustering models.
n_clusters : integer, default 3
Number of clusters in each of the constituent KMeans models in
the tree.
**kwargs : optional
Other keyword arguments to be passed directly to the KMeans
initialiser.
"""
def __init__(self, n_levels=2, n_clusters=3, **kwargs):
assert n_levels >= 1
self.base_model = KMeans(n_clusters=n_clusters, **kwargs)
self.n_levels = n_levels
self.n_clusters = n_clusters
# make child models
if n_levels > 1:
self.branches = [
KMeans_tree(n_levels=n_levels - 1, n_clusters=n_clusters, **kwargs)
for _ in range(n_clusters)
]
def fit(self, X, y=None, sample_weight=None):
"""
Fit the tree of KMeans models. All parameters mimic those
of KMeans.fit().
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster. It must be noted that the
data will be converted to C ordering, which will cause a
memory copy if the given data is not C-contiguous.
y : Ignored
not used, present here for API consistency by convention.
sample_weight : array-like, shape (n_samples,), optional
The weights for each observation in X. If None, all
observations are assigned equal weight (default: None)
"""
self.labels_ = self.base_model.fit(X, sample_weight=sample_weight).labels_
if self.n_levels > 1:
labels_old = np.copy(self.labels_)
# make room to add the sub-cluster labels
self.labels_ *= (self.n_clusters) ** (self.n_levels - 1)
for clu in range(self.n_clusters):
# fit child models on their corresponding partition of the training set
self.branches[clu].fit(
X[labels_old == clu],
sample_weight=(
sample_weight[labels_old == clu]
if sample_weight is not None
else None
),
)
self.labels_[labels_old == clu] += self.branches[clu].labels_
return self
def predict(self, X, sample_weight=None):
"""
Send X through the KMeans tree and predict the resultant
cluster. Compatible with KMeans.predict().
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
sample_weight : array-like, shape (n_samples,), optional
The weights for each observation in X. If None, all
observations are assigned equal weight (default: None)
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
result = self.base_model.predict(X, sample_weight=sample_weight)
if self.n_levels > 1:
rescpy = np.copy(result)
# make room to add the sub-cluster labels
result *= (self.n_clusters) ** (self.n_levels - 1)
for clu in range(self.n_clusters):
result[rescpy == clu] += self.branches[clu].predict(
X[rescpy == clu],
sample_weight=(
sample_weight[rescpy == clu]
if sample_weight is not None
else None
),
)
return result
def spatial_clusters(
coordinates,
method="Hierarchical",
max_distance=None,
n_groups=None,
verbose=False,
**kwargs
):
"""
Create spatial groups on coordinate data using KMeans clustering,
a Gaussian Mixture model, or Agglomerative (hierarchical) clustering
Last modified: September 2020
Parameters
----------
n_groups : int
The number of groups to create. This is passed as 'n_clusters=n_groups'
for the KMeans algo, and 'n_components=n_groups' for the GMM. If using
method='Hierarchical' then this parameter is ignored.
coordinates : np.array
A numpy array of coordinate values e.g.
np.array([[3337270., 262400.],
[3441390., -273060.], ...])
method : str
Which algorithm to use to separate data points. Either 'KMeans', 'GMM', or
'Hierarchical'. If using 'Hierarchical' then must set max_distance.
max_distance : int
If method is set to 'Hierarchical', max_distance describes the
maximum euclidean distance between observations in a cluster. 'n_groups'
is ignored in this case.
**kwargs : optional,
Additional keyword arguments to pass to sklearn.cluster.KMeans or
sklearn.mixture.GaussianMixture depending on the 'method' argument.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
if method not in ["Hierarchical", "KMeans", "GMM"]:
raise ValueError("method must be one of: 'Hierarchical','KMeans' or 'GMM'")
if (method in ["GMM", "KMeans"]) & (n_groups is None):
raise ValueError(
"The 'GMM' and 'KMeans' methods requires explicitly setting 'n_groups'"
)
if (method == "Hierarchical") & (max_distance is None):
raise ValueError("The 'Hierarchical' method requires setting max_distance")
if method == "Hierarchical":
cluster_label = AgglomerativeClustering(
n_clusters=None,
linkage="complete",
distance_threshold=max_distance,
**kwargs
).fit_predict(coordinates)
if method == "KMeans":
cluster_label = KMeans(n_clusters=n_groups, **kwargs).fit_predict(coordinates)
if method == "GMM":
cluster_label = GaussianMixture(n_components=n_groups, **kwargs).fit_predict(
coordinates
)
if verbose:
print("n clusters = " + str(len(np.unique(cluster_label))))
return cluster_label
def SKCV(
coordinates,
n_splits,
cluster_method,
kfold_method,
test_size,
balance,
n_groups=None,
max_distance=None,
train_size=None,
random_state=None,
**kwargs
):
"""
Generate spatial k-fold cross validation indices using coordinate data.
This function wraps the 'SpatialShuffleSplit' and 'SpatialKFold' classes.
These classes ingest coordinate data in the form of an
np.array([[Eastings, northings]]) and assign samples to a spatial cluster
using either a KMeans, Gaussian Mixture, or Agglomerative Clustering algorithm.
This cross-validator is preferred over other sklearn.model_selection methods
for spatial data to avoid overestimating cross-validation scores.
This can happen because of the inherent spatial autocorrelation that is usually
associated with this type of data.
Last modified: Dec 2020
Parameters
----------
coordinates : np.array
A numpy array of coordinate values e.g.
np.array([[3337270., 262400.],
[3441390., -273060.], ...])
n_splits : int
The number of test-train cross validation splits to generate.
cluster_method : str
Which algorithm to use to separate data points. Either 'KMeans', 'GMM', or
'Hierarchical'
kfold_method : str
One of either 'SpatialShuffleSplit' or 'SpatialKFold'. See the docs
under class:_SpatialShuffleSplit and class: _SpatialKFold for more
information on these options.
test_size : float, int, None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.15.
balance : int or bool
if setting kfold_method to 'SpatialShuffleSplit': int
The number of splits generated per iteration to try to balance the
amount of data in each set so that *test_size* and *train_size* are
respected. If 1, then no extra splits are generated (essentially
disabling the balancing). Must be >= 1.
if setting kfold_method to 'SpatialKFold': bool
Whether or not to split clusters into folds with approximately equal
number of data points. If False, each fold will have the same number of
clusters (which can have different number of data points in them).
n_groups : int
The number of groups to create. This is passed as 'n_clusters=n_groups'
for the KMeans algo, and 'n_components=n_groups' for the GMM. If using
cluster_method='Hierarchical' then this parameter is ignored.
max_distance : int
If method is set to 'Hierarchical', max_distance describes the
maximum euclidean distance between observations in a cluster. 'n_groups'
is ignored in this case.
train_size : float, int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
**kwargs : optional,
Additional keyword arguments to pass to sklearn.cluster.KMeans or
sklearn.mixture.GaussianMixture depending on the cluster_method argument.
Returns
--------
generator object _BaseSpatialCrossValidator.split
"""
# initiate a splitter
if kfold_method == "SpatialShuffleSplit":
splitter = _SpatialShuffleSplit(
n_groups=n_groups,
method=cluster_method,
coordinates=coordinates,
max_distance=max_distance,
test_size=test_size,
train_size=train_size,
n_splits=n_splits,
random_state=random_state,
balance=balance,
**kwargs
)
if kfold_method == "SpatialKFold":
splitter = _SpatialKFold(
n_groups=n_groups,
coordinates=coordinates,
max_distance=max_distance,
method=cluster_method,
test_size=test_size,
n_splits=n_splits,
random_state=random_state,
balance=balance,
**kwargs
)
return splitter
def spatial_train_test_split(
X,
y,
coordinates,
cluster_method,
kfold_method,
balance,
test_size=None,
n_splits=None,
n_groups=None,
max_distance=None,
train_size=None,
random_state=None,
**kwargs
):
"""
Split arrays into random train and test subsets. Similar to
`sklearn.model_selection.train_test_split` but instead works on
spatial coordinate data. Coordinate data is grouped according
to either a KMeans, Gaussian Mixture, or Agglomerative Clustering algorithm.
Grouping by spatial clusters is preferred over plain random splits for
spatial data to avoid overestimating validation scores due to spatial
autocorrelation.
Parameters
----------
X : np.array
Training data features
y : np.array
Training data labels
coordinates : np.array
A numpy array of coordinate values e.g.
np.array([[3337270., 262400.],
[3441390., -273060.], ...])
cluster_method : str
Which algorithm to use to separate data points. Either 'KMeans', 'GMM', or
'Hierarchical'
kfold_method : str
One of either 'SpatialShuffleSplit' or 'SpatialKFold'. See the docs
under class:_SpatialShuffleSplit and class: _SpatialKFold for more
information on these options.
balance : int or bool
if setting kfold_method to 'SpatialShuffleSplit': int
The number of splits generated per iteration to try to balance the
amount of data in each set so that *test_size* and *train_size* are
respected. If 1, then no extra splits are generated (essentially
disabling the balancing). Must be >= 1.
if setting kfold_method to 'SpatialKFold': bool
Whether or not to split clusters into folds with approximately equal
number of data points. If False, each fold will have the same number of
clusters (which can have different number of data points in them).
test_size : float, int, None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.15.
n_splits : int
This parameter applies to the 'SpatialKFold' method; use it to achieve
the desired train-test ratio, as the 'test_size' parameter often fails
to get the ratio right for the KFold method.
n_groups : int
The number of groups to create. This is passed as 'n_clusters=n_groups'
for the KMeans algo, and 'n_components=n_groups' for the GMM. If using
cluster_method='Hierarchical' then this parameter is ignored.
max_distance : int
If method is set to 'Hierarchical', max_distance describes the
maximum euclidean distance between observations in a cluster. 'n_groups'
is ignored in this case.
train_size : float, int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int,
RandomState instance or None, optional
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
**kwargs : optional,
Additional keyword arguments to pass to sklearn.cluster.KMeans or
sklearn.mixture.GaussianMixture depending on the cluster_method argument.
Returns
-------
Tuple :
Contains four arrays in the following order:
X_train, X_test, y_train, y_test
"""
if kfold_method == "SpatialShuffleSplit":
splitter = _SpatialShuffleSplit(
n_groups=n_groups,
method=cluster_method,
coordinates=coordinates,
max_distance=max_distance,
test_size=test_size,
train_size=train_size,
n_splits=1 if n_splits is None else n_splits,
random_state=random_state,
balance=balance,
**kwargs
)
if kfold_method == "SpatialKFold":
if n_splits is None:
raise ValueError(
"n_splits parameter requires an integer value, eg. 'n_splits=5'"
)
if (test_size is not None) or (train_size is not None):
warnings.warn(
"With the 'SpatialKFold' method, controlling the test/train ratio "
"is better achieved using the 'n_splits' parameter"
)
splitter = _SpatialKFold(
n_groups=n_groups,
coordinates=coordinates,
max_distance=max_distance,
method=cluster_method,
n_splits=n_splits,
random_state=random_state,
balance=balance,
**kwargs
)
lst = []
for train, test in splitter.split(coordinates):
X_tr, X_tt = X[train, :], X[test, :]
y_tr, y_tt = y[train], y[test]
lst.extend([X_tr, X_tt, y_tr, y_tt])
return (lst[0], lst[1], lst[2], lst[3])
def _partition_by_sum(array, parts):
"""
Partition an array into parts of approximately equal sum.
Does not change the order of the array elements.
Produces the partition indices on the array. Use :func:`numpy.split` to
divide the array along these indices.
Parameters
----------
array : array or array-like
The 1D array that will be partitioned. The array will be raveled before
computations.
parts : int
Number of parts to split the array. Can be at most the number of
elements in the array.
Returns
-------
indices : array
The indices in which the array should be split.
Notes
-----
Solution from https://stackoverflow.com/a/54024280
"""
array = np.atleast_1d(array).ravel()
if parts > array.size:
raise ValueError(
"Cannot partition an array of size {} into {} parts of equal sum.".format(
array.size, parts
)
)
cumulative_sum = array.cumsum()
# Ideally, we want each part to have the same sum (total // parts).
ideal_sum = cumulative_sum[-1] // parts
# If the parts are ideal, the cumulative sum of each part will be this
ideal_cumsum = np.arange(1, parts) * ideal_sum
indices = np.searchsorted(cumulative_sum, ideal_cumsum, side="right")
# Check for repeated split points, which indicates that there is no way to
# split the array.
if np.unique(indices).size != indices.size:
raise ValueError(
"Could not find partition points to split the array into {} parts "
"of equal sum.".format(parts)
)
return indices
class _BaseSpatialCrossValidator(BaseCrossValidator, metaclass=ABCMeta):
"""
Base class for spatial cross-validators.
Parameters
----------
n_groups : int
The number of groups to create. This is passed as 'n_clusters=n_groups'
for the KMeans algo, and 'n_components=n_groups' for the GMM.
coordinates : np.array
A numpy array of coordinate values e.g.
np.array([[3337270., 262400.],
[3441390., -273060.], ...])
method : str
Which algorithm to use to separate data points. Either 'KMeans', 'GMM', or 'Hierarchical'.
n_splits : int
Number of splitting iterations.
"""
def __init__(
self,
n_groups=None,
coordinates=None,
method=None,
max_distance=None,
n_splits=None,
):
self.n_groups = n_groups
self.coordinates = coordinates
self.method = method
self.max_distance = max_distance
self.n_splits = n_splits
def split(self, X, y=None, groups=None):
"""
Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, 2)
Columns should be the easting and northing coordinates of data
points, respectively.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems. Always
ignored.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set. Always ignored.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
if X.shape[1] != 2:
raise ValueError(
"X (the coordinate data) must have exactly 2 columns ({} given).".format(
X.shape[1]
)
)
for train, test in super().split(X, y, groups):
yield train, test
def get_n_splits(self, X=None, y=None, groups=None):
"""
Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
@abstractmethod
def _iter_test_indices(self, X=None, y=None, groups=None):
"""
Generates integer indices corresponding to test sets.
MUST BE IMPLEMENTED BY DERIVED CLASSES.
Parameters
----------
X : array-like, shape (n_samples, 2)
Columns should be the easting and northing coordinates of data
points, respectively.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems. Always
ignored.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set. Always ignored.
Yields
------
test : ndarray
The testing set indices for that split.
"""
class _SpatialShuffleSplit(_BaseSpatialCrossValidator):
"""
Random permutation of spatial cross-validator.
Yields indices to split data into training and test sets. Data are first
grouped into clusters using a KMeans, GMM, or Agglomerative clustering algorithm
and are then split into testing and training sets randomly.
The proportion of clusters assigned to each set is controlled by *test_size*
and/or *train_size*. However, the total amount of actual data points in
each set could be different from these values since clusters can have
a different number of data points inside them. To guarantee that the
proportion of actual data is as close as possible to the proportion of
    clusters, this cross-validator generates extra candidate splits and
    selects the one whose proportion of data points in each set is closest
    to the desired amount. The number of candidate splits per
    iteration is controlled by the *balance* argument.
This cross-validator is preferred over `sklearn.model_selection.ShuffleSplit`
for spatial data to avoid overestimating cross-validation scores.
This can happen because of the inherent spatial autocorrelation.
Parameters
----------
n_groups : int
        The number of groups to create. This is passed as 'n_clusters=n_groups'
        for the KMeans algo, and 'n_components=n_groups' for the GMM. If using
        method='Hierarchical' then this parameter is ignored.
coordinates : np.array
A numpy array of coordinate values e.g.
np.array([[3337270., 262400.],
[3441390., -273060.], ...])
    method : str
        Which algorithm to use to separate data points. Either 'KMeans', 'GMM',
        or 'Hierarchical'.
    max_distance : int
        If method is set to 'Hierarchical' then max_distance is the maximum
        Euclidean distance allowed between all observations in a cluster.
        'n_groups' is ignored in this case.
n_splits : int,
Number of re-shuffling & splitting iterations.
test_size : float, int, None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
        be set to 0.15.
train_size : float, int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
balance : int
The number of splits generated per iteration to try to balance the
amount of data in each set so that *test_size* and *train_size* are
        respected. If 1, then no extra splits are generated (essentially
        disabling the balancing). Must be >= 1.
    **kwargs : optional,
        Additional keyword arguments to pass to sklearn.cluster.KMeans or
        sklearn.mixture.GaussianMixture depending on the method argument.
Returns
--------
generator
containing indices to split data into training and test sets
"""
def __init__(
self,
n_groups=None,
coordinates=None,
method="Heirachical",
max_distance=None,
n_splits=None,
test_size=0.15,
train_size=None,
random_state=None,
balance=10,
**kwargs
):
        super().__init__(
            n_groups=n_groups,
            coordinates=coordinates,
            method=method,
            max_distance=max_distance,
            n_splits=n_splits,
        )
if balance < 1:
raise ValueError(
"The *balance* argument must be >= 1. To disable balance, use 1."
)
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.balance = balance
self.kwargs = kwargs
def _iter_test_indices(self, X=None, y=None, groups=None):
"""
Generates integer indices corresponding to test sets.
Runs several iterations until a split is found that yields clusters with
the right amount of data points in it.
Parameters
----------
X : array-like, shape (n_samples, 2)
Columns should be the easting and northing coordinates of data
points, respectively.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems. Always
ignored.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set. Always ignored.
Yields
------
test : ndarray
The testing set indices for that split.
"""
labels = spatial_clusters(
n_groups=self.n_groups,
coordinates=self.coordinates,
method=self.method,
max_distance=self.max_distance,
**self.kwargs
)
cluster_ids = np.unique(labels)
# Generate many more splits so that we can pick and choose the ones
# that have the right balance of training and testing data.
shuffle = ShuffleSplit(
n_splits=self.n_splits * self.balance,
test_size=self.test_size,
train_size=self.train_size,
random_state=self.random_state,
).split(cluster_ids)
for _ in range(self.n_splits):
test_sets, balance = [], []
for _ in range(self.balance):
# This is a false positive in pylint which is why the warning
# is disabled at the top of this file:
# https://github.com/PyCQA/pylint/issues/1830
# pylint: disable=stop-iteration-return
train_clusters, test_clusters = next(shuffle)
# pylint: enable=stop-iteration-return
train_points = np.where(np.isin(labels, cluster_ids[train_clusters]))[0]
test_points = np.where(np.isin(labels, cluster_ids[test_clusters]))[0]
                # The proportion of data points assigned to each group should
                # be close to the proportion of clusters assigned to each group.
balance.append(
abs(
train_points.size / test_points.size
- train_clusters.size / test_clusters.size
)
)
test_sets.append(test_points)
best = np.argmin(balance)
yield test_sets[best]
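# A hedged usage sketch (not part of the original module): the coordinates,
# n_groups and method values below are made-up illustrations, and it assumes
# spatial_clusters accepts method="KMeans" as documented above.
def _example_spatial_shuffle_split():
    rng = np.random.RandomState(0)
    coordinates = rng.uniform(0, 100, size=(50, 2))
    cv = _SpatialShuffleSplit(
        n_groups=5,
        coordinates=coordinates,
        method="KMeans",
        n_splits=3,
        test_size=0.2,
        random_state=0,
    )
    # Train and test indices never overlap; whole clusters go to one side.
    return [(train, test) for train, test in cv.split(coordinates)]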
class _SpatialKFold(_BaseSpatialCrossValidator):
"""
Spatial K-Folds cross-validator.
    Yields indices to split data into training and test sets. Data are first
    grouped into clusters using either a KMeans or GMM algorithm. The
    clusters are then split into testing and training sets iteratively
    along k folds of the data (k is given by *n_splits*).
By default, the clusters are split into folds in a way that makes each fold
have approximately the same number of data points. Sometimes this might not
be possible, which can happen if the number of splits is close to the
number of clusters. In these cases, each fold will have the same number of
clusters regardless of how many data points are in each cluster. This
behaviour can also be disabled by setting ``balance=False``.
This cross-validator is preferred over `sklearn.model_selection.KFold` for
spatial data to avoid overestimating cross-validation scores. This can happen
because of the inherent autocorrelation that is usually associated with
this type of data.
Parameters
----------
n_groups : int
        The number of groups to create. This is passed as 'n_clusters=n_groups'
        for the KMeans algo, and 'n_components=n_groups' for the GMM. If using
        method='Hierarchical' then this parameter is ignored.
coordinates : np.array
A numpy array of coordinate values e.g.
np.array([[3337270., 262400.],
[3441390., -273060.], ...])
    method : str
        Which algorithm to use to separate data points. Either 'KMeans', 'GMM',
        or 'Hierarchical'.
    max_distance : int
        If method is set to 'Hierarchical' then max_distance is the maximum
        Euclidean distance allowed between all observations in a cluster.
        'n_groups' is ignored in this case.
n_splits : int
Number of folds. Must be at least 2.
shuffle : bool
Whether to shuffle the data before splitting into batches.
    random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
balance : bool
        Whether or not to split clusters into folds with approximately equal
        numbers of data points. If False, each fold will have the same number
        of clusters (which can have different numbers of data points in them).
    **kwargs : optional,
        Additional keyword arguments to pass to sklearn.cluster.KMeans or
        sklearn.mixture.GaussianMixture depending on the method argument.
"""
def __init__(
self,
n_groups=None,
coordinates=None,
method="Heirachical",
max_distance=None,
n_splits=5,
test_size=0.15,
train_size=None,
shuffle=True,
random_state=None,
balance=True,
**kwargs
):
        super().__init__(
            n_groups=n_groups,
            coordinates=coordinates,
            method=method,
            max_distance=max_distance,
            n_splits=n_splits,
        )
if n_splits < 2:
raise ValueError(
"Number of splits must be >=2 for clusterKFold. Given {}.".format(
n_splits
)
)
self.test_size = test_size
self.shuffle = shuffle
self.random_state = random_state
self.balance = balance
self.kwargs = kwargs
def _iter_test_indices(self, X=None, y=None, groups=None):
"""
Generates integer indices corresponding to test sets.
Parameters
----------
X : array-like, shape (n_samples, 2)
Columns should be the easting and northing coordinates of data
points, respectively.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems. Always
ignored.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set. Always ignored.
Yields
------
test : ndarray
The testing set indices for that split.
"""
labels = spatial_clusters(
n_groups=self.n_groups,
coordinates=self.coordinates,
method=self.method,
max_distance=self.max_distance,
**self.kwargs
)
cluster_ids = np.unique(labels)
if self.n_splits > cluster_ids.size:
raise ValueError(
"Number of k-fold splits ({}) cannot be greater than the number of "
"clusters ({}). Either decrease n_splits or increase the number of "
"clusters.".format(self.n_splits, cluster_ids.size)
)
if self.shuffle:
check_random_state(self.random_state).shuffle(cluster_ids)
if self.balance:
cluster_sizes = [np.isin(labels, i).sum() for i in cluster_ids]
try:
split_points = _partition_by_sum(cluster_sizes, parts=self.n_splits)
folds = np.split(np.arange(cluster_ids.size), split_points)
except ValueError:
warnings.warn(
"Could not balance folds to have approximately the same "
"number of data points. Dividing into folds with equal "
"number of clusters instead. Decreasing n_splits or increasing "
"the number of clusters may help.",
UserWarning,
)
folds = [i for _, i in KFold(n_splits=self.n_splits).split(cluster_ids)]
else:
folds = [i for _, i in KFold(n_splits=self.n_splits).split(cluster_ids)]
for test_clusters in folds:
test_points = np.where(np.isin(labels, cluster_ids[test_clusters]))[0]
yield test_points
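# A hedged usage sketch (not part of the original module), under the same
# assumption about spatial_clusters: with balance=True each of the 3 folds
# below should hold roughly a third of the 60 points.
def _example_spatial_kfold():
    rng = np.random.RandomState(42)
    coordinates = rng.uniform(0, 100, size=(60, 2))
    cv = _SpatialKFold(
        n_groups=6,
        coordinates=coordinates,
        method="KMeans",
        n_splits=3,
        random_state=42,
    )
    return [test.size for _, test in cv.split(coordinates)]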
|
ceos-seo/data_cube_utilities
|
dea_tools/dea_tools/classification.py
|
Python
|
apache-2.0
| 62,134
|
[
"Gaussian"
] |
c85fdc08e416d2c1ad95ac185300bd0dfba14efdf7d6149976a75f392ab00a75
|
#
# Copyright (C) 2020 Kevin Thornton <krthornt@uci.edu>
#
# This file is part of fwdpy11.
#
# fwdpy11 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# fwdpy11 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with fwdpy11. If not, see <http://www.gnu.org/licenses/>.
#
import typing
import attr
import numpy as np
from ._fwdpy11 import (GeneticValueIsTrait, GeneticValueNoise, _ll_Additive,
_ll_GaussianNoise, _ll_GBR, _ll_GSSmo,
_ll_Multiplicative, _ll_MultivariateGSSmo, _ll_NoNoise,
_ll_Optimum, _ll_PleiotropicOptima,
_ll_StrictAdditiveMultivariateEffects)
from .class_decorators import (attr_add_asblack, attr_class_pickle_with_super,
attr_class_to_from_dict,
attr_class_to_from_dict_no_recurse)
@attr_add_asblack
@attr_class_pickle_with_super
@attr_class_to_from_dict
@attr.s(auto_attribs=True, frozen=True, repr_ns="fwdpy11")
class Optimum(_ll_Optimum):
"""
Parameters for a trait optimum.
This class has the following attributes, whose names
    are also `kwargs` for initialization. The attribute names
also determine the order of positional arguments:
:param optimum: The trait value
:type optimum: float
:param VS: Strength of stabilizing selection
:type VS: float
:param when: The time when the optimum shifts
:type when: int or None
.. note::
When used to model a stable optimum (e.g.,
:class:`fwdpy11.GSS`), the `when` parameter is omitted.
The `when` parameter is used for moving optima
(:class:`fwdpy11.GSSmo`).
.. versionadded:: 0.7.1
.. versionchanged:: 0.8.0
Refactored to use attrs and inherit from
low-level C++ class
"""
optimum: float = attr.ib(validator=attr.validators.instance_of(float))
VS: float = attr.ib(validator=attr.validators.instance_of(float))
when: typing.Optional[int] = attr.ib(default=None)
@when.validator
def validate_when(self, attribute, value):
if value is not None:
attr.validators.instance_of(int)(self, attribute, value)
def __attrs_post_init__(self):
super(Optimum, self).__init__(self.optimum, self.VS, self.when)
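# A hedged usage sketch (not part of the original file): `when` is omitted
# for a stable optimum and must be an int for moving-optimum schedules.
def _example_optimum():
    stable = Optimum(optimum=0.0, VS=1.0)            # e.g. for fwdpy11.GSS
    moving = Optimum(optimum=1.0, VS=1.0, when=100)  # e.g. for fwdpy11.GSSmo
    return stable, moving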
@attr_add_asblack
@attr_class_pickle_with_super
@attr_class_to_from_dict
@attr.s(auto_attribs=True, frozen=True, repr_ns="fwdpy11", eq=False)
class PleiotropicOptima(_ll_PleiotropicOptima):
"""
Parameters for multiple trait optima
This class has the following attributes, whose names
    are also `kwargs` for initialization. The attribute names
also determine the order of positional arguments:
:param optima: The trait values
:type optima: List[float]
:param VS: Strength of stabilizing selection
:type VS: float
:param when: The time when the optimum shifts
:type when: int or None
.. note::
When used to model stable optima (e.g.,
:class:`fwdpy11.MultivariateGSS`), the `when` parameter is omitted.
The `when` parameter is used for moving optima
(:class:`fwdpy11.MultivariateGSSmo`).
.. versionadded:: 0.7.1
.. versionchanged:: 0.8.0
Refactored to use attrs and inherit from
low-level C++ class
"""
optima: typing.List[float]
VS: float = attr.ib(validator=attr.validators.instance_of(float))
when: typing.Optional[int] = attr.ib(default=None)
@when.validator
def validate_when(self, attribute, value):
if value is not None:
attr.validators.instance_of(int)(self, attribute, value)
def __attrs_post_init__(self):
super(PleiotropicOptima, self).__init__(self.optima, self.VS, self.when)
def __eq__(self, other):
optima_equal = np.array_equal(self.optima, other.optima)
VS_equal = self.VS == other.VS
when_equal = False
if self.when is not None and other.when is not None:
when_equal = self.when == other.when
return optima_equal and VS_equal and when_equal
@attr_add_asblack
@attr_class_to_from_dict_no_recurse
@attr.s(auto_attribs=True, frozen=True, repr_ns="fwdpy11")
class GSS(_ll_GSSmo):
"""
Gaussian stabilizing selection on a single trait.
This class has the following attributes, whose names
    are also `kwargs` for initialization. The attribute names
also determine the order of positional arguments:
:param optimum: The optimal trait value
:type optimum: float or fwdpy11.Optimum
:param VS: Inverse strength of stabilizing selection
:type VS: float or None
.. note::
VS should be None if optimum is an instance
of :class:`fwdpy11.Optimum`
.. versionchanged:: 0.7.1
        Allow instances of fwdpy11.Optimum for initialization
.. versionchanged:: 0.8.0
Refactored to use attrs and inherit from
low-level C++ class
"""
optimum: typing.Union[Optimum, float]
VS: typing.Optional[float] = None
def __attrs_post_init__(self):
if self.VS is None:
super(GSS, self).__init__(
[Optimum(optimum=self.optimum.optimum, VS=self.optimum.VS, when=0)]
)
else:
super(GSS, self).__init__(
[Optimum(optimum=self.optimum, VS=self.VS, when=0)]
)
def __getstate__(self):
return self.asdict()
def __setstate__(self, d):
self.__dict__.update(d)
if self.VS is None:
super(GSS, self).__init__(
[Optimum(optimum=self.optimum.optimum, VS=self.optimum.VS, when=0)]
)
else:
super(GSS, self).__init__(
[Optimum(optimum=self.optimum, VS=self.VS, when=0)]
)
@attr_add_asblack
@attr_class_pickle_with_super
@attr_class_to_from_dict_no_recurse
@attr.s(auto_attribs=True, frozen=True, repr_ns="fwdpy11")
class GSSmo(_ll_GSSmo):
"""
Gaussian stabilizing selection on a single trait with moving
optimum.
This class has the following attributes, whose names
    are also `kwargs` for initialization. The attribute names
also determine the order of positional arguments:
:param optima: The optimal trait values
:type optima: list[fwdpy11.Optimum]
.. note::
Instances of fwdpy11.Optimum must have valid
values for `when`.
.. versionchanged:: 0.8.0
Refactored to use attrs and inherit from
low-level C++ class
"""
optima: typing.List[Optimum] = attr.ib()
@optima.validator
def validate_optima(self, attribute, value):
if len(value) == 0:
raise ValueError("list of optima cannot be empty")
for o in value:
if o.when is None:
raise ValueError("Optimum.when is None")
def __attrs_post_init__(self):
super(GSSmo, self).__init__(self.optima)
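# A hedged usage sketch (not part of the original file): a two-epoch
# schedule in which the optimum shifts from 0.0 to 1.0 at generation 100.
# Every Optimum in the list must carry a non-None `when`.
def _example_gssmo():
    return GSSmo(
        optima=[
            Optimum(optimum=0.0, VS=1.0, when=0),
            Optimum(optimum=1.0, VS=1.0, when=100),
        ]
    )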
@attr_add_asblack
@attr_class_to_from_dict_no_recurse
@attr.s(auto_attribs=True, frozen=True, repr_ns="fwdpy11")
class MultivariateGSS(_ll_MultivariateGSSmo):
"""
    Multivariate Gaussian stabilizing selection.
    Maps a multidimensional trait to fitness using the Euclidean
    distance of a vector of trait values to a vector of optima.
Essentially, this is Equation 1 of
Simons, Yuval B., Kevin Bullaughey, Richard R. Hudson, and Guy Sella. 2018.
"A Population Genetic Interpretation of GWAS Findings for Human Quantitative Traits."
PLoS Biology 16 (3): e2002985.
For the case of moving optima, see :class:`fwdpy11.MultivariateGSSmo`.
This class has the following attributes, whose names
    are also `kwargs` for initialization. The attribute names
also determine the order of positional arguments:
:param optima: The optimum value for each trait over time
:type optima: numpy.ndarray or list[fwdpy11.PleiotropicOptima]
    :param VS: Inverse strength of stabilizing selection
:type VS: float or None
.. note::
`VS` should be `None` if `optima` is list[fwdpy11.PleiotropicOptima]
`VS` is :math:`\\omega^2` in the Simons et al. notation
.. versionchanged:: 0.7.1
Allow initialization with list of fwdpy11.PleiotropicOptima
.. versionchanged:: 0.8.0
Refactored to use attrs and inherit from
low-level C++ class
"""
optima: typing.Union[PleiotropicOptima, typing.List[float]]
VS: typing.Optional[float] = None
def __attrs_post_init__(self):
if self.VS is None:
super(MultivariateGSS, self).__init__([self.optima])
else:
super(MultivariateGSS, self).__init__(self._convert_to_list())
def __getstate__(self):
return self.asdict()
def __setstate__(self, d):
self.__dict__.update(d)
if self.VS is None:
super(MultivariateGSS, self).__init__([self.optima])
else:
super(MultivariateGSS, self).__init__(self._convert_to_list())
def _convert_to_list(self):
if self.VS is None:
raise ValueError("VS must not be None")
return [PleiotropicOptima(optima=self.optima, VS=self.VS, when=0)]
@attr_add_asblack
@attr_class_pickle_with_super
@attr_class_to_from_dict_no_recurse
@attr.s(auto_attribs=True, frozen=True, repr_ns="fwdpy11")
class MultivariateGSSmo(_ll_MultivariateGSSmo):
"""
    Multivariate Gaussian stabilizing selection with moving optima
This class has the following attributes, whose names
    are also `kwargs` for initialization. The attribute names
also determine the order of positional arguments:
:param optima: list of optima over time
:type optima: list[fwdpy11.PleiotropicOptima]
.. versionchanged:: 0.7.1
Allow initialization with list of fwdpy11.PleiotropicOptima
.. versionchanged:: 0.8.0
Refactored to use attrs and inherit from
low-level C++ class
"""
optima: typing.List[PleiotropicOptima] = attr.ib()
@optima.validator
def validate_optima(self, attribute, value):
if len(value) == 0:
raise ValueError("list of optima cannot be empty")
for o in value:
if o.when is None:
raise ValueError("PleiotropicOptima.when is None")
def __attrs_post_init__(self):
super(MultivariateGSSmo, self).__init__(self.optima)
@attr_add_asblack
@attr_class_pickle_with_super
@attr_class_to_from_dict
@attr.s(auto_attribs=True, frozen=True, repr_ns="fwdpy11")
class NoNoise(_ll_NoNoise):
"""
No random effects on genetic values
.. versionchanged:: 0.8.0
Refactored to use attrs and inherit from
low-level C++ class
"""
def __attrs_post_init__(self):
super(NoNoise, self).__init__()
@attr_add_asblack
@attr_class_pickle_with_super
@attr_class_to_from_dict
@attr.s(auto_attribs=True, frozen=True, repr_ns="fwdpy11")
class GaussianNoise(_ll_GaussianNoise):
"""
Gaussian noise added to genetic values.
This class has the following attributes, whose names
    are also `kwargs` for initialization. The attribute names
also determine the order of positional arguments:
:param sd: Standard deviation
:type sd: float
:param mean: Mean value
:type mean: float
.. versionchanged:: 0.8.0
Refactored to use attrs and inherit from
low-level C++ class
"""
sd: float
mean: float = 0.0
def __attrs_post_init__(self):
super(GaussianNoise, self).__init__(self.sd, self.mean)
@attr_add_asblack
@attr_class_pickle_with_super
@attr_class_to_from_dict_no_recurse
@attr.s(auto_attribs=True, frozen=True, repr_ns="fwdpy11")
class Additive(_ll_Additive):
"""
Additive effects on genetic values.
This class has the following attributes, whose names
    are also `kwargs` for initialization. The attribute names
also determine the order of positional arguments:
:param scaling: How to treat mutant homozygotes.
:type scaling: float
:param gvalue_to_fitness: How to map trait value to fitness
:type gvalue_to_fitness: fwdpy11.GeneticValueIsTrait
:param noise: Random effects on trait values
:type noise: fwdpy11.GeneticValueNoise
When `gvalue_to_fitness` is `None`, then we are
modeling additive effects on fitness.
For a model of fitness, the genetic value is 1, 1+e*h,
1+`scaling`*e for genotypes AA, Aa, and aa, respectively,
where `e` and `h` are the effect size and dominance, respectively.
For a model of a trait (phenotype), meaning `gvalue_to_fitness`
is not `None`, the values for the three genotypes are 0, e*h,
and e, respectively.
.. versionchanged:: 0.8.0
Refactored to use attrs and inherit from
low-level C++ class
"""
scaling: float
gvalue_to_fitness: GeneticValueIsTrait = None
noise: GeneticValueNoise = None
ndemes: int = 1
def __attrs_post_init__(self):
super(Additive, self).__init__(
self.scaling, self.gvalue_to_fitness, self.noise, self.ndemes
)
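# A hedged worked example (not part of the original file) of the genotype
# values described in the docstring above for a fitness model
# (gvalue_to_fitness is None): with scaling=2.0, effect size e=0.01 and
# dominance h=0.5, genotypes AA, Aa and aa have genetic values
# 1.0, 1.0 + e*h = 1.005 and 1.0 + scaling*e = 1.02, respectively.
def _example_additive_fitness_values(scaling=2.0, e=0.01, h=0.5):
    return 1.0, 1.0 + e * h, 1.0 + scaling * e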
@attr_add_asblack
@attr_class_pickle_with_super
@attr_class_to_from_dict_no_recurse
@attr.s(auto_attribs=True, frozen=True, repr_ns="fwdpy11")
class Multiplicative(_ll_Multiplicative):
"""
Multiplicative effects on genetic values.
This class has the following attributes, whose names
    are also `kwargs` for initialization. The attribute names
also determine the order of positional arguments:
:param scaling: How to treat mutant homozygotes.
:type scaling: float
:param gvalue_to_fitness: How to map trait value to fitness
:type gvalue_to_fitness: fwdpy11.GeneticValueIsTrait
:param noise: Random effects on trait values
:type noise: fwdpy11.GeneticValueNoise
When `gvalue_to_fitness` is `None`, then we are
modeling multiplicative effects on fitness.
For a model of fitness, the genetic value is 1, 1+e*h,
1+`scaling`*e for genotypes AA, Aa, and aa, respectively,
where `e` and `h` are the effect size and dominance, respectively.
For a model of a trait (phenotype), meaning `gvalue_to_fitness`
is not `None`, the values for the three genotypes are 0, e*h,
and e, respectively.
.. versionchanged:: 0.8.0
Refactored to use attrs and inherit from
low-level C++ class
"""
scaling: float
gvalue_to_fitness: GeneticValueIsTrait = None
noise: GeneticValueNoise = None
ndemes: int = 1
def __attrs_post_init__(self):
super(Multiplicative, self).__init__(
self.scaling, self.gvalue_to_fitness, self.noise, self.ndemes
)
@attr_add_asblack
@attr_class_pickle_with_super
@attr_class_to_from_dict_no_recurse
@attr.s(auto_attribs=True, frozen=True, repr_ns="fwdpy11")
class GBR(_ll_GBR):
"""
The "gene-based recessive" trait model described in Thornton et al.
2013 http://dx.doi.org/10.1371/journal.pgen.1003258 and Sanjak et al. 2017
http://dx.doi.org/10.1371/journal.pgen.1006573.
The trait value is the geometric mean of the sum of effect sizes on each haplotype.
It is undefined for the case where these sums are negative.
This class has the following attributes, whose names
    are also `kwargs` for initialization. The attribute names
also determine the order of positional arguments:
:param gvalue_to_fitness: How to map trait value to fitness
:type gvalue_to_fitness: fwdpy11.GeneticValueIsTrait
:param noise: Random effects on trait values
:type noise: fwdpy11.GeneticValueNoise
.. versionchanged:: 0.8.0
Refactored to use attrs and inherit from
low-level C++ class
"""
gvalue_to_fitness: object
noise: object = None
def __attrs_post_init__(self):
super(GBR, self).__init__(self.gvalue_to_fitness, self.noise)
@attr_add_asblack
@attr_class_pickle_with_super
@attr_class_to_from_dict_no_recurse
@attr.s(auto_attribs=True, frozen=True, repr_ns="fwdpy11")
class StrictAdditiveMultivariateEffects(_ll_StrictAdditiveMultivariateEffects):
"""
Multivariate trait values under strictly additive effects.
Calculate the trait value for a diploid in a :class:`fwdpy11.DiploidPopulation`
for a multidimensional trait.
This class is restricted to the case of simple additive effects, meaning
that any dominance terms associated with mutations are ignored.
During a simulation, :attr:`fwdpy11.DiploidMetadata.g` is filled with the
genetic value corresponding to a "focal" trait specified upon object construction.
This class has the following attributes, whose names
    are also `kwargs` for initialization. The attribute names
also determine the order of positional arguments:
:param ndimensions: Number of trait dimensions
:type ndimensions: int
:param focal_trait: Index of the focal trait
:type focal_trait: int
:param gvalue_to_fitness: Function mapping trait value to fitness
:type gvalue_to_fitness: :class:`fwdpy11.GeneticValueToFitnessMap`
:param noise: Function adding random additive noise to trait value
:type noise: :class:`fwdpy11.GeneticValueNoise`
.. versionchanged:: 0.8.0
Refactored to use attrs and inherit from
low-level C++ class
"""
ndimensions: int
focal_trait: int
gvalue_to_fitness: object
noise: object = None
def __attrs_post_init__(self):
super(StrictAdditiveMultivariateEffects, self).__init__(
self.ndimensions, self.focal_trait, self.gvalue_to_fitness, self.noise
)
|
molpopgen/fwdpy11
|
fwdpy11/genetic_values.py
|
Python
|
gpl-3.0
| 18,005
|
[
"Gaussian"
] |
a71097351925075c796c36834d3f2b2621c6d25ed82c986c57f59000e8aac5ba
|
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
import json
from crystal_dashboard.api import projects as api
from crystal_dashboard.dashboards.crystal.projects.groups import workflows as project_workflows
class CreateGroupView(workflows.WorkflowView):
workflow_class = project_workflows.CreateGroup
def get_initial(self):
initial = super(CreateGroupView, self).get_initial()
return initial
class UpdateGroupView(workflows.WorkflowView):
workflow_class = project_workflows.UpdateGroup
def get_context_data(self, **kwargs):
context = super(UpdateGroupView, self).get_context_data(**kwargs)
context["id"] = self.kwargs['id']
return context
def get_object(self, *args, **kwargs):
group_id = self.kwargs['id']
try:
group = json.loads(api.get_project_group(self.request, group_id).text)
group['group_id'] = group_id
except Exception:
redirect = reverse("horizon:crystal:projects:index")
msg = _('Unable to retrieve group details.')
exceptions.handle(self.request, msg, redirect=redirect)
return group
def get_initial(self):
initial = super(UpdateGroupView, self).get_initial()
group = self.get_object()
return group
|
Crystal-SDS/dashboard
|
crystal_dashboard/dashboards/crystal/projects/groups/views.py
|
Python
|
gpl-3.0
| 1,490
|
[
"CRYSTAL"
] |
d15e513631f2bac3531f911cf3b71664c057589823c0f73083748bbdf11697b4
|
import os
import re
import string
import random
import json
import time
import markovify
from textblob import TextBlob
from textblob import Word
from flask import Flask
from flask import render_template, url_for, request, jsonify
import requests
import twitter
app = Flask(__name__)
# Used to build up stochastic question
SINGULAR_VERBS = [
"receives", "accepts", "joins", "agonizes over", "loves", "hates", "wants", "needs", "worries about", "eats", "consumes", "hesitates over", "fights with", "wonders about", "contemplates", "partakes of", "rejoices over", "praises", "is confounded by", "exalts", "finds cohesion in"
]
# Used to build up stochastic response
QUOTE_INTROS_WITH_NOUN = [
" The Elders may know more of '{noun}'. In the Word, it says: \""," My knowledge is limited. Wriwenis's is infinite. Of '{noun}', the texts say: \""," I'm not able to discern your meaning. I'm sure others could tell you more about '{noun}'. The Book says: \""," I'm sorry, but I tire easily. The subject of '{noun}' is not one I'm familiar with. Perhaps the wisdom of Wriwenis could help: \""
]
# Used to build up stochastic response
QUOTE_INTROS_NO_NOUN = [
" This is beyond my knowledge. The Word says: \""," I don't know all. That suits me. But for those more inquisitive, the Word says: \""," A wise though. It has been said: \""," That is the purview of the Elders. It is known that: \""
]
LOCATION_SENTENCES = [
"How is the weather in {location}? Wriwenis has many followers there. You should consider joining them.","I've never visited {location}. Do you enjoy it there?","I've always heard that {location} was beautiful. Perhaps I'll visit. We can meet up and talk of Wriwenis.'"
]
# Create Twitter Client
with open('twitter_config.json') as cred:
creds = json.load(cred)
twitter_api = twitter.Api(**creds)
# Create markov text object to create quote
with open("src/directory/markov-src-spaceless.txt") as f:
text = f.read()
text_model = markovify.Text(text)
# The following Natural Language Processing functions are either directly taken from, or modified from, https://apps.worldwritable.com/tutorials/chatbot/
def preprocess_text(sentence):
"""Handle some weird edge cases in parsing, like 'i' needing to be capitalized
to be correctly identified as a pronoun"""
cleaned = []
words = sentence.split(' ')
for w in words:
if w == 'i':
w = 'I'
if w == "i'm":
w = "I'm"
cleaned.append(w)
return ' '.join(cleaned)
def starts_with_vowel(word):
"""Check for pronoun compability -- 'a' vs. 'an'"""
return True if word[0] in 'aeiou' else False
def find_pronoun(sent):
"""Given a sentence, find a preferred pronoun to respond with. Returns None if no candidate
pronoun is found in the input"""
pronoun = None
for word, part_of_speech in sent.pos_tags:
# Disambiguate pronouns
if part_of_speech == 'PRP':
pronoun = word
break
return pronoun
def find_verb(sent):
"""Pick a candidate verb for the sentence."""
verb = None
for word, part_of_speech in sent.pos_tags:
if part_of_speech.startswith('VB'): # This is a verb
verb = word
break
return verb
def find_noun(sent):
"""Given a sentence, find the best candidate noun."""
    noun = None
    for w, p in sent.pos_tags:
        if p == 'NN':  # This is a noun
            noun = w
            break
    return noun
def find_adjective(sent):
"""Given a sentence, find the best candidate adjective."""
adj = None
for w, p in sent.pos_tags:
if p == 'JJ': # This is an adjective
adj = w
break
return adj
def find_candidate_parts_of_speech(parsed):
"""Given a parsed input, find the best pronoun, direct noun, adjective, and verb to match their input.
Returns a tuple of pronoun, noun, adjective, verb any of which may be None if there was no good match"""
pronoun = None
noun = None
adjective = None
verb = None
for sent in parsed.sentences:
pronoun = find_pronoun(sent)
noun = find_noun(sent)
adjective = find_adjective(sent)
verb = find_verb(sent)
return pronoun, noun, adjective, verb
# End NLP tutorial functions
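# A hedged usage sketch (not part of the original app); the sentence is a
# made-up illustration and exact tags depend on TextBlob's POS tagger, but
# one would expect pronoun 'I', verb 'love' and noun 'weather' here.
def _example_pos_extraction():
    parsed = TextBlob(preprocess_text("i love the weather"))
    return find_candidate_parts_of_speech(parsed)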
def multi_tweet(quote):
    # Post the first 137 characters, then the remainder (prefixed with "...")
    # in further tweets so no single update exceeds 140 characters.
    twitter_api.PostUpdate(quote[:137] + "...")
    if len("..." + quote[137:]) > 140:
        multi_tweet("..." + quote[137:])
    else:
        twitter_api.PostUpdate("..." + quote[137:])
# RESTful end point called to load application
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
# RESTful end point called by the application when it is the chatbot's turn to speak
@app.route('/input', methods=['POST'])
def get_response():
# Load input from user and make lower case to more easily match
input_msg_obj = json.loads(request.data.decode())
input_msg = input_msg_obj["text"].lower()
input_msg_array = input_msg.split(". ")
input_msg = input_msg_array[len(input_msg_array) - 1]
output=pronoun=noun=adjective=verb= ""
quote= ""
is_random = 0
print(input_msg_obj)
    # Grab user name if present (defaults to You)
author = input_msg_obj["author"]
location = input_msg_obj["location"]
# Confirm desire to join if last output was about joining or if user expresses an interest
if author.lower().find("initiate") == -1 and input_msg_obj["lastOutput"].lower().find("you would like to join with us in wriwenis") > -1 and input_msg.find("ye") > -1:
output = "Excellent! Welcome initiate. The merging will progress in due time. Focus your mind on Wriwenis and his love. Come speak with us again in the morrow for your next step."
if author == "You":
author = "Initiate"
else:
author = "Initiate " + author
    # Handle a refusal if the last output was the join question
    if author.lower().find("initiate") == -1 and input_msg_obj["lastOutput"].lower().find("you would like to join with us in wriwenis") > -1 and input_msg.find("no") > -1:
        output = "That is unfortunate to hear, but every minute of every hour is a new opportunity to meet Wriwenis. Soon you will have a change of heart. I am sure."
# Confirm desire to join if last output was about joining or if user expresses an interest
if author.lower().find("initiate") == -1 and (input_msg_obj["lastOutput"].lower().find("join") > -1 or input_msg.find("join") > -1):
output = "You would like to join with us in Wriwenis?"
if output.lower() == input_msg_obj["lastOutput"].lower():
output = ''
# How many times since last asked name
if (input_msg_obj["since_name_check"] != -1):
since_name_check = input_msg_obj["since_name_check"] + 1
else:
since_name_check = -1
# How many times since last asked profession
if (input_msg_obj["since_profession_check"] != -1):
since_profession_check = input_msg_obj["since_profession_check"] + 1
else:
since_profession_check = -1
    # Confirm profession if last output was the profession question or the user states it
if input_msg_obj["lastOutput"].lower().find("your profession") > -1 or input_msg.find("my profession is") > -1:
words = input_msg.split()
profession = words[len(words) - 1].capitalize()
output = author + ", you are a " + profession + "?"
# Profession is correct, greet
if input_msg_obj["lastOutput"].lower().find("you are a") > -1 and input_msg.find("ye") > -1:
words = input_msg_obj["lastOutput"].split()
profession = words[len(words) - 1][:-1].capitalize()
author_array = author.split()
if author_array[1] == "of":
author = author_array[0] + ", " + profession + " of " + location
else:
author = author_array[0] + " " + author_array[1] + ", " + profession + " of " + location
output = "Being a " + profession + " is an admirable vocation and a fitting one for you, " + author + "."
since_profession_check = -1
# Profession is incorrect, apologize
if input_msg_obj["lastOutput"].lower().find("your are a") > -1 and input_msg.find("no") > -1:
output = "I apologize, " + author + ". Sometimes in my excitement to invite another to merge with Wriwenis, I make careless mistakes."
# Check for repeating input from user
if input_msg_obj["lastInput"].lower() == input_msg.lower():
if author != "You":
output += author + ', '
output += "Why do you repeat yourself? It is fine with me, but feel comfortable to share what you wish. Wriwenis is open to all."
    # Confirm name if last output was the name question or the user states it
if input_msg_obj["lastOutput"].lower().find("your name") > -1 or input_msg.find("my name is") > -1:
words = input_msg.split()
name = words[len(words) - 1].capitalize()
output = "Your name is " + name + "?"
# Name is correct, greet
if input_msg_obj["lastOutput"].lower().find("your name is") > -1 and input_msg.find("ye") > -1:
words = input_msg_obj["lastOutput"].split()
if author == "You":
author = words[len(words) - 1][:-1].capitalize() + " of " + location
else:
            author = author + " " + words[len(words) - 1][:-1].capitalize() + " of " + location
output = "It is nice to meet you " + author + "."
since_name_check = -1
# Name is incorrect, apologize
if input_msg_obj["lastOutput"].lower().find("your name is") > -1 and input_msg.find("no") > -1:
output = "I apologize. Sometimes in my excitement to invite another to merge with Wriwenis, I make careless mistakes."
# Respond if no input
if input_msg == "" or input_msg == None:
return "Please, take your time"
# Remove punctuation for direct matching and keyword matching
input_msg_nopunc = re.sub('['+string.punctuation+']', '', input_msg)
# Open direct response json file to look for direct and keyword matches
with open('src/directory/direct-resp.json') as data_file:
direct = json.load(data_file)
# Iterate through direct response objects
for obj in direct:
# Iterate through direct response object input arrays looking for input phrase
for msg in obj["input"]:
# If input phrase matches object input text, set output equal to random object output phrase
if (msg == input_msg_nopunc):
print(msg)
output = random.choice(obj["output"])
break
if output != "":
break
# Add spaces to beginning and end for keyword search
input_msg_nopunc = ' ' + input_msg_nopunc + ' '
# If no direct response, iterate again through canned phrases looking for keyword matches
if output == '':
# Iterate through direct response objects
for obj in direct:
# Iterate through direct response object input arrays looking for input phrase
for word in obj["keywords"]:
# If input phrase matches object input text, set output equal to random object output phrase
if input_msg_nopunc.find(' ' + word + ' ') > -1:
print(word)
output = random.choice(obj["output"])
break
if output != "":
break
if output != "" and output.lower().find("turner") > -1:
output = output.replace("Turner", input_msg_obj["acolyte"])
# If direct phrase and keyword/phrase match not found, create randomized response, trying to use as much of the sentence as possible for a seed
if output == "":
cleaned = preprocess_text(input_msg)
# Use TextBlob for natural language processing in order to extract parts of speech from sentence
parsed = TextBlob(cleaned)
pronoun, noun, adjective, verb = find_candidate_parts_of_speech(parsed)
# Flip pronouns if relevant
if pronoun is not None:
if pronoun == 'I':
pronoun = 'you'
elif pronoun == 'you':
pronoun = 'i'
if noun is not None:
print("resp_object: " + noun)
if pronoun is not None:
print("resp_subj: " + pronoun)
if verb is not None:
print("resp_verb: " + verb)
# Get Markov quote, using the noun and verb as a seed if both present
# Also, choose a random quote introductory phrase
if noun is not None and verb is not None:
seed = str(noun) + ' ' + str(verb)
try:
quote = text_model.make_sentence_with_start(seed)
except:
quote = ""
# Skipping to shorten response time
# print("start prnoun and verb")
# Try to make quote using pronoun and verb
# if pronoun is not None and verb is not None and quote == "":
# seed = str(pronoun) + ' ' + str(verb)
# try:
# quote = text_model.make_sentence_with_start(seed)
# except:
# quote = ""
print("start noun alone")
# Try to make a quote using the noun as a seed
if quote == "" and noun is not None:
seed = str(noun) + ' is'
try:
quote = text_model.make_sentence_with_start(seed)
except:
seed = 'The ' + str(noun)
try:
quote = text_model.make_sentence_with_start(seed)
except:
quote = ""
print("start verb alone")
# Try to make a quote using the base form of the verb
if quote == "" and verb is not None and verb[0] != "'":
verb = Word(verb)
try:
verb = verb.lemmatize("v")
except:
print("couldn't lemmatize verb")
seed = verb
v_text_model = markovify.Text(text, state_size=1)
try:
quote = v_text_model.make_sentence_with_start(seed)
except:
quote = ""
# print("start pronoun alone")
# Skipping to shorten response time
# Try to make quote using pronoun as seed
# if quote == "" and pronoun is not None:
# seed = str(pronoun) + ' is'
# try:
# quote = text_model.make_sentence_with_start(seed)
# except:
# seed = 'The ' + str(pronoun)
# try:
# quote = text_model.make_sentence_with_start(seed)
# except:
# quote = ""
if quote != "":
is_random = 1
if quote == "" and location == "":
ip_address = request.headers['X-Forwarded-For'] #request.environ['REMOTE_ADDR']
r = requests.get('http://freegeoip.net/json/' + ip_address)
location = r.json()["city"]
quote = random.choice(LOCATION_SENTENCES).format(**{'location': location})
elif quote == "" and (author == "You" or author == "Initiate") and since_name_check >= 10:
quote = "What is your name?"
since_name_check = 0
elif quote == "" and author.find("initiate") == -1 and (since_name_check % 3 == 0 or since_profession_check % 3 == 0):
if author != "You":
quote += " " + author + ", "
quote += "Have you thought anymore about joining with us in Wriwenis?"
elif quote == "" and since_name_check == -1 and since_profession_check >= 5:
quote += " " + author + ", what is your profession?"
# If all else fails, create a random sentence using no seed
if quote == "" or quote is None:
quote = text_model.make_short_sentence(70)
is_random = 1
print(quote)
# Capitalize first word of sentence and set quote as output
quote = quote[0].upper() + quote[1:]
    if is_random == 1:
        if len(quote) > 140:
            multi_tweet(quote)
        else:
            # Tweet random response
            twitter_api.PostUpdate(quote)
output = quote
# Future development will try to pull out user's name
res = {'output': output, 'author': author, 'since_name_check': since_name_check, 'since_profession_check': since_profession_check, 'location': location}
# Delay response if no quote created in order to simulate a similar delay as if a person was typing a response
if quote == "":
time.sleep(4)
return jsonify(**res)
if __name__ == '__main__':
app.run()
|
tkah/wriwenis
|
app.py
|
Python
|
mit
| 17,132
|
[
"VisIt"
] |
7a10d26f6145afc24ce0583085a7e35448d059e6befb5b0ffc2dca6558b6aabe
|
import ast
from myhdl._convutils import _makeAST, _genfunc
from myhdl._util import _flatten
from myhdl._enum import EnumType
from myhdl._Signal import SignalType
class Data():
pass
def _resolveRefs(symdict, arg):
gens = _flatten(arg)
data = Data()
data.symdict = symdict
v = _AttrRefTransformer(data)
for gen in gens:
func = _genfunc(gen)
tree = _makeAST(func)
v.visit(tree)
return data.objlist
#TODO: Refactor this into two separate nodetransformers, since _resolveRefs
#needs only the names, not the objects
class _AttrRefTransformer(ast.NodeTransformer):
def __init__(self, data):
self.data = data
self.data.objlist = []
self.myhdl_types = (EnumType, SignalType)
def visit_Attribute(self, node):
self.generic_visit(node)
reserved = ('next', 'posedge', 'negedge', 'max', 'min', 'val', 'signed')
if node.attr in reserved:
return node
#Don't handle subscripts for now.
if not isinstance(node.value, ast.Name):
return node
obj = self.data.symdict[node.value.id]
        #Don't handle enums; handle signals as long as it is a new attribute
if isinstance(obj, EnumType):
return node
elif isinstance(obj, SignalType):
if hasattr(SignalType, node.attr):
return node
attrobj = getattr(obj, node.attr)
new_name = node.value.id+'.'+node.attr
if new_name not in self.data.symdict:
self.data.symdict[new_name] = attrobj
self.data.objlist.append(new_name)
else:
pass
#assert self.data.symdict[new_name] == attrobj
new_node = ast.Name(id=new_name, ctx=node.value.ctx)
return ast.copy_location(new_node, node)
def visit_FunctionDef(self, node):
nodes = _flatten(node.body, node.args)
for n in nodes:
self.visit(n)
return node
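# A hedged sketch (not part of the original module) of what the transformer
# does. Given a symdict entry 'bus' whose object carries a plain attribute
# 'data' (not reserved, and not defined on SignalType itself), the Attribute
# node for `bus.data` is replaced by Name(id='bus.data'), and 'bus.data' is
# recorded in data.symdict and data.objlist for later resolution.
# Conceptually:
#     before: ast.parse("bus.data")  ->  Attribute(Name('bus'), 'data')
#     after : visit_Attribute(...)   ->  Name(id='bus.data')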
|
forrestv/myhdl
|
myhdl/_resolverefs.py
|
Python
|
lgpl-2.1
| 1,977
|
[
"VisIt"
] |
163e9f65464dddbc44b2d6962741bbf7e127a82dd022f7e3dea12934b5152024
|
from nac.common import retrieve_hdf5_data
from nac.workflows.input_validation import process_input
from nac.workflows import workflow_stddft
from os.path import join
from .utilsTest import remove_files, copy_basis_and_orbitals
import numpy as np
import pkg_resources as pkg
import os
import tempfile
# Environment data
file_path = pkg.resource_filename('nac', '')
root = os.path.split(file_path)[0]
path_traj_xyz = join(root, 'test/test_files/Cd.xyz')
path_original_hdf5 = join(root, 'test/test_files/Cd.hdf5')
project_name = 'Cd'
input_file = join(root, 'test/test_files/input_test_absorption_spectrum.yml')
def test_compute_oscillators(tmp_path):
"""
    Compute the oscillator strength and check the results.
"""
scratch_path = join(tempfile.gettempdir(), 'namd')
path_test_hdf5 = tempfile.mktemp(
prefix='absorption_spectrum_', suffix='.hdf5', dir=scratch_path)
if not os.path.exists(scratch_path):
os.makedirs(scratch_path, exist_ok=True)
try:
# Run the actual test
copy_basis_and_orbitals(path_original_hdf5, path_test_hdf5,
project_name)
calculate_oscillators(path_test_hdf5, scratch_path)
check_properties(path_test_hdf5)
finally:
remove_files()
def calculate_oscillators(path_test_hdf5, scratch_path):
"""
Compute a couple of couplings with the Levine algorithm
using precalculated MOs.
"""
config = process_input(input_file, 'absorption_spectrum')
config['path_hdf5'] = path_test_hdf5
config['workdir'] = scratch_path
config['path_traj_xyz'] = join(
root, config.path_traj_xyz)
workflow_stddft(config)
def check_properties(path_test_hdf5):
"""
Check that the tensor stored in the HDF5 are correct.
"""
dipole_matrices = retrieve_hdf5_data(
path_test_hdf5, 'Cd/multipole/point_0/dipole')
# The diagonals of each component of the matrix must be zero
# for a single atom
diagonals = np.sum([np.diag(dipole_matrices[n + 1]) for n in range(3)])
assert abs(diagonals) < 1e-16
|
felipeZ/nonAdiabaticCoupling
|
test/test_absorption_spectrum.py
|
Python
|
mit
| 2,093
|
[
"NAMD"
] |
5e4d222919727e13dd72efbcb017b88839ff32916ba7006fbbc7b26450f3aa00
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2005 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
The core of the GRAMPS plugin system. This module provides capability to load
plugins from specified directories and provide information about the loaded
plugins.
Plugins are divided into several categories. These are: reports, tools,
importers, exporters, quick reports, and document generators.
"""
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
import os
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GdkPixbuf
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.utils.callback import Callback
from gramps.gen.plug import BasePluginManager, PluginRegister
from gramps.gen.constfunc import win
from gramps.gen.config import config
from gramps.gen.const import ICON
#-------------------------------------------------------------------------
#
# Functions
#
#-------------------------------------------------------------------------
def base_reg_stock_icons(iconpaths, extraiconsize, items):
"""
Reusable base to register stock icons in Gramps
..attribute iconpaths: list of main directory of the base icon, and
extension, eg:
[(os.path.join(IMAGE_DIR, 'scalable'), '.svg')]
..attribute extraiconsize: list of dir with extra prepared icon sizes and
the gtk size to use them for, eg:
[(os.path.join(IMAGE_DIR, '22x22'), Gtk.IconSize.LARGE_TOOLBAR)]
..attribute items: list of icons to register, eg:
[('gramps-db', _('Family Trees'), Gdk.ModifierType.CONTROL_MASK, 0, '')]
"""
# Register our stock items
##TODO GTK3: stock_add does not work on items, it must be Gtk.StockItem, but
## in python one cannot create them, bug http://www.gramps-project.org/bugs/view.php?id=5009
## However, it seems we do not need this line as stock icons are found via Iconset
## Gtk.stock_add (items)
# Add our custom icon factory to the list of defaults
factory = Gtk.IconFactory()
factory.add_default()
for data in items:
pixbuf = 0
for (dirname, ext) in iconpaths:
icon_file = os.path.expanduser(os.path.join(dirname, data[0]+ext))
if os.path.isfile(icon_file):
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file (icon_file)
break
except:
pass
if not pixbuf :
pixbuf = GdkPixbuf.Pixbuf.new_from_file (ICON)
pixbuf = pixbuf.add_alpha(True, 255, 255, 255)
icon_set = Gtk.IconSet.new_from_pixbuf (pixbuf)
#add different sized icons, always png type!
for size in extraiconsize :
pixbuf = 0
icon_file = os.path.expanduser(
os.path.join(size[0], data[0]+'.png'))
if os.path.isfile(icon_file):
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file (icon_file)
except:
pass
if pixbuf :
source = Gtk.IconSource()
source.set_size_wildcarded(False)
source.set_size(size[1])
source.set_pixbuf(pixbuf)
icon_set.add_source(source)
factory.add (data[0], icon_set)
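# A hedged usage sketch (not part of the original module); IMAGE_DIR and the
# icon entries are made-up illustrations mirroring the docstring above:
#
#     base_reg_stock_icons(
#         [(os.path.join(IMAGE_DIR, 'scalable'), '.svg')],
#         [(os.path.join(IMAGE_DIR, '22x22'), Gtk.IconSize.LARGE_TOOLBAR)],
#         [('gramps-db', 'Family Trees', Gdk.ModifierType.CONTROL_MASK, 0, '')])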
#-------------------------------------------------------------------------
#
# GuiPluginManager
#
#-------------------------------------------------------------------------
class GuiPluginManager(Callback):
""" PluginManager is a Singleton which manages plugins.
    It is the gui implementation using a unique BasePluginManager.
This class adds the possibility to hide plugins in the GUI via a config
setting
"""
__instance = None
__signals__ = { 'plugins-reloaded' : None }
def get_instance():
""" Use this function to get the instance of the PluginManager """
if GuiPluginManager.__instance is None:
GuiPluginManager.__instance = 1 # Set to 1 for __init__()
GuiPluginManager.__instance = GuiPluginManager()
return GuiPluginManager.__instance
get_instance = staticmethod(get_instance)
def __init__(self):
""" This function should only be run once by get_instance() """
        if GuiPluginManager.__instance != 1:
raise Exception("This class is a singleton. "
"Use the get_instance() method")
Callback.__init__(self)
self.basemgr = BasePluginManager.get_instance()
self.__hidden_plugins = set(config.get('plugin.hiddenplugins'))
self.__hidden_changed()
def load_plugin(self, pdata):
if not self.is_loaded(pdata.id):
#load stock icons before import, only gui needs this
if pdata.icons:
if pdata.icondir and os.path.isdir(pdata.icondir):
dir = pdata.icondir
else:
#use the plugin directory
dir = pdata.directory
self.load_icons(pdata.icons, dir)
return self.basemgr.load_plugin(pdata)
def reload_plugins(self):
self.basemgr.reload_plugins()
self.emit('plugins-reloaded')
def __getattr__(self, name):
return getattr(self.basemgr, name)
def load_icons(self, icons, dir):
"""
Load icons in the iconfactory of gramps, so they can be used in the
plugin.
..attribute icons:
New stock icons to register. A list of tuples (stock_id, icon_label),
eg:
[('gramps_myplugin', _('My Plugin')),
('gramps_myplugin_open', _('Open Plugin'))]
The plugin directory must contain the directories scalable, 48x48, 22x22
and 16x16 with the icons, eg in dir we have:
scalable/gramps_myplugin.svg
48x48/gramps_myplugin.png
22x22/gramps_myplugin.png
..attribute dir: directory from where to load the icons
"""
if win():
iconpaths = [
(os.path.join(dir, '48x48'), '.png'),
(dir, '.png'),
]
else :
iconpaths = [
(os.path.join(dir, 'scalable'), '.svg'),
(dir, '.svg'), (dir, '.png'),
]
#sizes: menu=16, small_toolbar=18, large_toolbar=24,
# button=20, dnd=32, dialog=48
#add to the back of this list to overrule images set at beginning of list
extraiconsize = [
(os.path.join(dir, '22x22'), Gtk.IconSize.LARGE_TOOLBAR),
(os.path.join(dir, '16x16'), Gtk.IconSize.MENU),
(os.path.join(dir, '22x22'), Gtk.IconSize.BUTTON),
]
items = []
for stock_id, label in icons:
items.append((stock_id, label, Gdk.ModifierType.CONTROL_MASK, 0, ''))
base_reg_stock_icons(iconpaths, extraiconsize, items)
def __hidden_changed(self, *args):
#if hidden changed, stored data must be emptied as it could contain
#something that now must be hidden
self.empty_managed_plugins()
#objects that need to know if the plugins available changed, are
#listening to this signal to update themselves. If a plugin becomes
#(un)hidden, this should happen, so we emit.
self.emit('plugins-reloaded')
def get_hidden_plugin_ids(self):
"""
Returns copy of the set hidden plugin ids
"""
return self.__hidden_plugins.copy()
def hide_plugin(self, id):
""" Hide plugin with given id. This will hide the plugin so queries do
not return it anymore, and write this change to the config.
Note that config will then emit a signal
"""
self.__hidden_plugins.add(id)
config.set('plugin.hiddenplugins', list(self.__hidden_plugins))
config.save()
self.__hidden_changed()
def unhide_plugin(self, id):
""" Unhide plugin with given id. This will unhide the plugin so queries
return it again, and write this change to the config
"""
self.__hidden_plugins.remove(id)
config.set('plugin.hiddenplugins', list(self.__hidden_plugins))
config.save()
self.__hidden_changed()
def get_reg_reports(self, gui=True):
""" Return list of non hidden registered reports
:Param gui: bool indicating if GUI reports or CLI reports must be
returned
"""
return [plg for plg in self.basemgr.get_reg_reports(gui)
if plg.id not in self.__hidden_plugins]
def get_reg_tools(self, gui=True):
""" Return list of non hidden registered tools
:Param gui: bool indicating if GUI reports or CLI reports must be
returned
"""
return [plg for plg in self.basemgr.get_reg_tools(gui)
if plg.id not in self.__hidden_plugins]
def get_reg_views(self):
""" Return list of non hidden registered views
"""
return [plg for plg in self.basemgr.get_reg_views()
if plg.id not in self.__hidden_plugins]
def get_reg_quick_reports(self):
""" Return list of non hidden registered quick reports
"""
return [plg for plg in self.basemgr.get_reg_quick_reports()
if plg.id not in self.__hidden_plugins]
def get_reg_mapservices(self):
""" Return list of non hidden registered mapservices
"""
return [plg for plg in self.basemgr.get_reg_mapservices()
if plg.id not in self.__hidden_plugins]
def get_reg_bookitems(self):
""" Return list of non hidden reports registered as bookitem
"""
return [plg for plg in self.basemgr.get_reg_bookitems()
if plg.id not in self.__hidden_plugins]
def get_reg_gramplets(self):
""" Return list of non hidden reports registered as bookitem
"""
return [plg for plg in self.basemgr.get_reg_gramplets()
if plg.id not in self.__hidden_plugins]
def get_reg_sidebars(self):
""" Return list of non hidden registered sidebars
"""
return [plg for plg in self.basemgr.get_reg_sidebars()
if plg.id not in self.__hidden_plugins]
def get_reg_importers(self):
""" Return list of registered importers
"""
return [plg for plg in self.basemgr.get_reg_importers()
if plg.id not in self.__hidden_plugins]
def get_reg_exporters(self):
""" Return list of registered exporters
"""
return [plg for plg in self.basemgr.get_reg_exporters()
if plg.id not in self.__hidden_plugins]
def get_reg_docgens(self):
""" Return list of registered docgen
"""
return [plg for plg in self.basemgr.get_reg_docgens()
if plg.id not in self.__hidden_plugins]
def get_reg_general(self, category=None):
return [plg for plg in self.basemgr.get_reg_general(category)
if plg.id not in self.__hidden_plugins]
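# A hedged usage sketch (not part of the original module): the singleton is
# always obtained via get_instance(), never by calling the constructor, and
# hiding a plugin both persists the change and re-emits 'plugins-reloaded':
#
#     pmgr = GuiPluginManager.get_instance()
#     reports = pmgr.get_reg_reports(gui=True)
#     pmgr.hide_plugin(reports[0].id)  # 'reports[0].id' is illustrative only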
|
Forage/Gramps
|
gramps/gui/pluginmanager.py
|
Python
|
gpl-2.0
| 12,518
|
[
"Brian"
] |
2499664f50a4a15136a16f2463779356594050bb059424ea749b8619751df39c
|
"""loader_moose.py:
Load a SWC file in MOOSE.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2015, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import numpy as np
import pylab
import moose
import time
import moose.utils as mu
from moose import neuroml
from PyQt4 import Qt, QtCore, QtGui
import matplotlib.pyplot as plt
import sys
sys.path.append('/opt/moose/Demos/util')
import rdesigneur as rd
import os
from moose.neuroml.ChannelML import ChannelML
from _profile import dbEntry
# Global variable to log query to database.
db_query_ = {}
PI = 3.14159265359
frameRunTime = 0.001
FaradayConst = 96485.33  # C/mol
modelName = None
simulator = 'moose'
ncompts = 0
nchans = 0
_args = None
_records = {}
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename='moose.log',
filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
_logger = logging.getLogger('')
def makePlot( cell ):
fig = plt.figure( figsize = ( 10, 12 ) )
chans = ['hd', 'kdr', 'na3', 'nax', 'kap', 'kad']
compts = cell.compartments
epos = cell.electrotonicDistanceFromSoma
gpos = cell.geometricalDistanceFromSoma
combo = list(zip( gpos, compts ))
#combo.sort( key=lambda c:c[1].x)
combo.sort( key= lambda c:c[0] )
for i in chans:
x = []
y = []
for j in combo:
area = j[1].length * j[1].diameter * PI
#x.append( j[1].x )
x.append( j[0] )
if moose.exists( j[1].path + '/' + i ):
elm = moose.element( j[1].path + '/' + i )
y.append( elm.Gbar / area )
else:
y.append( 0.0 )
pylab.plot( x, y, '-bo', label = i )
pylab.legend()
pylab.show()
def saveData( outfile ):
clock = moose.Clock('/clock')
assert clock
yvec = None
for k in _records:
if "soma" in k:
yvec = _records[k].vector
assert yvec is not None, "No soma record found"
xvec = np.linspace(0, clock.currentTime, len(yvec))
with open(outfile, "wb") as f:
f.write("%s,%s\n" % ('time', 'soma'))
for i, t in enumerate(xvec):
f.write("%s,%s\n" % (t, yvec[i]))
_logger.debug("Done writing to file %s" % outfile)
def loadModel(filename, args):
"""Load the model and insert channels """
global modelName
global nchans, ncompts
# Load in the swc file.
modelName = "elec"
cellProto = [ ( filename, modelName ) ]
passiveDistrib = []
chanDistrib = []
if args.insert_channels:
chanProto = [
['./chans/hd.xml'],
['./chans/kap.xml'],
['./chans/kad.xml'],
['./chans/kdr.xml'],
['./chans/na3.xml'],
['./chans/nax.xml'],
]
passiveDistrib = [
[ ".", "#", "RM", "2.8", "CM", "0.01", "RA", "1.5",
"Em", "-58e-3", "initVm", "-65e-3" ],
[ ".", "#axon#", "RA", "0.5" ]
]
for c in args.insert_channels:
chanDistrib.append( c.split(";"))
rdes = rd.rdesigneur( cellProto = cellProto
, combineSegments = True
, passiveDistrib = passiveDistrib
, chanProto = chanProto
, chanDistrib = chanDistrib
)
rdes.buildModel('/model')
compts = moose.wildcardFind( "/model/%s/#[ISA=CompartmentBase]"%modelName )
setupStimuls( compts[0] )
for compt in compts:
vtab = moose.Table( '%s/vm' % compt.path )
moose.connect( vtab, 'requestOut', compt, 'getVm' )
_records[compt.path] = vtab
nchans = len(set([x.path for x in
moose.wildcardFind('/model/elec/##[TYPE=ZombieHHChannel]')])
)
_logger.info("Total channels: %s" % nchans)
def setupStimuls(compt):
global _args
command = moose.PulseGen('%s/command' % compt.path)
_logger.info("Injecting {} Amps into {} for {} seconds".format(
_args.inject
, compt.path
, _args.sim_time)
)
command.level[0] = _args.inject
command.delay[0] = 0
command.width[0] = _args.sim_time
m = moose.connect(command, 'output', compt, 'injectMsg')
def plots(filter='soma'):
global _records
global _args
toPlot = []
tables = {}
for k in _records:
if filter in k:
toPlot.append(k)
for k in toPlot:
tables[k] = _records[k]
mu.plotRecords(tables, subplot=True) #, outfile=_args.plots)
plt.show()
def countSpike():
import count_spike
global db_query_
soma = None
for k in _records.keys():
if "soma" in k.lower():
soma = _records[k].vector
break
if soma is not None and len(soma) > 0:
nSpikes, meanDT, varDT = count_spike.spikes_characterization( soma )
db_query_['number_of_spikes'] = nSpikes
db_query_['mean_spike_interval'] = meanDT
db_query_['variance_spike_interval'] = varDT
_logger.info("[MOOSE] Spike characteristics:")
_logger.info("\t num_spikes: {}, mean_dt: {}, var_dt: {}".format(
nSpikes, meanDT, varDT)
)
def main(args):
global _args
_args = args
global ncompts, nchans
loadModel(args.swc_file, args)
moose.reinit()
compts = moose.wildcardFind( "/model/%s/#[ISA=CompartmentBase]" % modelName )
ncompts = len(compts)
startt = time.time()
moose.start(args.sim_time)
t = time.time() - startt
db_query_['simulator'] = 'moose'
db_query_['number_of_compartments'] = ncompts
db_query_['number_of_channels'] = nchans
db_query_['simulation_time'] = args.sim_time
db_query_['run_time'] = t
db_query_['dt'] = args.sim_dt
db_query_['model_name'] = args.swc_file
countSpike()
dbEntry(db_query_)
saveData(outfile="_data/moose.csv")
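# Hypothetical CLI entry point (a sketch only; the original benchmark driver
# may differ). The flag names are assumptions mirroring the attributes that
# main() and loadModel() access: swc_file, sim_time, sim_dt, inject,
# insert_channels.
#
#     if __name__ == '__main__':
#         import argparse
#         parser = argparse.ArgumentParser(description='Simulate a SWC morphology in MOOSE')
#         parser.add_argument('--swc_file', required=True)
#         parser.add_argument('--sim_time', type=float, default=0.1)
#         parser.add_argument('--sim_dt', type=float, default=1e-5)
#         parser.add_argument('--inject', type=float, default=1e-10)
#         parser.add_argument('--insert_channels', nargs='*', default=[])
#         main(parser.parse_args())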
|
BhallaLab/benchmarks
|
neuro_morpho/loader_moose.py
|
Python
|
gpl-2.0
| 6,492
|
[
"MOOSE"
] |
650304f9db6ff26e1ee44cfb76ff24fc84e037f4879da9d0eab2f5f0150182e2
|
"""Integration with Python standard library module urllib2: OpenerDirector
class.
Copyright 2004-2006 John J Lee <jjl@pobox.com>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import os, urllib2, bisect, httplib, types, tempfile
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
set
except NameError:
import sets
set = sets.Set
import _file
import _http
from _request import Request
import _response
import _rfc3986
import _sockettimeout
import _upgrade
from _util import isstringlike
class ContentTooShortError(urllib2.URLError):
def __init__(self, reason, result):
urllib2.URLError.__init__(self, reason)
self.result = result
def set_request_attr(req, name, value, default):
try:
getattr(req, name)
except AttributeError:
setattr(req, name, default)
if value is not default:
setattr(req, name, value)
class OpenerDirector(urllib2.OpenerDirector):
def __init__(self):
urllib2.OpenerDirector.__init__(self)
# really none of these are (sanely) public -- the lack of initial
# underscore on some is just due to following urllib2
self.process_response = {}
self.process_request = {}
self._any_request = {}
self._any_response = {}
self._handler_index_valid = True
self._tempfiles = []
def add_handler(self, handler):
if handler in self.handlers:
return
# XXX why does self.handlers need to be sorted?
bisect.insort(self.handlers, handler)
handler.add_parent(self)
self._handler_index_valid = False
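# Handler method names encode how each handler gets indexed below:
#   "<scheme>_open"                  -> protocol handler (e.g. http_open)
#   "<scheme>_request"/"_response"   -> pre-/post-processor
#   "<scheme>_error_<code>"          -> error handler (e.g. http_error_302)
#   "any_request"/"any_response"     -> runs for every scheme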
def _maybe_reindex_handlers(self):
if self._handler_index_valid:
return
handle_error = {}
handle_open = {}
process_request = {}
process_response = {}
any_request = set()
any_response = set()
unwanted = []
for handler in self.handlers:
added = False
for meth in dir(handler):
if meth in ["redirect_request", "do_open", "proxy_open"]:
# oops, coincidental match
continue
if meth == "any_request":
any_request.add(handler)
added = True
continue
elif meth == "any_response":
any_response.add(handler)
added = True
continue
ii = meth.find("_")
scheme = meth[:ii]
condition = meth[ii+1:]
if condition.startswith("error"):
jj = meth[ii+1:].find("_") + ii + 1
kind = meth[jj+1:]
try:
kind = int(kind)
except ValueError:
pass
lookup = handle_error.setdefault(scheme, {})
elif condition == "open":
kind = scheme
lookup = handle_open
elif condition == "request":
kind = scheme
lookup = process_request
elif condition == "response":
kind = scheme
lookup = process_response
else:
continue
lookup.setdefault(kind, set()).add(handler)
added = True
if not added:
unwanted.append(handler)
for handler in unwanted:
self.handlers.remove(handler)
# sort indexed methods
# XXX could be cleaned up
for lookup in [process_request, process_response]:
for scheme, handlers in lookup.iteritems():
lookup[scheme] = handlers
for scheme, lookup in handle_error.iteritems():
for code, handlers in lookup.iteritems():
handlers = list(handlers)
handlers.sort()
lookup[code] = handlers
for scheme, handlers in handle_open.iteritems():
handlers = list(handlers)
handlers.sort()
handle_open[scheme] = handlers
# cache the indexes
self.handle_error = handle_error
self.handle_open = handle_open
self.process_request = process_request
self.process_response = process_response
self._any_request = any_request
self._any_response = any_response
def _request(self, url_or_req, data, visit,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
if isstringlike(url_or_req):
req = Request(url_or_req, data, visit=visit, timeout=timeout)
else:
# already a urllib2.Request or mechanize.Request instance
req = url_or_req
if data is not None:
req.add_data(data)
# XXX yuck
set_request_attr(req, "visit", visit, None)
set_request_attr(req, "timeout", timeout,
_sockettimeout._GLOBAL_DEFAULT_TIMEOUT)
return req
def open(self, fullurl, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
req = self._request(fullurl, data, None, timeout)
req_scheme = req.get_type()
self._maybe_reindex_handlers()
# pre-process request
# XXX should we allow a Processor to change the URL scheme
# of the request?
request_processors = set(self.process_request.get(req_scheme, []))
request_processors.update(self._any_request)
request_processors = list(request_processors)
request_processors.sort()
for processor in request_processors:
for meth_name in ["any_request", req_scheme+"_request"]:
meth = getattr(processor, meth_name, None)
if meth:
req = meth(req)
# In Python >= 2.4, .open() supports processors already, so we must
# call ._open() instead.
urlopen = getattr(urllib2.OpenerDirector, "_open",
urllib2.OpenerDirector.open)
response = urlopen(self, req, data)
# post-process response
response_processors = set(self.process_response.get(req_scheme, []))
response_processors.update(self._any_response)
response_processors = list(response_processors)
response_processors.sort()
for processor in response_processors:
for meth_name in ["any_response", req_scheme+"_response"]:
meth = getattr(processor, meth_name, None)
if meth:
response = meth(req, response)
return response
def error(self, proto, *args):
if proto in ['http', 'https']:
# XXX http[s] protocols are special-cased
dict = self.handle_error['http'] # https is handled the same as http
proto = args[2] # YUCK!
meth_name = 'http_error_%s' % proto
http_err = 1
orig_args = args
else:
dict = self.handle_error
meth_name = proto + '_error'
http_err = 0
args = (dict, proto, meth_name) + args
result = apply(self._call_chain, args)
if result:
return result
if http_err:
args = (dict, 'default', 'http_error_default') + orig_args
return apply(self._call_chain, args)
BLOCK_SIZE = 1024*8
def retrieve(self, fullurl, filename=None, reporthook=None, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
"""Returns (filename, headers).
For remote objects, the default filename will refer to a temporary
file. Temporary files are removed when the OpenerDirector.close()
method is called.
For file: URLs, at present the returned filename is None. This may
change in future.
If the actual number of bytes read is less than indicated by the
Content-Length header, raises ContentTooShortError (a URLError
subclass). The exception's .result attribute contains the (filename,
headers) that would have been returned.
"""
req = self._request(fullurl, data, False, timeout)
scheme = req.get_type()
fp = self.open(req)
headers = fp.info()
if filename is None and scheme == 'file':
# XXX req.get_selector() seems broken here, return None,
# pending sanity :-/
return None, headers
#return urllib.url2pathname(req.get_selector()), headers
if filename:
tfp = open(filename, 'wb')
else:
path = _rfc3986.urlsplit(req.get_full_url())[2]
suffix = os.path.splitext(path)[1]
fd, filename = tempfile.mkstemp(suffix)
self._tempfiles.append(filename)
tfp = os.fdopen(fd, 'wb')
result = filename, headers
bs = self.BLOCK_SIZE
size = -1
read = 0
blocknum = 0
if reporthook:
if "content-length" in headers:
size = int(headers["Content-Length"])
reporthook(blocknum, bs, size)
while 1:
block = fp.read(bs)
if block == "":
break
read += len(block)
tfp.write(block)
blocknum += 1
if reporthook:
reporthook(blocknum, bs, size)
fp.close()
tfp.close()
del fp
del tfp
# raise exception if actual size does not match content-length header
if size >= 0 and read < size:
raise ContentTooShortError(
"retrieval incomplete: "
"got only %i out of %i bytes" % (read, size),
result
)
return result
def close(self):
urllib2.OpenerDirector.close(self)
# make it very obvious this object is no longer supposed to be used
self.open = self.error = self.retrieve = self.add_handler = None
if self._tempfiles:
for filename in self._tempfiles:
try:
os.unlink(filename)
except OSError:
pass
del self._tempfiles[:]
def wrapped_open(urlopen, process_response_object, fullurl, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
success = True
try:
response = urlopen(fullurl, data, timeout)
except urllib2.HTTPError, error:
success = False
if error.fp is None: # not a response
raise
response = error
if response is not None:
response = process_response_object(response)
if not success:
raise response
return response
class ResponseProcessingOpener(OpenerDirector):
def open(self, fullurl, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
def bound_open(fullurl, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
return OpenerDirector.open(self, fullurl, data, timeout)
return wrapped_open(
bound_open, self.process_response_object, fullurl, data, timeout)
def process_response_object(self, response):
return response
class SeekableResponseOpener(ResponseProcessingOpener):
def process_response_object(self, response):
return _response.seek_wrapped_response(response)
class OpenerFactory:
"""This class's interface is quite likely to change."""
default_classes = [
# handlers
urllib2.ProxyHandler,
urllib2.UnknownHandler,
_http.HTTPHandler, # derived from new AbstractHTTPHandler
_http.HTTPDefaultErrorHandler,
_http.HTTPRedirectHandler, # bugfixed
urllib2.FTPHandler,
_file.FileHandler,
# processors
_upgrade.HTTPRequestUpgradeProcessor,
_http.HTTPCookieProcessor,
_http.HTTPErrorProcessor,
]
if hasattr(httplib, 'HTTPS'):
default_classes.append(_http.HTTPSHandler)
handlers = []
replacement_handlers = []
def __init__(self, klass=OpenerDirector):
self.klass = klass
def build_opener(self, *handlers):
"""Create an opener object from a list of handlers and processors.
The opener will use several default handlers and processors, including
support for HTTP and FTP.
If any of the handlers passed as arguments are subclasses of the
default handlers, the default handlers will not be used.
"""
opener = self.klass()
default_classes = list(self.default_classes)
skip = []
for klass in default_classes:
for check in handlers:
if type(check) == types.ClassType:
if issubclass(check, klass):
skip.append(klass)
elif type(check) == types.InstanceType:
if isinstance(check, klass):
skip.append(klass)
for klass in skip:
default_classes.remove(klass)
for klass in default_classes:
opener.add_handler(klass())
for h in handlers:
if type(h) == types.ClassType:
h = h()
opener.add_handler(h)
return opener
build_opener = OpenerFactory().build_opener
_opener = None
urlopen_lock = _threading.Lock()
def urlopen(url, data=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
global _opener
if _opener is None:
urlopen_lock.acquire()
try:
if _opener is None:
_opener = build_opener()
finally:
urlopen_lock.release()
return _opener.open(url, data, timeout)
def urlretrieve(url, filename=None, reporthook=None, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
global _opener
if _opener is None:
urlopen_lock.acquire()
try:
if _opener is None:
_opener = build_opener()
finally:
urlopen_lock.release()
return _opener.retrieve(url, filename, reporthook, data, timeout)
def install_opener(opener):
global _opener
_opener = opener
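# A minimal usage sketch of the module-level helpers above (Python 2; it
# assumes mechanize re-exports them, as the public package does):
#
#     import mechanize
#     opener = mechanize.build_opener()            # default handlers/processors
#     response = opener.open("http://example.com/")
#     print response.geturl(), response.info()
#     filename, headers = opener.retrieve("http://example.com/robots.txt")
#     mechanize.install_opener(opener)             # module-level urlopen() now uses it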
|
deanhiller/databus
|
webapp/play1.3.x/samples-and-tests/i-am-a-developer/mechanize/_opener.py
|
Python
|
mpl-2.0
| 14,502
|
[
"VisIt"
] |
e77b2e62eeb5596d11d8c4757b3a5830541fc7ea10c67cb0e79a49ebc694c1ea
|
import json
import math
import os
import random
import getdist
import getdist.plots
import matplotlib.pyplot as pyplot
import numpy
import pymultinest
#
# Model parameter information
#
params_min = [
0,
0,
50,
0
]
params_max = [
1000,
1000,
1000,
1000
]
params_count = len(params_min)
#
# Dataset information
#
data_length = 1000
xdata = numpy.empty(data_length)
ydata = numpy.empty(data_length)
ydata_err = numpy.empty(data_length)
#
# Evaluate gaussian function of 4 parameters (a, b, c, d)
# at point x
#
def evaluate_gaussian(x, a, b, c, d):
return a * math.exp(-((x-b)*(x-b))/(2*c*c)) + d
#
# Calculate parameter values from the 'unit cube' by
# taking into account the parameter prior information.
#
def my_prior(cube, ndim, nparams):
for i in range(ndim):
cube[i] = params_min[i] + (params_max[i] - params_min[i]) * cube[i]
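# Example: cube[i] == 0.5 maps to the midpoint of the prior range, so the
# width parameter c (index 2) becomes 50 + (1000 - 50) * 0.5 = 525.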
#
# Calculate the log likelihood given a set of
# parameter values and the data.
#
def my_loglike(cube, ndim, nparams):
# Extract the model parameter values
a = cube[0]
b = cube[1]
c = cube[2]
d = cube[3]
# Allocate an array to store our model
model_ydata = numpy.empty(data_length)
# Evaluate our model
for i in range(len(ydata)):
model_ydata[i] = evaluate_gaussian(xdata[i], a, b, c, d)
# Calculate weighted chi-squared
chi2 = 0
for i in range(len(ydata)):
chi2 += ((model_ydata[i] - ydata[i]) / ydata_err[i])**2
# Calculate the log likelihood (for Gaussian errors: -chi2/2)
loglike = - 0.5 * chi2
# At last, we are done!
return loglike
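# Note: for independent Gaussian errors, log L = -chi2/2 up to an additive
# constant; dropping the constant shifts the log-evidence by that constant
# but leaves the posterior (and the best-fit parameters) unchanged.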
def main():
# Generate the x values of our data
for i in range(len(xdata)):
xdata[i] = float(i)
# Set the noise stddev of the y values
for i in range(len(ydata)):
ydata_err[i] = 100.0
params_value = [
900,
400,
100,
0
]
# Generate the y values of our data
for i in range(len(ydata)):
# Evaluate gaussian
ydata[i] = evaluate_gaussian(xdata[i],
params_value[0],
params_value[1],
params_value[2],
params_value[3])
# Add noise
ydata[i] += random.gauss(0, ydata_err[i])
filename = 'output/out'
if not os.path.exists('./output'):
os.makedirs('./output')
# Run MultiNest!
pymultinest.run(my_loglike,
my_prior,
params_count,
outputfiles_basename=filename,
resume=False,
verbose=True,
importance_nested_sampling=True,
multimodal=False,
const_efficiency_mode=False,
n_live_points=100,
evidence_tolerance=0.3,
sampling_efficiency=0.8,
max_iter=0)
# Perform result analysis
analyzer = pymultinest.Analyzer(outputfiles_basename=filename, n_params=params_count)
stats = analyzer.get_stats()
# Pretty print results to a JSON file
results_json = json.dumps(stats, sort_keys=True, indent=2)
with open('pretty_results.json', 'w') as f:
f.write(results_json)
# Generate best-fit model
ydata_best_fit = numpy.empty(data_length)
for i in range(len(ydata_best_fit)):
ydata_best_fit[i] = evaluate_gaussian(xdata[i],
stats['modes'][0]['maximum'][0],
stats['modes'][0]['maximum'][1],
stats['modes'][0]['maximum'][2],
stats['modes'][0]['maximum'][3])
# Plot data vs best fit
fig = pyplot.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(ydata, color='#5e3c99')
ax.plot(ydata_best_fit, color='#e66101', linewidth=6)
pyplot.savefig('data_vs_best_fit.png')
# Corner plot
samples = getdist.loadMCSamples(filename+'.txt')
plotter = getdist.plots.getSubplotPlotter()
plotter.triangle_plot([samples], filled=True)
plotter.export('corner_plot.png')
if __name__ == '__main__':
main()
|
caseresearch/code-review
|
code-review/2015--2016/multinest_demo.py
|
Python
|
mit
| 4,355
|
[
"Gaussian"
] |
3ac8a8163070f8af574c40116f09d6eb4b70ad94597ded68c7be624fe309505f
|
"""
This module generates fake curves by "drawing" them from common source splines, adding noise, tweaking microlensing.
"""
import sys, os, time
import numpy as np
import pycs.gen.lc
import pycs.gen.util
import pycs.sim.src
import scipy.ndimage.filters
def sample(lc, spline):
"""
You give me a lightcurve, and I will set the lc.mags according to the spline.
I DO modify the lc object in place !
I do NOT modify the spline, of course.
If your lightcurve has microlensing, magshifts, or fluxshifts, and of course also time shifts,
I will set its mags so that it matches the spline with all of these applied !
If you think about it, this is WONDERFUL !!!
To do this, I have to do something like a reverse getmags().
Note that I do not add any random noise here, this is sampling only.
"""
# lc.telescopename = "sim"
# lc.object += "sim"
lc.telescopename += "sim"
lc.commentlist.append("Magnitudes simulated by source %s" % (spline))
if lc.fluxshift != 0.0:
if (lc.ml != None):
lc.mags = spline.eval(lc.getjds()) - lc.ml.calcmlmags(lc) - lc.magshift
lc.mags -= lc.calcfluxshiftmags(inverse=True) # Don't put this on the above line ... uses lc.mags !
else:
lc.mags = spline.eval(lc.getjds()) - lc.magshift
lc.mags -= lc.calcfluxshiftmags(inverse=True)
else:
if (lc.ml != None):
lc.mags = spline.eval(lc.getjds()) - lc.ml.calcmlmags(lc) - lc.magshift
else:
lc.mags = spline.eval(lc.getjds()) - lc.magshift
def saveresiduals(lcs, spline):
"""
You give me some optimized lcs, and the fitting spline.
This function saves the residuals of the optimized lightcurves (as an attribute), in terms of non-fluxshifted magnitudes.
This means that you will have to ADD these residuals (in terms of magnitude) *before any fluxshifts*
to the spline mags in order to recover the lc.
This is made so that you can reuse this particular shotnoise once fake curves are drawn.
We have to keep this function separate from draw(), as you might want to shift the curves after saving the residuals...
.. warning:: Call this function only when your spline actually matches the lcs !
"""
for l in lcs:
rawmags = l.mags.copy()
samplelc = l.copy()
sample(samplelc, spline)
l.residuals = rawmags - samplelc.mags
# Naive residuals (don't take into account the fluxshift problem) :
# l.residuals = l.getmags() - spline.eval(jds = l.getjds())
# So this is wrong, in case there is a fluxshift
# Nice try, but this was wrong as well :
"""
# We want to compensate for the fluxshift, that "deformed" these residuals :
# Easier to think in terms of flux...
# The following is a bit too explicit, but hopefully clear.
shiftedlcfluxs = 10**(-0.4 * lcmags)
shiftedsplinefluxs = 10**(-0.4 * splinemags)
nonshiftedlcfluxs = shiftedlcfluxs - l.fluxshift
#nonshiftedsplinefluxs = shiftedsplinefluxs - l.fluxshift
nonshiftedsplinefluxs = shiftedsplinefluxs
nonshiftedlcmags = -2.5 * np.log10(nonshiftedlcfluxs)
nonshiftedsplinemags = -2.5 * np.log10(nonshiftedsplinefluxs)
l.residuals = nonshiftedlcmags - nonshiftedsplinemags
"""
def transfershifts(lcs, reflcs, transferml=True):
"""
I will put (copies) of all the shifts and ML from the reflcs onto the lcs.
I will check that you didn't mix up any curves.
"""
if len(lcs) != len(reflcs):
raise RuntimeError("Not the same lengths !")
for (l, refl) in zip(lcs, reflcs):
if l.object != refl.object:
raise RuntimeError("Seems that you mixed up the curves !")
l.timeshift = refl.timeshift
l.fluxshift = refl.fluxshift
l.magshift = refl.magshift
if transferml == True:
if refl.ml == None:
l.rmml()
else:
l.ml = refl.ml.copy()
else:
l.rmml() # Seems safe to do this, it cannot harm at least. Leaving the ML could be unwanted.
def draw(lcs, spline, shotnoise=None, shotnoisefrac=1.0, tweakml=None, scaletweakresi=True, tweakspl=None,
keepshifts=True, keeptweakedml=False, keeporiginalml=True, trace=False, tracedir="draw",
inprint_fake_shifts = None):
"""
Wrapper to produce one set of fake lightcurves that are similar to your lcs.
Give me some lightcurves, that are optimized in fluxshift, magshift, microlensing, timeshift, and the resulting spline fit
representing the intrinsic variations.
I will tweak the source spline and the microlensing etc, and return to you a list of fake lightcurves.
These lightcurves are just blank data points, "as observed". I will build them from scratch.
But note that by default the drawn light curve will be equiped with the same shifts and ML as your lcs.
.. note:: I do NOT modify lcs or spline !
.. note:: It is perfectly ok to shift your input curves in time after the optimization,
to make me build fake lightcurves with other delays !
So if you want me to draw curves that look different from yours, it is perfectly ok to give me lcs that do not match to the spline !
Same logic applies for all other shifts and microlensing.
:param lcs: reference lightcurves to "imitate". I will use their epochs, their time/mag/flux shifts, their microlensing, their errorbars.
:param spline: reference spline from which I will draw my magnitudes.
:param shotnoise: Select among [None, "magerrs", "res", "mcres", "sigma"]
It tells what kind of noise to add to the fake mags.
This noise will, as required, be added to the "observed" data (i.e. not fluxshifted).
:param shotnoisefrac: a multiplier of the shotnoise added to the curve. Set to 0.5 and I'll add only "half" of the noise ...
:param tweakml: either a function, or a list of functions, that takes a list of lightcurves and tweaks their ML in place.
I will use this tweaked ML to draw them.
If you give a single function, I will apply it to all the curves
If you give a list of functions, I will apply them to the respective curves of your lcs (i.e., give the functions in the order corresponding to your lcs !).
:param scaletweakresi: scales the "residuals" obtained by tweakml according to saved residuals
:param tweakspl: a function that takes a spline and returns a tweaked spline. I will use this on
the sourcespline you pass me, before drawing from it.
:param keepshifts: by default I will set the time/flux/mag/ shifts from your lcs also to the fake curves.
:param keeptweakedml: if keepshifts is True, and keeptweakedml is True, I will keep the tweaked ML, not the input ML, on the
output curves. It makes no sense to keep the ML if not keeping the shifts.
:param keeporiginalml: if keepshifts is True, and keeporiginalml is True, I will keep the input ML on the
output curves.
:param inprint_fake_shifts: give an array of shifts corresponding to your lcs that you want to imprint on the mock curves.
.. note:: I will tweak the ML only of those curves that have spline ML. You probably want me to tweak the ML of all your curves !
So be sure to add some spline ML to all your curves before calling me !
"""
if isinstance(tweakml, list):
assert len(tweakml) == len(lcs)
# We build a tweaked copy of the "intrinsic" spline. It will be common to all curves.
if tweakspl != None:
tweakedspline = tweakspl(spline)
else:
tweakedspline = spline # No need to make a copy
# For the lcs, I'll not modify them in place, but return new ones.
# Nevertheless, I work on a copy, as I'll call sample().
copylcs = [l.copy() for l in lcs]
fakelcs = []
for i, l in enumerate(copylcs):
# Before tweaking the microlensing, we make a copy (might be None)
if l.ml != None:
origml = l.ml.copy()
else:
origml = None
# We now tweak the microlensing, before calling sample(). Cleaner this way, so that we can keep it.
# Hmm, I guess we could make this afterwards as well (then apply it)
if tweakml != None:
# Then it should be either just a function, or a list of functions :
if isinstance(tweakml, list):
thislctweakml = tweakml[i]
else:
thislctweakml = tweakml
# We run the tweaking :
if l.ml != None:
thislctweakml([l], spline) # modifies in place
tweakedml = l.ml.copy()
else:
print "WARNING: curve %s has no ML to tweak !" % (str(l))
tweakedml = None
else:
tweakedml = origml
# Imprint the fake shifts before sampling, but after tweaking the ML.
if inprint_fake_shifts is not None :
l.truetimeshift = inprint_fake_shifts[i]
l.timeshift = inprint_fake_shifts[i]
else :
l.truetimeshift= l.timeshift
# And we sample the curve from the source, changing l in place.
sample(l, tweakedspline)
# Rescale the residuals ?
# seems tempting here to do an "autoscaling" so that the sigma of the drawn residuals automatically matches the observed ones.
# This is not possible, as we first need to redo the spline curve shifting to make a fair comparison !
if (scaletweakresi == True) and (l.ml != None): # Otherwise no need to do anything
# We sample from the same spline, but without tweaked ml :
lorigml = l.copy()
lorigml.ml = origml
# Sampling :
sample(lorigml, tweakedspline)
# Only the l.mags are different between l and lorigml.
# Now we can compute the "residuals" that are due to the tweaked ml :
tweakresis = l.mags - lorigml.mags
# print np.std(tweakresis)
# We scale them using previously saved residuals :
if not hasattr(l, 'residuals'):
raise RuntimeError("Save the residuals first !")
scalings = np.fabs(l.residuals)
# We normalise this a first time, to get values around 1.0 :
scalings /= np.mean(scalings)
# print np.mean(scalings)
smoothscalings = scipy.ndimage.filters.median_filter(scalings, size=7, mode='constant', cval=1.0)
# We normalise these again :
smoothscalings /= np.mean(smoothscalings)
# print np.mean(smoothscalings)
"""
import matplotlib.pyplot as plt
plt.plot(l.jds, scalings)
plt.plot(l.jds, smoothscalings, color="red")
plt.show()
exit()
"""
# print np.std(tweakresis*smoothscalings)
l.mags = lorigml.mags + tweakresis * smoothscalings
# And add purely uncorrelated noise on top of all this.
# In future we should think about the fluxshifts here,
# and not blindly add this according to the errorbars.
if shotnoise == "magerrs":
l.montecarlomags(f=shotnoisefrac)
elif shotnoise == "none" or shotnoise == None:
pass
elif shotnoise == "res":
if not hasattr(l, 'residuals'):
raise RuntimeError("Save the residuals first !")
l.mags += shotnoisefrac * l.residuals.copy()
l.commentlist.append("Added previously saved residuals !")
elif shotnoise == "mcres":
# We use the residuals as 1 sigma of a gaussian to draw the shotnoise from.
if not hasattr(l, 'residuals'):
raise RuntimeError("Save the residuals first !")
l.mags += l.residuals * shotnoisefrac * np.random.randn(len(l))
l.commentlist.append("Monte Carlo with previously saved residuals as sigma !")
elif shotnoise == "sigma":
# We use the std of the residuals as 1-sigma amplitude of white noise to add.
if not hasattr(l, 'residuals'):
raise RuntimeError("Save the residuals first !")
sigma = np.std(l.residuals)
l.mags += sigma * shotnoisefrac * np.random.randn(len(l))
l.commentlist.append("White noise with std of residuals as sigma !")
else:
raise RuntimeError("Couldn't understand your shotnoise.")
# Usually we won't want to return the tweaked ML.
# So let's build a brand new lightcurve.
# This way we can be sure that the new one will "forget" all timeshifts, ML, fluxshifts etc if we want so.
jds = l.jds.copy() # We do not use getjds (we simulate "real data" taken at the same absolute epochs)
mags = l.mags.copy() # Idem, we do not use getmags (as we might keep magshift, ML, etc)
magerrs = l.magerrs.copy()
# telescopename = "Fake(%s)" % (sl.telescopename)
telescopename = l.telescopename
object = l.object
fakel = pycs.gen.lc.factory(jds, mags, magerrs, telescopename=telescopename, object=object, verbose=False)
fakel.plotcolour = l.plotcolour
# fakel.ploterrorbars = False
# In any case we want to keep the true shifts of each curve stored under another attribute :
fakel.truetimeshift = l.truetimeshift
# And we might even want to keep the simulation shifts that make the points fit to the spline :
if keepshifts:
fakel.timeshift = l.timeshift
fakel.fluxshift = l.fluxshift
fakel.magshift = l.magshift
if keeporiginalml and keeptweakedml:
raise RuntimeError('I cannot keep both the tweaked ML and the original ML in your mock curve ! ')
if keeptweakedml:
fakel.ml = tweakedml # Yes, it's that simple ! fakel and l have the same jds, after all.
elif keeporiginalml:
fakel.ml = origml
fakelcs.append(fakel)
# For trace, I save the tweaked ML and the tweaked spline.
if trace:
pycs.gen.util.trace(lclist=copylcs, splist=[tweakedspline], tracedir=tracedir)
return fakelcs
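# A minimal usage sketch for draw() (hypothetical names; "lcs" and "spline"
# are assumed to come from a previous pycs spline optimization):
#
#     saveresiduals(lcs, spline)   # required for shotnoise in ("res", "mcres", "sigma")
#     fakelcs = draw(lcs, spline, shotnoise="mcres")
#
# With the default keepshifts/keeporiginalml, the fake curves keep the shifts
# and the original (untweaked) ML of your lcs.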
def multidraw(lcs, spline=None, optfctnots=None, onlycopy=False, n=20, npkl=5, simset="draw", simdir=None,
shotnoise=None, shotnoisefrac=1.0, truetsr=8.0, tweakml=None, scaletweakresi=True, tweakspl=None,
shuffle=True, verbose=True, trace=False, destpath='./'):
"""
Even higher wrapper to produce mock + tweaked lightcurves, and save them into a directory (as pickle files),
in preparation for analysing them with :py:func:`pycs.sim.run.multirun`
The curves I return are "like observed" : they have no shifts, no ML. Just datapoints.
:param lcs: The starting-point lightcurves from which I will draw the mock data. They must match (!), i.e.,
have appropriate microlensing and be optimized somehow.
:type lcs: list
:param spline: The source spline used to draw the new curves. Is not used (-> None) if onlycopy=True, or if optfct is specified.
:type spline: spline
:param optfctnots: A function to fit a spline and the ML + all shifts except for timeshifts.
It is called after setting the true time shifts. Put at None if you want to use always the same current ML and spline.
:type optfctnots: function
:param onlycopy: If True, I will simply save copies of the input lcs, not drawing anything.
:param n: number of lightcurve-lists to simulate per pickle
:param npkl: number of pickle files
:param simset: give a name to your simulation !
:param simdir: where should I put these simulations ?
:param shotnoise: Select among None, "magerrs", "mcres", "res".
See definitions in :py:func:`pycs.sim.draw.draw`.
:type shotnoise: string
:param truetsr: radius of exploration for uniformly selected true time shifts.
Not used if onlycopy == True, of course.
:param shuffle: Shuffle the curves before feeding them into optfctnots ?
If you use this, be sure to pycs.gen.lc.objsort the curves before !
.. todo:: Possibility to add shot noise even with draw = False.
Like in the good old days. -> Separate addshotnoise from multidraw ?
.. note:: I will tweak the ML only of those curves that have spline ML. You probably want me to tweak the ML of all your curves !
So be sure to add some spline ML to all your curves before calling me !
"""
if simdir == None:
destdir = destpath + "sims_%s" % (simset)
else:
destdir = destpath + simdir
if verbose:
print "Now thowing dice into %s ..." % destdir
# We prepare the destination directory
if not os.path.isdir(destdir):
os.mkdir(destdir)
else:
if verbose:
print "The directory exists, I'll add my new curves."
# Print out some info
if verbose:
print "Input shifts :"
print pycs.gen.lc.getnicetimeshifts(lcs, separator=" | ")
print "Input delays :"
print pycs.gen.lc.getnicetimedelays(lcs, separator=" | ")
origshifts = np.array([l.timeshift for l in lcs]) # the mean shifts for the simulations
# In case we trace :
if trace:
# We put the "real curves" once in each tracedir :
if spline == None:
splist = []
else:
splist = [spline]
pycs.gen.util.trace(lclist=lcs, splist=splist, tracedir="trace_sims_%s_tweak" % (simset))
rawlcs = [l.copy() for l in lcs]
for l in rawlcs: # to get the unshifted curve :
l.resetshifts()
pycs.gen.util.trace(lclist=rawlcs, splist=[], tracedir="trace_sims_%s_draw" % (simset))
for i in range(npkl):
pklfilepath = os.path.join(destdir, "%i_%.5f.pkl" % (i + 1, float(time.time())))
if onlycopy == True:
if verbose:
print "Preparing %i identical copies for pkl %i/%i ..." % (n, (i + 1), npkl)
simlcslist = [[l.copy() for l in lcs] for ni in range(n)]
# We remove any microlensing or shifts :
for simlcs in simlcslist:
for l in simlcs:
l.resetshifts()
else:
if verbose:
print "Drawing %i simulations for pkl %i/%i ..." % (n, (i + 1), npkl)
simlcslist = []
# The absolute shifts to set on the lcs :
sampleshifts = [np.random.uniform(low=-truetsr, high=truetsr, size=(len(lcs))) + origshifts for i in
range(n)]
for (i, shifts) in enumerate(sampleshifts):
# So this loop is run for every simulated data set.
# We work on copies of lcs, as we will change them !
lcscopies = [l.copy() for l in lcs]
spline_copy = spline.copy()
# We set the absolute true shifts
# for (l, shift) in zip(lcscopies, shifts):
# l.timeshift = shift
if optfctnots == None: # Then we just call draw on these time-shifted curves, using the provided spline and ML etc.
# print [lcscopies[i].timeshift for i in range(len(lcscopies))]
simlcs = draw(lcscopies, spline_copy, shotnoise=shotnoise, shotnoisefrac=shotnoisefrac, tweakml=tweakml,
scaletweakresi=scaletweakresi, tweakspl=tweakspl, keepshifts=False,
keeptweakedml=False, keeporiginalml=False, trace=trace, inprint_fake_shifts = shifts,
tracedir="trace_sims_%s_tweak" % (simset))
# pycs.gen.lc.display([simlcs[0]], [], showlegend=True, showdelays=True, filename="screen")
# print [simlcs[i].timeshift for i in range(len(simlcs))]
# print [simlcs[i].truetimeshift for i in range(len(simlcs))]
else:
# We fit the custom ML + all shifts except time and get a new spline
# We start on lcscopies
if shuffle:
pycs.gen.lc.shuffle(lcscopies)
indispline = optfctnots(
lcscopies) # Sets ML, mag and flux shifts, but does not change the time shifts.
# Very important : we sort the lightcurves after this optimization !
# So that the tweakml will correspond.
if shuffle:
pycs.gen.lc.objsort(lcscopies, verbose=False)
saveresiduals(lcscopies, indispline) # in case mcres
simlcs = draw(lcscopies, indispline, shotnoise=shotnoise, shotnoisefrac=shotnoisefrac,
tweakml=tweakml, scaletweakresi=scaletweakresi, tweakspl=tweakspl, keepshifts=False,
keeptweakedml=False, keeporiginalml=False, trace=trace,
tracedir="trace_sims_%s_tweak" % (simset))
simlcslist.append(simlcs)
# We save the simlcslist into the pkl file
pycs.gen.util.writepickle(simlcslist, pklfilepath, verbose=verbose)
# The trace with tweak is already done by multidraw. We add a trace of the drawn curves :
if trace:
for simlcs in simlcslist:
pycs.gen.util.trace(lclist=simlcs, splist=[], tracedir="trace_sims_%s_draw" % (simset))
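# A minimal usage sketch for multidraw() (hypothetical simset name):
#
#     saveresiduals(lcs, spline)
#     multidraw(lcs, spline, n=20, npkl=5, simset="mocks",
#               shotnoise="mcres", truetsr=8.0)
#
# This writes npkl pickle files into ./sims_mocks/, ready to be analysed with
# pycs.sim.run.multirun.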
def shareflux(lc1, lc2, frac=0.01):
"""
I add "noise" to lc1 and lc2 by randomly sharing flux between the two sources.
:param frac: The stddev of the gaussian "noise" in flux, with respect to the minimum flux in the curves.
"""
if not np.all(lc1.jds == lc2.jds):
raise RuntimeError("I do only work on curves with identical jds !")
# lc1fs = lc1.getrawfluxes()
# lc2fs = lc2.getrawfluxes()
minshift = np.fabs(max(lc1.getminfluxshift(), lc2.getminfluxshift()))
shifts = frac * minshift * np.random.randn(len(lc1))
shifts = np.clip(shifts, -minshift + 1.0, minshift - 1.0) # To guarantee that we won't get negative fluxes
lc1.addfluxes(shifts)
lc2.addfluxes(-shifts)
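# Example (hypothetical curves with identical jds):
#
#     shareflux(lca, lcb, frac=0.02)
#
# adds anti-correlated flux "noise" with a standard deviation of about 2% of
# the minimum flux, clipped so neither curve goes to negative flux.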
|
COSMOGRAIL/PyCS
|
pycs/sim/draw.py
|
Python
|
gpl-3.0
| 22,455
|
[
"Gaussian"
] |
9f4a25c2e810cd976c5cca6e8c092c7601779a261f88857df537c7909e344efe
|
# This file is part of PyEMMA.
#
# Copyright (c) 2014-2017 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
def topology_to_numpy(top):
"""Convert this topology into a pandas dataframe
Returns
-------
atoms : np.ndarray dtype=[("serial", 'i4'), ("name", 'S4'), ("element", 'S3'),
("resSeq", 'i4'), ("resName",'S4'), ("chainID", 'i4'), ("segmentID", 'S4')]
The atoms in the topology, represented as a structured array.
bonds : np.ndarray
The bonds in this topology, represented as an n_bonds x 2 array
of the indices of the atoms involved in each bond.
"""
data = [(atom.serial, atom.name, atom.element.symbol,
atom.residue.resSeq, atom.residue.name,
atom.residue.chain.index, atom.segment_id) for atom in top.atoms]
atoms = np.array(data,
dtype=[("serial", 'i4'), ("name", 'S4'), ("element", 'S3'),
("resSeq", 'i4'), ("resName", 'S4'), ("chainID", 'i4'), ("segmentID", 'S4')])
bonds = np.fromiter(((a.index, b.index) for (a, b) in top.bonds), dtype='i4,i4', count=top.n_bonds)
return atoms, bonds
def topology_from_numpy(atoms, bonds=None):
"""Create a mdtraj topology from numpy arrays
Parameters
----------
atoms : np.ndarray
The atoms in the topology, represented as a structured array. This
array should have fields "serial" (atom index), "name" (atom name),
"element" (atom's element), "resSeq" (index of the residue),
"resName" (name of the residue), "chainID" (index of the chain),
and optionally "segmentID", following the same conventions
as wwPDB 3.0 format.
bonds : np.ndarray, shape=(n_bonds, 2), dtype=int, optional
The bonds in the topology, represented as an n_bonds x 2 array
of the indices of the atoms involved in each bond. Specifying
bonds here is optional. To create standard protein bonds, you can
use `create_standard_bonds` to "fill in" the bonds on your newly
created Topology object
See Also
--------
create_standard_bonds
"""
if bonds is None:
bonds = np.zeros((0, 2))
for col in ["name", "element", "resSeq",
"resName", "chainID", "serial"]:
if col not in atoms.dtype.names:
raise ValueError('atoms array must have field %s' % col)
if "segmentID" not in atoms.dtype.names:
atoms["segmentID"] = ""
import mdtraj
from mdtraj.core.topology import Atom
from mdtraj.core import element as elem
out = mdtraj.Topology()
# TODO: allow for h5py data sets here, is there a way to check generic ndarray interface?
#if not isinstance(bonds, np.ndarray):
# raise TypeError('bonds must be an instance of numpy.ndarray. '
# 'You supplied a %s' % type(bonds))
out._atoms = [None for _ in range(len(atoms))]
N = np.arange(0, len(atoms))
for ci in np.unique(atoms['chainID']):
chain_atoms = atoms[atoms['chainID'] == ci]
subN = N[atoms['chainID'] == ci]
c = out.add_chain()
for ri in np.unique(chain_atoms['resSeq']):
residue_atoms = chain_atoms[chain_atoms['resSeq'] == ri]
mask = subN[chain_atoms['resSeq'] == ri]
indices = N[mask]
rnames = residue_atoms['resName']
residue_name = np.array(rnames)[0]
segids = residue_atoms['segmentID']
segment_id = np.array(segids)[0]
if not np.all(rnames == residue_name):
raise ValueError('All of the atoms with residue index %d '
'do not share the same residue name' % ri)
r = out.add_residue(residue_name.decode('ascii'), c, ri, segment_id.decode('ascii'))
for ix, atom in enumerate(residue_atoms):
e = atom['element'].decode('ascii')
a = Atom(atom['name'].decode('ascii'), elem.get_by_symbol(e),
int(indices[ix]), r, serial=atom['serial'])
out._atoms[indices[ix]] = a
r._atoms.append(a)
for ai1, ai2 in bonds:
out.add_bond(out.atom(ai1), out.atom(ai2))
out._numAtoms = out.n_atoms
return out
def setstate(self, state):
atoms, bonds = state['atoms'], state['bonds']
out = topology_from_numpy(atoms, bonds)
self.__dict__ = out.__dict__
def getstate(self):
atoms, bonds = topology_to_numpy(self)
return dict(atoms=atoms, bonds=bonds)
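# A minimal round-trip sketch (assumes mdtraj is installed and "top" is any
# mdtraj.Topology instance):
#
#     atoms, bonds = topology_to_numpy(top)
#     top2 = topology_from_numpy(atoms, bonds)
#     assert top2.n_atoms == top.n_atoms and top2.n_bonds == top.n_bonds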
|
marscher/PyEMMA
|
pyemma/_base/serialization/mdtraj_helpers.py
|
Python
|
lgpl-3.0
| 5,240
|
[
"MDTraj"
] |
7c357ba4aed3e0d59abcabc50beadee3be5a0f0aae1aa2342f5205f31b06b5d7
|
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from .models import (Building, Child, Device, Port, Item, Country, Connection,
ClientStatus, State, Client, SpecialClient, TUser, Person, Student,
Organizer, Class, Enrollment)
class SelectRelatedRegressTests(TestCase):
def test_regression_7110(self):
"""
Regression test for bug #7110.
When using select_related(), we must query the
Device and Building tables using two different aliases (each) in order to
differentiate the start and end Connection fields. The net result is that
both the "connections = ..." queries here should give the same results
without pulling in more than the absolute minimum number of tables
(history has shown that it's easy to make a mistake in the implementation
and include some unnecessary bonus joins).
"""
b = Building.objects.create(name='101')
dev1 = Device.objects.create(name="router", building=b)
dev2 = Device.objects.create(name="switch", building=b)
dev3 = Device.objects.create(name="server", building=b)
port1 = Port.objects.create(port_number='4', device=dev1)
port2 = Port.objects.create(port_number='7', device=dev2)
port3 = Port.objects.create(port_number='1', device=dev3)
c1 = Connection.objects.create(start=port1, end=port2)
c2 = Connection.objects.create(start=port2, end=port3)
connections = Connection.objects.filter(start__device__building=b, end__device__building=b).order_by('id')
self.assertEqual([(c.id, unicode(c.start), unicode(c.end)) for c in connections],
[(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])
connections = Connection.objects.filter(start__device__building=b, end__device__building=b).select_related().order_by('id')
self.assertEqual([(c.id, unicode(c.start), unicode(c.end)) for c in connections],
[(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])
# This final query should only have seven tables (port, device and building
# twice each, plus connection once). Thus, 6 joins plus the FROM table.
self.assertEqual(str(connections.query).count(" JOIN "), 6)
def test_regression_8106(self):
"""
Regression test for bug #8106.
Same sort of problem as the previous test, but this time there are
more extra tables to pull in as part of the select_related() and some
of them could potentially clash (so need to be kept separate).
"""
us = TUser.objects.create(name="std")
usp = Person.objects.create(user=us)
uo = TUser.objects.create(name="org")
uop = Person.objects.create(user=uo)
s = Student.objects.create(person=usp)
o = Organizer.objects.create(person=uop)
c = Class.objects.create(org=o)
e = Enrollment.objects.create(std=s, cls=c)
e_related = Enrollment.objects.all().select_related()[0]
self.assertEqual(e_related.std.person.user.name, "std")
self.assertEqual(e_related.cls.org.person.user.name, "org")
def test_regression_8036(self):
"""
Regression test for bug #8036
the first related model in the tests below
("state") is empty and we try to select the more remotely related
state__country. The regression here was not skipping the empty column results
for country before getting status.
"""
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
client = Client.objects.create(name='client', status=active)
self.assertEqual(client.status, active)
self.assertEqual(Client.objects.select_related()[0].status, active)
self.assertEqual(Client.objects.select_related('state')[0].status, active)
self.assertEqual(Client.objects.select_related('state', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('status')[0].status, active)
def test_multi_table_inheritance(self):
""" Exercising select_related() with multi-table model inheritance. """
c1 = Child.objects.create(name="child1", value=42)
i1 = Item.objects.create(name="item1", child=c1)
i2 = Item.objects.create(name="item2")
self.assertQuerysetEqual(
Item.objects.select_related("child").order_by("name"),
["<Item: item1>", "<Item: item2>"]
)
def test_regression_12851(self):
"""
Regression for #12851
Deferred fields are used correctly if you select_related a subset
of fields.
"""
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
wa = State.objects.create(name="Western Australia", country=australia)
c1 = Client.objects.create(name='Brian Burke', state=wa, status=active)
burke = Client.objects.select_related('state').defer('state__name').get(name='Brian Burke')
self.assertEqual(burke.name, 'Brian Burke')
self.assertEqual(burke.state.name, 'Western Australia')
# Still works if we're dealing with an inherited class
sc1 = SpecialClient.objects.create(name='Troy Buswell', state=wa, status=active, value=42)
troy = SpecialClient.objects.select_related('state').defer('state__name').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
# Still works if we defer an attribute on the inherited class
troy = SpecialClient.objects.select_related('state').defer('value', 'state__name').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
# Also works if you use only, rather than defer
troy = SpecialClient.objects.select_related('state').only('name', 'state').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
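# These tests live in Django's old tests/regressiontests tree; in that layout
# they are usually run with something like (a sketch of the usual invocation,
# not part of this file):
#
#     ./runtests.py --settings=test_sqlite select_related_regress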
|
rebost/django
|
tests/regressiontests/select_related_regress/tests.py
|
Python
|
bsd-3-clause
| 6,546
|
[
"Brian"
] |
f87fb6a4d856eb27089eb4c8c22824474183aa3e95ef555af6f4a6bd119a9d1a
|
# coding: utf-8
from __future__ import division, unicode_literals
"""
This module provides classes to create phase diagrams.
"""
from six.moves import filter
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "2.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Nov 25, 2012"
import collections
import numpy as np
from pyhull.simplex import Simplex
from pymatgen.serializers.json_coders import PMGSONable, MontyDecoder
try:
# If scipy ConvexHull exists, use it because it is faster for large hulls.
# This requires scipy >= 0.12.0.
from scipy.spatial import ConvexHull
HULL_METHOD = "scipy"
except ImportError:
# Fall back to pyhull if scipy >= 0.12.0 does not exist.
from pyhull.convex_hull import ConvexHull
HULL_METHOD = "pyhull"
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.composition import Composition
from pymatgen.phasediagram.entries import GrandPotPDEntry, TransformedPDEntry
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.core.periodic_table import DummySpecie, Element
from pymatgen.analysis.reaction_calculator import Reaction, ReactionError
class PhaseDiagram (PMGSONable):
"""
Simple phase diagram class taking in elements and entries as inputs.
The algorithm is based on the work in the following papers:
1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
doi:10.1021/cm702327g
2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
doi:10.1016/j.elecom.2010.01.010
.. attribute: elements:
Elements in the phase diagram.
..attribute: all_entries
All entries provided for Phase Diagram construction. Note that this
does not mean that all these entries are actually used in the phase
diagram. For example, this includes the positive formation energy
entries that are filtered out before Phase Diagram construction.
.. attribute: qhull_data
Data used in the convex hull operation. This is essentially a matrix of
composition data and energy per atom values created from qhull_entries.
.. attribute: dim
The dimensionality of the phase diagram.
.. attribute: facets
Facets of the phase diagram in the form of [[1,2,3],[4,5,6]...]
.. attribute: el_refs:
List of elemental references for the phase diagrams. These are
entries corresponding to the lowest energy element entries for simple
compositional phase diagrams.
.. attribute: qhull_entries:
Actual entries used in convex hull. Excludes all positive formation
energy entries.
"""
# Tolerance for determining if formation energy is positive.
formation_energy_tol = 1e-11
def __init__(self, entries, elements=None):
"""
Standard constructor for phase diagram.
Args:
entries ([PDEntry]): A list of PDEntry-like objects having an
energy, energy_per_atom and composition.
elements ([Element]): Optional list of elements in the phase
diagram. If set to None, the elements are determined from
the entries themselves.
"""
if elements is None:
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
elements = list(elements)
dim = len(elements)
el_refs = {}
for el in elements:
el_entries = list(filter(lambda e: e.composition.is_element and
e.composition.elements[0] == el,
entries))
if len(el_entries) == 0:
raise PhaseDiagramError(
"There are no entries associated with terminal {}."
.format(el))
el_refs[el] = min(el_entries, key=lambda e: e.energy_per_atom)
data = []
for entry in entries:
comp = entry.composition
row = [comp.get_atomic_fraction(el) for el in elements]
row.append(entry.energy_per_atom)
data.append(row)
data = np.array(data)
self.all_entries_hulldata = data[:, 1:]
#use only entries with negative formation energy
vec = [el_refs[el].energy_per_atom for el in elements] + [-1]
form_e = -np.dot(data, vec)
#make sure that if there are multiple entries at the same composition
#within 1e-4 eV/atom of each other, only use the lower energy one.
#This fixes the precision errors in the convex hull.
#This is significantly faster than grouping by composition and then
#taking the lowest energy of each group
ind = []
prev_c = [] # compositions within 1e-4 of current entry
prev_e = [] # energies of those compositions
for i in np.argsort([e.energy_per_atom for e in entries]):
if form_e[i] > -self.formation_energy_tol:
continue
epa = entries[i].energy_per_atom
#trim the front of the lists
while prev_e and epa > 1e-4 + prev_e[0]:
prev_c.pop(0)
prev_e.pop(0)
frac_comp = entries[i].composition.fractional_composition
if frac_comp not in prev_c:
ind.append(i)
prev_e.append(epa)
prev_c.append(frac_comp)
#add the elemental references
ind.extend([entries.index(el) for el in el_refs.values()])
qhull_entries = [entries[i] for i in ind]
qhull_data = data[ind][:, 1:]
#add an extra point to enforce full dimensionality
#this point will be present in all upper hull facets
extra_point = np.zeros(dim) + 1 / dim
extra_point[-1] = np.max(qhull_data) + 1
qhull_data = np.concatenate([qhull_data, [extra_point]], axis=0)
if dim == 1:
self.facets = [qhull_data.argmin(axis=0)]
else:
facets = get_facets(qhull_data)
finalfacets = []
for facet in facets:
#skip facets that include the extra point
if max(facet) == len(qhull_data)-1:
continue
m = qhull_data[facet]
m[:, -1] = 1
if abs(np.linalg.det(m)) > 1e-14:
finalfacets.append(facet)
self.facets = finalfacets
self.simplices = [Simplex(qhull_data[f, :-1]) for f in self.facets]
self.all_entries = entries
self.qhull_data = qhull_data
self.dim = dim
self.el_refs = el_refs
self.elements = elements
self.qhull_entries = qhull_entries
@property
def unstable_entries(self):
"""
Entries that are unstable in the phase diagram. Includes positive
formation energy entries.
"""
return [e for e in self.all_entries if e not in self.stable_entries]
@property
def stable_entries(self):
"""
Returns the stable entries in the phase diagram.
"""
stable_entries = set()
for facet in self.facets:
for vertex in facet:
stable_entries.add(self.qhull_entries[vertex])
return stable_entries
def get_form_energy(self, entry):
"""
Returns the formation energy for an entry (NOT normalized) from the
elemental references.
Args:
entry: A PDEntry-like object.
Returns:
Formation energy from the elemental references.
"""
comp = entry.composition
energy = entry.energy - sum([comp[el] *
self.el_refs[el].energy_per_atom
for el in comp.elements])
return energy
def get_form_energy_per_atom(self, entry):
"""
Returns the formation energy per atom for an entry from the
elemental references.
Args:
entry: An PDEntry-like object
Returns:
Formation energy **per atom** from the elemental references.
"""
comp = entry.composition
return self.get_form_energy(entry) / comp.num_atoms
def __repr__(self):
return self.__str__()
def __str__(self):
symbols = [el.symbol for el in self.elements]
output = ["{} phase diagram".format("-".join(symbols)),
"{} stable phases: ".format(len(self.stable_entries)),
", ".join([entry.name
for entry in self.stable_entries])]
return "\n".join(output)
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"all_entries": [e.as_dict() for e in self.all_entries],
"elements": [e.as_dict() for e in self.elements]}
@classmethod
def from_dict(cls, d):
entries = [ComputedEntry.from_dict(dd) for dd in d["all_entries"]]
elements = [Element.from_dict(dd) for dd in d["elements"]]
return cls(entries, elements)
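# A minimal construction sketch (hypothetical energies, in eV; PDEntry comes
# from pymatgen.phasediagram.entries, alongside the entry classes imported
# above):
#
#     from pymatgen.phasediagram.entries import PDEntry
#     from pymatgen.core.composition import Composition
#     entries = [PDEntry(Composition("Li"), 0.0),
#                PDEntry(Composition("O2"), 0.0),
#                PDEntry(Composition("Li2O"), -6.0)]
#     pd = PhaseDiagram(entries)
#     print(pd)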
class GrandPotentialPhaseDiagram(PhaseDiagram):
"""
A class representing a Grand potential phase diagram. Grand potential phase
diagrams are essentially phase diagrams that are open to one or more
components. To construct such phase diagrams, the relevant free energy is
the grand potential, which can be written as the Legendre transform of the
Gibbs free energy as follows
Grand potential = G - u\ :sub:`X` N\ :sub:`X`\
The algorithm is based on the work in the following papers:
1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
doi:10.1021/cm702327g
2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
doi:10.1016/j.elecom.2010.01.010
"""
def __init__(self, entries, chempots, elements=None):
"""
Standard constructor for grand potential phase diagram.
Args:
entries ([PDEntry]): A list of PDEntry-like objects having an
energy, energy_per_atom and composition.
chempots {Element: float}: Specify the chemical potentials
of the open elements.
elements ([Element]): Optional list of elements in the phase
diagram. If set to None, the elements are determined from
                the entries themselves.
"""
if elements is None:
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
self.chempots = {get_el_sp(el): u for el, u in chempots.items()}
elements = set(elements).difference(self.chempots.keys())
all_entries = []
for e in entries:
if len(set(e.composition.elements).intersection(set(elements))) > 0:
all_entries.append(GrandPotPDEntry(e, self.chempots))
super(GrandPotentialPhaseDiagram, self).__init__(all_entries, elements)
def __str__(self):
output = []
chemsys = "-".join([el.symbol for el in self.elements])
output.append("{} grand potential phase diagram with ".format(chemsys))
output[-1] += ", ".join(["u{}={}".format(el, v)
for el, v in self.chempots.items()])
output.append("{} stable phases: ".format(len(self.stable_entries)))
output.append(", ".join([entry.name
for entry in self.stable_entries]))
return "\n".join(output)
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"all_entries": [e.as_dict() for e in self.all_entries],
"chempots": self.chempots,
"elements": [e.as_dict() for e in self.elements]}
@classmethod
def from_dict(cls, d):
entries = MontyDecoder().process_decoded(d["all_entries"])
elements = MontyDecoder().process_decoded(d["elements"])
return cls(entries, d["chempots"], elements)
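# --- Illustrative sketch, not part of the original module -------------------
# Opening the Li-O system to oxygen at a fixed chemical potential. PDEntry and
# its import path are assumptions; Element is already in scope above, and the
# energies and potential are made up for illustration only.
def _example_grand_potential_usage():
    from pymatgen.phasediagram.entries import PDEntry  # assumed import path
    entries = [PDEntry(Composition("Li"), 0.0),
               PDEntry(Composition("Li2O"), -6.0)]
    gpd = GrandPotentialPhaseDiagram(entries, {Element("O"): -5.0})
    return [e.name for e in gpd.stable_entries]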
class CompoundPhaseDiagram(PhaseDiagram):
"""
Generates phase diagrams from compounds as terminations instead of
elements.
"""
# Tolerance for determining if amount of a composition is positive.
amount_tol = 1e-5
def __init__(self, entries, terminal_compositions,
normalize_terminal_compositions=True):
"""
Initializes a CompoundPhaseDiagram.
Args:
entries ([PDEntry]): Sequence of input entries. For example,
if you want a Li2O-P2O5 phase diagram, you might have all
Li-P-O entries as an input.
terminal_compositions ([Composition]): Terminal compositions of
phase space. In the Li2O-P2O5 example, these will be the
Li2O and P2O5 compositions.
normalize_terminal_compositions (bool): Whether to normalize the
terminal compositions to a per atom basis. If normalized,
                the energies above hull will be consistent
for comparison across systems. Non-normalized terminals are
more intuitive in terms of compositional breakdowns.
"""
self.original_entries = entries
self.terminal_compositions = terminal_compositions
self.normalize_terminals = normalize_terminal_compositions
(pentries, species_mapping) = \
self.transform_entries(entries, terminal_compositions)
self.species_mapping = species_mapping
PhaseDiagram.__init__(self, pentries,
elements=species_mapping.values())
def transform_entries(self, entries, terminal_compositions):
"""
Method to transform all entries to the composition coordinate in the
terminal compositions. If the entry does not fall within the space
        defined by the terminal compositions, it is excluded. For example,
Li3PO4 is mapped into a Li2O:1.5, P2O5:0.5 composition. The terminal
compositions are represented by DummySpecies.
Args:
entries: Sequence of all input entries
terminal_compositions: Terminal compositions of phase space.
Returns:
Sequence of TransformedPDEntries falling within the phase space.
"""
new_entries = []
if self.normalize_terminals:
fractional_comp = [c.fractional_composition
for c in terminal_compositions]
else:
fractional_comp = terminal_compositions
#Map terminal compositions to unique dummy species.
sp_mapping = collections.OrderedDict()
for i, comp in enumerate(fractional_comp):
sp_mapping[comp] = DummySpecie("X" + chr(102 + i))
for entry in entries:
try:
rxn = Reaction(fractional_comp, [entry.composition])
rxn.normalize_to(entry.composition)
#We only allow reactions that have positive amounts of
#reactants.
if all([rxn.get_coeff(comp) <= CompoundPhaseDiagram.amount_tol
for comp in fractional_comp]):
newcomp = {sp_mapping[comp]: -rxn.get_coeff(comp)
for comp in fractional_comp}
newcomp = {k: v for k, v in newcomp.items()
if v > CompoundPhaseDiagram.amount_tol}
transformed_entry = \
TransformedPDEntry(Composition(newcomp), entry)
new_entries.append(transformed_entry)
except ReactionError:
#If the reaction can't be balanced, the entry does not fall
#into the phase space. We ignore them.
pass
return new_entries, sp_mapping
def as_dict(self):
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"original_entries": [e.as_dict() for e in self.original_entries],
"terminal_compositions": [c.as_dict()
for c in self.terminal_compositions],
"normalize_terminal_compositions":
self.normalize_terminal_compositions}
@classmethod
def from_dict(cls, d):
dec = MontyDecoder()
entries = dec.process_decoded(d["original_entries"])
terminal_compositions = dec.process_decoded(d["terminal_compositions"])
return cls(entries, terminal_compositions,
d["normalize_terminal_compositions"])
class PhaseDiagramError(Exception):
"""
An exception class for Phase Diagram generation.
"""
pass
def get_facets(qhull_data, joggle=False, force_use_pyhull=False):
"""
Get the simplex facets for the Convex hull.
Args:
qhull_data (np.ndarray): The data from which to construct the convex
hull as a Nxd array (N being number of data points and d being the
dimension)
joggle (boolean): Whether to joggle the input to avoid precision
errors.
force_use_pyhull (boolean): Whether the pyhull algorithm is always
used, even when scipy is present.
Returns:
List of simplices of the Convex Hull.
"""
if HULL_METHOD == "scipy" and (not force_use_pyhull):
if joggle:
return ConvexHull(qhull_data, qhull_options="QJ i").simplices
else:
return ConvexHull(qhull_data, qhull_options="Qt i").simplices
else:
return ConvexHull(qhull_data, joggle=joggle).vertices
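# --- Illustrative sketch, not part of the original module -------------------
# get_facets() on the corners of the unit square should yield the four hull
# edges as index pairs, whichever backend (scipy or pyhull) is in use;
# "joggle" only matters for degenerate (coplanar) inputs.
def _example_get_facets_usage():
    pts = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    return list(get_facets(pts))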
| Dioptas/pymatgen | pymatgen/phasediagram/pdmaker.py | Python | mit | 18,351 | ["pymatgen"] | c895eed4e7d3d4b308cce4ebf3678d2506bed4d9da1e69f0d28c4769418296ac |
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Class advice.
This module was adapted from 'protocols.advice', part of the Python
Enterprise Application Kit (PEAK). Please notify the PEAK authors
(pje@telecommunity.com and tsarna@sarna.org) if bugs are found or
Zope-specific changes are required, so that the PEAK version of this module
can be kept in sync.
PEAK is a Python application framework that interoperates with (but does
not require) Zope 3 and Twisted. It provides tools for manipulating UML
models, object-relational persistence, aspect-oriented programming, and more.
Visit the PEAK home page at http://peak.telecommunity.com for more information.
"""
from types import FunctionType
try:
from types import ClassType
except ImportError:
__python3 = True
else:
__python3 = False
__all__ = [
'addClassAdvisor',
'determineMetaclass',
'getFrameInfo',
'isClassAdvisor',
'minimalBases',
]
import sys
def getFrameInfo(frame):
"""Return (kind,module,locals,globals) for a frame
'kind' is one of "exec", "module", "class", "function call", or "unknown".
"""
f_locals = frame.f_locals
f_globals = frame.f_globals
sameNamespace = f_locals is f_globals
hasModule = '__module__' in f_locals
hasName = '__name__' in f_globals
sameName = hasModule and hasName
sameName = sameName and f_globals['__name__']==f_locals['__module__']
module = hasName and sys.modules.get(f_globals['__name__']) or None
namespaceIsModule = module and module.__dict__ is f_globals
if not namespaceIsModule:
# some kind of funky exec
kind = "exec"
elif sameNamespace and not hasModule:
kind = "module"
elif sameName and not sameNamespace:
kind = "class"
elif not sameNamespace:
kind = "function call"
else: # pragma: no cover
# How can you have f_locals is f_globals, and have '__module__' set?
# This is probably module-level code, but with a '__module__' variable.
kind = "unknown"
return kind, module, f_locals, f_globals
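# Illustrative sketch, not part of the original module: inside a plain
# function call, f_locals and f_globals differ and there is no '__module__'
# binding, so the kind reported for the current frame is "function call".
def _example_getFrameInfo():
    kind, module, f_locals, f_globals = getFrameInfo(sys._getframe())
    return kind  # -> "function call"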
def addClassAdvisor(callback, depth=2):
"""Set up 'callback' to be passed the containing class upon creation
This function is designed to be called by an "advising" function executed
in a class suite. The "advising" function supplies a callback that it
wishes to have executed when the containing class is created. The
callback will be given one argument: the newly created containing class.
The return value of the callback will be used in place of the class, so
the callback should return the input if it does not wish to replace the
class.
The optional 'depth' argument to this function determines the number of
frames between this function and the targeted class suite. 'depth'
defaults to 2, since this skips this function's frame and one calling
function frame. If you use this function from a function called directly
in the class suite, the default will be correct, otherwise you will need
to determine the correct depth yourself.
This function works by installing a special class factory function in
place of the '__metaclass__' of the containing class. Therefore, only
callbacks *after* the last '__metaclass__' assignment in the containing
class will be executed. Be sure that classes using "advising" functions
declare any '__metaclass__' *first*, to ensure all callbacks are run."""
# This entire approach is invalid under Py3K. Don't even try to fix
# the coverage for this block there. :(
if __python3: # pragma: no cover
raise TypeError('Class advice impossible in Python3')
frame = sys._getframe(depth)
kind, module, caller_locals, caller_globals = getFrameInfo(frame)
# This causes a problem when zope interfaces are used from doctest.
# In these cases, kind == "exec".
#
#if kind != "class":
# raise SyntaxError(
# "Advice must be in the body of a class statement"
# )
previousMetaclass = caller_locals.get('__metaclass__')
if __python3: # pragma: no cover
defaultMetaclass = caller_globals.get('__metaclass__', type)
else:
defaultMetaclass = caller_globals.get('__metaclass__', ClassType)
def advise(name, bases, cdict):
if '__metaclass__' in cdict:
del cdict['__metaclass__']
if previousMetaclass is None:
if bases:
# find best metaclass or use global __metaclass__ if no bases
meta = determineMetaclass(bases)
else:
meta = defaultMetaclass
elif isClassAdvisor(previousMetaclass):
# special case: we can't compute the "true" metaclass here,
# so we need to invoke the previous metaclass and let it
# figure it out for us (and apply its own advice in the process)
meta = previousMetaclass
else:
meta = determineMetaclass(bases, previousMetaclass)
newClass = meta(name,bases,cdict)
# this lets the callback replace the class completely, if it wants to
return callback(newClass)
# introspection data only, not used by inner function
advise.previousMetaclass = previousMetaclass
advise.callback = callback
# install the advisor
caller_locals['__metaclass__'] = advise
def isClassAdvisor(ob):
"""True if 'ob' is a class advisor function"""
return isinstance(ob,FunctionType) and hasattr(ob,'previousMetaclass')
def determineMetaclass(bases, explicit_mc=None):
"""Determine metaclass from 1+ bases and optional explicit __metaclass__"""
meta = [getattr(b,'__class__',type(b)) for b in bases]
if explicit_mc is not None:
# The explicit metaclass needs to be verified for compatibility
# as well, and allowed to resolve the incompatible bases, if any
meta.append(explicit_mc)
if len(meta)==1:
# easy case
return meta[0]
candidates = minimalBases(meta) # minimal set of metaclasses
if not candidates: # pragma: no cover
# they're all "classic" classes
assert(not __python3) # This should not happen under Python 3
return ClassType
elif len(candidates)>1:
# We could auto-combine, but for now we won't...
raise TypeError("Incompatible metatypes",bases)
# Just one, return it
return candidates[0]
def minimalBases(classes):
"""Reduce a list of base classes to its ordered minimum equivalent"""
if not __python3: # pragma: no cover
classes = [c for c in classes if c is not ClassType]
candidates = []
for m in classes:
for n in classes:
if issubclass(n,m) and m is not n:
break
else:
# m has no subclasses in 'classes'
if m in candidates:
candidates.remove(m) # ensure that we're later in the list
candidates.append(m)
return candidates
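# Illustrative sketch, not part of the original module: for ordinary
# new-style bases the resolved metaclass is simply ``type``, and
# minimalBases() collapses the duplicate candidates along the way.
def _example_determineMetaclass():
    class A(object):
        pass
    class B(object):
        pass
    return determineMetaclass((A, B))  # -> type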
| mdworks2016/work_development | Python/05_FirstPython/Chapter9_WebApp/fppython_develop/lib/python3.7/site-packages/zope/interface/advice.py | Python | apache-2.0 | 7,612 | ["VisIt"] | 0a91e1b804fc9db69aca60c1117bb1bc9bfd651304b6aedf8139a850ec2c4b78 |
#!/usr/bin/python -u
import os, subprocess
import collections
from argparse import Namespace
from gmx_lib import run_setup
from gmx_lib.run_setup import WorkdirStructure
from gmx_lib.gmx_engine import GmxEngine
from gmx_lib.gmx_params import GmxParameters
from modeling_lib.scwrl_engine import ScwrlEngine
from modeling_lib.pdb_doctor import get_biopython_structure, replace_sidechains
from modeling_lib.pdb_doctor import check_pdb_for_missing_atoms, fix_sidechains
#########################################
def read_mutations(params):
os.chdir("/".join([params.run_options.workdir,params.rundirs.in_dir]))
mutfile = open(params.run_options.xtra, "r")
mutation = {}
for line in mutfile:
line=line.strip()
aa_from=line[:1]
res_id= int(line[1:-1])
aa_to = line[-1:]
mutation[res_id] = [aa_from, aa_to]
if len(mutation)==0:
print "no mutations found in %s (?)" % params.run_options.xtra
exit()
sorted_keys = mutation.keys()
sorted_keys.sort()
mutation_sorted = collections.OrderedDict()
for key in sorted_keys:
mutation_sorted[key] = mutation[key]
return mutation_sorted
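# Illustrative note, not in the original script: given the slicing above
# (line[:1], line[1:-1], line[-1:]), each line of the xtra file is expected
# to look like <from><resid><to>, one mutation per line, e.g.:
#
#   A15G
#   W72F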
#########################################
def fill_in_dir(params):
os.chdir(params.run_options.workdir)
in_dir = params.rundirs.in_dir
if not os.path.exists(in_dir): os.mkdir(in_dir)
# move the list of mutations to super-input directory
os.rename(params.run_options.xtra, "/".join([in_dir, params.run_options.xtra]))
os.chdir(in_dir)
pdbname = params.run_options.pdb
if os.path.exists("../%s.pdb"%pdbname): os.rename("../%s.pdb"%pdbname,"./%s.pdb"%pdbname)
if not os.path.exists("%s.pdb"%pdbname):
print "%s.pdb not found"%pdbname
exit()
subprocess.call(["bash", "-c", "cp -f %s/* . " % (params.run_options.mdp_template_home)])
#########################################
def remove_nonexistent_positions(params, mutations):
structure = get_biopython_structure(params)
# TODO what if I have multiple chains
for chain in structure.get_chains():
residue_ids_present = []
for residue in chain:
residue_ids_present.append(int(residue.id[1]))
for key in mutations.keys():
if not key in residue_ids_present:
del mutations[key]
if len(mutations) == 0:
print "no positions from the mutation list seem to be present on the structure"
exit()
return
#########################################
def res_id_to_seq_id(params):
structure = get_biopython_structure(params)
# TODO what if I have multiple chains
for chain in structure.get_chains():
seq_no = 0
seq_id = {}
for residue in chain:
seq_id[int(residue.id[1])] = seq_no
seq_no += 1
return seq_id
#########################################
def make_mutant(params,newdir, seq_id, new_aa):
os.chdir(params.run_options.workdir)
if not os.path.exists(newdir): os.mkdir(newdir)
os.chdir(newdir)
# mutation
pdbname = params.run_options.pdb
original_pdb = "../%s/%s.pdb"%(params.rundirs.in_dir, pdbname)
new_pdb = "%s.%s.pdb"%(pdbname, newdir)
# this function will call scwrl
replace_sidechains(params, {seq_id: new_aa}, original_pdb, '', new_pdb)
# mutational scan: simple run for each mutated structure
#########################################
def main():
params = Namespace()
params.run_options = run_setup.parse_commandline()
params.physical = GmxParameters(params.run_options)
params.gmx_engine = GmxEngine("/usr/local/gromacs/bin/GMXRC.bash")
params.scwrl_engine = ScwrlEngine("/usr/local/bin/scwrl4/Scwrl4")
params.command_log = open(params.run_options.workdir+"/commands.log","w")
######################
# special requirement for mutation scan:
if (params.run_options.xtra == "none"):
print "this pipe expects xtra file: list of mutations"
exit()
######################
# create a super-input dir with fixed pdb and mdp files
params.rundirs = Namespace()
params.rundirs.in_dir = "super-input"
fill_in_dir(params)
######################
# put fixed pdb in super-input
chain_breaks, missing_sidechains = check_pdb_for_missing_atoms(params)
if missing_sidechains: fix_sidechains(params, missing_sidechains)
# TODO: fix chain breaks (if they exist)
# if chain_breaks: fix_chain_breaks(params, missing_sidechains)
######################
# adjust the run length
params.physical.set_run_lengths(params, em_steep=10000, em_lbfgs=100,
pr_nvt="50ps", pr_npt="10ps", md="100ns")
######################
# read in the mutation list
mutations = read_mutations(params)
# remove mutations in positions outside of the given structure
remove_nonexistent_positions(params, mutations)
######################
# for each mutation create directory and the mutant structure
seq_id = res_id_to_seq_id(params)
newdirs = []
for resid, mut_pair in mutations.iteritems():
newdir = "{}_{}_{}".format(str(resid).zfill(3), mut_pair[0], mut_pair[1])
newdirs.append(newdir)
make_mutant(params, newdir, seq_id[resid], mut_pair[1])
######################
# create dir tree, fill input for each new directory
for newdir in newdirs:
os.chdir(params.run_options.workdir)
if not os.path.exists(newdir): os.mkdir(newdir)
os.chdir(newdir)
for dir in WorkdirStructure.workdir_names:
if not os.path.exists(dir): os.mkdir(dir)
in_dir = WorkdirStructure.workdir_names[0]
# move pdb to in_dir
subprocess.call(["bash", "-c", "mv -f *.pdb %s"%in_dir], stdout=None, stderr=None)
# move mdp files to in_dir
super_in_dir = "/".join([params.run_options.workdir, params.rundirs.in_dir])
subprocess.call(["bash", "-c", "cp -f %s/*.mdp %s"%(super_in_dir, in_dir)], stdout=None, stderr=None)
# cleanup
subprocess.call(["bash", "-c", "rm -f chain*seq scwrl*"], stdout=None, stderr=None)
######################
# start the simulation in each sub-dir
for newdir in newdirs:
workdir = "/".join([params.run_options.workdir, newdir])
core_pipe = "/".join([params.run_options.gromacs_pype_home,"gmx01_core.py"])
pdbname = params.run_options.pdb+"."+newdir
cmd = "nice %s -p %s -w %s" % (core_pipe, pdbname, workdir)
params.command_log.write(cmd+"\n")
subprocess.Popen( ["bash", "-c", cmd], stdout=None, stderr=None, close_fds=True)
######################
# cleanup and exit
######################
params.command_log.close()
return True
#########################################
if __name__ == '__main__':
main()
| ivanamihalek/smallmol | gmx03_mutation_scan.py | Python | gpl-2.0 | 6,294 | ["Gromacs"] | b03eeba6e63933b8335d527b7b376383e9256ea112ea659cc3d8c56decee75b5 |
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from karaage import __version__
from karaage.common import is_admin
def common(request):
""" Set context with common variables. """
ctx = {
'AAF_RAPID_CONNECT_ENABLED': settings.AAF_RAPID_CONNECT_ENABLED,
'org_name': settings.ACCOUNTS_ORG_NAME,
'accounts_email': settings.ACCOUNTS_EMAIL,
'is_admin': is_admin(request),
'kgversion': __version__,
'VERSION': settings.VERSION,
'BUILD_DATE': settings.BUILD_DATE,
'VCS_REF': settings.VCS_REF,
'SLURM_VER': settings.SLURM_VER,
}
return ctx
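# Illustrative note, not part of the original module: a context processor
# like this is typically wired up via Django's TEMPLATES setting; the dotted
# path below is inferred from this file's location and is an assumption.
#
# TEMPLATES = [{
#     ...
#     'OPTIONS': {
#         'context_processors': [
#             ...
#             'karaage.common.context_processors.common',
#         ],
#     },
# }]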
| Karaage-Cluster/karaage | karaage/common/context_processors.py | Python | gpl-3.0 | 1,348 | ["Brian"] | 789a8ff0539e4c04842138966c381bb1987129db9d3491fa160a561fec6e30de |
# -*- coding: utf-8 -*-
"""Main IPython class."""
#-----------------------------------------------------------------------------
# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de>
# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import abc
import ast
import asyncio
import atexit
import builtins as builtin_mod
import functools
import os
import re
import runpy
import sys
import tempfile
import traceback
import types
import subprocess
import warnings
from io import open as io_open
from pickleshare import PickleShareDB
from traitlets.config.configurable import SingletonConfigurable
from traitlets.utils.importstring import import_item
from IPython.core import oinspect
from IPython.core import magic
from IPython.core import page
from IPython.core import prefilter
from IPython.core import ultratb
from IPython.core.alias import Alias, AliasManager
from IPython.core.autocall import ExitAutocall
from IPython.core.builtin_trap import BuiltinTrap
from IPython.core.events import EventManager, available_events
from IPython.core.compilerop import CachingCompiler, check_linecache_ipython
from IPython.core.debugger import Pdb
from IPython.core.display_trap import DisplayTrap
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import DisplayPublisher
from IPython.core.error import InputRejected, UsageError
from IPython.core.extensions import ExtensionManager
from IPython.core.formatters import DisplayFormatter
from IPython.core.history import HistoryManager
from IPython.core.inputtransformer2 import ESC_MAGIC, ESC_MAGIC2
from IPython.core.logger import Logger
from IPython.core.macro import Macro
from IPython.core.payload import PayloadManager
from IPython.core.prefilter import PrefilterManager
from IPython.core.profiledir import ProfileDir
from IPython.core.usage import default_banner
from IPython.display import display
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import PyColorize
from IPython.utils import io
from IPython.utils import py3compat
from IPython.utils import openpy
from IPython.utils.decorators import undoc
from IPython.utils.io import ask_yes_no
from IPython.utils.ipstruct import Struct
from IPython.paths import get_ipython_dir
from IPython.utils.path import get_home_dir, get_py_filename, ensure_dir_exists
from IPython.utils.process import system, getoutput
from IPython.utils.strdispatch import StrDispatch
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.text import format_screen, LSString, SList, DollarFormatter
from IPython.utils.tempdir import TemporaryDirectory
from traitlets import (
Integer, Bool, CaselessStrEnum, Enum, List, Dict, Unicode, Instance, Type,
observe, default, validate, Any
)
from warnings import warn
from logging import error
import IPython.core.hooks
from typing import List as ListType, Tuple
from ast import AST
# NoOpContext is deprecated, but ipykernel imports it from here.
# See https://github.com/ipython/ipykernel/issues/157
from IPython.utils.contexts import NoOpContext
try:
import docrepr.sphinxify as sphx
def sphinxify(doc):
with TemporaryDirectory() as dirname:
return {
'text/html': sphx.sphinxify(doc, dirname),
'text/plain': doc
}
except ImportError:
sphinxify = None
class ProvisionalWarning(DeprecationWarning):
"""
Warning class for unstable features
"""
pass
if sys.version_info > (3,8):
from ast import Module
else :
# mock the new API, ignore second argument
# see https://github.com/ipython/ipython/issues/11590
from ast import Module as OriginalModule
Module = lambda nodelist, type_ignores: OriginalModule(nodelist)
if sys.version_info > (3,6):
_assign_nodes = (ast.AugAssign, ast.AnnAssign, ast.Assign)
_single_targets_nodes = (ast.AugAssign, ast.AnnAssign)
else:
_assign_nodes = (ast.AugAssign, ast.Assign )
_single_targets_nodes = (ast.AugAssign, )
#-----------------------------------------------------------------------------
# Await Helpers
#-----------------------------------------------------------------------------
def removed_co_newlocals(function:types.FunctionType) -> types.FunctionType:
"""Return a function that do not create a new local scope.
Given a function, create a clone of this function where the co_newlocal flag
has been removed, making this function code actually run in the sourounding
scope.
We need this in order to run asynchronous code in user level namespace.
"""
from types import CodeType, FunctionType
CO_NEWLOCALS = 0x0002
code = function.__code__
new_code = CodeType(
code.co_argcount,
code.co_kwonlyargcount,
code.co_nlocals,
code.co_stacksize,
code.co_flags & ~CO_NEWLOCALS,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
code.co_name,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars
)
return FunctionType(new_code, globals(), function.__name__, function.__defaults__)
# we still need to run things using the asyncio eventloop, but there is no
# async integration
from .async_helpers import (_asyncio_runner, _asyncify, _pseudo_sync_runner)
if sys.version_info > (3, 5):
from .async_helpers import _curio_runner, _trio_runner, _should_be_async
else :
_curio_runner = _trio_runner = None
def _should_be_async(cell:str)->bool:
return False
def _ast_asyncify(cell:str, wrapper_name:str) -> ast.Module:
"""
Parse a cell with top-level await and modify the AST to be able to run it later.
    Parameters
    ----------
    cell: str
        The code cell to asynchronify
    wrapper_name: str
        The name of the function to be used to wrap the passed `cell`. It is
        advised to **not** use a python identifier in order to not pollute the
        global namespace in which the function will be run.
    Returns
    -------
A module object AST containing **one** function named `wrapper_name`.
The given code is wrapped in a async-def function, parsed into an AST, and
the resulting function definition AST is modified to return the last
expression.
The last expression or await node is moved into a return statement at the
end of the function, and removed from its original location. If the last
node is not Expr or Await nothing is done.
The function `__code__` will need to be later modified (by
``removed_co_newlocals``) in a subsequent step to not create new `locals()`
meaning that the local and global scope are the same, ie as if the body of
the function was at module level.
Lastly a call to `locals()` is made just before the last expression of the
function, or just after the last assignment or statement to make sure the
    global dict is updated, as Python functions work with a local fast cache which
    is updated only on `locals()` calls.
"""
from ast import Expr, Await, Return
tree = ast.parse(_asyncify(cell))
function_def = tree.body[0]
function_def.name = wrapper_name
try_block = function_def.body[0]
lastexpr = try_block.body[-1]
if isinstance(lastexpr, (Expr, Await)):
try_block.body[-1] = Return(lastexpr.value)
ast.fix_missing_locations(tree)
return tree
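# Illustrative sketch, not part of the original module, relying only on the
# structural assumptions _ast_asyncify itself makes (an async-def whose first
# body element is the try block): the trailing expression of a cell should
# come back as a Return node.
def _example_ast_asyncify():
    tree = _ast_asyncify('x = 1\nx + 1', 'example-wrapper')
    function_def = tree.body[0]
    try_block = function_def.body[0]
    return isinstance(try_block.body[-1], ast.Return)  # -> True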
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# compiled regexps for autoindent management
dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
@undoc
def softspace(file, newvalue):
"""Copied from code.py, to remove the dependency"""
oldvalue = 0
try:
oldvalue = file.softspace
except AttributeError:
pass
try:
file.softspace = newvalue
except (AttributeError, TypeError):
# "attribute-less object" or "read-only attributes"
pass
return oldvalue
@undoc
def no_op(*a, **kw):
pass
class SpaceInInput(Exception): pass
def get_default_colors():
"DEPRECATED"
warn('get_default_color is deprecated since IPython 5.0, and returns `Neutral` on all platforms.',
DeprecationWarning, stacklevel=2)
return 'Neutral'
class SeparateUnicode(Unicode):
r"""A Unicode subclass to validate separate_in, separate_out, etc.
This is a Unicode based trait that converts '0'->'' and ``'\\n'->'\n'``.
"""
def validate(self, obj, value):
if value == '0': value = ''
value = value.replace('\\n','\n')
return super(SeparateUnicode, self).validate(obj, value)
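# Illustrative sketch, not part of the original module, assuming traitlets'
# Unicode.validate passes plain strings through unchanged.
def _example_separate_unicode():
    trait = SeparateUnicode()
    assert trait.validate(None, '0') == ''
    assert trait.validate(None, 'a\\nb') == 'a\nb'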
@undoc
class DummyMod(object):
"""A dummy module used for IPython's interactive module when
a namespace must be assigned to the module's __dict__."""
__spec__ = None
class ExecutionInfo(object):
"""The arguments used for a call to :meth:`InteractiveShell.run_cell`
Stores information about what is going to happen.
"""
raw_cell = None
store_history = False
silent = False
shell_futures = True
def __init__(self, raw_cell, store_history, silent, shell_futures):
self.raw_cell = raw_cell
self.store_history = store_history
self.silent = silent
self.shell_futures = shell_futures
def __repr__(self):
name = self.__class__.__qualname__
raw_cell = ((self.raw_cell[:50] + '..')
if len(self.raw_cell) > 50 else self.raw_cell)
return '<%s object at %x, raw_cell="%s" store_history=%s silent=%s shell_futures=%s>' %\
(name, id(self), raw_cell, self.store_history, self.silent, self.shell_futures)
class ExecutionResult(object):
"""The result of a call to :meth:`InteractiveShell.run_cell`
Stores information about what took place.
"""
execution_count = None
error_before_exec = None
error_in_exec = None
info = None
result = None
def __init__(self, info):
self.info = info
@property
def success(self):
return (self.error_before_exec is None) and (self.error_in_exec is None)
def raise_error(self):
"""Reraises error if `success` is `False`, otherwise does nothing"""
if self.error_before_exec is not None:
raise self.error_before_exec
if self.error_in_exec is not None:
raise self.error_in_exec
def __repr__(self):
name = self.__class__.__qualname__
return '<%s object at %x, execution_count=%s error_before_exec=%s error_in_exec=%s info=%s result=%s>' %\
(name, id(self), self.execution_count, self.error_before_exec, self.error_in_exec, repr(self.info), repr(self.result))
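# Illustrative sketch, not part of the original module: success is simply
# "no error at either stage", and raise_error() is a no-op while that holds.
def _example_execution_result():
    info = ExecutionInfo('1+1', store_history=False, silent=True,
                         shell_futures=True)
    result = ExecutionResult(info)
    assert result.success
    result.raise_error()  # does nothing: both error fields are still None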
class InteractiveShell(SingletonConfigurable):
"""An enhanced, interactive shell for Python."""
_instance = None
ast_transformers = List([], help=
"""
A list of ast.NodeTransformer subclass instances, which will be applied
to user input before code is run.
"""
).tag(config=True)
autocall = Enum((0,1,2), default_value=0, help=
"""
Make IPython automatically call any callable object even if you didn't
type explicit parentheses. For example, 'str 43' becomes 'str(43)'
automatically. The value can be '0' to disable the feature, '1' for
'smart' autocall, where it is not applied if there are no more
arguments on the line, and '2' for 'full' autocall, where all callable
objects are automatically called (even if no arguments are present).
"""
).tag(config=True)
autoindent = Bool(True, help=
"""
Autoindent IPython code entered interactively.
"""
).tag(config=True)
autoawait = Bool(True, help=
"""
Automatically run await statement in the top level repl.
"""
).tag(config=True)
loop_runner_map ={
'asyncio':(_asyncio_runner, True),
'curio':(_curio_runner, True),
'trio':(_trio_runner, True),
'sync': (_pseudo_sync_runner, False)
}
loop_runner = Any(default_value="IPython.core.interactiveshell._asyncio_runner",
allow_none=True,
help="""Select the loop runner that will be used to execute top-level asynchronous code"""
).tag(config=True)
@default('loop_runner')
def _default_loop_runner(self):
return import_item("IPython.core.interactiveshell._asyncio_runner")
@validate('loop_runner')
def _import_runner(self, proposal):
if isinstance(proposal.value, str):
if proposal.value in self.loop_runner_map:
runner, autoawait = self.loop_runner_map[proposal.value]
self.autoawait = autoawait
return runner
runner = import_item(proposal.value)
if not callable(runner):
raise ValueError('loop_runner must be callable')
return runner
if not callable(proposal.value):
raise ValueError('loop_runner must be callable')
return proposal.value
automagic = Bool(True, help=
"""
Enable magic commands to be called without the leading %.
"""
).tag(config=True)
banner1 = Unicode(default_banner,
help="""The part of the banner to be printed before the profile"""
).tag(config=True)
banner2 = Unicode('',
help="""The part of the banner to be printed after the profile"""
).tag(config=True)
cache_size = Integer(1000, help=
"""
Set the size of the output cache. The default is 1000, you can
change it permanently in your config file. Setting it to 0 completely
disables the caching system, and the minimum value accepted is 3 (if
you provide a value less than 3, it is reset to 0 and a warning is
issued). This limit is defined because otherwise you'll spend more
        time re-flushing a too-small cache than working.
"""
).tag(config=True)
color_info = Bool(True, help=
"""
Use colors for displaying information about objects. Because this
information is passed through a pager (like 'less'), and some pagers
get confused with color codes, this capability can be turned off.
"""
).tag(config=True)
colors = CaselessStrEnum(('Neutral', 'NoColor','LightBG','Linux'),
default_value='Neutral',
help="Set the color scheme (NoColor, Neutral, Linux, or LightBG)."
).tag(config=True)
debug = Bool(False).tag(config=True)
disable_failing_post_execute = Bool(False,
help="Don't call post-execute functions that have failed in the past."
).tag(config=True)
display_formatter = Instance(DisplayFormatter, allow_none=True)
displayhook_class = Type(DisplayHook)
display_pub_class = Type(DisplayPublisher)
sphinxify_docstring = Bool(False, help=
"""
Enables rich html representation of docstrings. (This requires the
docrepr module).
""").tag(config=True)
@observe("sphinxify_docstring")
def _sphinxify_docstring_changed(self, change):
if change['new']:
warn("`sphinxify_docstring` is provisional since IPython 5.0 and might change in future versions." , ProvisionalWarning)
enable_html_pager = Bool(False, help=
"""
(Provisional API) enables html representation in mime bundles sent
to pagers.
""").tag(config=True)
@observe("enable_html_pager")
def _enable_html_pager_changed(self, change):
if change['new']:
warn("`enable_html_pager` is provisional since IPython 5.0 and might change in future versions.", ProvisionalWarning)
data_pub_class = None
exit_now = Bool(False)
exiter = Instance(ExitAutocall)
@default('exiter')
def _exiter_default(self):
return ExitAutocall(self)
# Monotonically increasing execution counter
execution_count = Integer(1)
filename = Unicode("<ipython console>")
ipython_dir= Unicode('').tag(config=True) # Set to get_ipython_dir() in __init__
# Used to transform cells before running them, and check whether code is complete
input_transformer_manager = Instance('IPython.core.inputtransformer2.TransformerManager',
())
@property
def input_transformers_cleanup(self):
return self.input_transformer_manager.cleanup_transforms
input_transformers_post = List([],
help="A list of string input transformers, to be applied after IPython's "
"own input transformations."
)
@property
def input_splitter(self):
"""Make this available for backward compatibility (pre-7.0 release) with existing code.
        For example, ipykernel currently uses
`shell.input_splitter.check_complete`
"""
from warnings import warn
warn("`input_splitter` is deprecated since IPython 7.0, prefer `input_transformer_manager`.",
DeprecationWarning, stacklevel=2
)
return self.input_transformer_manager
logstart = Bool(False, help=
"""
Start logging to the default log file in overwrite mode.
Use `logappend` to specify a log file to **append** logs to.
"""
).tag(config=True)
logfile = Unicode('', help=
"""
The name of the logfile to use.
"""
).tag(config=True)
logappend = Unicode('', help=
"""
Start logging to the given file in append mode.
Use `logfile` to specify a log file to **overwrite** logs to.
"""
).tag(config=True)
object_info_string_level = Enum((0,1,2), default_value=0,
).tag(config=True)
pdb = Bool(False, help=
"""
Automatically call the pdb debugger after every exception.
"""
).tag(config=True)
display_page = Bool(False,
help="""If True, anything that would be passed to the pager
will be displayed as regular output instead."""
).tag(config=True)
# deprecated prompt traits:
prompt_in1 = Unicode('In [\\#]: ',
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
prompt_in2 = Unicode(' .\\D.: ',
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
prompt_out = Unicode('Out[\\#]: ',
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
prompts_pad_left = Bool(True,
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
@observe('prompt_in1', 'prompt_in2', 'prompt_out', 'prompt_pad_left')
def _prompt_trait_changed(self, change):
name = change['name']
warn("InteractiveShell.{name} is deprecated since IPython 4.0"
" and ignored since 5.0, set TerminalInteractiveShell.prompts"
" object directly.".format(name=name))
# protect against weird cases where self.config may not exist:
show_rewritten_input = Bool(True,
help="Show rewritten input, e.g. for autocall."
).tag(config=True)
quiet = Bool(False).tag(config=True)
history_length = Integer(10000,
help='Total length of command history'
).tag(config=True)
history_load_length = Integer(1000, help=
"""
The number of saved history entries to be loaded
into the history buffer at startup.
"""
).tag(config=True)
ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none', 'last_expr_or_assign'],
default_value='last_expr',
help="""
        'all', 'last', 'last_expr', 'none' or 'last_expr_or_assign', specifying
        which nodes should be run interactively (displaying output from expressions).
"""
).tag(config=True)
# TODO: this part of prompt management should be moved to the frontends.
# Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
separate_in = SeparateUnicode('\n').tag(config=True)
separate_out = SeparateUnicode('').tag(config=True)
separate_out2 = SeparateUnicode('').tag(config=True)
wildcards_case_sensitive = Bool(True).tag(config=True)
xmode = CaselessStrEnum(('Context', 'Plain', 'Verbose', 'Minimal'),
default_value='Context',
help="Switch modes for the IPython exception handlers."
).tag(config=True)
# Subcomponents of InteractiveShell
alias_manager = Instance('IPython.core.alias.AliasManager', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap', allow_none=True)
display_trap = Instance('IPython.core.display_trap.DisplayTrap', allow_none=True)
extension_manager = Instance('IPython.core.extensions.ExtensionManager', allow_none=True)
payload_manager = Instance('IPython.core.payload.PayloadManager', allow_none=True)
history_manager = Instance('IPython.core.history.HistoryAccessorBase', allow_none=True)
magics_manager = Instance('IPython.core.magic.MagicsManager', allow_none=True)
profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
@property
def profile(self):
if self.profile_dir is not None:
name = os.path.basename(self.profile_dir.location)
return name.replace('profile_','')
# Private interface
_post_execute = Dict()
# Tracks any GUI loop loaded for pylab
pylab_gui_select = None
    last_execution_succeeded = Bool(True, help='Whether the last executed command succeeded')
last_execution_result = Instance('IPython.core.interactiveshell.ExecutionResult', help='Result of executing the last command', allow_none=True)
def __init__(self, ipython_dir=None, profile_dir=None,
user_module=None, user_ns=None,
custom_exceptions=((), None), **kwargs):
# This is where traits with a config_key argument are updated
# from the values on config.
super(InteractiveShell, self).__init__(**kwargs)
if 'PromptManager' in self.config:
warn('As of IPython 5.0 `PromptManager` config will have no effect'
' and has been replaced by TerminalInteractiveShell.prompts_class')
self.configurables = [self]
# These are relatively independent and stateless
self.init_ipython_dir(ipython_dir)
self.init_profile_dir(profile_dir)
self.init_instance_attrs()
self.init_environment()
# Check if we're in a virtualenv, and set up sys.path.
self.init_virtualenv()
# Create namespaces (user_ns, user_global_ns, etc.)
self.init_create_namespaces(user_module, user_ns)
# This has to be done after init_create_namespaces because it uses
# something in self.user_ns, but before init_sys_modules, which
# is the first thing to modify sys.
# TODO: When we override sys.stdout and sys.stderr before this class
# is created, we are saving the overridden ones here. Not sure if this
# is what we want to do.
self.save_sys_module_state()
self.init_sys_modules()
# While we're trying to have each part of the code directly access what
# it needs without keeping redundant references to objects, we have too
# much legacy code that expects ip.db to exist.
self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
self.init_history()
self.init_encoding()
self.init_prefilter()
self.init_syntax_highlighting()
self.init_hooks()
self.init_events()
self.init_pushd_popd_magic()
self.init_user_ns()
self.init_logger()
self.init_builtins()
# The following was in post_config_initialization
self.init_inspector()
self.raw_input_original = input
self.init_completer()
# TODO: init_io() needs to happen before init_traceback handlers
# because the traceback handlers hardcode the stdout/stderr streams.
# This logic in in debugger.Pdb and should eventually be changed.
self.init_io()
self.init_traceback_handlers(custom_exceptions)
self.init_prompts()
self.init_display_formatter()
self.init_display_pub()
self.init_data_pub()
self.init_displayhook()
self.init_magics()
self.init_alias()
self.init_logstart()
self.init_pdb()
self.init_extension_manager()
self.init_payload()
self.init_deprecation_warnings()
self.hooks.late_startup_hook()
self.events.trigger('shell_initialized', self)
atexit.register(self.atexit_operations)
def get_ipython(self):
"""Return the currently running IPython instance."""
return self
#-------------------------------------------------------------------------
# Trait changed handlers
#-------------------------------------------------------------------------
@observe('ipython_dir')
def _ipython_dir_changed(self, change):
ensure_dir_exists(change['new'])
def set_autoindent(self,value=None):
"""Set the autoindent flag.
If called with no arguments, it acts as a toggle."""
if value is None:
self.autoindent = not self.autoindent
else:
self.autoindent = value
#-------------------------------------------------------------------------
# init_* methods called by __init__
#-------------------------------------------------------------------------
def init_ipython_dir(self, ipython_dir):
if ipython_dir is not None:
self.ipython_dir = ipython_dir
return
self.ipython_dir = get_ipython_dir()
def init_profile_dir(self, profile_dir):
if profile_dir is not None:
self.profile_dir = profile_dir
return
self.profile_dir =\
ProfileDir.create_profile_dir_by_name(self.ipython_dir, 'default')
def init_instance_attrs(self):
self.more = False
# command compiler
self.compile = CachingCompiler()
# Make an empty namespace, which extension writers can rely on both
# existing and NEVER being used by ipython itself. This gives them a
# convenient location for storing additional information and state
# their extensions may require, without fear of collisions with other
# ipython names that may develop later.
self.meta = Struct()
# Temporary files used for various purposes. Deleted at exit.
self.tempfiles = []
self.tempdirs = []
# keep track of where we started running (mainly for crash post-mortem)
# This is not being used anywhere currently.
self.starting_dir = os.getcwd()
# Indentation management
self.indent_current_nsp = 0
# Dict to track post-execution functions that have been registered
self._post_execute = {}
def init_environment(self):
"""Any changes we need to make to the user's environment."""
pass
def init_encoding(self):
        # Get system encoding at startup time. Certain terminals (like Emacs
        # under Win32) have it set to None, and we need to have a known valid
# encoding to use in the raw_input() method
try:
self.stdin_encoding = sys.stdin.encoding or 'ascii'
except AttributeError:
self.stdin_encoding = 'ascii'
@observe('colors')
def init_syntax_highlighting(self, changes=None):
# Python source parser/formatter for syntax highlighting
pyformat = PyColorize.Parser(style=self.colors, parent=self).format
self.pycolorize = lambda src: pyformat(src,'str')
def refresh_style(self):
# No-op here, used in subclass
pass
def init_pushd_popd_magic(self):
# for pushd/popd management
self.home_dir = get_home_dir()
self.dir_stack = []
def init_logger(self):
self.logger = Logger(self.home_dir, logfname='ipython_log.py',
logmode='rotate')
def init_logstart(self):
"""Initialize logging in case it was requested at the command line.
"""
if self.logappend:
self.magic('logstart %s append' % self.logappend)
elif self.logfile:
self.magic('logstart %s' % self.logfile)
elif self.logstart:
self.magic('logstart')
def init_deprecation_warnings(self):
"""
        Register the default filter for deprecation warnings.
        This allows deprecation warnings from functions used interactively to be
        shown to users, while still hiding deprecation warnings from library imports.
"""
if sys.version_info < (3,7):
warnings.filterwarnings("default", category=DeprecationWarning, module=self.user_ns.get("__name__"))
def init_builtins(self):
# A single, static flag that we set to True. Its presence indicates
# that an IPython shell has been created, and we make no attempts at
# removing on exit or representing the existence of more than one
# IPython at a time.
builtin_mod.__dict__['__IPYTHON__'] = True
builtin_mod.__dict__['display'] = display
self.builtin_trap = BuiltinTrap(shell=self)
@observe('colors')
def init_inspector(self, changes=None):
# Object inspector
self.inspector = oinspect.Inspector(oinspect.InspectColors,
PyColorize.ANSICodeColors,
self.colors,
self.object_info_string_level)
def init_io(self):
# This will just use sys.stdout and sys.stderr. If you want to
# override sys.stdout and sys.stderr themselves, you need to do that
# *before* instantiating this class, because io holds onto
# references to the underlying streams.
# io.std* are deprecated, but don't show our own deprecation warnings
# during initialization of the deprecated API.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
io.stdout = io.IOStream(sys.stdout)
io.stderr = io.IOStream(sys.stderr)
def init_prompts(self):
# Set system prompts, so that scripts can decide if they are running
# interactively.
sys.ps1 = 'In : '
sys.ps2 = '...: '
sys.ps3 = 'Out: '
def init_display_formatter(self):
self.display_formatter = DisplayFormatter(parent=self)
self.configurables.append(self.display_formatter)
def init_display_pub(self):
self.display_pub = self.display_pub_class(parent=self)
self.configurables.append(self.display_pub)
def init_data_pub(self):
if not self.data_pub_class:
self.data_pub = None
return
self.data_pub = self.data_pub_class(parent=self)
self.configurables.append(self.data_pub)
def init_displayhook(self):
# Initialize displayhook, set in/out prompts and printing system
self.displayhook = self.displayhook_class(
parent=self,
shell=self,
cache_size=self.cache_size,
)
self.configurables.append(self.displayhook)
        # This is a context manager that installs/removes the displayhook at
# the appropriate time.
self.display_trap = DisplayTrap(hook=self.displayhook)
def init_virtualenv(self):
"""Add a virtualenv to sys.path so the user can import modules from it.
This isn't perfect: it doesn't use the Python interpreter with which the
virtualenv was built, and it ignores the --no-site-packages option. A
warning will appear suggesting the user installs IPython in the
virtualenv, but for many cases, it probably works well enough.
Adapted from code snippets online.
http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
"""
if 'VIRTUAL_ENV' not in os.environ:
# Not in a virtualenv
return
p = os.path.normcase(sys.executable)
p_venv = os.path.normcase(os.environ['VIRTUAL_ENV'])
# executable path should end like /bin/python or \\scripts\\python.exe
p_exe_up2 = os.path.dirname(os.path.dirname(p))
if p_exe_up2 and os.path.exists(p_venv) and os.path.samefile(p_exe_up2, p_venv):
# Our exe is inside the virtualenv, don't need to do anything.
return
# fallback venv detection:
# stdlib venv may symlink sys.executable, so we can't use realpath.
# but others can symlink *to* the venv Python, so we can't just use sys.executable.
# So we just check every item in the symlink tree (generally <= 3)
paths = [p]
while os.path.islink(p):
p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))
paths.append(p)
# In Cygwin paths like "c:\..." and '\cygdrive\c\...' are possible
if p_venv.startswith('\\cygdrive'):
p_venv = p_venv[11:]
elif len(p_venv) >= 2 and p_venv[1] == ':':
p_venv = p_venv[2:]
if any(p_venv in p for p in paths):
# Running properly in the virtualenv, don't need to do anything
return
warn("Attempting to work in a virtualenv. If you encounter problems, please "
"install IPython inside the virtualenv.")
if sys.platform == "win32":
virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'Lib', 'site-packages')
else:
virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'lib',
'python%d.%d' % sys.version_info[:2], 'site-packages')
import site
sys.path.insert(0, virtual_env)
site.addsitedir(virtual_env)
#-------------------------------------------------------------------------
# Things related to injections into the sys module
#-------------------------------------------------------------------------
def save_sys_module_state(self):
"""Save the state of hooks in the sys module.
This has to be called after self.user_module is created.
"""
self._orig_sys_module_state = {'stdin': sys.stdin,
'stdout': sys.stdout,
'stderr': sys.stderr,
'excepthook': sys.excepthook}
self._orig_sys_modules_main_name = self.user_module.__name__
self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
def restore_sys_module_state(self):
"""Restore the state of the sys module."""
try:
for k, v in self._orig_sys_module_state.items():
setattr(sys, k, v)
except AttributeError:
pass
        # Reset what was done in self.init_sys_modules
if self._orig_sys_modules_main_mod is not None:
sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
#-------------------------------------------------------------------------
# Things related to the banner
#-------------------------------------------------------------------------
@property
def banner(self):
banner = self.banner1
if self.profile and self.profile != 'default':
banner += '\nIPython profile: %s\n' % self.profile
if self.banner2:
banner += '\n' + self.banner2
return banner
def show_banner(self, banner=None):
if banner is None:
banner = self.banner
sys.stdout.write(banner)
#-------------------------------------------------------------------------
# Things related to hooks
#-------------------------------------------------------------------------
def init_hooks(self):
# hooks holds pointers used for user-side customizations
self.hooks = Struct()
self.strdispatchers = {}
# Set all default hooks, defined in the IPython.hooks module.
hooks = IPython.core.hooks
for hook_name in hooks.__all__:
# default hooks have priority 100, i.e. low; user hooks should have
# 0-100 priority
self.set_hook(hook_name,getattr(hooks,hook_name), 100, _warn_deprecated=False)
if self.display_page:
self.set_hook('show_in_pager', page.as_hook(page.display_page), 90)
def set_hook(self,name,hook, priority=50, str_key=None, re_key=None,
_warn_deprecated=True):
"""set_hook(name,hook) -> sets an internal IPython hook.
IPython exposes some of its internal API as user-modifiable hooks. By
adding your function to one of these hooks, you can modify IPython's
        behavior to call your own routines at runtime."""
# At some point in the future, this should validate the hook before it
# accepts it. Probably at least check that the hook takes the number
# of args it's supposed to.
f = types.MethodType(hook,self)
# check if the hook is for strdispatcher first
if str_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_s(str_key, f, priority )
self.strdispatchers[name] = sdp
return
if re_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_re(re.compile(re_key), f, priority )
self.strdispatchers[name] = sdp
return
dp = getattr(self.hooks, name, None)
if name not in IPython.core.hooks.__all__:
print("Warning! Hook '%s' is not one of %s" % \
(name, IPython.core.hooks.__all__ ))
if _warn_deprecated and (name in IPython.core.hooks.deprecated):
alternative = IPython.core.hooks.deprecated[name]
warn("Hook {} is deprecated. Use {} instead.".format(name, alternative), stacklevel=2)
if not dp:
dp = IPython.core.hooks.CommandChainDispatcher()
try:
dp.add(f,priority)
except AttributeError:
# it was not commandchain, plain old func - replace
dp = f
setattr(self.hooks,name, dp)
#-------------------------------------------------------------------------
# Things related to events
#-------------------------------------------------------------------------
def init_events(self):
self.events = EventManager(self, available_events)
self.events.register("pre_execute", self._clear_warning_registry)
def register_post_execute(self, func):
"""DEPRECATED: Use ip.events.register('post_run_cell', func)
Register a function for calling after code execution.
"""
warn("ip.register_post_execute is deprecated, use "
"ip.events.register('post_run_cell', func) instead.", stacklevel=2)
self.events.register('post_run_cell', func)
def _clear_warning_registry(self):
# clear the warning registry, so that different code blocks with
# overlapping line number ranges don't cause spurious suppression of
# warnings (see gh-6611 for details)
if "__warningregistry__" in self.user_global_ns:
del self.user_global_ns["__warningregistry__"]
#-------------------------------------------------------------------------
# Things related to the "main" module
#-------------------------------------------------------------------------
def new_main_mod(self, filename, modname):
"""Return a new 'main' module object for user code execution.
``filename`` should be the path of the script which will be run in the
module. Requests with the same filename will get the same module, with
its namespace cleared.
``modname`` should be the module name - normally either '__main__' or
the basename of the file without the extension.
When scripts are executed via %run, we must keep a reference to their
__main__ module around so that Python doesn't
clear it, rendering references to module globals useless.
This method keeps said reference in a private dict, keyed by the
absolute path of the script. This way, for multiple executions of the
same script we only keep one copy of the namespace (the last one),
thus preventing memory leaks from old references while allowing the
objects from the last execution to be accessible.
"""
filename = os.path.abspath(filename)
try:
main_mod = self._main_mod_cache[filename]
except KeyError:
main_mod = self._main_mod_cache[filename] = types.ModuleType(
modname,
doc="Module created for script run in IPython")
else:
main_mod.__dict__.clear()
main_mod.__name__ = modname
main_mod.__file__ = filename
# It seems pydoc (and perhaps others) needs any module instance to
# implement a __nonzero__ method
main_mod.__nonzero__ = lambda : True
return main_mod
def clear_main_mod_cache(self):
"""Clear the cache of main modules.
Mainly for use by utilities like %reset.
Examples
--------
In [15]: import IPython
In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython')
In [17]: len(_ip._main_mod_cache) > 0
Out[17]: True
In [18]: _ip.clear_main_mod_cache()
In [19]: len(_ip._main_mod_cache) == 0
Out[19]: True
"""
self._main_mod_cache.clear()
#-------------------------------------------------------------------------
# Things related to debugging
#-------------------------------------------------------------------------
def init_pdb(self):
# Set calling of pdb on exceptions
# self.call_pdb is a property
self.call_pdb = self.pdb
def _get_call_pdb(self):
return self._call_pdb
def _set_call_pdb(self,val):
if val not in (0,1,False,True):
raise ValueError('new call_pdb value must be boolean')
# store value in instance
self._call_pdb = val
# notify the actual exception handlers
self.InteractiveTB.call_pdb = val
call_pdb = property(_get_call_pdb,_set_call_pdb,None,
'Control auto-activation of pdb at exceptions')
def debugger(self,force=False):
"""Call the pdb debugger.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
"""
if not (force or self.call_pdb):
return
if not hasattr(sys,'last_traceback'):
error('No traceback has been produced, nothing to debug.')
return
self.InteractiveTB.debugger(force=True)
#-------------------------------------------------------------------------
# Things related to IPython's various namespaces
#-------------------------------------------------------------------------
default_user_namespaces = True
def init_create_namespaces(self, user_module=None, user_ns=None):
# Create the namespace where the user will operate. user_ns is
# normally the only one used, and it is passed to the exec calls as
# the locals argument. But we do carry a user_global_ns namespace
        # given as the exec 'globals' argument. This is useful in embedding
# situations where the ipython shell opens in a context where the
# distinction between locals and globals is meaningful. For
# non-embedded contexts, it is just the same object as the user_ns dict.
# FIXME. For some strange reason, __builtins__ is showing up at user
# level as a dict instead of a module. This is a manual fix, but I
# should really track down where the problem is coming from. Alex
# Schmolck reported this problem first.
# A useful post by Alex Martelli on this topic:
# Re: inconsistent value from __builtins__
# Von: Alex Martelli <aleaxit@yahoo.com>
# Datum: Freitag 01 Oktober 2004 04:45:34 nachmittags/abends
# Gruppen: comp.lang.python
# Michael Hohn <hohn@hooknose.lbl.gov> wrote:
# > >>> print type(builtin_check.get_global_binding('__builtins__'))
# > <type 'dict'>
# > >>> print type(__builtins__)
# > <type 'module'>
# > Is this difference in return value intentional?
# Well, it's documented that '__builtins__' can be either a dictionary
# or a module, and it's been that way for a long time. Whether it's
# intentional (or sensible), I don't know. In any case, the idea is
# that if you need to access the built-in namespace directly, you
# should start with "import __builtin__" (note, no 's') which will
# definitely give you a module. Yeah, it's somewhat confusing:-(.
# These routines return a properly built module and dict as needed by
# the rest of the code, and can also be used by extension writers to
# generate properly initialized namespaces.
if (user_ns is not None) or (user_module is not None):
self.default_user_namespaces = False
self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)
# A record of hidden variables we have added to the user namespace, so
# we can list later only variables defined in actual interactive use.
self.user_ns_hidden = {}
# Now that FakeModule produces a real module, we've run into a nasty
# problem: after script execution (via %run), the module where the user
# code ran is deleted. Now that this object is a true module (needed
# so doctest and other tools work correctly), the Python module
# teardown mechanism runs over it, and sets to None every variable
# present in that module. Top-level references to objects from the
# script survive, because the user_ns is updated with them. However,
# calling functions defined in the script that use other things from
# the script will fail, because the function's closure had references
# to the original objects, which are now all None. So we must protect
# these modules from deletion by keeping a cache.
#
# To avoid keeping stale modules around (we only need the one from the
# last run), we use a dict keyed with the full path to the script, so
# only the last version of the module is held in the cache. Note,
# however, that we must cache the module *namespace contents* (their
# __dict__). Because if we try to cache the actual modules, old ones
# (uncached) could be destroyed while still holding references (such as
        # those held by GUI objects that tend to be long-lived).
#
# The %reset command will flush this cache. See the cache_main_mod()
# and clear_main_mod_cache() methods for details on use.
# This is the cache used for 'main' namespaces
self._main_mod_cache = {}
# A table holding all the namespaces IPython deals with, so that
# introspection facilities can search easily.
self.ns_table = {'user_global':self.user_module.__dict__,
'user_local':self.user_ns,
'builtin':builtin_mod.__dict__
}
@property
def user_global_ns(self):
return self.user_module.__dict__
def prepare_user_module(self, user_module=None, user_ns=None):
"""Prepare the module and namespace in which user code will be run.
When IPython is started normally, both parameters are None: a new module
is created automatically, and its __dict__ used as the namespace.
If only user_module is provided, its __dict__ is used as the namespace.
If only user_ns is provided, a dummy module is created, and user_ns
becomes the global namespace. If both are provided (as they may be
when embedding), user_ns is the local namespace, and user_module
provides the global namespace.
Parameters
----------
user_module : module, optional
The current user module in which IPython is being run. If None,
a clean module will be created.
user_ns : dict, optional
A namespace in which to run interactive commands.
Returns
-------
A tuple of user_module and user_ns, each properly initialised.
"""
if user_module is None and user_ns is not None:
user_ns.setdefault("__name__", "__main__")
user_module = DummyMod()
user_module.__dict__ = user_ns
if user_module is None:
user_module = types.ModuleType("__main__",
doc="Automatically created module for IPython interactive environment")
# We must ensure that __builtin__ (without the final 's') is always
# available and pointing to the __builtin__ *module*. For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
user_module.__dict__.setdefault('__builtin__', builtin_mod)
user_module.__dict__.setdefault('__builtins__', builtin_mod)
if user_ns is None:
user_ns = user_module.__dict__
return user_module, user_ns
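    # Illustrative call (not in the original source), e.g. for embedding with
    # a pre-populated namespace:
    #
    #     ns = {'x': 1}
    #     module, local_ns = get_ipython().prepare_user_module(user_ns=ns)
    #     # local_ns is ns; the dummy module wraps the same dict as __dict__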
def init_sys_modules(self):
# We need to insert into sys.modules something that looks like a
# module but which accesses the IPython namespace, for shelve and
# pickle to work interactively. Normally they rely on getting
# everything out of __main__, but for embedding purposes each IPython
# instance has its own private namespace, so we can't go shoving
# everything into __main__.
# note, however, that we should only do this for non-embedded
# ipythons, which really mimic the __main__.__dict__ with their own
# namespace. Embedded instances, on the other hand, should not do
# this because they need to manage the user local/global namespaces
# only, but they live within a 'normal' __main__ (meaning, they
# shouldn't overtake the execution environment of the script they're
# embedded in).
# This is overridden in the InteractiveShellEmbed subclass to a no-op.
main_name = self.user_module.__name__
sys.modules[main_name] = self.user_module
def init_user_ns(self):
"""Initialize all user-visible namespaces to their minimum defaults.
Certain history lists are also initialized here, as they effectively
act as user namespaces.
Notes
-----
All data structures here are only filled in, they are NOT reset by this
method. If they were not empty before, data will simply be added to
them.
"""
        # This function works in two parts: first we put a few things in
        # user_ns, and we sync those contents into user_ns_hidden so that
        # these initial variables aren't shown by %who. After the sync, we
        # add the rest of what we *do* want the user to see with %who even on
        # a new session (probably nothing, so they really only see their own
        # stuff).
# The user dict must *always* have a __builtin__ reference to the
# Python standard __builtin__ namespace, which must be imported.
# This is so that certain operations in prompt evaluation can be
# reliably executed with builtins. Note that we can NOT use
# __builtins__ (note the 's'), because that can either be a dict or a
# module, and can even mutate at runtime, depending on the context
# (Python makes no guarantees on it). In contrast, __builtin__ is
# always a module object, though it must be explicitly imported.
# For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
ns = {}
# make global variables for user access to the histories
ns['_ih'] = self.history_manager.input_hist_parsed
ns['_oh'] = self.history_manager.output_hist
ns['_dh'] = self.history_manager.dir_hist
# user aliases to input and output histories. These shouldn't show up
# in %who, as they can have very large reprs.
ns['In'] = self.history_manager.input_hist_parsed
ns['Out'] = self.history_manager.output_hist
# Store myself as the public api!!!
ns['get_ipython'] = self.get_ipython
ns['exit'] = self.exiter
ns['quit'] = self.exiter
# Sync what we've added so far to user_ns_hidden so these aren't seen
# by %who
self.user_ns_hidden.update(ns)
# Anything put into ns now would show up in %who. Think twice before
# putting anything here, as we really want %who to show the user their
# stuff, not our variables.
# Finally, update the real user's namespace
self.user_ns.update(ns)
@property
def all_ns_refs(self):
"""Get a list of references to all the namespace dictionaries in which
IPython might store a user-created object.
Note that this does not include the displayhook, which also caches
objects from the output."""
return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
[m.__dict__ for m in self._main_mod_cache.values()]
def reset(self, new_session=True):
"""Clear all internal namespaces, and attempt to release references to
user objects.
If new_session is True, a new history session will be opened.
"""
# Clear histories
self.history_manager.reset(new_session)
# Reset counter used to index all histories
if new_session:
self.execution_count = 1
# Reset last execution result
self.last_execution_succeeded = True
self.last_execution_result = None
# Flush cached output items
if self.displayhook.do_full_cache:
self.displayhook.flush()
# The main execution namespaces must be cleared very carefully,
# skipping the deletion of the builtin-related keys, because doing so
# would cause errors in many object's __del__ methods.
if self.user_ns is not self.user_global_ns:
self.user_ns.clear()
ns = self.user_global_ns
drop_keys = set(ns.keys())
drop_keys.discard('__builtin__')
drop_keys.discard('__builtins__')
drop_keys.discard('__name__')
for k in drop_keys:
del ns[k]
self.user_ns_hidden.clear()
# Restore the user namespaces to minimal usability
self.init_user_ns()
# Restore the default and user aliases
self.alias_manager.clear_aliases()
self.alias_manager.init_aliases()
# Now define aliases that only make sense on the terminal, because they
# need direct access to the console in a way that we can't emulate in
# GUI or web frontend
if os.name == 'posix':
for cmd in ('clear', 'more', 'less', 'man'):
self.alias_manager.soft_define_alias(cmd, cmd)
# Flush the private list of module references kept for script
# execution protection
self.clear_main_mod_cache()
def del_var(self, varname, by_name=False):
"""Delete a variable from the various namespaces, so that, as
far as possible, we're not keeping any hidden references to it.
Parameters
----------
varname : str
The name of the variable to delete.
by_name : bool
If True, delete variables with the given name in each
namespace. If False (default), find the variable in the user
namespace, and delete references to it.
"""
if varname in ('__builtin__', '__builtins__'):
raise ValueError("Refusing to delete %s" % varname)
ns_refs = self.all_ns_refs
if by_name: # Delete by name
for ns in ns_refs:
try:
del ns[varname]
except KeyError:
pass
else: # Delete by object
try:
obj = self.user_ns[varname]
except KeyError:
raise NameError("name '%s' is not defined" % varname)
# Also check in output history
ns_refs.append(self.history_manager.output_hist)
for ns in ns_refs:
to_delete = [n for n, o in ns.items() if o is obj]
for name in to_delete:
del ns[name]
# Ensure it is removed from the last execution result
if self.last_execution_result.result is obj:
self.last_execution_result = None
# displayhook keeps extra references, but not in a dictionary
for name in ('_', '__', '___'):
if getattr(self.displayhook, name) is obj:
setattr(self.displayhook, name, None)
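    # Illustrative usage (a sketch, not part of the original source):
    #
    #     ip = get_ipython()
    #     ip.push({'tmp': object()})
    #     ip.del_var('tmp')    # drops every reference IPython holds to it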
def reset_selective(self, regex=None):
"""Clear selective variables from internal namespaces based on a
specified regular expression.
Parameters
----------
regex : string or compiled pattern, optional
A regular expression pattern that will be used in searching
variable names in the users namespaces.
"""
if regex is not None:
try:
m = re.compile(regex)
except TypeError:
raise TypeError('regex must be a string or compiled pattern')
# Search for keys in each namespace that match the given regex
# If a match is found, delete the key/value pair.
            for ns in self.all_ns_refs:
                # Iterate over a snapshot of the keys: deleting from a dict
                # while iterating over it raises a RuntimeError.
                for var in list(ns):
                    if m.search(var):
                        del ns[var]
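    # Illustrative usage (not from the original source): drop every user
    # variable whose name starts with 'tmp_':
    #
    #     get_ipython().reset_selective(r'^tmp_')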
def push(self, variables, interactive=True):
"""Inject a group of variables into the IPython user namespace.
Parameters
----------
variables : dict, str or list/tuple of str
The variables to inject into the user's namespace. If a dict, a
simple update is done. If a str, the string is assumed to have
variable names separated by spaces. A list/tuple of str can also
            be used to give the variable names. If just the variable names
            are given (list/tuple/str), the variable values are looked up in
            the caller's frame.
interactive : bool
If True (default), the variables will be listed with the ``who``
magic.
"""
vdict = None
# We need a dict of name/value pairs to do namespace updates.
if isinstance(variables, dict):
vdict = variables
elif isinstance(variables, (str, list, tuple)):
if isinstance(variables, str):
vlist = variables.split()
else:
vlist = variables
vdict = {}
cf = sys._getframe(1)
for name in vlist:
try:
vdict[name] = eval(name, cf.f_globals, cf.f_locals)
except:
print('Could not get variable %s from %s' %
(name,cf.f_code.co_name))
else:
raise ValueError('variables must be a dict/str/list/tuple')
# Propagate variables to user namespace
self.user_ns.update(vdict)
# And configure interactive visibility
user_ns_hidden = self.user_ns_hidden
if interactive:
for name in vdict:
user_ns_hidden.pop(name, None)
else:
user_ns_hidden.update(vdict)
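    # Illustrative usage (a sketch, not part of the original source):
    #
    #     x, y = 1, 2
    #     ip = get_ipython()
    #     ip.push('x y')                        # values found in this frame
    #     ip.push({'z': 3}, interactive=False)  # hidden from %who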
def drop_by_id(self, variables):
"""Remove a dict of variables from the user namespace, if they are the
same as the values in the dictionary.
This is intended for use by extensions: variables that they've added can
be taken back out if they are unloaded, without removing any that the
user has overwritten.
Parameters
----------
variables : dict
A dictionary mapping object names (as strings) to the objects.
"""
for name, obj in variables.items():
if name in self.user_ns and self.user_ns[name] is obj:
del self.user_ns[name]
self.user_ns_hidden.pop(name, None)
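    # Illustrative usage (not in the original source), e.g. from an
    # extension's unload function; `some_helper` is a hypothetical object:
    #
    #     injected = {'helper': some_helper}
    #     ip.push(injected)
    #     ip.drop_by_id(injected)  # removed only if still our object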
#-------------------------------------------------------------------------
# Things related to object introspection
#-------------------------------------------------------------------------
def _ofind(self, oname, namespaces=None):
"""Find an object in the available namespaces.
self._ofind(oname) -> dict with keys: found,obj,ospace,ismagic
Has special code to detect magic functions.
"""
oname = oname.strip()
if not oname.startswith(ESC_MAGIC) and \
not oname.startswith(ESC_MAGIC2) and \
not all(a.isidentifier() for a in oname.split(".")):
return {'found': False}
if namespaces is None:
# Namespaces to search in:
# Put them in a list. The order is important so that we
# find things in the same order that Python finds them.
namespaces = [ ('Interactive', self.user_ns),
('Interactive (global)', self.user_global_ns),
('Python builtin', builtin_mod.__dict__),
]
ismagic = False
isalias = False
found = False
ospace = None
parent = None
obj = None
# Look for the given name by splitting it in parts. If the head is
# found, then we look for all the remaining parts as members, and only
# declare success if we can find them all.
oname_parts = oname.split('.')
oname_head, oname_rest = oname_parts[0],oname_parts[1:]
for nsname,ns in namespaces:
try:
obj = ns[oname_head]
except KeyError:
continue
else:
for idx, part in enumerate(oname_rest):
try:
parent = obj
# The last part is looked up in a special way to avoid
# descriptor invocation as it may raise or have side
# effects.
if idx == len(oname_rest) - 1:
obj = self._getattr_property(obj, part)
else:
obj = getattr(obj, part)
except:
# Blanket except b/c some badly implemented objects
# allow __getattr__ to raise exceptions other than
# AttributeError, which then crashes IPython.
break
else:
# If we finish the for loop (no break), we got all members
found = True
ospace = nsname
break # namespace loop
# Try to see if it's magic
if not found:
obj = None
if oname.startswith(ESC_MAGIC2):
oname = oname.lstrip(ESC_MAGIC2)
obj = self.find_cell_magic(oname)
elif oname.startswith(ESC_MAGIC):
oname = oname.lstrip(ESC_MAGIC)
obj = self.find_line_magic(oname)
else:
# search without prefix, so run? will find %run?
obj = self.find_line_magic(oname)
if obj is None:
obj = self.find_cell_magic(oname)
if obj is not None:
found = True
ospace = 'IPython internal'
ismagic = True
isalias = isinstance(obj, Alias)
# Last try: special-case some literals like '', [], {}, etc:
if not found and oname_head in ["''",'""','[]','{}','()']:
obj = eval(oname_head)
found = True
ospace = 'Interactive'
return {
'obj':obj,
'found':found,
'parent':parent,
'ismagic':ismagic,
'isalias':isalias,
'namespace':ospace
}
@staticmethod
def _getattr_property(obj, attrname):
"""Property-aware getattr to use in object finding.
If attrname represents a property, return it unevaluated (in case it has
        side effects or raises an error).
"""
if not isinstance(obj, type):
try:
# `getattr(type(obj), attrname)` is not guaranteed to return
# `obj`, but does so for property:
#
# property.__get__(self, None, cls) -> self
#
# The universal alternative is to traverse the mro manually
# searching for attrname in class dicts.
attr = getattr(type(obj), attrname)
except AttributeError:
pass
else:
# This relies on the fact that data descriptors (with both
# __get__ & __set__ magic methods) take precedence over
# instance-level attributes:
#
# class A(object):
# @property
# def foobar(self): return 123
# a = A()
# a.__dict__['foobar'] = 345
# a.foobar # == 123
#
# So, a property may be returned right away.
if isinstance(attr, property):
return attr
# Nothing helped, fall back.
return getattr(obj, attrname)
def _object_find(self, oname, namespaces=None):
"""Find an object and return a struct with info about it."""
return Struct(self._ofind(oname, namespaces))
def _inspect(self, meth, oname, namespaces=None, **kw):
"""Generic interface to the inspector system.
This function is meant to be called by pdef, pdoc & friends.
"""
info = self._object_find(oname, namespaces)
docformat = sphinxify if self.sphinxify_docstring else None
if info.found:
pmethod = getattr(self.inspector, meth)
# TODO: only apply format_screen to the plain/text repr of the mime
# bundle.
formatter = format_screen if info.ismagic else docformat
if meth == 'pdoc':
pmethod(info.obj, oname, formatter)
elif meth == 'pinfo':
pmethod(info.obj, oname, formatter, info,
enable_html_pager=self.enable_html_pager, **kw)
else:
pmethod(info.obj, oname)
else:
print('Object `%s` not found.' % oname)
return 'not found' # so callers can take other action
def object_inspect(self, oname, detail_level=0):
"""Get object info about oname"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
return self.inspector.info(info.obj, oname, info=info,
detail_level=detail_level
)
else:
return oinspect.object_info(name=oname, found=False)
def object_inspect_text(self, oname, detail_level=0):
"""Get object info as formatted text"""
return self.object_inspect_mime(oname, detail_level)['text/plain']
def object_inspect_mime(self, oname, detail_level=0):
"""Get object info as a mimebundle of formatted representations.
A mimebundle is a dictionary, keyed by mime-type.
It must always have the key `'text/plain'`.
"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
return self.inspector._get_info(info.obj, oname, info=info,
detail_level=detail_level
)
else:
raise KeyError(oname)
#-------------------------------------------------------------------------
# Things related to history management
#-------------------------------------------------------------------------
def init_history(self):
"""Sets up the command history, and starts regular autosaves."""
self.history_manager = HistoryManager(shell=self, parent=self)
self.configurables.append(self.history_manager)
#-------------------------------------------------------------------------
# Things related to exception handling and tracebacks (not debugging)
#-------------------------------------------------------------------------
debugger_cls = Pdb
def init_traceback_handlers(self, custom_exceptions):
# Syntax error handler.
self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor', parent=self)
# The interactive one is initialized with an offset, meaning we always
# want to remove the topmost item in the traceback, which is our own
# internal code. Valid modes: ['Plain','Context','Verbose','Minimal']
self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain',
color_scheme='NoColor',
tb_offset = 1,
check_cache=check_linecache_ipython,
debugger_cls=self.debugger_cls, parent=self)
# The instance will store a pointer to the system-wide exception hook,
# so that runtime code (such as magics) can access it. This is because
# during the read-eval loop, it may get temporarily overwritten.
self.sys_excepthook = sys.excepthook
# and add any custom exception handlers the user may have specified
self.set_custom_exc(*custom_exceptions)
# Set the exception mode
self.InteractiveTB.set_mode(mode=self.xmode)
def set_custom_exc(self, exc_tuple, handler):
"""set_custom_exc(exc_tuple, handler)
Set a custom exception handler, which will be called if any of the
exceptions in exc_tuple occur in the mainloop (specifically, in the
run_code() method).
Parameters
----------
exc_tuple : tuple of exception classes
A *tuple* of exception classes, for which to call the defined
handler. It is very important that you use a tuple, and NOT A
LIST here, because of the way Python's except statement works. If
you only want to trap a single exception, use a singleton tuple::
exc_tuple == (MyCustomException,)
handler : callable
handler must have the following signature::
def my_handler(self, etype, value, tb, tb_offset=None):
...
return structured_traceback
Your handler must return a structured traceback (a list of strings),
or None.
This will be made into an instance method (via types.MethodType)
of IPython itself, and it will be called if any of the exceptions
listed in the exc_tuple are caught. If the handler is None, an
internal basic one is used, which just prints basic info.
To protect IPython from crashes, if your handler ever raises an
exception or returns an invalid result, it will be immediately
disabled.
WARNING: by putting in your own exception handler into IPython's main
execution loop, you run a very good chance of nasty crashes. This
facility should only be used if you really know what you are doing."""
if not isinstance(exc_tuple, tuple):
raise TypeError("The custom exceptions must be given as a tuple.")
def dummy_handler(self, etype, value, tb, tb_offset=None):
print('*** Simple custom exception handler ***')
print('Exception type :', etype)
print('Exception value:', value)
print('Traceback :', tb)
def validate_stb(stb):
"""validate structured traceback return type
return type of CustomTB *should* be a list of strings, but allow
single strings or None, which are harmless.
This function will *always* return a list of strings,
and will raise a TypeError if stb is inappropriate.
"""
msg = "CustomTB must return list of strings, not %r" % stb
if stb is None:
return []
elif isinstance(stb, str):
return [stb]
elif not isinstance(stb, list):
raise TypeError(msg)
# it's a list
for line in stb:
# check every element
if not isinstance(line, str):
raise TypeError(msg)
return stb
if handler is None:
wrapped = dummy_handler
else:
def wrapped(self,etype,value,tb,tb_offset=None):
"""wrap CustomTB handler, to protect IPython from user code
This makes it harder (but not impossible) for custom exception
handlers to crash IPython.
"""
try:
stb = handler(self,etype,value,tb,tb_offset=tb_offset)
return validate_stb(stb)
except:
# clear custom handler immediately
self.set_custom_exc((), None)
print("Custom TB Handler failed, unregistering", file=sys.stderr)
# show the exception in handler first
stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
print(self.InteractiveTB.stb2text(stb))
print("The original exception:")
stb = self.InteractiveTB.structured_traceback(
(etype,value,tb), tb_offset=tb_offset
)
return stb
self.CustomTB = types.MethodType(wrapped,self)
self.custom_exceptions = exc_tuple
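    # Illustrative handler registration (a sketch, not part of the original
    # source), following the signature documented above. The handler is bound
    # to the shell, so `self.InteractiveTB` is available:
    #
    #     def my_handler(self, etype, value, tb, tb_offset=None):
    #         print('custom handling of', etype.__name__)
    #         return self.InteractiveTB.structured_traceback(
    #             etype, value, tb, tb_offset=tb_offset)
    #
    #     get_ipython().set_custom_exc((ValueError,), my_handler)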
def excepthook(self, etype, value, tb):
"""One more defense for GUI apps that call sys.excepthook.
GUI frameworks like wxPython trap exceptions and call
sys.excepthook themselves. I guess this is a feature that
enables them to keep running after exceptions that would
otherwise kill their mainloop. This is a bother for IPython
        which expects to catch all of the program's exceptions with a
        try/except statement.
Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
any app directly invokes sys.excepthook, it will look to the user like
IPython crashed. In order to work around this, we can disable the
CrashHandler and replace it with this excepthook instead, which prints a
regular traceback using our InteractiveTB. In this fashion, apps which
call sys.excepthook will generate a regular-looking exception from
IPython, and the CrashHandler will only be triggered by real IPython
crashes.
This hook should be used sparingly, only in places which are not likely
to be true IPython errors.
"""
self.showtraceback((etype, value, tb), tb_offset=0)
def _get_exc_info(self, exc_tuple=None):
"""get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
Ensures sys.last_type,value,traceback hold the exc_info we found,
from whichever source.
raises ValueError if none of these contain any information
"""
if exc_tuple is None:
etype, value, tb = sys.exc_info()
else:
etype, value, tb = exc_tuple
if etype is None:
if hasattr(sys, 'last_type'):
etype, value, tb = sys.last_type, sys.last_value, \
sys.last_traceback
if etype is None:
raise ValueError("No exception to find")
# Now store the exception info in sys.last_type etc.
# WARNING: these variables are somewhat deprecated and not
# necessarily safe to use in a threaded environment, but tools
# like pdb depend on their existence, so let's set them. If we
# find problems in the field, we'll need to revisit their use.
sys.last_type = etype
sys.last_value = value
sys.last_traceback = tb
return etype, value, tb
def show_usage_error(self, exc):
"""Show a short message for UsageErrors
These are special exceptions that shouldn't show a traceback.
"""
print("UsageError: %s" % exc, file=sys.stderr)
def get_exception_only(self, exc_tuple=None):
"""
Return as a string (ending with a newline) the exception that
just occurred, without any traceback.
"""
etype, value, tb = self._get_exc_info(exc_tuple)
msg = traceback.format_exception_only(etype, value)
return ''.join(msg)
def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
exception_only=False, running_compiled_code=False):
"""Display the exception that just occurred.
If nothing is known about the exception, this is the method which
should be used throughout the code for presenting user tracebacks,
rather than directly invoking the InteractiveTB object.
A specific showsyntaxerror() also exists, but this method can take
care of calling it if needed, so unless you are explicitly catching a
SyntaxError exception, don't try to analyze the stack manually and
simply call this method."""
try:
try:
etype, value, tb = self._get_exc_info(exc_tuple)
except ValueError:
print('No traceback available to show.', file=sys.stderr)
return
if issubclass(etype, SyntaxError):
# Though this won't be called by syntax errors in the input
# line, there may be SyntaxError cases with imported code.
self.showsyntaxerror(filename, running_compiled_code)
elif etype is UsageError:
self.show_usage_error(value)
else:
if exception_only:
stb = ['An exception has occurred, use %tb to see '
'the full traceback.\n']
stb.extend(self.InteractiveTB.get_exception_only(etype,
value))
else:
try:
# Exception classes can customise their traceback - we
# use this in IPython.parallel for exceptions occurring
# in the engines. This should return a list of strings.
stb = value._render_traceback_()
except Exception:
stb = self.InteractiveTB.structured_traceback(etype,
value, tb, tb_offset=tb_offset)
self._showtraceback(etype, value, stb)
if self.call_pdb:
# drop into debugger
self.debugger(force=True)
return
# Actually show the traceback
self._showtraceback(etype, value, stb)
except KeyboardInterrupt:
print('\n' + self.get_exception_only(), file=sys.stderr)
def _showtraceback(self, etype, evalue, stb):
"""Actually show a traceback.
Subclasses may override this method to put the traceback on a different
place, like a side channel.
"""
print(self.InteractiveTB.stb2text(stb))
def showsyntaxerror(self, filename=None, running_compiled_code=False):
"""Display the syntax error that just occurred.
This doesn't display a stack trace because there isn't one.
If a filename is given, it is stuffed in the exception instead
of what was there before (because Python's parser always uses
"<string>" when reading from a string).
        If the syntax error occurred when running compiled code (i.e.
        running_compiled_code=True), a longer stack trace will be displayed.
"""
etype, value, last_traceback = self._get_exc_info()
if filename and issubclass(etype, SyntaxError):
try:
value.filename = filename
except:
# Not the format we expect; leave it alone
pass
# If the error occurred when executing compiled code, we should provide full stacktrace.
elist = traceback.extract_tb(last_traceback) if running_compiled_code else []
stb = self.SyntaxTB.structured_traceback(etype, value, elist)
self._showtraceback(etype, value, stb)
# This is overridden in TerminalInteractiveShell to show a message about
# the %paste magic.
def showindentationerror(self):
"""Called by _run_cell when there's an IndentationError in code entered
at the prompt.
This is overridden in TerminalInteractiveShell to show a message about
the %paste magic."""
self.showsyntaxerror()
#-------------------------------------------------------------------------
# Things related to readline
#-------------------------------------------------------------------------
def init_readline(self):
"""DEPRECATED
Moved to terminal subclass, here only to simplify the init logic."""
# Set a number of methods that depend on readline to be no-op
warnings.warn('`init_readline` is no-op since IPython 5.0 and is Deprecated',
DeprecationWarning, stacklevel=2)
self.set_custom_completer = no_op
@skip_doctest
def set_next_input(self, s, replace=False):
""" Sets the 'default' input string for the next command line.
Example::
            In [1]: _ip.set_next_input("Hello World")
            In [2]: Hello World_ # cursor is here
"""
self.rl_next_input = s
def _indent_current_str(self):
"""return the current level of indentation as a string"""
return self.input_splitter.get_indent_spaces() * ' '
#-------------------------------------------------------------------------
# Things related to text completion
#-------------------------------------------------------------------------
def init_completer(self):
"""Initialize the completion machinery.
This creates completion machinery that can be used by client code,
either interactively in-process (typically triggered by the readline
library), programmatically (such as in test suites) or out-of-process
(typically over the network by remote frontends).
"""
from IPython.core.completer import IPCompleter
from IPython.core.completerlib import (module_completer,
magic_run_completer, cd_completer, reset_completer)
self.Completer = IPCompleter(shell=self,
namespace=self.user_ns,
global_namespace=self.user_global_ns,
parent=self,
)
self.configurables.append(self.Completer)
# Add custom completers to the basic ones built into IPCompleter
sdisp = self.strdispatchers.get('complete_command', StrDispatch())
self.strdispatchers['complete_command'] = sdisp
self.Completer.custom_completers = sdisp
self.set_hook('complete_command', module_completer, str_key = 'import')
self.set_hook('complete_command', module_completer, str_key = 'from')
self.set_hook('complete_command', module_completer, str_key = '%aimport')
self.set_hook('complete_command', magic_run_completer, str_key = '%run')
self.set_hook('complete_command', cd_completer, str_key = '%cd')
self.set_hook('complete_command', reset_completer, str_key = '%reset')
@skip_doctest
def complete(self, text, line=None, cursor_pos=None):
"""Return the completed text and a list of completions.
Parameters
----------
text : string
A string of text to be completed on. It can be given as empty and
instead a line/position pair are given. In this case, the
completer itself will split the line like readline does.
line : string, optional
The complete line that text is part of.
cursor_pos : int, optional
The position of the cursor on the input line.
Returns
-------
text : string
The actual text that was completed.
matches : list
A sorted list with all possible completions.
The optional arguments allow the completion to take more context into
account, and are part of the low-level completion API.
This is a wrapper around the completion mechanism, similar to what
readline does at the command line when the TAB key is hit. By
exposing it as a method, it can be used by other non-readline
environments (such as GUIs) for text completion.
Simple usage example:
In [1]: x = 'hello'
In [2]: _ip.complete('x.l')
Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
"""
# Inject names into __builtin__ so we can complete on the added names.
with self.builtin_trap:
return self.Completer.complete(text, line, cursor_pos)
def set_custom_completer(self, completer, pos=0):
"""Adds a new custom completer function.
The position argument (defaults to 0) is the index in the completers
list where you want the completer to be inserted."""
newcomp = types.MethodType(completer,self.Completer)
self.Completer.matchers.insert(pos,newcomp)
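    # Illustrative custom completer (not from the original source). The
    # callable is bound to the Completer instance, so it receives it as
    # `self`; the second argument is assumed to be the text being completed:
    #
    #     def fruit_completer(self, text):
    #         return ['apple', 'banana', 'cherry']
    #
    #     get_ipython().set_custom_completer(fruit_completer)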
def set_completer_frame(self, frame=None):
"""Set the frame of the completer."""
if frame:
self.Completer.namespace = frame.f_locals
self.Completer.global_namespace = frame.f_globals
else:
self.Completer.namespace = self.user_ns
self.Completer.global_namespace = self.user_global_ns
#-------------------------------------------------------------------------
# Things related to magics
#-------------------------------------------------------------------------
def init_magics(self):
from IPython.core import magics as m
self.magics_manager = magic.MagicsManager(shell=self,
parent=self,
user_magics=m.UserMagics(self))
self.configurables.append(self.magics_manager)
# Expose as public API from the magics manager
self.register_magics = self.magics_manager.register
self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
m.ConfigMagics, m.DisplayMagics, m.ExecutionMagics,
m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
m.NamespaceMagics, m.OSMagics, m.PackagingMagics,
m.PylabMagics, m.ScriptMagics,
)
        if sys.version_info > (3, 5):
self.register_magics(m.AsyncMagics)
# Register Magic Aliases
mman = self.magics_manager
# FIXME: magic aliases should be defined by the Magics classes
# or in MagicsManager, not here
mman.register_alias('ed', 'edit')
mman.register_alias('hist', 'history')
mman.register_alias('rep', 'recall')
mman.register_alias('SVG', 'svg', 'cell')
mman.register_alias('HTML', 'html', 'cell')
mman.register_alias('file', 'writefile', 'cell')
# FIXME: Move the color initialization to the DisplayHook, which
# should be split into a prompt manager and displayhook. We probably
# even need a centralize colors management object.
self.run_line_magic('colors', self.colors)
# Defined here so that it's included in the documentation
@functools.wraps(magic.MagicsManager.register_function)
def register_magic_function(self, func, magic_kind='line', magic_name=None):
self.magics_manager.register_function(func,
magic_kind=magic_kind, magic_name=magic_name)
def run_line_magic(self, magic_name, line, _stack_depth=1):
"""Execute the given line magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the input line as a single string.
_stack_depth : int
If run_line_magic() is called from magic() then _stack_depth=2.
This is added to ensure backward compatibility for use of 'get_ipython().magic()'
"""
fn = self.find_line_magic(magic_name)
if fn is None:
cm = self.find_cell_magic(magic_name)
etpl = "Line magic function `%%%s` not found%s."
extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
'did you mean that instead?)' % magic_name )
raise UsageError(etpl % (magic_name, extra))
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
# Determine stack_depth depending on where run_line_magic() has been called
stack_depth = _stack_depth
if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
# magic has opted out of var_expand
magic_arg_s = line
else:
magic_arg_s = self.var_expand(line, stack_depth)
# Put magic args in a list so we can call with f(*a) syntax
args = [magic_arg_s]
kwargs = {}
# Grab local namespace if we need it:
if getattr(fn, "needs_local_scope", False):
kwargs['local_ns'] = sys._getframe(stack_depth).f_locals
with self.builtin_trap:
result = fn(*args, **kwargs)
return result
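    # Illustrative usage (a sketch, not part of the original source):
    #
    #     get_ipython().run_line_magic('timeit', 'sum(range(100))')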
def run_cell_magic(self, magic_name, line, cell):
"""Execute the given cell magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the first input line as a single string.
cell : str
The body of the cell as a (possibly multiline) string.
"""
fn = self.find_cell_magic(magic_name)
if fn is None:
lm = self.find_line_magic(magic_name)
etpl = "Cell magic `%%{0}` not found{1}."
extra = '' if lm is None else (' (But line magic `%{0}` exists, '
'did you mean that instead?)'.format(magic_name))
raise UsageError(etpl.format(magic_name, extra))
elif cell == '':
message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
if self.find_line_magic(magic_name) is not None:
message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
raise UsageError(message)
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
stack_depth = 2
if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
# magic has opted out of var_expand
magic_arg_s = line
else:
magic_arg_s = self.var_expand(line, stack_depth)
kwargs = {}
if getattr(fn, "needs_local_scope", False):
kwargs['local_ns'] = sys._getframe(stack_depth).f_locals
with self.builtin_trap:
args = (magic_arg_s, cell)
result = fn(*args, **kwargs)
return result
def find_line_magic(self, magic_name):
"""Find and return a line magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['line'].get(magic_name)
def find_cell_magic(self, magic_name):
"""Find and return a cell magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['cell'].get(magic_name)
def find_magic(self, magic_name, magic_kind='line'):
"""Find and return a magic of the given type by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics[magic_kind].get(magic_name)
def magic(self, arg_s):
"""DEPRECATED. Use run_line_magic() instead.
Call a magic function by name.
Input: a string containing the name of the magic function to call and
any additional arguments to be passed to the magic.
magic('name -opt foo bar') is equivalent to typing at the ipython
prompt:
In[1]: %name -opt foo bar
To call a magic without arguments, simply use magic('name').
This provides a proper Python function to call IPython's magics in any
valid Python code you can type at the interpreter, including loops and
compound statements.
"""
# TODO: should we issue a loud deprecation warning here?
magic_name, _, magic_arg_s = arg_s.partition(' ')
magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2)
#-------------------------------------------------------------------------
# Things related to macros
#-------------------------------------------------------------------------
def define_macro(self, name, themacro):
"""Define a new macro
Parameters
----------
name : str
The name of the macro.
themacro : str or Macro
The action to do upon invoking the macro. If a string, a new
Macro object is created by passing the string to it.
"""
from IPython.core import macro
if isinstance(themacro, str):
themacro = macro.Macro(themacro)
if not isinstance(themacro, macro.Macro):
raise ValueError('A macro must be a string or a Macro instance.')
self.user_ns[name] = themacro
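    # Illustrative usage (not from the original source):
    #
    #     get_ipython().define_macro('greet', 'print("hi")\nprint("bye")')
    #     # afterwards, entering `greet` at the prompt replays both lines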
#-------------------------------------------------------------------------
# Things related to the running of system commands
#-------------------------------------------------------------------------
def system_piped(self, cmd):
"""Call the given cmd in a subprocess, piping stdout/err
Parameters
----------
cmd : str
            Command to execute (cannot end in '&', as background processes are
            not supported). Should not be a command that expects input
other than simple text.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
# We do not support backgrounding processes because we either use
# pexpect or pipes to read from. Users can always just call
# os.system() or use ip.system=ip.system_raw
# if they really want a background process.
raise OSError("Background processes not supported.")
# we explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns.
self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
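    # Illustrative usage (not part of the original source): after running a
    # command, the exit status is available in the user namespace:
    #
    #     ip = get_ipython()
    #     ip.system('true')
    #     ip.user_ns['_exit_code']   # 0 on success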
def system_raw(self, cmd):
"""Call the given cmd in a subprocess using os.system on Windows or
subprocess.call using the system shell on other platforms.
Parameters
----------
cmd : str
Command to execute.
"""
cmd = self.var_expand(cmd, depth=1)
# protect os.system from UNC paths on Windows, which it can't handle:
if sys.platform == 'win32':
from IPython.utils._process_win32 import AvoidUNCPath
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
try:
ec = os.system(cmd)
except KeyboardInterrupt:
print('\n' + self.get_exception_only(), file=sys.stderr)
ec = -2
else:
# For posix the result of the subprocess.call() below is an exit
# code, which by convention is zero for success, positive for
# program failure. Exit codes above 128 are reserved for signals,
# and the formula for converting a signal to an exit code is usually
# signal_number+128. To more easily differentiate between exit
# codes and signals, ipython uses negative numbers. For instance
# since control-c is signal 2 but exit code 130, ipython's
# _exit_code variable will read -2. Note that some shells like
# csh and fish don't follow sh/bash conventions for exit codes.
executable = os.environ.get('SHELL', None)
try:
# Use env shell instead of default /bin/sh
ec = subprocess.call(cmd, shell=True, executable=executable)
except KeyboardInterrupt:
# intercept control-C; a long traceback is not useful here
print('\n' + self.get_exception_only(), file=sys.stderr)
ec = 130
if ec > 128:
ec = -(ec - 128)
# We explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns. Note the semantics
        # of _exit_code: for control-c, _exit_code == -signal.SIGINT,
# but raising SystemExit(_exit_code) will give status 254!
self.user_ns['_exit_code'] = ec
# use piped system by default, because it is better behaved
system = system_piped
def getoutput(self, cmd, split=True, depth=0):
"""Get output (possibly including stderr) from a subprocess.
Parameters
----------
cmd : str
            Command to execute (cannot end in '&', as background processes are
            not supported).
split : bool, optional
If True, split the output into an IPython SList. Otherwise, an
IPython LSString is returned. These are objects similar to normal
lists and strings, with a few convenience attributes for easier
manipulation of line-based output. You can use '?' on them for
details.
depth : int, optional
How many frames above the caller are the local variables which should
be expanded in the command string? The default (0) assumes that the
expansion variables are in the stack frame calling this function.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
raise OSError("Background processes not supported.")
out = getoutput(self.var_expand(cmd, depth=depth+1))
if split:
out = SList(out.splitlines())
else:
out = LSString(out)
return out
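    # Illustrative usage (not from the original source):
    #
    #     files = get_ipython().getoutput('ls')              # SList of lines
    #     text = get_ipython().getoutput('ls', split=False)  # one LSString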
#-------------------------------------------------------------------------
# Things related to aliases
#-------------------------------------------------------------------------
def init_alias(self):
self.alias_manager = AliasManager(shell=self, parent=self)
self.configurables.append(self.alias_manager)
#-------------------------------------------------------------------------
# Things related to extensions
#-------------------------------------------------------------------------
def init_extension_manager(self):
self.extension_manager = ExtensionManager(shell=self, parent=self)
self.configurables.append(self.extension_manager)
#-------------------------------------------------------------------------
# Things related to payloads
#-------------------------------------------------------------------------
def init_payload(self):
self.payload_manager = PayloadManager(parent=self)
self.configurables.append(self.payload_manager)
#-------------------------------------------------------------------------
# Things related to the prefilter
#-------------------------------------------------------------------------
def init_prefilter(self):
self.prefilter_manager = PrefilterManager(shell=self, parent=self)
self.configurables.append(self.prefilter_manager)
# Ultimately this will be refactored in the new interpreter code, but
# for now, we should expose the main prefilter method (there's legacy
# code out there that may rely on this).
self.prefilter = self.prefilter_manager.prefilter_lines
def auto_rewrite_input(self, cmd):
"""Print to the screen the rewritten form of the user's command.
This shows visual feedback by rewriting input lines that cause
automatic calling to kick in, like::
/f x
into::
------> f(x)
after the user's input prompt. This helps the user understand that the
input line was transformed automatically by IPython.
"""
if not self.show_rewritten_input:
return
# This is overridden in TerminalInteractiveShell to use fancy prompts
print("------> " + cmd)
#-------------------------------------------------------------------------
# Things related to extracting values/expressions from kernel and user_ns
#-------------------------------------------------------------------------
def _user_obj_error(self):
"""return simple exception dict
for use in user_expressions
"""
etype, evalue, tb = self._get_exc_info()
stb = self.InteractiveTB.get_exception_only(etype, evalue)
exc_info = {
u'status' : 'error',
u'traceback' : stb,
u'ename' : etype.__name__,
u'evalue' : py3compat.safe_unicode(evalue),
}
return exc_info
def _format_user_obj(self, obj):
"""format a user object to display dict
for use in user_expressions
"""
data, md = self.display_formatter.format(obj)
value = {
'status' : 'ok',
'data' : data,
'metadata' : md,
}
return value
def user_expressions(self, expressions):
"""Evaluate a dict of expressions in the user's namespace.
Parameters
----------
expressions : dict
A dict with string keys and string values. The expression values
should be valid Python expressions, each of which will be evaluated
in the user namespace.
Returns
-------
A dict, keyed like the input expressions dict, with the rich mime-typed
display_data of each value.
"""
out = {}
user_ns = self.user_ns
global_ns = self.user_global_ns
for key, expr in expressions.items():
try:
value = self._format_user_obj(eval(expr, global_ns, user_ns))
except:
value = self._user_obj_error()
out[key] = value
return out
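    # Illustrative usage (a sketch, not part of the original source):
    #
    #     out = get_ipython().user_expressions({'two': '1 + 1'})
    #     # out['two']['status'] == 'ok'; the rendered value lives under
    #     # out['two']['data']['text/plain']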
#-------------------------------------------------------------------------
# Things related to the running of code
#-------------------------------------------------------------------------
def ex(self, cmd):
"""Execute a normal python statement in user namespace."""
with self.builtin_trap:
exec(cmd, self.user_global_ns, self.user_ns)
def ev(self, expr):
"""Evaluate python expression expr in user namespace.
Returns the result of evaluation
"""
with self.builtin_trap:
return eval(expr, self.user_global_ns, self.user_ns)
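    # Illustrative usage (not in the original source):
    #
    #     ip = get_ipython()
    #     ip.ex('counter = 0')               # statement, no return value
    #     assert ip.ev('counter + 1') == 1   # expression, result returned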
def safe_execfile(self, fname, *where, exit_ignore=False, raise_exceptions=False, shell_futures=False):
"""A safe version of the builtin execfile().
This version will never throw an exception, but instead print
helpful error messages to the screen. This only works on pure
Python files with the .py extension.
Parameters
----------
fname : string
The name of the file to be executed.
where : tuple
One or two namespaces, passed to execfile() as (globals,locals).
If only one is given, it is passed as both.
exit_ignore : bool (False)
If True, then silence SystemExit for non-zero status (it is always
silenced for zero status, as it is so common).
raise_exceptions : bool (False)
If True raise exceptions everywhere. Meant for testing.
shell_futures : bool (False)
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
"""
fname = os.path.abspath(os.path.expanduser(fname))
# Make sure we can open the file
try:
with open(fname):
pass
except:
warn('Could not open file <%s> for safe execution.' % fname)
return
# Find things also in current directory. This is needed to mimic the
# behavior of running a script from the system command line, where
# Python inserts the script's directory into sys.path
dname = os.path.dirname(fname)
with prepended_to_syspath(dname), self.builtin_trap:
try:
glob, loc = (where + (None, ))[:2]
py3compat.execfile(
fname, glob, loc,
self.compile if shell_futures else None)
except SystemExit as status:
# If the call was made with 0 or None exit status (sys.exit(0)
# or sys.exit() ), don't bother showing a traceback, as both of
# these are considered normal by the OS:
# > python -c'import sys;sys.exit(0)'; echo $?
# 0
# > python -c'import sys;sys.exit()'; echo $?
# 0
# For other exit status, we show the exception unless
# explicitly silenced, but only in short form.
if status.code:
if raise_exceptions:
raise
if not exit_ignore:
self.showtraceback(exception_only=True)
except:
if raise_exceptions:
raise
# tb offset is 2 because we wrap execfile
self.showtraceback(tb_offset=2)
def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
"""Like safe_execfile, but for .ipy or .ipynb files with IPython syntax.
Parameters
----------
fname : str
The name of the file to execute. The filename must have a
.ipy or .ipynb extension.
shell_futures : bool (False)
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
raise_exceptions : bool (False)
If True raise exceptions everywhere. Meant for testing.
"""
fname = os.path.abspath(os.path.expanduser(fname))
# Make sure we can open the file
try:
with open(fname):
pass
except:
warn('Could not open file <%s> for safe execution.' % fname)
return
# Find things also in current directory. This is needed to mimic the
# behavior of running a script from the system command line, where
# Python inserts the script's directory into sys.path
dname = os.path.dirname(fname)
def get_cells():
"""generator for sequence of code blocks to run"""
if fname.endswith('.ipynb'):
from nbformat import read
nb = read(fname, as_version=4)
if not nb.cells:
return
for cell in nb.cells:
if cell.cell_type == 'code':
yield cell.source
else:
with open(fname) as f:
yield f.read()
with prepended_to_syspath(dname):
try:
for cell in get_cells():
result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
if raise_exceptions:
result.raise_error()
elif not result.success:
break
except:
if raise_exceptions:
raise
self.showtraceback()
warn('Unknown failure executing file: <%s>' % fname)
def safe_run_module(self, mod_name, where):
"""A safe version of runpy.run_module().
This version will never throw an exception, but instead print
helpful error messages to the screen.
`SystemExit` exceptions with status code 0 or None are ignored.
Parameters
----------
mod_name : string
The name of the module to be executed.
where : dict
The globals namespace.
"""
try:
try:
where.update(
runpy.run_module(str(mod_name), run_name="__main__",
alter_sys=True)
)
except SystemExit as status:
if status.code:
raise
except:
self.showtraceback()
warn('Unknown failure executing module: <%s>' % mod_name)
def run_cell(self, raw_cell, store_history=False, silent=False, shell_futures=True):
"""Run a complete IPython cell.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
            If True, avoid side-effects, such as implicit displayhooks and
            logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
Returns
-------
result : :class:`ExecutionResult`
"""
result = None
try:
result = self._run_cell(
raw_cell, store_history, silent, shell_futures)
finally:
self.events.trigger('post_execute')
if not silent:
self.events.trigger('post_run_cell', result)
return result
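    # Illustrative usage (a sketch, not part of the original source):
    #
    #     result = get_ipython().run_cell('1 + 1')
    #     assert result.success and result.result == 2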
def _run_cell(self, raw_cell:str, store_history:bool, silent:bool, shell_futures:bool):
"""Internal method to run a complete IPython cell."""
coro = self.run_cell_async(
raw_cell,
store_history=store_history,
silent=silent,
shell_futures=shell_futures,
)
        # run_cell_async is async, but may not actually need an eventloop.
        # When this is the case, we want to run it using the pseudo_sync_runner
        # so that code can invoke eventloops (for example via the %run and
        # %paste magics).
if self.should_run_async(raw_cell):
runner = self.loop_runner
else:
runner = _pseudo_sync_runner
try:
return runner(coro)
except BaseException as e:
info = ExecutionInfo(raw_cell, store_history, silent, shell_futures)
result = ExecutionResult(info)
result.error_in_exec = e
self.showtraceback(running_compiled_code=True)
return result
def should_run_async(self, raw_cell: str) -> bool:
"""Return whether a cell should be run asynchronously via a coroutine runner
Parameters
----------
raw_cell: str
The code to be executed
Returns
-------
result: bool
Whether the code needs to be run with a coroutine runner or not
.. versionadded:: 7.0
"""
if not self.autoawait:
return False
try:
cell = self.transform_cell(raw_cell)
except Exception:
# any exception during transform will be raised
# prior to execution
return False
return _should_be_async(cell)
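# Illustrative sketch (hedged; assumes `ip.autoawait` is enabled): a cell
# containing a top-level await is flagged for the coroutine runner, while a
# plain statement is not.
#
#   ip.should_run_async("await asyncio.sleep(0)")  # -> True
#   ip.should_run_async("x = 1")                   # -> False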
@asyncio.coroutine
def run_cell_async(self, raw_cell: str, store_history=False, silent=False, shell_futures=True) -> ExecutionResult:
"""Run a complete IPython cell asynchronously.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
If True, avoid side-effects, such as implicit displayhooks and
logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
Returns
-------
result : :class:`ExecutionResult`
.. versionadded:: 7.0
"""
info = ExecutionInfo(
raw_cell, store_history, silent, shell_futures)
result = ExecutionResult(info)
if (not raw_cell) or raw_cell.isspace():
self.last_execution_succeeded = True
self.last_execution_result = result
return result
if silent:
store_history = False
if store_history:
result.execution_count = self.execution_count
def error_before_exec(value):
if store_history:
self.execution_count += 1
result.error_before_exec = value
self.last_execution_succeeded = False
self.last_execution_result = result
return result
self.events.trigger('pre_execute')
if not silent:
self.events.trigger('pre_run_cell', info)
# If any of our input transformations (input_transformer_manager or
# prefilter_manager) raise an exception, we store it in this variable
# so that we can display the error after logging the input and storing
# it in the history.
try:
cell = self.transform_cell(raw_cell)
except Exception:
preprocessing_exc_tuple = sys.exc_info()
cell = raw_cell # cell has to exist so it can be stored/logged
else:
preprocessing_exc_tuple = None
# Store raw and processed history
if store_history:
self.history_manager.store_inputs(self.execution_count,
cell, raw_cell)
if not silent:
self.logger.log(cell, raw_cell)
# Display the exception if input processing failed.
if preprocessing_exc_tuple is not None:
self.showtraceback(preprocessing_exc_tuple)
if store_history:
self.execution_count += 1
return error_before_exec(preprocessing_exc_tuple[2])
# Our own compiler remembers the __future__ environment. If we want to
# run code with a separate __future__ environment, use the default
# compiler
compiler = self.compile if shell_futures else CachingCompiler()
_run_async = False
with self.builtin_trap:
cell_name = self.compile.cache(cell, self.execution_count)
with self.display_trap:
# Compile to bytecode
try:
if self.autoawait and _should_be_async(cell):
# the code AST below will not be user code: we wrap it
# in an `async def`. This will likely make some AST
# transformers below miss some transform opportunities,
# and it introduces a small coupling to run_code (in
# which we bake some assumptions about what
# _ast_asyncify returns). There are ways around this,
# like grafting part of the AST back later:
# - Here, return code_ast.body[0].body[1:-1], as well
#   as the last expression in the return statement,
#   which is the user code part.
# - Let it go through the AST transformers, and graft
#   it back after the AST transform.
# But that seems unreasonable, at least while we
# do not need it.
code_ast = _ast_asyncify(cell, 'async-def-wrapper')
_run_async = True
else:
code_ast = compiler.ast_parse(cell, filename=cell_name)
except self.custom_exceptions as e:
etype, value, tb = sys.exc_info()
self.CustomTB(etype, value, tb)
return error_before_exec(e)
except IndentationError as e:
self.showindentationerror()
return error_before_exec(e)
except (OverflowError, SyntaxError, ValueError, TypeError,
MemoryError) as e:
self.showsyntaxerror()
return error_before_exec(e)
# Apply AST transformations
try:
code_ast = self.transform_ast(code_ast)
except InputRejected as e:
self.showtraceback()
return error_before_exec(e)
# Give the displayhook a reference to our ExecutionResult so it
# can fill in the output value.
self.displayhook.exec_result = result
# Execute the user code
interactivity = "none" if silent else self.ast_node_interactivity
if _run_async:
interactivity = 'async'
has_raised = yield from self.run_ast_nodes(code_ast.body, cell_name,
interactivity=interactivity, compiler=compiler, result=result)
self.last_execution_succeeded = not has_raised
self.last_execution_result = result
# Reset this so later displayed values do not modify the
# ExecutionResult
self.displayhook.exec_result = None
if store_history:
# Write output to the database. Does nothing unless
# history output logging is enabled.
self.history_manager.store_output(self.execution_count)
# Each cell is a *single* input, regardless of how many lines it has
self.execution_count += 1
return result
def transform_cell(self, raw_cell):
"""Transform an input cell before parsing it.
Static transformations, implemented in IPython.core.inputtransformer2,
deal with things like ``%magic`` and ``!system`` commands.
These run on all input.
Dynamic transformations, for things like unescaped magics and the exit
autocall, depend on the state of the interpreter.
These only apply to single line inputs.
These string-based transformations are followed by AST transformations;
see :meth:`transform_ast`.
"""
# Static input transformations
cell = self.input_transformer_manager.transform_cell(raw_cell)
if len(cell.splitlines()) == 1:
# Dynamic transformations - only applied for single line commands
with self.builtin_trap:
# use prefilter_lines to handle trailing newlines
# restore trailing newline for ast.parse
cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
lines = cell.splitlines(keepends=True)
for transform in self.input_transformers_post:
lines = transform(lines)
cell = ''.join(lines)
return cell
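# Illustrative sketch (hedged; exact output strings vary by IPython
# version): the static transformations rewrite magics and system commands
# into plain Python calls before parsing.
#
#   ip.transform_cell("%time x + 1")
#   # -> "get_ipython().run_line_magic('time', 'x + 1')\n"
#   ip.transform_cell("!ls")
#   # -> "get_ipython().system('ls')\n"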
def transform_ast(self, node):
"""Apply the AST transformations from self.ast_transformers
Parameters
----------
node : ast.Node
The root node to be transformed. Typically called with the ast.Module
produced by parsing user input.
Returns
-------
An ast.Node corresponding to the node it was called with. Note that it
may also modify the passed object, so don't rely on references to the
original AST.
"""
for transformer in self.ast_transformers:
try:
node = transformer.visit(node)
except InputRejected:
# User-supplied AST transformers can reject an input by raising
# an InputRejected. Short-circuit in this case so that we
# don't unregister the transform.
raise
except Exception:
warn("AST transformer %r threw an error. It will be unregistered." % transformer)
self.ast_transformers.remove(transformer)
if self.ast_transformers:
ast.fix_missing_locations(node)
return node
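# A minimal sketch of a user-supplied AST transformer (illustrative; `ip`
# is an assumed InteractiveShell instance). Anything appended to
# ast_transformers is applied to every parsed cell by transform_ast:
#
#   class NegateNumbers(ast.NodeTransformer):
#       def visit_Num(self, node):
#           # Replace each numeric literal with its negation.
#           return ast.copy_location(ast.Num(-node.n), node)
#
#   ip.ast_transformers.append(NegateNumbers())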
@asyncio.coroutine
def run_ast_nodes(self, nodelist:ListType[AST], cell_name:str, interactivity='last_expr',
compiler=compile, result=None):
"""Run a sequence of AST nodes. The execution mode depends on the
interactivity parameter.
Parameters
----------
nodelist : list
A sequence of AST nodes to run.
cell_name : str
Will be passed to the compiler as the filename of the cell. Typically
the value returned by ip.compile.cache(cell).
interactivity : str
'all', 'last', 'last_expr', 'last_expr_or_assign' or 'none',
specifying which nodes should be run interactively (displaying output
from expressions). 'last_expr' will run the last node interactively
only if it is an expression (i.e. expressions in loops or other blocks
are not displayed). 'last_expr_or_assign' will run the last expression
or the last assignment. Other values for this parameter will raise a
ValueError.
Experimental value: 'async'. Will try to run top-level interactive
async/await code in the default runner; this will not respect the
interactivity setting and will only run the last node if it is an
expression.
compiler : callable
A function with the same interface as the built-in compile(), to turn
the AST nodes into code objects. Default is the built-in compile().
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
Returns
-------
True if an exception occurred while running code, False if it finished
running.
"""
if not nodelist:
return
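# For 'last_expr_or_assign': if the final node assigns to a single simple
# name, append an extra Expr node that loads that name so the assigned
# value gets displayed, then fall through to the 'last_expr' rules.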
if interactivity == 'last_expr_or_assign':
if isinstance(nodelist[-1], _assign_nodes):
asg = nodelist[-1]
if isinstance(asg, ast.Assign) and len(asg.targets) == 1:
target = asg.targets[0]
elif isinstance(asg, _single_targets_nodes):
target = asg.target
else:
target = None
if isinstance(target, ast.Name):
nnode = ast.Expr(ast.Name(target.id, ast.Load()))
ast.fix_missing_locations(nnode)
nodelist.append(nnode)
interactivity = 'last_expr'
_async = False
if interactivity == 'last_expr':
if isinstance(nodelist[-1], ast.Expr):
interactivity = "last"
else:
interactivity = "none"
if interactivity == 'none':
to_run_exec, to_run_interactive = nodelist, []
elif interactivity == 'last':
to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
elif interactivity == 'all':
to_run_exec, to_run_interactive = [], nodelist
elif interactivity == 'async':
_async = True
else:
raise ValueError("Interactivity was %r" % interactivity)
try:
if _async:
# If interactivity is async the semantics of run_code are
# completely different. Skip the usual machinery.
mod = Module(nodelist, [])
async_wrapper_code = compiler(mod, cell_name, 'exec')
exec(async_wrapper_code, self.user_global_ns, self.user_ns)
async_code = removed_co_newlocals(self.user_ns.pop('async-def-wrapper')).__code__
if (yield from self.run_code(async_code, result, async_=True)):
return True
else:
for i, node in enumerate(to_run_exec):
mod = Module([node], [])
code = compiler(mod, cell_name, "exec")
if (yield from self.run_code(code, result)):
return True
for i, node in enumerate(to_run_interactive):
mod = ast.Interactive([node])
code = compiler(mod, cell_name, "single")
if (yield from self.run_code(code, result)):
return True
# Flush softspace
if softspace(sys.stdout, 0):
print()
except:
# It's possible to have exceptions raised here, typically by
# compilation of odd code (such as a naked 'return' outside a
# function) that did parse but isn't valid. Typically the exception
# is a SyntaxError, but it's safest just to catch anything and show
# the user a traceback.
# We do only one try/except outside the loop to minimize the impact
# on runtime, and also because if any node in the node list is
# broken, we should stop execution completely.
if result:
result.error_before_exec = sys.exc_info()[1]
self.showtraceback()
return True
return False
def _async_exec(self, code_obj: types.CodeType, user_ns: dict):
"""
Evaluate an asynchronous code object using a code runner
Fake asynchronous execution of code_object in a namespace via a proxy namespace.
Returns coroutine object, which can be executed via async loop runner
WARNING: The semantics of `async_exec` are quite different from `exec`,
in particular you can only pass a single namespace. It also returns a
handle to the value of the last thing returned by code_object.
"""
return eval(code_obj, user_ns)
@asyncio.coroutine
def run_code(self, code_obj, result=None, *, async_=False):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to display a
traceback.
Parameters
----------
code_obj : code object
A compiled code object, to be executed
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
async_ : Bool (Experimental)
Attempt to run top-level asynchronous code in a default loop.
Returns
-------
False : successful execution.
True : an error occurred.
"""
# Set our own excepthook in case the user code tries to call it
# directly, so that the IPython crash handler doesn't get triggered
old_excepthook, sys.excepthook = sys.excepthook, self.excepthook
# we save the original sys.excepthook in the instance, in case config
# code (such as magics) needs access to it.
self.sys_excepthook = old_excepthook
outflag = True # happens in more places, so it's easier as default
try:
try:
self.hooks.pre_run_code_hook()
if async_:
last_expr = (yield from self._async_exec(code_obj, self.user_ns))
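# Compiling the bare name 'last_expr' in 'single' mode and exec'ing it
# routes the coroutine's final value through sys.displayhook, mimicking
# ordinary interactive display of the last expression.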
code = compile('last_expr', 'fake', "single")
exec(code, {'last_expr': last_expr})
else:
exec(code_obj, self.user_global_ns, self.user_ns)
finally:
# Reset our crash handler in place
sys.excepthook = old_excepthook
except SystemExit as e:
if result is not None:
result.error_in_exec = e
self.showtraceback(exception_only=True)
warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
except self.custom_exceptions:
etype, value, tb = sys.exc_info()
if result is not None:
result.error_in_exec = value
self.CustomTB(etype, value, tb)
except:
if result is not None:
result.error_in_exec = sys.exc_info()[1]
self.showtraceback(running_compiled_code=True)
else:
outflag = False
return outflag
# For backwards compatibility
runcode = run_code
def check_complete(self, code: str) -> Tuple[str, str]:
"""Return whether a block of code is ready to execute, or should be continued
Parameters
----------
code : str
Python input code, which can be multiline.
Returns
-------
status : str
One of 'complete', 'incomplete', or 'invalid' if source is not a
prefix of valid code.
indent : str
When status is 'incomplete', this is some whitespace to insert on
the next line of the prompt.
"""
status, nspaces = self.input_transformer_manager.check_complete(code)
return status, ' ' * (nspaces or 0)
#-------------------------------------------------------------------------
# Things related to GUI support and pylab
#-------------------------------------------------------------------------
active_eventloop = None
def enable_gui(self, gui=None):
raise NotImplementedError('Implement enable_gui in a subclass')
def enable_matplotlib(self, gui=None):
"""Enable interactive matplotlib and inline figure support.
This takes the following steps:
1. select the appropriate eventloop and matplotlib backend
2. set up matplotlib for interactive use with that backend
3. configure formatters for inline figure display
4. enable the selected gui eventloop
Parameters
----------
gui : optional, string
If given, dictates the choice of matplotlib GUI backend to use
(should be one of IPython's supported backends, 'qt', 'osx', 'tk',
'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
matplotlib (as dictated by the matplotlib build-time options plus the
user's matplotlibrc configuration file). Note that not all backends
make sense in all contexts, for example a terminal ipython can't
display figures inline.
"""
from IPython.core import pylabtools as pt
gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
if gui != 'inline':
# If we have our first gui selection, store it
if self.pylab_gui_select is None:
self.pylab_gui_select = gui
# Otherwise if they are different
elif gui != self.pylab_gui_select:
print('Warning: Cannot change to a different GUI toolkit: %s.'
' Using %s instead.' % (gui, self.pylab_gui_select))
gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)
pt.activate_matplotlib(backend)
pt.configure_inline_support(self, backend)
# Now we must activate the gui pylab wants to use, and fix %run to take
# plot updates into account
self.enable_gui(gui)
self.magics_manager.registry['ExecutionMagics'].default_runner = \
pt.mpl_runner(self.safe_execfile)
return gui, backend
def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
"""Activate pylab support at runtime.
This turns on support for matplotlib, preloads into the interactive
namespace all of numpy and pylab, and configures IPython to correctly
interact with the GUI event loop. The GUI backend to be used can be
optionally selected with the optional ``gui`` argument.
Compared to InteractiveShell.enable_matplotlib, this method only adds the
preloading of the interactive namespace.
Parameters
----------
gui : optional, string
If given, dictates the choice of matplotlib GUI backend to use
(should be one of IPython's supported backends, 'qt', 'osx', 'tk',
'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
matplotlib (as dictated by the matplotlib build-time options plus the
user's matplotlibrc configuration file). Note that not all backends
make sense in all contexts, for example a terminal ipython can't
display figures inline.
import_all : optional, bool, default: True
Whether to do `from numpy import *` and `from pylab import *`
in addition to module imports.
welcome_message : deprecated
This argument is ignored, no welcome message will be displayed.
"""
from IPython.core.pylabtools import import_pylab
gui, backend = self.enable_matplotlib(gui)
# We want to prevent the loading of pylab to pollute the user's
# namespace as shown by the %who* magics, so we execute the activation
# code in an empty namespace, and we update *both* user_ns and
# user_ns_hidden with this information.
ns = {}
import_pylab(ns, import_all)
# warn about clobbered names
ignored = {"__builtins__"}
both = set(ns).intersection(self.user_ns).difference(ignored)
clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
self.user_ns.update(ns)
self.user_ns_hidden.update(ns)
return gui, backend, clobbered
#-------------------------------------------------------------------------
# Utilities
#-------------------------------------------------------------------------
def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
"""Expand python variables in a string.
The depth argument indicates how many frames above the caller should
be walked to look for the local namespace where to expand variables.
The global namespace for expansion is always the user's interactive
namespace.
"""
ns = self.user_ns.copy()
try:
frame = sys._getframe(depth+1)
except ValueError:
# This is thrown if there aren't that many frames on the stack,
# e.g. if a script called run_line_magic() directly.
pass
else:
ns.update(frame.f_locals)
try:
# We have to use .vformat() here, because 'self' is a valid and common
# name, and expanding **ns for .format() would make it collide with
# the 'self' argument of the method.
cmd = formatter.vformat(cmd, args=[], kwargs=ns)
except Exception:
# if formatter couldn't format, just let it go untransformed
pass
return cmd
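# Illustrative sketch (hedged; assumes `ip.user_ns['n'] == 3`): dollar
# expressions are expanded against the user namespace via DollarFormatter.
#
#   ip.var_expand("echo $n")        # -> "echo 3"
#   ip.var_expand("echo ${n + 1}")  # -> "echo 4"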
def mktempfile(self, data=None, prefix='ipython_edit_'):
"""Make a new tempfile and return its filename.
This makes a call to tempfile.mkstemp (with the file created inside a
directory made by tempfile.mkdtemp), and registers the created filename
and directory internally so IPython cleans them up at exit time.
Optional inputs:
- data(None): if data is given, it gets written out to the temp file
immediately, and the file is closed again."""
dirname = tempfile.mkdtemp(prefix=prefix)
self.tempdirs.append(dirname)
handle, filename = tempfile.mkstemp('.py', prefix, dir=dirname)
os.close(handle) # On Windows, there can only be one open handle on a file
self.tempfiles.append(filename)
if data:
with open(filename, 'w') as tmp_file:
tmp_file.write(data)
return filename
@undoc
def write(self,data):
"""DEPRECATED: Write a string to the default output"""
warn('InteractiveShell.write() is deprecated, use sys.stdout instead',
DeprecationWarning, stacklevel=2)
sys.stdout.write(data)
@undoc
def write_err(self,data):
"""DEPRECATED: Write a string to the default error output"""
warn('InteractiveShell.write_err() is deprecated, use sys.stderr instead',
DeprecationWarning, stacklevel=2)
sys.stderr.write(data)
def ask_yes_no(self, prompt, default=None, interrupt=None):
if self.quiet:
return True
return ask_yes_no(prompt,default,interrupt)
def show_usage(self):
"""Show a usage message"""
page.page(IPython.core.usage.interactive_usage)
def extract_input_lines(self, range_str, raw=False):
"""Return as a string a set of input history slices.
Parameters
----------
range_str : string
The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
since this function is for use by magic functions which get their
arguments as strings. The number before the / is the session
number: ~n goes n back from the current session.
raw : bool, optional
By default, the processed input is used. If this is true, the raw
input history is used instead.
Notes
-----
Slices can be described with two notations:
* ``N:M`` -> standard python form, means including items N...(M-1).
* ``N-M`` -> include items N..M (closed endpoint).
"""
lines = self.history_manager.get_range_by_str(range_str, raw=raw)
return "\n".join(x for _, _, x in lines)
def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
"""Get a code string from history, file, url, or a string or macro.
This is mainly used by magic functions.
Parameters
----------
target : str
A string specifying code to retrieve. This will be tried respectively
as: ranges of input history (see %history for syntax), url,
corresponding .py file, filename, or an expression evaluating to a
string or Macro in the user namespace.
raw : bool
If true (default), retrieve raw history. Has no effect on the other
retrieval mechanisms.
py_only : bool (default False)
Only try to fetch Python code; do not try alternative methods to
decode the file if unicode decoding fails.
Returns
-------
A string of code.
ValueError is raised if nothing is found, and TypeError if it evaluates
to an object of another type. In each case, .args[0] is a printable
message.
"""
code = self.extract_input_lines(target, raw=raw) # Grab history
if code:
return code
try:
if target.startswith(('http://', 'https://')):
return openpy.read_py_url(target, skip_encoding_cookie=skip_encoding_cookie)
except UnicodeDecodeError:
if not py_only :
# Deferred import
from urllib.request import urlopen
response = urlopen(target)
return response.read().decode('latin1')
raise ValueError(("'%s' seem to be unreadable.") % target)
potential_target = [target]
try :
potential_target.insert(0,get_py_filename(target))
except IOError:
pass
for tgt in potential_target :
if os.path.isfile(tgt): # Read file
try :
return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
except UnicodeDecodeError :
if not py_only :
with io_open(tgt,'r', encoding='latin1') as f :
return f.read()
raise ValueError(("'%s' seem to be unreadable.") % target)
elif os.path.isdir(os.path.expanduser(tgt)):
raise ValueError("'%s' is a directory, not a regular file." % target)
if search_ns:
# Inspect namespace to load object source
object_info = self.object_inspect(target, detail_level=1)
if object_info['found'] and object_info['source']:
return object_info['source']
try: # User namespace
codeobj = eval(target, self.user_ns)
except Exception:
raise ValueError(("'%s' was not found in history, as a file, url, "
"nor in the user namespace.") % target)
if isinstance(codeobj, str):
return codeobj
elif isinstance(codeobj, Macro):
return codeobj.value
raise TypeError("%s is neither a string nor a macro." % target,
codeobj)
#-------------------------------------------------------------------------
# Things related to IPython exiting
#-------------------------------------------------------------------------
def atexit_operations(self):
"""This will be executed at the time of exit.
Cleanup operations and saving of persistent data that is done
unconditionally by IPython should be performed here.
For things that may depend on startup flags or platform specifics (such
as having readline or not), register a separate atexit function in the
code that has the appropriate information, rather than trying to
clutter this method.
"""
# Close the history session (this stores the end time and line count)
# this must be *before* the tempfile cleanup, in case of temporary
# history db
self.history_manager.end_session()
# Cleanup all tempfiles and folders left around
for tfile in self.tempfiles:
try:
os.unlink(tfile)
except OSError:
pass
for tdir in self.tempdirs:
try:
os.rmdir(tdir)
except OSError:
pass
# Clear all user namespaces to release all references cleanly.
self.reset(new_session=False)
# Run user hooks
self.hooks.shutdown_hook()
def cleanup(self):
self.restore_sys_module_state()
# Overridden in terminal subclass to change prompts
def switch_doctest_mode(self, mode):
pass
class InteractiveShellABC(metaclass=abc.ABCMeta):
"""An abstract base class for InteractiveShell."""
InteractiveShellABC.register(InteractiveShell)
|
lmregus/Portfolio
|
python/design_patterns/env/lib/python3.7/site-packages/IPython/core/interactiveshell.py
|
Python
|
mit
| 148,339
|
[
"VisIt"
] |
889f260ac6e89e5dcb6ce94130492b41c2314c0702ab462e68a206399a5df2cc
|
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test CF-NetCDF file loading and saving.
"""
from __future__ import (absolute_import, division, print_function)
# Import iris tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import os
import os.path
import shutil
import stat
import tempfile
import biggus
import mock
import netCDF4 as nc
import numpy as np
import numpy.ma as ma
import iris
import iris.analysis.trajectory
import iris.fileformats._pyke_rules.compiled_krb.fc_rules_cf_fc as pyke_rules
import iris.fileformats.netcdf
import iris.std_names
import iris.util
import iris.coord_systems as icoord_systems
import iris.tests.stock as stock
@tests.skip_data
class TestNetCDFLoad(tests.IrisTest):
def test_monotonic(self):
cubes = iris.load(tests.get_data_path(
('NetCDF', 'testing', 'test_monotonic_coordinate.nc')))
self.assertCML(cubes, ('netcdf', 'netcdf_monotonic.cml'))
def test_load_global_xyt_total(self):
# Test loading single xyt CF-netCDF file.
cube = iris.load_cube(
tests.get_data_path(('NetCDF', 'global', 'xyt',
'SMALL_total_column_co2.nc')))
self.assertCML(cube, ('netcdf', 'netcdf_global_xyt_total.cml'))
def test_load_global_xyt_hires(self):
# Test loading another single xyt CF-netCDF file.
cube = iris.load_cube(tests.get_data_path(
('NetCDF', 'global', 'xyt', 'SMALL_hires_wind_u_for_ipcc4.nc')))
self.assertCML(cube, ('netcdf', 'netcdf_global_xyt_hires.cml'))
def test_missing_time_bounds(self):
# Check we can cope with a missing bounds variable.
with self.temp_filename(suffix='nc') as filename:
# Tweak a copy of the test data file to rename (we can't delete)
# the time bounds variable.
src = tests.get_data_path(('NetCDF', 'global', 'xyt',
'SMALL_hires_wind_u_for_ipcc4.nc'))
shutil.copyfile(src, filename)
dataset = nc.Dataset(filename, mode='a')
dataset.renameVariable('time_bnds', 'foo')
dataset.close()
cube = iris.load_cube(filename, 'eastward_wind')
def test_load_global_xyzt_gems(self):
# Test loading single xyzt CF-netCDF file (multi-cube).
cubes = iris.load(tests.get_data_path(('NetCDF', 'global', 'xyz_t',
'GEMS_CO2_Apr2006.nc')))
self.assertCML(cubes, ('netcdf', 'netcdf_global_xyzt_gems.cml'))
# Check the masked array fill value is propagated through the data
# manager loading.
lnsp = cubes[1]
self.assertTrue(ma.isMaskedArray(lnsp.data))
self.assertEqual(-32767.0, lnsp.data.fill_value)
def test_load_global_xyzt_gems_iter(self):
# Test loading stepped single xyzt CF-netCDF file (multi-cube).
for i, cube in enumerate(iris.load(
tests.get_data_path(('NetCDF', 'global', 'xyz_t',
'GEMS_CO2_Apr2006.nc')))):
self.assertCML(cube, ('netcdf',
'netcdf_global_xyzt_gems_iter_%d.cml' % i))
def test_load_rotated_xy_land(self):
# Test loading single xy rotated pole CF-netCDF file.
cube = iris.load_cube(tests.get_data_path(
('NetCDF', 'rotated', 'xy', 'rotPole_landAreaFraction.nc')))
# Make sure the AuxCoords have lazy data.
self.assertIsInstance(cube.coord('latitude')._points, biggus.Array)
self.assertCML(cube, ('netcdf', 'netcdf_rotated_xy_land.cml'))
def test_load_rotated_xyt_precipitation(self):
# Test loading single xyt rotated pole CF-netCDF file.
cube = iris.load_cube(
tests.get_data_path(('NetCDF', 'rotated', 'xyt',
'small_rotPole_precipitation.nc')))
self.assertCML(cube, ('netcdf',
'netcdf_rotated_xyt_precipitation.cml'))
def test_load_tmerc_grid_and_clim_bounds(self):
# Test loading a single CF-netCDF file with a transverse Mercator
# grid_mapping and a time variable with climatology.
cube = iris.load_cube(
tests.get_data_path(('NetCDF', 'transverse_mercator',
'tmean_1910_1910.nc')))
self.assertCML(cube, ('netcdf', 'netcdf_tmerc_and_climatology.cml'))
def test_load_tmerc_grid_with_projection_origin(self):
# Test loading a single CF-netCDF file with a transverse Mercator
# grid_mapping that uses longitude_of_projection_origin and
# scale_factor_at_projection_origin instead of
# longitude_of_central_meridian and scale_factor_at_central_meridian.
cube = iris.load_cube(
tests.get_data_path(('NetCDF', 'transverse_mercator',
'projection_origin_attributes.nc')))
expected = icoord_systems.TransverseMercator(
latitude_of_projection_origin=49.0,
longitude_of_central_meridian=-2.0,
false_easting=400000.0,
false_northing=-100000.0,
scale_factor_at_central_meridian=0.9996012717,
ellipsoid=icoord_systems.GeogCS(
semi_major_axis=6377563.396, semi_minor_axis=6356256.91))
self.assertEqual(cube.coord('projection_x_coordinate').coord_system,
expected)
self.assertEqual(cube.coord('projection_y_coordinate').coord_system,
expected)
def test_missing_climatology(self):
# Check we can cope with a missing climatology variable.
with self.temp_filename(suffix='nc') as filename:
# Tweak a copy of the test data file to rename (we can't delete)
# the climatology variable.
src = tests.get_data_path(('NetCDF', 'transverse_mercator',
'tmean_1910_1910.nc'))
shutil.copyfile(src, filename)
dataset = nc.Dataset(filename, mode='a')
dataset.renameVariable('climatology_bounds', 'foo')
dataset.close()
cube = iris.load_cube(filename, 'Mean temperature')
def test_cell_methods(self):
# Test exercising CF-netCDF cell method parsing.
cubes = iris.load(tests.get_data_path(('NetCDF', 'testing',
'cell_methods.nc')))
# TEST_COMPAT mod - new cube merge doesn't sort in the same way - test
# can pass by manual sorting...
cubes = iris.cube.CubeList(sorted(cubes, key=lambda cube: cube.name()))
# TEST_COMPAT mod - different versions of the Python module
# `netCDF4` give different data arrays: MaskedArray vs ndarray
# Since we're not interested in the data we can just normalise
# to MaskedArray (to minimise the change).
for cube in cubes:
# Force the fill value to be the default netCDF fill value
# to ensure it matches the previous behaviour.
cube.data = ma.masked_equal(cube.data, -2147483647)
self.assertCML(cubes, ('netcdf', 'netcdf_cell_methods.cml'))
def test_deferred_loading(self):
# Test exercising CF-netCDF deferred loading and deferred slicing.
# shape (31, 161, 320)
cube = iris.load_cube(tests.get_data_path(
('NetCDF', 'global', 'xyt', 'SMALL_total_column_co2.nc')))
# Consecutive index on same dimension.
self.assertCML(cube[0], ('netcdf', 'netcdf_deferred_index_0.cml'))
self.assertCML(cube[0][0], ('netcdf', 'netcdf_deferred_index_1.cml'))
self.assertCML(cube[0][0][0], ('netcdf',
'netcdf_deferred_index_2.cml'))
# Consecutive slice on same dimension.
self.assertCML(cube[0:20], ('netcdf', 'netcdf_deferred_slice_0.cml'))
self.assertCML(cube[0:20][0:10], ('netcdf',
'netcdf_deferred_slice_1.cml'))
self.assertCML(cube[0:20][0:10][0:5], ('netcdf',
'netcdf_deferred_slice_2.cml'))
# Consecutive tuple index on same dimension.
self.assertCML(cube[(0, 8, 4, 2, 14, 12), ],
('netcdf', 'netcdf_deferred_tuple_0.cml'))
self.assertCML(cube[(0, 8, 4, 2, 14, 12), ][(0, 2, 4, 1), ],
('netcdf', 'netcdf_deferred_tuple_1.cml'))
subcube = cube[(0, 8, 4, 2, 14, 12), ][(0, 2, 4, 1), ][(1, 3), ]
self.assertCML(subcube, ('netcdf', 'netcdf_deferred_tuple_2.cml'))
# Consecutive mixture on same dimension.
self.assertCML(cube[0:20:2][(9, 5, 8, 0), ][3],
('netcdf', 'netcdf_deferred_mix_0.cml'))
self.assertCML(cube[(2, 7, 3, 4, 5, 0, 9, 10), ][2:6][3],
('netcdf', 'netcdf_deferred_mix_0.cml'))
self.assertCML(cube[0][(0, 2), (1, 3)],
('netcdf', 'netcdf_deferred_mix_1.cml'))
def test_units(self):
# Test exercising graceful cube and coordinate units loading.
cube0, cube1 = iris.load(tests.get_data_path(('NetCDF', 'testing',
'units.nc')))
self.assertCML(cube0, ('netcdf', 'netcdf_units_0.cml'))
self.assertCML(cube1, ('netcdf', 'netcdf_units_1.cml'))
class TestNetCDFCRS(tests.IrisTest):
def setUp(self):
class Var(object):
pass
self.grid = Var()
def test_lat_lon_major_minor(self):
major = 63781370
minor = 63567523
self.grid.semi_major_axis = major
self.grid.semi_minor_axis = minor
crs = pyke_rules.build_coordinate_system(self.grid)
self.assertEqual(crs, icoord_systems.GeogCS(major, minor))
def test_lat_lon_earth_radius(self):
earth_radius = 63700000
self.grid.earth_radius = earth_radius
crs = pyke_rules.build_coordinate_system(self.grid)
self.assertEqual(crs, icoord_systems.GeogCS(earth_radius))
class SaverPermissions(tests.IrisTest):
def test_noexist_directory(self):
# Test capture of suitable exception raised on writing to a
# non-existent directory.
dir_name = os.path.join(tempfile.gettempdir(), 'non_existent_dir')
fnme = os.path.join(dir_name, 'tmp.nc')
with self.assertRaises(IOError):
with iris.fileformats.netcdf.Saver(fnme, 'NETCDF4'):
pass
def test_bad_permissions(self):
# Non-exhaustive check that wrong permissions results in a suitable
# exception being raised.
dir_name = tempfile.mkdtemp()
fnme = os.path.join(dir_name, 'tmp.nc')
try:
os.chmod(dir_name, stat.S_IREAD)
with self.assertRaises(IOError):
iris.fileformats.netcdf.Saver(fnme, 'NETCDF4')
self.assertFalse(os.path.exists(fnme))
finally:
os.rmdir(dir_name)
class TestSave(tests.IrisTest):
def test_hybrid(self):
cube = stock.realistic_4d()
# Write Cube to netCDF file.
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cube, file_out, netcdf_format='NETCDF3_CLASSIC')
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf',
'netcdf_save_realistic_4d.cdl'))
def test_no_hybrid(self):
cube = stock.realistic_4d()
cube.remove_aux_factory(cube.aux_factories[0])
# Write Cube to netCDF file.
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cube, file_out, netcdf_format='NETCDF3_CLASSIC')
# Check the netCDF file against CDL expected output.
self.assertCDL(
file_out, ('netcdf', 'netcdf_save_realistic_4d_no_hybrid.cdl'))
def test_scalar_cube(self):
cube = stock.realistic_4d()[0, 0, 0, 0]
with self.temp_filename(suffix='.nc') as filename:
iris.save(cube, filename, netcdf_format='NETCDF3_CLASSIC')
self.assertCDL(filename, ('netcdf',
'netcdf_save_realistic_0d.cdl'))
def test_no_name_cube(self):
# Cube with no names.
cube = iris.cube.Cube(np.arange(20, dtype=np.float64).reshape((4, 5)))
dim0 = iris.coords.DimCoord(np.arange(4, dtype=np.float64))
dim1 = iris.coords.DimCoord(np.arange(5, dtype=np.float64), units='m')
other = iris.coords.AuxCoord('foobar', units='no_unit')
cube.add_dim_coord(dim0, 0)
cube.add_dim_coord(dim1, 1)
cube.add_aux_coord(other)
with self.temp_filename(suffix='.nc') as filename:
iris.save(cube, filename, netcdf_format='NETCDF3_CLASSIC')
self.assertCDL(filename, ('netcdf', 'netcdf_save_no_name.cdl'))
class TestNetCDFSave(tests.IrisTest):
def setUp(self):
self.cubell = iris.cube.Cube(np.arange(4).reshape(2, 2),
'air_temperature')
self.cube = iris.cube.Cube(np.zeros([2, 2]),
standard_name='surface_temperature',
long_name=None,
var_name='temp',
units='K')
self.cube2 = iris.cube.Cube(np.ones([1, 2, 2]),
standard_name=None,
long_name='Something Random',
var_name='temp2',
units='K')
self.cube3 = iris.cube.Cube(np.ones([2, 2, 2]),
standard_name=None,
long_name='Something Random',
var_name='temp3',
units='K')
self.cube4 = iris.cube.Cube(np.zeros([10]),
standard_name='air_temperature',
long_name=None,
var_name='temp',
units='K')
self.cube5 = iris.cube.Cube(np.ones([20]),
standard_name=None,
long_name='air_temperature',
var_name='temp2',
units='K')
self.cube6 = iris.cube.Cube(np.ones([10]),
standard_name=None,
long_name='air_temperature',
var_name='temp3',
units='K')
@tests.skip_data
def test_netcdf_save_format(self):
# Read netCDF input file.
file_in = tests.get_data_path(
('NetCDF', 'global', 'xyt', 'SMALL_total_column_co2.nc'))
cube = iris.load_cube(file_in)
with self.temp_filename(suffix='.nc') as file_out:
# Test default NETCDF4 file format saving.
iris.save(cube, file_out)
ds = nc.Dataset(file_out)
self.assertEqual(ds.file_format, 'NETCDF4',
'Failed to save as NETCDF4 format')
ds.close()
# Test NETCDF4_CLASSIC file format saving.
iris.save(cube, file_out, netcdf_format='NETCDF4_CLASSIC')
ds = nc.Dataset(file_out)
self.assertEqual(ds.file_format, 'NETCDF4_CLASSIC',
'Failed to save as NETCDF4_CLASSIC format')
ds.close()
# Test NETCDF3_CLASSIC file format saving.
iris.save(cube, file_out, netcdf_format='NETCDF3_CLASSIC')
ds = nc.Dataset(file_out)
self.assertEqual(ds.file_format, 'NETCDF3_CLASSIC',
'Failed to save as NETCDF3_CLASSIC format')
ds.close()
# Test NETCDF3_64BIT file format saving.
iris.save(cube, file_out, netcdf_format='NETCDF3_64BIT')
ds = nc.Dataset(file_out)
self.assertEqual(ds.file_format, 'NETCDF3_64BIT',
'Failed to save as NETCDF3_64BIT format')
ds.close()
# Test invalid file format saving.
with self.assertRaises(ValueError):
iris.save(cube, file_out, netcdf_format='WIBBLE')
@tests.skip_data
def test_netcdf_save_single(self):
# Test saving a single CF-netCDF file.
# Read PP input file.
file_in = tests.get_data_path(
('PP', 'cf_processing',
'000003000000.03.236.000128.1990.12.01.00.00.b.pp'))
cube = iris.load_cube(file_in)
# Write Cube to netCDF file.
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cube, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf', 'netcdf_save_single.cdl'))
# TODO investigate why merge now make time an AuxCoord rather than a
# DimCoord and why forecast_period is 'preferred'.
@tests.skip_data
def test_netcdf_save_multi2multi(self):
# Test saving multiple CF-netCDF files.
# Read PP input file.
file_in = tests.get_data_path(('PP', 'cf_processing',
'abcza_pa19591997_daily_29.b.pp'))
cubes = iris.load(file_in)
# Save multiple cubes to multiple files.
for index, cube in enumerate(cubes):
# Write Cube to netCDF file.
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cube, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf',
'netcdf_save_multi_%d.cdl' % index))
@tests.skip_data
def test_netcdf_save_multi2single(self):
# Test saving multiple cubes to a single CF-netCDF file.
# Read PP input file.
file_in = tests.get_data_path(('PP', 'cf_processing',
'abcza_pa19591997_daily_29.b.pp'))
cubes = iris.load(file_in)
# Write Cube to netCDF file.
with self.temp_filename(suffix='.nc') as file_out:
# Check that it is the same on loading
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf', 'netcdf_save_multiple.cdl'))
def test_netcdf_multi_nocoord(self):
# Testing the saving of a cubelist with no coords.
cubes = iris.cube.CubeList([self.cube, self.cube2, self.cube3])
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf', 'netcdf_save_nocoord.cdl'))
def test_netcdf_multi_samevarnme(self):
# Testing the saving of a cubelist with cubes of the same var_name.
self.cube2.var_name = self.cube.var_name
cubes = iris.cube.CubeList([self.cube, self.cube2])
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf', 'netcdf_save_samevar.cdl'))
def test_netcdf_multi_with_coords(self):
# Testing the saving of a cubelist with coordinates.
lat = iris.coords.DimCoord(np.arange(2),
long_name=None, var_name='lat',
units='degree_north')
lon = iris.coords.DimCoord(np.arange(2), standard_name='longitude',
long_name=None, var_name='lon',
units='degree_east')
rcoord = iris.coords.DimCoord(np.arange(1), standard_name=None,
long_name='Rnd Coordinate',
units=None)
self.cube.add_dim_coord(lon, 0)
self.cube.add_dim_coord(lat, 1)
self.cube2.add_dim_coord(lon, 1)
self.cube2.add_dim_coord(lat, 2)
self.cube2.add_dim_coord(rcoord, 0)
cubes = iris.cube.CubeList([self.cube, self.cube2])
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf', 'netcdf_save_wcoord.cdl'))
def test_netcdf_multi_with_samedimcoord(self):
time1 = iris.coords.DimCoord(np.arange(10),
standard_name='time',
var_name='time')
time2 = iris.coords.DimCoord(np.arange(20),
standard_name='time',
var_name='time')
self.cube4.add_dim_coord(time1, 0)
self.cube5.add_dim_coord(time2, 0)
self.cube6.add_dim_coord(time1, 0)
cubes = iris.cube.CubeList([self.cube4, self.cube5, self.cube6])
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf',
'netcdf_save_samedimcoord.cdl'))
def test_netcdf_multi_conflict_name_dup_coord(self):
# Duplicate coordinates with modified variable names lookup.
latitude1 = iris.coords.DimCoord(np.arange(10),
standard_name='latitude')
time2 = iris.coords.DimCoord(np.arange(2),
standard_name='time')
latitude2 = iris.coords.DimCoord(np.arange(2),
standard_name='latitude')
self.cube6.add_dim_coord(latitude1, 0)
self.cube.add_dim_coord(latitude2[:], 1)
self.cube.add_dim_coord(time2[:], 0)
cubes = iris.cube.CubeList([self.cube, self.cube6, self.cube6.copy()])
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(
file_out, ('netcdf', 'multi_dim_coord_slightly_different.cdl'))
@tests.skip_data
def test_netcdf_hybrid_height(self):
# Test saving a CF-netCDF file which contains a hybrid height
# (i.e. dimensionless vertical) coordinate.
# Read PP input file.
file_in = tests.get_data_path(
('PP', 'COLPEX', 'small_colpex_theta_p_alt.pp'))
cube = iris.load_cube(file_in, 'air_potential_temperature')
# Write Cube to netCDF file.
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cube, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out,
('netcdf', 'netcdf_save_hybrid_height.cdl'))
# Read netCDF file.
cube = iris.load_cube(file_out)
# Check the PP read, netCDF write, netCDF read mechanism.
self.assertCML(cube,
('netcdf', 'netcdf_save_load_hybrid_height.cml'))
@tests.skip_data
def test_netcdf_save_ndim_auxiliary(self):
# Test saving CF-netCDF with multi-dimensional auxiliary coordinates.
# Read netCDF input file.
file_in = tests.get_data_path(
('NetCDF', 'rotated', 'xyt', 'small_rotPole_precipitation.nc'))
cube = iris.load_cube(file_in)
# Write Cube to netCDF file.
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cube, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf',
'netcdf_save_ndim_auxiliary.cdl'))
# Read the netCDF file.
cube = iris.load_cube(file_out)
# Check the netCDF read, write, read mechanism.
self.assertCML(cube, ('netcdf',
'netcdf_save_load_ndim_auxiliary.cml'))
def test_netcdf_save_conflicting_aux(self):
# Test saving CF-netCDF with multi-dimensional auxiliary coordinates,
# with conflicts.
self.cube4.add_aux_coord(iris.coords.AuxCoord(np.arange(10),
'time'), 0)
self.cube6.add_aux_coord(iris.coords.AuxCoord(np.arange(10, 20),
'time'), 0)
cubes = iris.cube.CubeList([self.cube4, self.cube6])
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf', 'netcdf_save_conf_aux.cdl'))
def test_netcdf_save_gridmapping(self):
# Test saving CF-netCDF from a cubelist with various grid mappings.
c1 = self.cubell
c2 = self.cubell.copy()
c3 = self.cubell.copy()
coord_system = icoord_systems.GeogCS(6371229)
coord_system2 = icoord_systems.GeogCS(6371228)
coord_system3 = icoord_systems.RotatedGeogCS(30, 30)
c1.add_dim_coord(iris.coords.DimCoord(
np.arange(1, 3), 'latitude', long_name='1',
coord_system=coord_system), 1)
c1.add_dim_coord(iris.coords.DimCoord(
np.arange(1, 3), 'longitude', long_name='1',
coord_system=coord_system), 0)
c2.add_dim_coord(iris.coords.DimCoord(
np.arange(1, 3), 'latitude', long_name='2',
coord_system=coord_system2), 1)
c2.add_dim_coord(iris.coords.DimCoord(
np.arange(1, 3), 'longitude', long_name='2',
coord_system=coord_system2), 0)
c3.add_dim_coord(iris.coords.DimCoord(
np.arange(1, 3), 'grid_latitude', long_name='3',
coord_system=coord_system3), 1)
c3.add_dim_coord(iris.coords.DimCoord(
np.arange(1, 3), 'grid_longitude', long_name='3',
coord_system=coord_system3), 0)
cubes = iris.cube.CubeList([c1, c2, c3])
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf',
'netcdf_save_gridmapmulti.cdl'))
def test_netcdf_save_conflicting_names(self):
# Test saving CF-netCDF with a dimension name corresponding to
# an existing variable name (conflict).
self.cube4.add_dim_coord(iris.coords.DimCoord(np.arange(10),
'time'), 0)
self.cube6.add_aux_coord(iris.coords.AuxCoord(1, 'time'), None)
cubes = iris.cube.CubeList([self.cube4, self.cube6])
with self.temp_filename(suffix='.nc') as file_out:
iris.save(cubes, file_out)
# Check the netCDF file against CDL expected output.
self.assertCDL(file_out, ('netcdf', 'netcdf_save_conf_name.cdl'))
@tests.skip_data
def test_trajectory(self):
file_in = tests.get_data_path(('PP', 'aPPglob1', 'global.pp'))
cube = iris.load_cube(file_in)
# extract a trajectory
xpoint = cube.coord('longitude').points[:10]
ypoint = cube.coord('latitude').points[:10]
sample_points = [('latitude', xpoint), ('longitude', ypoint)]
traj = iris.analysis.trajectory.interpolate(cube, sample_points)
# save, reload and check
with self.temp_filename(suffix='.nc') as temp_filename:
iris.save(traj, temp_filename)
reloaded = iris.load_cube(temp_filename)
self.assertCML(reloaded,
('netcdf', 'save_load_traj.cml'),
checksum=False)
self.assertArrayEqual(traj.data, reloaded.data)
def test_attributes(self):
# Should be global attributes.
self.cube.attributes['history'] = 'A long time ago...'
self.cube.attributes['title'] = 'Attribute test'
self.cube.attributes['foo'] = 'bar'
# Should be data variable attributes.
self.cube.attributes['standard_error_multiplier'] = 23
self.cube.attributes['flag_masks'] = 'a'
self.cube.attributes['flag_meanings'] = 'b'
self.cube.attributes['flag_values'] = 'c'
self.cube.attributes['STASH'] = iris.fileformats.pp.STASH(1, 2, 3)
# Should be overridden.
self.cube.attributes['conventions'] = 'TEST'
with self.temp_filename(suffix='.nc') as filename:
iris.save(self.cube, filename)
self.assertCDL(filename, ('netcdf', 'netcdf_save_attr.cdl'))
def test_conflicting_attributes(self):
# Should be data variable attributes.
self.cube.attributes['foo'] = 'bar'
self.cube2.attributes['foo'] = 'orange'
with self.temp_filename(suffix='.nc') as filename:
iris.save([self.cube, self.cube2], filename)
self.assertCDL(filename, ('netcdf', 'netcdf_save_confl_attr.cdl'))
def test_conflicting_global_attributes(self):
# Should be data variable attributes, but raise a warning.
attr_name = 'history'
self.cube.attributes[attr_name] = 'Team A won.'
self.cube2.attributes[attr_name] = 'Team B won.'
expected_msg = '{attr_name!r} is being added as CF data variable ' \
'attribute, but {attr_name!r} should only be a CF ' \
'global attribute.'.format(attr_name=attr_name)
with self.temp_filename(suffix='.nc') as filename:
with mock.patch('warnings.warn') as warn:
iris.save([self.cube, self.cube2], filename)
warn.assert_called_with(expected_msg)
self.assertCDL(filename,
('netcdf', 'netcdf_save_confl_global_attr.cdl'))
def test_no_global_attributes(self):
# Should all be data variable attributes.
# Different keys.
self.cube.attributes['a'] = 'a'
self.cube2.attributes['b'] = 'a'
self.cube3.attributes['c'] = 'a'
self.cube4.attributes['d'] = 'a'
self.cube5.attributes['e'] = 'a'
self.cube6.attributes['f'] = 'a'
# Different values.
self.cube.attributes['g'] = 'p'
self.cube2.attributes['g'] = 'q'
self.cube3.attributes['g'] = 'r'
self.cube4.attributes['g'] = 's'
self.cube5.attributes['g'] = 't'
self.cube6.attributes['g'] = 'u'
# One different value.
self.cube.attributes['h'] = 'v'
self.cube2.attributes['h'] = 'v'
self.cube3.attributes['h'] = 'v'
self.cube4.attributes['h'] = 'w'
self.cube5.attributes['h'] = 'v'
self.cube6.attributes['h'] = 'v'
cubes = [self.cube, self.cube2, self.cube3,
self.cube4, self.cube5, self.cube6]
with self.temp_filename(suffix='.nc') as filename:
iris.save(cubes, filename)
self.assertCDL(filename, ('netcdf',
'netcdf_save_no_global_attr.cdl'))
class TestNetCDF3SaveInteger(tests.IrisTest):
def setUp(self):
self.cube = iris.cube.Cube(np.zeros((2, 2), dtype=np.float64),
standard_name='surface_temperature',
long_name=None,
var_name='temp',
units='K')
def test_int64_dimension_coord_netcdf3(self):
coord = iris.coords.DimCoord(np.array([1, 2], dtype=np.int64),
long_name='x')
self.cube.add_dim_coord(coord, 0)
with self.temp_filename(suffix='.nc') as filename:
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
reloaded = iris.load_cube(filename)
self.assertCML(reloaded, ('netcdf',
'int64_dimension_coord_netcdf3.cml'),
checksum=False)
def test_int64_auxiliary_coord_netcdf3(self):
coord = iris.coords.AuxCoord(np.array([1, 2], dtype=np.int64),
long_name='x')
self.cube.add_aux_coord(coord, 0)
with self.temp_filename(suffix='.nc') as filename:
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
reloaded = iris.load_cube(filename)
self.assertCML(reloaded, ('netcdf',
'int64_auxiliary_coord_netcdf3.cml'),
checksum=False)
def test_int64_data_netcdf3(self):
self.cube.data = self.cube.data.astype(np.int64)
with self.temp_filename(suffix='.nc') as filename:
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
reloaded = iris.load_cube(filename)
self.assertCML(reloaded, ('netcdf',
'int64_data_netcdf3.cml'))
def test_uint32_dimension_coord_netcdf3(self):
coord = iris.coords.DimCoord(np.array([1, 2], dtype=np.uint32),
long_name='x')
self.cube.add_dim_coord(coord, 0)
with self.temp_filename(suffix='.nc') as filename:
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
reloaded = iris.load_cube(filename)
self.assertCML(reloaded, ('netcdf',
'uint32_dimension_coord_netcdf3.cml'),
checksum=False)
def test_uint32_auxiliary_coord_netcdf3(self):
coord = iris.coords.AuxCoord(np.array([1, 2], dtype=np.uint32),
long_name='x')
self.cube.add_aux_coord(coord, 0)
with self.temp_filename(suffix='.nc') as filename:
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
reloaded = iris.load_cube(filename)
self.assertCML(reloaded, ('netcdf',
'uint32_auxiliary_coord_netcdf3.cml'),
checksum=False)
def test_uint32_data_netcdf3(self):
self.cube.data = self.cube.data.astype(np.uint32)
with self.temp_filename(suffix='.nc') as filename:
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
reloaded = iris.load_cube(filename)
self.assertCML(reloaded, ('netcdf',
'uint32_data_netcdf3.cml'))
def test_uint64_dimension_coord_netcdf3(self):
# Points that cannot be safely cast to int32.
coord = iris.coords.DimCoord(np.array([0, 18446744073709551615],
dtype=np.uint64),
long_name='x')
self.cube.add_dim_coord(coord, 0)
with self.temp_filename(suffix='.nc') as filename:
with self.assertRaises(ValueError):
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
def test_uint64_auxiliary_coord_netcdf3(self):
# Points that cannot be safely cast to int32.
coord = iris.coords.AuxCoord(np.array([0, 18446744073709551615],
dtype=np.uint64),
long_name='x')
self.cube.add_aux_coord(coord, 0)
with self.temp_filename(suffix='.nc') as filename:
with self.assertRaises(ValueError):
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
def test_uint64_data_netcdf3(self):
# Data that cannot be safely cast to int32.
self.cube.data = self.cube.data.astype(np.uint64)
self.cube.data[0, 1] = 18446744073709551615
with self.temp_filename(suffix='.nc') as filename:
with self.assertRaises(ValueError):
iris.save(self.cube, filename, netcdf_format='NETCDF3_CLASSIC')
class TestCFStandardName(tests.IrisTest):
def setUp(self):
pass
def test_std_name_lookup_pass(self):
# Test performing a CF standard name look-up hit.
self.assertTrue('time' in iris.std_names.STD_NAMES)
def test_std_name_lookup_fail(self):
# Test performing a CF standard name look-up miss.
self.assertFalse('phenomenon_time' in iris.std_names.STD_NAMES)
@tests.skip_data
class TestNetCDFUKmoProcessFlags(tests.IrisTest):
def test_process_flags(self):
# Test single process flags
for _, process_desc in iris.fileformats.pp.LBPROC_PAIRS[1:]:
# Get basic cube and set process flag manually
ll_cube = stock.lat_lon_cube()
ll_cube.attributes["ukmo__process_flags"] = (process_desc,)
# Save cube to netCDF
with self.temp_filename(suffix='.nc') as temp_filename:
iris.save(ll_cube, temp_filename)
# Reload cube
cube = iris.load_cube(temp_filename)
# Check correct number and type of flags
self.assertTrue(
len(cube.attributes["ukmo__process_flags"]) == 1,
"Mismatch in number of process flags.")
process_flag = cube.attributes["ukmo__process_flags"][0]
self.assertEqual(process_flag, process_desc)
# Test multiple process flags
multiple_bit_values = ((128, 64), (4096, 1024), (8192, 1024))
# Maps lbproc value to the process flags that should be created
multiple_map = {bits: [iris.fileformats.pp.lbproc_map[bit] for
bit in bits] for bits in multiple_bit_values}
for bits, descriptions in multiple_map.items():
ll_cube = stock.lat_lon_cube()
ll_cube.attributes["ukmo__process_flags"] = descriptions
# Save cube to netCDF
with self.temp_filename(suffix='.nc') as temp_filename:
iris.save(ll_cube, temp_filename)
# Reload cube
cube = iris.load_cube(temp_filename)
# Check correct number and type of flags
process_flags = cube.attributes["ukmo__process_flags"]
self.assertTrue(len(process_flags) == len(bits), 'Mismatch in '
'number of process flags.')
self.assertEqual(set(process_flags), set(descriptions))
if __name__ == "__main__":
tests.main()
|
Jozhogg/iris
|
lib/iris/tests/test_netcdf.py
|
Python
|
lgpl-3.0
| 39,602
|
[
"NetCDF"
] |
64a81974b47bc0d4fb6e706d14d1d6d3aae8a75b3334a6b52723460f670ed702
|
# -*- coding: utf-8 -*-
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
from innovator.admin.models import Event
class TestEvent:
def test_event_public_display(self, event, testapp):
res = testapp.get('/events/')
assert res.status_code == 200
assert 'test event' in res
assert 'reisen sai moe' in res
def test_add_event(self, admin, testapp):
# Goes to homepage
res = testapp.get('/')
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = admin.username
form['password'] = '12345678'
# Submits
res = form.submit().follow()
event_old_count = len(Event.query.all())
res = testapp.get('/admin/event/new/')
form = res.forms['eventForm']
form['name'] = 'unique event'
form['description'] = 'event test'
form['start'] = '1989-06-04 17:00:00'
form['end'] = '1989-06-04 19:00:00'
form['extra'] = '["item1", "item2"]'
res = form.submit().follow()
event_new_count = len(Event.query.all())
assert event_new_count - event_old_count == 1
assert res.status_code == 200
assert 'Event successfully created.' in res
def test_delete_event(self, admin, event, testapp):
# Goes to homepage
res = testapp.get('/')
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = admin.username
form['password'] = '12345678'
# Submits
res = form.submit().follow()
event_old_count = len(Event.query.all())
# Go to edit page
res = testapp.get('/admin/event/').click('test event')
form = res.forms['deleteForm']
res = form.submit().follow()
assert res.status_code == 200
assert 'Event deleted.' in res
event_new_count = len(Event.query.all())
assert event_old_count - event_new_count == 1
def test_edit_event(self, admin, event, testapp):
# Goes to homepage
res = testapp.get('/')
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = admin.username
form['password'] = '12345678'
# Submits
res = form.submit().follow()
assert len(Event.query.filter_by(name='test event').all()) == 1
assert len(Event.query.filter_by(name='new event').all()) == 0
# Go to edit page
res = testapp.get('/admin/event/').click('test event').click('Edit')
form = res.forms['eventForm']
form['name'] = 'new event'
form['extra'] = '["hi!"]'
res = form.submit().follow()
assert res.status_code == 200
assert 'Event edited.' in res
assert len(Event.query.filter_by(name='test event').all()) == 0
assert len(Event.query.filter_by(name='new event').all()) == 1
def test_event_detail(self, admin, event, testapp):
# Goes to homepage
res = testapp.get('/')
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = admin.username
form['password'] = '12345678'
# Submits
res = form.submit().follow()
res = testapp.get('/admin/event/')
assert res.status_code == 200
assert 'test event' in res
res = res.click('test event')
assert res.status_code == 200
assert 'No submission yet' in res
def test_event_associated_work_display(self, admin, work, testapp):
# Goes to homepage
res = testapp.get('/')
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = admin.username
form['password'] = '12345678'
# Submits
res = form.submit().follow()
res = testapp.get('/admin/event/').click('unique event')
assert res.status_code == 200
assert 'my test work' in res
|
B-O-P/innovator
|
tests/test_event.py
|
Python
|
bsd-3-clause
| 3,976
|
[
"MOE"
] |
0b077ceadf2fcda24108b556720eb75c29f7b30de66a4382eb90c72af0fea4bd
|
"""
Copyright (c) 2012-2017, Zenotech Ltd
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Zenotech Ltd nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL ZENOTECH LTD BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from builtins import str
from builtins import range
from past.utils import old_div
import math
import ast
import sys
from paraview.simple import *
import numpy as np
from . import post
import zutil
import os
from zutil import ABL
import vtk
# Usage
# import zutil.farm as farm
# farm.create_mesh_sources('London_Array_ZCFD.py',(396000,5721000))
# farm.create_mesh_sources('Horns_Rev_ZCFD.py',(427000,6149500))
# f.create_zcfd_input('Horns_Rev_ZCFD.py',(427000,6149500))
# http://jthatch.com/Terrain2STL/
def create_mesh_sources(array_data_file, farm_centre, turbine_only=False):
# Read file
array_data = {}
with open(array_data_file, "r") as f:
s = f.read()
array_data = ast.literal_eval(s)
# Cases
cases = array_data["Cases"]
for (key, value) in list(cases.items()):
# print key
# Wind direction
wind_direction = key
# Turbines
turbines = array_data["Turbines"]
# List of tuples
mesh_source_location = []
for (key, value) in list(turbines.items()):
# print key
name = key
if isinstance(key, int):
name = "A" + str(key)
# Location as a tuple
turbine_location = (value["X"], value["Y"], float(value["Z"]))
# Convert to local coordinates
turbine_location = convert_to_local_coordinates(
turbine_location, farm_centre
)
# Compute new location
turbine_location = get_turbine_location(turbine_location, wind_direction)
turbine_diameter = float(value["RotorDiameter"])
# Create line source using turbine diameter
source = create_source(turbine_location, turbine_diameter)
mesh_source_location.append(source)
generate_turbine(name, turbine_location, turbine_diameter, wind_direction)
if not turbine_only:
generate_turbine_region(
name, turbine_location, turbine_diameter, wind_direction
)
if not turbine_only:
# Write Solar .bac file
write_solar_bac(
"wind-" + str(wind_direction) + ".bac",
wind_direction,
mesh_source_location,
)
# Write Solar .ctl file
write_control_file("wind-" + str(wind_direction) + ".ctl")
def create_turbines(array_data_file, wall_file, volume_file, turbine_only=False):
array_data = {}
with open(array_data_file, "r") as f:
s = f.read()
array_data = ast.literal_eval(s)
# Read terrain
terrain = PVDReader(FileName=wall_file)
terrain = CleantoGrid(Input=terrain)
bounds = terrain.GetDataInformation().GetBounds()
# Elevation
elevation = Elevation(Input=terrain)
elevation.LowPoint = [0, 0, bounds[4]]
elevation.HighPoint = [0, 0, bounds[5]]
elevation.ScalarRange = [bounds[4], bounds[5]]
# Flatten
transform = Transform(Input=elevation)
transform.Transform = "Transform"
transform.Transform.Scale = [1.0, 1.0, 0.0]
transform.UpdatePipeline()
# create a new 'Probe Location'
probeLocation = ProbeLocation(
Input=transform, ProbeType="Fixed Radius Point Source"
)
probeLocation.Tolerance = 2.22044604925031e-16
# Read volume
volume = PVDReader(FileName=volume_file)
volume = CleantoGrid(Input=volume)
volume.UpdatePipeline()
hubProbe = ProbeLocation(Input=volume, ProbeType="Fixed Radius Point Source")
hubProbe.Tolerance = 2.22044604925031e-16
# Cases
cases = array_data["Cases"]
for (key, value) in list(cases.items()):
# print key
# Wind direction
wind_direction = key
# Turbines
turbines = array_data["Turbines"]
# List of tuples
location = []
for (key, value) in list(turbines.items()):
# print key
name = key
if isinstance(key, int):
name = "A" + str(key)
# Location as a tuple
turbine_location = (value["X"], value["Y"], float(value["Z"]))
# Find terrain elevation at X and Y
probeLocation.ProbeType.Center = [
turbine_location[0],
turbine_location[1],
0.0,
]
probeLocation.UpdatePipeline()
ground = probeLocation.GetPointData().GetArray("Elevation").GetValue(0)
turbine_location = (value["X"], value["Y"], ground + float(value["Z"]))
turbine_diameter = float(value["RotorDiameter"])
# Get wind direction at hub
hubProbe.ProbeType.Center = [
turbine_location[0],
turbine_location[1],
turbine_location[2],
]
hubProbe.UpdatePipeline()
            # The velocity array is a 3-component vector; take its horizontal
            # components at the probed hub point.
            u, v = hubProbe.GetPointData().GetArray("V").GetTuple(0)[:2]
            # Wind direction convention as used in write_windfarmer_data below
            local_wind_direction = 270.0 - np.angle(complex(u, v), deg=True)
            # Thrust coefficient: use the last entry of a tabulated curve, or
            # the value directly if a single number was given.
            try:
                thrust_coefficient = float(value["ThrustCoEfficient"][-1])
            except (TypeError, IndexError):
                thrust_coefficient = float(value["ThrustCoEfficient"])
turbine_diameter = float(value["RotorDiameter"])
# Point turbine into the wind
turbine_normal = [-u, -v, 0.0]
mag = math.sqrt(sum(x ** 2 for x in turbine_normal))
turbine_normal = [-u / mag, -v / mag, 0.0]
generate_turbine(
name, turbine_location, turbine_diameter, wind_direction, True
)
if not turbine_only:
                generate_turbine_region(
                    name,
                    turbine_location,
                    turbine_diameter,
                    wind_direction,
                    rotate=True,
                )
location.append(
(
name,
wind_direction,
turbine_location,
turbine_diameter,
thrust_coefficient,
turbine_normal,
)
)
# Write zone definition
write_zcfd_zones("wind-" + str(wind_direction) + "_zone.py", location)
def create_zcfd_input(array_data_file, farm_centre):
# Read file
array_data = {}
with open(array_data_file, "r") as f:
s = f.read()
array_data = ast.literal_eval(s)
# Cases
cases = array_data["Cases"]
for (key, value) in list(cases.items()):
# print key
# Wind direction
wind_direction = key
# Wind Speed
wind_speed = value["Windspeed"]
density = value["AirDensity"]
# Turbines
turbines = array_data["Turbines"]
# List of tuples
location = []
for (key, value) in list(turbines.items()):
# print key
name = key
if isinstance(key, int):
name = "A" + str(key)
# Location as a tuple
turbine_location = (value["X"], value["Y"], float(value["Z"]))
# Convert to local coordinates
turbine_location = convert_to_local_coordinates(
turbine_location, farm_centre
)
# Compute new location
turbine_location = get_turbine_location(turbine_location, wind_direction)
            # Thrust coefficient: use the last entry of a tabulated curve, or
            # the value directly if a single number was given.
            try:
                thrust_coefficient = float(value["ThrustCoEfficient"][-1])
            except (TypeError, IndexError):
                thrust_coefficient = float(value["ThrustCoEfficient"])
turbine_diameter = float(value["RotorDiameter"])
location.append(
(
name,
wind_direction,
turbine_location,
turbine_diameter,
thrust_coefficient,
)
)
# Write zone definition
write_zcfd_zones("wind-" + str(wind_direction) + "_zone.py", location)
def write_zcfd_zones(zcfd_file_name, location):
with open(zcfd_file_name, "w") as f:
for idx, val in enumerate(location):
f.write("'FZ_" + str(idx + 1) + "':{\n")
f.write("'type':'disc',\n")
f.write("'def':'" + str(val[0]) + "-" + str(val[1]) + ".vtp',\n")
f.write("'thrust coefficient':" + str(val[4]) + ",\n")
f.write("'tip speed ratio':" + str(6.0) + ",\n")
f.write(
"'centre':["
+ str(val[2][0])
+ ","
+ str(val[2][1])
+ ","
+ str(val[2][2])
+ "],\n"
)
f.write("'up':[0.0,0.0,1.0],\n")
if len(val) > 5:
f.write(
"'normal':["
+ str(val[5][0])
+ ","
+ str(val[5][1])
+ ","
+ str(val[5][2])
+ "],\n"
)
else:
f.write("'normal':[-1.0,0.0,0.0],\n")
f.write("'inner radius':" + str(0.05 * val[3] / 2.0) + ",\n")
f.write("'outer radius':" + str(val[3] / 2.0) + ",\n")
f.write("'reference plane': True,\n")
f.write(
"'reference point':["
+ str(val[2][0])
+ ","
+ str(val[2][1])
+ ","
+ str(val[2][2])
+ "],\n"
)
f.write("'update frequency': 10,\n")
f.write("},\n")
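# Hypothetical example: write_zcfd_zones('wind-270.0_zone.py',
#     [('T1', 270.0, (0.0, 0.0, 80.0), 100.0, 0.8)])
# emits a single 'FZ_1' disc zone with outer radius 50.0, inner radius 2.5,
# thrust coefficient 0.8 and the default normal [-1.0,0.0,0.0].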
def generate_turbine_region(
turbine_name,
turbine_location,
turbine_diameter,
wind_direction,
turbine_factor=2.0,
rotate=False,
):
# cylinder = Cylinder()
# cylinder.Radius = 0.5 * turbine_diameter
# cylinder.Resolution = 128
# cylinder.Height = turbine_factor * turbine_diameter
line = Line()
line.Point1 = [0.0, -0.5 * turbine_factor * turbine_diameter, 0.0]
line.Point2 = [0.0, 0.5 * turbine_factor * turbine_diameter, 0.0]
line.Resolution = 10
tube = Tube(Input=line)
tube.NumberofSides = 128
tube.Radius = 0.5 * turbine_diameter
transform = Transform(Input=tube)
transform.Transform = "Transform"
transform.Transform.Rotate = [0.0, 0.0, 90.0]
if rotate:
transform.Transform.Rotate = [0.0, 0.0, -wind_direction]
transform.Transform.Translate = [
turbine_location[0],
turbine_location[1],
turbine_location[2],
]
writer = CreateWriter(turbine_name + "-" + str(wind_direction) + ".vtp")
writer.Input = transform
writer.UpdatePipeline()
def generate_turbine(
turbine_name, turbine_location, turbine_diameter, wind_direction, rotate=False
):
disk = Disk()
disk.InnerRadius = 0.05 * 0.5 * turbine_diameter
disk.OuterRadius = 0.5 * turbine_diameter
disk.CircumferentialResolution = 128
disk.RadialResolution = 12
transform = Transform()
transform.Transform = "Transform"
transform.Transform.Rotate = [0.0, 90.0, 0.0]
if rotate:
transform.Transform.Rotate = [0.0, 90.0, 90.0 - wind_direction]
transform.Transform.Translate = [
turbine_location[0],
turbine_location[1],
turbine_location[2],
]
writer = CreateWriter(turbine_name + "-" + str(wind_direction) + "-disk.vtp")
writer.Input = transform
writer.UpdatePipeline()
def write_control_file(control_file_name):
with open(control_file_name, "w") as f:
f.write("domain: -50000 50000 -50000 50000 0 1000.0" + "\n")
f.write("initial: 5000.0" + "\n")
f.write("generateCartOnly: true" + "\n")
f.write("generateLayer: false" + "\n")
def create_source(turbine_location, diameter):
upstream_factor = 1.0
downstream_factor = 4.0
radial_factor = 1.0
diameter_mesh_pts = 50.0
radius_factor = 2.0
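    # Worked example (hypothetical 100 m rotor): the line source runs from
    # 100 m upstream to 400 m downstream of the hub, with a local mesh size of
    # 100/50 = 2 m and a source radius of 50 m.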
# Upstream
pt_1 = turbine_location[0] - upstream_factor * diameter
# Downstream
pt_2 = turbine_location[0] + downstream_factor * diameter
# Radius
radius = 0.5 * diameter * radial_factor
# Mesh size
mesh_size = old_div(diameter, diameter_mesh_pts)
return (
(
pt_1,
turbine_location[1],
turbine_location[2],
mesh_size,
radius,
radius * radius_factor,
),
(
pt_2,
turbine_location[1],
turbine_location[2],
mesh_size,
radius,
radius * radius_factor,
),
)
def write_solar_bac(bac_file_name, wind_direction, mesh_source):
farfield_mesh_size = 5000.0
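    # The .bac file describes a background mesh-size field: eight corner points
    # of a +/-1.0e6 bounding box (each with isotropic spacing
    # farfield_mesh_size), six tetrahedra connecting those corners, then one
    # line source per turbine to refine the rotor and wake region.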
with open(bac_file_name, "w") as f:
f.write("zCFD Farmer - wind direction: " + str(wind_direction) + "\n")
f.write(" 8 6" + "\n")
f.write(" 1 1.0e+6 -1.0e+6 -1.0e+6" + "\n")
f.write(" 1.0 0.0 0.0 " + str(farfield_mesh_size) + "\n")
f.write(" 0.0 1.0 0.0 " + str(farfield_mesh_size) + "\n")
f.write(" 0.0 0.0 1.0 " + str(farfield_mesh_size) + "\n")
f.write(" 2 1.0e+6 1.0e+6 -1.0e+6" + "\n")
f.write(" 1.0 0.0 0.0 " + str(farfield_mesh_size) + "\n")
f.write(" 0.0 1.0 0.0 " + str(farfield_mesh_size) + "\n")
f.write(" 0.0 0.0 1.0 " + str(farfield_mesh_size) + "\n")
f.write(" 3 -1.0e+6 -1.0e+6 -1.0e+6" + "\n")
f.write(" 1.0 0.0 0.0 " + str(farfield_mesh_size) + "\n")
f.write(" 0.0 1.0 0.0 " + str(farfield_mesh_size) + "\n")
f.write(" 0.0 0.0 1.0 " + str(farfield_mesh_size) + "\n")
f.write(" 4 -1.0e+6 1.0e+6 -1.0e+6" + "\n")
f.write(" 1.0 0.0 0.0 " + str(farfield_mesh_size) + "\n")
f.write(" 0.0 1.0 0.0 " + str(farfield_mesh_size) + "\n")
f.write(" 0.0 0.0 1.0 " + str(farfield_mesh_size) + "\n")
f.write(" 5 1.0e+6 -1.0e+6 1.0e+6" + "\n")
f.write(" 1.0 0.0 0.0 " + str(farfield_mesh_size) + "\n")
f.write(" 0.0 1.0 0.0 " + str(farfield_mesh_size) + "\n")
f.write(" 0.0 0.0 1.0 " + str(farfield_mesh_size) + "\n")
f.write(" 6 1.0e+6 1.0e+6 1.0e+6" + "\n")
f.write(" 1.0 0.0 0.0 " + str(farfield_mesh_size) + "\n")
f.write(" 0.0 1.0 0.0 " + str(farfield_mesh_size) + "\n")
f.write(" 0.0 0.0 1.0 " + str(farfield_mesh_size) + "\n")
f.write(" 7 -1.0e+6 -1.0e+6 1.0e+6" + "\n")
f.write(" 1.0 0.0 0.0 " + str(farfield_mesh_size) + "\n")
f.write(" 0.0 1.0 0.0 " + str(farfield_mesh_size) + "\n")
f.write(" 0.0 0.0 1.0 " + str(farfield_mesh_size) + "\n")
f.write(" 8 -1.0e+6 1.0e+6 1.0e+6" + "\n")
f.write(" 1.0 0.0 0.0 " + str(farfield_mesh_size) + "\n")
f.write(" 0.0 1.0 0.0 " + str(farfield_mesh_size) + "\n")
f.write(" 0.0 0.0 1.0 " + str(farfield_mesh_size) + "\n")
f.write(" 1 1 2 4 8" + "\n")
f.write(" 2 1 2 8 6" + "\n")
f.write(" 3 1 6 8 5" + "\n")
f.write(" 4 2 3 4 7" + "\n")
f.write(" 5 2 7 4 8" + "\n")
f.write(" 6 2 7 8 6" + "\n")
f.write(" background sources..." + "\n")
f.write(" 0 " + str(len(mesh_source)) + " 0" + "\n")
f.write(" The points" + "\n")
f.write(" The lines" + "\n")
for s in mesh_source:
pt_1 = s[0]
pt_2 = s[1]
f.write("Line Source :" + "\n")
f.write(" ".join(str(elem) for elem in pt_1) + "\n")
f.write(" ".join(str(elem) for elem in pt_2) + "\n")
f.write(" The triangles" + "\n")
def convert_to_local_coordinates(turbine_location, farm_centre):
# Converts location from Eastings and Northings into local coordinates
#
local_coordinates = (
turbine_location[0] - farm_centre[0],
turbine_location[1] - farm_centre[1],
turbine_location[2],
)
return local_coordinates
def get_turbine_location(current_location, wind_direction):
# Assume that the CFD solver expects freestream flow in x-direction
# Rotate farm about its centre to account for the actual wind direction
# i.e. if the wind is coming from the West (270 degrees), then there is no rotation.
# In general: rotate wind farm in standard co-ordinate geometry sense
# about centre by (wind direction + 90 degrees)
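    # Worked example: for a westerly wind (wind_direction = 270.0) the rotation
    # angle is 360 degrees, i.e. the layout is unchanged; for a southerly wind
    # (180.0) the farm is rotated by 270 degrees about the origin.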
rotation_angle = wind_direction + 90.0
rotation_angle = math.radians(rotation_angle)
x_temp = current_location[0]
y_temp = current_location[1]
new_x_temp = x_temp * math.cos(rotation_angle) + y_temp * math.sin(rotation_angle)
new_y_temp = -x_temp * math.sin(rotation_angle) + y_temp * math.cos(rotation_angle)
return (new_x_temp, new_y_temp, current_location[2])
def report_data_reader(name, arr1, arr2):
var = -999.0
idx = np.where(arr1 == name)
if len(idx[0].flat) == 0:
print(
"farm.py : report_data_reader : no data found in results file for " + name
)
elif len(idx[0].flat) == 1:
var = float(arr2[idx[0]][0])
else:
print(
"farm.py : report_data_reader : multiple columns in data results file for "
+ name
+ " "
+ len(idx[0].flat)
)
return var
min_dist = 1.0e16
closest_point = [min_dist, min_dist, min_dist]
def closest_point_func(dataset, pointset, s=[0, 0, 0], **kwargs):
global min_dist, closest_point
points = pointset.GetPoints()
for p in points:
dx = s[0] - p[0]
dy = s[1] - p[1]
dz = s[2] - p[2]
dist = math.sqrt(dx * dx + dy * dy) # + dz*dz)
if dist < min_dist:
closest_point = p
min_dist = dist
def write_windfarmer_data(case_name, num_processes, up):
    # case_name = 'windfarm' (for example - note that there is no .py suffix)
    # num_processes = 24 (for example, an integer)
    # up = [0,1,0] (the vector pointing vertically upwards)
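    # Hypothetical example call: write_windfarmer_data('windfarm', 24, [0, 0, 1])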
global min_dist, closest_point
# Step 1: Read the case file parameters
__import__(case_name)
case_data = getattr(sys.modules[case_name], "parameters")
# Step 2: Determine the (conventional) wind direction from the case inflow
# parameters
v = case_data["IC_1"]["V"]["vector"]
print("farm.py : write_windfarmer_data : v = " + str(v))
angle = 270.0 - np.angle(complex(v[0], v[1]), deg=True)
if angle < 0.0:
angle += 360.0
if angle > 360.0:
angle -= 360.0
print("farm.py : write_windfarmer_data : angle = " + str(angle))
    # Step 3: Import the result file data including the probe data
windfarmer_filename = case_name + "_" + str(angle) + ".out"
print(
"farm.py : write_windfarmer_data : windfarmer_filename = " + windfarmer_filename
)
report_file_name = case_name + "_report.csv"
report_array = np.genfromtxt(report_file_name, dtype=None)
# Step 4: Calculate the ground heights at the probe locations by
# subtracting the local height of the wall
reader = OpenDataFile(
"./"
+ case_name
+ "_P"
+ str(num_processes)
+ "_OUTPUT/"
+ case_name
+ "_wall.pvd"
)
local_surface = servermanager.Fetch(reader)
# Step 5: Loop over the probe locations plus the results to create the
# Windfarmer file.
with open(windfarmer_filename, "w") as f:
f.write(
'"Point","X[m]","Y [m]","Z ground [m]","Z hub [m]","H [m]","D [m]","Theta[Deg]","TI hub","TI upper",'
+ '"TI lower","TI15 hub","TI15 upper","TI15 lower","Vxy hub [m/s]","Vxy upper [m/s]","Vxy lower [m/s]","Windshear [-]",'
+ '"Theta left [Deg]","Theta hub [Deg]","Theta right [Deg]","Veer [Deg]","Local Elevation Angle [Deg]","Simple Power [kW]",'
+ '"V/VT01_sample_vxy [-]","Power (Sector) [kW]","AEP (Sector) [kWh]","NEC [kWh]" \n'
)
for probe in case_data["report"]["monitor"]:
point = case_data["report"]["monitor"][probe]["point"]
name = case_data["report"]["monitor"][probe]["name"]
# Step 5.1: Find the report data for the windfarmer probe if it
# exists.
V_x = report_data_reader(
name + "_V_x", report_array[0], report_array[len(report_array) - 1]
)
V_y = report_data_reader(
name + "_V_y", report_array[0], report_array[len(report_array) - 1]
)
V_z = report_data_reader(
name + "_V_z", report_array[0], report_array[len(report_array) - 1]
)
TI_hub = report_data_reader(
name + "_ti", report_array[0], report_array[len(report_array) - 1]
)
VXY_hub = math.sqrt(V_x * V_x + V_y * V_y)
Theta_hub = 270.0 - np.angle(complex(V_x, V_y), deg=True)
if Theta_hub < 0.0:
Theta_hub += 360.0
if Theta_hub > 360.0:
Theta_hub -= 360.0
Local_Elevation_Angle = np.angle(complex(VXY_hub, V_z), deg=True)
# Step 5.2: Swap the axes if necessary to match the Windfarmer
# default (z-axis is up)
if up[0] == 0:
x = point[0]
if up[1] == 0:
y = point[1]
z = point[2]
elif up[2] == 0:
y = point[2]
z = point[1]
else:
x = point[1]
y = point[2]
z = point[0]
# Step 5.3: Find the closest ground point to the probe to work out
# elevation
min_dist = 1.0e16
closest_point = [min_dist, min_dist, min_dist]
post.for_each(local_surface, closest_point_func, s=[x, y, z])
zground = (
up[0] * closest_point[0]
+ up[1] * closest_point[1]
+ up[2] * closest_point[2]
)
zhub = up[0] * x + up[1] * y + up[2] * z
# Step 5.4: Output the Windfarmer data
f.write(
name
+ ","
+ str(x)
+ ","
+ str(y)
+ ","
+ str(zground)
+ ","
+ str(zhub)
+ ","
+ str(zhub - zground)
+ ","
+ ","
+ str(angle)
+ ","
+ str(TI_hub)
+ ",,,,,,"
+ str(VXY_hub)
+ ",,,,,"
+ str(Theta_hub)
+ ",,,"
+ str(Local_Elevation_Angle)
+ ",,,,, \n"
)
print("farm.py : write_windfarmer_data : DONE")
def create_trbx_zcfd_input(
case_name="windfarm",
wind_direction=0.0,
reference_wind_speed=10.0,
terrain_file=None, # any file for ParaView reader (STL, PVD, PVTU, etc)
report_frequency=200,
update_frequency=50,
reference_point_offset=1.0,
turbine_zone_length_factor=1.0,
model="induction", # options are (induction, simple, blade element theory)
turbine_files=[
["xyz_location_file1.txt", "turbine_type1.trbx"],
["xyz_location_file2.txt", "turbine_type2.trbx"],
],
calibration_offset=0.0,
**kwargs
):
# Ensure turbine folder exists
directory = "./turbine_vtp/"
if not os.path.exists(directory):
os.makedirs(directory)
# Make sure that the turbine zone contains the reference point
if turbine_zone_length_factor < 2.5 * reference_point_offset:
print(
"WARNING: Increasing Turbine Zone Length Factor from "
+ str(turbine_zone_length_factor)
+ " to "
+ str(2.5 * reference_point_offset)
)
turbine_zone_length_factor = 2.5 * reference_point_offset
# Issue a warning if the turbine zone length factor is less than 1.0
if turbine_zone_length_factor < 1.0:
print(
"WARNING: Turbine Zone Length Factor less than 1.0: "
+ str(turbine_zone_length_factor)
)
global min_dist, closest_point
from xml.etree import ElementTree as ET
local_surface = None
if terrain_file is not None:
reader = OpenDataFile(terrain_file)
local_surface = servermanager.Fetch(reader)
print("terrain file = " + terrain_file)
pointLocator = vtk.vtkPointLocator()
pointLocator.SetDataSet(local_surface)
pointLocator.BuildLocator()
# Step 1: Read in the location data (.txt) and turbine information (.trbx)
# for each turbine type
idx = 0
with open(case_name + "_zones.py", "w") as tz:
tz.write("turb_zone = {\n")
with open(case_name + "_probes.py", "w") as tp:
tp.write("turb_probe = { \n")
for turbine_type in turbine_files:
location_file_name = turbine_type[0]
if model in ("simple", "induction"):
trbx_file_name = turbine_type[1]
print("trbx file name = " + trbx_file_name)
trbx = ET.ElementTree(file=trbx_file_name)
root = trbx.getroot()
turbine_dict = {}
for elem in root:
turbine_dict[elem.tag] = elem.text
for elem in trbx.find("Turbine3DModel"):
turbine_dict[elem.tag] = elem.text
for elem in trbx.find(
"PerformanceTableList/PerformanceTable/PowerCurveInfo"
):
turbine_dict[elem.tag] = elem.text
for elem in trbx.find(
"PerformanceTableList/PerformanceTable/PowerCurveInfo/StartStopStrategy"
):
turbine_dict[elem.tag] = elem.text
turbine_dict["DataTable"] = {}
wp = 0
for elem in trbx.find(
"PerformanceTableList/PerformanceTable/DataTable"
):
turbine_dict["DataTable"][wp] = {}
for child in elem:
turbine_dict["DataTable"][wp][child.tag] = child.text
wp += 1
elif model in ("blade element theory"):
turbine_dict = turbine_type[1]
else:
print(
"Model not identified (simple, induction, blade element theory)"
)
print("location file name = " + location_file_name)
location_array = np.genfromtxt(
location_file_name, delimiter=" ", dtype=("<U100", float, float)
)
print(location_array)
# catch the case where only one turbine location is specified
if location_array.ndim < 1:
location_array = np.reshape(location_array, (1,))
for location in location_array:
idx += 1
name = str(location[0])
if len(name) > 99:
print(
"WARNING: farm.py: turbine name may be truncated "
+ str(name)
)
easting = location[1]
northing = location[2]
# Step 2: Work out the local elevation
if model in ("simple", "induction"):
hub_height = turbine_dict["SelectedHeight"]
rd = float(turbine_dict["RotorDiameter"])
elif model in ("blade element theory"):
hub_height = turbine_dict["hub height"]
rd = float(turbine_dict["outer radius"]) * 2.0
else:
print(
"Model not identified (simple, induction, blade element theory)"
)
min_dist = 1.0e16
closest_point = [min_dist, min_dist, min_dist]
if local_surface is not None:
pid = pointLocator.FindClosestPoint([easting, northing, 0.0])
closest_point = local_surface.GetPoint(pid)
height = closest_point[2]
hub_z = height + float(hub_height)
else:
hub_z = float(hub_height)
# Step 3: Generate the turbine region files
# (./turbine_vtp/*.vtp)
generate_turbine_region(
directory + name,
[easting, northing, hub_z],
float(rd),
wind_direction,
turbine_zone_length_factor,
True,
)
generate_turbine(
directory + name,
[easting, northing, hub_z],
float(rd),
wind_direction,
True,
)
# Step 4: Generate the turbine zone definition
# (./turbine_zone.py)
wv = zutil.vector_from_wind_dir(wind_direction)
if model in ("simple", "induction"):
tz.write("'FZ_" + str(idx) + "':{\n")
tz.write("'type':'disc',\n")
tz.write("'name': '" + name + "',\n")
tz.write(
"'def':'"
+ directory
+ name
+ "-"
+ str(wind_direction)
+ ".vtp',\n"
)
if len(list(turbine_dict["DataTable"].keys())) == 0:
print(
"WARNING: Windspeed DataTable empty - using Reference Wind Speed = "
+ str(reference_wind_speed)
)
wsc = np.zeros((4, len(list(turbine_dict["DataTable"].keys()))))
tcc_string = "[" # Thrust coefficient curve
tsc_string = "[" # Tip speed ratio curve
tpc_string = "[" # Turbine Power Curve
for wp in list(turbine_dict["DataTable"].keys()):
# Allow velocities to be shifted by user specified calibration
wsc[0][wp] = (
float(turbine_dict["DataTable"][wp]["WindSpeed"])
- calibration_offset
)
wsc[1][wp] = turbine_dict["DataTable"][wp][
"ThrustCoEfficient"
]
wsc[2][wp] = turbine_dict["DataTable"][wp]["RotorSpeed"]
wsc[3][wp] = turbine_dict["DataTable"][wp]["PowerOutput"]
tcc_string += (
"[" + str(wsc[0][wp]) + "," + str(wsc[1][wp]) + "],"
)
tsc_string += (
"["
+ str(wsc[0][wp])
+ ","
+ str(
((wsc[2][wp] * math.pi / 30.0) * rd / 2.0)
/ max(wsc[0][wp], 1.0)
)
+ "],"
)
tpc_string += (
"[" + str(wsc[0][wp]) + "," + str(wsc[3][wp]) + "],"
)
tcc_string += "]"
tsc_string += "]"
tpc_string += "]"
# print wsc
# If there is a single value for thrust coefficient use the
# reference wind speed
tc = np.interp(reference_wind_speed, wsc[0], wsc[1])
tz.write("'thrust coefficient':" + str(tc) + ",\n")
tz.write("'thrust coefficient curve':" + tcc_string + ",\n")
rs = np.interp(reference_wind_speed, wsc[0], wsc[2])
# The rotor speed is in revolutions per minute, so convert to tip speed ratio
tsr = ((rs * math.pi / 30.0) * rd / 2.0) / reference_wind_speed
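                        # e.g. (hypothetical) 12 rpm on a 100 m rotor at a
                        # 10 m/s reference speed gives a tip speed of about
                        # 62.8 m/s and a tip speed ratio of roughly 6.3.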
tz.write("'tip speed ratio':" + str(tsr) + ",\n")
tz.write("'tip speed ratio curve':" + tsc_string + ",\n")
tpc = np.interp(reference_wind_speed, wsc[0], wsc[3])
tz.write("'turbine power':" + str(tpc) + ",\n")
tz.write("'turbine power curve':" + tpc_string + ",\n")
tz.write(
"'centre':["
+ str(easting)
+ ","
+ str(northing)
+ ","
+ str(hub_z)
+ "],\n"
)
tz.write("'up':[0.0,0.0,1.0],\n")
tz.write(
"'normal':["
+ str(-wv[0])
+ ","
+ str(-wv[1])
+ ","
+ str(-wv[2])
+ "],\n"
)
tz.write(
"'inner radius':"
+ str(float(turbine_dict["DiskDiameter"]) / 2.0)
+ ",\n"
)
tz.write(
"'outer radius':"
+ str(float(turbine_dict["RotorDiameter"]) / 2.0)
+ ",\n"
)
pref = [
easting - reference_point_offset * rd * wv[0],
northing - reference_point_offset * rd * wv[1],
hub_z - reference_point_offset * rd * wv[2],
]
reference_plane = kwargs.get("reference_plane", True)
tz.write("'reference plane':" + str(reference_plane) + ",\n")
number_of_segments = kwargs.get("number_of_segments", 12)
tz.write(
"'number of segments':" + str(number_of_segments) + ",\n"
)
tz.write(
"'reference point':["
+ str(pref[0])
+ ","
+ str(pref[1])
+ ","
+ str(pref[2])
+ "],\n"
)
tz.write("'update frequency':" + str(update_frequency) + ",\n")
tz.write("'model':" + " '" + model + "',\n")
tz.write("},\n")
elif model in ("blade element theory"):
tz.write("'FZ_" + str(idx) + "':{\n")
tz.write("'type':'disc',\n")
tz.write("'name': '" + name + "',\n")
tz.write("'status':'on',\n")
tz.write(
"'number of blades':"
+ str(turbine_dict["number of blades"])
+ ",\n"
)
tz.write(
"'inner radius':"
+ str(float(turbine_dict["inner radius"]))
+ ",\n"
)
tz.write(
"'outer radius':"
+ str(float(turbine_dict["outer radius"]))
+ ",\n"
)
tz.write("'model':'blade element theory',\n")
tz.write(
"'def':'"
+ directory
+ name
+ "-"
+ str(wind_direction)
+ ".vtp',\n"
)
tz.write(
"'verbose': "
+ str(turbine_dict.get("verbose", False))
+ ",\n"
)
tz.write("'update frequency':" + str(update_frequency) + ",\n")
tz.write(
"'reference plane': "
+ str(turbine_dict.get("reference plane", True))
+ ",\n"
)
tz.write(
"'centre':["
+ str(easting)
+ ","
+ str(northing)
+ ","
+ str(hub_z)
+ "],\n"
)
tz.write("'up':[0.0,0.0,1.0],\n")
if "number of segments" in turbine_dict:
tz.write(
"'number of segments':"
+ str(turbine_dict["number of segments"])
+ ",\n"
)
else:
tz.write("'number of segments':" + str(12) + ",\n")
tz.write(
"'normal':["
+ str(-wv[0])
+ ","
+ str(-wv[1])
+ ","
+ str(-wv[2])
+ "],\n"
)
tz.write("'tilt':" + str(turbine_dict["tilt"]) + ",\n")
tz.write("'yaw':" + str(turbine_dict["yaw"]) + ",\n")
tz.write(
"'mean blade material density':"
+ str(turbine_dict["mean blade material density"])
+ ",\n"
)
tz.write("'auto yaw':True,\n")
tz.write(
"'tip speed limit':"
+ str(turbine_dict["tip speed limit"])
+ ",\n"
)
tz.write("'rpm ramp':" + str(turbine_dict["rpm ramp"]) + ",\n")
tz.write(
"'blade pitch tol':"
+ str(turbine_dict["blade pitch tol"])
+ ",\n"
)
tz.write(
"'rated power':" + str(turbine_dict["rated power"]) + ",\n"
)
tz.write("'dt':" + str(turbine_dict["dt"]) + ",\n")
tz.write("'inertia':True,\n")
tz.write(
"'damage ti':" + str(turbine_dict["damage ti"]) + ",\n"
)
tz.write(
"'damage speed':"
+ str(turbine_dict["damage speed"])
+ ",\n"
)
tz.write(
"'friction loss':"
+ str(turbine_dict["friction loss"])
+ ",\n"
)
tz.write(
"'cut in speed':"
+ str(turbine_dict["cut in speed"])
+ ",\n"
)
tz.write(
"'cut out speed':"
+ str(turbine_dict["cut out speed"])
+ ",\n"
)
tz.write("'rotation direction':'clockwise',\n")
if "thrust factor" in turbine_dict:
tz.write(
"'thrust factor':"
+ str(turbine_dict["thrust factor"])
+ ",\n"
)
if "tip loss correction" in turbine_dict:
tz.write(
"'tip loss correction': '"
+ str(turbine_dict["tip loss correction"])
+ "',\n"
)
if "blade pitch step" in turbine_dict:
tz.write(
"'blade pitch step':"
+ str(turbine_dict["blade pitch step"])
+ ",\n"
)
if "blade pitch" in turbine_dict:
tz.write(
"'blade pitch':"
+ str(turbine_dict["blade pitch"])
+ ",\n"
)
tz.write(
"'aerofoil profile':"
+ str(turbine_dict["aerofoil profile"])
+ ",\n"
)
tz.write(
"'aerofoil cl':" + str(turbine_dict["aerofoil cl"]) + ",\n"
)
tz.write(
"'aerofoil cd':" + str(turbine_dict["aerofoil cd"]) + ",\n"
)
tz.write(
"'blade chord':" + str(turbine_dict["blade chord"]) + ",\n"
)
tz.write(
"'blade twist':" + str(turbine_dict["blade twist"]) + ",\n"
)
tz.write(
"'blade pitch range':"
+ str(turbine_dict["blade pitch range"])
+ ",\n"
)
tz.write("},\n")
else:
pass
# Step 5: Generate the turbine monitor probes (./turbine_probe.py)
# Turbines: label@MHH@## (## = hub height of the turbine relative to the ground in meters)
# Anemometers: label@AN@## (## = height of the anemometer
# above the ground in meters)
tp.write(" 'MR_" + str(idx) + "' : {\n")
tp.write(
" 'name' :'probe"
+ str(idx)
+ "@MHH@"
+ str(hub_height)
+ "',\n"
)
tp.write(
" 'point' : ["
+ str(easting)
+ ","
+ str(northing)
+ ","
+ str(hub_z)
+ "],\n"
)
tp.write(" 'variables' : ['V', 'ti'],\n")
tp.write(" },\n")
tp.write("} \n")
tz.write("}\n")
def extract_probe_data(
case_name="windfarm",
wind_direction_start=0,
wind_direction_end=360,
wind_direction_step=10,
num_processes=16,
probe_location_file="name_x_y_z.txt",
offset=0.0,
**kwargs
):
import vtk
from vtk.util import numpy_support as VN
probe_location_array = np.genfromtxt(probe_location_file, dtype=None)
probe = vtk.vtkProbeFilter()
point = vtk.vtkPointSource()
for wd in range(wind_direction_start, wind_direction_end, wind_direction_step):
directory = (
case_name + "_" + str(int(wd)) + "_P" + str(num_processes) + "_OUTPUT"
)
filename = case_name + "_" + str(int(wd)) + ".pvd"
reader = OpenDataFile("./" + directory + "/" + filename)
local_volume = servermanager.Fetch(reader)
for location in probe_location_array:
name = location[0]
easting = location[1]
northing = location[2]
height = location[3] + offset
point.SetNumberOfPoints(1)
point.SetCenter([easting, northing, height])
probe.SetInputConnection(point.GetOutputPort())
probe.SetSourceData(local_volume)
probe.Update()
V = VN.vtk_to_numpy(probe.GetOutput().GetPointData().GetArray("V"))
ti = VN.vtk_to_numpy(probe.GetOutput().GetPointData().GetArray("ti"))
print(
str(wd)
+ " "
+ name
+ "_zoffset_"
+ str(offset)
+ "_V_x "
+ str(V[0][0])
)
print(
str(wd)
+ " "
+ name
+ "_zoffset_"
+ str(offset)
+ "_V_y "
+ str(V[0][1])
)
print(
str(wd)
+ " "
+ name
+ "_zoffset_"
+ str(offset)
+ "_V_z "
+ str(V[0][2])
)
print(
str(wd)
+ " "
+ name
+ "_zoffset_"
+ str(offset)
+ "_ti "
+ str(ti[0] + 0.1)
)
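# Hypothetical sweep using the defaults above:
# extract_probe_data(case_name='windfarm', wind_direction_start=0,
#                    wind_direction_end=360, wind_direction_step=10,
#                    num_processes=16, probe_location_file='name_x_y_z.txt')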
def generate_mesh_pts():
start = 0.001
max_height = 20000
growth_rate = 1.01
pts = []
    # Grow layer heights geometrically by growth_rate, capped at 100 m per layer
current_height = start
current_pos = start
pts.append(current_pos)
while current_pos < max_height:
current_height = min(100.0, current_height * growth_rate)
current_pos = current_pos + current_height
pts.append(current_pos)
# print current_height
return np.array(pts)
def create_profile(
profile_name,
hub_height,
hub_height_vel,
direction,
roughness,
min_ti,
min_mut,
scale_k=False,
plot=False,
kappa=0.41,
rho=1.225,
cmu=0.03,
mu=1.789e-5,
latitude=55.0,
):
# Using RH Law compute utau using hub values
utau = ABL.friction_velocity(hub_height_vel, hub_height, roughness, kappa)
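    # ABL.friction_velocity presumably implements the rough-wall log law,
    #   utau = kappa * hub_height_vel / log((hub_height + roughness) / roughness)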
print("Friction Velocity: " + str(utau))
# Ref http://orbit.dtu.dk/files/3737714/ris-r-1688.pdf
coriolis_parameter = ABL.coriolis_parameter(latitude)
geostrophic_plane = ABL.ekman_layer_height(utau, coriolis_parameter)
print("Ekman Layer top: " + str(geostrophic_plane))
print("This is top of ABL for neutral conditions")
print("Wall Stress: " + str(rho * utau ** 2))
pts = generate_mesh_pts()
vel = ABL.wind_speed_array(pts, utau, roughness, kappa)
if scale_k:
k_scale = (
np.ones(len(pts)) - np.minimum(np.ones(len(pts)), (pts / geostrophic_plane))
) ** 2
else:
k_scale = np.ones(len(pts))
k = k_scale * (utau ** 2) / math.sqrt(cmu)
eps = np.ones(len(pts)) * (utau ** 3) / (kappa * (pts + roughness))
    # Note: this is the ratio mut/mu (eddy viscosity non-dimensionalised by mu)
mut = np.maximum(rho * cmu * k ** 2 / (eps * mu), np.ones(len(pts)) * min_mut)
TI = np.maximum((2 * k / 3) ** 0.5 / vel, np.ones(len(pts)) * min_ti)
lengthscale = cmu ** 0.75 * k ** 1.5 / eps
du_dz = np.gradient(vel, pts, edge_order=2)
stress = (mut * mu) * du_dz
points = vtk.vtkPoints()
for x in pts:
points.InsertNextPoint([0.0, 0.0, x])
vel_vec = vtk.vtkFloatArray()
vel_vec.SetNumberOfComponents(3)
vel_vec.SetName("Velocity")
for v in vel:
vel_vec.InsertNextTuple(zutil.vector_from_wind_dir(direction, v))
ti_vec = vtk.vtkFloatArray()
ti_vec.SetNumberOfComponents(1)
ti_vec.SetName("TI")
for t in TI:
ti_vec.InsertNextTuple([t])
mut_vec = vtk.vtkFloatArray()
mut_vec.SetNumberOfComponents(1)
mut_vec.SetName("EddyViscosity")
for m in mut:
mut_vec.InsertNextTuple([m])
# Create poly data
linesPolyData = vtk.vtkPolyData()
linesPolyData.SetPoints(points)
linesPolyData.GetPointData().AddArray(vel_vec)
linesPolyData.GetPointData().AddArray(ti_vec)
linesPolyData.GetPointData().AddArray(mut_vec)
# Write
print("Writing: " + profile_name + ".vtp")
writer = vtk.vtkXMLPolyDataWriter()
writer.SetFileName(profile_name + ".vtp")
writer.SetInputData(linesPolyData)
writer.Write()
    if plot:
        # The helpers get_figure, x_label, y_label and set_ticks are not
        # defined in this module and are assumed to come from the caller's
        # plotting utilities; matplotlib is imported here for the figure handle.
        import matplotlib.pyplot as plt
fig = get_figure(plt)
ax = fig.add_subplot(111)
ax.grid(True)
x_label(ax, "Velocity")
y_label(ax, "Height")
set_ticks(ax)
ax.semilogy(vel, pts)
if scale_k:
ax.set_ylim(None, geostrophic_plane)
fig = get_figure(plt)
ax = fig.add_subplot(111)
ax.grid(True)
x_label(ax, "TI")
y_label(ax, "Height")
set_ticks(ax)
ax.semilogy(TI, pts)
if scale_k:
ax.set_ylim(None, geostrophic_plane)
fig = get_figure(plt)
ax = fig.add_subplot(111)
ax.grid(True)
x_label(ax, "Length scale")
y_label(ax, "Height")
set_ticks(ax)
if scale_k:
ax.set_ylim(0.0, geostrophic_plane)
ax.plot(lengthscale, pts)
fig = get_figure(plt)
ax = fig.add_subplot(111)
ax.grid(True)
x_label(ax, "mut/mu")
y_label(ax, "Height")
set_ticks(ax)
if scale_k:
ax.set_ylim(0.0, geostrophic_plane)
ax.plot(mut, pts)
        fig = get_figure(plt)
        ax = fig.add_subplot(111)
        ax.grid(True)
        x_label(ax, "stress")
        y_label(ax, "Height")
        set_ticks(ax)
        if scale_k:
            ax.set_ylim(0.0, geostrophic_plane)
        # Plot the shear stress profile computed above
        ax.plot(stress, pts)
def get_case_name(base_case, wind_direction, wind_speed):
wind_direction_str = "{0:.2f}".format(wind_direction).replace(".", "p")
wind_speed_str = "{0:.2f}".format(wind_speed).replace(".", "p")
return base_case + "_" + wind_direction_str + "_" + wind_speed_str
def get_profile_name(base_case, wind_direction, wind_speed):
wind_direction_str = "{0:.2f}".format(wind_direction).replace(".", "p")
wind_speed_str = "{0:.2f}".format(wind_speed).replace(".", "p")
return "profile_" + wind_direction_str + "_" + wind_speed_str
def generate_inputs(
base_case,
wind_direction,
wind_speed,
wind_height,
roughness_length,
turbine_info,
terrain_file,
min_ti,
min_mut,
scale_k=True,
):
# Generate new name for this case
case_name = get_case_name(base_case, wind_direction, wind_speed)
profile_name = get_profile_name(base_case, wind_direction, wind_speed)
# Generate turbines
create_trbx_zcfd_input(
case_name=case_name,
wind_direction=wind_direction,
reference_wind_speed=wind_speed,
terrain_file=terrain_file,
report_frequency=10,
update_frequency=1,
reference_point_offset=0.0,
turbine_zone_length_factor=0.2,
model="simple",
turbine_files=turbine_info,
calibration_offset=0.0,
)
# Generate profile
create_profile(
profile_name,
wind_height,
wind_speed,
wind_direction,
roughness_length,
min_ti,
min_mut,
scale_k,
)
from string import Template
case_file = """
import zutil
base_case = '$basecasename'
case = '$casename'
parameters = zutil.get_parameters_from_file(base_case)
turbine_zones = zutil.get_zone_info(case+'_zones')
for key,value in turbine_zones.turb_zone.items():
valid_key = zutil.find_next_zone(parameters,'FZ')
parameters[valid_key]=value
turbine_probes = zutil.get_zone_info(case+'_probes')
for key,value in turbine_probes.turb_probe.items():
if not 'report' in parameters:
parameters['report'] = {}
if not 'monitor' in parameters['report']:
parameters['report']['monitor'] = {}
valid_key = zutil.find_next_zone(parameters['report']['monitor'],'MR')
parameters['report']['monitor'][valid_key]=value
# Set reference speed
parameters['IC_1']['V']['vector'] = zutil.vector_from_wind_dir($wind_direction,$wind_speed)
# Set profile
if not 'profile' in parameters['IC_1']:
parameters['IC_1']['profile'] = {}
parameters['IC_1']['profile']['field'] = '$profile_name.vtp'
"""
case_file_str = Template(case_file).substitute(
basecasename=base_case,
casename=case_name,
wind_direction=wind_direction,
wind_speed=wind_speed,
profile_name=profile_name,
)
print("Writing: " + case_name + ".py")
with open(case_name + ".py", "w") as f:
f.write(case_file_str)
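# Hypothetical driver for a single case (all file names are placeholders):
# generate_inputs('windfarm', 270.0, 10.0, 90.0, 0.0003,
#                 [['locations.txt', 'turbine.trbx']], 'terrain.pvd',
#                 min_ti=0.05, min_mut=1.0, scale_k=True)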
|
zCFD/zutil
|
zutil/farm.py
|
Python
|
mit
| 55,488
|
[
"ParaView",
"VTK"
] |
c7bfda0facaaa9b27c25bef806a1deb8fb9224e6d3153de0241484e6931f5dec
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='junit-xml',
author='Brian Beyer',
author_email='brian@kyr.us',
url='https://github.com/kyrus/python-junit-xml',
license='MIT',
packages=find_packages(),
test_suite='test_junit_xml',
description='Creates JUnit XML test result documents that can be read by '
'tools such as Jenkins',
long_description=read('README.rst'),
version='1.4',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: Freely Distributable',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Testing',
],
install_requires=[
'six'
]
)
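# Typical local build/install workflow for this package:
#   python setup.py sdist
#   pip install dist/junit-xml-1.4.tar.gz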
|
desolat/python-junit-xml
|
setup.py
|
Python
|
mit
| 1,086
|
[
"Brian"
] |
7f7a721ea5eedd42c0a4700a57db971a9918f6e9ad3146bb3310db923e830e4e
|
"""
This library allows you to quickly and easily use the SendGrid Web API v3 via
Python.
For more information on this library, see the README on Github.
http://github.com/sendgrid/sendgrid-python
For more information on the SendGrid v3 API, see the v3 docs:
http://sendgrid.com/docs/API_Reference/api_v3.html
For the user guide, code examples, and more, visit the main docs page:
http://sendgrid.com/docs/index.html
This file provides the SendGrid API Client.
"""
import os
import python_http_client
from .version import __version__
class SendGridAPIClient(object):
"""The SendGrid API Client.
Use this object to interact with the v3 API. For example:
sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
...
mail = Mail(from_email, subject, to_email, content)
response = sg.client.mail.send.post(request_body=mail.get())
For examples and detailed use instructions, see
https://github.com/sendgrid/sendgrid-python
"""
def __init__(self, **opts):
"""
Construct SendGrid v3 API object.
        :param host: Base URL for the API call
        :type host: string
        :param apikey: SendGrid API key to use. Defaults to environment var.
        :type apikey: string
"""
self.path = opts.get(
'path', os.path.abspath(os.path.dirname(__file__)))
self._apikey = opts.get('apikey', os.environ.get('SENDGRID_API_KEY'))
# Support v2 api_key naming
self._apikey = opts.get('api_key', self._apikey)
self._api_key = self._apikey
# Support impersonation of subusers
self._impersonate_subuser = opts.get('impersonate_subuser', None)
self.useragent = 'sendgrid/{0};python'.format(__version__)
self.host = opts.get('host', 'https://api.sendgrid.com')
self.version = __version__
headers = self._get_default_headers()
self.client = python_http_client.Client(host=self.host,
request_headers=headers,
version=3)
def _get_default_headers(self):
headers = {
"Authorization": 'Bearer {0}'.format(self._apikey),
"User-agent": self.useragent,
"Accept": 'application/json'
}
if self._impersonate_subuser:
headers['On-Behalf-Of'] = self._impersonate_subuser
return headers
def reset_request_headers(self):
self.client.request_headers = self._get_default_headers()
@property
def apikey(self):
"""The API key (also accessible as api_key)."""
return self._apikey
@apikey.setter
def apikey(self, value):
self._apikey = value
@property
def api_key(self):
"""The API key (also accessible as apikey)."""
return self._apikey
@api_key.setter
def api_key(self, value):
self._apikey = value
@property
def impersonate_subuser(self):
"""
The subuser you are impersonating.
If present, this is the value of the "On-Behalf-Of" header.
"""
return self._impersonate_subuser
|
blackpioter/sendgrid-python
|
sendgrid/sendgrid.py
|
Python
|
mit
| 3,305
|
[
"VisIt"
] |
65e7b98dff1921cb3409ac77419811f32872a3997656edc4d9a412f12ecef007
|
import ocl
import camvtk
import time
import datetime
import vtk
def drawTree(myscreen,t,color=camvtk.red,opacity=0.2, offset=(0,0,0)):
nodes = t.get_nodes()
black=0
for n in nodes:
cen = n.point()
#print "cen=",cen.str()
scale = n.get_scale()
#print "col=", n.color
if n.color == 0:
#print "found white node!"
#color = camvtk.red
cube = camvtk.Cube(center=(cen.x+offset[0], cen.y+offset[1], cen.z+offset[2]), length= scale, color=color)
cube.SetOpacity(opacity)
myscreen.addActor( cube )
black = black+1
if n.color == 1:
#print "found white node!"6
color = camvtk.blue
cube = camvtk.Cube(center=(cen.x, cen.y, cen.z), length= scale, color=color)
cube.SetOpacity(opacity)
myscreen.addActor( cube )
#black = black+1
#print black," black nodes"
"""
for m in range(0,9):
cen = n.corner(m)
sph = camvtk.Sphere( center=(cen.x, cen.y, cen.z), radius=0.5, color=camvtk.green)
myscreen.addActor(sph)
"""
#myscreen.render()
#raw_input("Press Enter to terminate")
def main(filename="frame/f.png",yc=6, n=0):
f=ocl.Ocode()
f.set_depth(8)
myscreen = camvtk.VTKScreen()
myscreen.camera.SetPosition(50, 22, 40)
myscreen.camera.SetFocalPoint(0,0, 0)
myscreen.camera.Azimuth( n*0.5 )
# screenshot writer
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
    lwr.SetInputConnection(w2if.GetOutputPort())
xar = camvtk.Arrow(color=camvtk.red, center=(10,20,0), rotXYZ=(0,0,0))
myscreen.addActor(xar)
yar = camvtk.Arrow(color=camvtk.green, center=(10,20,0), rotXYZ=(0,0,90))
myscreen.addActor(yar)
zar = camvtk.Arrow(color=camvtk.blue, center=(10,20,0), rotXYZ=(0,-90,0))
myscreen.addActor(zar)
t = ocl.LinOCT()
t2 = ocl.LinOCT()
t.init(3)
t2.init(3)
print(" after init() t :", t.str())
print(" after init() t2 :", t2.str())
svol = ocl.SphereOCTVolume()
svol.radius=3
svol.center = ocl.Point(1,0,3)
cube1 = ocl.CubeOCTVolume()
cube1.side=6
cube1.center = ocl.Point(0,0,0)
cube2 = ocl.CubeOCTVolume()
cube2.center = ocl.Point(1,2,0)
cube2.side = 30
print("t build()" )
t.build(svol)
print(" t after build() ", t.size())
t.condense()
print(" t after condense() ", t.size())
print("t2 build()" )
t2.build(cube1)
print(" t2 after build() ", t2.size())
t2.condense()
print(" t2 after condense() ", t2.size() )
# original trees
drawTree(myscreen,t,opacity=1, color=camvtk.green)
drawTree(myscreen,t2,opacity=1, color=camvtk.red)
print(" diff12()...",)
t3 = t2.operation(1,t)
print("done.")
print(" diff21()...",)
t4 = t2.operation(2,t)
print("done.")
print(" intersection()...",)
t5 = t2.operation(3,t)
print("done.")
print(" sum()...",)
t6 = t2.operation(4,t)
print("done.")
print(" difference 1-2 t3 (blue) =", t3.size())
print(" difference 2-1 t4 (yellow)=", t4.size())
print(" intersection t5 (pink) =", t5.size())
print(" union t6 (grey) =", t6.size())
drawTree(myscreen,t3,opacity=1, color=camvtk.blue, offset=(0,15,0))
drawTree(myscreen,t4,opacity=1, color=camvtk.yellow,offset=(0,-15,0))
drawTree(myscreen,t5,opacity=1, color=camvtk.pink,offset=(-15,0,0))
drawTree(myscreen,t6,opacity=1, color=camvtk.grey,offset=(-15,-15,0))
title = camvtk.Text()
title.SetPos( (myscreen.width-350, myscreen.height-30) )
title.SetText("OpenCAMLib " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
myscreen.addActor(title)
st2 = camvtk.Text()
ytext = "Linear OCTree set operations: difference, intersection, union"
st2.SetText(ytext)
st2.SetPos( (50, myscreen.height-30) )
myscreen.addActor( st2)
st3 = camvtk.Text()
text = "Original OCTrees\n Ball:%d nodes\n Cube: %d nodes" % ( t.size(), t2.size() )
st3.SetText(text)
st3.SetPos( (50, 200) )
myscreen.addActor( st3)
st4 = camvtk.Text()
un = " Union (grey): %d nodes\n" % (t6.size())
int = " Intersection (pink): %d nodes\n" % (t5.size())
diff1 = " difference Cube-Ball (blue): %d nodes\n" % (t3.size())
diff2 = " difference Ball-Cube (yellow): %d nodes\n" % (t4.size())
    text = un + inter + diff1 + diff2
st4.SetText(text)
st4.SetPos( (50, 100) )
myscreen.addActor( st4)
myscreen.render()
lwr.SetFileName(filename)
time.sleep(0.2)
#lwr.Write()
myscreen.iren.Start()
if __name__ == "__main__":
Nsteps = 720
ystart = 6
ystop = -6
ystep = float(ystop-ystart)/(Nsteps-1)
main()
#fiangle = fiangle + 2
|
aewallin/opencamlib
|
src/attic/ocode/ocode_movie1.py
|
Python
|
lgpl-2.1
| 4,981
|
[
"VTK"
] |
942913d309ea9dd8a26a09ba2719862bd00166b7b9f9b9d62ec40582168f453e
|
# (c) 2015, Brian Coca <briancoca+dev@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
class ActionModule(ActionBase):
    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()
src = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
remote_src = boolean(self._task.args.get('remote_src', 'no'))
if src is None:
return dict(failed=True, msg="src is required")
elif remote_src:
# everything is remote, so we just execute the module
# without changing any of the module arguments
return self._execute_module(task_vars=task_vars)
if self._task._role is not None:
src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src)
else:
src = self._loader.path_dwim(src)
# create the remote tmp dir if needed, and put the source file there
if tmp is None or "-tmp-" not in tmp:
tmp = self._make_tmp_path()
tmp_src = self._connection._shell.join_path(tmp, os.path.basename(src))
self._connection.put_file(src, tmp_src)
if self._play_context.become and self._play_context.become_user != 'root':
if not self._play_context.check_mode:
self._remote_chmod('a+r', tmp_src, tmp)
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=tmp_src,
)
)
return self._execute_module('patch', module_args=new_module_args, task_vars=task_vars)
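# Hypothetical playbook task exercising this action plugin:
#
# - name: apply a local patch file on the target host
#   patch:
#     src: files/fix.patch
#     dest: /etc/service.conf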
|
axilleas/ansible
|
lib/ansible/plugins/action/patch.py
|
Python
|
gpl-3.0
| 2,341
|
[
"Brian"
] |
5e47c7002b351ee3154ed79308e19ff74ff4e0e6792cb8ff41c0c889499e6329
|
import time
import RPi.GPIO as io
from servo import set_servo
import logging
from logging.handlers import RotatingFileHandler
from logging import Formatter
import sys
logger = logging.getLogger('bergro')
logger_format = ("%(asctime)s %(levelname)s: %(message)s"
" [%(pathname)s:%(lineno)d]")
logger_filename = "%s/flapmagick.log" % '/home/pi/app'
vlh = RotatingFileHandler(logger_filename, mode='a', maxBytes=50485760,
backupCount=5)
vlh.setFormatter(Formatter(logger_format))
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
ch.setFormatter(Formatter(logger_format))
logger.addHandler(ch)
vlh.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
logger.addHandler(vlh)
logger.info("Initializing flapmagick, logging to: %s" % logger_filename)
current_servo_pos = 170
io.setmode(io.BCM)
door_pin = 23
outside_pin = 24
inside_pin = 25
led_pin = 17
io.setup(led_pin, io.OUT) # activate input
io.setup(inside_pin, io.IN) # activate input
io.setup(outside_pin, io.IN) # activate input
io.setup(door_pin, io.IN, pull_up_down=io.PUD_UP) # activate input with PullUp
def servo(pos=0):
global current_servo_pos
if current_servo_pos != pos:
current_servo_pos = pos
set_servo(pos)
def open_door():
global current_servo_pos
if current_servo_pos > 0:
cpos = current_servo_pos
for i in range(0, current_servo_pos, 1):
servo(cpos - i)
time.sleep(0.02)
servo(0)
def close_door():
global current_servo_pos
if current_servo_pos < 170:
for i in range(current_servo_pos, 170, 1):
servo(i)
time.sleep(0.02)
servo(170)
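# With 1-degree steps every 0.02 s, a full 170-degree sweep takes roughly
# 170 * 0.02 = 3.4 seconds to open or close.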
def is_open():
global current_servo_pos
return current_servo_pos == 0
def is_in_motion():
    # Mid-travel: strictly between fully open (0) and fully closed (170)
    global current_servo_pos
    return 0 < current_servo_pos < 170
def has_visitor():
global inside_pin
global outside_pin
return io.input(inside_pin) or io.input(outside_pin)
def is_door_open():
global door_pin
return io.input(door_pin)
if __name__ == '__main__':
close_door()
DELAY_CLOSE = 15L
visit_time = None
while True:
try:
if has_visitor():
visit_time = time.time()
if not is_open():
if not is_in_motion():
logger.info("Has visitor, so is opening door!")
open_door()
else:
logger.info("Visitor detected, but flap in motion!")
else:
if visit_time is None:
logger.info("First time, no visitor so far!")
time_since_visit = DELAY_CLOSE + 1
else:
time_since_visit = long(time.time() - visit_time)
if time_since_visit < DELAY_CLOSE:
logger.info("Had visit %s ago" % time_since_visit)
if time_since_visit > DELAY_CLOSE:
if is_open():
logger.info("Visitor has left so we close the door!")
close_door()
# delay awhile after close to settle in
time.sleep(DELAY_CLOSE)
time.sleep(0.9)
except Exception as e:
logger.exception("Error occured: %s" % e)
logger.info("Flapmagick is done...!")
|
erikulven/flapmagick
|
flapmagick.py
|
Python
|
bsd-2-clause
| 3,432
|
[
"VisIt"
] |
dc54f1db2b05800f58f8edd58737c930a44df1cf5f8f7372599636b68fa1e549
|
"""
Module for rendering clusters
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import logging
import vtk
import numpy as np
from . import baseRenderer
from . import povrayWriters
from .. import utils
from ...filtering import _clusters
from ...filtering import clusters
from six.moves import range
class ClusterRenderer(baseRenderer.BaseRenderer):
"""
Render clusters.
"""
def __init__(self):
super(ClusterRenderer, self).__init__()
self._logger = logging.getLogger(__name__)
def render(self, clusterList, settings, refState=None):
"""
Render the given clusters.
"""
self._logger.debug("Rendering %d clusters", len(clusterList))
# object for combining poly datas
appendPolyData = vtk.vtkAppendPolyData()
# neighbour radius used for constructing clusters
neighbourRadius = settings.getSetting("neighbourRadius")
# loop over clusters making poly data
for clusterIndex, cluster in enumerate(clusterList):
# get the positions for this cluster
clusterPos = cluster.makeClusterPos()
# lattice
lattice = cluster.getLattice()
# get settings and prepare to render (unapply PBCs)
appliedPBCs = np.zeros(7, np.int32)
_clusters.prepareClusterToDrawHulls(len(cluster), clusterPos, lattice.cellDims, lattice.PBC, appliedPBCs,
neighbourRadius)
# render this clusters facets
self.renderClusterFacets(len(cluster), clusterPos, lattice, neighbourRadius, appendPolyData)
# handle PBCs
if len(cluster) > 1:
# move the cluster across each PBC that it overlaps
while max(appliedPBCs) > 0:
# send the cluster across PBCs
tmpClusterPos = copy.deepcopy(clusterPos)
clusters.applyPBCsToCluster(tmpClusterPos, lattice.cellDims, appliedPBCs)
# render the modified clusters facets
self.renderClusterFacets(len(cluster), tmpClusterPos, lattice, neighbourRadius, appendPolyData)
appendPolyData.Update()
# remove any duplicate points
cleanFilter = vtk.vtkCleanPolyData()
cleanFilter.SetInputConnection(appendPolyData.GetOutputPort())
cleanFilter.Update()
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(cleanFilter.GetOutputPort())
# actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetOpacity(settings.getSetting("hullOpacity"))
hullCol = settings.getSetting("hullCol")
actor.GetProperty().SetColor(hullCol[0], hullCol[1], hullCol[2])
# store attributes
self._actor = utils.ActorObject(actor)
self._data["Hull colour"] = hullCol
self._data["Hull opacity"] = settings.getSetting("hullOpacity")
self._data["Neighbour radius"] = neighbourRadius
self._data["Cluster list"] = clusterList
def renderClusterFacets(self, clusterSize, clusterPos, lattice, neighbourRadius, appendPolyData):
"""
Render facets of a cluster.
"""
# get facets
facets = clusters.findConvexHullFacets(clusterSize, clusterPos)
# now render
if facets is not None:
            # TODO: make sure no facets lie more than the neighbour radius outside the cell
facets = clusters.checkFacetsPBCs(facets, clusterPos, 2.0 * neighbourRadius, lattice.PBC, lattice.cellDims)
# create vtk points from cluster positions
points = vtk.vtkPoints()
for i in range(clusterSize):
points.InsertNextPoint(clusterPos[3 * i], clusterPos[3 * i + 1], clusterPos[3 * i + 2])
# create triangles
triangles = vtk.vtkCellArray()
for facet in facets:
triangle = vtk.vtkTriangle()
for j in range(3):
triangle.GetPointIds().SetId(j, facet[j])
triangles.InsertNextCell(triangle)
# polydata object
trianglePolyData = vtk.vtkPolyData()
trianglePolyData.SetPoints(points)
trianglePolyData.SetPolys(triangles)
# add polydata
if vtk.vtkVersion.GetVTKMajorVersion() <= 5:
appendPolyData.AddInputConnection(trianglePolyData.GetProducerPort())
else:
appendPolyData.AddInputData(trianglePolyData)
def writePovray(self, filename):
"""Write atoms to POV-Ray file."""
self._logger.debug("Writing atoms POV-Ray file")
# povray writer
writer = povrayWriters.PovrayClustersWriter()
writer.write(filename, self._data["Cluster list"], self._data["Neighbour radius"], self._data["Hull opacity"],
self._data["Hull colour"])
|
chrisdjscott/Atoman
|
atoman/rendering/renderers/clusterRenderer.py
|
Python
|
mit
| 5,229
|
[
"VTK"
] |
42e33c96bd9d241f0a15f9525f246852ecaf1710f5d579fb920c0c0adb1225b4
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import tempfile
import unittest
import numpy
from functools import reduce
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import cc
from pyscf.cc import ccsd_lambda
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = '631g'
mol.build()
mf = scf.RHF(mol)
mf.conv_tol_grad = 1e-8
mf.kernel()
mycc = cc.ccsd.RCCSD(mf)
mycc.conv_tol = 1e-10
eris = mycc.ao2mo()
mycc.kernel(eris=eris)
def tearDownModule():
global mol, mf, mycc
mol.stdout.close()
del mol, mf, mycc
class KnownValues(unittest.TestCase):
def test_ccsd(self):
mol = gto.M()
mf = scf.RHF(mol)
mcc = cc.CCSD(mf)
numpy.random.seed(12)
mcc.nocc = nocc = 5
mcc.nmo = nmo = 12
nvir = nmo - nocc
eri0 = numpy.random.random((nmo,nmo,nmo,nmo))
eri0 = ao2mo.restore(1, ao2mo.restore(8, eri0, nmo), nmo)
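        # restoring to 8-fold and back to 1-fold symmetry makes the random
        # ERIs obey the permutational symmetry of physical integrals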
fock0 = numpy.random.random((nmo,nmo))
fock0 = fock0 + fock0.T + numpy.diag(range(nmo))*2
t1 = numpy.random.random((nocc,nvir))
t2 = numpy.random.random((nocc,nocc,nvir,nvir))
t2 = t2 + t2.transpose(1,0,3,2)
l1 = numpy.random.random((nocc,nvir))
l2 = numpy.random.random((nocc,nocc,nvir,nvir))
l2 = l2 + l2.transpose(1,0,3,2)
eris = cc.ccsd._ChemistsERIs()
eris.oooo = eri0[:nocc,:nocc,:nocc,:nocc].copy()
eris.ovoo = eri0[:nocc,nocc:,:nocc,:nocc].copy()
eris.oovv = eri0[:nocc,:nocc,nocc:,nocc:].copy()
eris.ovvo = eri0[:nocc,nocc:,nocc:,:nocc].copy()
idx = numpy.tril_indices(nvir)
eris.ovvv = eri0[:nocc,nocc:,nocc:,nocc:][:,:,idx[0],idx[1]].copy()
eris.vvvv = ao2mo.restore(4,eri0[nocc:,nocc:,nocc:,nocc:],nvir)
eris.fock = fock0
eris.mo_energy = fock0.diagonal()
saved = ccsd_lambda.make_intermediates(mcc, t1, t2, eris)
l1new, l2new = ccsd_lambda.update_lambda(mcc, t1, t2, l1, l2, eris, saved)
self.assertAlmostEqual(abs(l1new).sum(), 38172.7896467303, 8)
self.assertAlmostEqual(numpy.dot(l1new.flatten(), numpy.arange(35)), 739312.005491083, 8)
self.assertAlmostEqual(numpy.dot(l1new.flatten(), numpy.sin(numpy.arange(35))), 7019.50937051188, 8)
self.assertAlmostEqual(numpy.dot(numpy.sin(l1new.flatten()), numpy.arange(35)), 69.6652346635955, 8)
self.assertAlmostEqual(abs(l2new).sum(), 72035.4931071527, 8)
self.assertAlmostEqual(abs(l2new-l2new.transpose(1,0,3,2)).sum(), 0, 9)
self.assertAlmostEqual(numpy.dot(l2new.flatten(), numpy.arange(35**2)), 48427109.5409886, 7)
self.assertAlmostEqual(numpy.dot(l2new.flatten(), numpy.sin(numpy.arange(35**2))), 137.758016736487, 8)
self.assertAlmostEqual(numpy.dot(numpy.sin(l2new.flatten()), numpy.arange(35**2)), 507.656936701192, 8)
mcc.max_memory = 0
saved = ccsd_lambda.make_intermediates(mcc, t1, t2, eris)
l1new, l2new = ccsd_lambda.update_lambda(mcc, t1, t2, l1, l2, eris, saved)
self.assertAlmostEqual(abs(l1new).sum(), 38172.7896467303, 8)
self.assertAlmostEqual(numpy.dot(l1new.flatten(), numpy.arange(35)), 739312.005491083, 8)
self.assertAlmostEqual(numpy.dot(l1new.flatten(), numpy.sin(numpy.arange(35))), 7019.50937051188, 8)
self.assertAlmostEqual(numpy.dot(numpy.sin(l1new.flatten()), numpy.arange(35)), 69.6652346635955, 8)
self.assertAlmostEqual(abs(l2new).sum(), 72035.4931071527, 8)
self.assertAlmostEqual(abs(l2new-l2new.transpose(1,0,3,2)).sum(), 0, 9)
self.assertAlmostEqual(numpy.dot(l2new.flatten(), numpy.arange(35**2)), 48427109.5409886, 7)
self.assertAlmostEqual(numpy.dot(l2new.flatten(), numpy.sin(numpy.arange(35**2))), 137.758016736487, 8)
self.assertAlmostEqual(numpy.dot(numpy.sin(l2new.flatten()), numpy.arange(35**2)), 507.656936701192, 8)
def test_restart(self):
ftmp = tempfile.NamedTemporaryFile()
cc1 = copy.copy(mycc)
cc1.max_cycle = 5
cc1.solve_lambda()
l1ref = cc1.l1
l2ref = cc1.l2
adiis = lib.diis.DIIS(mol)
adiis.filename = ftmp.name
cc1.diis = adiis
cc1.max_cycle = 3
cc1.solve_lambda(l1=None, l2=None)
l1, l2 = cc1.vector_to_amplitudes(adiis.extrapolate())
cc1.diis = None
cc1.max_cycle = 1
cc1.solve_lambda(l1=l1, l2=l2)
self.assertAlmostEqual(numpy.linalg.norm(cc1.l1-l1ref), 1.2423439785342171e-04, 7)
self.assertAlmostEqual(numpy.linalg.norm(cc1.l2-l2ref), 7.5807719698278025e-05, 7)
cc1.diis = adiis
cc1.max_cycle = 2
cc1.solve_lambda(l1=l1, l2=l2)
self.assertAlmostEqual(abs(cc1.l1-l1ref).max(), 0, 9)
self.assertAlmostEqual(abs(cc1.l2-l2ref).max(), 0, 9)
if __name__ == "__main__":
print("Full Tests for CCSD lambda")
unittest.main()
|
gkc1000/pyscf
|
pyscf/cc/test/test_ccsd_lambda.py
|
Python
|
apache-2.0
| 5,644
|
[
"PySCF"
] |
46ee5e1df4ae43090c6c65de7c76419ad155b32bdc61cce7764a23d5295a3a5a
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import sys
import numpy as np
from ...core.parameterization.parameterized import Parameterized
from kernel_slice_operations import KernCallsViaSlicerMeta
from ...util.caching import Cache_this
from GPy.core.parameterization.observable_array import ObsAr
class Kern(Parameterized):
#===========================================================================
# This adds input slice support. The rather ugly code for slicing can be
# found in kernel_slice_operations
__metaclass__ = KernCallsViaSlicerMeta
#===========================================================================
_support_GPU=False
def __init__(self, input_dim, active_dims, name, useGPU=False, *a, **kw):
"""
The base class for a kernel: a positive definite function
        which forms the covariance function (kernel).
input_dim:
is the number of dimensions to work on. Make sure to give the
tight dimensionality of inputs.
You most likely want this to be the integer telling the number of
input dimensions of the kernel.
If this is not an integer (!) we will work on the whole input matrix X,
and not check whether dimensions match or not (!).
active_dims:
        is the set of active dimensions of inputs X we will work on.
        All kernels will get sliced Xes as inputs, if active_dims is not None.
        Only positive integers are allowed in active_dims!
        If active_dims is None, slicing is switched off and all X will be passed through as given.
        :param int input_dim: the number of input dimensions to the function
        :param array-like|None active_dims: list of indices of the dimensions this kernel works on, or None if no slicing
Do not instantiate.
"""
super(Kern, self).__init__(name=name, *a, **kw)
self.input_dim = int(input_dim)
if active_dims is None:
active_dims = np.arange(input_dim)
self.active_dims = np.atleast_1d(active_dims).astype(int)
assert self.active_dims.size == self.input_dim, "input_dim={} does not match len(active_dim)={}, active_dims={}".format(self.input_dim, self.active_dims.size, self.active_dims)
self._sliced_X = 0
self.useGPU = self._support_GPU and useGPU
self._return_psi2_n_flag = ObsAr(np.zeros(1)).astype(bool)
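    # Illustrative usage sketch (assumes an RBF subclass with the standard
    # GPy constructor; not part of this module):
    #   import numpy as np, GPy
    #   k = GPy.kern.RBF(input_dim=2, active_dims=[0, 2])
    #   X = np.random.randn(10, 3)
    #   K = k.K(X, None)  # X is sliced to X[:, [0, 2]] before evaluation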
@property
def return_psi2_n(self):
"""
Flag whether to pass back psi2 as NxMxM or MxM, by summing out N.
"""
return self._return_psi2_n_flag[0]
@return_psi2_n.setter
def return_psi2_n(self, val):
def visit(self):
if isinstance(self, Kern):
self._return_psi2_n_flag[0]=val
self.traverse(visit)
@Cache_this(limit=20)
def _slice_X(self, X):
return X[:, self.active_dims]
def K(self, X, X2):
"""
Compute the kernel function.
        :param X: the first set of inputs to the kernel
        :param X2: (optional) the second set of arguments to the kernel. If X2
            is None, this is passed through to the 'part' object, which
            handles this as X2 == X.
"""
raise NotImplementedError
def Kdiag(self, X):
raise NotImplementedError
def psi0(self, Z, variational_posterior):
raise NotImplementedError
def psi1(self, Z, variational_posterior):
raise NotImplementedError
def psi2(self, Z, variational_posterior):
raise NotImplementedError
def gradients_X(self, dL_dK, X, X2):
raise NotImplementedError
def gradients_X_diag(self, dL_dKdiag, X):
raise NotImplementedError
def update_gradients_diag(self, dL_dKdiag, X):
""" update the gradients of all parameters when using only the diagonal elements of the covariance matrix"""
raise NotImplementedError
def update_gradients_full(self, dL_dK, X, X2):
"""Set the gradients of all parameters when doing full (N) inference."""
raise NotImplementedError
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
"""
Set the gradients of all parameters when doing inference with
uncertain inputs, using expectations of the kernel.
        The essential maths is
dL_d{theta_i} = dL_dpsi0 * dpsi0_d{theta_i} +
dL_dpsi1 * dpsi1_d{theta_i} +
dL_dpsi2 * dpsi2_d{theta_i}
"""
raise NotImplementedError
def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
"""
Returns the derivative of the objective wrt Z, using the chain rule
through the expectation variables.
"""
raise NotImplementedError
def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
"""
Compute the gradients wrt the parameters of the variational
        distribution q(X), chain-ruling via the expectations of the kernel
"""
raise NotImplementedError
def plot(self, x=None, fignum=None, ax=None, title=None, plot_limits=None, resolution=None, **mpl_kwargs):
"""
plot this kernel.
:param x: the value to use for the other kernel argument (kernels are a function of two variables!)
:param fignum: figure number of the plot
:param ax: matplotlib axis to plot on
:param title: the matplotlib title
:param plot_limits: the range over which to plot the kernel
        :param resolution: the resolution of the lines used in plotting
        :param mpl_kwargs: valid keyword arguments to pass through to matplotlib (e.g. lw=7)
"""
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ...plotting.matplot_dep import kernel_plots
kernel_plots.plot(self, x, fignum, ax, title, plot_limits, resolution, **mpl_kwargs)
def plot_ARD(self, *args, **kw):
"""
See :class:`~GPy.plotting.matplot_dep.kernel_plots`
"""
import sys
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ...plotting.matplot_dep import kernel_plots
return kernel_plots.plot_ARD(self,*args,**kw)
def input_sensitivity(self, summarize=True):
"""
Returns the sensitivity for each dimension of this kernel.
"""
return np.zeros(self.input_dim)
def __add__(self, other):
""" Overloading of the '+' operator. for more control, see self.add """
return self.add(other)
def __iadd__(self, other):
return self.add(other)
def add(self, other, name='add'):
"""
Add another kernel to this one.
:param other: the other kernel to be added
:type other: GPy.kern
"""
assert isinstance(other, Kern), "only kernels can be added to kernels..."
from add import Add
return Add([self, other], name=name)
def __mul__(self, other):
""" Here we overload the '*' operator. See self.prod for more information"""
return self.prod(other)
def __imul__(self, other):
""" Here we overload the '*' operator. See self.prod for more information"""
return self.prod(other)
def __pow__(self, other):
"""
Shortcut for tensor `prod`.
"""
assert np.all(self.active_dims == range(self.input_dim)), "Can only use kernels, which have their input_dims defined from 0"
assert np.all(other.active_dims == range(other.input_dim)), "Can only use kernels, which have their input_dims defined from 0"
other.active_dims += self.input_dim
return self.prod(other)
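    # e.g. (illustrative): k1 = RBF(1); k2 = RBF(1); k1 ** k2 gives a product
    # kernel on the 2-D tensor-product space, with k2 shifted to column 1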
def prod(self, other, name='mul'):
"""
Multiply two kernels (either on the same space, or on the tensor
product of the input space).
:param other: the other kernel to be added
:type other: GPy.kern
"""
assert isinstance(other, Kern), "only kernels can be multiplied to kernels..."
from prod import Prod
#kernels = []
#if isinstance(self, Prod): kernels.extend(self.parameters)
#else: kernels.append(self)
#if isinstance(other, Prod): kernels.extend(other.parameters)
#else: kernels.append(other)
return Prod([self, other], name)
def _check_input_dim(self, X):
assert X.shape[1] == self.input_dim, "{} did not specify active_dims and X has wrong shape: X_dim={}, whereas input_dim={}".format(self.name, X.shape[1], self.input_dim)
def _check_active_dims(self, X):
assert X.shape[1] >= len(self.active_dims), "At least {} dimensional X needed, X.shape={!s}".format(len(self.active_dims), X.shape)
class CombinationKernel(Kern):
"""
Abstract super class for combination kernels.
A combination kernel combines (a list of) kernels and works on those.
Examples are the HierarchicalKernel or Add and Prod kernels.
"""
def __init__(self, kernels, name, extra_dims=[]):
"""
Abstract super class for combination kernels.
A combination kernel combines (a list of) kernels and works on those.
Examples are the HierarchicalKernel or Add and Prod kernels.
:param list kernels: List of kernels to combine (can be only one element)
:param str name: name of the combination kernel
:param array-like extra_dims: if needed extra dimensions for the combination kernel to work on
"""
assert all([isinstance(k, Kern) for k in kernels])
extra_dims = np.array(extra_dims, dtype=int)
input_dim, active_dims = self.get_input_dim_active_dims(kernels, extra_dims)
# initialize the kernel with the full input_dim
super(CombinationKernel, self).__init__(input_dim, active_dims, name)
self.extra_dims = extra_dims
self.link_parameters(*kernels)
@property
def parts(self):
return self.parameters
def get_input_dim_active_dims(self, kernels, extra_dims = None):
#active_dims = reduce(np.union1d, (np.r_[x.active_dims] for x in kernels), np.array([], dtype=int))
#active_dims = np.array(np.concatenate((active_dims, extra_dims if extra_dims is not None else [])), dtype=int)
input_dim = reduce(max, (k.active_dims.max() for k in kernels)) + 1
if extra_dims is not None:
input_dim += extra_dims.size
active_dims = np.arange(input_dim)
return input_dim, active_dims
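    # e.g. combining kernels with active_dims [0, 1] and [2] and no extra
    # dims gives input_dim = 3 and active_dims = array([0, 1, 2])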
def input_sensitivity(self, summarize=True):
"""
        If summarize is true, we want to get the summarized view of the sensitivities,
otherwise put everything into an array with shape (#kernels, input_dim)
in the order of appearance of the kernels in the parameterized object.
"""
raise NotImplementedError("Choose the kernel you want to get the sensitivity for. You need to override the default behaviour for getting the input sensitivity to be able to get the input sensitivity. For sum kernel it is the sum of all sensitivities, TODO: product kernel? Other kernels?, also TODO: shall we return all the sensitivities here in the combination kernel? So we can combine them however we want? This could lead to just plot all the sensitivities here...")
def _check_active_dims(self, X):
return
def _check_input_dim(self, X):
# As combination kernels cannot always know, what their inner kernels have as input dims, the check will be done inside them, respectively
return
|
fivejjs/GPy
|
GPy/kern/_src/kern.py
|
Python
|
bsd-3-clause
| 11,771
|
[
"VisIt"
] |
b06c1891c1f7598f0debd76368ee3ca010d35b65f4916c783c0d5a3e0701598e
|
#!/usr/bin/env python
###############################################################################
# Copyright 2015-2016 University of Florida. All rights reserved.
# This file is part of UF CTS-IT's NACCulator project.
# Use of this source code is governed by the license found in the LICENSE file.
###############################################################################
import csv
import re
import sys
import argparse
import traceback
import codecs
from nacc.uds3 import blanks
from nacc.uds3.ivp import builder as ivp_builder
from nacc.uds3.np import builder as np_builder
from nacc.uds3.fvp import builder as fvp_builder
from nacc.uds3 import filters
def check_blanks(packet):
"""
Parses rules for when each field should be blank and then checks them
"""
pattern = re.compile(r"Blank if Question \d+ (\w+) (ne|=) (\d+)")
warnings = []
for form in packet:
# Find all fields that:
# 1) have blanking rules; and
# 2) aren't blank.
for field in [f for f in form.fields.itervalues()
if f.blanks and not empty(f)]:
for rule in field.blanks:
r = blanks.convert_rule_to_python(field.name, rule)
if r(packet):
warnings.append(
"'%s' is '%s' with length '%s', but should be blank: '%s'." %
(field.name, field.value, len(field.value), rule))
# "'%s' is '%s' with length '%s', but should be blank: '%s'. Test form: '%s'" %
# (field.name, field.value, len(field.value), rule, form.form_name))
return warnings
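# Illustrative example (hypothetical rule text, not taken from a real form):
# the rule "Blank if Question 1 DEMENTED ne 1" matches the pattern above
# with groups ('DEMENTED', 'ne', '1')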
def check_single_select(packet):
""" Checks the values of sets of interdependent questions
There are some sets of questions which should function like an HTML radio
button group in that only one of them should be selected. However, because
of the manner in which they were implemented in REDCap, the values need to
be double-checked to ensure at most one in a given set has the real answer.
"""
warnings = list()
# D1 4
fields = ('AMNDEM', 'PCA', 'PPASYN', 'FTDSYN', 'LBDSYN', 'NAMNDEM')
if not exclusive(packet, fields):
warnings.append('For Form D1, Question 4, there is unexpectedly more '
'than one syndrome indicated as "Present".')
# D1 5
fields = ('MCIAMEM', 'MCIAPLUS', 'MCINON1', 'MCINON2', 'IMPNOMCI')
if not exclusive(packet, fields):
warnings.append('For Form D1, Question 5, there is unexpectedly more '
'than one syndrome indicated as "Present".')
# D1 11-39
fields = ('ALZDISIF', 'LBDIF', 'MSAIF', 'PSPIF', 'CORTIF', 'FTLDMOIF',
'FTLDNOIF', 'FTLDSUBX', 'CVDIF', 'ESSTREIF', 'DOWNSIF', 'HUNTIF',
'PRIONIF', 'BRNINJIF', 'HYCEPHIF', 'EPILEPIF', 'NEOPIF', 'HIVIF',
'OTHCOGIF', 'DEPIF', 'BIPOLDIF', 'SCHIZOIF', 'ANXIETIF',
'DELIRIF', 'PTSDDXIF', 'OTHPSYIF', 'ALCDEMIF', 'IMPSUBIF',
'DYSILLIF', 'MEDSIF', 'COGOTHIF', 'COGOTH2F', 'COGOTH3F')
if not exclusive(packet, fields):
warnings.append('For Form D1, Questions 11-39, there is unexpectedly '
'more than one Primary cause selected.')
return warnings
def empty(field):
""" Helper function that returns True if a field's value is empty """
return field.value.strip() == ""
def exclusive(packet, fields, value_to_check=1):
""" Returns True iff, for a set of fields, only one of field is set. """
values = [packet[f].value for f in fields]
true_values = filter(lambda v: v == value_to_check, values)
return len(true_values) <= 1
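# e.g. exclusive(packet, ('AMNDEM', 'PCA')) is True when at most one of the
# two fields has the value 1 (the default value_to_check)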
def set_blanks_to_zero(packet):
""" Sets specific fields to zero if they meet certain criteria """
def set_to_zero_if_blank(*field_names):
for field_name in field_names:
field = packet[field_name]
if empty(field):
field.value = 0
# B8 2.
if packet['PARKSIGN'] == 1:
set_to_zero_if_blank(
'RESTTRL', 'RESTTRR', 'SLOWINGL', 'SLOWINGR', 'RIGIDL', 'RIGIDR',
'BRADY', 'PARKGAIT', 'POSTINST')
# B8 3.
if packet['CVDSIGNS'] == 1:
set_to_zero_if_blank('CORTDEF', 'SIVDFIND', 'CVDMOTL', 'CVDMOTR',
'CORTVISL', 'CORTVISR', 'SOMATL', 'SOMATR')
# B8 5.
if packet['PSPCBS'] == 1:
set_to_zero_if_blank(
'PSPCBS', 'EYEPSP', 'DYSPSP', 'AXIALPSP', 'GAITPSP', 'APRAXSP',
'APRAXL', 'APRAXR', 'CORTSENL', 'CORTSENR', 'ATAXL', 'ATAXR',
'ALIENLML', 'ALIENLMR', 'DYSTONL', 'DYSTONR')
# D1 4.
if packet['DEMENTED'] == 1:
set_to_zero_if_blank(
'AMNDEM', 'PCA', 'PPASYN', 'FTDSYN', 'LBDSYN', 'NAMNDEM')
# D1 5.
if packet['DEMENTED'] == 0:
set_to_zero_if_blank(
'MCIAMEM', 'MCIAPLUS', 'MCINON1', 'MCINON2', 'IMPNOMCI')
# D1 11-39.
set_to_zero_if_blank(
'ALZDIS', 'LBDIS', 'MSA', 'PSP', 'CORT', 'FTLDMO', 'FTLDNOS', 'CVD',
'ESSTREM', 'DOWNS', 'HUNT', 'PRION', 'BRNINJ', 'HYCEPH', 'EPILEP',
'NEOP', 'HIV', 'OTHCOG', 'DEP', 'BIPOLDX', 'SCHIZOP', 'ANXIET',
'DELIR', 'PTSDDX', 'OTHPSY', 'ALCDEM', 'IMPSUB', 'DYSILL', 'MEDS',
'COGOTH', 'COGOTH2', 'COGOTH3')
# D2 11.
if packet['ARTH'] == 1:
set_to_zero_if_blank('ARTUPEX', 'ARTLOEX', 'ARTSPIN', 'ARTUNKN')
def main(raw_csv):
"""
Reads a REDCap exported CSV, data file, then prints it out in NACC's format
"""
# if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process redcap form output to nacculator.')
# else:
# parser = raw_csv
filters_names = { 'cleanPtid' : 'clean_ptid',
'replaceDrugId' : 'replace_drug_id',
'fixC1S' : 'fix_c1s',
'fillDefault' : 'fill_default',
'updateField' : 'update_field'}
option_group = parser.add_mutually_exclusive_group()
option_group.add_argument('-fvp', action='store_true', dest='fvp', help='Set this flag to process as fvp data')
option_group.add_argument('-ivp', action='store_true', dest='ivp', help='Set this flag to process as ivp data')
option_group.add_argument('-np', action='store_true', dest='np', help='Set this flag to process as np data')
option_group.add_argument('-f', '--filter', action='store', dest='filter', choices=filters_names.keys(), help='Set this flag to process the filter')
parser.add_argument('-file', action='store', dest='file', help='Path of the csv file to be processed.')
parser.add_argument('-meta', action='store', dest='filter_meta', help='Input file for the filter metadata (in case -filter is used)')
options = parser.parse_args()
#options = None
# Defaults to processing of ivp.
# TODO this can be changed in future to process fvp by default.
#if(options == None):
# print "Hello Flask."
if not (options.ivp or options.fvp or options.np or options.filter):
options.ivp = True
if __name__ == '__main__':
fp = sys.stdin if options.file == None else open(options.file, 'r')
else:
fp = open(raw_csv, 'r')
# Place holder for future. May need to output to a specific file in future.
# output = sys.stdout
all_warnings = []
sys.stdout = open('NaccConverted_' + raw_csv[:-4] + '.txt', 'w')
if options.filter:
filter_method = getattr(filters, 'filter_' + filters_names[options.filter])
        filter_method(fp, options.filter_meta, sys.stdout)
else:
reader = csv.DictReader(fp)
for record in reader:
# print >> sys.stderr, "[START] ptid : " + str(record['ptid'])
# print >> sys.stderr, "[Date(M-D-Y)][Visit #]: ["+ str(record['visitmo']) + "-" + str(record['visitday']) + "-" + str(record['visityr']) + "][" + str(record['visitnum']) + "]"
try:
if options.ivp:
packet = ivp_builder.build_uds3_ivp_form(record)
elif options.np:
packet = np_builder.build_uds3_np_form(record)
elif options.fvp:
packet = fvp_builder.build_uds3_fvp_form(record)
except Exception, exp:
if 'ptid' in record:
print >> sys.stderr, "[SKIP] Error for ptid : " + str(record['ptid'])
traceback.print_exc()
continue
if not options.np:
set_blanks_to_zero(packet)
warnings = []
warnings += ["[START] ptid : " + str(record['ptid'])]
warnings += ["[Date(M-D-Y)][Visit #]: ["+ str(record['visitmo']) + "-" + str(record['visitday']) + "-" + str(record['visityr']) + "][" + str(record['visitnum']) + "]"]
warnings += check_blanks(packet)
if not options.np:
warnings += check_single_select(packet)
if warnings:
print >> sys.stderr, "\n".join(warnings)
# fcsv = open('NaccConverted_' + raw_csv, 'wt')
# writer = csv.writer(fcsv)
# print('This is the name of the output file 3381: ' + 'NaccConverted_' + raw_csv)
# all_warnings.append([warnings])
all_warnings += warnings
# print('This is in redcap2nacc, file name ' + raw_csv)
for form in packet:
print form
# all_warnings = "\n".join(all_warnings)
return all_warnings
if __name__ == '__main__':
    main(sys.argv[-1])  # pass the csv path (last CLI argument) as raw_csv
|
ZacZZZ/Nacculator_Github
|
nacc/backup/redcap2nacc_flask-8-17-before debugging.py
|
Python
|
bsd-2-clause
| 9,596
|
[
"VisIt"
] |
2221bc0fc35994b4f23a6746a56f234f11d3a52158946db59f82efe853f82f29
|
#!/usr/bin/env python
"""
Chuckchi_Winds_NARR_model_prep.py
Retrieve NARR winds for one location
Icy Cape Line, Ckip2
Latitude = 70.8401 Longitude = 163.2054
Filter NARR winds with a triangular filter (1/4, 1/2, 1/4) and output every 3hrs
Provide U, V
Save in EPIC NetCDF standard
"""
#System Stack
import datetime
import argparse
import csv
import sys
#Science Stack
import numpy as np
from netCDF4 import Dataset
# User Stack
import utilities.haversine as sphered
from utilities import ncutilities as ncutil
# Visual Stack
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, shiftgrid
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
__created__ = datetime.datetime(2014, 01, 13)
__modified__ = datetime.datetime(2014, 01, 13)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'NARR','station_1','3hr filtered', 'U,V','Winds', 'Chuckchi'
"""------------------------General Modules-------------------------------------------"""
def from_netcdf(infile):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
ncdata = ncutil.ncreadfile_dic(nchandle, params)
ncutil.ncclose(nchandle)
return (ncdata, params)
def from_netcdf_1dsplice(infile, height_ind, lat_ind, lon_ind):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
print "Parameters available: "
print params
ncdata = ncutil.ncreadfile_dic_slice(nchandle, params, height_ind=height_ind, lat_ind=lat_ind, lon_ind=lon_ind)
ncutil.ncclose(nchandle)
return ncdata
def latlon_grid(infile):
nchandle = ncutil.ncopen(infile)
lat_lon = ncutil.get_geocoords(nchandle)
ncutil.ncclose(nchandle)
return (lat_lon)
def csvread(ifile):
date, time, uwnd, vwnd, atemp, bpress = [], [], [], [], [], []
with open(ifile, 'rb') as csv_file:
csv_reader = csv.reader(csv_file)
next(csv_reader) #skip header
""" DAT TIM WU WV AT BP """
for row in csv_reader:
try:
r0,r1,r2,r3,r4,r5,r6 = row[0].strip().split()
except ValueError:
r0,r1,r2,r3,r4,r5 = row[0].strip().split()
date.append(r0)
time.append(r1)
uwnd.append(r2)
vwnd.append(r3)
return {'DAT': np.array(date, int), 'TIM':np.array(time, float), 'WU':np.array(uwnd, float),\
'WV':np.array(vwnd, float)}
def write2epic( file_name, stationid, time, lat_lon, data ):
ncinstance = ncutil.EPIC_NC(savefile=file_name)
ncinstance.file_create()
ncinstance.sbeglobal_atts()
ncinstance.PMELglobal_atts(Station_Name=stationid, file_name=( __file__.split('/')[-1]) )
ncinstance.dimension_init(len_time=len(time[0]))
ncinstance.variable_init()
ncinstance.add_coord_data(time1=time[0], time2=time[1], latitude=lat_lon[0], longitude=-1 * lat_lon[1], \
depth_level=10. )
ncinstance.add_data('WU_422', data[0])
ncinstance.add_data('WV_423', data[1])
ncinstance.add_data('AT_21', data[2])
ncinstance.close()
def write2epic_cf( file_name, stationid, time, lat_lon, data ):
ncinstance = ncutil.EPIC_NC_SST_cf(savefile=file_name)
ncinstance.file_create()
ncinstance.sbeglobal_atts()
ncinstance.PMELglobal_atts(Station_Name=stationid, file_name=( __file__.split('/')[-1]) )
ncinstance.dimension_init(len_time=len(time))
ncinstance.variable_init()
ncinstance.add_coord_data(time=time, latitude=lat_lon[0], longitude=-1 * lat_lon[1], \
depth_level=10. )
ncinstance.add_data('WU_422', data[0])
ncinstance.add_data('WV_423', data[1])
ncinstance.add_data('AT_21', data[2])
ncinstance.close()
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
""" Ingest EPIC date or NCEP Date and provide python serial date"""
if file_flag == 'EPIC':
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
try: #if input is an array
python_time = [None] * len(file_time)
for i, val in enumerate(file_time):
pyday = file_time[i] - offset
pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time[i] = (pyday + pyfrac)
except:
pyday = file_time - offset
pyfrac = file_time2 / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time = (pyday + pyfrac)
elif file_flag == 'NARR':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
elif file_flag == 'NCEP':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
else:
print "time flag not recognized"
sys.exit()
return np.array(python_time)
def pydate2EPIC(file_time):
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
time1 = np.floor(file_time) + offset #truncate to get day and add 2440000 for true julian day
time2 = ( file_time - np.floor(file_time) ) * (1000. * 60. * 60.* 24.) #milliseconds since 0000GMT
return(time1, time2)
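# Round-trip sketch (illustrative values): noon on EPIC julian day 2440000
#   pydate2EPIC(date2pydate(2440000, 43200000.0)) -> (2440000.0, 43200000.0)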
"---"
def triangle_smoothing(data_in):
weights=np.array([0.25,0.5,0.25])
filtered_data = np.convolve(data_in,np.array(weights),'same') #edge effects
return filtered_data
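# e.g. np.convolve([0, 0, 4, 0, 0], [0.25, 0.5, 0.25], 'same')
#      -> [0., 1., 2., 1., 0.]  (a spike of 4 is spread as 1/4, 1/2, 1/4)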
"""------------------------- Topo Modules -------------------------------------------"""
def etopo5_data():
""" read in etopo5 topography/bathymetry. """
file = '/Users/bell/in_and_outbox/Ongoing_Analysis/MapGrids/etopo5.nc'
etopodata = Dataset(file)
topoin = etopodata.variables['bath'][:]
lons = etopodata.variables['X'][:]
lats = etopodata.variables['Y'][:]
etopodata.close()
topoin,lons = shiftgrid(0.,topoin,lons,start=False) # -360 -> 0
lons, lats = np.meshgrid(lons, lats)
return(topoin, lats, lons)
"""------------------------- Main Modules -------------------------------------------"""
parser = argparse.ArgumentParser(description='NARR from Single Station')
parser.add_argument('MooringID', metavar='MooringID', type=str, help='MooringID Name')
parser.add_argument('latitude', metavar='latitude', type=float, help='latitude (+N)')
parser.add_argument('longitude', metavar='longitude', type=float, help='longitude (+W)')
parser.add_argument('years', nargs='+', type=int, help='start and stop year')
parser.add_argument('--DataPath', metavar='DataPath', type=str, help='full path to alternate file')
parser.add_argument("-cf",'--cf', action="store_true", help='cf conventions - primarily in time')
parser.add_argument("-sm",'--sitemap', action="store_true", help='create a site map')
args = parser.parse_args()
### list of files
if args.DataPath:
NARR = args.DataPath
else:
NARR = '/Users/bell/Data_Local/Reanalysis_Files/NARR/3hourly/'
infile = [NARR + 'uwnd.10m.2012.nc'] #used just to get grid sections
print infile
### Grab grid points for future slicing - assume grid is same in all model output
lat_lon = latlon_grid(infile[0])
station_name = [args.MooringID]
sta_lat = [args.latitude]
sta_long = [args.longitude]
#Find NARR nearest point to moorings - haversine formula
station_1 = sphered.nearest_point([sta_lat[0],-1 * sta_long[0]],lat_lon['lat'],lat_lon['lon'], '2d')
station_1_modelpt = [lat_lon['lat'][station_1[3],station_1[4]],lat_lon['lon'][station_1[3],station_1[4]]]
print "station_1 nearest point to %s, %s which is lat:%s , lon:%s" \
% (sta_lat[0], sta_long[0], station_1_modelpt[0], station_1_modelpt[1])
#loop over all requested data
years = range(args.years[0],args.years[1]+1)
for yy in years:
# retrieve only these location's data
# uwnd
infile = NARR + 'uwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
station_1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4])
#filter data
station_1u_f = triangle_smoothing(station_1_data['uwnd'])
# retrieve only these location's data
# vwnd
infile = NARR + 'vwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
station_1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4])
#filter data
station_1v_f = triangle_smoothing(station_1_data['vwnd'])
# retrieve only these location's data
# sfc air temp
infile = NARR + 'air.2m.'+ str(yy) + '.nc'
print "Working on file " + infile
station_1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4])
station_1at = station_1_data['air'] -273.15 #Kelvin
#convert to EPIC time
pydate = date2pydate(station_1_data['time'], file_flag='NARR')
epic_time, epic_time1 = pydate2EPIC(pydate)
# output u,v wind components from model grid points
save_to_nc = True
if save_to_nc:
# write to NetCDF
outfile = 'data/NARR_' + station_name[0] + '_' + str(yy) + '.nc'
print "Writing to Epic NetCDF " + outfile
if args.cf:
#days since 1800-1-1 00:00:0.0
date_str_cf = []
write2epic_cf( outfile, station_name[0], date_str_cf, station_1_modelpt, [station_1u_f, station_1v_f, station_1at])
else:
write2epic( outfile, station_name[0], [epic_time, epic_time1], station_1_modelpt, [station_1u_f, station_1v_f, station_1at])
if args.sitemap:
(topoin, elats, elons) = etopo5_data()
fig = plt.figure()
ax = plt.subplot(111)
m = Basemap(resolution='i',projection='merc', llcrnrlat=55, \
urcrnrlat=75,llcrnrlon=-180,urcrnrlon=-145, lat_ts=60)
# Mooring Data
x_moor, y_moor = m([-1. * sta_long[0],],sta_lat[0])
x_close, y_close = m([station_1_modelpt[1],], [station_1_modelpt[0],])
print "plotting closest point at {x_moor},{y_moor} - {lat_moor},{lon_moor}".format(x_moor=x_close,y_moor=y_close,lat_moor=sta_lat[0],lon_moor=-1.*sta_long[0])
print "plotting mooring point at {x_moor},{y_moor} - {lat_moor},{lon_moor}".format(x_moor=x_moor,y_moor=y_moor,lat_moor=station_1_modelpt[0],lon_moor=-1.*station_1_modelpt[1])
#ETOPO 5 contour data
ex, ey = m(elons, elats)
CS = m.contourf(ex,ey,topoin, levels=range(250,5000,250), cmap='gray_r', alpha=.75) #colors='black'
CS = m.contour(ex,ey,topoin, levels=range(250,5000,250), linewidths=0.2, colors='black', alpha=.75) #
CS = m.contour(ex,ey,topoin, levels=[-1000, -200, -100], linestyle='--', linewidths=0.2, colors='black', alpha=.75) #
#plot points
m.scatter(x_close,y_close,20,marker='+',color='b')
m.scatter(x_moor,y_moor,20,marker='o',color='g')
m.drawcountries(linewidth=0.5)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(60,75,2.),labels=[1,0,0,0],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw parallels
m.drawmeridians(np.arange(-180,-145,2.),labels=[0,0,0,1],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw meridians
#m.fillcontinents(color='black')
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0], DefaultSize[1]) )
plt.savefig('images/Chuckchi_region.png', bbox_inches='tight', dpi = (100))
plt.close()
|
shaunwbell/FOCI_Analysis
|
NARR_3hr_WindsSFCtemp_Station.py
|
Python
|
mit
| 11,955
|
[
"NetCDF"
] |
36e41587254da5fcf0001815e589fd9962bf8b06780d62895e3c4c32e859a813
|
# -*- coding: utf-8 -*-
'''
@author: Hung-Hsin Chen
@mail: chenhh@par.cse.nsysu.edu.tw
'''
from __future__ import division
import numpy as np
import pandas as pd
import os
import sys
from datetime import date
import time
from stats import Performance
import scipy.stats as spstats
from cStringIO import StringIO
ProjectDir = os.path.join(os.path.abspath(os.path.curdir), '..')
sys.path.insert(0, ProjectDir)
from PySPPortfolio import (PklBasicFeaturesDir, ExpResultsDir)
import statsmodels.tsa.stattools as sts
import statsmodels.stats.stattools as sss
import statsmodels.stats.diagnostic as ssd
def buyHoldPortfolio(symbols, startDate=date(2005,1,3), endDate=date(2013,12,31),
money=1e6, buyTransFee=0.001425, sellTransFee=0.004425,
save_latex=False, save_csv=True, debug=False):
t = time.time()
#read df
dfs = []
transDates = None
for symbol in symbols:
df = pd.read_pickle(os.path.join(PklBasicFeaturesDir, '%s.pkl'%symbol))
tmp = df[startDate: endDate]
startIdx = df.index.get_loc(tmp.index[0])
endIdx = df.index.get_loc(tmp.index[-1])
data = df[startIdx: endIdx+1]['adjROI']/100.
#check all data have the same transDates
if transDates is None:
transDates = data.index.values
if not np.all(transDates == data.index.values):
raise ValueError('symbol %s do not have the same trans. dates'%(symbol))
dfs.append(data)
#initialize
n_rv = len(dfs)
symbols.append('deposit')
wealthProcess = pd.DataFrame(columns=symbols, index=transDates)
#allocation
for symbol in symbols[:-1]:
wealthProcess[symbol][transDates[0]] = money/n_rv * (1-buyTransFee)
wealthProcess['deposit'] = 0
#buy and hold
for sdx, symbol in enumerate(symbols[:-1]):
for tdx, transDate in enumerate(transDates[1:]):
tm1 = transDates[tdx]
roi = dfs[sdx][transDate]
wealthProcess[symbol][transDate] = wealthProcess[symbol][tm1] * (1+roi)
#sell in the last period
for symbol in symbols[:-1]:
wealthProcess[symbol][-1] *= (1-sellTransFee)
wealth = wealthProcess.sum(axis=1)
pROI = (wealth[-1]/1e6 -1) * 100
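    # e.g. with money=1e6 and n_rv=5 each stock starts at
    # 1e6/5 * (1 - 0.001425) = 199715.0, every holding pays the 0.004425
    # sell fee on the final day, and pROI is the percent return on the 1e6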
prois = wealth.pct_change()
prois[0] = 0
ret = sss.jarque_bera(prois)
JB = ret[1]
ret2 = sts.adfuller(prois)
ADF = ret2[1]
resultDir = os.path.join(ExpResultsDir, "BuyandHoldPortfolio")
if not os.path.exists(resultDir):
os.makedirs(resultDir)
fileName = os.path.join(resultDir, 'BuyandHold_result_2005.csv')
statName = os.path.join(resultDir, 'BuyandHold_result_2005.txt')
df_name = os.path.join(resultDir,"wealthProcess_n%s.pkl"%(len(dfs)))
df2_name = os.path.join(resultDir,"wealthSum_n%s.pkl"%(len(dfs)))
csv_name = os.path.join(resultDir,"wealthProcess_n%s.csv"%(len(dfs)))
csv2_name = os.path.join(resultDir,"wealthSum_n%s.csv"%(len(dfs)))
wealthProcess.to_csv(csv_name)
wealth.to_csv(csv2_name)
wealthProcess.to_pickle(df_name)
wealth.to_pickle(df2_name)
csvIO = StringIO()
statIO = StringIO()
if not os.path.exists(fileName):
        csvIO.write('n_rv, wealth, wROI(%), ROI(%), stdev, skew, kurt,')
        csvIO.write('Sp(%), StF(%), StP(%), downDevF, downDevP, JB, ADF\n')
statIO.write('$n$ & $R_{C}$(\%) & $R_{A}$(\%) & $\mu$(\%) & $\sigma$(\%) & skew & kurt & $S_p$(\%) & $S_o$(\%) & JB & ADF \\\ \hline \n')
sharpe = Performance.Sharpe(prois)
sortinof, ddf = Performance.SortinoFull(prois)
sortinop, ddp = Performance.SortinoPartial(prois)
csvIO.write('%s,%s,%s,%s,%s,%s,%s,'%(n_rv, wealth[-1], pROI,
prois.mean()*100, prois.std()*100,
spstats.skew(prois), spstats.kurtosis(prois)
))
csvIO.write('%s,%s,%s,%s,%s,%s,%s\n'%(sharpe*100, sortinof*100,
sortinop*100, ddf*100, ddp*100, JB, ADF))
statIO.write('%2d & %4.2f & %4.2f & %4.2f & %4.2f & %4.2f & %4.2f & %4.2f & %4.2f & %4.2e & %4.2e \\\ \hline \n'%(
n_rv, pROI, (np.power(wealth[-1]/1e6, 1./9)-1)*100,
prois.mean()*100, prois.std()*100,
spstats.skew(prois),
spstats.kurtosis(prois),
sharpe*100, sortinof*100, JB, ADF ))
with open(fileName, 'ab') as fout:
fout.write(csvIO.getvalue())
csvIO.close()
with open(statName, 'ab') as fout:
fout.write(statIO.getvalue())
statIO.close()
print "buyhold portfolio %s %s_%s pROI:%.3f%%, %.3f secs"%(startDate, endDate, n_rv,
pROI, time.time() -t )
def y2yBuyHold():
t = time.time()
n_rvs = range(5, 50+5, 5)
years = range(2005, 2013+1)
resultDir = os.path.join(ExpResultsDir, "BuyandHoldPortfolio")
avgIO = StringIO()
avgIO.write('startDate, endDate, n_stock, wealth1, wealth2, wROI(%), JB, ADF,' )
    avgIO.write('meanROI(%), Sharpe(%), SortinoFull(%), SortinoPartial(%),')
avgIO.write(' downDevFull, downDevPartial\n')
for n_rv in n_rvs:
df = pd.read_pickle(os.path.join(resultDir,"wealthSum_n%s.pkl"%(n_rv)))
for year in years:
startDate = date(year, 1, 1)
endDate = date(year, 12, 31)
print startDate, endDate
wealths = df[startDate:endDate]
wrois = wealths.pct_change()
wrois[0] = 0
wealth1 = wealths[0]
wealth2 = wealths[-1] * (1-0.004425)
roi = (wealth2/wealth1 - 1)
ret = sss.jarque_bera(wrois)
JB = ret[1]
ret2 = sts.adfuller(wrois)
ADF = ret2[1]
sharpe = Performance.Sharpe(wrois)
sortinof, ddf = Performance.SortinoFull(wrois)
sortinop, ddp = Performance.SortinoPartial(wrois)
avgIO.write("%s,%s,%s,%s,%s,%s,%s,%s,"%( wealths.index[0].strftime("%Y-%m-%d"),
wealths.index[-1].strftime("%Y-%m-%d"), n_rv, wealth1, wealth2, roi*100, JB, ADF))
avgIO.write("%s,%s,%s,%s,"%(wrois.mean()*100, sharpe*100, sortinof*100, sortinop*100))
avgIO.write("%s,%s\n"%(ddf*100, ddp*100))
resFile = os.path.join(ExpResultsDir, 'y2yfixedBuyandHold_result_2005.csv')
with open(resFile, 'wb') as fout:
fout.write(avgIO.getvalue())
avgIO.close()
print "y2yBuyandHold OK, elapsed %.3f secs"%(time.time()-t)
if __name__ == '__main__':
n_stocks = [5,10, 15, 20, 25, 30, 35, 40 , 45, 50]
# n_stocks = [5, ]
#20050103
symbols = [
'2330', '2412', '2882', '6505', '2317',
'2303', '2002', '1303', '1326', '1301',
'2881', '2886', '2409', '2891', '2357',
'2382', '3045', '2883', '2454', '2880',
'2892', '4904', '2887', '2353', '2324',
'2801', '1402', '2311', '2475', '2888',
'2408', '2308', '2301', '2352', '2603',
'2884', '2890', '2609', '9904', '2610',
'1216', '1101', '2325', '2344', '2323',
'2371', '2204', '1605', '2615', '2201',
]
startDate=date(2005,1,3)
endDate=date(2013,12,31)
for n_stock in n_stocks:
buyHoldPortfolio(symbols[:n_stock], startDate, endDate)
# y2yBuyHold()
|
chenhh/PySPPortfolio
|
PySPPortfolio/buyHoldPortfolio.py
|
Python
|
gpl-3.0
| 7,631
|
[
"ADF"
] |
cd873ed32e4c781d0ab1712e084fc3db02ee651dc43884f4f9be4013bd88d3f1
|
"""Test the server of Geckoboard data."""
import json
import copy
from twisted.trial import unittest
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.web.client import getPage
from vumidash.gecko_server import GeckoServer
from vumidash.base import MetricSource
class DummySource(MetricSource):
def __init__(self, testdata):
self.testdata = testdata
def get_latest(self, metric_name, start, end, summary_size):
values = self.get_history(metric_name, start, end, summary_size)
if not values:
values = [None, None]
return values[0], values[-1]
def get_history(self, metric_name, start, end, summary_size,
skip_nulls=True):
if metric_name not in self.testdata:
raise ValueError("Unknown metric")
data = self.testdata.get(metric_name)
steps = int(self.total_seconds(end - start) /
float(self.total_seconds(summary_size)))
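        # e.g. a 3 s window with a 1 s summary_size gives steps == 3, so
        # metric 'foo' would yield its first three values, [1, 2, 3]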
values = data[:steps]
if skip_nulls:
return [v for v in values if v is not None]
else:
return [v if v is not None else 0.0 for v in values]
class TestGeckoServer(unittest.TestCase):
TESTDATA = {
'foo': [1, 2, 3, 4, 5],
'bar': [6, 7, 8, 9, 10],
'zeroes': [1, 2, None, 3, 4, None, 5],
'empty': [],
}
@inlineCallbacks
def setUp(self):
self.testdata = copy.deepcopy(self.TESTDATA)
self.metrics_source = DummySource(self.testdata)
self.service = GeckoServer(self.metrics_source, 0)
yield self.service.startService()
addr = self.service.webserver.getHost()
self.url = "http://%s:%s/" % (addr.host, addr.port)
@inlineCallbacks
def tearDown(self):
yield self.service.stopService()
@inlineCallbacks
def get_route_json(self, route):
data = yield getPage(self.url + route, timeout=1)
returnValue(json.loads(data))
def check_series(self, json, series_dict):
series_map = dict((series['name'], series)
for series in json['series'])
for name, expected_data in series_dict.items():
series = series_map[name]
self.assertEqual(series['data'], expected_data)
self.assertEqual(series['type'], 'line')
self.assertEqual(len(series_map), len(series_dict))
@inlineCallbacks
def test_simple_latest(self):
data = yield self.get_route_json('latest?metric=foo')
self.assertTrue('item' in data)
@inlineCallbacks
def test_multiple_latest(self):
data = yield self.get_route_json('latest?metric=foo&metric=bar')
self.assertEqual({'item': [{'text': '', 'value': 15},
{'text': '', 'value': 7}]}, data)
@inlineCallbacks
def test_empty_latest(self):
data = yield self.get_route_json('latest?metric=empty')
self.assertEqual({'item': [{'text': '', 'value': 0},
{'text': '', 'value': 0}]}, data)
@inlineCallbacks
def test_simple_history(self):
data = yield self.get_route_json('history?metric=foo')
self.assertTrue('title' in data)
self.check_series(data, {'foo': self.testdata['foo']})
@inlineCallbacks
def test_multiple_history(self):
data = yield self.get_route_json('history?metric=foo&metric=bar')
self.assertTrue('title' in data)
self.check_series(data, {
'foo': self.testdata['foo'],
'bar': self.testdata['bar'],
})
@inlineCallbacks
def test_history_with_ranges(self):
data = yield self.get_route_json('history?metric=foo'
'&from=-3s&until=-0s&step=1s')
self.assertTrue('title' in data)
self.check_series(data, {'foo': self.testdata['foo'][:3]})
@inlineCallbacks
def test_history_with_ymin(self):
data = yield self.get_route_json('history?metric=foo')
self.assertEqual(None, data['yAxis']['min'])
data = yield self.get_route_json('history?metric=foo&ymin=-3.2')
self.assertEqual(-3.2, data['yAxis']['min'])
@inlineCallbacks
def test_history_with_markers(self):
data = yield self.get_route_json('history?metric=foo&markers=false')
self.assertFalse(data['plotOptions']['line']['marker']['enabled'])
data = yield self.get_route_json('history?metric=foo&markers=true')
self.assertTrue(data['plotOptions']['line']['marker']['enabled'])
@inlineCallbacks
def test_history_with_labels(self):
data = yield self.get_route_json('history?metric=foo&label=bar')
self.check_series(data, {'bar': self.testdata['foo']})
@inlineCallbacks
def test_history_with_multiple_labels(self):
data = yield self.get_route_json('history?metric=foo&label=foolabel'
'&metric=bar&label=barlabel')
self.check_series(data, {
'foolabel': self.testdata['foo'],
'barlabel': self.testdata['bar'],
})
@inlineCallbacks
def test_yaxis_label(self):
data = yield self.get_route_json('history?metric=foo&ylabel=bar')
self.assertEqual(data['yAxis']['title']['text'], 'bar')
@inlineCallbacks
def test_skip_nulls(self):
data = yield self.get_route_json('history?metric=zeroes')
without_nulls = [v for v in self.testdata['zeroes'] if v is not None]
self.check_series(data, {'zeroes': without_nulls})
data = yield self.get_route_json('history?metric=zeroes'
'&skip_nulls=false')
with_nulls_as_zeroes = [v if v is not None else 0.0
for v in self.testdata['zeroes']]
self.check_series(data, {'zeroes': with_nulls_as_zeroes})
@inlineCallbacks
def test_empty_data(self):
data = yield self.get_route_json('history?metric=empty')
self.check_series(data, {'empty': []})
@inlineCallbacks
def test_rag_simple(self):
data = yield self.get_route_json('rag?r_metric=foo&a_metric=bar'
'&g_metric=zeroes')
for item, (value, text) in zip(data['item'], [
(5, "Red"), (10, "Amber"), (5, "Green")]):
self.assertEqual(item, {"value": value, "text": text})
@inlineCallbacks
def test_rag_text(self):
data = yield self.get_route_json('rag?r_metric=foo&r_text=foo1'
'&a_metric=bar&a_text=bar2'
'&g_metric=zeroes&g_text=zeroes3')
for item, (value, text) in zip(data['item'], [
(5, "foo1"), (10, "bar2"), (5, "zeroes3")]):
self.assertEqual(item, {"value": value, "text": text})
@inlineCallbacks
def test_rag_prefix(self):
data = yield self.get_route_json('rag?r_metric=foo&r_prefix=%24'
'&a_metric=bar&a_prefix=%26euro%3B'
'&g_metric=zeroes'
'&g_prefix=%26pound%3B')
for item, (value, text, prefix) in zip(data['item'], [
(5, "Red", "$"), (10, "Amber", "€"),
(5, "Green", "£")]):
self.assertEqual(item, {
"value": value, "text": text, "prefix": prefix})
|
praekelt/vumi-dashboard
|
vumidash/tests/test_gecko_server.py
|
Python
|
bsd-3-clause
| 7,475
|
[
"Amber"
] |
2e14e23098a995ca58f0a32d4e25256268dab007198dd541a8c82cab49848ced
|
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from hyperspy._signals.signal1d import Signal1D
from hyperspy.components1d import Gaussian
from hyperspy.decorators import lazifyTestClass
@lazifyTestClass
class TestChiSquared:
def setup_method(self, method):
s = Signal1D(np.array([1.0, 2, 4, 7, 12, 7, 4, 2, 1]))
m = s.create_model()
self.model = m
self.A = 38.022476979172588
self.sigma = 1.4764966133859543
self.centre = 4.0000000002462945
def test_chisq_with_fit(self):
m = self.model
g = Gaussian()
m.append(g)
m.fit()
assert np.allclose(m.chisq(), 7.78966223)
def test_dof_with_fit(self):
m = self.model
g = Gaussian()
g1 = Gaussian()
m.extend((g, g1))
g1.set_parameters_not_free('A')
m.fit()
assert np.equal(m.dof(), 5)
def test_red_chisq_with_fit(self):
m = self.model
g = Gaussian()
m.append(g)
m.fit()
assert np.allclose(m.red_chisq(), 1.55793245)
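    # note: the expected value follows from red_chisq = chisq / (N - p - 1),
    # with N = 9 signal channels and p = 3 free Gaussian parameters
    # (A, sigma, centre): 7.78966223 / 5 = 1.55793245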
def test_chisq(self):
m = self.model
g = Gaussian()
g.A.value = self.A
g.sigma.value = self.sigma
g.centre.value = self.centre
m.append(g)
m._calculate_chisq()
assert np.allclose(m.chisq(), 7.78966223)
def test_dof_with_p0(self):
m = self.model
g = Gaussian()
g1 = Gaussian()
m.extend((g, g1))
g1.set_parameters_not_free('A')
m._set_p0()
m._set_current_degrees_of_freedom()
assert np.equal(m.dof(), 5)
def test_red_chisq(self):
m = self.model
g = Gaussian()
g.A.value = self.A
g.sigma.value = self.sigma
g.centre.value = self.centre
m.append(g)
m._set_p0()
m._set_current_degrees_of_freedom()
m._calculate_chisq()
assert np.allclose(m.red_chisq(), 1.55793245)
def test_chisq_in_range(self):
m = self.model
g = Gaussian()
m.append(g)
m.set_signal_range(1, 7)
m.fit()
assert np.allclose(m.red_chisq(), 2.87544335)
def test_chisq_with_inactive_components(self):
m = self.model
ga = Gaussian()
gin = Gaussian()
m.append(ga)
m.append(gin)
gin.active = False
m.fit()
assert np.allclose(m.chisq(), 7.78966223)
def test_dof_with_inactive_components(self):
m = self.model
ga = Gaussian()
gin = Gaussian()
m.append(ga)
m.append(gin)
gin.active = False
m.fit()
assert np.equal(m.dof(), 3)
|
dnjohnstone/hyperspy
|
hyperspy/tests/model/test_chi_squared.py
|
Python
|
gpl-3.0
| 3,332
|
[
"Gaussian"
] |
9711984481c1fce2fae1e2419451161d82e0a3b03e2f08ee5780718f17b4b0ef
|
import os
import socket
import codecs
from math import floor
from threading import Lock
try:
import mmap
except ImportError: # pragma: no cover
mmap = None
import binascii
try:
from StringIO import StringIO
except ImportError:
from io import StringIO, BytesIO
from platform import python_version_tuple
PY2 = python_version_tuple()[0] == '2'
PY3 = python_version_tuple()[0] == '3'
GEOIP_STANDARD = 0
GEOIP_MEMORY_CACHE = 1
DMA_MAP = {
500: 'Portland-Auburn, ME',
501: 'New York, NY',
502: 'Binghamton, NY',
503: 'Macon, GA',
504: 'Philadelphia, PA',
505: 'Detroit, MI',
506: 'Boston, MA',
507: 'Savannah, GA',
508: 'Pittsburgh, PA',
509: 'Ft Wayne, IN',
510: 'Cleveland, OH',
511: 'Washington, DC',
512: 'Baltimore, MD',
513: 'Flint, MI',
514: 'Buffalo, NY',
515: 'Cincinnati, OH',
516: 'Erie, PA',
517: 'Charlotte, NC',
518: 'Greensboro, NC',
519: 'Charleston, SC',
520: 'Augusta, GA',
521: 'Providence, RI',
522: 'Columbus, GA',
523: 'Burlington, VT',
524: 'Atlanta, GA',
525: 'Albany, GA',
526: 'Utica-Rome, NY',
527: 'Indianapolis, IN',
528: 'Miami, FL',
529: 'Louisville, KY',
530: 'Tallahassee, FL',
531: 'Tri-Cities, TN',
532: 'Albany-Schenectady-Troy, NY',
533: 'Hartford, CT',
534: 'Orlando, FL',
535: 'Columbus, OH',
536: 'Youngstown-Warren, OH',
537: 'Bangor, ME',
538: 'Rochester, NY',
539: 'Tampa, FL',
540: 'Traverse City-Cadillac, MI',
541: 'Lexington, KY',
542: 'Dayton, OH',
543: 'Springfield-Holyoke, MA',
544: 'Norfolk-Portsmouth, VA',
545: 'Greenville-New Bern-Washington, NC',
546: 'Columbia, SC',
547: 'Toledo, OH',
548: 'West Palm Beach, FL',
549: 'Watertown, NY',
550: 'Wilmington, NC',
551: 'Lansing, MI',
552: 'Presque Isle, ME',
553: 'Marquette, MI',
554: 'Wheeling, WV',
555: 'Syracuse, NY',
556: 'Richmond-Petersburg, VA',
557: 'Knoxville, TN',
558: 'Lima, OH',
559: 'Bluefield-Beckley-Oak Hill, WV',
560: 'Raleigh-Durham, NC',
561: 'Jacksonville, FL',
563: 'Grand Rapids, MI',
564: 'Charleston-Huntington, WV',
565: 'Elmira, NY',
566: 'Harrisburg-Lancaster-Lebanon-York, PA',
567: 'Greenville-Spartenburg, SC',
569: 'Harrisonburg, VA',
570: 'Florence-Myrtle Beach, SC',
571: 'Ft Myers, FL',
573: 'Roanoke-Lynchburg, VA',
574: 'Johnstown-Altoona, PA',
575: 'Chattanooga, TN',
576: 'Salisbury, MD',
577: 'Wilkes Barre-Scranton, PA',
581: 'Terre Haute, IN',
582: 'Lafayette, IN',
583: 'Alpena, MI',
584: 'Charlottesville, VA',
588: 'South Bend, IN',
592: 'Gainesville, FL',
596: 'Zanesville, OH',
597: 'Parkersburg, WV',
598: 'Clarksburg-Weston, WV',
600: 'Corpus Christi, TX',
602: 'Chicago, IL',
603: 'Joplin-Pittsburg, MO',
604: 'Columbia-Jefferson City, MO',
605: 'Topeka, KS',
606: 'Dothan, AL',
609: 'St Louis, MO',
610: 'Rockford, IL',
611: 'Rochester-Mason City-Austin, MN',
612: 'Shreveport, LA',
613: 'Minneapolis-St Paul, MN',
616: 'Kansas City, MO',
617: 'Milwaukee, WI',
618: 'Houston, TX',
619: 'Springfield, MO',
620: 'Tuscaloosa, AL',
622: 'New Orleans, LA',
623: 'Dallas-Fort Worth, TX',
624: 'Sioux City, IA',
625: 'Waco-Temple-Bryan, TX',
626: 'Victoria, TX',
627: 'Wichita Falls, TX',
628: 'Monroe, LA',
630: 'Birmingham, AL',
631: 'Ottumwa-Kirksville, IA',
632: 'Paducah, KY',
633: 'Odessa-Midland, TX',
634: 'Amarillo, TX',
635: 'Austin, TX',
636: 'Harlingen, TX',
637: 'Cedar Rapids-Waterloo, IA',
638: 'St Joseph, MO',
639: 'Jackson, TN',
640: 'Memphis, TN',
641: 'San Antonio, TX',
642: 'Lafayette, LA',
643: 'Lake Charles, LA',
644: 'Alexandria, LA',
646: 'Anniston, AL',
647: 'Greenwood-Greenville, MS',
648: 'Champaign-Springfield-Decatur, IL',
649: 'Evansville, IN',
650: 'Oklahoma City, OK',
651: 'Lubbock, TX',
652: 'Omaha, NE',
656: 'Panama City, FL',
657: 'Sherman, TX',
658: 'Green Bay-Appleton, WI',
659: 'Nashville, TN',
661: 'San Angelo, TX',
662: 'Abilene-Sweetwater, TX',
669: 'Madison, WI',
670: 'Ft Smith-Fay-Springfield, AR',
671: 'Tulsa, OK',
673: 'Columbus-Tupelo-West Point, MS',
675: 'Peoria-Bloomington, IL',
676: 'Duluth, MN',
678: 'Wichita, KS',
679: 'Des Moines, IA',
682: 'Davenport-Rock Island-Moline, IL',
686: 'Mobile, AL',
687: 'Minot-Bismarck-Dickinson, ND',
691: 'Huntsville, AL',
692: 'Beaumont-Port Author, TX',
693: 'Little Rock-Pine Bluff, AR',
698: 'Montgomery, AL',
702: 'La Crosse-Eau Claire, WI',
705: 'Wausau-Rhinelander, WI',
709: 'Tyler-Longview, TX',
710: 'Hattiesburg-Laurel, MS',
711: 'Meridian, MS',
716: 'Baton Rouge, LA',
717: 'Quincy, IL',
718: 'Jackson, MS',
722: 'Lincoln-Hastings, NE',
724: 'Fargo-Valley City, ND',
725: 'Sioux Falls, SD',
734: 'Jonesboro, AR',
736: 'Bowling Green, KY',
737: 'Mankato, MN',
740: 'North Platte, NE',
743: 'Anchorage, AK',
744: 'Honolulu, HI',
745: 'Fairbanks, AK',
746: 'Biloxi-Gulfport, MS',
747: 'Juneau, AK',
749: 'Laredo, TX',
751: 'Denver, CO',
752: 'Colorado Springs, CO',
753: 'Phoenix, AZ',
754: 'Butte-Bozeman, MT',
755: 'Great Falls, MT',
756: 'Billings, MT',
757: 'Boise, ID',
758: 'Idaho Falls-Pocatello, ID',
759: 'Cheyenne, WY',
760: 'Twin Falls, ID',
762: 'Missoula, MT',
764: 'Rapid City, SD',
765: 'El Paso, TX',
766: 'Helena, MT',
767: 'Casper-Riverton, WY',
770: 'Salt Lake City, UT',
771: 'Yuma, AZ',
773: 'Grand Junction, CO',
789: 'Tucson, AZ',
790: 'Albuquerque, NM',
798: 'Glendive, MT',
800: 'Bakersfield, CA',
801: 'Eugene, OR',
802: 'Eureka, CA',
803: 'Los Angeles, CA',
804: 'Palm Springs, CA',
807: 'San Francisco, CA',
810: 'Yakima-Pasco, WA',
811: 'Reno, NV',
813: 'Medford-Klamath Falls, OR',
819: 'Seattle-Tacoma, WA',
820: 'Portland, OR',
821: 'Bend, OR',
825: 'San Diego, CA',
828: 'Monterey-Salinas, CA',
839: 'Las Vegas, NV',
855: 'Santa Barbara, CA',
862: 'Sacramento, CA',
866: 'Fresno, CA',
868: 'Chico-Redding, CA',
881: 'Spokane, WA'
}
COUNTRY_CODES = (
'',
'AP', 'EU', 'AD', 'AE', 'AF', 'AG', 'AI', 'AL', 'AM', 'AN', 'AO', 'AQ',
'AR', 'AS', 'AT', 'AU', 'AW', 'AZ', 'BA', 'BB', 'BD', 'BE', 'BF', 'BG',
'BH', 'BI', 'BJ', 'BM', 'BN', 'BO', 'BR', 'BS', 'BT', 'BV', 'BW', 'BY',
'BZ', 'CA', 'CC', 'CD', 'CF', 'CG', 'CH', 'CI', 'CK', 'CL', 'CM', 'CN',
'CO', 'CR', 'CU', 'CV', 'CX', 'CY', 'CZ', 'DE', 'DJ', 'DK', 'DM', 'DO',
'DZ', 'EC', 'EE', 'EG', 'EH', 'ER', 'ES', 'ET', 'FI', 'FJ', 'FK', 'FM',
'FO', 'FR', 'FX', 'GA', 'GB', 'GD', 'GE', 'GF', 'GH', 'GI', 'GL', 'GM',
'GN', 'GP', 'GQ', 'GR', 'GS', 'GT', 'GU', 'GW', 'GY', 'HK', 'HM', 'HN',
'HR', 'HT', 'HU', 'ID', 'IE', 'IL', 'IN', 'IO', 'IQ', 'IR', 'IS', 'IT',
'JM', 'JO', 'JP', 'KE', 'KG', 'KH', 'KI', 'KM', 'KN', 'KP', 'KR', 'KW',
'KY', 'KZ', 'LA', 'LB', 'LC', 'LI', 'LK', 'LR', 'LS', 'LT', 'LU', 'LV',
'LY', 'MA', 'MC', 'MD', 'MG', 'MH', 'MK', 'ML', 'MM', 'MN', 'MO', 'MP',
'MQ', 'MR', 'MS', 'MT', 'MU', 'MV', 'MW', 'MX', 'MY', 'MZ', 'NA', 'NC',
'NE', 'NF', 'NG', 'NI', 'NL', 'NO', 'NP', 'NR', 'NU', 'NZ', 'OM', 'PA',
'PE', 'PF', 'PG', 'PH', 'PK', 'PL', 'PM', 'PN', 'PR', 'PS', 'PT', 'PW',
'PY', 'QA', 'RE', 'RO', 'RU', 'RW', 'SA', 'SB', 'SC', 'SD', 'SE', 'SG',
'SH', 'SI', 'SJ', 'SK', 'SL', 'SM', 'SN', 'SO', 'SR', 'ST', 'SV', 'SY',
'SZ', 'TC', 'TD', 'TF', 'TG', 'TH', 'TJ', 'TK', 'TM', 'TN', 'TO', 'TL',
'TR', 'TT', 'TV', 'TW', 'TZ', 'UA', 'UG', 'UM', 'US', 'UY', 'UZ', 'VA',
'VC', 'VE', 'VG', 'VI', 'VN', 'VU', 'WF', 'WS', 'YE', 'YT', 'RS', 'ZA',
'ZM', 'ME', 'ZW', 'A1', 'A2', 'O1', 'AX', 'GG', 'IM', 'JE', 'BL', 'MF',
'BQ', 'SS'
)
COUNTRY_CODES3 = (
'', 'AP', 'EU', 'AND', 'ARE', 'AFG', 'ATG', 'AIA', 'ALB', 'ARM', 'ANT',
'AGO', 'AQ', 'ARG', 'ASM', 'AUT', 'AUS', 'ABW', 'AZE', 'BIH', 'BRB', 'BGD',
'BEL', 'BFA', 'BGR', 'BHR', 'BDI', 'BEN', 'BMU', 'BRN', 'BOL', 'BRA',
'BHS', 'BTN', 'BV', 'BWA', 'BLR', 'BLZ', 'CAN', 'CC', 'COD', 'CAF', 'COG',
'CHE', 'CIV', 'COK', 'CHL', 'CMR', 'CHN', 'COL', 'CRI', 'CUB', 'CPV', 'CX',
'CYP', 'CZE', 'DEU', 'DJI', 'DNK', 'DMA', 'DOM', 'DZA', 'ECU', 'EST',
'EGY', 'ESH', 'ERI', 'ESP', 'ETH', 'FIN', 'FJI', 'FLK', 'FSM', 'FRO',
'FRA', 'FX', 'GAB', 'GBR', 'GRD', 'GEO', 'GUF', 'GHA', 'GIB', 'GRL', 'GMB',
'GIN', 'GLP', 'GNQ', 'GRC', 'GS', 'GTM', 'GUM', 'GNB', 'GUY', 'HKG', 'HM',
'HND', 'HRV', 'HTI', 'HUN', 'IDN', 'IRL', 'ISR', 'IND', 'IO', 'IRQ', 'IRN',
'ISL', 'ITA', 'JAM', 'JOR', 'JPN', 'KEN', 'KGZ', 'KHM', 'KIR', 'COM',
'KNA', 'PRK', 'KOR', 'KWT', 'CYM', 'KAZ', 'LAO', 'LBN', 'LCA', 'LIE',
'LKA', 'LBR', 'LSO', 'LTU', 'LUX', 'LVA', 'LBY', 'MAR', 'MCO', 'MDA',
'MDG', 'MHL', 'MKD', 'MLI', 'MMR', 'MNG', 'MAC', 'MNP', 'MTQ', 'MRT',
'MSR', 'MLT', 'MUS', 'MDV', 'MWI', 'MEX', 'MYS', 'MOZ', 'NAM', 'NCL',
'NER', 'NFK', 'NGA', 'NIC', 'NLD', 'NOR', 'NPL', 'NRU', 'NIU', 'NZL',
'OMN', 'PAN', 'PER', 'PYF', 'PNG', 'PHL', 'PAK', 'POL', 'SPM', 'PCN',
'PRI', 'PSE', 'PRT', 'PLW', 'PRY', 'QAT', 'REU', 'ROU', 'RUS', 'RWA',
'SAU', 'SLB', 'SYC', 'SDN', 'SWE', 'SGP', 'SHN', 'SVN', 'SJM', 'SVK',
'SLE', 'SMR', 'SEN', 'SOM', 'SUR', 'STP', 'SLV', 'SYR', 'SWZ', 'TCA',
'TCD', 'TF', 'TGO', 'THA', 'TJK', 'TKL', 'TKM', 'TUN', 'TON', 'TLS', 'TUR',
'TTO', 'TUV', 'TWN', 'TZA', 'UKR', 'UGA', 'UM', 'USA', 'URY', 'UZB', 'VAT',
'VCT', 'VEN', 'VGB', 'VIR', 'VNM', 'VUT', 'WLF', 'WSM', 'YEM', 'YT', 'SRB',
'ZAF', 'ZMB', 'MNE', 'ZWE', 'A1', 'A2', 'O1', 'ALA', 'GGY', 'IMN', 'JEY',
'BLM', 'MAF', 'BES', 'SSD'
)
COUNTRY_NAMES = (
'', 'Asia/Pacific Region', 'Europe', 'Andorra', 'United Arab Emirates',
'Afghanistan', 'Antigua and Barbuda', 'Anguilla', 'Albania', 'Armenia',
'Netherlands Antilles', 'Angola', 'Antarctica', 'Argentina',
'American Samoa', 'Austria', 'Australia', 'Aruba', 'Azerbaijan',
'Bosnia and Herzegovina', 'Barbados', 'Bangladesh', 'Belgium',
'Burkina Faso', 'Bulgaria', 'Bahrain', 'Burundi', 'Benin', 'Bermuda',
'Brunei Darussalam', 'Bolivia', 'Brazil', 'Bahamas', 'Bhutan',
'Bouvet Island', 'Botswana', 'Belarus', 'Belize', 'Canada',
'Cocos (Keeling) Islands', 'Congo, The Democratic Republic of the',
'Central African Republic', 'Congo', 'Switzerland', 'Cote D\'Ivoire',
'Cook Islands', 'Chile', 'Cameroon', 'China', 'Colombia', 'Costa Rica',
'Cuba', 'Cape Verde', 'Christmas Island', 'Cyprus', 'Czech Republic',
'Germany', 'Djibouti', 'Denmark', 'Dominica', 'Dominican Republic',
'Algeria', 'Ecuador', 'Estonia', 'Egypt', 'Western Sahara', 'Eritrea',
'Spain', 'Ethiopia', 'Finland', 'Fiji', 'Falkland Islands (Malvinas)',
'Micronesia, Federated States of', 'Faroe Islands', 'France',
'France, Metropolitan', 'Gabon', 'United Kingdom', 'Grenada', 'Georgia',
'French Guiana', 'Ghana', 'Gibraltar', 'Greenland', 'Gambia', 'Guinea',
'Guadeloupe', 'Equatorial Guinea', 'Greece',
'South Georgia and the South Sandwich Islands', 'Guatemala', 'Guam',
'Guinea-Bissau', 'Guyana', 'Hong Kong',
'Heard Island and McDonald Islands', 'Honduras', 'Croatia', 'Haiti',
'Hungary', 'Indonesia', 'Ireland', 'Israel', 'India',
'British Indian Ocean Territory', 'Iraq', 'Iran, Islamic Republic of',
'Iceland', 'Italy', 'Jamaica', 'Jordan', 'Japan', 'Kenya', 'Kyrgyzstan',
'Cambodia', 'Kiribati', 'Comoros', 'Saint Kitts and Nevis',
'Korea, Democratic People\'s Republic of', 'Korea, Republic of', 'Kuwait',
'Cayman Islands', 'Kazakhstan', 'Lao People\'s Democratic Republic',
'Lebanon', 'Saint Lucia', 'Liechtenstein', 'Sri Lanka', 'Liberia',
'Lesotho', 'Lithuania', 'Luxembourg', 'Latvia', 'Libya', 'Morocco',
'Monaco', 'Moldova, Republic of', 'Madagascar', 'Marshall Islands',
'Macedonia', 'Mali', 'Myanmar', 'Mongolia', 'Macau',
'Northern Mariana Islands', 'Martinique', 'Mauritania', 'Montserrat',
'Malta', 'Mauritius', 'Maldives', 'Malawi', 'Mexico', 'Malaysia',
'Mozambique', 'Namibia', 'New Caledonia', 'Niger', 'Norfolk Island',
'Nigeria', 'Nicaragua', 'Netherlands', 'Norway', 'Nepal', 'Nauru', 'Niue',
'New Zealand', 'Oman', 'Panama', 'Peru', 'French Polynesia',
'Papua New Guinea', 'Philippines', 'Pakistan', 'Poland',
'Saint Pierre and Miquelon', 'Pitcairn Islands', 'Puerto Rico',
'Palestinian Territory', 'Portugal', 'Palau', 'Paraguay', 'Qatar',
'Reunion', 'Romania', 'Russian Federation', 'Rwanda', 'Saudi Arabia',
'Solomon Islands', 'Seychelles', 'Sudan', 'Sweden', 'Singapore',
'Saint Helena', 'Slovenia', 'Svalbard and Jan Mayen', 'Slovakia',
'Sierra Leone', 'San Marino', 'Senegal', 'Somalia', 'Suriname',
'Sao Tome and Principe', 'El Salvador', 'Syrian Arab Republic',
'Swaziland', 'Turks and Caicos Islands', 'Chad',
'French Southern Territories', 'Togo', 'Thailand', 'Tajikistan', 'Tokelau',
'Turkmenistan', 'Tunisia', 'Tonga', 'Timor-Leste', 'Turkey',
'Trinidad and Tobago', 'Tuvalu', 'Taiwan', 'Tanzania, United Republic of',
'Ukraine', 'Uganda', 'United States Minor Outlying Islands',
'United States', 'Uruguay', 'Uzbekistan', 'Holy See (Vatican City State)',
'Saint Vincent and the Grenadines', 'Venezuela', 'Virgin Islands, British',
'Virgin Islands, U.S.', 'Vietnam', 'Vanuatu', 'Wallis and Futuna', 'Samoa',
'Yemen', 'Mayotte', 'Serbia', 'South Africa', 'Zambia', 'Montenegro',
'Zimbabwe', 'Anonymous Proxy', 'Satellite Provider', 'Other',
'Aland Islands', 'Guernsey', 'Isle of Man', 'Jersey', 'Saint Barthelemy',
'Saint Martin', 'Bonaire, Sint Eustatius and Saba', 'South Sudan'
)
CONTINENT_NAMES = (
'--', 'AS', 'EU', 'EU', 'AS', 'AS', 'NA', 'NA', 'EU', 'AS', 'NA', 'AF',
'AN', 'SA', 'OC', 'EU', 'OC', 'NA', 'AS', 'EU', 'NA', 'AS', 'EU', 'AF',
'EU', 'AS', 'AF', 'AF', 'NA', 'AS', 'SA', 'SA', 'NA', 'AS', 'AN', 'AF',
'EU', 'NA', 'NA', 'AS', 'AF', 'AF', 'AF', 'EU', 'AF', 'OC', 'SA', 'AF',
'AS', 'SA', 'NA', 'NA', 'AF', 'AS', 'AS', 'EU', 'EU', 'AF', 'EU', 'NA',
'NA', 'AF', 'SA', 'EU', 'AF', 'AF', 'AF', 'EU', 'AF', 'EU', 'OC', 'SA',
'OC', 'EU', 'EU', 'NA', 'AF', 'EU', 'NA', 'AS', 'SA', 'AF', 'EU', 'NA',
'AF', 'AF', 'NA', 'AF', 'EU', 'AN', 'NA', 'OC', 'AF', 'SA', 'AS', 'AN',
'NA', 'EU', 'NA', 'EU', 'AS', 'EU', 'AS', 'AS', 'AS', 'AS', 'AS', 'EU',
'EU', 'NA', 'AS', 'AS', 'AF', 'AS', 'AS', 'OC', 'AF', 'NA', 'AS', 'AS',
'AS', 'NA', 'AS', 'AS', 'AS', 'NA', 'EU', 'AS', 'AF', 'AF', 'EU', 'EU',
'EU', 'AF', 'AF', 'EU', 'EU', 'AF', 'OC', 'EU', 'AF', 'AS', 'AS', 'AS',
'OC', 'NA', 'AF', 'NA', 'EU', 'AF', 'AS', 'AF', 'NA', 'AS', 'AF', 'AF',
'OC', 'AF', 'OC', 'AF', 'NA', 'EU', 'EU', 'AS', 'OC', 'OC', 'OC', 'AS',
'NA', 'SA', 'OC', 'OC', 'AS', 'AS', 'EU', 'NA', 'OC', 'NA', 'AS', 'EU',
'OC', 'SA', 'AS', 'AF', 'EU', 'EU', 'AF', 'AS', 'OC', 'AF', 'AF', 'EU',
'AS', 'AF', 'EU', 'EU', 'EU', 'AF', 'EU', 'AF', 'AF', 'SA', 'AF', 'NA',
'AS', 'AF', 'NA', 'AF', 'AN', 'AF', 'AS', 'AS', 'OC', 'AS', 'AF', 'OC',
'AS', 'EU', 'NA', 'OC', 'AS', 'AF', 'EU', 'AF', 'OC', 'NA', 'SA', 'AS',
'EU', 'NA', 'SA', 'NA', 'NA', 'AS', 'OC', 'OC', 'OC', 'AS', 'AF', 'EU',
'AF', 'AF', 'EU', 'AF', '--', '--', '--', 'EU', 'EU', 'EU', 'EU', 'NA',
'NA', 'NA', 'AF'
)
NETSPEED_NAMES = (
'Unknown', 'Dial-up', 'Cable', 'Corporate'
)
# storage / caching flags
STANDARD = 0
MEMORY_CACHE = 1
MMAP_CACHE = 8
# Database structure constants
COUNTRY_BEGIN = 16776960
STATE_BEGIN_REV0 = 16700000
STATE_BEGIN_REV1 = 16000000
STRUCTURE_INFO_MAX_SIZE = 20
DATABASE_INFO_MAX_SIZE = 100
# Database editions
COUNTRY_EDITION = 1
COUNTRY_EDITION_V6 = 12
REGION_EDITION_REV0 = 7
REGION_EDITION_REV1 = 3
CITY_EDITION_REV0 = 6
CITY_EDITION_REV1 = 2
CITY_EDITION_REV1_V6 = 30
ORG_EDITION = 5
ISP_EDITION = 4
ASNUM_EDITION = 9
ASNUM_EDITION_V6 = 21
NETSPEED_EDITION = 10
NETSPEED_EDITION_REV1 = 32
NETSPEED_EDITION_REV1_V6 = 33
# Not yet supported databases
PROXY_EDITION = 8
# Collection of databases
IPV6_EDITIONS = (COUNTRY_EDITION_V6, ASNUM_EDITION_V6, CITY_EDITION_REV1_V6)
CITY_EDITIONS = (CITY_EDITION_REV0, CITY_EDITION_REV1, CITY_EDITION_REV1_V6)
REGION_EDITIONS = (REGION_EDITION_REV0, REGION_EDITION_REV1)
REGION_CITY_EDITIONS = REGION_EDITIONS + CITY_EDITIONS
SEGMENT_RECORD_LENGTH = 3
STANDARD_RECORD_LENGTH = 3
ORG_RECORD_LENGTH = 4
MAX_RECORD_LENGTH = 4
MAX_ORG_RECORD_LENGTH = 300
FULL_RECORD_LENGTH = 50
US_OFFSET = 1
CANADA_OFFSET = 677
WORLD_OFFSET = 1353
FIPS_RANGE = 360
ENCODING = 'iso-8859-1'
def time_zone_by_country_and_region(country_code, region_code=None):
"""
Returns time zone from country and region code.
:arg country_code: Country code
:arg region_code: Region code
"""
timezone = country_dict.get(country_code)
if not timezone:
return None
if isinstance(timezone, str):
return timezone
return timezone.get(region_code)
country_dict = {
'AD': 'Europe/Andorra',
'AE': 'Asia/Dubai',
'AF': 'Asia/Kabul',
'AG': 'America/Antigua',
'AI': 'America/Anguilla',
'AL': 'Europe/Tirane',
'AM': 'Asia/Yerevan',
'AN': 'America/Curacao',
'AO': 'Africa/Luanda',
'AR': {
'01': 'America/Argentina/Buenos_Aires',
'02': 'America/Argentina/Catamarca',
'03': 'America/Argentina/Tucuman',
'04': 'America/Argentina/Rio_Gallegos',
'05': 'America/Argentina/Cordoba',
'06': 'America/Argentina/Tucuman',
'07': 'America/Argentina/Buenos_Aires',
'08': 'America/Argentina/Buenos_Aires',
'09': 'America/Argentina/Tucuman',
'10': 'America/Argentina/Jujuy',
'11': 'America/Argentina/San_Luis',
'12': 'America/Argentina/La_Rioja',
'13': 'America/Argentina/Mendoza',
'14': 'America/Argentina/Buenos_Aires',
'15': 'America/Argentina/San_Luis',
'16': 'America/Argentina/Buenos_Aires',
'17': 'America/Argentina/Salta',
'18': 'America/Argentina/San_Juan',
'19': 'America/Argentina/San_Luis',
'20': 'America/Argentina/Rio_Gallegos',
'21': 'America/Argentina/Buenos_Aires',
'22': 'America/Argentina/Catamarca',
'23': 'America/Argentina/Ushuaia',
'24': 'America/Argentina/Tucuman'
},
'AS': 'US/Samoa',
'AT': 'Europe/Vienna',
'AU': {
'01': 'Australia/Canberra',
'02': 'Australia/NSW',
'03': 'Australia/North',
'04': 'Australia/Queensland',
'05': 'Australia/South',
'06': 'Australia/Tasmania',
'07': 'Australia/Victoria',
'08': 'Australia/West'
},
'AW': 'America/Aruba',
'AX': 'Europe/Mariehamn',
'AZ': 'Asia/Baku',
'BA': 'Europe/Sarajevo',
'BB': 'America/Barbados',
'BD': 'Asia/Dhaka',
'BE': 'Europe/Brussels',
'BF': 'Africa/Ouagadougou',
'BG': 'Europe/Sofia',
'BH': 'Asia/Bahrain',
'BI': 'Africa/Bujumbura',
'BJ': 'Africa/Porto-Novo',
'BL': 'America/St_Barthelemy',
'BM': 'Atlantic/Bermuda',
'BN': 'Asia/Brunei',
'BO': 'America/La_Paz',
'BQ': 'America/Curacao',
'BR': {
'01': 'America/Rio_Branco',
'02': 'America/Maceio',
'03': 'America/Sao_Paulo',
'04': 'America/Manaus',
'05': 'America/Bahia',
'06': 'America/Fortaleza',
'07': 'America/Sao_Paulo',
'08': 'America/Sao_Paulo',
'11': 'America/Campo_Grande',
'13': 'America/Belem',
'14': 'America/Cuiaba',
'15': 'America/Sao_Paulo',
'16': 'America/Belem',
'17': 'America/Recife',
'18': 'America/Sao_Paulo',
'20': 'America/Fortaleza',
'21': 'America/Sao_Paulo',
'22': 'America/Recife',
'23': 'America/Sao_Paulo',
'24': 'America/Porto_Velho',
'25': 'America/Boa_Vista',
'26': 'America/Sao_Paulo',
'27': 'America/Sao_Paulo',
'28': 'America/Maceio',
'29': 'America/Sao_Paulo',
'30': 'America/Recife',
'31': 'America/Araguaina'
},
'BS': 'America/Nassau',
'BT': 'Asia/Thimphu',
'BW': 'Africa/Gaborone',
'BY': 'Europe/Minsk',
'BZ': 'America/Belize',
'CA': {
'AB': 'America/Edmonton',
'BC': 'America/Vancouver',
'MB': 'America/Winnipeg',
'NB': 'America/Halifax',
'NL': 'America/St_Johns',
'NS': 'America/Halifax',
'NT': 'America/Yellowknife',
'NU': 'America/Rankin_Inlet',
'ON': 'America/Toronto',
'PE': 'America/Halifax',
'QC': 'America/Montreal',
'SK': 'America/Regina',
'YT': 'America/Whitehorse'
},
'CC': 'Indian/Cocos',
'CD': {
'02': 'Africa/Kinshasa',
'05': 'Africa/Lubumbashi',
'06': 'Africa/Kinshasa',
'08': 'Africa/Kinshasa',
'10': 'Africa/Lubumbashi',
'11': 'Africa/Lubumbashi',
'12': 'Africa/Lubumbashi'
},
'CF': 'Africa/Bangui',
'CG': 'Africa/Brazzaville',
'CH': 'Europe/Zurich',
'CI': 'Africa/Abidjan',
'CK': 'Pacific/Rarotonga',
'CL': 'Chile/Continental',
'CM': 'Africa/Lagos',
'CN': {
'01': 'Asia/Shanghai',
'02': 'Asia/Shanghai',
'03': 'Asia/Shanghai',
'04': 'Asia/Shanghai',
'05': 'Asia/Harbin',
'06': 'Asia/Chongqing',
'07': 'Asia/Shanghai',
'08': 'Asia/Harbin',
'09': 'Asia/Shanghai',
'10': 'Asia/Shanghai',
'11': 'Asia/Chongqing',
'12': 'Asia/Shanghai',
'13': 'Asia/Urumqi',
'14': 'Asia/Chongqing',
'15': 'Asia/Chongqing',
'16': 'Asia/Chongqing',
'18': 'Asia/Chongqing',
'19': 'Asia/Harbin',
'20': 'Asia/Harbin',
'21': 'Asia/Chongqing',
'22': 'Asia/Harbin',
'23': 'Asia/Shanghai',
'24': 'Asia/Chongqing',
'25': 'Asia/Shanghai',
'26': 'Asia/Chongqing',
'28': 'Asia/Shanghai',
'29': 'Asia/Chongqing',
'30': 'Asia/Chongqing',
'31': 'Asia/Chongqing',
'32': 'Asia/Chongqing',
'33': 'Asia/Chongqing'
},
'CO': 'America/Bogota',
'CR': 'America/Costa_Rica',
'CU': 'America/Havana',
'CV': 'Atlantic/Cape_Verde',
'CW': 'America/Curacao',
'CX': 'Indian/Christmas',
'CY': 'Asia/Nicosia',
'CZ': 'Europe/Prague',
'DE': 'Europe/Berlin',
'DJ': 'Africa/Djibouti',
'DK': 'Europe/Copenhagen',
'DM': 'America/Dominica',
'DO': 'America/Santo_Domingo',
'DZ': 'Africa/Algiers',
'EC': {
'01': 'Pacific/Galapagos',
'02': 'America/Guayaquil',
'03': 'America/Guayaquil',
'04': 'America/Guayaquil',
'05': 'America/Guayaquil',
'06': 'America/Guayaquil',
'07': 'America/Guayaquil',
'08': 'America/Guayaquil',
'09': 'America/Guayaquil',
'10': 'America/Guayaquil',
'11': 'America/Guayaquil',
'12': 'America/Guayaquil',
'13': 'America/Guayaquil',
'14': 'America/Guayaquil',
'15': 'America/Guayaquil',
'17': 'America/Guayaquil',
'18': 'America/Guayaquil',
'19': 'America/Guayaquil',
'20': 'America/Guayaquil',
'22': 'America/Guayaquil'
},
'EE': 'Europe/Tallinn',
'EG': 'Africa/Cairo',
'EH': 'Africa/El_Aaiun',
'ER': 'Africa/Asmera',
'ES': {
'07': 'Europe/Madrid',
'27': 'Europe/Madrid',
'29': 'Europe/Madrid',
'31': 'Europe/Madrid',
'32': 'Europe/Madrid',
'34': 'Europe/Madrid',
'39': 'Europe/Madrid',
'51': 'Africa/Ceuta',
'52': 'Europe/Madrid',
'53': 'Atlantic/Canary',
'54': 'Europe/Madrid',
'55': 'Europe/Madrid',
'56': 'Europe/Madrid',
'57': 'Europe/Madrid',
'58': 'Europe/Madrid',
'59': 'Europe/Madrid',
'60': 'Europe/Madrid'
},
'ET': 'Africa/Addis_Ababa',
'FI': 'Europe/Helsinki',
'FJ': 'Pacific/Fiji',
'FK': 'Atlantic/Stanley',
'FO': 'Atlantic/Faeroe',
'FR': 'Europe/Paris',
'FX': 'Europe/Paris',
'GA': 'Africa/Libreville',
'GB': 'Europe/London',
'GD': 'America/Grenada',
'GE': 'Asia/Tbilisi',
'GF': 'America/Cayenne',
'GG': 'Europe/Guernsey',
'GH': 'Africa/Accra',
'GI': 'Europe/Gibraltar',
'GL': {
'01': 'America/Thule',
'02': 'America/Godthab',
'03': 'America/Godthab'
},
'GM': 'Africa/Banjul',
'GN': 'Africa/Conakry',
'GP': 'America/Guadeloupe',
'GQ': 'Africa/Malabo',
'GR': 'Europe/Athens',
'GS': 'Atlantic/South_Georgia',
'GT': 'America/Guatemala',
'GU': 'Pacific/Guam',
'GW': 'Africa/Bissau',
'GY': 'America/Guyana',
'HK': 'Asia/Hong_Kong',
'HN': 'America/Tegucigalpa',
'HR': 'Europe/Zagreb',
'HT': 'America/Port-au-Prince',
'HU': 'Europe/Budapest',
'ID': {
'01': 'Asia/Pontianak',
'02': 'Asia/Makassar',
'03': 'Asia/Jakarta',
'04': 'Asia/Jakarta',
'05': 'Asia/Jakarta',
'06': 'Asia/Jakarta',
'07': 'Asia/Jakarta',
'08': 'Asia/Jakarta',
'09': 'Asia/Jayapura',
'10': 'Asia/Jakarta',
'11': 'Asia/Pontianak',
'12': 'Asia/Makassar',
'13': 'Asia/Makassar',
'14': 'Asia/Makassar',
'15': 'Asia/Jakarta',
'16': 'Asia/Makassar',
'17': 'Asia/Makassar',
'18': 'Asia/Makassar',
'19': 'Asia/Pontianak',
'20': 'Asia/Makassar',
'21': 'Asia/Makassar',
'22': 'Asia/Makassar',
'23': 'Asia/Makassar',
'24': 'Asia/Jakarta',
'25': 'Asia/Pontianak',
'26': 'Asia/Pontianak',
'30': 'Asia/Jakarta',
'31': 'Asia/Makassar',
'33': 'Asia/Jakarta'
},
'IE': 'Europe/Dublin',
'IL': 'Asia/Jerusalem',
'IM': 'Europe/Isle_of_Man',
'IN': 'Asia/Calcutta',
'IO': 'Indian/Chagos',
'IQ': 'Asia/Baghdad',
'IR': 'Asia/Tehran',
'IS': 'Atlantic/Reykjavik',
'IT': 'Europe/Rome',
'JE': 'Europe/Jersey',
'JM': 'America/Jamaica',
'JO': 'Asia/Amman',
'JP': 'Asia/Tokyo',
'KE': 'Africa/Nairobi',
'KG': 'Asia/Bishkek',
'KH': 'Asia/Phnom_Penh',
'KI': 'Pacific/Tarawa',
'KM': 'Indian/Comoro',
'KN': 'America/St_Kitts',
'KP': 'Asia/Pyongyang',
'KR': 'Asia/Seoul',
'KW': 'Asia/Kuwait',
'KY': 'America/Cayman',
'KZ': {
'01': 'Asia/Almaty',
'02': 'Asia/Almaty',
'03': 'Asia/Qyzylorda',
'04': 'Asia/Aqtobe',
'05': 'Asia/Qyzylorda',
'06': 'Asia/Aqtau',
'07': 'Asia/Oral',
'08': 'Asia/Qyzylorda',
'09': 'Asia/Aqtau',
'10': 'Asia/Qyzylorda',
'11': 'Asia/Almaty',
'12': 'Asia/Qyzylorda',
'13': 'Asia/Aqtobe',
'14': 'Asia/Qyzylorda',
'15': 'Asia/Almaty',
'16': 'Asia/Aqtobe',
'17': 'Asia/Almaty'
},
'LA': 'Asia/Vientiane',
'LB': 'Asia/Beirut',
'LC': 'America/St_Lucia',
'LI': 'Europe/Vaduz',
'LK': 'Asia/Colombo',
'LR': 'Africa/Monrovia',
'LS': 'Africa/Maseru',
'LT': 'Europe/Vilnius',
'LU': 'Europe/Luxembourg',
'LV': 'Europe/Riga',
'LY': 'Africa/Tripoli',
'MA': 'Africa/Casablanca',
'MC': 'Europe/Monaco',
'MD': 'Europe/Chisinau',
'ME': 'Europe/Podgorica',
'MF': 'America/Marigot',
'MG': 'Indian/Antananarivo',
'MK': 'Europe/Skopje',
'ML': 'Africa/Bamako',
'MM': 'Asia/Rangoon',
'MN': 'Asia/Choibalsan',
'MO': 'Asia/Macao',
'MP': 'Pacific/Saipan',
'MQ': 'America/Martinique',
'MR': 'Africa/Nouakchott',
'MS': 'America/Montserrat',
'MT': 'Europe/Malta',
'MU': 'Indian/Mauritius',
'MV': 'Indian/Maldives',
'MW': 'Africa/Blantyre',
'MX': {
'01': 'America/Mexico_City',
'02': 'America/Tijuana',
'03': 'America/Hermosillo',
'04': 'America/Merida',
'05': 'America/Mexico_City',
'06': 'America/Chihuahua',
'07': 'America/Monterrey',
'08': 'America/Mexico_City',
'09': 'America/Mexico_City',
'10': 'America/Mazatlan',
'11': 'America/Mexico_City',
'12': 'America/Mexico_City',
'13': 'America/Mexico_City',
'14': 'America/Mazatlan',
'15': 'America/Chihuahua',
'16': 'America/Mexico_City',
'17': 'America/Mexico_City',
'18': 'America/Mazatlan',
'19': 'America/Monterrey',
'20': 'America/Mexico_City',
'21': 'America/Mexico_City',
'22': 'America/Mexico_City',
'23': 'America/Cancun',
'24': 'America/Mexico_City',
'25': 'America/Mazatlan',
'26': 'America/Hermosillo',
'27': 'America/Merida',
'28': 'America/Monterrey',
'29': 'America/Mexico_City',
'30': 'America/Mexico_City',
'31': 'America/Merida',
'32': 'America/Monterrey'
},
'MY': {
'01': 'Asia/Kuala_Lumpur',
'02': 'Asia/Kuala_Lumpur',
'03': 'Asia/Kuala_Lumpur',
'04': 'Asia/Kuala_Lumpur',
'05': 'Asia/Kuala_Lumpur',
'06': 'Asia/Kuala_Lumpur',
'07': 'Asia/Kuala_Lumpur',
'08': 'Asia/Kuala_Lumpur',
'09': 'Asia/Kuala_Lumpur',
'11': 'Asia/Kuching',
'12': 'Asia/Kuala_Lumpur',
'13': 'Asia/Kuala_Lumpur',
'14': 'Asia/Kuala_Lumpur',
'15': 'Asia/Kuching',
'16': 'Asia/Kuching'
},
'MZ': 'Africa/Maputo',
'NA': 'Africa/Windhoek',
'NC': 'Pacific/Noumea',
'NE': 'Africa/Niamey',
'NF': 'Pacific/Norfolk',
'NG': 'Africa/Lagos',
'NI': 'America/Managua',
'NL': 'Europe/Amsterdam',
'NO': 'Europe/Oslo',
'NP': 'Asia/Katmandu',
'NR': 'Pacific/Nauru',
'NU': 'Pacific/Niue',
'NZ': {
'85': 'Pacific/Auckland',
'E7': 'Pacific/Auckland',
'E8': 'Pacific/Auckland',
'E9': 'Pacific/Auckland',
'F1': 'Pacific/Auckland',
'F2': 'Pacific/Auckland',
'F3': 'Pacific/Auckland',
'F4': 'Pacific/Auckland',
'F5': 'Pacific/Auckland',
'F7': 'Pacific/Chatham',
'F8': 'Pacific/Auckland',
'F9': 'Pacific/Auckland',
'G1': 'Pacific/Auckland',
'G2': 'Pacific/Auckland',
'G3': 'Pacific/Auckland'
},
'OM': 'Asia/Muscat',
'PA': 'America/Panama',
'PE': 'America/Lima',
'PF': 'Pacific/Marquesas',
'PG': 'Pacific/Port_Moresby',
'PH': 'Asia/Manila',
'PK': 'Asia/Karachi',
'PL': 'Europe/Warsaw',
'PM': 'America/Miquelon',
'PN': 'Pacific/Pitcairn',
'PR': 'America/Puerto_Rico',
'PS': 'Asia/Gaza',
'PT': {
'02': 'Europe/Lisbon',
'03': 'Europe/Lisbon',
'04': 'Europe/Lisbon',
'05': 'Europe/Lisbon',
'06': 'Europe/Lisbon',
'07': 'Europe/Lisbon',
'08': 'Europe/Lisbon',
'09': 'Europe/Lisbon',
'10': 'Atlantic/Madeira',
'11': 'Europe/Lisbon',
'13': 'Europe/Lisbon',
'14': 'Europe/Lisbon',
'16': 'Europe/Lisbon',
'17': 'Europe/Lisbon',
'18': 'Europe/Lisbon',
'19': 'Europe/Lisbon',
'20': 'Europe/Lisbon',
'21': 'Europe/Lisbon',
'22': 'Europe/Lisbon'
},
'PW': 'Pacific/Palau',
'PY': 'America/Asuncion',
'QA': 'Asia/Qatar',
'RE': 'Indian/Reunion',
'RO': 'Europe/Bucharest',
'RS': 'Europe/Belgrade',
'RU': {
'01': 'Europe/Volgograd',
'02': 'Asia/Irkutsk',
'03': 'Asia/Novokuznetsk',
'04': 'Asia/Novosibirsk',
'05': 'Asia/Vladivostok',
'06': 'Europe/Moscow',
'07': 'Europe/Volgograd',
'08': 'Europe/Samara',
'09': 'Europe/Moscow',
'10': 'Europe/Moscow',
'11': 'Asia/Irkutsk',
'13': 'Asia/Yekaterinburg',
'14': 'Asia/Irkutsk',
'15': 'Asia/Anadyr',
'16': 'Europe/Samara',
'17': 'Europe/Volgograd',
'18': 'Asia/Krasnoyarsk',
'20': 'Asia/Irkutsk',
'21': 'Europe/Moscow',
'22': 'Europe/Volgograd',
'23': 'Europe/Kaliningrad',
'24': 'Europe/Volgograd',
'25': 'Europe/Moscow',
'26': 'Asia/Kamchatka',
'27': 'Europe/Volgograd',
'28': 'Europe/Moscow',
'29': 'Asia/Novokuznetsk',
'30': 'Asia/Vladivostok',
'31': 'Asia/Krasnoyarsk',
'32': 'Asia/Omsk',
'33': 'Asia/Yekaterinburg',
'34': 'Asia/Yekaterinburg',
'35': 'Asia/Yekaterinburg',
'36': 'Asia/Anadyr',
'37': 'Europe/Moscow',
'38': 'Europe/Volgograd',
'39': 'Asia/Krasnoyarsk',
'40': 'Asia/Yekaterinburg',
'41': 'Europe/Moscow',
'42': 'Europe/Moscow',
'43': 'Europe/Moscow',
'44': 'Asia/Magadan',
'45': 'Europe/Samara',
'46': 'Europe/Samara',
'47': 'Europe/Moscow',
'48': 'Europe/Moscow',
'49': 'Europe/Moscow',
'50': 'Asia/Yekaterinburg',
'51': 'Europe/Moscow',
'52': 'Europe/Moscow',
'53': 'Asia/Novosibirsk',
'54': 'Asia/Omsk',
'55': 'Europe/Samara',
'56': 'Europe/Moscow',
'57': 'Europe/Samara',
'58': 'Asia/Yekaterinburg',
'59': 'Asia/Vladivostok',
'60': 'Europe/Kaliningrad',
'61': 'Europe/Volgograd',
'62': 'Europe/Moscow',
'63': 'Asia/Yakutsk',
'64': 'Asia/Sakhalin',
'65': 'Europe/Samara',
'66': 'Europe/Moscow',
'67': 'Europe/Samara',
'68': 'Europe/Volgograd',
'69': 'Europe/Moscow',
'70': 'Europe/Volgograd',
'71': 'Asia/Yekaterinburg',
'72': 'Europe/Moscow',
'73': 'Europe/Samara',
'74': 'Asia/Krasnoyarsk',
'75': 'Asia/Novosibirsk',
'76': 'Europe/Moscow',
'77': 'Europe/Moscow',
'78': 'Asia/Yekaterinburg',
'79': 'Asia/Irkutsk',
'80': 'Asia/Yekaterinburg',
'81': 'Europe/Samara',
'82': 'Asia/Irkutsk',
'83': 'Europe/Moscow',
'84': 'Europe/Volgograd',
'85': 'Europe/Moscow',
'86': 'Europe/Moscow',
'87': 'Asia/Novosibirsk',
'88': 'Europe/Moscow',
'89': 'Asia/Vladivostok'
},
'RW': 'Africa/Kigali',
'SA': 'Asia/Riyadh',
'SB': 'Pacific/Guadalcanal',
'SC': 'Indian/Mahe',
'SD': 'Africa/Khartoum',
'SE': 'Europe/Stockholm',
'SG': 'Asia/Singapore',
'SH': 'Atlantic/St_Helena',
'SI': 'Europe/Ljubljana',
'SJ': 'Arctic/Longyearbyen',
'SK': 'Europe/Bratislava',
'SL': 'Africa/Freetown',
'SM': 'Europe/San_Marino',
'SN': 'Africa/Dakar',
'SO': 'Africa/Mogadishu',
'SR': 'America/Paramaribo',
'SS': 'Africa/Juba',
'ST': 'Africa/Sao_Tome',
'SV': 'America/El_Salvador',
'SX': 'America/Curacao',
'SY': 'Asia/Damascus',
'SZ': 'Africa/Mbabane',
'TC': 'America/Grand_Turk',
'TD': 'Africa/Ndjamena',
'TF': 'Indian/Kerguelen',
'TG': 'Africa/Lome',
'TH': 'Asia/Bangkok',
'TJ': 'Asia/Dushanbe',
'TK': 'Pacific/Fakaofo',
'TL': 'Asia/Dili',
'TM': 'Asia/Ashgabat',
'TN': 'Africa/Tunis',
'TO': 'Pacific/Tongatapu',
'TR': 'Asia/Istanbul',
'TT': 'America/Port_of_Spain',
'TV': 'Pacific/Funafuti',
'TW': 'Asia/Taipei',
'TZ': 'Africa/Dar_es_Salaam',
'UA': {
'01': 'Europe/Kiev',
'02': 'Europe/Kiev',
'03': 'Europe/Uzhgorod',
'04': 'Europe/Zaporozhye',
'05': 'Europe/Zaporozhye',
'06': 'Europe/Uzhgorod',
'07': 'Europe/Zaporozhye',
'08': 'Europe/Simferopol',
'09': 'Europe/Kiev',
'10': 'Europe/Zaporozhye',
'11': 'Europe/Simferopol',
'12': 'Europe/Kiev',
'13': 'Europe/Kiev',
'14': 'Europe/Zaporozhye',
'15': 'Europe/Uzhgorod',
'16': 'Europe/Zaporozhye',
'17': 'Europe/Simferopol',
'18': 'Europe/Zaporozhye',
'19': 'Europe/Kiev',
'20': 'Europe/Simferopol',
'21': 'Europe/Kiev',
'22': 'Europe/Uzhgorod',
'23': 'Europe/Kiev',
'24': 'Europe/Uzhgorod',
'25': 'Europe/Uzhgorod',
'26': 'Europe/Zaporozhye',
'27': 'Europe/Kiev'
},
'UG': 'Africa/Kampala',
'US': {
'AK': 'America/Anchorage',
'AL': 'America/Chicago',
'AR': 'America/Chicago',
'AZ': 'America/Phoenix',
'CA': 'America/Los_Angeles',
'CO': 'America/Denver',
'CT': 'America/New_York',
'DC': 'America/New_York',
'DE': 'America/New_York',
'FL': 'America/New_York',
'GA': 'America/New_York',
'HI': 'Pacific/Honolulu',
'IA': 'America/Chicago',
'ID': 'America/Denver',
'IL': 'America/Chicago',
'IN': 'America/Indianapolis',
'KS': 'America/Chicago',
'KY': 'America/New_York',
'LA': 'America/Chicago',
'MA': 'America/New_York',
'MD': 'America/New_York',
'ME': 'America/New_York',
'MI': 'America/New_York',
'MN': 'America/Chicago',
'MO': 'America/Chicago',
'MS': 'America/Chicago',
'MT': 'America/Denver',
'NC': 'America/New_York',
'ND': 'America/Chicago',
'NE': 'America/Chicago',
'NH': 'America/New_York',
'NJ': 'America/New_York',
'NM': 'America/Denver',
'NV': 'America/Los_Angeles',
'NY': 'America/New_York',
'OH': 'America/New_York',
'OK': 'America/Chicago',
'OR': 'America/Los_Angeles',
'PA': 'America/New_York',
'RI': 'America/New_York',
'SC': 'America/New_York',
'SD': 'America/Chicago',
'TN': 'America/Chicago',
'TX': 'America/Chicago',
'UT': 'America/Denver',
'VA': 'America/New_York',
'VT': 'America/New_York',
'WA': 'America/Los_Angeles',
'WI': 'America/Chicago',
'WV': 'America/New_York',
'WY': 'America/Denver'
},
'UY': 'America/Montevideo',
'UZ': {
'01': 'Asia/Tashkent',
'02': 'Asia/Samarkand',
'03': 'Asia/Tashkent',
'06': 'Asia/Tashkent',
'07': 'Asia/Samarkand',
'08': 'Asia/Samarkand',
'09': 'Asia/Samarkand',
'10': 'Asia/Samarkand',
'12': 'Asia/Samarkand',
'13': 'Asia/Tashkent',
'14': 'Asia/Tashkent'
},
'VA': 'Europe/Vatican',
'VC': 'America/St_Vincent',
'VE': 'America/Caracas',
'VG': 'America/Tortola',
'VI': 'America/St_Thomas',
'VN': 'Asia/Phnom_Penh',
'VU': 'Pacific/Efate',
'WF': 'Pacific/Wallis',
'WS': 'Pacific/Samoa',
'YE': 'Asia/Aden',
'YT': 'Indian/Mayotte',
'YU': 'Europe/Belgrade',
'ZA': 'Africa/Johannesburg',
'ZM': 'Africa/Lusaka',
'ZW': 'Africa/Harare'
}
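# Examples (values taken from the table above):
# time_zone_by_country_and_region('DE') -> 'Europe/Berlin'
# time_zone_by_country_and_region('US', 'CO') -> 'America/Denver'
# time_zone_by_country_and_region('US') -> None (a region is needed for dict entries)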
def ip2long(ip):
"""
Wrapper function for IPv4 and IPv6 converters.
:arg ip: IPv4 or IPv6 address
"""
try:
return int(binascii.hexlify(socket.inet_aton(ip)), 16)
except socket.error:
return int(binascii.hexlify(socket.inet_pton(socket.AF_INET6, ip)), 16)
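# Examples: ip2long('127.0.0.1') == 2130706433; IPv6 addresses map into the
# 128-bit space, e.g. ip2long('::1') == 1.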
def str2fp(data):
"""
    Convert string data to a file-like object (StringIO or BytesIO).
:arg data: String data to transform
"""
return BytesIO(bytearray(data, ENCODING)) if PY3 else StringIO(data)
range = xrange if PY2 else range
class GeoIPError(Exception):
"""
    Thin wrapper around `Exception`; raised in case of an
    unrecoverable error.
"""
pass
class _GeoIPMetaclass(type):
_instances = {}
_instance_lock = Lock()
def __call__(cls, *args, **kwargs):
"""
        Singleton method that gets an instance without reparsing
        the database; the filename is used as the cache key.
"""
if len(args) > 0:
filename = args[0]
elif 'filename' in kwargs:
filename = kwargs['filename']
else:
return None
if not kwargs.get('cache', True):
return super(_GeoIPMetaclass, cls).__call__(*args, **kwargs)
try:
cls._instance_lock.acquire()
if filename not in cls._instances:
cls._instances[filename] = super(_GeoIPMetaclass, cls).__call__(*args, **kwargs)
finally:
cls._instance_lock.release()
return cls._instances[filename]
class GeoIP(object):
__metaclass__ = _GeoIPMetaclass
def __init__(self, filename, flags=STANDARD, cache=True):
"""
        Create and return a GeoIP instance.
:arg filename: File path to a GeoIP database
:arg flags: Flags that affect how the database is processed.
Currently supported flags are STANDARD (default),
MEMORY_CACHE (preload the whole file into memory) and
MMAP_CACHE (access the file via mmap)
:arg cache: Used in tests to skip instance caching
"""
self._lock = Lock()
self._flags = flags
self._netmask = None
if self._flags & MMAP_CACHE and mmap is None: # pragma: no cover
import warnings
warnings.warn("MMAP_CACHE cannot be used without a mmap module")
self._flags &= ~MMAP_CACHE
if self._flags & MMAP_CACHE:
f = codecs.open(filename, 'rb', ENCODING)
access = mmap.ACCESS_READ
self._fp = mmap.mmap(f.fileno(), 0, access=access)
self._type = 'MMAP_CACHE'
f.close()
elif self._flags & MEMORY_CACHE:
f = codecs.open(filename, 'rb', ENCODING)
self._memory = f.read()
self._fp = str2fp(self._memory)
self._type = 'MEMORY_CACHE'
f.close()
else:
self._fp = codecs.open(filename, 'rb', ENCODING)
self._type = 'STANDARD'
try:
self._lock.acquire()
self._setup_segments()
finally:
self._lock.release()
def _setup_segments(self):
"""
        Parses the database file to determine what kind of database is
        being used and sets up segment sizes and start points that will
        be used by the seek*() methods later.
"""
self._databaseType = COUNTRY_EDITION
self._recordLength = STANDARD_RECORD_LENGTH
self._databaseSegments = COUNTRY_BEGIN
filepos = self._fp.tell()
self._fp.seek(-3, os.SEEK_END)
for i in range(STRUCTURE_INFO_MAX_SIZE):
chars = chr(255) * 3
delim = self._fp.read(3)
if PY3 and type(delim) is bytes:
delim = delim.decode(ENCODING)
if PY2:
chars = chars.decode(ENCODING)
if type(delim) is str:
delim = delim.decode(ENCODING)
if delim == chars:
byte = self._fp.read(1)
self._databaseType = ord(byte)
# Compatibility with databases from April 2003 and earlier
if self._databaseType >= 106:
self._databaseType -= 105
if self._databaseType == REGION_EDITION_REV0:
self._databaseSegments = STATE_BEGIN_REV0
elif self._databaseType == REGION_EDITION_REV1:
self._databaseSegments = STATE_BEGIN_REV1
elif self._databaseType in (CITY_EDITION_REV0,
CITY_EDITION_REV1,
CITY_EDITION_REV1_V6,
ORG_EDITION,
ISP_EDITION,
NETSPEED_EDITION_REV1,
NETSPEED_EDITION_REV1_V6,
ASNUM_EDITION,
ASNUM_EDITION_V6):
self._databaseSegments = 0
buf = self._fp.read(SEGMENT_RECORD_LENGTH)
if PY3 and type(buf) is bytes:
buf = buf.decode(ENCODING)
for j in range(SEGMENT_RECORD_LENGTH):
self._databaseSegments += (ord(buf[j]) << (j * 8))
LONG_RECORDS = (ORG_EDITION, ISP_EDITION)
if self._databaseType in LONG_RECORDS:
self._recordLength = ORG_RECORD_LENGTH
break
else:
self._fp.seek(-4, os.SEEK_CUR)
self._fp.seek(filepos, os.SEEK_SET)
def _seek_country(self, ipnum):
"""
Using the record length and appropriate start points, seek to the
country that corresponds to the converted IP address integer.
Return offset of record.
:arg ipnum: Result of ip2long conversion
"""
try:
offset = 0
seek_depth = 127 if len(str(ipnum)) > 10 else 31
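            # An integer longer than 10 decimal digits cannot be IPv4
            # (the maximum, 4294967295, has exactly 10 digits), so the
            # lookup walks the full 128-bit IPv6 trie instead.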
for depth in range(seek_depth, -1, -1):
if self._flags & MEMORY_CACHE:
startIndex = 2 * self._recordLength * offset
endIndex = startIndex + (2 * self._recordLength)
buf = self._memory[startIndex:endIndex]
else:
startIndex = 2 * self._recordLength * offset
readLength = 2 * self._recordLength
try:
self._lock.acquire()
self._fp.seek(startIndex, os.SEEK_SET)
buf = self._fp.read(readLength)
finally:
self._lock.release()
if PY3 and type(buf) is bytes:
buf = buf.decode(ENCODING)
x = [0, 0]
for i in range(2):
for j in range(self._recordLength):
byte = buf[self._recordLength * i + j]
x[i] += ord(byte) << (j * 8)
if ipnum & (1 << depth):
if x[1] >= self._databaseSegments:
self._netmask = seek_depth - depth + 1
return x[1]
offset = x[1]
else:
if x[0] >= self._databaseSegments:
self._netmask = seek_depth - depth + 1
return x[0]
offset = x[0]
except (IndexError, UnicodeDecodeError):
pass
raise GeoIPError('Corrupt database')
def _get_org(self, ipnum):
"""
        Seek and return the organization or ISP name for ipnum.
:arg ipnum: Result of ip2long conversion
"""
seek_org = self._seek_country(ipnum)
if seek_org == self._databaseSegments:
return None
read_length = (2 * self._recordLength - 1) * self._databaseSegments
try:
self._lock.acquire()
self._fp.seek(seek_org + read_length, os.SEEK_SET)
buf = self._fp.read(MAX_ORG_RECORD_LENGTH)
finally:
self._lock.release()
if PY3 and type(buf) is bytes:
buf = buf.decode(ENCODING)
return buf[:buf.index(chr(0))]
def _get_region(self, ipnum):
"""
Seek and return the region information.
Returns dict containing country_code and region_code.
:arg ipnum: Result of ip2long conversion
"""
region_code = None
country_code = None
seek_country = self._seek_country(ipnum)
def get_region_code(offset):
region1 = chr(offset // 26 + 65)
region2 = chr(offset % 26 + 65)
return ''.join([region1, region2])
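        # e.g. get_region_code(0) -> 'AA', get_region_code(27) -> 'BB'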
if self._databaseType == REGION_EDITION_REV0:
seek_region = seek_country - STATE_BEGIN_REV0
if seek_region >= 1000:
country_code = 'US'
region_code = get_region_code(seek_region - 1000)
else:
country_code = COUNTRY_CODES[seek_region]
elif self._databaseType == REGION_EDITION_REV1:
seek_region = seek_country - STATE_BEGIN_REV1
if seek_region < US_OFFSET:
pass
elif seek_region < CANADA_OFFSET:
country_code = 'US'
region_code = get_region_code(seek_region - US_OFFSET)
elif seek_region < WORLD_OFFSET:
country_code = 'CA'
region_code = get_region_code(seek_region - CANADA_OFFSET)
else:
index = (seek_region - WORLD_OFFSET) // FIPS_RANGE
if index < len(COUNTRY_CODES):
country_code = COUNTRY_CODES[index]
elif self._databaseType in CITY_EDITIONS:
rec = self._get_record(ipnum)
region_code = rec.get('region_code')
country_code = rec.get('country_code')
return {'country_code': country_code, 'region_code': region_code}
def _get_record(self, ipnum):
"""
Populate location dict for converted IP.
Returns dict with numerous location properties.
:arg ipnum: Result of ip2long conversion
"""
seek_country = self._seek_country(ipnum)
if seek_country == self._databaseSegments:
return {}
read_length = (2 * self._recordLength - 1) * self._databaseSegments
try:
self._lock.acquire()
self._fp.seek(seek_country + read_length, os.SEEK_SET)
buf = self._fp.read(FULL_RECORD_LENGTH)
finally:
self._lock.release()
if PY3 and type(buf) is bytes:
buf = buf.decode(ENCODING)
record = {
'dma_code': 0,
'area_code': 0,
'metro_code': None,
'postal_code': None
}
latitude = 0
longitude = 0
char = ord(buf[0])
record['country_code'] = COUNTRY_CODES[char]
record['country_code3'] = COUNTRY_CODES3[char]
record['country_name'] = COUNTRY_NAMES[char]
record['continent'] = CONTINENT_NAMES[char]
def read_data(buf, pos):
cur = pos
while buf[cur] != '\0':
cur += 1
return cur, buf[pos:cur] if cur > pos else None
offset, record['region_code'] = read_data(buf, 1)
offset, record['city'] = read_data(buf, offset + 1)
offset, record['postal_code'] = read_data(buf, offset + 1)
offset = offset + 1
for j in range(3):
latitude += (ord(buf[offset + j]) << (j * 8))
for j in range(3):
longitude += (ord(buf[offset + j + 3]) << (j * 8))
record['latitude'] = (latitude / 10000.0) - 180.0
record['longitude'] = (longitude / 10000.0) - 180.0
if self._databaseType in (CITY_EDITION_REV1, CITY_EDITION_REV1_V6):
if record['country_code'] == 'US':
dma_area = 0
for j in range(3):
dma_area += ord(buf[offset + j + 6]) << (j * 8)
record['dma_code'] = int(floor(dma_area / 1000))
record['area_code'] = dma_area % 1000
record['metro_code'] = DMA_MAP.get(record['dma_code'])
params = (record['country_code'], record['region_code'])
record['time_zone'] = time_zone_by_country_and_region(*params)
return record
def _gethostbyname(self, hostname):
"""
Hostname lookup method, supports both IPv4 and IPv6.
"""
if self._databaseType in IPV6_EDITIONS:
response = socket.getaddrinfo(hostname, 0, socket.AF_INET6)
family, socktype, proto, canonname, sockaddr = response[0]
address, port, flow, scope = sockaddr
return address
else:
return socket.gethostbyname(hostname)
def id_by_name(self, hostname):
"""
        Returns the database ID for the specified hostname.
        The ID might be useful as an array index; 0 means unknown.
:arg hostname: Hostname to get ID from.
"""
addr = self._gethostbyname(hostname)
return self.id_by_addr(addr)
def id_by_addr(self, addr):
"""
        Returns the database ID for the specified address.
        The ID might be useful as an array index; 0 means unknown.
        :arg addr: IPv4 or IPv6 address (e.g. 203.0.113.30)
"""
if self._databaseType in (PROXY_EDITION, NETSPEED_EDITION_REV1, NETSPEED_EDITION_REV1_V6):
raise GeoIPError('Invalid database type; this database is not supported')
ipv = 6 if addr.find(':') >= 0 else 4
if ipv == 4 and self._databaseType not in (COUNTRY_EDITION, NETSPEED_EDITION):
raise GeoIPError('Invalid database type; this database supports IPv6 addresses, not IPv4')
if ipv == 6 and self._databaseType != COUNTRY_EDITION_V6:
raise GeoIPError('Invalid database type; this database supports IPv4 addresses, not IPv6')
ipnum = ip2long(addr)
return self._seek_country(ipnum) - COUNTRY_BEGIN
def last_netmask(self):
"""
Returns the netmask depth of the last lookup.
"""
return self._netmask
def country_code_by_addr(self, addr):
"""
Returns 2-letter country code (e.g. US) from IP address.
:arg addr: IP address (e.g. 203.0.113.30)
"""
VALID_EDITIONS = (COUNTRY_EDITION, COUNTRY_EDITION_V6)
if self._databaseType in VALID_EDITIONS:
country_id = self.id_by_addr(addr)
return COUNTRY_CODES[country_id]
elif self._databaseType in REGION_CITY_EDITIONS:
return self.region_by_addr(addr).get('country_code')
raise GeoIPError('Invalid database type, expected Country, City or Region')
def country_code_by_name(self, hostname):
"""
Returns 2-letter country code (e.g. US) from hostname.
:arg hostname: Hostname (e.g. example.com)
"""
addr = self._gethostbyname(hostname)
return self.country_code_by_addr(addr)
def netspeed_by_addr(self, addr):
"""
Returns NetSpeed name from address.
:arg addr: IP address (e.g. 203.0.113.30)
"""
if self._databaseType == NETSPEED_EDITION:
return NETSPEED_NAMES[self.id_by_addr(addr)]
elif self._databaseType in (NETSPEED_EDITION_REV1,
NETSPEED_EDITION_REV1_V6):
ipnum = ip2long(addr)
return self._get_org(ipnum)
raise GeoIPError(
'Invalid database type, expected NetSpeed or NetSpeedCell')
def netspeed_by_name(self, hostname):
"""
Returns NetSpeed name from hostname. Can be Unknown, Dial-up,
Cable, or Corporate.
:arg hostname: Hostname (e.g. example.com)
"""
addr = self._gethostbyname(hostname)
return self.netspeed_by_addr(addr)
def country_name_by_addr(self, addr):
"""
Returns full country name for specified IP address.
:arg addr: IP address (e.g. 203.0.113.30)
"""
VALID_EDITIONS = (COUNTRY_EDITION, COUNTRY_EDITION_V6)
if self._databaseType in VALID_EDITIONS:
country_id = self.id_by_addr(addr)
return COUNTRY_NAMES[country_id]
elif self._databaseType in CITY_EDITIONS:
return self.record_by_addr(addr).get('country_name')
else:
message = 'Invalid database type, expected Country or City'
raise GeoIPError(message)
def country_name_by_name(self, hostname):
"""
Returns full country name for specified hostname.
:arg hostname: Hostname (e.g. example.com)
"""
addr = self._gethostbyname(hostname)
return self.country_name_by_addr(addr)
def org_by_addr(self, addr):
"""
Returns Organization, ISP, or ASNum name for given IP address.
:arg addr: IP address (e.g. 203.0.113.30)
"""
valid = (ORG_EDITION, ISP_EDITION,
ASNUM_EDITION, ASNUM_EDITION_V6)
if self._databaseType not in valid:
message = 'Invalid database type, expected Org, ISP or ASNum'
raise GeoIPError(message)
ipnum = ip2long(addr)
return self._get_org(ipnum)
def org_by_name(self, hostname):
"""
Returns Organization, ISP, or ASNum name for given hostname.
:arg hostname: Hostname (e.g. example.com)
"""
addr = self._gethostbyname(hostname)
return self.org_by_addr(addr)
isp_by_addr = org_by_addr
isp_by_name = org_by_name
asn_by_addr = org_by_addr
asn_by_name = org_by_name
def record_by_addr(self, addr):
"""
        Returns dictionary with city data containing `country_code`,
        `country_code3`, `country_name`, `continent`, `region_code`, `city`,
        `postal_code`, `latitude`, `longitude`, `dma_code`, `metro_code`,
        `area_code` and `time_zone`.
:arg addr: IP address (e.g. 203.0.113.30)
"""
if self._databaseType not in CITY_EDITIONS:
message = 'Invalid database type, expected City'
raise GeoIPError(message)
ipnum = ip2long(addr)
rec = self._get_record(ipnum)
if not rec:
return None
return rec
def record_by_name(self, hostname):
"""
        Returns dictionary with city data containing `country_code`,
        `country_code3`, `country_name`, `continent`, `region_code`, `city`,
        `postal_code`, `latitude`, `longitude`, `dma_code`, `metro_code`,
        `area_code` and `time_zone`.
:arg hostname: Hostname (e.g. example.com)
"""
addr = self._gethostbyname(hostname)
return self.record_by_addr(addr)
def region_by_addr(self, addr):
"""
Returns dictionary containing `country_code` and `region_code`.
:arg addr: IP address (e.g. 203.0.113.30)
"""
if self._databaseType not in REGION_CITY_EDITIONS:
message = 'Invalid database type, expected Region or City'
raise GeoIPError(message)
ipnum = ip2long(addr)
return self._get_region(ipnum)
def region_by_name(self, hostname):
"""
Returns dictionary containing `country_code` and `region_code`.
:arg hostname: Hostname (e.g. example.com)
"""
addr = self._gethostbyname(hostname)
return self.region_by_addr(addr)
def time_zone_by_addr(self, addr):
"""
Returns time zone in tzdata format (e.g. America/New_York or Europe/Paris)
:arg addr: IP address (e.g. 203.0.113.30)
"""
if self._databaseType not in CITY_EDITIONS:
message = 'Invalid database type, expected City'
raise GeoIPError(message)
ipnum = ip2long(addr)
return self._get_record(ipnum).get('time_zone')
def time_zone_by_name(self, hostname):
"""
Returns time zone in tzdata format (e.g. America/New_York or Europe/Paris)
:arg hostname: Hostname (e.g. example.com)
"""
addr = self._gethostbyname(hostname)
return self.time_zone_by_addr(addr)
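# Illustrative usage sketch (the database path is hypothetical; a MaxMind
# legacy binary database must be supplied by the caller):
# gi = GeoIP('/usr/share/GeoIP/GeoLiteCity.dat', flags=MEMORY_CACHE)
# gi.record_by_addr('203.0.113.30')       # full city record dict
# gi.time_zone_by_addr('203.0.113.30')    # tzdata string or None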
|
irrgit/hexchat-plugins
|
hexchat-oper/extras/pygeoip.py
|
Python
|
gpl-3.0
| 59,717
|
[
"BWA",
"COLUMBUS"
] |
92601f132c76a6973a0bfc971b10c8cfc524f932c1a539ef91400170547ff0a0
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os.path
import numpy as np
import matplotlib.pyplot as plt
import abel
# This example demonstrates a BASEX transform of an image obtained using a
# velocity map imaging (VMI) photoelectron spectrometer to record the
# photoelectron angular distribution resulting from above threshold ionization (ATI)
# in xenon gas using a ~40 femtosecond, 800 nm laser pulse.
# This spectrum was recorded in 2012 in the Kapteyn-Murnane research group at
# JILA / The University of Colorado at Boulder
# by Dan Hickstein and co-workers (contact DanHickstein@gmail.com)
# http://journals.aps.org/prl/abstract/10.1103/PhysRevLett.109.073004
#
# Before you start your own transform, identify the central pixel of the image.
# It's nice to use a program like ImageJ for this.
# http://imagej.nih.gov/ij/
# Specify the path to the file
filename = os.path.join('data', 'Xenon_ATI_VMI_800_nm_649x519.tif')
# Name the output files
output_image = filename[:-4] + '_Abel_transform.png'
output_text = filename[:-4] + '_speeds.txt'
output_plot = filename[:-4] + '_comparison.pdf'
# Step 1: Load an image file as a numpy array
print('Loading ' + filename)
raw_data = plt.imread(filename).astype('float64')
# Step 2: Specify the center in y,x (vert,horiz) format
center = (245,340)
# or, use automatic centering
# center = 'com'
# center = 'gaussian'
# Step 3: perform the BASEX transform!
print('Performing the inverse Abel transform:')
recon = abel.Transform(raw_data, direction='inverse', method='basex',
center=center, transform_options={'basis_dir':'./'},
verbose=True).transform
speeds = abel.tools.vmi.angular_integration(recon)
# Set up some axes
fig = plt.figure(figsize=(15,4))
ax1 = plt.subplot(131)
ax2 = plt.subplot(132)
ax3 = plt.subplot(133)
# Plot the raw data
im1 = ax1.imshow(raw_data,origin='lower',aspect='auto')
fig.colorbar(im1,ax=ax1,fraction=.1,shrink=0.9,pad=0.03)
ax1.set_xlabel('x (pixels)')
ax1.set_ylabel('y (pixels)')
# Plot the 2D transform
im2 = ax2.imshow(recon,origin='lower',aspect='auto',clim=(0,2000))
fig.colorbar(im2,ax=ax2,fraction=.1,shrink=0.9,pad=0.03)
ax2.set_xlabel('x (pixels)')
ax2.set_ylabel('y (pixels)')
# Plot the 1D speed distribution
ax3.plot(*speeds)
ax3.set_xlabel('Speed (pixel)')
ax3.set_ylabel('Yield (log)')
ax3.set_yscale('log')
#ax3.set_ylim(1e2,1e5)
# Prettify the plot a little bit:
plt.subplots_adjust(left=0.06,bottom=0.17,right=0.95,top=0.89,wspace=0.35,hspace=0.37)
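# Note: the output filenames defined near the top (output_image, output_text,
# output_plot) are not written automatically. One illustrative way to save
# them (uncomment to use):
# fig.savefig(output_plot)
# np.savetxt(output_text, np.array(speeds).T)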
# Show the plots
plt.show()
|
rth/PyAbel
|
examples/example_basex_photoelectron.py
|
Python
|
mit
| 2,703
|
[
"Gaussian"
] |
92a344f8e1bb8a401f4f8beb5b796cffb336eaaafe0d83b40166456bd9bc31ae
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
"""Various helper functions related to VTK.
"""
import vtk
def set_mapper_input(mapper, data):
""" VTK 5 and 6 function to set a mapper input """
try:
mapper.SetInput(data)
except AttributeError:
mapper.SetInputData(data)
def pointCloudActor(points, color = (0, 0, 0, 255)):
"""Create a vtkActor representing a point cloud.
Args:
points: iterable of points [[x0, y0, z0], [x1, y1, z1], ...]
        color: color of the points in the RGBA format
Returns:
An instance of vtkActor
"""
vtk_points = vtk.vtkPoints()
vertices = vtk.vtkCellArray()
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(4)
colors.SetName("Colors")
for (x, y, z) in points:
point_id = vtk_points.InsertNextPoint(x, y, z)
vertices.InsertNextCell(1)
vertices.InsertCellPoint(point_id)
colors.InsertNextTuple4(color[0], color[1], color[2], color[3])
poly_data = vtk.vtkPolyData()
poly_data.SetPoints(vtk_points)
poly_data.SetVerts(vertices)
poly_data.GetPointData().SetScalars(colors)
mapper = vtk.vtkPolyDataMapper()
set_mapper_input(mapper, poly_data)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
return actor
def boxesActor(boxes, color = (0, 0, 0, 100), wireframe=False):
"""Create a vtkActor representing a list of hexahedron.
    The hexahedrons are assumed to be aligned with the coordinate axes, and are
given using their extremal points.
Args:
        boxes: [[[xmin, ymin, zmin], [xmax, ymax, zmax]], ...]
color: RGBA color
wireframe: if True, the boxes are drawn in wireframe mode
"""
grid = vtk.vtkUnstructuredGrid()
points = vtk.vtkPoints()
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(4)
colors.SetName("Colors")
for (index, box) in enumerate(boxes):
        colors.InsertNextTuple4(color[0], color[1], color[2], color[3])  # honor the caller's alpha
P0 = [box[0][0], box[0][1], box[0][2]]
P1 = [box[1][0], box[0][1], box[0][2]]
P2 = [box[1][0], box[1][1], box[0][2]]
P3 = [box[0][0], box[1][1], box[0][2]]
P4 = [box[0][0], box[0][1], box[1][2]]
P5 = [box[1][0], box[0][1], box[1][2]]
P6 = [box[1][0], box[1][1], box[1][2]]
P7 = [box[0][0], box[1][1], box[1][2]]
points.InsertNextPoint(P0)
points.InsertNextPoint(P1)
points.InsertNextPoint(P2)
points.InsertNextPoint(P3)
points.InsertNextPoint(P4)
points.InsertNextPoint(P5)
points.InsertNextPoint(P6)
points.InsertNextPoint(P7)
hexa = vtk.vtkHexahedron()
hexa.GetPointIds().SetNumberOfIds(8)
for i in range(8):
hexa.GetPointIds().SetId(i, 8 * index + i)
grid.InsertNextCell(hexa.GetCellType(), hexa.GetPointIds())
grid.SetPoints(points)
grid.GetCellData().SetScalars(colors)
mapper = vtk.vtkDataSetMapper()
set_mapper_input(mapper, grid)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
if wireframe:
actor.GetProperty().SetRepresentationToWireframe()
actor.GetProperty().SetLineWidth(2.)
return actor
def quadsActor(bounds, color):
"""Create solid, axis-aligned quads at 0 in Z.
Args:
bounds: [[[xmin, ymin], [xmax, ymax]], ...]
color: [R, G, B, A]
"""
points = vtk.vtkPoints()
quads = vtk.vtkCellArray()
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(4)
for (index, bound) in enumerate(bounds):
colors.InsertNextTuple4(*color)
(low, high) = bound
points.InsertNextPoint(low[0], low[1], 0)
points.InsertNextPoint(high[0], low[1], 0)
points.InsertNextPoint(high[0], high[1], 0)
points.InsertNextPoint(low[0], high[1], 0)
quad = vtk.vtkQuad()
for i in range(4):
quad.GetPointIds().SetId(i, 4 * index + i)
quads.InsertNextCell(quad)
poly_data = vtk.vtkPolyData()
poly_data.SetPoints(points)
poly_data.SetPolys(quads)
poly_data.GetCellData().SetScalars(colors)
mapper = vtk.vtkPolyDataMapper()
set_mapper_input(mapper, poly_data)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.RotateZ(180)
actor.RotateY(180)
return actor
def legendActor(text):
"""Display the given text in the lower left corner of the window.
"""
actor = vtk.vtkTextActor()
actor.GetTextProperty().SetFontSize(24)
actor.SetInput(text)
actor.GetTextProperty().SetColor(0, 0, 0)
return actor
def getRenderWindowInteractor(renderer, interactor_style):
"""Return a vtkRenderWindowInteractor from a renderer and interactor style.
"""
render_window = vtk.vtkRenderWindow()
render_window.AddRenderer(renderer)
render_window_interactor = vtk.vtkRenderWindowInteractor()
render_window_interactor.SetInteractorStyle(interactor_style)
render_window_interactor.SetRenderWindow(render_window)
render_window_interactor.Initialize()
render_window.Render()
return render_window_interactor
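# Illustrative usage sketch combining the helpers above (the point data is
# hypothetical; any VTK interactor style works):
# renderer = vtk.vtkRenderer()
# renderer.SetBackground(1, 1, 1)
# renderer.AddActor(pointCloudActor([[0, 0, 0], [1, 0, 0], [0, 1, 0]],
#                                   color=(255, 0, 0, 255)))
# renderer.AddActor2D(legendActor("3 points"))
# iren = getRenderWindowInteractor(renderer,
#                                  vtk.vtkInteractorStyleTrackballCamera())
# iren.Start()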
|
BenoitLBen/runtime
|
tools/vtk_utils.py
|
Python
|
mit
| 5,115
|
[
"VTK"
] |
00270fbd448ad56b666cf949d0489cd08e33fe25ac0d60619ff0b572c52cd4c0
|
"""
Alpha diversity measures (:mod:`skbio.diversity.alpha`)
=======================================================
.. currentmodule:: skbio.diversity.alpha
This package provides implementations of alpha diversity measures, including
measures of richness, dominance, and evenness. Some functions generate
confidence intervals (CIs). These functions have the suffix ``_ci``.
Functions
---------
.. autosummary::
:toctree:
ace
berger_parker_d
brillouin_d
chao1
chao1_ci
dominance
doubles
enspie
esty_ci
faith_pd
fisher_alpha
gini_index
goods_coverage
heip_e
kempton_taylor_q
lladser_ci
lladser_pe
margalef
mcintosh_d
mcintosh_e
menhinick
michaelis_menten_fit
observed_otus
osd
pielou_e
robbins
shannon
simpson
simpson_e
singles
strong
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from ._ace import ace
from ._chao1 import chao1, chao1_ci
from ._faith_pd import faith_pd
from ._base import (
berger_parker_d, brillouin_d, dominance, doubles, enspie,
esty_ci, fisher_alpha, goods_coverage, heip_e, kempton_taylor_q,
margalef, mcintosh_d, mcintosh_e, menhinick, michaelis_menten_fit,
observed_otus, osd, pielou_e, robbins, shannon, simpson, simpson_e,
singles, strong)
from ._gini import gini_index
from ._lladser import lladser_pe, lladser_ci
__all__ = ['ace', 'chao1', 'chao1_ci', 'berger_parker_d',
'brillouin_d', 'dominance', 'doubles', 'enspie', 'esty_ci',
'faith_pd', 'fisher_alpha', 'gini_index', 'goods_coverage',
'heip_e', 'kempton_taylor_q', 'margalef', 'mcintosh_d',
'mcintosh_e', 'menhinick', 'michaelis_menten_fit', 'observed_otus',
'osd', 'pielou_e', 'robbins', 'shannon', 'simpson', 'simpson_e',
'singles', 'strong', 'lladser_pe', 'lladser_ci']
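# Illustrative usage (the counts vector is hypothetical):
# >>> from skbio.diversity.alpha import shannon, simpson
# >>> shannon([10, 5, 2, 1])   # Shannon entropy of a counts vector
# >>> simpson([10, 5, 2, 1])   # 1 - Simpson's dominance index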
|
gregcaporaso/scikit-bio
|
skbio/diversity/alpha/__init__.py
|
Python
|
bsd-3-clause
| 2,159
|
[
"scikit-bio"
] |
dcfe0261c7004cb4af467385b26cd034bd2075e8dedf7f816535427e430cb6d2
|
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import sys, os, socket, ssl
from zeroinstall import _
from zeroinstall.injector import download
from zeroinstall.support import ssl_match_hostname
if sys.version_info[0] > 2:
from urllib import request as urllib2
from http.client import HTTPSConnection, HTTPException
else:
import urllib2
from httplib import HTTPSConnection, HTTPException
try:
# http://pypi.python.org/pypi/certifi
import certifi
_fallback_ca_bundle = certifi.where()
except:
# Final fallback (last known signer of keylookup)
_fallback_ca_bundle = os.path.join(os.path.dirname(__file__), "EquifaxSecureCA.crt")
# Note: on MacOS X at least, it will also look in the system keychain provided that you supply *some* CAs.
# (if you don't specify any trusted CAs, Python trusts everything!)
# So, the "fallback" option doesn't necessarily mean that other sites won't work.
for ca_bundle in [
"/etc/ssl/certs/ca-certificates.crt", # Debian/Ubuntu/Arch Linux
"/etc/pki/tls/certs/ca-bundle.crt", # Fedora/RHEL
"/etc/ssl/ca-bundle.pem", # openSUSE/SLE (claimed)
"/var/lib/ca-certificates/ca-bundle.pem.new", # openSUSE (actual)
_fallback_ca_bundle]:
if os.path.exists(ca_bundle):
class ValidatingHTTPSConnection(HTTPSConnection):
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if hasattr(self, '_tunnel_host') and self._tunnel_host:
self.sock = sock
self._tunnel()
sock = ssl.wrap_socket(sock, cert_reqs = ssl.CERT_REQUIRED, ca_certs = ca_bundle)
ssl_match_hostname.match_hostname(sock.getpeercert(), self.host)
self.sock = sock
class ValidatingHTTPSHandler(urllib2.HTTPSHandler):
def https_open(self, req):
return self.do_open(self.getConnection, req)
def getConnection(self, host, timeout=300):
"""@type host: str"""
return ValidatingHTTPSConnection(host)
MyHTTPSHandler = ValidatingHTTPSHandler
break
else:
raise Exception("No root CA's found (not even the built-in one!); security of HTTPS connections cannot be verified")
class Redirect(Exception):
def __init__(self, req):
Exception.__init__(self, "Redirect")
self.req = req
class MyRedirectHandler(urllib2.HTTPRedirectHandler):
"""Throw an exception on redirects instead of continuing. The redirect will be handled in the main thread
so it can work with connection pooling."""
def redirect_request(self, req, fp, code, msg, headers, newurl):
"""@type code: int
@type msg: str
@type newurl: str"""
new_req = urllib2.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, newurl)
if new_req:
raise Redirect(new_req)
# Our handler differs from the Python default in that:
# - we don't support file:// URLs
# - we don't follow HTTP redirects
_my_urlopen = urllib2.OpenerDirector()
for klass in [urllib2.ProxyHandler, urllib2.UnknownHandler, urllib2.HTTPHandler,
urllib2.HTTPDefaultErrorHandler, MyRedirectHandler,
urllib2.FTPHandler, urllib2.HTTPErrorProcessor, MyHTTPSHandler]:
_my_urlopen.add_handler(klass())
def download_in_thread(url, target_file, if_modified_since, notify_done):
"""@type url: str
@type target_file: file"""
src = None
try:
#print "Child downloading", url
if url.startswith('http:') or url.startswith('https:') or url.startswith('ftp:'):
req = urllib2.Request(url)
if url.startswith('http:') and if_modified_since:
req.add_header('If-Modified-Since', if_modified_since)
src = _my_urlopen.open(req)
else:
raise Exception(_('Unsupported URL protocol in: %s') % url)
if sys.version_info[0] > 2:
sock_recv = src.fp.read1 # Python 3
else:
try:
sock_recv = src.fp._sock.recv # Python 2
except AttributeError:
sock_recv = src.fp.fp._sock.recv # Python 2.5 on FreeBSD
while True:
data = sock_recv(256)
if not data: break
target_file.write(data)
target_file.flush()
notify_done(download.RESULT_OK)
except (urllib2.HTTPError, urllib2.URLError, HTTPException, socket.error) as ex:
if isinstance(ex, urllib2.HTTPError) and ex.code == 304: # Not modified
notify_done(download.RESULT_NOT_MODIFIED)
else:
#print >>sys.stderr, "Error downloading '" + url + "': " + (str(ex) or str(ex.__class__.__name__))
__, ex, tb = sys.exc_info()
notify_done(download.RESULT_FAILED, (download.DownloadError(_('Error downloading {url}: {ex}').format(url = url, ex = ex)), tb))
except Redirect as ex:
notify_done(download.RESULT_REDIRECT, redirect = ex.req.get_full_url())
except Exception as ex:
__, ex, tb = sys.exc_info()
notify_done(download.RESULT_FAILED, (ex, tb))
finally:
if src is not None:
src.close()
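# Illustrative sketch (not part of the original module): driving
# download_in_thread with a minimal callback; the URL and target path are
# hypothetical. notify_done receives a download.RESULT_* status, optionally
# an (exception, traceback) tuple, and optionally a redirect URL.
# def _notify(status, errors=None, redirect=None):
#     print(status, redirect)
# with open('/tmp/feed.xml', 'wb') as target:
#     download_in_thread('http://example.com/feed.xml', target, None, _notify)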
|
slovenwd/0install
|
zeroinstall/injector/_download_child.py
|
Python
|
lgpl-2.1
| 4,719
|
[
"VisIt"
] |
47599cf7e11d483567c770764447470afcc56efe48f74fec220ee27a0d13c3b0
|
import inspect
import math
import json
import weakref
from jmespath import exceptions
from jmespath.compat import string_type as STRING_TYPE
from jmespath.compat import get_methods
# python types -> jmespath types
TYPES_MAP = {
'bool': 'boolean',
'list': 'array',
'dict': 'object',
'NoneType': 'null',
'unicode': 'string',
'str': 'string',
'float': 'number',
'int': 'number',
'OrderedDict': 'object',
'_Projection': 'array',
'_Expression': 'expref',
}
# jmespath types -> python types
REVERSE_TYPES_MAP = {
'boolean': ('bool',),
'array': ('list', '_Projection'),
'object': ('dict', 'OrderedDict',),
'null': ('None',),
'string': ('unicode', 'str'),
'number': ('float', 'int'),
'expref': ('_Expression',),
}
def populate_function_table(cls):
func_table = cls.FUNCTION_TABLE
for name, method in get_methods(cls):
signature = getattr(method, 'signature', None)
if signature is not None:
func_table[name[6:]] = {"function": method,
"signature": signature}
return cls
def builtin_function(*arguments):
def _record_arity(func):
func.signature = arguments
return func
return _record_arity
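# Registration example: a method named '_func_abs' decorated with
# @builtin_function({'types': ['number']}) ends up in FUNCTION_TABLE under
# the key 'abs' -- populate_function_table strips the '_func_' prefix via
# name[6:].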
@populate_function_table
class RuntimeFunctions(object):
# The built in functions are automatically populated in the FUNCTION_TABLE
# using the @builtin_function decorator on methods defined in this class.
FUNCTION_TABLE = {
}
def __init__(self):
self._interpreter = None
@property
def interpreter(self):
if self._interpreter is None:
return None
else:
return self._interpreter()
@interpreter.setter
def interpreter(self, value):
# A weakref is used because we have
# a cyclic reference and we want to allow
# for the memory to be properly freed when
# the objects are no longer needed.
self._interpreter = weakref.ref(value)
def call_function(self, function_name, resolved_args):
try:
spec = self.FUNCTION_TABLE[function_name]
except KeyError:
raise exceptions.UnknownFunctionError(
"Unknown function: %s()" % function_name)
function = spec['function']
signature = spec['signature']
self._validate_arguments(resolved_args, signature, function_name)
return function(self, *resolved_args)
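    # Dispatch sketch: with the table populated by the decorator above,
    # calls resolve like
    #
    #   funcs = RuntimeFunctions()
    #   funcs.call_function('abs', [-3])       # -> 3
    #   funcs.call_function('length', ['ab'])  # -> 2
    #
    # An unknown name raises UnknownFunctionError; a bad arity or argument
    # type raises the corresponding exception from the validators below.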
def _validate_arguments(self, args, signature, function_name):
if signature and signature[-1].get('variadic'):
if len(args) < len(signature):
raise exceptions.VariadictArityError(
len(signature), len(args), function_name)
elif len(args) != len(signature):
raise exceptions.ArityError(
len(signature), len(args), function_name)
return self._type_check(args, signature, function_name)
def _type_check(self, actual, signature, function_name):
for i in range(len(signature)):
allowed_types = signature[i]['types']
if allowed_types:
self._type_check_single(actual[i], allowed_types,
function_name)
def _type_check_single(self, current, types, function_name):
# Type checking involves checking the top level type,
# and in the case of arrays, potentially checking the types
# of each element.
allowed_types, allowed_subtypes = self._get_allowed_pytypes(types)
# We're not using isinstance() on purpose.
# The type model for jmespath does not map
# 1-1 with python types (booleans are considered
# integers in python for example).
actual_typename = type(current).__name__
if actual_typename not in allowed_types:
raise exceptions.JMESPathTypeError(
function_name, current,
self._convert_to_jmespath_type(actual_typename), types)
# If we're dealing with a list type, we can have
# additional restrictions on the type of the list
# elements (for example a function can require a
# list of numbers or a list of strings).
# Arrays are the only types that can have subtypes.
if allowed_subtypes:
self._subtype_check(current, allowed_subtypes,
types, function_name)
def _get_allowed_pytypes(self, types):
allowed_types = []
allowed_subtypes = []
for t in types:
type_ = t.split('-', 1)
if len(type_) == 2:
type_, subtype = type_
allowed_subtypes.append(REVERSE_TYPES_MAP[subtype])
else:
type_ = type_[0]
allowed_types.extend(REVERSE_TYPES_MAP[type_])
return allowed_types, allowed_subtypes
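    # Example (sketch): 'array-number' splits into top-level type 'array' and
    # subtype 'number', so _get_allowed_pytypes(['array-number']) returns
    # (['list', '_Projection'], [('float', 'int')]).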
def _subtype_check(self, current, allowed_subtypes, types, function_name):
if len(allowed_subtypes) == 1:
# The easy case, we know up front what type
# we need to validate.
allowed_subtypes = allowed_subtypes[0]
for element in current:
actual_typename = type(element).__name__
if actual_typename not in allowed_subtypes:
raise exceptions.JMESPathTypeError(
function_name, element, actual_typename, types)
elif len(allowed_subtypes) > 1 and current:
# Dynamic type validation. Based on the first
# type we see, we validate that the remaining types
# match.
first = type(current[0]).__name__
for subtypes in allowed_subtypes:
if first in subtypes:
allowed = subtypes
break
else:
raise exceptions.JMESPathTypeError(
function_name, current[0], first, types)
for element in current:
actual_typename = type(element).__name__
if actual_typename not in allowed:
raise exceptions.JMESPathTypeError(
function_name, element, actual_typename, types)
@builtin_function({'types': ['number']})
def _func_abs(self, arg):
return abs(arg)
@builtin_function({'types': ['array-number']})
def _func_avg(self, arg):
return sum(arg) / float(len(arg))
@builtin_function({'types': [], 'variadic': True})
def _func_not_null(self, *arguments):
for argument in arguments:
if argument is not None:
return argument
@builtin_function({'types': []})
def _func_to_string(self, arg):
if isinstance(arg, STRING_TYPE):
return arg
else:
return json.dumps(arg, separators=(',', ':'),
default=str)
@builtin_function({'types': []})
def _func_to_number(self, arg):
if isinstance(arg, (list, dict, bool)):
return None
elif arg is None:
return None
elif isinstance(arg, (int, float)):
return arg
else:
try:
if '.' in arg:
return float(arg)
else:
return int(arg)
except ValueError:
return None
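    # Conversion rules above, by example (sketch): to_number('3') -> 3,
    # to_number('3.5') -> 3.5, while lists, dicts, booleans, None, and
    # non-numeric strings such as 'abc' all map to None.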
@builtin_function({'types': ['array', 'string']}, {'types': []})
def _func_contains(self, subject, search):
return search in subject
@builtin_function({'types': ['string', 'array', 'object']})
def _func_length(self, arg):
return len(arg)
@builtin_function({'types': ['string']}, {'types': ['string']})
def _func_ends_with(self, search, suffix):
return search.endswith(suffix)
    @builtin_function({'types': ['string']}, {'types': ['string']})
    def _func_starts_with(self, search, prefix):
        return search.startswith(prefix)
@builtin_function({'types': ['array', 'string']})
def _func_reverse(self, arg):
if isinstance(arg, STRING_TYPE):
return arg[::-1]
else:
return list(reversed(arg))
@builtin_function({"types": ['number']})
def _func_ceil(self, arg):
return math.ceil(arg)
@builtin_function({"types": ['number']})
def _func_floor(self, arg):
return math.floor(arg)
@builtin_function({"types": ['string']}, {"types": ['array-string']})
def _func_join(self, separator, array):
return separator.join(array)
@builtin_function({"types": ['array-number', 'array-string']})
def _func_max(self, arg):
if arg:
return max(arg)
else:
return None
@builtin_function({"types": ['array-number', 'array-string']})
def _func_min(self, arg):
if arg:
return min(arg)
else:
return None
@builtin_function({"types": ['array-string', 'array-number']})
def _func_sort(self, arg):
return list(sorted(arg))
@builtin_function({"types": ['array-number']})
def _func_sum(self, arg):
return sum(arg)
@builtin_function({"types": ['object']})
def _func_keys(self, arg):
# To be consistent with .values()
# should we also return the indices of a list?
return list(arg.keys())
@builtin_function({"types": ['object']})
def _func_values(self, arg):
return list(arg.values())
@builtin_function({'types': []})
def _func_type(self, arg):
if isinstance(arg, STRING_TYPE):
return "string"
elif isinstance(arg, bool):
return "boolean"
elif isinstance(arg, list):
return "array"
elif isinstance(arg, dict):
return "object"
elif isinstance(arg, (float, int)):
return "number"
elif arg is None:
return "null"
@builtin_function({'types': ['array']}, {'types': ['expref']})
def _func_sort_by(self, array, expref):
if not array:
return array
        # sort_by allows for the expref to be either a number or
        # a string, so we have some special logic to handle this.
        # We evaluate the first array element and verify that it's
        # either a string or a number. We then create a key function
# that validates that type, which requires that remaining array
# elements resolve to the same type as the first element.
required_type = self._convert_to_jmespath_type(
self.interpreter.visit(expref.expression, array[0]))
if required_type not in ['number', 'string']:
raise exceptions.JMESPathTypeError(
'sort_by', array[0], required_type, ['string', 'number'])
keyfunc = self._create_key_func(expref.expression,
[required_type],
'sort_by')
return list(sorted(array, key=keyfunc))
@builtin_function({'types': ['array']}, {'types': ['expref']})
def _func_min_by(self, array, expref):
keyfunc = self._create_key_func(expref.expression,
['number', 'string'],
'min_by')
return min(array, key=keyfunc)
@builtin_function({'types': ['array']}, {'types': ['expref']})
def _func_max_by(self, array, expref):
keyfunc = self._create_key_func(expref.expression,
['number', 'string'],
                                        'max_by')
return max(array, key=keyfunc)
def _create_key_func(self, expr_node, allowed_types, function_name):
interpreter = self.interpreter
def keyfunc(x):
result = interpreter.visit(expr_node, x)
jmespath_type = self._convert_to_jmespath_type(result)
if jmespath_type not in allowed_types:
raise exceptions.JMESPathTypeError(
function_name, result, jmespath_type, allowed_types)
return result
return keyfunc
def _convert_to_jmespath_type(self, pyobject):
return TYPES_MAP.get(type(pyobject).__name__, 'unknown')
|
mruse/aliyun-cli
|
aliyuncli/jmespath/functions.py
|
Python
|
apache-2.0
| 12,257
|
[
"VisIt"
] |
8193a6ba5e6a72a3b1ccbf564ec75b1b915b6b829501eb078820ab29802f3b8e
|
"""
Tests for discussion pages
"""
import datetime
from pytz import UTC
from unittest import skip
from uuid import uuid4
from nose.plugins.attrib import attr
from .helpers import BaseDiscussionTestCase
from ..helpers import UniqueCourseTest
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.discussion import (
DiscussionTabSingleThreadPage,
InlineDiscussionPage,
InlineDiscussionThreadPage,
DiscussionUserProfilePage,
DiscussionTabHomePage,
DiscussionSortPreferencePage,
)
from ...pages.lms.learner_profile import LearnerProfilePage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...fixtures.discussion import (
SingleThreadViewFixture,
UserProfileViewFixture,
SearchResultFixture,
Thread,
Response,
Comment,
SearchResult,
MultipleThreadFixture)
from .helpers import BaseDiscussionMixin
THREAD_CONTENT_WITH_LATEX = """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
\n\n----------\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. (b).\n\n
**(a)** $H_1(e^{j\\omega}) = \\sum_{n=-\\infty}^{\\infty}h_1[n]e^{-j\\omega n} =
\\sum_{n=-\\infty} ^{\\infty}h[n]e^{-j\\omega n}+\\delta_2e^{-j\\omega n_0}$
$= H(e^{j\\omega})+\\delta_2e^{-j\\omega n_0}=A_e (e^{j\\omega}) e^{-j\\omega n_0}
+\\delta_2e^{-j\\omega n_0}=e^{-j\\omega n_0} (A_e(e^{j\\omega})+\\delta_2)
$H_3(e^{j\\omega})=A_e(e^{j\\omega})+\\delta_2$. Dummy $A_e(e^{j\\omega})$ dummy post $.
$A_e(e^{j\\omega}) \\ge -\\delta_2$, it follows that $H_3(e^{j\\omega})$ is real and
$H_3(e^{j\\omega})\\ge 0$.\n\n**(b)** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.\n\n
**Case 1:** If $re^{j\\theta}$ is a Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
\n\n**Case 3:** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem $H_3(e^{j\\omega}) = P(cos\\omega)(cos\\omega - cos\\theta)^k$,
Lorem Lorem Lorem Lorem Lorem Lorem $P(cos\\omega)$ has no
$(cos\\omega - cos\\theta)$ factor.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
$P(cos\\theta) \\neq 0$. Since $P(cos\\omega)$ this is a dummy data post $\\omega$,
dummy $\\delta > 0$ such that for all $\\omega$ dummy $|\\omega - \\theta|
< \\delta$, $P(cos\\omega)$ Lorem ipsum dolor sit amet, consectetur adipiscing elit,
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
"""
class DiscussionResponsePaginationTestMixin(BaseDiscussionMixin):
"""
A mixin containing tests for response pagination for use by both inline
discussion and the discussion tab
"""
def assert_response_display_correct(self, response_total, displayed_responses):
"""
Assert that various aspects of the display of responses are all correct:
* Text indicating total number of responses
* Presence of "Add a response" button
* Number of responses actually displayed
* Presence and text of indicator of how many responses are shown
* Presence and text of button to load more responses
"""
self.assertEqual(
self.thread_page.get_response_total_text(),
str(response_total) + " responses"
)
self.assertEqual(self.thread_page.has_add_response_button(), response_total != 0)
self.assertEqual(self.thread_page.get_num_displayed_responses(), displayed_responses)
self.assertEqual(
self.thread_page.get_shown_responses_text(),
(
None if response_total == 0 else
"Showing all responses" if response_total == displayed_responses else
"Showing first {} responses".format(displayed_responses)
)
)
self.assertEqual(
self.thread_page.get_load_responses_button_text(),
(
None if response_total == displayed_responses else
"Load all responses" if response_total - displayed_responses < 100 else
"Load next 100 responses"
)
)
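    # Worked example (sketch): with 150 total responses and 25 displayed, the
    # page should read "150 responses" and "Showing first 25 responses" and
    # offer "Load next 100 responses"; once 125 of 150 are shown, the button
    # text becomes "Load all responses".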
def test_pagination_no_responses(self):
self.setup_thread(0)
self.assert_response_display_correct(0, 0)
def test_pagination_few_responses(self):
self.setup_thread(5)
self.assert_response_display_correct(5, 5)
def test_pagination_two_response_pages(self):
self.setup_thread(50)
self.assert_response_display_correct(50, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(50, 50)
def test_pagination_exactly_two_response_pages(self):
self.setup_thread(125)
self.assert_response_display_correct(125, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(125, 125)
def test_pagination_three_response_pages(self):
self.setup_thread(150)
self.assert_response_display_correct(150, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(150, 125)
self.thread_page.load_more_responses()
self.assert_response_display_correct(150, 150)
def test_add_response_button(self):
self.setup_thread(5)
self.assertTrue(self.thread_page.has_add_response_button())
self.thread_page.click_add_response_button()
def test_add_response_button_closed_thread(self):
self.setup_thread(5, closed=True)
self.assertFalse(self.thread_page.has_add_response_button())
@attr('shard_2')
class DiscussionHomePageTest(UniqueCourseTest):
"""
Tests for the discussion home page.
"""
SEARCHED_USERNAME = "gizmo"
def setUp(self):
super(DiscussionHomePageTest, self).setUp()
CourseFixture(**self.course_info).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.page = DiscussionTabHomePage(self.browser, self.course_id)
self.page.visit()
def test_new_post_button(self):
"""
Scenario: I can create new posts from the Discussion home page.
Given that I am on the Discussion home page
When I click on the 'New Post' button
Then I should be shown the new post form
"""
self.assertIsNotNone(self.page.new_post_button)
self.page.click_new_post_button()
self.assertIsNotNone(self.page.new_post_form)
@attr('shard_2')
class DiscussionTabSingleThreadTest(BaseDiscussionTestCase, DiscussionResponsePaginationTestMixin):
"""
Tests for the discussion page displaying a single thread
"""
def setUp(self):
super(DiscussionTabSingleThreadTest, self).setUp()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def setup_thread_page(self, thread_id):
self.thread_page = self.create_single_thread_page(thread_id) # pylint: disable=attribute-defined-outside-init
self.thread_page.visit()
def test_mathjax_rendering(self):
thread_id = "test_thread_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(
id=thread_id,
body=THREAD_CONTENT_WITH_LATEX,
commentable_id=self.discussion_id,
thread_type="discussion"
)
)
thread_fixture.push()
self.setup_thread_page(thread_id)
self.assertTrue(self.thread_page.is_discussion_body_visible())
self.thread_page.verify_mathjax_preview_available()
self.thread_page.verify_mathjax_rendered()
def test_markdown_reference_link(self):
"""
Check markdown editor renders reference link correctly
and colon(:) in reference link is not converted to %3a
"""
sample_link = "http://example.com/colon:test"
thread_content = """[enter link description here][1]\n[1]: http://example.com/colon:test"""
thread_id = "test_thread_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(
id=thread_id,
body=thread_content,
commentable_id=self.discussion_id,
thread_type="discussion"
)
)
thread_fixture.push()
self.setup_thread_page(thread_id)
self.assertEqual(self.thread_page.get_link_href(), sample_link)
def test_marked_answer_comments(self):
thread_id = "test_thread_{}".format(uuid4().hex)
response_id = "test_response_{}".format(uuid4().hex)
comment_id = "test_comment_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(id=thread_id, commentable_id=self.discussion_id, thread_type="question")
)
thread_fixture.addResponse(
Response(id=response_id, endorsed=True),
[Comment(id=comment_id)]
)
thread_fixture.push()
self.setup_thread_page(thread_id)
self.assertFalse(self.thread_page.is_comment_visible(comment_id))
self.assertFalse(self.thread_page.is_add_comment_visible(response_id))
self.assertTrue(self.thread_page.is_show_comments_visible(response_id))
self.thread_page.show_comments(response_id)
self.assertTrue(self.thread_page.is_comment_visible(comment_id))
self.assertTrue(self.thread_page.is_add_comment_visible(response_id))
self.assertFalse(self.thread_page.is_show_comments_visible(response_id))
@attr('shard_2')
class DiscussionTabMultipleThreadTest(BaseDiscussionTestCase):
"""
Tests for the discussion page with multiple threads
"""
def setUp(self):
super(DiscussionTabMultipleThreadTest, self).setUp()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.thread_count = 2
self.thread_ids = []
self.setup_multiple_threads(thread_count=self.thread_count)
self.thread_page_1 = DiscussionTabSingleThreadPage(
self.browser,
self.course_id,
self.discussion_id,
self.thread_ids[0]
)
self.thread_page_2 = DiscussionTabSingleThreadPage(
self.browser,
self.course_id,
self.discussion_id,
self.thread_ids[1]
)
self.thread_page_1.visit()
def setup_multiple_threads(self, thread_count):
threads = []
for i in range(thread_count):
thread_id = "test_thread_{}_{}".format(i, uuid4().hex)
thread_body = "Dummy Long text body." * 50
threads.append(
Thread(id=thread_id, commentable_id=self.discussion_id, body=thread_body),
)
self.thread_ids.append(thread_id)
view = MultipleThreadFixture(threads)
view.push()
def test_page_scroll_on_thread_change_view(self):
"""
Check switching between threads changes the page focus
"""
# verify threads are rendered on the page
self.assertTrue(
self.thread_page_1.check_threads_rendered_successfully(thread_count=self.thread_count)
)
# From the thread_page_1 open & verify next thread
self.thread_page_1.click_and_open_thread(thread_id=self.thread_ids[1])
self.assertTrue(self.thread_page_2.is_browser_on_page())
# Verify that the focus is changed
self.thread_page_2.check_focus_is_set(selector=".discussion-article")
@attr('shard_2')
class DiscussionOpenClosedThreadTest(BaseDiscussionTestCase):
"""
Tests for checking the display of attributes on open and closed threads
"""
def setUp(self):
super(DiscussionOpenClosedThreadTest, self).setUp()
self.thread_id = "test_thread_{}".format(uuid4().hex)
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self, **thread_kwargs):
thread_kwargs.update({'commentable_id': self.discussion_id})
view = SingleThreadViewFixture(
Thread(id=self.thread_id, **thread_kwargs)
)
view.addResponse(Response(id="response1"))
view.push()
def setup_openclosed_thread_page(self, closed=False):
self.setup_user(roles=['Moderator'])
if closed:
self.setup_view(closed=True)
else:
self.setup_view()
page = self.create_single_thread_page(self.thread_id)
page.visit()
page.close_open_thread()
return page
def test_originally_open_thread_vote_display(self):
page = self.setup_openclosed_thread_page()
self.assertFalse(page._is_element_visible('.forum-thread-main-wrapper .action-vote'))
self.assertTrue(page._is_element_visible('.forum-thread-main-wrapper .display-vote'))
self.assertFalse(page._is_element_visible('.response_response1 .action-vote'))
self.assertTrue(page._is_element_visible('.response_response1 .display-vote'))
def test_originally_closed_thread_vote_display(self):
page = self.setup_openclosed_thread_page(True)
self.assertTrue(page._is_element_visible('.forum-thread-main-wrapper .action-vote'))
self.assertFalse(page._is_element_visible('.forum-thread-main-wrapper .display-vote'))
self.assertTrue(page._is_element_visible('.response_response1 .action-vote'))
self.assertFalse(page._is_element_visible('.response_response1 .display-vote'))
@attr('shard_2')
class DiscussionCommentDeletionTest(BaseDiscussionTestCase):
"""
Tests for deleting comments displayed beneath responses in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="comment_deletion_test_thread", commentable_id=self.discussion_id))
view.addResponse(
Response(id="response1"), [
Comment(id="comment_other_author"),
Comment(id="comment_self_author", user_id=self.user_id, thread_id="comment_deletion_test_thread")
]
)
view.push()
def test_comment_deletion_as_student(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_deletion_test_thread")
page.visit()
self.assertTrue(page.is_comment_deletable("comment_self_author"))
self.assertTrue(page.is_comment_visible("comment_other_author"))
self.assertFalse(page.is_comment_deletable("comment_other_author"))
page.delete_comment("comment_self_author")
def test_comment_deletion_as_moderator(self):
self.setup_user(roles=['Moderator'])
self.setup_view()
page = self.create_single_thread_page("comment_deletion_test_thread")
page.visit()
self.assertTrue(page.is_comment_deletable("comment_self_author"))
self.assertTrue(page.is_comment_deletable("comment_other_author"))
page.delete_comment("comment_self_author")
page.delete_comment("comment_other_author")
@attr('shard_2')
class DiscussionResponseEditTest(BaseDiscussionTestCase):
"""
Tests for editing responses displayed beneath thread in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="response_edit_test_thread", commentable_id=self.discussion_id))
view.addResponse(
Response(id="response_other_author", user_id="other", thread_id="response_edit_test_thread"),
)
view.addResponse(
Response(id="response_self_author", user_id=self.user_id, thread_id="response_edit_test_thread"),
)
view.push()
def edit_response(self, page, response_id):
self.assertTrue(page.is_response_editable(response_id))
page.start_response_edit(response_id)
new_response = "edited body"
page.set_response_editor_value(response_id, new_response)
page.submit_response_edit(response_id, new_response)
def test_edit_response_as_student(self):
"""
        Scenario: Students should be able to edit the responses they created, but not the responses of other users
        Given that I am on the discussion page with a student logged in
        When I try to edit the response created by the student
        Then the response should be edited and rendered successfully
        And responses from other users should be shown there
        And the student should not be able to edit the responses of other people
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.assertTrue(page.is_response_visible("response_other_author"))
self.assertFalse(page.is_response_editable("response_other_author"))
self.edit_response(page, "response_self_author")
def test_edit_response_as_moderator(self):
"""
        Scenario: Moderators should be able to edit the responses they created and the responses of other users
        Given that I am on the discussion page with a moderator logged in
        When I try to edit the response created by the moderator
Then the response should be edited and rendered successfully
And I try to edit the response created by other users
Then the response should be edited and rendered successfully
"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.edit_response(page, "response_self_author")
self.edit_response(page, "response_other_author")
def test_vote_report_endorse_after_edit(self):
"""
Scenario: Moderator should be able to vote, report or endorse after editing the response.
        Given that I am on the discussion page with a moderator logged in
        When I try to edit the response created by the moderator
Then the response should be edited and rendered successfully
And I try to edit the response created by other users
Then the response should be edited and rendered successfully
And I try to vote the response created by moderator
Then the response should be voted successfully
And I try to vote the response created by other users
Then the response should be voted successfully
And I try to report the response created by moderator
Then the response should be reported successfully
And I try to report the response created by other users
Then the response should be reported successfully
And I try to endorse the response created by moderator
Then the response should be endorsed successfully
And I try to endorse the response created by other users
Then the response should be endorsed successfully
"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.edit_response(page, "response_self_author")
self.edit_response(page, "response_other_author")
page.vote_response('response_self_author')
page.vote_response('response_other_author')
page.report_response('response_self_author')
page.report_response('response_other_author')
page.endorse_response('response_self_author')
page.endorse_response('response_other_author')
@attr('shard_2')
class DiscussionCommentEditTest(BaseDiscussionTestCase):
"""
Tests for editing comments displayed beneath responses in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="comment_edit_test_thread", commentable_id=self.discussion_id))
view.addResponse(
Response(id="response1"),
[Comment(id="comment_other_author", user_id="other"), Comment(id="comment_self_author", user_id=self.user_id)])
view.push()
def edit_comment(self, page, comment_id):
page.start_comment_edit(comment_id)
new_comment = "edited body"
page.set_comment_editor_value(comment_id, new_comment)
page.submit_comment_edit(comment_id, new_comment)
def test_edit_comment_as_student(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_visible("comment_other_author"))
self.assertFalse(page.is_comment_editable("comment_other_author"))
self.edit_comment(page, "comment_self_author")
@skip # TODO: See TNL-3943
def test_edit_comment_as_moderator(self):
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_editable("comment_other_author"))
self.edit_comment(page, "comment_self_author")
self.edit_comment(page, "comment_other_author")
def test_cancel_comment_edit(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
page.set_comment_editor_value("comment_self_author", "edited body")
page.cancel_comment_edit("comment_self_author", original_body)
def test_editor_visibility(self):
"""Only one editor should be visible at a time within a single response"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_editable("comment_other_author"))
self.assertTrue(page.is_add_comment_visible("response1"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
self.assertFalse(page.is_add_comment_visible("response1"))
self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
page.set_comment_editor_value("comment_self_author", "edited body")
page.start_comment_edit("comment_other_author")
self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
self.assertTrue(page.is_comment_editor_visible("comment_other_author"))
self.assertEqual(page.get_comment_body("comment_self_author"), original_body)
page.start_response_edit("response1")
self.assertFalse(page.is_comment_editor_visible("comment_other_author"))
self.assertTrue(page.is_response_editor_visible("response1"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
self.assertFalse(page.is_response_editor_visible("response1"))
self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
page.cancel_comment_edit("comment_self_author", original_body)
self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
self.assertTrue(page.is_add_comment_visible("response1"))
@attr('shard_2')
class InlineDiscussionTest(UniqueCourseTest, DiscussionResponsePaginationTestMixin):
"""
Tests for inline discussions
"""
def setUp(self):
super(InlineDiscussionTest, self).setUp()
self.thread_ids = []
self.discussion_id = "test_discussion_{}".format(uuid4().hex)
self.additional_discussion_id = "test_discussion_{}".format(uuid4().hex)
self.course_fix = CourseFixture(**self.course_info).add_children(
XBlockFixtureDesc("chapter", "Test Section").add_children(
XBlockFixtureDesc("sequential", "Test Subsection").add_children(
XBlockFixtureDesc("vertical", "Test Unit").add_children(
XBlockFixtureDesc(
"discussion",
"Test Discussion",
metadata={"discussion_id": self.discussion_id}
),
XBlockFixtureDesc(
"discussion",
"Test Discussion 1",
metadata={"discussion_id": self.additional_discussion_id}
)
)
)
)
).install()
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.courseware_page.visit()
self.discussion_page = InlineDiscussionPage(self.browser, self.discussion_id)
self.additional_discussion_page = InlineDiscussionPage(self.browser, self.additional_discussion_id)
def setup_thread_page(self, thread_id):
self.discussion_page.expand_discussion()
self.assertEqual(self.discussion_page.get_num_displayed_threads(), 1)
self.thread_page = InlineDiscussionThreadPage(self.browser, thread_id) # pylint: disable=attribute-defined-outside-init
self.thread_page.expand()
def setup_multiple_inline_threads(self, thread_count):
"""
        Set up multiple threads on the page by passing 'thread_count'.
"""
threads = []
for i in range(thread_count):
thread_id = "test_thread_{}_{}".format(i, uuid4().hex)
threads.append(
Thread(id=thread_id, commentable_id=self.discussion_id),
)
self.thread_ids.append(thread_id)
thread_fixture = MultipleThreadFixture(threads)
thread_fixture.add_response(
Response(id="response1"),
[Comment(id="comment1", user_id="other"), Comment(id="comment2", user_id=self.user_id)],
threads[0]
)
thread_fixture.push()
def test_page_while_expanding_inline_discussion(self):
"""
        Tests for the Inline Discussion page with multiple threads. The page should not focus 'thread-wrapper'
after loading responses.
"""
self.setup_multiple_inline_threads(thread_count=3)
self.discussion_page.expand_discussion()
thread_page = InlineDiscussionThreadPage(self.browser, self.thread_ids[0])
thread_page.expand()
# Check if 'thread-wrapper' is focused after expanding thread
self.assertFalse(thread_page.check_if_selector_is_focused(selector='.thread-wrapper'))
def test_initial_render(self):
self.assertFalse(self.discussion_page.is_discussion_expanded())
def test_expand_discussion_empty(self):
self.discussion_page.expand_discussion()
self.assertEqual(self.discussion_page.get_num_displayed_threads(), 0)
def check_anonymous_to_peers(self, is_staff):
thread = Thread(id=uuid4().hex, anonymous_to_peers=True, commentable_id=self.discussion_id)
thread_fixture = SingleThreadViewFixture(thread)
thread_fixture.push()
self.setup_thread_page(thread.get("id"))
self.assertEqual(self.thread_page.is_thread_anonymous(), not is_staff)
def test_anonymous_to_peers_threads_as_staff(self):
AutoAuthPage(self.browser, course_id=self.course_id, roles="Administrator").visit()
self.courseware_page.visit()
self.check_anonymous_to_peers(True)
def test_anonymous_to_peers_threads_as_peer(self):
self.check_anonymous_to_peers(False)
def test_discussion_blackout_period(self):
now = datetime.datetime.now(UTC)
self.course_fix.add_advanced_settings(
{
u"discussion_blackouts": {
"value": [
[
(now - datetime.timedelta(days=14)).isoformat(),
(now + datetime.timedelta(days=2)).isoformat()
]
]
}
}
)
self.course_fix._add_advanced_settings()
self.browser.refresh()
thread = Thread(id=uuid4().hex, commentable_id=self.discussion_id)
thread_fixture = SingleThreadViewFixture(thread)
thread_fixture.addResponse(
Response(id="response1"),
[Comment(id="comment1", user_id="other"), Comment(id="comment2", user_id=self.user_id)])
thread_fixture.push()
self.setup_thread_page(thread.get("id"))
self.assertFalse(self.discussion_page.element_exists(".new-post-btn"))
self.assertFalse(self.thread_page.has_add_response_button())
self.assertFalse(self.thread_page.is_response_editable("response1"))
self.assertFalse(self.thread_page.is_add_comment_visible("response1"))
self.assertFalse(self.thread_page.is_comment_editable("comment1"))
self.assertFalse(self.thread_page.is_comment_editable("comment2"))
self.assertFalse(self.thread_page.is_comment_deletable("comment1"))
self.assertFalse(self.thread_page.is_comment_deletable("comment2"))
def test_dual_discussion_module(self):
"""
        Scenario: Two discussion modules in one unit shouldn't override each other's actions
        Given that I'm on a courseware page with two inline discussions
        When I click on one discussion module's new post button
        Then the new post form of that module should be added to the DOM
        And I should be shown the new post form of that module
        And I shouldn't be shown the second discussion module's new post form
        And I click on the second discussion module's new post button
        Then the new post form of the second module should be added to the DOM
        And I should be shown the second discussion module's new post form
        And I shouldn't be shown the first discussion module's new post form
        And I should have two new post forms in the DOM
        When I click back on the first module's new post button
        Then I should be shown the new post form of that module
        And I shouldn't be shown the second discussion module's new post form
"""
self.discussion_page.wait_for_page()
self.additional_discussion_page.wait_for_page()
self.discussion_page.click_new_post_button()
with self.discussion_page.handle_alert():
self.discussion_page.click_cancel_new_post()
self.additional_discussion_page.click_new_post_button()
self.assertFalse(self.discussion_page._is_element_visible(".new-post-article"))
with self.additional_discussion_page.handle_alert():
self.additional_discussion_page.click_cancel_new_post()
self.discussion_page.click_new_post_button()
self.assertFalse(self.additional_discussion_page._is_element_visible(".new-post-article"))
@attr('shard_2')
class DiscussionUserProfileTest(UniqueCourseTest):
"""
Tests for user profile page in discussion tab.
"""
PAGE_SIZE = 20 # django_comment_client.forum.views.THREADS_PER_PAGE
PROFILED_USERNAME = "profiled-user"
def setUp(self):
super(DiscussionUserProfileTest, self).setUp()
CourseFixture(**self.course_info).install()
# The following line creates a user enrolled in our course, whose
# threads will be viewed, but not the one who will view the page.
# It isn't necessary to log them in, but using the AutoAuthPage
# saves a lot of code.
self.profiled_user_id = AutoAuthPage(
self.browser,
username=self.PROFILED_USERNAME,
course_id=self.course_id
).visit().get_user_id()
# now create a second user who will view the profile.
self.user_id = AutoAuthPage(
self.browser,
course_id=self.course_id
).visit().get_user_id()
def check_pages(self, num_threads):
# set up the stub server to return the desired amount of thread results
threads = [Thread(id=uuid4().hex) for _ in range(num_threads)]
UserProfileViewFixture(threads).push()
# navigate to default view (page 1)
page = DiscussionUserProfilePage(
self.browser,
self.course_id,
self.profiled_user_id,
self.PROFILED_USERNAME
)
page.visit()
current_page = 1
total_pages = max(num_threads - 1, 1) / self.PAGE_SIZE + 1
all_pages = range(1, total_pages + 1)
def _check_page():
# ensure the page being displayed as "current" is the expected one
self.assertEqual(page.get_current_page(), current_page)
# ensure the expected threads are being shown in the right order
threads_expected = threads[(current_page - 1) * self.PAGE_SIZE:current_page * self.PAGE_SIZE]
self.assertEqual(page.get_shown_thread_ids(), [t["id"] for t in threads_expected])
# ensure the clickable page numbers are the expected ones
self.assertEqual(page.get_clickable_pages(), [
p for p in all_pages
if p != current_page
and p - 2 <= current_page <= p + 2
or (current_page > 2 and p == 1)
or (current_page < total_pages and p == total_pages)
])
# ensure the previous button is shown, but only if it should be.
# when it is shown, make sure it works.
if current_page > 1:
self.assertTrue(page.is_prev_button_shown(current_page - 1))
page.click_prev_page()
self.assertEqual(page.get_current_page(), current_page - 1)
page.click_next_page()
self.assertEqual(page.get_current_page(), current_page)
else:
self.assertFalse(page.is_prev_button_shown())
# ensure the next button is shown, but only if it should be.
if current_page < total_pages:
self.assertTrue(page.is_next_button_shown(current_page + 1))
else:
self.assertFalse(page.is_next_button_shown())
# click all the way up through each page
for i in range(current_page, total_pages):
_check_page()
if current_page < total_pages:
page.click_on_page(current_page + 1)
current_page += 1
# click all the way back down
for i in range(current_page, 0, -1):
_check_page()
if current_page > 1:
page.click_on_page(current_page - 1)
                current_page -= 1
        return page
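    # Pagination window, by example (sketch): with total_pages == 8 and
    # current_page == 4, the clickable pages are [1, 2, 3, 5, 6, 8] -- a +/-2
    # window around the current page plus first- and last-page shortcuts.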
def test_0_threads(self):
self.check_pages(0)
def test_1_thread(self):
self.check_pages(1)
def test_20_threads(self):
self.check_pages(20)
def test_21_threads(self):
self.check_pages(21)
def test_151_threads(self):
self.check_pages(151)
def test_pagination_window_reposition(self):
page = self.check_pages(50)
page.click_next_page()
page.wait_for_ajax()
self.assertTrue(page.is_window_on_top())
def test_redirects_to_learner_profile(self):
"""
        Scenario: Verify that the learner-profile link is present on the forum discussion page and we can navigate to it.
        Given that I am on a discussion forum user's profile page.
        And I can see a username on the left sidebar
        When I click on the username.
        Then I will be navigated to the Learner Profile page.
        And I can see the username on the Learner Profile page
"""
learner_profile_page = LearnerProfilePage(self.browser, self.PROFILED_USERNAME)
page = self.check_pages(1)
page.click_on_sidebar_username()
learner_profile_page.wait_for_page()
self.assertTrue(learner_profile_page.field_is_visible('username'))
@attr('shard_2')
class DiscussionSearchAlertTest(UniqueCourseTest):
"""
Tests for spawning and dismissing alerts related to user search actions and their results.
"""
SEARCHED_USERNAME = "gizmo"
def setUp(self):
super(DiscussionSearchAlertTest, self).setUp()
CourseFixture(**self.course_info).install()
# first auto auth call sets up a user that we will search for in some tests
self.searched_user_id = AutoAuthPage(
self.browser,
username=self.SEARCHED_USERNAME,
course_id=self.course_id
).visit().get_user_id()
# this auto auth call creates the actual session user
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.page = DiscussionTabHomePage(self.browser, self.course_id)
self.page.visit()
def setup_corrected_text(self, text):
SearchResultFixture(SearchResult(corrected_text=text)).push()
def check_search_alert_messages(self, expected):
actual = self.page.get_search_alert_messages()
self.assertTrue(all(map(lambda msg, sub: msg.lower().find(sub.lower()) >= 0, actual, expected)))
def test_no_rewrite(self):
self.setup_corrected_text(None)
self.page.perform_search()
self.check_search_alert_messages(["no threads"])
def test_rewrite_dismiss(self):
self.setup_corrected_text("foo")
self.page.perform_search()
self.check_search_alert_messages(["foo"])
self.page.dismiss_alert_message("foo")
self.check_search_alert_messages([])
def test_new_search(self):
self.setup_corrected_text("foo")
self.page.perform_search()
self.check_search_alert_messages(["foo"])
self.setup_corrected_text("bar")
self.page.perform_search()
self.check_search_alert_messages(["bar"])
self.setup_corrected_text(None)
self.page.perform_search()
self.check_search_alert_messages(["no threads"])
def test_rewrite_and_user(self):
self.setup_corrected_text("foo")
self.page.perform_search(self.SEARCHED_USERNAME)
self.check_search_alert_messages(["foo", self.SEARCHED_USERNAME])
def test_user_only(self):
self.setup_corrected_text(None)
self.page.perform_search(self.SEARCHED_USERNAME)
self.check_search_alert_messages(["no threads", self.SEARCHED_USERNAME])
# make sure clicking the link leads to the user profile page
UserProfileViewFixture([]).push()
self.page.get_search_alert_links().first.click()
DiscussionUserProfilePage(
self.browser,
self.course_id,
self.searched_user_id,
self.SEARCHED_USERNAME
).wait_for_page()
@attr('shard_2')
class DiscussionSortPreferenceTest(UniqueCourseTest):
"""
Tests for the discussion page displaying a single thread.
"""
def setUp(self):
super(DiscussionSortPreferenceTest, self).setUp()
# Create a course to register for.
CourseFixture(**self.course_info).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.sort_page = DiscussionSortPreferencePage(self.browser, self.course_id)
self.sort_page.visit()
def test_default_sort_preference(self):
"""
        Test the default sorting preference of the user (default = "date").
"""
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, "date")
def test_change_sort_preference(self):
"""
        Test that the user's sorting preference changes properly.
"""
selected_sort = ""
for sort_type in ["votes", "comments", "date"]:
self.assertNotEqual(selected_sort, sort_type)
self.sort_page.change_sort_preference(sort_type)
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
def test_last_preference_saved(self):
"""
        Test that the user's last sorting preference is saved across page refreshes.
"""
selected_sort = ""
for sort_type in ["votes", "comments", "date"]:
self.assertNotEqual(selected_sort, sort_type)
self.sort_page.change_sort_preference(sort_type)
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
self.sort_page.refresh_page()
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
|
IndonesiaX/edx-platform
|
common/test/acceptance/tests/discussion/test_discussion.py
|
Python
|
agpl-3.0
| 46,842
|
[
"VisIt"
] |
c7e1e34c3e9d1f30d2c68f967d1a0e4f1269a97a673a8fca25eee899300671ca
|
import pysam
is_paired = 0
is_proper = 0
for read in pysam.AlignmentFile("ex1.bam", "rb"):
is_paired += read.is_paired
is_proper += read.is_proper_pair
print ("there are alignments of %i paired reads" % is_paired)
print ("there are %i proper paired alignments" % is_proper)
|
brendanofallon/pysam
|
tests/python_flagstat.py
|
Python
|
mit
| 285
|
[
"pysam"
] |
63dd340dfb76609b1405f21a31b8a3ab755fed66d9235dc19056d29b72ccc176
|
#!/usr/bin/python
# break : break is a keyword which takes you out of a loop.
# sys.exit: takes you out of the program.
# Task : make sure you give your friend only 3 chances.
import sys
my_answer = raw_input("do you want to play the game? y/n :")
if my_answer == 'n':
sys.exit()
my_num = 7 # hidden in my palm
#test = True
# you enter the while block only if the condition evaluates to True
while True:
guess_num = int(raw_input("please guess the number:"))
    if guess_num > my_num:
        print "Buddy, you guessed a larger number"
    elif guess_num < my_num:
        print "Buddy, you guessed a smaller number"
    elif guess_num == my_num:
        print "Congrats!! You guessed the right number"
        #test=False
        break
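# Sketch of the "3 chances" task from the header comment (an assumption; the
# original leaves it unimplemented): bound the loop with a counter instead of
# `while True`, e.g.
#
#   for attempt in range(3):
#       guess_num = int(raw_input("please guess the number:"))
#       ...  # same comparison logic as above, break on a correct guess
#   else:
#       print "Sorry, you are out of chances!"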
print "Thanks for playing.Please visit us again."
|
tuxfux-hlp-notes/python-batches
|
archieves/batch-58/my_game.py
|
Python
|
gpl-3.0
| 773
|
[
"VisIt"
] |
2f27efa49cb302c7f38ca849d247281e91f22410228c2bca53ed2104ce4913a6
|