| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
]
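The pattern above maps the app root (r'^$') to views.index via the pre-Django-2.0 url() helper (Django 2.0+ spells this re_path(), or path('', ...)). The view module itself is not part of this record; a minimal sketch, assuming nothing beyond the name the URLconf references:
from django.http import HttpResponse

def index(request):
    # Placeholder view for the schedule index page (illustrative only).
    return HttpResponse("Schedule index.")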
|
{
"content_hash": "2af2ae96924ecbb524dc07c6fd8b1bf9",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 42,
"avg_line_length": 14.625,
"alnum_prop": 0.6495726495726496,
"repo_name": "djb1815/Essex-MuSoc",
"id": "c650e6df983de912f3107537a7314604bf5fa56b",
"size": "117",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "musoc_web/schedule/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "105"
},
{
"name": "HTML",
"bytes": "26649"
},
{
"name": "Python",
"bytes": "14418"
}
],
"symlink_target": ""
}
|
class FuzzyNumber(object):
def __init__(self, value, threshold):
self.value = value
self.threshold = threshold
def __repr__(self):
return "[%f %f]" % (self.value, self.threshold)
def __cmp__(self, other):
if hasattr(other, "value"):
if abs(self.value - other.value) < self.threshold:
return 0
else:
return cmp(self.value, other.value)
return cmp(self.value, other)
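__cmp__ and the cmp() builtin exist only in Python 2; under Python 3 the same tolerance-based comparison is expressed with rich comparison methods. A minimal port sketch (the name FuzzyNumber3 is illustrative):
import functools

@functools.total_ordering
class FuzzyNumber3(object):
    def __init__(self, value, threshold):
        self.value = value
        self.threshold = threshold
    def __repr__(self):
        return "[%f %f]" % (self.value, self.threshold)
    def __eq__(self, other):
        # Fuzzy equality only against other fuzzy numbers, as in the original;
        # plain numbers are compared exactly.
        if hasattr(other, "value"):
            return abs(self.value - other.value) < self.threshold
        return self.value == other
    def __lt__(self, other):
        if hasattr(other, "value"):
            return not self == other and self.value < other.value
        return self.value < other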
|
{
"content_hash": "eb71ac31157edd67351a75066cee4024",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 62,
"avg_line_length": 29.6875,
"alnum_prop": 0.5410526315789473,
"repo_name": "metapolator/mutatormathtools",
"id": "259d74704ec101e67637f27124ed20edd144b49c",
"size": "475",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python_modules/lib/python/defcon/tools/fuzzyNumber.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "3108"
},
{
"name": "Python",
"bytes": "2101371"
},
{
"name": "Shell",
"bytes": "3220"
}
],
"symlink_target": ""
}
|
from CIM15.IEC61970.Informative.InfGMLSupport.GmlDiagramObject import GmlDiagramObject
class GmlPointGeometry(GmlDiagramObject):
"""Typically used for rendering power system resources and/or point assets.Typically used for rendering power system resources and/or point assets.
"""
def __init__(self, *args, **kw_args):
"""Initialises a new 'GmlPointGeometry' instance.
"""
super(GmlPointGeometry, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = []
_many_refs = []
|
{
"content_hash": "42e872f6d0d397a038bef296fd010c7d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 151,
"avg_line_length": 30.42105263157895,
"alnum_prop": 0.6505190311418685,
"repo_name": "rwl/PyCIM",
"id": "2393d796d4fd6e9a2e3593549e7a46e2e92c15e9",
"size": "1678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM15/IEC61970/Informative/InfGMLSupport/GmlPointGeometry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
}
|
"""
SECURITY ENDPOINT CHECKS
Adds auth check endpoints: /checklogged and /testadmin
"""
from __future__ import division, absolute_import
from .. import myself, lic, get_logger
from .base import ExtendedApiResource
from . import decorators as decorate
from flask_security import roles_required, auth_token_required
from confs import config
__author__ = myself
__copyright__ = myself
__license__ = lic
logger = get_logger(__name__)
class Verify(ExtendedApiResource):
""" API online test """
def get(self):
return "Hello World!"
class VerifyLogged(ExtendedApiResource):
""" Token authentication test """
@decorate.apimethod
@auth_token_required
def get(self):
return "Valid user"
class VerifyAdmin(ExtendedApiResource):
""" Token and Role authentication test """
@decorate.apimethod
@auth_token_required
@roles_required(config.ROLE_ADMIN)
def get(self):
return "I am admin!"
|
{
"content_hash": "0920416935ac79e66eed513579d4f98d",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 62,
"avg_line_length": 22.046511627906977,
"alnum_prop": 0.6972573839662447,
"repo_name": "pdonorio/rest-mock",
"id": "94a34e52b7eb509f50bbcab94b27868c2fa78f1b",
"size": "973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restapi/resources/checkauth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "431"
},
{
"name": "Python",
"bytes": "198187"
},
{
"name": "Shell",
"bytes": "890"
}
],
"symlink_target": ""
}
|
"""Transport for Python logging handler.
Logs directly to the Stackdriver Logging API with a synchronous call.
"""
from google.cloud.logging import _helpers
from google.cloud.logging.handlers.transports.base import Transport
class SyncTransport(Transport):
"""Basic sychronous transport.
Uses this library's Logging client to directly make the API call.
"""
def __init__(self, client, name):
self.logger = client.logger(name)
def send(
self, record, message, resource=None, labels=None, trace=None, span_id=None
):
"""Overrides transport.send().
:type record: :class:`logging.LogRecord`
:param record: Python log record that the handler was called with.
:type message: str
:param message: The message from the ``LogRecord`` after being
formatted by the associated log formatters.
:type resource: :class:`~google.cloud.logging.resource.Resource`
:param resource: (Optional) Monitored resource of the entry.
:type labels: dict
:param labels: (Optional) Mapping of labels for the entry.
:type trace: str
:param trace: (Optional) Trace ID for the entry.
:type span_id: str
:param span_id: (Optional) Span ID within the trace for the entry.
"""
info = {"message": message, "python_logger": record.name}
self.logger.log_struct(
info,
severity=_helpers._normalize_severity(record.levelno),
resource=resource,
labels=labels,
trace=trace,
span_id=span_id,
)
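SyncTransport makes one blocking API request per log record, unlike the default background-thread transport. A minimal wiring sketch, assuming standard google-cloud-logging usage of this era (the logger name is illustrative):
import logging
import google.cloud.logging
from google.cloud.logging.handlers import CloudLoggingHandler
from google.cloud.logging.handlers.transports import SyncTransport

client = google.cloud.logging.Client()
# CloudLoggingHandler accepts a transport class; the default is the
# background-thread transport.
handler = CloudLoggingHandler(client, name="my_log", transport=SyncTransport)
logging.getLogger().addHandler(handler)
logging.warning("sent synchronously")  # one API call per record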
|
{
"content_hash": "6352820ff5c1acae4dd21f9fbfe82c1e",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 83,
"avg_line_length": 32.266666666666666,
"alnum_prop": 0.6363636363636364,
"repo_name": "tswast/google-cloud-python",
"id": "e87eb4885fbfa893ed04dfad0e35f4007b63949a",
"size": "2027",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "logging/google/cloud/logging/handlers/transports/sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
}
|
"""
Tests of inherited privilege handling.
"""
import unittest
import sys
from PyQt4.QtGui import QApplication
from datafinder.core.item.privileges.principal import SPECIAL_PRINCIPALS
from datafinder.gui.user.dialogs.privilege_dialog.main import PrivilegeDialog
from datafinder_test.gui.user.dialogs.privilege_dialog.main import PrivilegeItemMock, PrivilegeRepositoryMock
__version__ = "$Revision-Id$"
class InheritedPrivilegeControllerTest(unittest.TestCase):
""" Tests of the inherited privileges controller. """
_application = QApplication(sys.argv)
def setUp(self):
""" Creates the privilege dialog and all required mocks. """
# Setup repository
# "/" -> 2 principals, "/test" -> no principals
selectedItem = PrivilegeItemMock("/test/test.pdf", None)
self._repositoryMock = PrivilegeRepositoryMock([selectedItem])
rootItem = self._repositoryMock.nodeFromPath("/")
rootItem.acl.addDefaultPrincipal(SPECIAL_PRINCIPALS[1])
middleItem = self._repositoryMock.nodeFromPath("/test")
middleItem.acl.clearPrivileges(SPECIAL_PRINCIPALS[0])
# Creating the dialog
self._privilegeDialog = PrivilegeDialog(self._repositoryMock)
self._privilegeDialog.item = selectedItem
self._principalController = self._privilegeDialog._principalSearchController
self._model = self._privilegeDialog._inheritedPrivilegesModel
self._controller = self._privilegeDialog._inheritedPrivilegesController
# Checking model content
self.assertEquals(self._model.rowCount(), 2)
def testPrivilegeSelection(self):
""" Tests correct handling of privilege table selection. """
# Selecting existing rows
self._controller._privilegeWidget.selectRow(0)
self.assertFalse(self._controller._selectItemWidget.hasEmptySelection)
self._controller._privilegeWidget.selectRow(1)
self.assertEquals(unicode(self._controller._selectItemWidget.pathLineEdit.text()), "/")
# Selecting non-existing row
self._controller._privilegeWidget.selectRow(2)
self._controller._privilegeWidget.selectionModel().clear()
self.assertFalse(len(self._controller._privilegeWidget.selectedIndexes()) > 0)
self.assertEquals(unicode(self._controller._selectItemWidget.pathLineEdit.text()), "/")
def testTreeSelection(self):
""" Tests correct handling of selections in the item tree. """
# Testing initial selection of the root item
self.assertEquals(len(self._controller._privilegeWidget.selectedIndexes()), 8)
# Testing selection of the middle item with no table entries
index = self._repositoryMock.indexFromPath("/test")
self._controller._selectItemWidget.selectedIndexes = [index]
self.assertEquals(unicode(self._controller._selectItemWidget.pathLineEdit.text()), "/test")
self.assertEquals(len(self._controller._privilegeWidget.selectedIndexes()), 0)
def testEdit(self):
""" Tests the editing of an inherited item. """
self._controller._privilegeWidget.selectRow(0)
self._controller._editButton.click()
self.assertEquals(len(self._model._itemRowMap), 0)
self.assertEquals(self._privilegeDialog.item.path, "/")
|
{
"content_hash": "2eff0383588c932fed1fff9eb4acc7b8",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 109,
"avg_line_length": 42.97530864197531,
"alnum_prop": 0.6762424590634875,
"repo_name": "DLR-SC/DataFinder",
"id": "00809c1f125d06a1264ccd157ac5fd2e36c97b36",
"size": "5265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unittest/datafinder_test/gui/user/dialogs/privilege_dialog/inherited_privileges_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "7649"
},
{
"name": "Python",
"bytes": "7056802"
},
{
"name": "QMake",
"bytes": "1975"
}
],
"symlink_target": ""
}
|
"""
The PhraseEngine for extracting phrases from discussions
"""
from chunker import BigramChunker, TrigramChunker
from segmenter import *
from collocate import *
from exceptions import PhraseEngineException, PhraseEngineServiceException
from nltk.corpus import conll2000, conll2002
import random
class PhraseEngine(object):
def __init__(self):
self.document = None
def set_document(self,document=None):
self.document = document
return self
def _build_training_sents(self):
# This method randomly select a corpus from the provided lists and then
# build and return a train sentences that the chunkers will use
corpuses = [(conll2000,'train.txt'),(conll2002,'esp.train')]
#trainer = random.choice(corpuses)
#train_sents = trainer[0].chunked_sents(trainer[1],chunk_types=['NP'])
train_sents = conll2000.chunked_sents('train.txt',chunk_types=['NP'])
return train_sents
def _build_chunkers(self):
# Build a map of chunkers that will be used for chunking the
# sentences
chunkers = []
# select a trainer that will be used for training the chunkers
train_sents = self._build_training_sents()
if not train_sents:
raise PhraseEngineException('Could not build train sents for chunkers')
chunkers.append(BigramChunker(train_sents))
chunkers.append(TrigramChunker(train_sents))
return chunkers
def _generate_sentences(self):
# From the given document generate sentences that we need to POS tag
if not self.document or not len(self.document):
return None
segmenter = TextSegmentation(self.document)
sentences = segmenter.segment()
if not sentences:
raise PhraseEngineException('Attempting to generate sentences from document but failed')
return sentences
def _generate_word_phrase(self,word_tags=[]):
# This uses the PhraseFuture engine to generate word phrases
# that would form a list of dicts with frequency/word mappings
words = []
if not word_tags:
return words
word_future = PhraseFuture()
try:
word_future.setChunks(word_tags)
words = word_future.parse()
except Exception as e:
raise PhraseEngineException(str(e))
return words
def build_phrases(self):
# This is the main entry point from which the document is processed to
# generate the phrases and then returns a list of dicts
words = []
if not self.document:
raise PhraseEngineServiceException('No document is set for processing')
# We have a document for processing
try:
# We first convert the document into sentences
sentences = self._generate_sentences()
# Build the chunkers that will find the phrases in the sentences
chunkers = self._build_chunkers()
word_tags = []
# Iterate through the chunkers and feed it with each of the sentences
# to build a tree, then from the tree find all leaves under nodes of type 'NP';
# track these nodes, for they contain what we are looking for
for chunker in chunkers:
for sent in sentences:
tree = chunker.parse(sent)
for subtree in tree.subtrees():
if subtree.node == 'NP':
word_tags.append(subtree.leaves())
if not word_tags:
print word_tags
raise PhraseEngineException('Parser could not generate sentences from document')
# Ok now that we have a word_tags we need to build collocations from it using
# the collocation future. The essence of building a collocation map is to find commonly
# used phrases and even new phrases that can stand or express an idea
collocation_future = CollocationFuture()
future_words = []
for wordtag in word_tags:
# convert to tuples
future_words.append(tuple(wordtag))
collocation_future.setChunks(future_words)
collocation_future.willParse()
collocations = collocation_future.parse()
if not collocations:
raise PhraseEngineException('Building collocations from tagged words failed')
# Ok finally we prepare the collocations for phrase extractions using the
# pos-tag as a guide to determining what a noun phrase should be. Occasionally the engine
# generates a very long phrase, which means that the final generated noun phrase still
# requires a human to post-process the outcome
phrases = []
for collo in collocations:
for item in collo:
phrases.append(list(item))
phrase_future = PhraseFuture()
phrase_future.setChunks(phrases)
words = phrase_future.parse()
except (Exception, PhraseEngineException) as e:
raise PhraseEngineServiceException(e)
return words
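The chunker module is not part of this record; the BigramChunker/TrigramChunker used here follow the classic NLTK-book pattern of training an n-gram tagger over IOB chunk tags (note also that subtree.node is pre-NLTK-3 API; NLTK 3+ renamed it to subtree.label()). A sketch of that pattern, assuming it matches the missing module:
import nltk

class BigramChunker(nltk.ChunkParserI):
    def __init__(self, train_sents):
        # Re-encode the chunked training trees as (POS tag, IOB chunk tag) pairs.
        train_data = [[(t, c) for w, t, c in nltk.chunk.tree2conlltags(sent)]
                      for sent in train_sents]
        self.tagger = nltk.BigramTagger(train_data)
    def parse(self, sentence):
        # Tag the POS sequence with chunk tags, then rebuild a chunk tree.
        pos_tags = [pos for (word, pos) in sentence]
        tagged_pos_tags = self.tagger.tag(pos_tags)
        chunktags = [chunktag for (pos, chunktag) in tagged_pos_tags]
        conlltags = [(word, pos, chunktag)
                     for ((word, pos), chunktag) in zip(sentence, chunktags)]
        return nltk.chunk.conlltags2tree(conlltags)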
|
{
"content_hash": "ee8710a31312c026308e01bff43df83b",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 101,
"avg_line_length": 41.5,
"alnum_prop": 0.6116774791473587,
"repo_name": "donama/phraseengine",
"id": "d5070642340466b0cf50801119078523f6d16687",
"size": "5417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23428"
}
],
"symlink_target": ""
}
|
import tornado.ioloop
import tornado.web
import logging
import os
import json
import db
class SmsModel:
def __init__(self):
pass
def fetch(self):
item, count = db.fetch_sms_task()
if item:
sms_id = item[0]
number = item[1]
message = item[2]
item = {'type': 'sendsms', 'id': sms_id, 'number': number, 'message': message}
return item
def enqueue(self, user_id, cmd_str):
cmd = json.loads(cmd_str)
cmd_type = cmd['type']
number = cmd['number']
msg = cmd['message']
# TODO user_id
result = db.add_new_sms(user_id, number, msg)
return result
def set_sms_sent_to_worker(self, worker_info, sms_id):
db.set_sms_sent_to_worker(worker_info, sms_id)
def dump(self):
pass
def auth(self, username, password):
return db.auth(username, password)
workers = []
model = SmsModel()
def try_send_one_message():
if len(workers) > 0:
handler = workers[0]
msg = model.fetch()
model.dump()
if msg is not None:
send(handler, msg)
def send(handler, msg):
handler.write(json.dumps(msg))
handler.finish()
workers.remove(handler)
print "workers after send:", workers
worker_info = str(vars(handler.request))
model.set_sms_sent_to_worker(worker_info, msg['id'])
print "set_sms_sent_to_worker complete!"
class MainHandler(tornado.web.RequestHandler):
def get(self):
# self.write("mainpage")
self.render('index.html')
class SendHandler(tornado.web.RequestHandler):
def post(self):
# self.write("get /test")
print "get /send"
number = self.get_argument('number', '', True)
message = self.get_argument('message', '', True)
user = self.get_argument('user', '')
password = self.get_argument('password', '')
if not number or not user or not password:
self.write('invalid params')
self.set_status(400)
return
ok, bundle = model.auth(user, password)
if not ok:
bundle['result'] = ok
self.write(json.dumps(bundle))
return
user_id = bundle['user_id']
sms = {'type': 'sendsms', 'number': number, 'message': message}
sms_json = json.dumps(sms)
ok, bundle = model.enqueue(user_id, sms_json)
model.dump()
try_send_one_message()
bundle['result'] = ok
self.write(json.dumps(bundle))
class CometHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self):
print "get /comet"
workers.append(self)
print "workers after append:", workers
try_send_one_message()
def on_connection_close(self):
print "get /comet closed"
workers.remove(self)
print "workers after on_connection_close:", workers
application = tornado.web.Application(
handlers = [
(r"/", MainHandler),
(r"/comet", CometHandler),
(r"/send", SendHandler),
], template_path=os.path.join(os.path.dirname(__file__), "templates")
)
if __name__ == "__main__":
port = 8080
application.listen(port)
print "listening on port:", port
tornado.ioloop.IOLoop.instance().start()
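The worker side of this long-poll protocol is not in this file; from the handlers above, a worker GETs /comet and blocks until the server writes one JSON command back. A hypothetical minimal worker loop (Python 3; the server address and loop shape are illustrative):
import json
import urllib.request

SERVER = "http://localhost:8080"
while True:
    # Blocks until the server hands out an SMS task via the comet channel.
    with urllib.request.urlopen(SERVER + "/comet") as resp:
        cmd = json.loads(resp.read())
    if cmd.get("type") == "sendsms":
        print("send SMS #%s to %s: %s" % (cmd["id"], cmd["number"], cmd["message"]))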
|
{
"content_hash": "10b9fa9979b88fa7dc470a3441894f0b",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 90,
"avg_line_length": 32.693069306930695,
"alnum_prop": 0.5854027861901878,
"repo_name": "eggfly/gosms",
"id": "64c6cad27a70c73f3482f67b4ec68734b77694a2",
"size": "3325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_server_tornado.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "1602"
},
{
"name": "Java",
"bytes": "40726"
},
{
"name": "Python",
"bytes": "15229"
},
{
"name": "Shell",
"bytes": "40"
}
],
"symlink_target": ""
}
|
import astroid
from pylint.checkers import BaseChecker
from pylint.interfaces import IAstroidChecker
def register(linter):
linter.register_checker(ImportStudentCode(linter))
class ImportStudentCode(BaseChecker):
"""
An extra lint to ensure that studentcode isn't imported anywhere but
the appropriate places.
"""
__implements__ = IAstroidChecker
OK_TO_IMPORT_FUNCTIONS = {'run_student_code', 'runtime_test'}
name = 'import-student-code'
priority = -1
msgs = {
'E9999': (
'Imports studentcode',
'no-import-student-code',
"You shouldn't import student code, except in run_student_code"
),
}
def __init__(self, linter=None):
super(ImportStudentCode, self).__init__(linter)
self._stack = []
def _ok_to_import(self):
"""Are we inside a function where it's ok to import studentcode?"""
return len(self._stack) > 0 and len(self.OK_TO_IMPORT_FUNCTIONS & set(self._stack)) > 0
def visit_functiondef(self, node):
self._stack.append(node.name)
def leave_functiondef(self, node):
self._stack.pop()
def visit_importfrom(self, node):
if node.modname == 'studentcode':
if not self._ok_to_import():
self.add_message('no-import-student-code', node=node)
def visit_import(self, node):
mods = [name[0] for name in node.names]
if 'studentcode' in mods:
if not self._ok_to_import():
self.add_message('no-import-student-code', node=node)
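Given the runtime/lints.py path, this plugin would be enabled with pylint's standard plugin flag, e.g. pylint --load-plugins=lints <files>. An illustrative module showing what the checker flags versus allows:
import studentcode            # flagged: E9999 (no-import-student-code)

def run_student_code():
    import studentcode        # allowed: inside a function named in OK_TO_IMPORT_FUNCTIONS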
|
{
"content_hash": "8af01ff994f7d5a5487d2b85ae87ffe1",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 95,
"avg_line_length": 30.307692307692307,
"alnum_prop": 0.6186548223350253,
"repo_name": "pioneers/PieCentral",
"id": "8387005f199d1409e92d99ef2c6c903e42d4ecc7",
"size": "1576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runtime/lints.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11624"
},
{
"name": "C++",
"bytes": "98532"
},
{
"name": "CSS",
"bytes": "1291"
},
{
"name": "HTML",
"bytes": "89377"
},
{
"name": "JavaScript",
"bytes": "143349"
},
{
"name": "Makefile",
"bytes": "5767"
},
{
"name": "Protocol Buffer",
"bytes": "2041"
},
{
"name": "Python",
"bytes": "286387"
},
{
"name": "Ruby",
"bytes": "1676"
},
{
"name": "Shell",
"bytes": "26387"
}
],
"symlink_target": ""
}
|
import json, urllib2
from settings import *
class AllocineObject(object):
_dict = dict()
def __new__(cls, code, **kwargs):
AllocineObject._dict.setdefault(cls.__module__,{})
if code in AllocineObject._dict[cls.__module__]:
retval = AllocineObject._dict[cls.__module__][code]
retval.__init__(code, **kwargs)
else:
retval = super(AllocineObject, cls).__new__(cls)
AllocineObject._dict[cls.__module__][code] = retval
return retval
def __init__(self, code, **kwargs):
self.code = code
for k,v in kwargs.items():
self.__dict__[k] = v
def __unicode__(self):
return self.__class__.__name__
def __repr__(self):
return ("<%s #%s: %s>" % (self.__class__.__name__, self.code, self.__unicode__())).encode("utf8")
def getInfo(self, profile = "small"):
url = "http://api.allocine.fr/rest/v3/%s?partner=%s&format=json&code=%s&profile=%s" % (self.__class__.__name__.lower(), PARTNER_CODE, self.code, profile)
output = urllib2.urlopen(url).read()
d = json.loads(output)
for k,v in d[self.__class__.__name__.lower()].items():
self.__dict__[k] = v
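Concrete subclasses (defined elsewhere in the package) inherit both the per-module instance cache in __new__ and the REST fetch in getInfo. A hypothetical usage sketch (the Movie class and the code 61282 are illustrative; PARTNER_CODE comes from settings):
class Movie(AllocineObject):
    pass

m1 = Movie(61282)
m2 = Movie(61282)
assert m1 is m2                # same code -> same cached instance
m1.getInfo(profile="large")    # GETs .../rest/v3/movie?...&code=61282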
|
{
"content_hash": "ff06d86ec213c9000816b5e2ba5ee26d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 157,
"avg_line_length": 34.96969696969697,
"alnum_prop": 0.5961871750433275,
"repo_name": "gtnx/python-allocine",
"id": "983c89672ea7532fc4e333e74b81dec95606a3a5",
"size": "1154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "allocine/AllocineObject.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5685"
}
],
"symlink_target": ""
}
|
"""
Copyright 2012 Wordnik, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class DeploymentEvents:
def __init__(self):
self.swaggerTypes = {
'id': 'str',
'firstEvent': 'int',
'lastEvent': 'int',
'deploymentTotalEvents': 'int',
'events': 'list[str]'
}
self.id = None # str
self.firstEvent = None # int
self.lastEvent = None # int
self.deploymentTotalEvents = None # int
self.events = None # list[str]
|
{
"content_hash": "dcb5c55f77650fedb7be5fb4a634a92c",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 76,
"avg_line_length": 31.87878787878788,
"alnum_prop": 0.6283269961977186,
"repo_name": "Fewbytes/cosmo-manager-rest-client",
"id": "958911035e9d6f900572fd9ba96e066be6c9e45b",
"size": "1074",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cosmo_manager_rest_client/swagger/models/DeploymentEvents.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "52318"
}
],
"symlink_target": ""
}
|
import os
import sys
PY3= sys.version > '3'
import numpy
import pytest
from scipy import optimize
try:
import pynbody
_PYNBODY_LOADED= True
except ImportError:
_PYNBODY_LOADED= False
from galpy import orbit, potential
from galpy.util import _rotate_to_arbitrary_vector, coords
#Test whether the normalization of the potential works
def test_normalize_potential():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'FullTo' in p and not 'toPlanar' in p
and not 'evaluate' in p and not 'Wrapper' in p
and not 'toVertical' in p)]
pots.append('specialTwoPowerSphericalPotential')
pots.append('DehnenTwoPowerSphericalPotential')
pots.append('DehnenCoreTwoPowerSphericalPotential')
pots.append('HernquistTwoPowerSphericalPotential')
pots.append('JaffeTwoPowerSphericalPotential')
pots.append('NFWTwoPowerSphericalPotential')
pots.append('specialMiyamotoNagaiPotential')
pots.append('specialPowerSphericalPotential')
pots.append('specialFlattenedPowerPotential')
pots.append('specialMN3ExponentialDiskPotentialPD')
pots.append('specialMN3ExponentialDiskPotentialSECH')
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential',
'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
'SphericalPotential','interpSphericalPotential']
if False:
rmpots.append('DoubleExponentialDiskPotential')
rmpots.append('RazorThinExponentialDiskPotential')
for p in rmpots:
pots.remove(p)
for p in pots:
#if not 'NFW' in p: continue #For testing the test
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if hasattr(tp,'isNonAxi') and tp.isNonAxi:
continue # skip, bc vcirc not well defined
if not hasattr(tp,'normalize'): continue
tp.normalize(1.)
assert (tp.Rforce(1.,0.)+1.)**2. < 10.**-16., \
"Normalization of %s potential fails" % p
assert (tp.vcirc(1.)**2.-1.)**2. < 10.**-16., \
"Normalization of %s potential fails" % p
tp.normalize(.5)
if hasattr(tp,'toPlanar'):
ptp= tp.toPlanar()
else:
ptp= tp
assert (ptp.Rforce(1.,0.)+.5)**2. < 10.**-16., \
"Normalization of %s potential fails" % p
assert (ptp.vcirc(1.)**2.-0.5)**2. < 10.**-16., \
"Normalization of %s potential fails" % p
# Also test SphericalShell and RingPotential's setup, bc not done elsewhere
tp= potential.SphericalShellPotential(normalize=1.)
assert (tp.Rforce(1.,0.)+1.)**2. < 10.**-16., \
"Normalization of %s potential fails" % p
assert (tp.vcirc(1.)**2.-1.)**2. < 10.**-16., \
"Normalization of %s potential fails" % p
tp= potential.RingPotential(normalize=0.5)
assert (tp.Rforce(1.,0.)+0.5)**2. < 10.**-16., \
"Normalization of %s potential fails" % p
assert (tp.vcirc(1.)**2.-0.5)**2. < 10.**-16., \
"Normalization of %s potential fails" % p
return None
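# Note on the finite-difference scheme used by the tests below: each force is
# checked against minus the numerical derivative of the potential, e.g.
#   F_R(R,z) =~ (Phi(R,z) - Phi(R+dr,z)) / dr   with dr = 1e-8,
# where dr is recomputed as (R+dr) - R so that the step is exactly
# representable in floating point (the 'Representable number' lines below),
# avoiding cancellation error in the difference quotient.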
#Test whether the derivative of the potential is minus the force
def test_forceAsDeriv_potential():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'FullTo' in p and not 'toPlanar' in p
and not 'evaluate' in p and not 'Wrapper' in p
and not 'toVertical' in p)]
pots.append('specialTwoPowerSphericalPotential')
pots.append('DehnenTwoPowerSphericalPotential')
pots.append('DehnenCoreTwoPowerSphericalPotential')
pots.append('HernquistTwoPowerSphericalPotential')
pots.append('JaffeTwoPowerSphericalPotential')
pots.append('NFWTwoPowerSphericalPotential')
pots.append('specialMiyamotoNagaiPotential')
pots.append('specialMN3ExponentialDiskPotentialPD')
pots.append('specialMN3ExponentialDiskPotentialSECH')
pots.append('specialPowerSphericalPotential')
pots.append('specialFlattenedPowerPotential')
pots.append('testMWPotential')
pots.append('testplanarMWPotential')
pots.append('testlinearMWPotential')
pots.append('mockInterpRZPotential')
if _PYNBODY_LOADED:
pots.append('mockSnapshotRZPotential')
pots.append('mockInterpSnapshotRZPotential')
pots.append('mockCosmphiDiskPotentialnegcp')
pots.append('mockCosmphiDiskPotentialnegp')
pots.append('mockDehnenBarPotentialT1')
pots.append('mockDehnenBarPotentialTm1')
pots.append('mockDehnenBarPotentialTm5')
pots.append('mockEllipticalDiskPotentialT1')
pots.append('mockEllipticalDiskPotentialTm1')
pots.append('mockEllipticalDiskPotentialTm5')
pots.append('mockSteadyLogSpiralPotentialT1')
pots.append('mockSteadyLogSpiralPotentialTm1')
pots.append('mockSteadyLogSpiralPotentialTm5')
pots.append('mockTransientLogSpiralPotential')
pots.append('mockFlatEllipticalDiskPotential') #for evaluate w/ nonaxi lists
pots.append('mockMovingObjectPotential')
pots.append('mockMovingObjectPotentialExplPlummer')
pots.append('oblateHernquistPotential')
pots.append('oblateNFWPotential')
pots.append('oblatenoGLNFWPotential')
pots.append('oblateJaffePotential')
pots.append('prolateHernquistPotential')
pots.append('prolateNFWPotential')
pots.append('prolateJaffePotential')
pots.append('triaxialHernquistPotential')
pots.append('triaxialNFWPotential')
pots.append('triaxialJaffePotential')
pots.append('zRotatedTriaxialNFWPotential')
pots.append('yRotatedTriaxialNFWPotential')
pots.append('fullyRotatedTriaxialNFWPotential')
pots.append('fullyRotatednoGLTriaxialNFWPotential')
pots.append('HernquistTwoPowerTriaxialPotential')
pots.append('NFWTwoPowerTriaxialPotential')
pots.append('JaffeTwoPowerTriaxialPotential')
pots.append('mockSCFZeeuwPotential')
pots.append('mockSCFNFWPotential')
pots.append('mockSCFAxiDensity1Potential')
pots.append('mockSCFAxiDensity2Potential')
pots.append('mockSCFDensityPotential')
pots.append('mockAxisymmetricFerrersPotential')
pots.append('sech2DiskSCFPotential')
pots.append('expwholeDiskSCFPotential')
pots.append('nonaxiDiskSCFPotential')
pots.append('rotatingSpiralArmsPotential')
pots.append('specialSpiralArmsPotential')
pots.append('DehnenSmoothDehnenBarPotential')
pots.append('mockDehnenSmoothBarPotentialT1')
pots.append('mockDehnenSmoothBarPotentialTm1')
pots.append('mockDehnenSmoothBarPotentialTm5')
pots.append('mockDehnenSmoothBarPotentialDecay')
pots.append('SolidBodyRotationSpiralArmsPotential')
pots.append('triaxialLogarithmicHaloPotential')
pots.append('CorotatingRotationSpiralArmsPotential')
pots.append('GaussianAmplitudeDehnenBarPotential')
pots.append('nestedListPotential')
pots.append('mockInterpSphericalPotential')
pots.append('mockInterpSphericalPotentialwForce')
pots.append('mockAdiabaticContractionMWP14WrapperPotential')
pots.append('mockAdiabaticContractionMWP14ExplicitfbarWrapperPotential')
pots.append('mockRotatedAndTiltedMWP14WrapperPotential')
pots.append('mockRotatedAndTiltedMWP14WrapperPotentialwInclination')
pots.append('mockRotatedAndTiltedTriaxialLogHaloPotentialwInclination')
pots.append('mockRotatedTiltedOffsetMWP14WrapperPotential')
pots.append('mockOffsetMWP14WrapperPotential')
pots.append('mockTimeDependentAmplitudeWrapperPotential')
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential',
'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
'SphericalPotential','interpSphericalPotential']
if False:
rmpots.append('DoubleExponentialDiskPotential')
rmpots.append('RazorThinExponentialDiskPotential')
for p in rmpots:
pots.remove(p)
Rs= numpy.array([0.5,1.,2.])
Zs= numpy.array([0.,.125,-.125,0.25,-0.25])
phis= numpy.array([0.,0.5,-0.5,1.,-1.,
numpy.pi,0.5+numpy.pi,
1.+numpy.pi])
#tolerances in log10
tol= {}
tol['default']= -8.
tol['DoubleExponentialDiskPotential']= -6. #these are more difficult
tol['RazorThinExponentialDiskPotential']= -6.
tol['AnyAxisymmetricRazorThinDiskPotential']= -4.9
tol['mockInterpRZPotential']= -4.
tol['FerrersPotential']= -7.
for p in pots:
#if not 'NFW' in p: continue #For testing the test
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if hasattr(tp,'normalize'): tp.normalize(1.)
#Set tolerance
if p in list(tol.keys()): ttol= tol[p]
else: ttol= tol['default']
#Radial force
for ii in range(len(Rs)):
for jj in range(len(Zs)):
dr= 10.**-8.
newR= Rs[ii]+dr
dr= newR-Rs[ii] #Representable number
if isinstance(tp,potential.linearPotential):
mpotderivR= (potential.evaluatelinearPotentials(tp,Rs[ii])
-potential.evaluatelinearPotentials(tp,Rs[ii]+dr))/dr
tRforce= potential.evaluatelinearForces(tp,Rs[ii])
elif isinstance(tp,potential.planarPotential):
mpotderivR= (potential.evaluateplanarPotentials(tp,Rs[ii],phi=Zs[jj])-potential.evaluateplanarPotentials(tp,Rs[ii]+dr,phi=Zs[jj]))/dr
tRforce= potential.evaluateplanarRforces(tp,Rs[ii],
phi=Zs[jj])
else:
mpotderivR= (potential.evaluatePotentials(tp,Rs[ii],Zs[jj],phi=1.)
-potential.evaluatePotentials(tp,Rs[ii]+dr,Zs[jj],phi=1.))/dr
tRforce= potential.evaluateRforces(tp,Rs[ii],Zs[jj],phi=1.)
if tRforce**2. < 10.**ttol:
assert mpotderivR**2. < 10.**ttol, \
f"Calculation of the Radial force as the Radial derivative of the {p} potential fails at (R,Z) = ({Rs[ii]:.3f},{Zs[jj]:.3f}); diff = {numpy.fabs(tRforce-mpotderivR):e}, rel. diff = {numpy.fabs((tRforce-mpotderivR)/tRforce):e}"
else:
assert (tRforce-mpotderivR)**2./tRforce**2. < 10.**ttol, \
f"Calculation of the Radial force as the Radial derivative of the {p} potential fails at (R,Z) = ({Rs[ii]:.3f},{Zs[jj]:.3f}); diff = {numpy.fabs(tRforce-mpotderivR):e}, rel. diff = {numpy.fabs((tRforce-mpotderivR)/tRforce):e}"
#azimuthal torque, if it exists
if isinstance(tp,potential.linearPotential): continue
for ii in range(len(Rs)):
for jj in range(len(phis)):
dphi= 10.**-8.
newphi= phis[jj]+dphi
dphi= newphi-phis[jj] #Representable number
if isinstance(tp,potential.planarPotential):
mpotderivphi= (tp(Rs[ii],phi=phis[jj])-tp(Rs[ii],phi=phis[jj]+dphi))/dphi
tphitorque= potential.evaluateplanarphitorques(tp,Rs[ii],
phi=phis[jj])
else:
mpotderivphi= (tp(Rs[ii],0.05,phi=phis[jj])-tp(Rs[ii],0.05,phi=phis[jj]+dphi))/dphi
tphitorque= potential.evaluatephitorques(tp,Rs[ii],0.05,
phi=phis[jj])
try:
if tphitorque**2. < 10.**ttol:
assert(mpotderivphi**2. < 10.**ttol)
else:
assert((tphitorque-mpotderivphi)**2./tphitorque**2. < 10.**ttol)
except AssertionError:
if isinstance(tp,potential.planarPotential):
raise AssertionError(f"Calculation of the azimuthal torque as the azimuthal derivative of the {p} potential fails at (R,phi) = ({Rs[ii]:.3f},{phis[jj]:.3f}); diff = {numpy.fabs(tphitorque-mpotderivphi):e}, rel. diff = {numpy.fabs((tphitorque-mpotderivphi)/tphitorque):e}")
else:
raise AssertionError(f"Calculation of the azimuthal torque as the azimuthal derivative of the {p} potential fails at (R,Z,phi) = ({Rs[ii]:.3f},0.05,{phis[jj]:.3f}); diff = {numpy.fabs(tphitorque-mpotderivphi):e}, rel. diff = {numpy.fabs((tphitorque-mpotderivphi)/tphitorque):e}")
#Vertical force, if it exists
if isinstance(tp,potential.planarPotential) \
or isinstance(tp,potential.linearPotential): continue
for ii in range(len(Rs)):
for jj in range(len(Zs)):
##Excluding KuzminDiskPotential when z = 0
if Zs[jj]==0 and isinstance(tp,potential.KuzminDiskPotential):
continue
dz= 10.**-8.
newZ= Zs[jj]+dz
dz= newZ-Zs[jj] #Representable number
mpotderivz= (tp(Rs[ii],Zs[jj],phi=1.)-tp(Rs[ii],Zs[jj]+dz,phi=1.))/dz
tzforce= potential.evaluatezforces(tp,Rs[ii],Zs[jj],phi=1.)
if tzforce**2. < 10.**ttol:
assert mpotderivz**2. < 10.**ttol, \
f"Calculation of the vertical force as the vertical derivative of the {p} potential fails at (R,Z) = ({Rs[ii]:.3f},{Zs[jj]:.3f}); diff = {numpy.fabs(mpotderivz):e}, rel. diff = {numpy.fabs((tzforce-mpotderivz)/tzforce):e}"
else:
assert (tzforce-mpotderivz)**2./tzforce**2. < 10.**ttol, \
f"Calculation of the vertical force as the vertical derivative of the {p} potential fails at (R,Z) = ({Rs[ii]:.3f},{Zs[jj]:.3f}); diff = {numpy.fabs(mpotderivz):e}, rel. diff = {numpy.fabs((tzforce-mpotderivz)/tzforce):e}"
#Test whether the second derivative of the potential is minus the derivative of the force
def test_2ndDeriv_potential():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'FullTo' in p and not 'toPlanar' in p
and not 'evaluate' in p and not 'Wrapper' in p
and not 'toVertical' in p)]
pots.append('specialTwoPowerSphericalPotential')
pots.append('DehnenTwoPowerSphericalPotential')
pots.append('DehnenCoreTwoPowerSphericalPotential')
pots.append('HernquistTwoPowerSphericalPotential')
pots.append('JaffeTwoPowerSphericalPotential')
pots.append('NFWTwoPowerSphericalPotential')
pots.append('specialMiyamotoNagaiPotential')
pots.append('specialMN3ExponentialDiskPotentialPD')
pots.append('specialMN3ExponentialDiskPotentialSECH')
pots.append('specialPowerSphericalPotential')
pots.append('specialFlattenedPowerPotential')
pots.append('testMWPotential')
pots.append('testplanarMWPotential')
pots.append('testlinearMWPotential')
pots.append('mockInterpRZPotential')
pots.append('mockCosmphiDiskPotentialnegcp')
pots.append('mockCosmphiDiskPotentialnegp')
pots.append('mockDehnenBarPotentialT1')
pots.append('mockDehnenBarPotentialTm1')
pots.append('mockDehnenBarPotentialTm5')
pots.append('mockEllipticalDiskPotentialT1')
pots.append('mockEllipticalDiskPotentialTm1')
pots.append('mockEllipticalDiskPotentialTm5')
pots.append('mockSteadyLogSpiralPotentialT1')
pots.append('mockSteadyLogSpiralPotentialTm1')
pots.append('mockSteadyLogSpiralPotentialTm5')
pots.append('mockTransientLogSpiralPotential')
pots.append('mockFlatEllipticalDiskPotential') #for evaluate w/ nonaxi lists
pots.append('oblateHernquistPotential') # in case these are ever implemented
pots.append('oblateNFWPotential')
pots.append('oblatenoGLNFWPotential')
pots.append('oblateJaffePotential')
pots.append('prolateHernquistPotential')
pots.append('prolateNFWPotential')
pots.append('prolateJaffePotential')
pots.append('triaxialHernquistPotential')
pots.append('triaxialNFWPotential')
pots.append('triaxialJaffePotential')
pots.append('HernquistTwoPowerTriaxialPotential')
pots.append('NFWTwoPowerTriaxialPotential')
pots.append('JaffeTwoPowerTriaxialPotential')
pots.append('mockAxisymmetricFerrersPotential')
pots.append('rotatingSpiralArmsPotential')
pots.append('specialSpiralArmsPotential')
pots.append('DehnenSmoothDehnenBarPotential')
pots.append('mockDehnenSmoothBarPotentialT1')
pots.append('mockDehnenSmoothBarPotentialTm1')
pots.append('mockDehnenSmoothBarPotentialTm5')
pots.append('mockDehnenSmoothBarPotentialDecay')
pots.append('SolidBodyRotationSpiralArmsPotential')
pots.append('triaxialLogarithmicHaloPotential')
pots.append('CorotatingRotationSpiralArmsPotential')
pots.append('GaussianAmplitudeDehnenBarPotential')
pots.append('nestedListPotential')
pots.append('mockInterpSphericalPotential')
pots.append('mockInterpSphericalPotentialwForce')
pots.append('mockAdiabaticContractionMWP14WrapperPotential')
pots.append('mockAdiabaticContractionMWP14ExplicitfbarWrapperPotential')
pots.append('mockRotatedAndTiltedMWP14WrapperPotential')
pots.append('mockRotatedAndTiltedMWP14WrapperPotentialwInclination')
pots.append('mockRotatedAndTiltedTriaxialLogHaloPotentialwInclination')
pots.append('mockRotatedTiltedOffsetMWP14WrapperPotential')
pots.append('mockOffsetMWP14WrapperPotential')
pots.append('mockTimeDependentAmplitudeWrapperPotential')
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential',
'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
'SphericalPotential','interpSphericalPotential']
if False:
rmpots.append('DoubleExponentialDiskPotential')
rmpots.append('RazorThinExponentialDiskPotential')
for p in rmpots:
pots.remove(p)
Rs= numpy.array([0.5,1.,2.])
Zs= numpy.array([0.,.125,-.125,0.25,-0.25])
phis= numpy.array([0.,0.5,-0.5,1.,-1.,
numpy.pi,0.5+numpy.pi,
1.+numpy.pi])
#tolerances in log10
tol= {}
tol['default']= -8.
tol['DoubleExponentialDiskPotential']= -3. #these are more difficult
tol['RazorThinExponentialDiskPotential']= -6.
tol['AnyAxisymmetricRazorThinDiskPotential']= -4.5
tol['mockInterpRZPotential']= -4.
tol['DehnenBarPotential']= -7.
for p in pots:
#if not 'NFW' in p: continue #For testing the test
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if hasattr(tp,'normalize'): tp.normalize(1.)
#Set tolerance
if p in list(tol.keys()): ttol= tol[p]
else: ttol= tol['default']
#2nd radial
if hasattr(tp,'_R2deriv'):
for ii in range(len(Rs)):
for jj in range(len(Zs)):
if p == 'RazorThinExponentialDiskPotential' and numpy.fabs(Zs[jj]) > 0.: continue #Not implemented
dr= 10.**-8.
newR= Rs[ii]+dr
dr= newR-Rs[ii] #Representable number
if isinstance(tp,potential.linearPotential):
mRforcederivR= (tp.Rforce(Rs[ii])-tp.Rforce(Rs[ii]+dr))/dr
tR2deriv= tp.R2deriv(Rs[ii])
elif isinstance(tp,potential.planarPotential):
mRforcederivR= (tp.Rforce(Rs[ii],Zs[jj])-tp.Rforce(Rs[ii]+dr,Zs[jj]))/dr
tR2deriv= potential.evaluateplanarR2derivs(tp,Rs[ii],
phi=Zs[jj])
else:
mRforcederivR= (tp.Rforce(Rs[ii],Zs[jj],phi=1.)-tp.Rforce(Rs[ii]+dr,Zs[jj],phi=1.))/dr
tR2deriv= potential.evaluateR2derivs(tp,Rs[ii],Zs[jj],phi=1.)
if tR2deriv**2. < 10.**ttol:
assert mRforcederivR**2. < 10.**ttol, \
f"Calculation of the second Radial derivative of the potential as the Radial derivative of the {p} Radial force fails at (R,Z) = ({Rs[ii]:.3f},{Zs[jj]:.3f}); diff = {numpy.fabs(tR2deriv-mRforcederivR):e}, rel. diff = {numpy.fabs((tR2deriv-mRforcederivR)/tR2deriv):e}"
else:
assert (tR2deriv-mRforcederivR)**2./tR2deriv**2. < 10.**ttol, \
f"Calculation of the second Radial derivative of the potential as the Radial derivative of the {p} Radial force fails at (R,Z) = ({Rs[ii]:.3f},{Zs[jj]:.3f}); diff = {numpy.fabs(tR2deriv-mRforcederivR):e}, rel. diff = {numpy.fabs((tR2deriv-mRforcederivR)/tR2deriv):e}"
#2nd azimuthal
if not isinstance(tp,potential.linearPotential) \
and hasattr(tp,'_phi2deriv'):
for ii in range(len(Rs)):
for jj in range(len(phis)):
dphi= 10.**-8.
newphi= phis[jj]+dphi
dphi= newphi-phis[jj] #Representable number
if isinstance(tp,potential.planarPotential):
mphitorquederivphi= (tp.phitorque(Rs[ii],phi=phis[jj])-tp.phitorque(Rs[ii],phi=phis[jj]+dphi))/dphi
tphi2deriv= tp.phi2deriv(Rs[ii],phi=phis[jj])
else:
mphitorquederivphi= (tp.phitorque(Rs[ii],0.05,phi=phis[jj])-tp.phitorque(Rs[ii],0.05,phi=phis[jj]+dphi))/dphi
tphi2deriv= potential.evaluatephi2derivs(tp,Rs[ii],0.05,phi=phis[jj])
try:
if tphi2deriv**2. < 10.**ttol:
assert(mphitorquederivphi**2. < 10.**ttol)
else:
assert((tphi2deriv-mphitorquederivphi)**2./tphi2deriv**2. < 10.**ttol)
except AssertionError:
if isinstance(tp,potential.planarPotential):
raise AssertionError(f"Calculation of the second azimuthal derivative of the potential as the azimuthal derivative of the {p} azimuthal torque fails at (R,phi) = ({Rs[ii]:.3f},{phis[jj]:.3f}); diff = {numpy.fabs(tphi2deriv-mphitorquederivphi):e}, rel. diff = {numpy.fabs((tphi2deriv-mphitorquederivphi)/tphi2deriv):e}")
else:
raise AssertionError(f"Calculation of the second azimuthal derivative of the potential as the azimuthal derivative of the {p} azimuthal torque fails at (R,Z,phi) = ({Rs[ii]:.3f},0.05,{phis[jj]:.3f}); diff = {numpy.fabs(tphi2deriv-mphitorquederivphi):e}, rel. diff = {numpy.fabs((tphi2deriv-mphitorquederivphi)/tphi2deriv):e}")
#mixed radial azimuthal: Isn't this the same as what's below??
if not isinstance(tp,potential.linearPotential) \
and hasattr(tp,'_Rphideriv'):
for ii in range(len(Rs)):
for jj in range(len(phis)):
dphi= 10.**-8.
newphi= phis[jj]+dphi
dphi= newphi-phis[jj] #Representable number
if isinstance(tp,potential.planarPotential):
mRforcederivphi= (tp.Rforce(Rs[ii],phi=phis[jj])-tp.Rforce(Rs[ii],phi=phis[jj]+dphi))/dphi
tRphideriv= tp.Rphideriv(Rs[ii],phi=phis[jj])
else:
mRforcederivphi= (tp.Rforce(Rs[ii],0.05,phi=phis[jj])-tp.Rforce(Rs[ii],0.05,phi=phis[jj]+dphi))/dphi
tRphideriv= potential.evaluateRphiderivs(tp,Rs[ii],0.05,phi=phis[jj])
try:
if tRphideriv**2. < 10.**ttol:
assert(mRforcederivphi**2. < 10.**ttol)
else:
assert((tRphideriv-mRforcederivphi)**2./tRphideriv**2. < 10.**ttol)
except AssertionError:
if isinstance(tp,potential.planarPotential):
raise AssertionError(f"Calculation of the mixed radial, azimuthal derivative of the potential as the azimuthal derivative of the {p} Radial force fails at (R,phi) = ({Rs[ii]:.3f},{phis[jj]:.3f}); diff = {numpy.fabs(tRphideriv-mRforcederivphi):e}, rel. diff = {numpy.fabs((tRphideriv-mRforcederivphi)/tRphideriv):e}")
else:
raise AssertionError(f"Calculation of the mixed radial, azimuthal derivative of the potential as the azimuthal derivative of the {p} azimuthal torque fails at (R,Z,phi) = ({Rs[ii]:.3f},0.05,{phis[jj]:.3f}); diff = {numpy.fabs(tRphideriv-mRforcederivphi):e}, rel. diff = {numpy.fabs((tRphideriv-mRforcederivphi)/tRphideriv):e}")
#2nd vertical
if not isinstance(tp,potential.planarPotential) \
and not isinstance(tp,potential.linearPotential) \
and hasattr(tp,'_z2deriv'):
for ii in range(len(Rs)):
for jj in range(len(Zs)):
if p == 'RazorThinExponentialDiskPotential': continue #Not implemented, or badly defined
if p == 'TwoPowerSphericalPotential': continue #Not implemented, or badly defined
if p == 'specialTwoPowerSphericalPotential': continue #Not implemented, or badly defined
if p == 'DehnenTwoPowerSphericalPotential': continue # Not implemented, or badly defined
if p == 'DehnenCoreTwoPowerSphericalPotential': continue # Not implemented, or badly defined
if p == 'HernquistTwoPowerSphericalPotential': continue #Not implemented, or badly defined
if p == 'JaffeTwoPowerSphericalPotential': continue #Not implemented, or badly defined
if p == 'NFWTwoPowerSphericalPotential': continue #Not implemented, or badly defined
#Excluding KuzminDiskPotential at z = 0
if p == 'KuzminDiskPotential' and Zs[jj] == 0: continue
dz= 10.**-8.
newz= Zs[jj]+dz
dz= newz-Zs[jj] #Representable number
mzforcederivz= (tp.zforce(Rs[ii],Zs[jj],phi=1.)-tp.zforce(Rs[ii],Zs[jj]+dz,phi=1.))/dz
tz2deriv= potential.evaluatez2derivs(tp,Rs[ii],Zs[jj],phi=1.)
if tz2deriv**2. < 10.**ttol:
assert mzforcederivz**2. < 10.**ttol, \
f"Calculation of the second vertical derivative of the potential as the vertical derivative of the {p} vertical force fails at (R,Z) = ({Rs[ii]:.3f},{Zs[jj]:.3f}); diff = {numpy.fabs(tz2deriv-mzforcederivz):e}, rel. diff = {numpy.fabs((tz2deriv-mzforcederivz)/tz2deriv):e}"
else:
assert (tz2deriv-mzforcederivz)**2./tz2deriv**2. < 10.**ttol, \
f"Calculation of the second vertical derivative of the potential as the vertical derivative of the {p} vertical force fails at (R,Z) = ({Rs[ii]:.3f},{Zs[jj]:.3f}); diff = {numpy.fabs(tz2deriv-mzforcederivz):e}, rel. diff = {numpy.fabs((tz2deriv-mzforcederivz)/tz2deriv):e}"
#mixed radial vertical
if not isinstance(tp,potential.planarPotential) \
and not isinstance(tp,potential.linearPotential) \
and hasattr(tp,'_Rzderiv'):
for ii in range(len(Rs)):
for jj in range(len(Zs)):
#Excluding KuzminDiskPotential at z = 0
if p == 'KuzminDiskPotential' and Zs[jj] == 0: continue
# if p == 'RazorThinExponentialDiskPotential': continue #Not implemented, or badly defined
dz= 10.**-8.
newz= Zs[jj]+dz
dz= newz-Zs[jj] #Representable number
mRforcederivz= (tp.Rforce(Rs[ii],Zs[jj],phi=1.)-tp.Rforce(Rs[ii],Zs[jj]+dz,phi=1.))/dz
tRzderiv= potential.evaluateRzderivs(tp,Rs[ii],Zs[jj],phi=1.)
if tRzderiv**2. < 10.**ttol:
assert mRforcederivz**2. < 10.**ttol, \
f"Calculation of the mixed radial vertical derivative of the potential as the vertical derivative of the {p} radial force fails at (R,Z) = ({Rs[ii]:.3f},{Zs[jj]:.3f}); diff = {numpy.fabs(tRzderiv-mRforcederivz):e}, rel. diff = {numpy.fabs((tRzderiv-mRforcederivz)/tRzderiv):e}"
else:
assert (tRzderiv-mRforcederivz)**2./tRzderiv**2. < 10.**ttol, \
f"Calculation of the mixed radial vertical derivative of the potential as the vertical derivative of the {p} radial force fails at (R,Z) = ({Rs[ii]:.3f},{Zs[jj]:.3f}); diff = {numpy.fabs(tRzderiv-mRforcederivz):e}, rel. diff = {numpy.fabs((tRzderiv-mRforcederivz)/tRzderiv):e}"
#mixed radial, azimuthal
if not isinstance(tp,potential.linearPotential) \
and hasattr(tp,'_Rphideriv'):
for ii in range(len(Rs)):
for jj in range(len(phis)):
# if p == 'RazorThinExponentialDiskPotential': continue #Not implemented, or badly defined
dphi= 10.**-8.
newphi= phis[jj]+dphi
dphi= newphi-phis[jj] #Representable number
if isinstance(tp,potential.planarPotential):
mRforcederivphi= (tp.Rforce(Rs[ii],phi=phis[jj])\
-tp.Rforce(Rs[ii],phi=phis[jj]+dphi))/dphi
tRphideriv= potential.evaluateplanarPotentials(tp,Rs[ii],
phi=phis[jj],dR=1,dphi=1)
else:
mRforcederivphi= (tp.Rforce(Rs[ii],0.1,phi=phis[jj])\
-tp.Rforce(Rs[ii],0.1,phi=phis[jj]+dphi))/dphi
tRphideriv= potential.evaluatePotentials(tp,Rs[ii],0.1,
phi=phis[jj],dR=1,dphi=1)
if tRphideriv**2. < 10.**ttol:
assert mRforcederivphi**2. < 10.**ttol, \
f"Calculation of the mixed radial azimuthal derivative of the potential as the azimuthal derivative of the {p} radial force fails at (R,phi) = ({Rs[ii]:.3f},{phis[jj]:.3f}); diff = {numpy.fabs(tRphideriv-mRforcederivphi):e}, rel. diff = {numpy.fabs((tRphideriv-mRforcederivphi)/tRphideriv):e}"
else:
assert (tRphideriv-mRforcederivphi)**2./tRphideriv**2. < 10.**ttol, \
f"Calculation of the mixed radial azimuthal derivative of the potential as the azimuthal derivative of the {p} radial force fails at (R,phi) = ({Rs[ii]:.3f},{phis[jj]:.3f}); diff = {numpy.fabs(tRphideriv-mRforcederivphi):e}, rel. diff = {numpy.fabs((tRphideriv-mRforcederivphi)/tRphideriv):e}"
#mixed azimuthal, vertical
if not isinstance(tp,potential.planarPotential) \
and not isinstance(tp,potential.linearPotential) \
and hasattr(tp,'_phizderiv'):
for ii in range(len(Rs)):
for jj in range(len(phis)):
# if p == 'RazorThinExponentialDiskPotential': continue #Not implemented, or badly defined
dphi= 10.**-8.
newphi= phis[jj]+dphi
dphi= newphi-phis[jj] #Representable number
mzforcederivphi= (tp.zforce(Rs[ii],0.1,phi=phis[jj])\
-tp.zforce(Rs[ii],0.1,phi=phis[jj]+dphi))/dphi
tphizderiv= potential.evaluatephizderivs(tp,Rs[ii],0.1,phi=phis[jj])
if tphizderiv**2. < 10.**ttol:
assert mzforcederivphi**2. < 10.**ttol, \
f"Calculation of the mixed azimuthal vertical derivative of the potential as the azimuthal derivative of the {p} vertical force fails at (R,phi) = ({Rs[ii]:.3f},{phis[jj]:.3f}); diff = {numpy.fabs(tphizderiv-mzforcederivphi):e}, rel. diff = {numpy.fabs((tphizderiv-mzforcederivphi)/tphizderiv):e}"
else:
assert (tphizderiv-mzforcederivphi)**2./tphizderiv**2. < 10.**ttol, \
f"Calculation of the mixed azimuthal vertical derivative of the potential as the azimuthal derivative of the {p} vertical force fails at (R,phi) = ({Rs[ii]:.3f},{phis[jj]:.3f}); diff = {numpy.fabs(tphizderiv-mzforcederivphi):e}, rel. diff = {numpy.fabs((tphizderiv-mzforcederivphi)/tphizderiv):e}"
#Test whether the Poisson equation is satisfied if _dens and the relevant second derivatives are implemented
def test_poisson_potential():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'FullTo' in p and not 'toPlanar' in p
and not 'evaluate' in p and not 'Wrapper' in p
and not 'toVertical' in p)]
pots.append('specialTwoPowerSphericalPotential')
pots.append('DehnenTwoPowerSphericalPotential')
pots.append('DehnenCoreTwoPowerSphericalPotential')
pots.append('HernquistTwoPowerSphericalPotential')
pots.append('JaffeTwoPowerSphericalPotential')
pots.append('NFWTwoPowerSphericalPotential')
pots.append('specialMiyamotoNagaiPotential')
pots.append('specialMN3ExponentialDiskPotentialPD')
pots.append('specialMN3ExponentialDiskPotentialSECH')
pots.append('specialFlattenedPowerPotential')
pots.append('specialPowerSphericalPotential')
pots.append('testMWPotential')
pots.append('testplanarMWPotential')
pots.append('testlinearMWPotential')
pots.append('oblateHernquistPotential') # in case these are ever implemented
pots.append('oblateNFWPotential')
pots.append('oblateJaffePotential')
pots.append('prolateHernquistPotential')
pots.append('prolateNFWPotential')
pots.append('prolateJaffePotential')
pots.append('triaxialHernquistPotential')
pots.append('triaxialNFWPotential')
pots.append('triaxialJaffePotential')
pots.append('HernquistTwoPowerTriaxialPotential')
pots.append('NFWTwoPowerTriaxialPotential')
pots.append('JaffeTwoPowerTriaxialPotential')
pots.append('rotatingSpiralArmsPotential')
pots.append('specialSpiralArmsPotential')
pots.append('DehnenSmoothDehnenBarPotential')
pots.append('SolidBodyRotationSpiralArmsPotential')
pots.append('triaxialLogarithmicHaloPotential')
pots.append('CorotatingRotationSpiralArmsPotential')
pots.append('GaussianAmplitudeDehnenBarPotential')
pots.append('nestedListPotential')
pots.append('mockInterpSphericalPotential')
pots.append('mockInterpSphericalPotentialwForce')
pots.append('mockAdiabaticContractionMWP14WrapperPotential')
pots.append('mockAdiabaticContractionMWP14ExplicitfbarWrapperPotential')
pots.append('mockRotatedAndTiltedMWP14WrapperPotential')
pots.append('mockRotatedAndTiltedMWP14WrapperPotentialwInclination')
pots.append('mockRotatedAndTiltedTriaxialLogHaloPotentialwInclination')
pots.append('mockRotatedTiltedOffsetMWP14WrapperPotential')
pots.append('mockOffsetMWP14WrapperPotential')
pots.append('mockTimeDependentAmplitudeWrapperPotential')
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential',
'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
'SphericalPotential','interpSphericalPotential']
if False:
rmpots.append('DoubleExponentialDiskPotential')
rmpots.append('RazorThinExponentialDiskPotential')
for p in rmpots:
pots.remove(p)
Rs= numpy.array([0.5,1.,2.])
Zs= numpy.array([0.,.125,-.125,0.25,-0.25])
phis= numpy.array([0.,0.5,-0.5,1.,-1.,
numpy.pi,0.5+numpy.pi,
1.+numpy.pi])
#tolerances in log10
tol= {}
tol['default']= -8.
tol['DoubleExponentialDiskPotential']= -3. #these are more difficult
tol['SpiralArmsPotential']= -3 #these are more difficult
tol['rotatingSpiralArmsPotential']= -3
tol['specialSpiralArmsPotential']= -4
tol['SolidBodyRotationSpiralArmsPotential']= -2.9 #these are more difficult
tol['nestedListPotential']= -3 #these are more difficult
#tol['RazorThinExponentialDiskPotential']= -6.
for p in pots:
#if not 'NFW' in p: continue #For testing the test
#if 'Isochrone' in p: continue #For testing the test
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if hasattr(tp,'normalize'): tp.normalize(1.)
#Set tolerance
if p in list(tol.keys()): ttol= tol[p]
else: ttol= tol['default']
#2nd radial
if not hasattr(tp,'_dens') or not hasattr(tp,'_R2deriv') \
or not hasattr(tp,'_Rforce') or not hasattr(tp,'phi2deriv') \
or not hasattr(tp,'_z2deriv'):
continue
for ii in range(len(Rs)):
for jj in range(len(Zs)):
for kk in range(len(phis)):
tpoissondens= tp.dens(Rs[ii],Zs[jj],phi=phis[kk],
forcepoisson=True)
tdens= potential.evaluateDensities(tp,Rs[ii],Zs[jj],
phi=phis[kk],
forcepoisson=False)
if tdens**2. < 10.**ttol:
assert tpoissondens**2. < 10.**ttol, \
f"Poisson equation relation between the derivatives of the potential and the implemented density is not satisfied for the {p} potential at (R,Z,phi) = ({Rs[ii]:.3f},{Zs[jj]:.3f},{phis[kk]:.3f}); diff = {numpy.fabs(tdens-tpoissondens):e}, rel. diff = {numpy.fabs((tdens-tpoissondens)/tdens):e}"
else:
assert (tpoissondens-tdens)**2./tdens**2. < 10.**ttol, \
f"Poisson equation relation between the derivatives of the potential and the implemented density is not satisfied for the {p} potential at (R,Z,phi) = ({Rs[ii]:.3f},{Zs[jj]:.3f},{phis[kk]:.3f}); diff = {numpy.fabs(tdens-tpoissondens):e}, rel. diff = {numpy.fabs((tdens-tpoissondens)/tdens):e}"
return None
#Test whether the (integrated) Poisson equation is satisfied if _surfdens and the relevant second derivatives are implemented
def test_poisson_surfdens_potential():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'FullTo' in p and not 'toPlanar' in p
and not 'evaluate' in p and not 'Wrapper' in p
and not 'toVertical' in p)]
pots.append('testMWPotential')
"""
pots.append('specialTwoPowerSphericalPotential')
pots.append('DehnenTwoPowerSphericalPotential')
pots.append('DehnenCoreTwoPowerSphericalPotential')
pots.append('HernquistTwoPowerSphericalPotential')
pots.append('JaffeTwoPowerSphericalPotential')
pots.append('NFWTwoPowerSphericalPotential')
pots.append('specialMiyamotoNagaiPotential')
pots.append('specialMN3ExponentialDiskPotentialPD')
pots.append('specialMN3ExponentialDiskPotentialSECH')
pots.append('specialFlattenedPowerPotential')
pots.append('specialPowerSphericalPotential')
pots.append('testplanarMWPotential')
pots.append('testlinearMWPotential')
pots.append('oblateHernquistPotential') # in case these are ever implemented
pots.append('oblateNFWPotential')
pots.append('oblateJaffePotential')
pots.append('prolateHernquistPotential')
pots.append('prolateNFWPotential')
pots.append('prolateJaffePotential')
pots.append('triaxialHernquistPotential')
pots.append('triaxialNFWPotential')
pots.append('triaxialJaffePotential')
pots.append('HernquistTwoPowerTriaxialPotential')
pots.append('NFWTwoPowerTriaxialPotential')
pots.append('JaffeTwoPowerTriaxialPotential')
pots.append('rotatingSpiralArmsPotential')
pots.append('specialSpiralArmsPotential')
pots.append('DehnenSmoothDehnenBarPotential')
pots.append('SolidBodyRotationSpiralArmsPotential')
pots.append('triaxialLogarithmicHaloPotential')
pots.append('CorotatingRotationSpiralArmsPotential')
pots.append('GaussianAmplitudeDehnenBarPotential')
pots.append('nestedListPotential')
"""
pots.append('mockInterpSphericalPotential')
pots.append('mockInterpSphericalPotentialwForce')
pots.append('mockAdiabaticContractionMWP14WrapperPotential')
pots.append('mockAdiabaticContractionMWP14ExplicitfbarWrapperPotential')
pots.append('mockRotatedAndTiltedMWP14WrapperPotential')
pots.append('mockRotatedAndTiltedMWP14WrapperPotentialwInclination')
pots.append('mockRotatedAndTiltedTriaxialLogHaloPotentialwInclination')
pots.append('mockRotatedTiltedOffsetMWP14WrapperPotential')
pots.append('mockOffsetMWP14WrapperPotential')
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential',
'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
'SphericalPotential','interpSphericalPotential']
if False:
rmpots.append('DoubleExponentialDiskPotential')
        rmpots.append('RazorThinExponentialDiskPotential') # R2deriv not implemented for |Z| > 0
for p in rmpots:
pots.remove(p)
Rs= numpy.array([0.5,1.,2.])
Zs= numpy.array([.125,0.25,1.,10.])
phis= numpy.array([0.,0.5,-0.5,1.,-1.,
numpy.pi,0.5+numpy.pi,
1.+numpy.pi])
#tolerances in log10
tol= {}
tol['default']= -8.
tol['DoubleExponentialDiskPotential']= -3. #these are more difficult
tol['SphericalShellPotential']= -0 # Direct integration fails to deal with delta function!
#tol['SpiralArmsPotential']= -3 #these are more difficult
#tol['rotatingSpiralArmsPotential']= -3
#tol['specialSpiralArmsPotential']= -4
#tol['SolidBodyRotationSpiralArmsPotential']= -2.9 #these are more difficult
#tol['nestedListPotential']= -3 #these are more difficult
#tol['RazorThinExponentialDiskPotential']= -6.
for p in pots:
#if not 'NFW' in p: continue #For testing the test
#if 'Isochrone' in p: continue #For testing the test
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if hasattr(tp,'normalize'): tp.normalize(1.)
#Set tolerance
if p in list(tol.keys()): ttol= tol[p]
else: ttol= tol['default']
#2nd radial
if not hasattr(tp,'_surfdens') or not hasattr(tp,'_R2deriv') \
                or not hasattr(tp,'_Rforce') or not hasattr(tp,'_phi2deriv') \
or not hasattr(tp,'_zforce') \
or (tclass._surfdens == potential.Potential._surfdens and not p == 'FlattenedPowerPotential'): # make sure _surfdens is explicitly implemented
continue
for ii in range(len(Rs)):
for kk in range(len(phis)):
for jj in range(len(Zs)):
tpoissondens= tp.surfdens(Rs[ii],Zs[jj],phi=phis[kk],
forcepoisson=True)
tdens= potential.evaluateSurfaceDensities(tp,Rs[ii],Zs[jj],
phi=phis[kk],
forcepoisson=False)
if tdens**2. < 10.**ttol:
assert tpoissondens**2. < 10.**ttol, \
f"Poisson equation relation between the derivatives of the potential and the implemented surface density is not satisfied for the {p} potential at (R,Z,phi) = ({Rs[ii]:.3f},{Zs[jj]:.3f},{phis[kk]:.3f}); diff = {numpy.fabs(tdens-tpoissondens):e}, rel. diff = {numpy.fabs((tdens-tpoissondens)/tdens):e}"
else:
assert (tpoissondens-tdens)**2./tdens**2. < 10.**ttol, \
f"Poisson equation relation between the derivatives of the potential and the implemented surface density is not satisfied for the {p} potential at (R,Z,phi) = ({Rs[ii]:.3f},{Zs[jj]:.3f},{phis[kk]:.3f}); diff = {numpy.fabs(tdens-tpoissondens):e}, rel. diff = {numpy.fabs((tdens-tpoissondens)/tdens):e}"
if p == 'mockRotatedAndTiltedTriaxialLogHaloPotentialwInclination': continue # takes a long time otherwise... skip after all z at one (R,phi)
return None
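# Analogous minimal sketch for the integrated (surface-density) Poisson check
# above (illustrative only, not collected by pytest); surfdens with
# forcepoisson=True derives the surface density from the vertical force.
def _sketch_poisson_surfdens_single():
    import numpy
    from galpy import potential
    mp= potential.MiyamotoNagaiPotential(normalize=1.,a=0.5,b=0.0375)
    R, z= 0.9, 0.5
    poisson_sdens= mp.surfdens(R,z,forcepoisson=True)
    direct_sdens= mp.surfdens(R,z)
    return numpy.fabs((poisson_sdens-direct_sdens)/direct_sdens)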
#Test whether the _evaluate function is correctly implemented in specifying derivatives
def test_evaluateAndDerivs_potential():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'FullTo' in p and not 'toPlanar' in p
and not 'evaluate' in p and not 'Wrapper' in p
and not 'toVertical' in p)]
pots.append('specialTwoPowerSphericalPotential')
pots.append('DehnenTwoPowerSphericalPotential')
pots.append('DehnenCoreTwoPowerSphericalPotential')
pots.append('HernquistTwoPowerSphericalPotential')
pots.append('JaffeTwoPowerSphericalPotential')
pots.append('NFWTwoPowerSphericalPotential')
pots.append('specialMiyamotoNagaiPotential')
pots.append('specialMN3ExponentialDiskPotentialPD')
pots.append('specialMN3ExponentialDiskPotentialSECH')
pots.append('specialFlattenedPowerPotential')
pots.append('specialPowerSphericalPotential')
pots.append('mockCosmphiDiskPotentialnegcp')
pots.append('mockCosmphiDiskPotentialnegp')
pots.append('mockDehnenBarPotentialT1')
pots.append('mockDehnenBarPotentialTm1')
pots.append('mockDehnenBarPotentialTm5')
pots.append('mockEllipticalDiskPotentialT1')
pots.append('mockEllipticalDiskPotentialTm1')
pots.append('mockEllipticalDiskPotentialTm5')
pots.append('mockSteadyLogSpiralPotentialT1')
pots.append('mockSteadyLogSpiralPotentialTm1')
pots.append('mockSteadyLogSpiralPotentialTm5')
pots.append('mockTransientLogSpiralPotential')
pots.append('mockMovingObjectPotential')
    pots.append('oblateHernquistPotential') # in case these are ever implemented
pots.append('oblateNFWPotential')
pots.append('oblateJaffePotential')
pots.append('prolateHernquistPotential')
pots.append('prolateNFWPotential')
pots.append('prolateJaffePotential')
pots.append('triaxialHernquistPotential')
pots.append('triaxialNFWPotential')
pots.append('triaxialJaffePotential')
pots.append('mockSCFZeeuwPotential')
pots.append('mockSCFNFWPotential')
pots.append('mockSCFAxiDensity1Potential')
pots.append('mockSCFAxiDensity2Potential')
pots.append('mockSCFDensityPotential')
pots.append('sech2DiskSCFPotential')
pots.append('expwholeDiskSCFPotential')
pots.append('nonaxiDiskSCFPotential')
pots.append('rotatingSpiralArmsPotential')
pots.append('specialSpiralArmsPotential')
pots.append('SolidBodyRotationSpiralArmsPotential')
pots.append('DehnenSmoothDehnenBarPotential')
pots.append('mockDehnenSmoothBarPotentialT1')
pots.append('mockDehnenSmoothBarPotentialTm1')
pots.append('mockDehnenSmoothBarPotentialTm5')
pots.append('mockDehnenSmoothBarPotentialDecay')
pots.append('triaxialLogarithmicHaloPotential')
pots.append('CorotatingRotationSpiralArmsPotential')
pots.append('GaussianAmplitudeDehnenBarPotential')
pots.append('nestedListPotential')
pots.append('mockInterpSphericalPotential')
pots.append('mockInterpSphericalPotentialwForce')
pots.append('mockAdiabaticContractionMWP14WrapperPotential')
pots.append('mockAdiabaticContractionMWP14ExplicitfbarWrapperPotential')
pots.append('mockRotatedAndTiltedMWP14WrapperPotential')
pots.append('mockRotatedAndTiltedMWP14WrapperPotentialwInclination')
pots.append('mockRotatedAndTiltedTriaxialLogHaloPotentialwInclination')
pots.append('mockRotatedTiltedOffsetMWP14WrapperPotential')
pots.append('mockOffsetMWP14WrapperPotential')
pots.append('mockTimeDependentAmplitudeWrapperPotential')
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential',
'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
'SphericalPotential','interpSphericalPotential']
if False:
rmpots.append('DoubleExponentialDiskPotential')
rmpots.append('RazorThinExponentialDiskPotential')
for p in rmpots:
pots.remove(p)
#tolerances in log10
tol= {}
tol['default']= -12.
#tol['DoubleExponentialDiskPotential']= -3. #these are more difficult
#tol['RazorThinExponentialDiskPotential']= -6.
for p in pots:
#if 'Isochrone' in p: continue #For testing the test
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if hasattr(tp,'normalize'): tp.normalize(1.)
#Set tolerance
if p in list(tol.keys()): ttol= tol[p]
else: ttol= tol['default']
#1st radial
if isinstance(tp,potential.linearPotential):
continue
elif isinstance(tp,potential.planarPotential):
tevaldr= tp(1.2,phi=0.1,dR=1)
trforce= tp.Rforce(1.2,phi=0.1)
else:
tevaldr= tp(1.2,0.1,phi=0.1,dR=1)
trforce= tp.Rforce(1.2,0.1,phi=0.1)
if not tevaldr is None:
if tevaldr**2. < 10.**ttol:
assert trforce**2. < 10.**ttol, \
"Calculation of radial derivative through _evaluate and Rforce inconsistent for the %s potential" % p
else:
assert (tevaldr+trforce)**2./tevaldr**2. < 10.**ttol, \
"Calculation of radial derivative through _evaluate and Rforce inconsistent for the %s potential" % p
#2nd radial
hasR2= True
from galpy.potential import PotentialError
if 'RazorThin' in p: R2z= 0.
else: R2z= 0.1
try:
if isinstance(tp,potential.planarPotential):
tp.R2deriv(1.2)
else:
tp.R2deriv(1.2,R2z)
except PotentialError:
hasR2= False
if hasR2:
if isinstance(tp,potential.planarPotential):
tevaldr2= tp(1.2,phi=0.1,dR=2)
tr2deriv= tp.R2deriv(1.2,phi=0.1)
else:
tevaldr2= tp(1.2,R2z,phi=0.1,dR=2)
tr2deriv= tp.R2deriv(1.2,R2z,phi=0.1)
if not tevaldr2 is None:
if tevaldr2**2. < 10.**ttol:
                    assert tr2deriv**2. < 10.**ttol, \
"Calculation of 2nd radial derivative through _evaluate and R2deriv inconsistent for the %s potential" % p
else:
assert (tevaldr2-tr2deriv)**2./tevaldr2**2. < 10.**ttol, \
"Calculation of 2nd radial derivative through _evaluate and R2deriv inconsistent for the %s potential" % p
#1st phi
if isinstance(tp,potential.planarPotential):
tevaldphi= tp(1.2,phi=0.1,dphi=1)
tphitorque= tp.phitorque(1.2,phi=0.1)
else:
tevaldphi= tp(1.2,0.1,phi=0.1,dphi=1)
tphitorque= tp.phitorque(1.2,0.1,phi=0.1)
if not tevaldphi is None:
if tevaldphi**2. < 10.**ttol:
assert tphitorque**2. < 10.**ttol, \
"Calculation of azimuthal derivative through _evaluate and phitorque inconsistent for the %s potential" % p
else:
assert (tevaldphi+tphitorque)**2./tevaldphi**2. < 10.**ttol, \
"Calculation of azimuthal derivative through _evaluate and phitorque inconsistent for the %s potential" % p
#2nd phi
hasphi2= True
try:
if isinstance(tp,potential.planarPotential):
tp.phi2deriv(1.2,phi=0.1)
else:
tp.phi2deriv(1.2,0.1,phi=0.1)
except (PotentialError,AttributeError):
hasphi2= False
if hasphi2 and hasattr(tp,'_phi2deriv'):
if isinstance(tp,potential.planarPotential):
tevaldphi2= tp(1.2,phi=0.1,dphi=2)
tphi2deriv= tp.phi2deriv(1.2,phi=0.1)
else:
tevaldphi2= tp(1.2,0.1,phi=0.1,dphi=2)
tphi2deriv= tp.phi2deriv(1.2,0.1,phi=0.1)
if not tevaldphi2 is None:
if tevaldphi2**2. < 10.**ttol:
                    assert tphi2deriv**2. < 10.**ttol, \
"Calculation of 2nd azimuthal derivative through _evaluate and phi2deriv inconsistent for the %s potential" % p
else:
assert (tevaldphi2-tphi2deriv)**2./tevaldphi2**2. < 10.**ttol, \
"Calculation of 2nd azimuthal derivative through _evaluate and phi2deriv inconsistent for the %s potential" % p
# Test that much higher derivatives are not implemented
try: tp(1.2,0.1,dR=4,dphi=10)
except NotImplementedError: pass
else: raise AssertionError('Higher-order derivative request in potential __call__ does not raise NotImplementedError for %s' % p)
        continue # the mixed radial,vertical block below is currently skipped (dz= is not supported in __call__)
#mixed radial,vertical
if isinstance(tp,potential.planarPotential):
tevaldrz= tp(1.2,0.1,phi=0.1,dR=1,dz=1)
trzderiv= tp.Rzderiv(1.2,0.1,phi=0.1)
else:
tevaldrz= tp(1.2,0.1,phi=0.1,dR=1,dz=1)
trzderiv= tp.Rzderiv(1.2,0.1,phi=0.1)
if not tevaldrz is None:
if tevaldrz**2. < 10.**ttol:
                assert trzderiv**2. < 10.**ttol, \
                    "Calculation of mixed radial,vertical derivative through _evaluate and Rzderiv inconsistent for the %s potential" % p
else:
                assert (tevaldrz-trzderiv)**2./tevaldrz**2. < 10.**ttol, \
                    "Calculation of mixed radial,vertical derivative through _evaluate and Rzderiv inconsistent for the %s potential" % p
return None
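# Minimal illustration of the sign convention checked above (illustrative
# only, not collected by pytest): __call__ with dR=1 returns +dPhi/dR while
# Rforce returns -dPhi/dR, so the two sum to zero; dphi=1 relates to
# phitorque in the same way.
def _sketch_evaluate_vs_derivs():
    import numpy
    from galpy import potential
    mp= potential.MiyamotoNagaiPotential(normalize=1.,a=0.5,b=0.0375)
    R, z, phi= 1.2, 0.1, 0.1
    assert numpy.fabs(mp(R,z,phi=phi,dR=1)+mp.Rforce(R,z,phi=phi)) < 1e-10
    assert numpy.fabs(mp(R,z,phi=phi,dphi=1)+mp.phitorque(R,z,phi=phi)) < 1e-10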
#Test that potentials can be multiplied or divided by a number
def test_amp_mult_divide():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'FullTo' in p and not 'toPlanar' in p
and not 'evaluate' in p and not 'Wrapper' in p
and not 'toVertical' in p)]
pots.append('specialTwoPowerSphericalPotential')
pots.append('DehnenTwoPowerSphericalPotential')
pots.append('DehnenCoreTwoPowerSphericalPotential')
pots.append('HernquistTwoPowerSphericalPotential')
pots.append('JaffeTwoPowerSphericalPotential')
pots.append('NFWTwoPowerSphericalPotential')
pots.append('specialMiyamotoNagaiPotential')
pots.append('specialMN3ExponentialDiskPotentialPD')
pots.append('specialMN3ExponentialDiskPotentialSECH')
pots.append('specialPowerSphericalPotential')
pots.append('specialFlattenedPowerPotential')
pots.append('testMWPotential')
pots.append('testplanarMWPotential')
pots.append('testlinearMWPotential')
pots.append('mockInterpRZPotential')
if _PYNBODY_LOADED:
pots.append('mockSnapshotRZPotential')
pots.append('mockInterpSnapshotRZPotential')
pots.append('mockCosmphiDiskPotentialnegcp')
pots.append('mockCosmphiDiskPotentialnegp')
pots.append('mockDehnenBarPotentialT1')
pots.append('mockDehnenBarPotentialTm1')
pots.append('mockDehnenBarPotentialTm5')
pots.append('mockEllipticalDiskPotentialT1')
pots.append('mockEllipticalDiskPotentialTm1')
pots.append('mockEllipticalDiskPotentialTm5')
pots.append('mockSteadyLogSpiralPotentialT1')
pots.append('mockSteadyLogSpiralPotentialTm1')
pots.append('mockSteadyLogSpiralPotentialTm5')
pots.append('mockTransientLogSpiralPotential')
pots.append('mockFlatEllipticalDiskPotential') #for evaluate w/ nonaxi lists
pots.append('mockMovingObjectPotential')
pots.append('mockMovingObjectPotentialExplPlummer')
pots.append('oblateHernquistPotential')
pots.append('oblateNFWPotential')
pots.append('oblatenoGLNFWPotential')
pots.append('oblateJaffePotential')
pots.append('prolateHernquistPotential')
pots.append('prolateNFWPotential')
pots.append('prolateJaffePotential')
pots.append('triaxialHernquistPotential')
pots.append('triaxialNFWPotential')
pots.append('triaxialJaffePotential')
pots.append('zRotatedTriaxialNFWPotential')
pots.append('yRotatedTriaxialNFWPotential')
pots.append('fullyRotatedTriaxialNFWPotential')
pots.append('fullyRotatednoGLTriaxialNFWPotential')
pots.append('HernquistTwoPowerTriaxialPotential')
pots.append('NFWTwoPowerTriaxialPotential')
pots.append('JaffeTwoPowerTriaxialPotential')
pots.append('mockSCFZeeuwPotential')
pots.append('mockSCFNFWPotential')
pots.append('mockSCFAxiDensity1Potential')
pots.append('mockSCFAxiDensity2Potential')
pots.append('mockSCFDensityPotential')
pots.append('mockAxisymmetricFerrersPotential')
pots.append('sech2DiskSCFPotential')
pots.append('expwholeDiskSCFPotential')
pots.append('nonaxiDiskSCFPotential')
pots.append('rotatingSpiralArmsPotential')
pots.append('specialSpiralArmsPotential')
pots.append('DehnenSmoothDehnenBarPotential')
pots.append('mockDehnenSmoothBarPotentialT1')
pots.append('mockDehnenSmoothBarPotentialTm1')
pots.append('mockDehnenSmoothBarPotentialTm5')
pots.append('mockDehnenSmoothBarPotentialDecay')
pots.append('SolidBodyRotationSpiralArmsPotential')
pots.append('triaxialLogarithmicHaloPotential')
pots.append('CorotatingRotationSpiralArmsPotential')
pots.append('GaussianAmplitudeDehnenBarPotential')
pots.append('nestedListPotential')
pots.append('mockInterpSphericalPotential')
pots.append('mockInterpSphericalPotentialwForce')
pots.append('mockAdiabaticContractionMWP14WrapperPotential')
pots.append('mockAdiabaticContractionMWP14ExplicitfbarWrapperPotential')
pots.append('mockRotatedAndTiltedMWP14WrapperPotential')
pots.append('mockRotatedAndTiltedMWP14WrapperPotentialwInclination')
pots.append('mockRotatedAndTiltedTriaxialLogHaloPotentialwInclination')
pots.append('mockRotatedTiltedOffsetMWP14WrapperPotential')
pots.append('mockOffsetMWP14WrapperPotential')
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential',
'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
'SphericalPotential','interpSphericalPotential']
if False:
rmpots.append('DoubleExponentialDiskPotential')
rmpots.append('RazorThinExponentialDiskPotential')
for p in rmpots:
pots.remove(p)
R,Z,phi= 0.75,0.2,1.76
nums= numpy.random.uniform(size=len(pots)) # random set of amp changes
for num,p in zip(nums,pots):
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if hasattr(tp,'normalize'): tp.normalize(1.)
if isinstance(tp,potential.linearPotential):
assert numpy.fabs(tp(R)*num-(num*tp)(R)) < 1e-10, "Multiplying a linearPotential with a number does not behave as expected"
# Other way...
assert numpy.fabs(tp(R)*num-(tp*num)(R)) < 1e-10, "Multiplying a linearPotential with a number does not behave as expected"
assert numpy.fabs(tp(R)/num-(tp/num)(R)) < 1e-10, "Dividing a linearPotential with a number does not behave as expected"
elif isinstance(tp,potential.planarPotential):
assert numpy.fabs(tp(R,phi=phi)*num-(num*tp)(R,phi=phi)) < 1e-10, "Multiplying a planarPotential with a number does not behave as expected"
# Other way...
assert numpy.fabs(tp(R,phi=phi)*num-(tp*num)(R,phi=phi)) < 1e-10, "Multiplying a planarPotential with a number does not behave as expected"
assert numpy.fabs(tp(R,phi=phi)/num-(tp/num)(R,phi=phi)) < 1e-10, "Dividing a planarPotential with a number does not behave as expected"
else:
assert numpy.fabs(tp(R,Z,phi=phi)*num-(num*tp)(R,Z,phi=phi)) < 1e-10, "Multiplying a Potential with a number does not behave as expected"
# Other way...
assert numpy.fabs(tp(R,Z,phi=phi)*num-(tp*num)(R,Z,phi=phi)) < 1e-10, "Multiplying a Potential with a number does not behave as expected"
assert numpy.fabs(tp(R,Z,phi=phi)/num-(tp/num)(R,Z,phi=phi)) < 1e-10, "Dividing a Potential with a number does not behave as expected"
return None
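# Minimal illustration of the amplitude arithmetic tested above (illustrative
# only, not collected by pytest): multiplying or dividing a Potential by a
# number rescales its amplitude, so evaluations scale linearly.
def _sketch_amp_scaling():
    import numpy
    from galpy import potential
    kp= potential.KeplerPotential(normalize=1.)
    R, z= 0.75, 0.2
    assert numpy.fabs(2.*kp(R,z)-(2.*kp)(R,z)) < 1e-10
    assert numpy.fabs(kp(R,z)/3.-(kp/3.)(R,z)) < 1e-10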
#Test whether potentials that support array input do so correctly
def test_potential_array_input():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'FullTo' in p and not 'toPlanar' in p
and not 'evaluate' in p and not 'Wrapper' in p
and not 'toVertical' in p)]
pots.append('mockInterpSphericalPotential')
pots.append('mockInterpSphericalPotentialwForce')
pots.append('mockAdiabaticContractionMWP14WrapperPotential')
pots.append('mockAdiabaticContractionMWP14ExplicitfbarWrapperPotential')
rmpots= ['Potential','MWPotential','MWPotential2014',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
'SphericalPotential','interpSphericalPotential']
rmpots.append('FerrersPotential')
rmpots.append('PerfectEllipsoidPotential')
rmpots.append('TriaxialHernquistPotential')
rmpots.append('TriaxialJaffePotential')
rmpots.append('TriaxialNFWPotential')
rmpots.append('TwoPowerTriaxialPotential')
rmpots.append('DoubleExponentialDiskPotential')
rmpots.append('RazorThinExponentialDiskPotential')
rmpots.append('AnyAxisymmetricRazorThinDiskPotential')
rmpots.append('AnySphericalPotential')
rmpots.append('SphericalShellPotential')
rmpots.append('HomogeneousSpherePotential')
rmpots.append('TriaxialGaussianPotential')
rmpots.append('PowerTriaxialPotential')
# These cannot be setup without arguments
rmpots.append('MovingObjectPotential')
rmpots.append('SnapshotRZPotential')
rmpots.append('InterpSnapshotRZPotential')
# 2D ones that cannot use this test
rmpots.append('CosmphiDiskPotential')
rmpots.append('EllipticalDiskPotential')
rmpots.append('LopsidedDiskPotential')
rmpots.append('HenonHeilesPotential')
rmpots.append('TransientLogSpiralPotential')
rmpots.append('SteadyLogSpiralPotential')
# 1D ones that cannot use this test
rmpots.append('IsothermalDiskPotential')
rmpots.append('KGPotential')
for p in rmpots:
pots.remove(p)
rs= numpy.linspace(0.1,2.,11)
zs= numpy.linspace(-2.,2.,11)
phis= numpy.linspace(0.,numpy.pi,11)
ts= numpy.linspace(0.,10.,11)
for p in pots:
#if not 'NFW' in p: continue #For testing the test
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
#Potential itself
tpevals= numpy.array([tp(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
assert numpy.all(numpy.fabs(tp(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
f'{p} evaluation does not work as expected for array inputs'
#Rforce
tpevals= numpy.array([tp.Rforce(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
assert numpy.all(numpy.fabs(tp.Rforce(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
f'{p} Rforce evaluation does not work as expected for array inputs'
#zforce
tpevals= numpy.array([tp.zforce(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
assert numpy.all(numpy.fabs(tp.zforce(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
f'{p} zforce evaluation does not work as expected for array inputs'
#phitorque
tpevals= numpy.array([tp.phitorque(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
assert numpy.all(numpy.fabs(tp.phitorque(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
            f'{p} phitorque evaluation does not work as expected for array inputs'
#R2deriv
if hasattr(tp,'_R2deriv'):
tpevals= numpy.array([tp.R2deriv(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
assert numpy.all(numpy.fabs(tp.R2deriv(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
f'{p} R2deriv evaluation does not work as expected for array inputs'
#z2deriv
if hasattr(tp,'_z2deriv') \
and not p == 'TwoPowerSphericalPotential': # latter bc done through R2deriv
tpevals= numpy.array([tp.z2deriv(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
assert numpy.all(numpy.fabs(tp.z2deriv(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
f'{p} z2deriv evaluation does not work as expected for array inputs'
#phi2deriv
        if hasattr(tp,'_phi2deriv'):
tpevals= numpy.array([tp.phi2deriv(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
assert numpy.all(numpy.fabs(tp.phi2deriv(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
f'{p} phi2deriv evaluation does not work as expected for array inputs'
#Rzderiv
if hasattr(tp,'_Rzderiv'):
tpevals= numpy.array([tp.Rzderiv(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
assert numpy.all(numpy.fabs(tp.Rzderiv(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
f'{p} Rzderiv evaluation does not work as expected for array inputs'
#Rphideriv
if hasattr(tp,'_Rphideriv'):
tpevals= numpy.array([tp.Rphideriv(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
assert numpy.all(numpy.fabs(tp.Rphideriv(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
f'{p} Rphideriv evaluation does not work as expected for array inputs'
#phizderiv
if hasattr(tp,'_phizderiv'):
tpevals= numpy.array([tp.phizderiv(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
assert numpy.all(numpy.fabs(tp.phizderiv(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
f'{p} phizderiv evaluation does not work as expected for array inputs'
#dens
tpevals= numpy.array([tp.dens(r,z,phi=phi,t=t) for (r,z,phi,t) in zip(rs,zs,phis,ts)])
assert numpy.all(numpy.fabs(tp.dens(rs,zs,phi=phis,t=ts)-tpevals) < 10.**-10.), \
f'{p} dens evaluation does not work as expected for array inputs'
return None
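# Minimal illustration of the array-input support tested above (illustrative
# only, not collected by pytest): evaluations broadcast over R, z (and phi, t)
# and match an element-by-element loop.
def _sketch_array_input():
    import numpy
    from galpy import potential
    nfw= potential.NFWPotential(normalize=1.)
    rs= numpy.linspace(0.5,1.5,5)
    zs= numpy.linspace(-0.5,0.5,5)
    looped= numpy.array([nfw(r,z) for r,z in zip(rs,zs)])
    assert numpy.all(numpy.fabs(nfw(rs,zs)-looped) < 1e-10)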
# Test that 1D potentials created using toVertical can handle array input if
# their 3D versions can
def test_toVertical_array():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'FullTo' in p and not 'toPlanar' in p
and not 'evaluate' in p and not 'Wrapper' in p
and not 'toVertical' in p)]
pots.append('mockInterpSphericalPotential')
pots.append('mockInterpSphericalPotentialwForce')
rmpots= ['Potential','MWPotential','MWPotential2014',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
'SphericalPotential','interpSphericalPotential']
rmpots.append('FerrersPotential')
rmpots.append('PerfectEllipsoidPotential')
rmpots.append('TriaxialHernquistPotential')
rmpots.append('TriaxialJaffePotential')
rmpots.append('TriaxialNFWPotential')
rmpots.append('TwoPowerTriaxialPotential')
rmpots.append('DoubleExponentialDiskPotential')
rmpots.append('RazorThinExponentialDiskPotential')
rmpots.append('AnyAxisymmetricRazorThinDiskPotential')
rmpots.append('AnySphericalPotential')
rmpots.append('SphericalShellPotential')
rmpots.append('HomogeneousSpherePotential')
rmpots.append('TriaxialGaussianPotential')
rmpots.append('PowerTriaxialPotential')
# These cannot be setup without arguments
rmpots.append('MovingObjectPotential')
rmpots.append('SnapshotRZPotential')
rmpots.append('InterpSnapshotRZPotential')
for p in rmpots:
pots.remove(p)
xs= numpy.linspace(-2.,2.,11)
ts= numpy.linspace(0.,10.,11)
for p in pots:
#if not 'NFW' in p: continue #For testing the test
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
# Only do 3D --> 1D potentials
if not isinstance(tp,potential.Potential): continue
tp= potential.toVerticalPotential(tp,0.8,phi=0.2)
#Potential itself
tpevals= numpy.array([tp(x,t=t) for (x,t) in zip(xs,ts)])
assert numpy.all(numpy.fabs(tp(xs,t=ts)-tpevals) < 10.**-10.), \
f'{p} evaluation does not work as expected for array inputs for toVerticalPotential potentials'
#force
tpevals= numpy.array([tp.force(x,t=t) for (x,t) in zip(xs,ts)])
assert numpy.all(numpy.fabs(tp.force(xs,t=ts)-tpevals) < 10.**-10.), \
f'{p} force evaluation does not work as expected for array inputs for toVerticalPotential'
# Also test Morgan's example
pot= potential.toVerticalPotential(potential.MWPotential2014,1.)
#Potential itself
tpevals= numpy.array([potential.evaluatelinearPotentials(pot,x,t=t) for (x,t) in zip(xs,ts)])
assert numpy.all(numpy.fabs(potential.evaluatelinearPotentials(pot,xs,t=ts)-tpevals) < 10.**-10.), \
        'MWPotential2014 evaluation does not work as expected for array inputs for toVerticalPotential potentials'
    #force
tpevals= numpy.array([potential.evaluatelinearForces(pot,x,t=t) for (x,t) in zip(xs,ts)])
assert numpy.all(numpy.fabs(potential.evaluatelinearForces(pot,xs,t=ts)-tpevals) < 10.**-10.), \
        'MWPotential2014 force evaluation does not work as expected for array inputs for toVerticalPotential'
return None
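# Minimal illustration of the 3D --> 1D reduction used above (illustrative
# only, not collected by pytest): toVerticalPotential fixes (R,phi) and
# returns a linearPotential in z that inherits array support.
def _sketch_toVertical_usage():
    import numpy
    from galpy import potential
    nfw= potential.NFWPotential(normalize=1.)
    vp= potential.toVerticalPotential(nfw,0.8,phi=0.2)
    xs= numpy.linspace(-1.,1.,5)
    looped= numpy.array([vp(x) for x in xs])
    assert numpy.all(numpy.fabs(vp(xs)-looped) < 1e-10)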
#Test that all potentials can be evaluated at zero
def test_potential_at_zero():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'FullTo' in p and not 'toPlanar' in p
and not 'evaluate' in p and not 'Wrapper' in p
and not 'toVertical' in p)]
#pots.append('specialTwoPowerSphericalPotential')
#pots.append('DehnenTwoPowerSphericalPotential')
#pots.append('DehnenCoreTwoPowerSphericalPotential')
#pots.append('HernquistTwoPowerSphericalPotential')
#pots.append('JaffeTwoPowerSphericalPotential')
#pots.append('NFWTwoPowerSphericalPotential') # Difficult, and who cares?
pots.append('specialMiyamotoNagaiPotential')
pots.append('specialMN3ExponentialDiskPotentialPD')
pots.append('specialMN3ExponentialDiskPotentialSECH')
pots.append('specialPowerSphericalPotential')
pots.append('specialFlattenedPowerPotential')
pots.append('testMWPotential')
pots.append('mockInterpRZPotential')
if _PYNBODY_LOADED:
pots.append('mockSnapshotRZPotential')
pots.append('mockInterpSnapshotRZPotential')
pots.append('oblateHernquistPotential')
pots.append('oblateNFWPotential')
pots.append('oblatenoGLNFWPotential')
pots.append('oblateJaffePotential')
pots.append('prolateHernquistPotential')
pots.append('prolateNFWPotential')
pots.append('prolateJaffePotential')
pots.append('triaxialHernquistPotential')
pots.append('triaxialNFWPotential')
pots.append('triaxialJaffePotential')
pots.append('zRotatedTriaxialNFWPotential') # Difficult bc of rotation
pots.append('yRotatedTriaxialNFWPotential') # Difficult bc of rotation
pots.append('fullyRotatedTriaxialNFWPotential') # Difficult bc of rotation
pots.append('fullyRotatednoGLTriaxialNFWPotential') # Difficult bc of rotation
pots.append('HernquistTwoPowerTriaxialPotential')
pots.append('NFWTwoPowerTriaxialPotential')
#pots.append('JaffeTwoPowerTriaxialPotential') # not finite
pots.append('mockSCFZeeuwPotential')
pots.append('mockSCFNFWPotential')
pots.append('mockSCFAxiDensity1Potential')
pots.append('mockSCFAxiDensity2Potential')
pots.append('mockSCFDensityPotential')
pots.append('sech2DiskSCFPotential')
pots.append('expwholeDiskSCFPotential')
pots.append('nonaxiDiskSCFPotential')
pots.append('mockInterpSphericalPotential')
pots.append('mockInterpSphericalPotentialwForce')
pots.append('mockAdiabaticContractionMWP14WrapperPotential')
pots.append('mockAdiabaticContractionMWP14ExplicitfbarWrapperPotential')
pots.append('mockRotatedAndTiltedMWP14WrapperPotential')
pots.append('mockRotatedAndTiltedMWP14WrapperPotentialwInclination')
pots.append('mockRotatedAndTiltedTriaxialLogHaloPotentialwInclination')
pots.append('mockRotatedTiltedOffsetMWP14WrapperPotential')
pots.append('mockOffsetMWP14WrapperPotential')
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential',
'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
'SphericalPotential','interpSphericalPotential']
    # Remove some more potentials that we don't support for now (TODO)
rmpots.append('BurkertPotential') # Need to figure out...
#rmpots.append('FerrersPotential') # Need to figure out...
#rmpots.append('KuzminKutuzovStaeckelPotential') # Need to figure out...
rmpots.append('RazorThinExponentialDiskPotential') # Need to figure out...
rmpots.append('RingPotential') # Easy, but who cares?
#rmpots.append('SoftenedNeedleBarPotential') # Not that hard, but haven't done it
rmpots.append('SpiralArmsPotential')
rmpots.append('TwoPowerSphericalPotential') # Need to figure out
#rmpots.append('TwoPowerTriaxialPotential') # Need to figure out
# 2D ones that cannot use this test
rmpots.append('CosmphiDiskPotential')
rmpots.append('EllipticalDiskPotential')
rmpots.append('LopsidedDiskPotential')
rmpots.append('HenonHeilesPotential')
rmpots.append('TransientLogSpiralPotential')
rmpots.append('SteadyLogSpiralPotential')
# 1D ones that cannot use this test
rmpots.append('IsothermalDiskPotential')
rmpots.append('KGPotential')
for p in rmpots:
pots.remove(p)
for p in pots:
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if hasattr(tp,'normalize'): tp.normalize(1.)
assert not numpy.isnan(potential.evaluatePotentials(tp,0,0,phi=0.,t=0.)), f'Potential {p} evaluated at zero gave NaN'
# Also for arrays
if p == 'FerrersPotential' \
or p == 'HomogeneousSpherePotential' \
or p == 'PerfectEllipsoidPotential' \
or p == 'SphericalShellPotential' \
or p == 'AnyAxisymmetricRazorThinDiskPotential' \
or p == 'AnySphericalPotential' \
or p == 'mockRotatedAndTiltedMWP14WrapperPotential' \
or p == 'mockRotatedAndTiltedMWP14WrapperPotentialwInclination' \
or p == 'mockRotatedAndTiltedTriaxialLogHaloPotentialwInclination' \
or p == 'mockRotatedTiltedOffsetMWP14WrapperPotential' \
or p == 'mockOffsetMWP14WrapperPotential' \
or 'riaxial' in p \
or 'oblate' in p \
or 'prolate' in p:
continue
assert not numpy.any(numpy.isnan(potential.evaluatePotentials(tp,numpy.zeros(4),numpy.zeros(4),phi=0.,t=0.))), f'Potential {p} evaluated at zero gave NaN'
return None
#Test that all potentials can be evaluated with large numbers and with infinity
def test_potential_at_infinity():
# One of the main reasons for this test is the implementation of vesc,
# which uses the potential at infinity. Import what vesc uses for infinity
from galpy.potential.plotEscapecurve import _INF
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'FullTo' in p and not 'toPlanar' in p
and not 'evaluate' in p and not 'Wrapper' in p
and not 'toVertical' in p)]
#pots.append('specialTwoPowerSphericalPotential')
pots.append('DehnenTwoPowerSphericalPotential')
pots.append('DehnenCoreTwoPowerSphericalPotential')
pots.append('HernquistTwoPowerSphericalPotential')
pots.append('JaffeTwoPowerSphericalPotential')
#pots.append('NFWTwoPowerSphericalPotential') # Difficult, and who cares?
pots.append('specialMiyamotoNagaiPotential')
pots.append('specialMN3ExponentialDiskPotentialPD')
pots.append('specialMN3ExponentialDiskPotentialSECH')
pots.append('specialPowerSphericalPotential')
pots.append('specialFlattenedPowerPotential')
pots.append('testMWPotential')
pots.append('mockInterpRZPotential')
#if _PYNBODY_LOADED:
# pots.append('mockSnapshotRZPotential')
# pots.append('mockInterpSnapshotRZPotential')
pots.append('oblateHernquistPotential')
pots.append('oblateNFWPotential')
pots.append('oblatenoGLNFWPotential')
pots.append('oblateJaffePotential')
pots.append('prolateHernquistPotential')
pots.append('prolateNFWPotential')
pots.append('prolateJaffePotential')
pots.append('triaxialHernquistPotential')
pots.append('triaxialNFWPotential')
pots.append('triaxialJaffePotential')
#pots.append('zRotatedTriaxialNFWPotential') # Difficult bc of rotation
#pots.append('yRotatedTriaxialNFWPotential') # Difficult bc of rotation
#pots.append('fullyRotatedTriaxialNFWPotential') # Difficult bc of rotation
#pots.append('fullyRotatednoGLTriaxialNFWPotential') # Difficult bc of rotation
#pots.append('HernquistTwoPowerTriaxialPotential')
#pots.append('NFWTwoPowerTriaxialPotential')
#pots.append('JaffeTwoPowerTriaxialPotential')
pots.append('mockSCFZeeuwPotential')
pots.append('mockSCFNFWPotential')
pots.append('mockSCFAxiDensity1Potential')
pots.append('mockSCFAxiDensity2Potential')
pots.append('mockSCFDensityPotential')
pots.append('sech2DiskSCFPotential')
pots.append('expwholeDiskSCFPotential')
pots.append('nonaxiDiskSCFPotential')
pots.append('mockInterpSphericalPotential')
pots.append('mockInterpSphericalPotentialwForce')
pots.append('mockAdiabaticContractionMWP14WrapperPotential')
pots.append('mockAdiabaticContractionMWP14ExplicitfbarWrapperPotential')
pots.append('mockRotatedAndTiltedMWP14WrapperPotential')
pots.append('mockRotatedAndTiltedMWP14WrapperPotentialwInclination')
pots.append('mockRotatedAndTiltedTriaxialLogHaloPotentialwInclination')
pots.append('mockRotatedTiltedOffsetMWP14WrapperPotential')
pots.append('mockOffsetMWP14WrapperPotential')
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential',
'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
'SphericalPotential','interpSphericalPotential']
    # Remove some more potentials that we don't support for now (TODO)
rmpots.append('FerrersPotential') # Need to figure out...
rmpots.append('KuzminKutuzovStaeckelPotential') # Need to figure out...
rmpots.append('RazorThinExponentialDiskPotential') # Need to figure out...
rmpots.append('SoftenedNeedleBarPotential') # Not that hard, but haven't done it
rmpots.append('SpiralArmsPotential') # Need to have 0 x cos = 0
rmpots.append('TwoPowerTriaxialPotential') # Need to figure out
# 2D ones that cannot use this test
rmpots.append('CosmphiDiskPotential')
rmpots.append('EllipticalDiskPotential')
rmpots.append('LopsidedDiskPotential')
rmpots.append('HenonHeilesPotential')
rmpots.append('TransientLogSpiralPotential')
rmpots.append('SteadyLogSpiralPotential')
# 1D ones that cannot use this test
rmpots.append('IsothermalDiskPotential')
rmpots.append('KGPotential')
for p in rmpots:
pots.remove(p)
for p in pots:
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if hasattr(tp,'normalize'): tp.normalize(1.)
assert not numpy.isnan(potential.evaluatePotentials(tp,numpy.inf,0,phi=0.,t=0.)), f'Potential {p} evaluated at infinity gave NaN'
assert not numpy.isnan(potential.evaluatePotentials(tp,_INF,0,phi=0.,t=0.)), f'Potential {p} evaluated at vesc _INF gave NaN'
# Also for arrays
if p == 'HomogeneousSpherePotential' \
or p == 'PerfectEllipsoidPotential' \
or p == 'SphericalShellPotential' \
or p == 'AnyAxisymmetricRazorThinDiskPotential' \
or p == 'AnySphericalPotential' \
or p == 'mockRotatedAndTiltedMWP14WrapperPotential' \
or p == 'mockRotatedAndTiltedMWP14WrapperPotentialwInclination' \
or p == 'mockRotatedAndTiltedTriaxialLogHaloPotentialwInclination' \
or p == 'mockRotatedTiltedOffsetMWP14WrapperPotential' \
or p == 'mockOffsetMWP14WrapperPotential' \
or 'riaxial' in p \
or 'oblate' in p \
or 'prolate' in p:
continue
assert not numpy.any(numpy.isnan(potential.evaluatePotentials(tp,numpy.inf*numpy.ones(4),numpy.zeros(4),phi=0.,t=0.))), f'Potential {p} evaluated at infinity gave NaN'
assert not numpy.any(numpy.isnan(potential.evaluatePotentials(tp,_INF*numpy.ones(4),numpy.zeros(4),phi=0.,t=0.))), f'Potential {p} evaluated at vesc _INF gave NaN'
return None
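# Minimal illustration of the behavior relied on by vesc (illustrative only,
# not collected by pytest): a well-behaved potential evaluates to a finite
# number, not NaN, as r --> infinity (0 for a finite-mass potential like
# Kepler).
def _sketch_at_infinity():
    import numpy
    from galpy import potential
    kp= potential.KeplerPotential(normalize=1.)
    assert not numpy.isnan(potential.evaluatePotentials(kp,numpy.inf,0.))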
# Test that the amplitude for potentials with a finite mass and amp=mass is
# correct through the relation -r^2 F_r =~ GM at large r
def test_finitemass_amp():
r_large= 10000.
# KeplerPotential
mass= 3.
kp= potential.KeplerPotential(amp=mass)
    assert numpy.fabs(mass+r_large**2.*kp.rforce(r_large/numpy.sqrt(2.),r_large/numpy.sqrt(2.),)) < 1e-8, 'Mass amp parameter of KeplerPotential does not equal total mass'
# IsochronePotential
r_large= 1000000000.
mass= 3.
ip= potential.IsochronePotential(amp=mass,b=0.4)
    assert numpy.fabs(mass+r_large**2.*ip.rforce(r_large/numpy.sqrt(2.),r_large/numpy.sqrt(2.),)) < 1e-8, 'Mass amp parameter of IsochronePotential does not equal total mass'
# PlummerPotential
r_large= 10000.
mass= 3.
pp= potential.PlummerPotential(amp=mass,b=0.4)
    assert numpy.fabs(mass+r_large**2.*pp.rforce(r_large/numpy.sqrt(2.),r_large/numpy.sqrt(2.),)) < 1e-8, 'Mass amp parameter of PlummerPotential does not equal total mass'
# SphericalShellPotential
mass= 3.
sp= potential.SphericalShellPotential(amp=mass,a=0.4)
    assert numpy.fabs(mass+r_large**2.*sp.rforce(r_large/numpy.sqrt(2.),r_large/numpy.sqrt(2.),)) < 1e-8, 'Mass amp parameter of SphericalShellPotential does not equal total mass'
# RingPotential
mass= 3.
rp= potential.RingPotential(amp=mass,a=0.4)
    assert numpy.fabs(mass+r_large**2.*rp.rforce(r_large/numpy.sqrt(2.),r_large/numpy.sqrt(2.),)) < 1e-8, 'Mass amp parameter of RingPotential does not equal total mass'
# KuzminDiskPotential
r_large= 1000000000.
mass= 3.
kp= potential.KuzminDiskPotential(amp=mass,a=0.4)
    assert numpy.fabs(mass+r_large**2.*kp.rforce(r_large/numpy.sqrt(2.),r_large/numpy.sqrt(2.),)) < 1e-8, 'Mass amp parameter of KuzminDiskPotential does not equal total mass'
# MiyamotoNagaiPotential
r_large= 1000000000.
mass= 3.
mp= potential.MiyamotoNagaiPotential(amp=mass,a=0.4)
    assert numpy.fabs(mass+r_large**2.*mp.rforce(r_large/numpy.sqrt(2.),r_large/numpy.sqrt(2.),)) < 1e-8, 'Mass amp parameter of MiyamotoNagaiPotential does not equal total mass'
return None
# Test that the spherically radial force is correct
def test_rforce():
# Spherical potentials: Rforce = rforce x R / r; zforce = rforce x z /r
pp= potential.PlummerPotential(amp=2.,b=2.)
R,z= 1.3, 0.4
r= numpy.sqrt(R*R+z*z)
assert numpy.fabs(pp.Rforce(R,z)*r/R-pp.rforce(R,z)) < 10.**-10., 'rforce does not behave as expected for spherical potentials'
assert numpy.fabs(potential.evaluateRforces(pp,R,z)*r/R-potential.evaluaterforces(pp,R,z)) < 10.**-10., 'evaluaterforces does not behave as expected for spherical potentials'
return None
def test_rforce_dissipative():
# Use dynamical friction along a radial orbit at z=0 --> spherical
pp= potential.PlummerPotential(amp=1.12,b=2.)
cdfc= potential.ChandrasekharDynamicalFrictionForce(\
GMs=0.01,const_lnLambda=8.,
dens=pp,sigmar=lambda r: 1./numpy.sqrt(2.))
R,z,phi= 1.3, 0., 1.1
v= [0.1,0.,0.]
r= numpy.sqrt(R*R+z*z)
assert numpy.fabs(cdfc.Rforce(R,z,phi=phi,v=v)*r/R-cdfc.rforce(R,z,phi=phi,v=v)) < 10.**-10., 'rforce does not behave as expected for spherical potentials for dissipative forces'
assert numpy.fabs(potential.evaluateRforces([pp,cdfc],R,z,phi=phi,v=v)*r/R-potential.evaluaterforces([pp,cdfc],R,z,phi=phi,v=v)) < 10.**-10., 'evaluaterforces does not behave as expected for spherical potentials for dissipative forces'
assert numpy.fabs(potential.evaluateRforces(cdfc,R,z,phi=phi,v=v)*r/R-potential.evaluaterforces(cdfc,R,z,phi=phi,v=v)) < 10.**-10., 'evaluaterforces does not behave as expected for spherical potentials for dissipative forces'
return None
# Test that the spherically second radial derivative is correct
def test_r2deriv():
# Spherical potentials: Rforce = rforce x R / r; zforce = rforce x z /r
# and R2deriv = r2deriv x (R/r)^2 - rforce x z^2/r^3
    # and z2deriv = r2deriv x (z/r)^2 - rforce x R^2/r^3
# and Rzderiv = r2deriv x Rz/r^2 + rforce x Rz/r^3
pp= potential.PlummerPotential(amp=2.,b=2.)
R,z= 1.3, 0.4
r= numpy.sqrt(R*R+z*z)
assert numpy.fabs(pp.R2deriv(R,z)-pp.r2deriv(R,z)*(R/r)**2.+pp.rforce(R,z)*z**2./r**3.) < 10.**-10., 'r2deriv does not behave as expected for spherical potentials'
assert numpy.fabs(pp.z2deriv(R,z)-pp.r2deriv(R,z)*(z/r)**2.+pp.rforce(R,z)*R**2./r**3.) < 10.**-10., 'r2deriv does not behave as expected for spherical potentials'
assert numpy.fabs(pp.Rzderiv(R,z)-pp.r2deriv(R,z)*R*z/r**2.-pp.rforce(R,z)*R*z/r**3.) < 10.**-10., 'r2deriv does not behave as expected for spherical potentials'
assert numpy.fabs(potential.evaluateR2derivs([pp],R,z)-potential.evaluater2derivs([pp],R,z)*(R/r)**2.+potential.evaluaterforces([pp],R,z)*z**2./r**3.) < 10.**-10., 'r2deriv does not behave as expected for spherical potentials'
assert numpy.fabs(potential.evaluatez2derivs([pp],R,z)-potential.evaluater2derivs([pp],R,z)*(z/r)**2.+potential.evaluaterforces([pp],R,z)*R**2./r**3.) < 10.**-10., 'r2deriv does not behave as expected for spherical potentials'
assert numpy.fabs(potential.evaluateRzderivs([pp],R,z)-potential.evaluater2derivs([pp],R,z)*R*z/r**2.-potential.evaluaterforces([pp],R,z)*R*z/r**3.) < 10.**-10., 'r2deriv does not behave as expected for spherical potentials'
return None
# Check that the masses are calculated correctly for spherical potentials
def test_mass_spher():
    #PowerSphericalPotential with alpha close to 3 should be very close to KeplerPotential
pp= potential.PowerSphericalPotential(amp=2.,alpha=2.999)
kp= potential.KeplerPotential(amp=2.)
assert numpy.fabs(((3.-2.999)/(4.*numpy.pi)*pp.mass(10.)-kp.mass(10.))/kp.mass(10.)) < 10.**-2., "Mass for PowerSphericalPotential close to KeplerPotential is not close to KeplerPotential's mass"
pp= potential.PowerSphericalPotential(amp=2.)
#mass = amp x r^(3-alpha)
tR= 1.
assert numpy.fabs(potential.mass(pp,tR,forceint=True)-pp._amp*tR**(3.-pp.alpha)) < 10.**-10., 'Mass for PowerSphericalPotential not as expected'
tR= 2.
assert numpy.fabs(potential.mass([pp],tR,forceint=True)-pp._amp*tR**(3.-pp.alpha)) < 10.**-10., 'Mass for PowerSphericalPotential not as expected'
tR= 20.
assert numpy.fabs(pp.mass(tR,forceint=True)-pp._amp*tR**(3.-pp.alpha)) < 10.**-9., 'Mass for PowerSphericalPotential not as expected'
    #Test that for a cut-off potential, the mass far beyond the cut-off is
    # 2 x pi x amp x rc^(3-alpha) x gamma(1.5-alpha/2)
pp= potential.PowerSphericalPotentialwCutoff(amp=2.)
from scipy import special
expecMass= 2.*pp._amp*numpy.pi*pp.rc**(3.-pp.alpha)*special.gamma(1.5-pp.alpha/2.)
tR= 5.
assert numpy.fabs((pp.mass(tR,forceint=True)-expecMass)/expecMass) < 10.**-6., 'Mass of PowerSphericalPotentialwCutoff far beyond the cut-off not as expected'
tR= 15.
assert numpy.fabs((pp.mass(tR,forceint=True)-expecMass)/expecMass) < 10.**-6., 'Mass of PowerSphericalPotentialwCutoff far beyond the cut-off not as expected'
tR= 50.
assert numpy.fabs((pp.mass(tR,forceint=True)-expecMass)/expecMass) < 10.**-6., 'Mass of PowerSphericalPotentialwCutoff far beyond the cut-off not as expected'
#Jaffe and Hernquist both have finite masses, NFW diverges logarithmically
jp= potential.JaffePotential(amp=2.,a=0.1)
hp= potential.HernquistPotential(amp=2.,a=0.1)
np= potential.NFWPotential(amp=2.,a=0.1)
tR= 10.
# Limiting behavior
jaffemass= jp._amp*(1.-jp.a/tR)
hernmass= hp._amp/2.*(1.-2.*hp.a/tR)
nfwmass= np._amp*(numpy.log(tR/np.a)-1.+np.a/tR)
assert numpy.fabs((jp.mass(tR,forceint=True)-jaffemass)/jaffemass) < 10.**-3., 'Limit mass for Jaffe potential not as expected'
assert numpy.fabs((hp.mass(tR,forceint=True)-hernmass)/hernmass) < 10.**-3., 'Limit mass for Hernquist potential not as expected'
assert numpy.fabs((np.mass(tR,forceint=True)-nfwmass)/nfwmass) < 10.**-2., 'Limit mass for NFW potential not as expected'
tR= 200.
    # Limiting behavior at larger r
jaffemass= jp._amp*(1.-jp.a/tR)
hernmass= hp._amp/2.*(1.-2.*hp.a/tR)
nfwmass= np._amp*(numpy.log(tR/np.a)-1.+np.a/tR)
assert numpy.fabs((jp.mass(tR,forceint=True)-jaffemass)/jaffemass) < 10.**-6., 'Limit mass for Jaffe potential not as expected'
    assert numpy.fabs((hp.mass(tR,forceint=True)-hernmass)/hernmass) < 10.**-6., 'Limit mass for Hernquist potential not as expected'
assert numpy.fabs((np.mass(tR,forceint=True)-nfwmass)/nfwmass) < 10.**-4., 'Limit mass for NFW potential not as expected'
# Burkert as an example of a SphericalPotential
bp= potential.BurkertPotential(amp=2.,a=3.)
assert numpy.fabs(bp.mass(4.2,forceint=True)-bp.mass(4.2)) < 1e-6, "Mass computed with SphericalPotential's general implementation incorrect"
return None
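# Minimal illustration of the mass interface exercised above (illustrative
# only, not collected by pytest): mass(R) returns the mass enclosed within R,
# and forceint=True forces the numerical integral of the density, which
# should agree with any explicitly implemented mass.
def _sketch_mass_spher_usage():
    import numpy
    from galpy import potential
    hp= potential.HernquistPotential(amp=2.,a=0.1)
    tR= 2.
    assert numpy.fabs(hp.mass(tR,forceint=True)-hp.mass(tR)) < 1e-8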
# Check that the masses are implemented correctly for spherical potentials
def test_mass_spher_analytic():
#TwoPowerSphericalPotentials all have explicitly implemented masses
dcp= potential.DehnenCoreSphericalPotential(amp=2.)
jp= potential.JaffePotential(amp=2.)
hp= potential.HernquistPotential(amp=2.)
np= potential.NFWPotential(amp=2.)
tp= potential.TwoPowerSphericalPotential(amp=2.)
dp= potential.DehnenSphericalPotential(amp=2.)
pp= potential.PlummerPotential(amp=2.,b=1.3)
tR= 2.
assert numpy.fabs(dcp.mass(tR,forceint=True)-dcp.mass(tR)) < 10.**-10., 'Explicit mass does not agree with integral of the density for Dehnen Core potential'
assert numpy.fabs(jp.mass(tR,forceint=True)-jp.mass(tR)) < 10.**-10., 'Explicit mass does not agree with integral of the density for Jaffe potential'
assert numpy.fabs(hp.mass(tR,forceint=True)-hp.mass(tR)) < 10.**-10., 'Explicit mass does not agree with integral of the density for Hernquist potential'
assert numpy.fabs(np.mass(tR,forceint=True)-np.mass(tR)) < 10.**-10., 'Explicit mass does not agree with integral of the density for NFW potential'
assert numpy.fabs(tp.mass(tR,forceint=True)-tp.mass(tR)) < 10.**-10., 'Explicit mass does not agree with integral of the density for TwoPowerSpherical potential'
    assert numpy.fabs(dp.mass(tR,forceint=True)-dp.mass(tR)) < 10.**-10., 'Explicit mass does not agree with integral of the density for DehnenSphericalPotential'
assert numpy.fabs(pp.mass(tR,forceint=True)-pp.mass(tR)) < 10.**-10., 'Explicit mass does not agree with integral of the density for Plummer potential'
return None
# Check that the masses are calculated correctly for axisymmetric potentials
def test_mass_axi():
#For Miyamoto-Nagai, we know that mass integrated over everything should be equal to amp, so
mp= potential.MiyamotoNagaiPotential(amp=1.)
assert numpy.fabs(mp.mass(200.,20.)-1.) < 0.01, 'Total mass of Miyamoto-Nagai potential w/ amp=1 is not equal to 1'
# Also spherical
assert numpy.fabs(mp.mass(200.)-1.) < 0.01, 'Total mass of Miyamoto-Nagai potential w/ amp=1 is not equal to 1'
    #For a double-exponential disk potential, the
    # mass(R,z) = 4 x pi x amp x hR^2 x hz x (1-(1+R/hR) x e^(-R/hR)) x (1-e^(-z/hz))
dp= potential.DoubleExponentialDiskPotential(amp=2.)
def dblexpmass(r,z,dp):
return 4.*numpy.pi*dp._amp*dp._hr**2.*dp._hz*(1.-(1.+r/dp._hr)*numpy.exp(-r/dp._hr))*(1.-numpy.exp(-z/dp._hz))
tR,tz= 0.01,0.01
assert numpy.fabs(dp.mass(tR,tz,forceint=True)-dblexpmass(tR,tz,dp)) < 5e-8, 'Mass for DoubleExponentialDiskPotential incorrect'
tR,tz= 0.1,0.05
assert numpy.fabs(dp.mass(tR,tz,forceint=True)-dblexpmass(tR,tz,dp)) < 3e-7, 'Mass for DoubleExponentialDiskPotential incorrect'
tR,tz= 1.,0.1
assert numpy.fabs(dp.mass(tR,tz,forceint=True)-dblexpmass(tR,tz,dp)) < 1e-6, 'Mass for DoubleExponentialDiskPotential incorrect'
tR,tz= 5.,0.1
assert numpy.fabs((dp.mass(tR,tz,forceint=True)-dblexpmass(tR,tz,dp))/dblexpmass(tR,tz,dp)) < 10.**-5., 'Mass for DoubleExponentialDiskPotential incorrect'
tR,tz= 5.,1.
assert numpy.fabs((dp.mass(tR,tz,forceint=True)-dblexpmass(tR,tz,dp))/dblexpmass(tR,tz,dp)) < 10.**-5., 'Mass for DoubleExponentialDiskPotential incorrect'
# Razor thin disk
rp= potential.RazorThinExponentialDiskPotential(amp=2.)
    def razexpmass(r,z,rp):
        return 2.*numpy.pi*rp._amp*rp._hr**2.*(1.-(1.+r/rp._hr)*numpy.exp(-r/rp._hr))
tR,tz= 0.01,0.01
assert numpy.fabs((rp.mass(tR,tz)-razexpmass(tR,tz,rp))/razexpmass(tR,tz,rp)) < 10.**-10., 'Mass for RazorThinExponentialDiskPotential incorrect'
tR,tz= 0.1,0.05
assert numpy.fabs((rp.mass(tR,tz)-razexpmass(tR,tz,rp))/razexpmass(tR,tz,rp)) < 10.**-10., 'Mass for RazorThinExponentialDiskPotential incorrect'
tR,tz= 1.,0.1
assert numpy.fabs((rp.mass(tR,tz)-razexpmass(tR,tz,rp))/razexpmass(tR,tz,rp)) < 10.**-10., 'Mass for RazorThinExponentialDiskPotential incorrect'
tR,tz= 5.,0.1
assert numpy.fabs((rp.mass(tR,tz)-razexpmass(tR,tz,rp))/razexpmass(tR,tz,rp)) < 10.**-10., 'Mass for RazorThinExponentialDiskPotential incorrect'
tR,tz= 5.,1.
assert numpy.fabs((rp.mass(tR,tz)-razexpmass(tR,tz,rp))/razexpmass(tR,tz,rp)) < 10.**-10., 'Mass for RazorThinExponentialDiskPotential incorrect'
# Kuzmin disk, amp = mass
kp= potential.KuzminDiskPotential(amp=2.,a=3.)
assert numpy.fabs(kp.mass(1000.,20.)-2.) < 1e-2, 'Mass for KuzminDiskPotential incorrect'
assert numpy.fabs(kp.mass(1000.)-2.) < 1e-2, 'Mass for KuzminDiskPotential incorrect'
#Test that nonAxi raises error
from galpy.orbit import Orbit
mop= potential.MovingObjectPotential(Orbit([1.,0.1,1.1,0.1,0.,0.]))
with pytest.raises(NotImplementedError) as excinfo:
mop.mass(1.,0.)
# also for lists
with pytest.raises(NotImplementedError) as excinfo:
potential.mass(mop,1.,0.)
with pytest.raises(NotImplementedError) as excinfo:
potential.mass([mop],1.,0.)
return None
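# Minimal illustration for the axisymmetric case above (illustrative only,
# not collected by pytest): for Miyamoto-Nagai with amp=1, the mass enclosed
# at large R and |z| approaches the total mass of 1.
def _sketch_mass_axi_usage():
    import numpy
    from galpy import potential
    mp= potential.MiyamotoNagaiPotential(amp=1.,a=0.5,b=0.0375)
    assert numpy.fabs(mp.mass(200.,20.)-1.) < 0.01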
# Check that the masses are calculated correctly for spheroidal potentials
def test_mass_spheroidal():
# PerfectEllipsoidPotential: total mass is amp, no matter what the axis ratio
pep= potential.PerfectEllipsoidPotential(amp=2.,a=3.,b=1.3,c=1.9)
assert numpy.fabs(pep.mass(1000.)-2.) < 1e-2, 'Total mass for PerfectEllipsoidPotential is incorrect'
pep= potential.PerfectEllipsoidPotential(amp=2.,a=3.,b=1.,c=1.9)
assert numpy.fabs(pep.mass(1000.)-2.) < 1e-2, 'Total mass for PerfectEllipsoidPotential is incorrect'
pep= potential.PerfectEllipsoidPotential(amp=2.,a=3.,b=1.,c=1.)
assert numpy.fabs(pep.mass(1000.)-2.) < 1e-2, 'Total mass for PerfectEllipsoidPotential is incorrect'
pep= potential.PerfectEllipsoidPotential(amp=2.,a=3.,b=.7,c=.5)
assert numpy.fabs(pep.mass(1000.)-2.) < 1e-2, 'Total mass for PerfectEllipsoidPotential is incorrect'
    # For TwoPowerTriaxial, the masses should be b x c times that of the spherical version
b= 0.7
c= 0.5
tpp= potential.TriaxialJaffePotential(amp=2.,a=3.,b=b,c=c)
sp= potential.JaffePotential(amp=2.,a=3.)
assert numpy.fabs(tpp.mass(1.3)/b/c-sp.mass(1.3)) < 1e-6, 'TwoPowerTriaxialPotential mass incorrect'
tpp= potential.TriaxialHernquistPotential(amp=2.,a=3.,b=b,c=c)
sp= potential.HernquistPotential(amp=2.,a=3.)
assert numpy.fabs(tpp.mass(1.3)/b/c-sp.mass(1.3)) < 1e-6, 'TwoPowerTriaxialPotential mass incorrect'
tpp= potential.TriaxialNFWPotential(amp=2.,a=3.,b=b,c=c)
sp= potential.NFWPotential(amp=2.,a=3.)
assert numpy.fabs(tpp.mass(1.3)/b/c-sp.mass(1.3)) < 1e-6, 'TwoPowerTriaxialPotential mass incorrect'
tpp= potential.TwoPowerTriaxialPotential(amp=2.,a=3.,b=b,c=c,alpha=1.1,beta=4.1)
sp= potential.TwoPowerSphericalPotential(amp=2.,a=3.,alpha=1.1,beta=4.1)
assert numpy.fabs(tpp.mass(1.3)/b/c-sp.mass(1.3)) < 1e-6, 'TwoPowerTriaxialPotential mass incorrect'
# For TriaxialGaussianPotential, total mass is amp, no matter b/c
pep= potential.TriaxialGaussianPotential(amp=2.,sigma=3.,b=1.3,c=1.9)
assert numpy.fabs(pep.mass(1000.)-2.) < 1e-2, 'Total mass for TriaxialGaussianPotential is incorrect'
pep= potential.TriaxialGaussianPotential(amp=2.,sigma=3.,b=1.,c=1.9)
assert numpy.fabs(pep.mass(1000.)-2.) < 1e-2, 'Total mass for TriaxialGaussianPotential is incorrect'
pep= potential.TriaxialGaussianPotential(amp=2.,sigma=3.,b=1.,c=1.)
assert numpy.fabs(pep.mass(1000.)-2.) < 1e-2, 'Total mass for TriaxialGaussianPotential is incorrect'
pep= potential.TriaxialGaussianPotential(amp=2.,sigma=3.,b=.7,c=.5)
assert numpy.fabs(pep.mass(1000.)-2.) < 1e-2, 'Total mass for TriaxialGaussianPotential is incorrect'
# Dummy EllipsoidalPotential for testing the general approach
from galpy.potential.EllipsoidalPotential import EllipsoidalPotential
class dummy(EllipsoidalPotential):
def __init__(self,amp=1.,b=1.,c=1.,
zvec=None,pa=None,glorder=50,
normalize=False,ro=None,vo=None):
EllipsoidalPotential.__init__(self,amp=amp,b=b,c=c,
zvec=zvec,pa=pa,
glorder=glorder,
ro=ro,vo=vo)
return None
def _mdens(self,m):
return m**-2.
b= 1.2
c= 1.7
dp= dummy(amp=2.,b=b,c=c)
r= 1.9
assert numpy.fabs(dp.mass(r)/b/c-4.*numpy.pi*2.*r) < 1e-6, 'General potential.EllipsoidalPotential mass incorrect'
r= 3.9
assert numpy.fabs(dp.mass(r)/b/c-4.*numpy.pi*2.*r) < 1e-6, 'General potential.EllipsoidalPotential mass incorrect'
return None
# Check that toVertical and toPlanar work
def test_toVertical_toPlanar():
#Grab all of the potentials
pots= [p for p in dir(potential)
if ('Potential' in p and not 'plot' in p and not 'RZTo' in p
and not 'FullTo' in p and not 'toPlanar' in p
and not 'evaluate' in p and not 'Wrapper' in p
and not 'toVertical' in p)]
pots.append('mockInterpSphericalPotential')
pots.append('mockInterpSphericalPotentialwForce')
rmpots= ['Potential','MWPotential','MWPotential2014',
'MovingObjectPotential',
'interpRZPotential', 'linearPotential', 'planarAxiPotential',
'planarPotential', 'verticalPotential','PotentialError',
'SnapshotRZPotential','InterpSnapshotRZPotential',
'EllipsoidalPotential','NumericalPotentialDerivativesMixin',
'SphericalPotential','interpSphericalPotential']
if False:
rmpots.append('DoubleExponentialDiskPotential')
rmpots.append('RazorThinExponentialDiskPotential')
for p in rmpots:
pots.remove(p)
for p in pots:
#Setup instance of potential
try:
tclass= getattr(potential,p)
except AttributeError:
tclass= getattr(sys.modules[__name__],p)
tp= tclass()
if not hasattr(tp,'normalize'): continue #skip these
tp.normalize(1.)
if isinstance(tp,potential.linearPotential) or \
isinstance(tp,potential.planarPotential):
continue
tpp= tp.toPlanar()
assert isinstance(tpp,potential.planarPotential), \
"Conversion into planar potential of potential %s fails" % p
tlp= tp.toVertical(1.,phi=2.)
        assert isinstance(tlp,potential.linearPotential), \
            "Conversion into linear potential of potential %s fails" % p
    return None
def test_RZToplanarPotential():
lp= potential.LogarithmicHaloPotential(normalize=1.)
plp= potential.RZToplanarPotential(lp)
assert isinstance(plp,potential.planarPotential), 'Running an RZPotential through RZToplanarPotential does not produce a planarPotential'
#Check that a planarPotential through RZToplanarPotential is still planar
pplp= potential.RZToplanarPotential(plp)
assert isinstance(pplp,potential.planarPotential), 'Running a planarPotential through RZToplanarPotential does not produce a planarPotential'
#Check that a list with a mix of planar and 3D potentials produces list of planar
ppplp= potential.RZToplanarPotential([lp,plp])
for p in ppplp:
assert isinstance(p,potential.planarPotential), 'Running a list with a mix of planar and 3D potentials through RZToPlanarPotential does not produce a list of planar potentials'
# Check that giving an object that is not a list or Potential instance produces an error
with pytest.raises(potential.PotentialError) as excinfo:
plp= potential.RZToplanarPotential('something else')
# Check that given a list of objects that are not a Potential instances gives an error
with pytest.raises(potential.PotentialError) as excinfo:
plp= potential.RZToplanarPotential([3,4,45])
with pytest.raises(potential.PotentialError) as excinfo:
plp= potential.RZToplanarPotential([lp,3,4,45])
# Check that using a non-axisymmetric potential gives an error
lpna= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,b=0.8)
with pytest.raises(potential.PotentialError) as excinfo:
plp= potential.RZToplanarPotential(lpna)
with pytest.raises(potential.PotentialError) as excinfo:
plp= potential.RZToplanarPotential([lpna])
# Check that giving potential.ChandrasekharDynamicalFrictionForce
# gives an error
pp= potential.PlummerPotential(amp=1.12,b=2.)
cdfc= potential.ChandrasekharDynamicalFrictionForce(\
GMs=0.01,const_lnLambda=8.,
dens=pp,sigmar=lambda r: 1./numpy.sqrt(2.))
with pytest.raises(NotImplementedError) as excinfo:
plp= potential.RZToplanarPotential([pp,cdfc])
with pytest.raises(NotImplementedError) as excinfo:
plp= potential.RZToplanarPotential(cdfc)
return None
def test_toPlanarPotential():
tnp= potential.TriaxialNFWPotential(normalize=1.,b=0.5)
ptnp= potential.toPlanarPotential(tnp)
assert isinstance(ptnp,potential.planarPotential), 'Running a non-axisymmetric Potential through toPlanarPotential does not produce a planarPotential'
# Also for list
ptnp= potential.toPlanarPotential([tnp])
assert isinstance(ptnp[0],potential.planarPotential), 'Running a non-axisymmetric Potential through toPlanarPotential does not produce a planarPotential'
#Check that a planarPotential through toPlanarPotential is still planar
    pptnp= potential.toPlanarPotential(ptnp[0])
assert isinstance(pptnp,potential.planarPotential), 'Running a planarPotential through toPlanarPotential does not produce a planarPotential'
try:
ptnp= potential.toPlanarPotential('something else')
except potential.PotentialError:
pass
else:
        raise AssertionError('Using toPlanarPotential with a string rather than a Potential or a planarPotential did not raise PotentialError')
# Check that list of objects that are not potentials gives error
with pytest.raises(potential.PotentialError) as excinfo:
plp= potential.toPlanarPotential([3,4,45])
# Check that giving potential.ChandrasekharDynamicalFrictionForce
# gives an error
pp= potential.PlummerPotential(amp=1.12,b=2.)
cdfc= potential.ChandrasekharDynamicalFrictionForce(\
GMs=0.01,const_lnLambda=8.,
dens=pp,sigmar=lambda r: 1./numpy.sqrt(2.))
with pytest.raises(NotImplementedError) as excinfo:
plp= potential.toPlanarPotential([pp,cdfc])
return None
def test_RZToverticalPotential():
lp= potential.LogarithmicHaloPotential(normalize=1.)
plp= potential.RZToverticalPotential(lp,1.2)
assert isinstance(plp,potential.linearPotential), 'Running an RZPotential through RZToverticalPotential does not produce a linearPotential'
#Check that a verticalPotential through RZToverticalPotential is still vertical
pplp= potential.RZToverticalPotential(plp,1.2)
assert isinstance(pplp,potential.linearPotential), 'Running a linearPotential through RZToverticalPotential does not produce a linearPotential'
# Also for list
pplp= potential.RZToverticalPotential([plp],1.2)
assert isinstance(pplp[0],potential.linearPotential), 'Running a linearPotential through RZToverticalPotential does not produce a linearPotential'
# Check that giving an object that is not a list or Potential instance produces an error
with pytest.raises(potential.PotentialError) as excinfo:
plp= potential.RZToverticalPotential('something else',1.2)
# Check that given a list of objects that are not a Potential instances gives an error
with pytest.raises(potential.PotentialError) as excinfo:
plp= potential.RZToverticalPotential([3,4,45],1.2)
with pytest.raises(potential.PotentialError) as excinfo:
plp= potential.RZToverticalPotential([lp,3,4,45],1.2)
# Check that giving a planarPotential gives an error
with pytest.raises(potential.PotentialError) as excinfo:
plp= potential.RZToverticalPotential(lp.toPlanar(),1.2)
# Check that giving a list of planarPotential gives an error
with pytest.raises(potential.PotentialError) as excinfo:
plp= potential.RZToverticalPotential([lp.toPlanar()],1.2)
# Check that using a non-axisymmetric potential gives an error
lpna= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,b=0.8)
with pytest.raises(potential.PotentialError) as excinfo:
plp= potential.RZToverticalPotential(lpna,1.2)
with pytest.raises(potential.PotentialError) as excinfo:
plp= potential.RZToverticalPotential([lpna],1.2)
# Check that giving potential.ChandrasekharDynamicalFrictionForce
# gives an error
pp= potential.PlummerPotential(amp=1.12,b=2.)
cdfc= potential.ChandrasekharDynamicalFrictionForce(\
GMs=0.01,const_lnLambda=8.,
dens=pp,sigmar=lambda r: 1./numpy.sqrt(2.))
with pytest.raises(NotImplementedError) as excinfo:
plp= potential.RZToverticalPotential([pp,cdfc],1.2)
with pytest.raises(NotImplementedError) as excinfo:
plp= potential.RZToverticalPotential(cdfc,1.2)
return None
def test_toVerticalPotential():
tnp= potential.TriaxialNFWPotential(normalize=1.,b=0.5)
ptnp= potential.toVerticalPotential(tnp,1.2,phi=0.8)
assert isinstance(ptnp,potential.linearPotential), 'Running a non-axisymmetric Potential through toVerticalPotential does not produce a linearPotential'
# Also for list
ptnp= potential.toVerticalPotential([tnp],1.2,phi=0.8)
assert isinstance(ptnp[0],potential.linearPotential), 'Running a non-axisymmetric Potential through toVerticalPotential does not produce a linearPotential'
#Check that a linearPotential through toVerticalPotential is still vertical
ptnp= potential.toVerticalPotential(tnp,1.2,phi=0.8)
pptnp= potential.toVerticalPotential(ptnp,1.2,phi=0.8)
assert isinstance(pptnp,potential.linearPotential), 'Running a linearPotential through toVerticalPotential does not produce a linearPotential'
# also for list
pptnp= potential.toVerticalPotential([ptnp],1.2,phi=0.8)
assert isinstance(pptnp[0],potential.linearPotential), 'Running a linearPotential through toVerticalPotential does not produce a linearPotential'
try:
ptnp= potential.toVerticalPotential('something else',1.2,phi=0.8)
except potential.PotentialError:
pass
else:
        raise AssertionError('Using toVerticalPotential with a string rather than a Potential or a linearPotential did not raise PotentialError')
# Check that giving a planarPotential gives an error
with pytest.raises(potential.PotentialError) as excinfo:
plp= potential.toVerticalPotential(tnp.toPlanar(),1.2,phi=0.8)
# Check that giving a list of planarPotential gives an error
with pytest.raises(potential.PotentialError) as excinfo:
plp= potential.toVerticalPotential([tnp.toPlanar()],1.2,phi=0.8)
# Check that giving a list of non-potentials gives error
with pytest.raises(potential.PotentialError) as excinfo:
plp= potential.toVerticalPotential([3,4,45],1.2)
# Check that giving potential.ChandrasekharDynamicalFrictionForce
# gives an error
pp= potential.PlummerPotential(amp=1.12,b=2.)
cdfc= potential.ChandrasekharDynamicalFrictionForce(\
GMs=0.01,const_lnLambda=8.,
dens=pp,sigmar=lambda r: 1./numpy.sqrt(2.))
with pytest.raises(NotImplementedError) as excinfo:
plp= potential.toVerticalPotential([pp,cdfc],1.2,phi=0.8)
with pytest.raises(NotImplementedError) as excinfo:
plp= potential.toVerticalPotential(cdfc,1.2,phi=0.8)
# Check that running a non-axisymmetric potential through toVertical w/o
# phi gives an error
with pytest.raises(potential.PotentialError) as excinfo:
ptnp= potential.toVerticalPotential(tnp,1.2)
return None
# Sanity check the derivative of the rotation curve and the frequencies in the plane
def test_dvcircdR_omegac_epifreq_rl_vesc():
#Derivative of rotation curve
#LogarithmicHaloPotential: rotation everywhere flat
lp= potential.LogarithmicHaloPotential(normalize=1.)
assert lp.dvcircdR(1.)**2. < 10.**-16., \
"LogarithmicHaloPotential's rotation curve is not flat at R=1"
assert lp.dvcircdR(0.5)**2. < 10.**-16., \
"LogarithmicHaloPotential's rotation curve is not flat at R=0.5"
assert lp.dvcircdR(2.)**2. < 10.**-16., \
"LogarithmicHaloPotential's rotation curve is not flat at R=2"
#Kepler potential, vc = vc_0(R/R0)^-0.5 -> dvcdR= -0.5 vc_0 (R/R0)**-1.5
kp= potential.KeplerPotential(normalize=1.)
assert (kp.dvcircdR(1.)+0.5)**2. < 10.**-16., \
"KeplerPotential's rotation curve is not what it should be at R=1"
assert (kp.dvcircdR(0.5)+0.5**-0.5)**2. < 10.**-16., \
"KeplerPotential's rotation curve is not what it should be at R=0.5"
assert (kp.dvcircdR(2.)+0.5**2.5)**2. < 10.**-16., \
"KeplerPotential's rotation curve is not what it should be at R=2"
#Rotational frequency
assert (lp.omegac(1.)-1.)**2. < 10.**-16., \
"LogarithmicHalo's rotational frequency is off at R=1"
assert (lp.omegac(0.5)-2.)**2. < 10.**-16., \
"LogarithmicHalo's rotational frequency is off at R=0.5"
assert (lp.omegac(2.)-0.5)**2. < 10.**-16., \
"LogarithmicHalo's rotational frequency is off at R=2"
assert (lp.toPlanar().omegac(2.)-0.5)**2. < 10.**-16., \
"LogarithmicHalo's rotational frequency is off at R=2 through planarPotential"
#Epicycle frequency, flat rotation curve
assert (lp.epifreq(1.)-numpy.sqrt(2.)*lp.omegac(1.))**2. < 10.**-16., \
"LogarithmicHalo's epicycle and rotational frequency are inconsistent with kappa = sqrt(2) Omega at R=1"
assert (lp.epifreq(0.5)-numpy.sqrt(2.)*lp.omegac(0.5))**2. < 10.**-16., \
"LogarithmicHalo's epicycle and rotational frequency are inconsistent with kappa = sqrt(2) Omega at R=0.5"
assert (lp.epifreq(2.0)-numpy.sqrt(2.)*lp.omegac(2.0))**2. < 10.**-16., \
"LogarithmicHalo's epicycle and rotational frequency are inconsistent with kappa = sqrt(2) Omega at R=2"
assert (lp.toPlanar().epifreq(2.0)-numpy.sqrt(2.)*lp.omegac(2.0))**2. < 10.**-16., \
"LogarithmicHalo's epicycle and rotational frequency are inconsistent with kappa = sqrt(2) Omega at R=, through planar2"
#Epicycle frequency, Kepler
assert (kp.epifreq(1.)-kp.omegac(1.))**2. < 10.**-16., \
"KeplerPotential's epicycle and rotational frequency are inconsistent with kappa = Omega at R=1"
assert (kp.epifreq(0.5)-kp.omegac(0.5))**2. < 10.**-16., \
"KeplerPotential's epicycle and rotational frequency are inconsistent with kappa = Omega at R=0.5"
assert (kp.epifreq(2.)-kp.omegac(2.))**2. < 10.**-16., \
"KeplerPotential's epicycle and rotational frequency are inconsistent with kappa = Omega at R=2"
#Check radius of circular orbit, Kepler
assert (kp.rl(1.)-1.)**2. < 10.**-16., \
"KeplerPotential's radius of a circular orbit is wrong at Lz=1."
assert (kp.rl(0.5)-1./4.)**2. < 10.**-16., \
"KeplerPotential's radius of a circular orbit is wrong at Lz=0.5"
assert (kp.rl(2.)-4.)**2. < 10.**-16., \
"KeplerPotential's radius of a circular orbit is wrong at Lz=2."
#Check radius of circular orbit, PowerSphericalPotential with close-to-flat rotation curve
pp= potential.PowerSphericalPotential(alpha=1.8,normalize=1.)
assert (pp.rl(1.)-1.)**2. < 10.**-16., \
"PowerSphericalPotential's radius of a circular orbit is wrong at Lz=1."
assert (pp.rl(0.5)-0.5**(10./11.))**2. < 10.**-16., \
"PowerSphericalPotential's radius of a circular orbit is wrong at Lz=0.5"
assert (pp.rl(2.)-2.**(10./11.))**2. < 10.**-16., \
"PowerSphericalPotential's radius of a circular orbit is wrong at Lz=2."
#Check radius of circular orbit, PowerSphericalPotential with steeper rotation curve
pp= potential.PowerSphericalPotential(alpha=0.5,normalize=1.)
assert (pp.rl(1.)-1.)**2. < 10.**-16., \
"PowerSphericalPotential's radius of a circular orbit is wrong at Lz=1."
assert (pp.rl(0.0625)-0.0625**(4./7.))**2. < 10.**-16., \
"PowerSphericalPotential's radius of a circular orbit is wrong at Lz=0.0625"
assert (pp.rl(16.)-16.**(4./7.))**2. < 10.**-16., \
"PowerSphericalPotential's radius of a circular orbit is wrong at Lz=16."
#Check radius in MWPotential2014 at very small lz, to test small lz behavior
lz= 0.000001
assert numpy.fabs(potential.vcirc(potential.MWPotential2014,potential.rl(potential.MWPotential2014,lz))*potential.rl(potential.MWPotential2014,lz)-lz) < 1e-12, 'Radius of circular orbit at small Lz in MWPotential2014 does not work as expected'
#Escape velocity of Kepler potential
assert (kp.vesc(1.)**2.-2.)**2. < 10.**-16., \
"KeplerPotential's escape velocity is wrong at R=1"
assert (kp.vesc(0.5)**2.-2.*kp.vcirc(0.5)**2.)**2. < 10.**-16., \
"KeplerPotential's escape velocity is wrong at R=0.5"
assert (kp.vesc(2.)**2.-2.*kp.vcirc(2.)**2.)**2. < 10.**-16., \
"KeplerPotential's escape velocity is wrong at R=2"
assert (kp.toPlanar().vesc(2.)**2.-2.*kp.vcirc(2.)**2.)**2. < 10.**-16., \
"KeplerPotential's escape velocity is wrong at R=2, through planar"
# W/ different interface
assert (kp.vcirc(1.)-potential.vcirc(kp,1.))**2. < 10.**-16., \
"KeplerPotential's circular velocity does not agree between kp.vcirc and vcirc(kp)"
assert (kp.vcirc(1.)-potential.vcirc(kp.toPlanar(),1.))**2. < 10.**-16., \
"KeplerPotential's circular velocity does not agree between kp.vcirc and vcirc(kp.toPlanar)"
assert (kp.vesc(1.)-potential.vesc(kp,1.))**2. < 10.**-16., \
"KeplerPotential's escape velocity does not agree between kp.vesc and vesc(kp)"
assert (kp.vesc(1.)-potential.vesc(kp.toPlanar(),1.))**2. < 10.**-16., \
"KeplerPotential's escape velocity does not agree between kp.vesc and vesc(kp.toPlanar)"
return None
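# A minimal sketch (not collected by pytest) of how the analytic dvcircdR
# values above can be cross-checked numerically; 'pot' is any galpy Potential
# instance and 'dR' a hypothetical finite-difference step.
def _numeric_dvcircdR(pot,R,dR=1e-6):
    # Symmetric finite difference of the rotation curve; for the normalized
    # KeplerPotential this should return ~ -0.5 R^-1.5
    return (pot.vcirc(R+dR)-pot.vcirc(R-dR))/2./dR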
def test_vcirc_phi_axi():
# Test that giving phi to vcirc for an axisymmetric potential doesn't
# affect the answer
kp= potential.KeplerPotential(normalize=1.)
phis= numpy.linspace(0.,numpy.pi,101)
vcs= numpy.array([kp.vcirc(1.,phi) for phi in phis])
assert numpy.all(numpy.fabs(vcs-1.) < 10.**-8.), 'Setting phi= in vcirc for axisymmetric potential gives different answers for different phi'
# One at a different radius
R= 0.5
vcs= numpy.array([kp.vcirc(R,phi) for phi in phis])
assert numpy.all(numpy.fabs(vcs-kp.vcirc(R)) < 10.**-8.), 'Setting phi= in vcirc for axisymmetric potential gives different answers for different phi'
return None
def test_vcirc_phi_nonaxi():
# Test that giving phi to vcirc for a non-axisymmetric potential does
# affect the answer
tnp= potential.TriaxialNFWPotential(b=0.4,normalize=1.)
# limited phi range
phis= numpy.linspace(numpy.pi/5.,numpy.pi/2.,5)
vcs= numpy.array([tnp.vcirc(1.,phi) for phi in phis])
    assert numpy.all(numpy.fabs(vcs-1.) > 0.01), 'Setting phi= in vcirc for non-axisymmetric potential does not give different answers for different phi'
# One at a different radius
R= 0.5
vcs= numpy.array([tnp.vcirc(R,phi) for phi in phis])
    assert numpy.all(numpy.fabs(vcs-tnp.vcirc(R,phi=0.)) > 0.01), 'Setting phi= in vcirc for non-axisymmetric potential does not give different answers for different phi'
return None
def test_vcirc_vesc_special():
#Test some special cases of vcirc and vesc
dp= potential.EllipticalDiskPotential()
try:
potential.plotRotcurve([dp])
except (AttributeError,potential.PotentialError): #should be raised
pass
else:
raise AssertionError("plotRotcurve for non-axisymmetric potential should have raised AttributeError, but didn't")
try:
potential.plotEscapecurve([dp])
except AttributeError: #should be raised
pass
else:
raise AssertionError("plotEscapecurve for non-axisymmetric potential should have raised AttributeError, but didn't")
lp= potential.LogarithmicHaloPotential(normalize=1.)
assert numpy.fabs(potential.calcRotcurve(lp,0.8)-lp.vcirc(0.8)) < 10.**-16., 'Circular velocity calculated with calcRotcurve not the same as that calculated with vcirc'
    assert numpy.fabs(potential.calcEscapecurve(lp,0.8)-lp.vesc(0.8)) < 10.**-16., 'Escape velocity calculated with calcEscapecurve not the same as that calculated with vesc'
return None
def test_lindbladR():
lp= potential.LogarithmicHaloPotential(normalize=1.)
assert numpy.fabs(lp.lindbladR(0.5,'corotation')-2.) < 10.**-10., 'Location of co-rotation resonance is wrong for LogarithmicHaloPotential'
assert numpy.fabs(lp.omegac(lp.lindbladR(0.5,2))-2./(2.-numpy.sqrt(2.))*0.5) < 10.**-14., 'Location of m=2 resonance is wrong for LogarithmicHaloPotential'
assert numpy.fabs(lp.omegac(lp.lindbladR(0.5,-2))+2./(-2.-numpy.sqrt(2.))*0.5) < 10.**-14., 'Location of m=-2 resonance is wrong for LogarithmicHaloPotential'
#Also through general interface
assert numpy.fabs(lp.omegac(potential.lindbladR(lp,0.5,-2))+2./(-2.-numpy.sqrt(2.))*0.5) < 10.**-14., 'Location of m=-2 resonance is wrong for LogarithmicHaloPotential'
#Also for planar
assert numpy.fabs(lp.omegac(lp.toPlanar().lindbladR(0.5,-2))+2./(-2.-numpy.sqrt(2.))*0.5) < 10.**-14., 'Location of m=-2 resonance is wrong for LogarithmicHaloPotential'
#Test non-existent ones
mp= potential.MiyamotoNagaiPotential(normalize=1.,a=0.3)
    assert mp.lindbladR(3.,2) is None, 'MiyamotoNagai w/ OmegaP=3 should not have an inner m=2 LindbladR'
    assert mp.lindbladR(6.,'corotation') is None, 'MiyamotoNagai w/ OmegaP=6 should not have a corotation radius'
#Test error
try:
lp.lindbladR(0.5,'wrong resonance')
except OSError:
pass
else:
raise AssertionError("lindbladR w/ wrong m input should have raised IOError, but didn't")
return None
def test_rE_flatvc():
# Test the rE function for the case of a flat rotation curve
# Expected rE when vc(1)=1 is exp(E-1/2) (e.g., Dehnen 1999 epicycle)
lp= potential.LogarithmicHaloPotential(normalize=1.)
def expected_rE(E):
return numpy.exp(E-0.5)
Es= numpy.linspace(-10.,20.,101)
rEs= numpy.array([lp.rE(E) for E in Es])
assert numpy.amax(numpy.fabs(rEs-expected_rE(Es))) < 1e-6, 'rE method does not give the expected result for a flat rotation curve'
# Also as function
rEs= numpy.array([potential.rE(lp,E) for E in Es])
assert numpy.amax(numpy.fabs(rEs-expected_rE(Es))) < 1e-6, 'rE method does not give the expected result for a flat rotation curve'
return None
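# Derivation of the expected rE above (a sketch): the normalized
# LogarithmicHaloPotential has Phi(r) = ln(r) and vc = 1 in the plane, so the
# circular-orbit energy is E_c(r) = vc^2/2 + Phi(r) = 1/2 + ln(r), which
# inverts to rE(E) = exp(E - 1/2).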
def test_rE_powervc():
# Test the rE function for the case of a power-law rotation curve: v = r^beta
# Expected rE when vc(1)=1 is (2 beta E / [1+beta])**(1./[2beta])
# (e.g., Dehnen 1999 epicycle)
betas= [-0.45,-0.2,0.6,0.9]
def expected_rE(E,beta):
return (2.*beta*E/(1.+beta))**(1./2./beta)
for beta in betas:
pp= PowerSphericalPotential(alpha=2.-2.*beta,normalize=1.)
rmin, rmax= 1e-8,1e5
Emin= pp.vcirc(rmin)**2./2.+pp(rmin,0.)
Emax= pp.vcirc(rmax)**2./2.+pp(rmax,0.)
Es= numpy.linspace(Emin,Emax,101)
# Test both method and function
if beta < 0.:
rEs= numpy.array([pp.rE(E) for E in Es])
else:
rEs= numpy.array([potential.rE(pp,E) for E in Es])
assert numpy.amax(numpy.fabs(rEs-expected_rE(Es,beta))) < 1e-8, 'rE method does not give the expected result for a power-law rotation curve'
return None
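# Derivation of the expected rE for a power-law rotation curve (a sketch):
# vc = r^beta implies dPhi/dr = vc^2/r = r^(2 beta - 1), hence
# Phi(r) = r^(2 beta)/(2 beta) (beta != 0) and
#   E_c(r) = r^(2 beta)/2 + r^(2 beta)/(2 beta) = (1+beta)/(2 beta) r^(2 beta),
# which inverts to rE = (2 beta E/(1+beta))^(1/(2 beta)); the beta = 0 (flat)
# case is the logarithmic one tested above.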
def test_rE_MWPotential2014():
# Test the rE function for MWPotential2014
# No closed-form true answer, so just check that the expected relation holds
def Ec(r):
return potential.vcirc(potential.MWPotential2014,r)**2./2.\
+potential.evaluatePotentials(potential.MWPotential2014,r,0.)
rmin, rmax= 1e-8,1e5
Emin= Ec(rmin)
Emax= Ec(rmax)
Es= numpy.linspace(Emin,Emax,101)
rEs= numpy.array([potential.rE(potential.MWPotential2014,E) for E in Es])
Ecs= numpy.array([Ec(rE) for rE in rEs])
assert numpy.amax(numpy.fabs(Ecs-Es)) < 1e-8, 'rE method does not give the expected result for MWPotential2014'
return None
def test_LcE_flatvc():
# Test the LcE function for the case of a flat rotation curve
# Expected LcE when vc(1)=1 is exp(E-1/2) (e.g., Dehnen 1999 epicycle)
lp= potential.LogarithmicHaloPotential(normalize=1.)
def expected_LcE(E):
return numpy.exp(E-0.5)
Es= numpy.linspace(-10.,20.,101)
LcEs= numpy.array([lp.LcE(E) for E in Es])
assert numpy.amax(numpy.fabs(LcEs-expected_LcE(Es))) < 1e-6, 'LcE method does not give the expected result for a flat rotation curve'
# Also as function
LcEs= numpy.array([potential.LcE(lp,E) for E in Es])
assert numpy.amax(numpy.fabs(LcEs-expected_LcE(Es))) < 1e-6, 'LcE method does not give the expected result for a flat rotation curve'
return None
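# For a flat rotation curve Lc(E) = rE * vc(rE) = rE, since vc = 1 everywhere,
# so the expected LcE is the same exp(E - 1/2) as for rE above.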
def test_LcE_powervc():
# Test the LcE function for the case of a power-law rotation curve: v = r^beta
# Expected LcE when vc(1)=1 is (2 beta E / [1+beta])**([1.+beta]/[2beta])
# (e.g., Dehnen 1999 epicycle)
betas= [-0.45,-0.2,0.6,0.9]
def expected_LcE(E,beta):
return (2.*beta*E/(1.+beta))**((1.+beta)/2./beta)
for beta in betas:
pp= PowerSphericalPotential(alpha=2.-2.*beta,normalize=1.)
rmin, rmax= 1e-8,1e5
Emin= pp.vcirc(rmin)**2./2.+pp(rmin,0.)
Emax= pp.vcirc(rmax)**2./2.+pp(rmax,0.)
Es= numpy.linspace(Emin,Emax,101)
# Test both method and function
if beta < 0.:
LcEs= numpy.array([pp.LcE(E) for E in Es])
else:
LcEs= numpy.array([potential.LcE(pp,E) for E in Es])
assert numpy.amax(numpy.fabs(LcEs-expected_LcE(Es,beta))) < 1e-5, 'LcE method does not give the expected result for a power-law rotation curve'
return None
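# Derivation of the expected LcE for a power-law rotation curve (a sketch):
# Lc = r vc(r) = r^(1+beta) evaluated at r = rE, i.e.,
#   LcE = (2 beta E/(1+beta))^((1+beta)/(2 beta)).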
def test_vterm():
lp= potential.LogarithmicHaloPotential(normalize=1.)
assert numpy.fabs(lp.vterm(30.,deg=True)-0.5*(lp.omegac(0.5)-1.)) < 10.**-10., 'vterm for LogarithmicHaloPotential at l=30 is incorrect'
assert numpy.fabs(lp.vterm(numpy.pi/3.,deg=False)-numpy.sqrt(3.)/2.*(lp.omegac(numpy.sqrt(3.)/2.)-1.)) < 10.**-10., 'vterm for LogarithmicHaloPotential at l=60 in rad is incorrect'
#Also using general interface
assert numpy.fabs(potential.vterm(lp,30.,deg=True)-0.5*(lp.omegac(0.5)-1.)) < 10.**-10., 'vterm for LogarithmicHaloPotential at l=30 is incorrect'
assert numpy.fabs(potential.vterm(lp,numpy.pi/3.,deg=False)-numpy.sqrt(3.)/2.*(lp.omegac(numpy.sqrt(3.)/2.)-1.)) < 10.**-10., 'vterm for LogarithmicHaloPotential at l=60 in rad is incorrect'
return None
def test_flattening():
#Simple tests: LogarithmicHalo
qs= [0.75,1.,1.25]
for q in qs:
lp= potential.LogarithmicHaloPotential(normalize=1.,q=q)
assert (lp.flattening(1.,0.001)-q)**2. < 10.**-16., \
"Flattening of LogarithmicHaloPotential w/ q= %f is not equal to q at (R,z) = (1.,0.001)" % q
assert (lp.flattening(1.,0.1)-q)**2. < 10.**-16., \
"Flattening of LogarithmicHaloPotential w/ q= %f is not equal to q at (R,z) = (1.,0.1)" % q
assert (lp.flattening(0.5,0.001)-q)**2. < 10.**-16., \
"Flattening of LogarithmicHaloPotential w/ q= %f is not equal to q at (R,z) = (0.5,0.001)" % q
assert (lp.flattening(0.5,0.1)-q)**2. < 10.**-16., \
"Flattening of LogarithmicHaloPotential w/ q= %f is not equal to q at (R,z) = (0.5,0.1)" % q
#One test with the general interface
assert (potential.flattening(lp,0.5,0.1)-q)**2. < 10.**-16., \
"Flattening of LogarithmicHaloPotential w/ q= %f is not equal to q at (R,z) = (0.5,0.1), through potential.flattening" % q
#Check some spherical potentials
kp= potential.KeplerPotential(normalize=1.)
assert (kp.flattening(1.,0.02)-1.)**2. < 10.**-16., \
"Flattening of KeplerPotential is not equal to 1 at (R,z) = (1.,0.02)"
np= potential.NFWPotential(normalize=1.,a=5.)
assert (np.flattening(1.,0.02)-1.)**2. < 10.**-16., \
"Flattening of NFWPotential is not equal to 1 at (R,z) = (1.,0.02)"
hp= potential.HernquistPotential(normalize=1.,a=5.)
assert (hp.flattening(1.,0.02)-1.)**2. < 10.**-16., \
"Flattening of HernquistPotential is not equal to 1 at (R,z) = (1.,0.02)"
#Disk potentials should be oblate everywhere
mp= potential.MiyamotoNagaiPotential(normalize=1.,a=0.5,b=0.05)
assert mp.flattening(1.,0.1) <= 1., \
"Flattening of MiyamotoNagaiPotential w/ a=0.5, b=0.05 is > 1 at (R,z) = (1.,0.1)"
assert mp.flattening(1.,2.) <= 1., \
"Flattening of MiyamotoNagaiPotential w/ a=0.5, b=0.05 is > 1 at (R,z) = (1.,2.)"
assert mp.flattening(3.,3.) <= 1., \
"Flattening of MiyamotoNagaiPotential w/ a=0.5, b=0.05 is > 1 at (R,z) = (3.,3.)"
return None
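# A minimal sketch (assuming galpy's convention; hypothetical helper, not part
# of the test suite) of the force-based flattening estimate that the asserts
# above rely on: c/a ~ sqrt(|z F_R / (R F_z)|), which reduces to q for the
# LogarithmicHaloPotential and to 1 for any spherical potential.
def _force_flattening(pot,R,z):
    return numpy.sqrt(numpy.fabs(z*pot.Rforce(R,z)/R/pot.zforce(R,z)))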
def test_verticalfreq():
#For spherical potentials, vertical freq should be equal to rotational freq
lp= potential.LogarithmicHaloPotential(normalize=1.,q=1.)
kp= potential.KeplerPotential(normalize=1.)
np= potential.NFWPotential(normalize=1.)
bp= potential.BurkertPotential(normalize=1.)
rs= numpy.linspace(0.2,2.,21)
for r in rs:
assert numpy.fabs(lp.verticalfreq(r)-lp.omegac(r)) < 10.**-10., \
'Verticalfreq for spherical potential does not equal rotational freq'
assert numpy.fabs(kp.verticalfreq(r)-kp.omegac(r)) < 10.**-10., \
'Verticalfreq for spherical potential does not equal rotational freq'
#Through general interface
assert numpy.fabs(potential.verticalfreq(np,r)-np.omegac(r)) < 10.**-10., \
'Verticalfreq for spherical potential does not equal rotational freq'
assert numpy.fabs(potential.verticalfreq([bp],r)-bp.omegac(r)) < 10.**-10., \
'Verticalfreq for spherical potential does not equal rotational freq'
    #For the double-exponential disk potential, epi^2+vert^2-2*rot^2 =~ 0 at very large distances (only approximately, because we no longer use a Kepler potential for the far field)
    dp= potential.DoubleExponentialDiskPotential(normalize=1.,hr=0.05,hz=0.01)
    assert numpy.fabs(dp.epifreq(1.)**2.+dp.verticalfreq(1.)**2.-2.*dp.omegac(1.)**2.) < 10.**-4., 'epi^2+vert^2-2*rot^2 !=~ 0 for dblexp potential, very far from center'
    #Closer to the center, this becomes the Poisson eqn.
    assert numpy.fabs(dp.epifreq(.125)**2.+dp.verticalfreq(.125)**2.-2.*dp.omegac(.125)**2.-4.*numpy.pi*dp.dens(0.125,0.))/4./numpy.pi/dp.dens(0.125,0.) < 10.**-3., 'epi^2+vert^2-2*rot^2 !=~ dens for dblexp potential'
return None
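# Sketch of the Poisson relation used above: for an axisymmetric potential in
# the midplane, nabla^2 Phi = 4 pi G rho gives (with G = 1 in natural units)
#   kappa^2 + nu^2 - 2 Omega^2 = 4 pi rho,
# since kappa^2 = d^2Phi/dR^2 + 3 Omega^2 and nu^2 = d^2Phi/dz^2. A
# hypothetical helper for the residual (note that galpy may fall back to the
# Laplacian for potentials without an explicit density, which would make this
# trivially zero):
def _poisson_residual(pot,R):
    return (pot.epifreq(R)**2.+pot.verticalfreq(R)**2.
            -2.*pot.omegac(R)**2.-4.*numpy.pi*pot.dens(R,0.))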
def test_planar_nonaxi():
dp= potential.EllipticalDiskPotential()
try:
potential.evaluateplanarPotentials(dp,1.)
except potential.PotentialError:
pass
else:
raise AssertionError('evaluateplanarPotentials for non-axisymmetric potential w/o specifying phi did not raise PotentialError')
try:
potential.evaluateplanarRforces(dp,1.)
except potential.PotentialError:
pass
else:
raise AssertionError('evaluateplanarRforces for non-axisymmetric potential w/o specifying phi did not raise PotentialError')
try:
potential.evaluateplanarphitorques(dp,1.)
except potential.PotentialError:
pass
else:
raise AssertionError('evaluateplanarphitorques for non-axisymmetric potential w/o specifying phi did not raise PotentialError')
try:
potential.evaluateplanarR2derivs(dp,1.)
except potential.PotentialError:
pass
else:
raise AssertionError('evaluateplanarR2derivs for non-axisymmetric potential w/o specifying phi did not raise PotentialError')
return None
def test_ExpDisk_special():
#Test some special cases for the ExponentialDisk potentials
#Test that array input works
dp= potential.DoubleExponentialDiskPotential(normalize=1.)
rs= numpy.linspace(0.1,2.11)
zs= numpy.ones_like(rs)*0.1
#Potential itself
dpevals= numpy.array([dp(r,z) for (r,z) in zip(rs,zs)])
assert numpy.all(numpy.fabs(dp(rs,zs)-dpevals) < 10.**-10.), \
        'DoubleExponentialDiskPotential evaluation does not work as expected for array inputs'
#Rforce
#dpevals= numpy.array([dp.Rforce(r,z) for (r,z) in zip(rs,zs)])
#assert numpy.all(numpy.fabs(dp.Rforce(rs,zs)-dpevals) < 10.**-10.), \
    #    'DoubleExponentialDiskPotential Rforce evaluation does not work as expected for array inputs'
#zforce
#dpevals= numpy.array([dp.zforce(r,z) for (r,z) in zip(rs,zs)])
#assert numpy.all(numpy.fabs(dp.zforce(rs,zs)-dpevals) < 10.**-10.), \
    #    'DoubleExponentialDiskPotential zforce evaluation does not work as expected for array inputs'
#R2deriv
#dpevals= numpy.array([dp.R2deriv(r,z) for (r,z) in zip(rs,zs)])
#assert numpy.all(numpy.fabs(dp.R2deriv(rs,zs)-dpevals) < 10.**-10.), \
    #    'DoubleExponentialDiskPotential R2deriv evaluation does not work as expected for array inputs'
#z2deriv
#dpevals= numpy.array([dp.z2deriv(r,z) for (r,z) in zip(rs,zs)])
#assert numpy.all(numpy.fabs(dp.z2deriv(rs,zs)-dpevals) < 10.**-10.), \
    #    'DoubleExponentialDiskPotential z2deriv evaluation does not work as expected for array inputs'
#Rzderiv
#dpevals= numpy.array([dp.Rzderiv(r,z) for (r,z) in zip(rs,zs)])
#assert numpy.all(numpy.fabs(dp.Rzderiv(rs,zs)-dpevals) < 10.**-10.), \
    #    'DoubleExponentialDiskPotential Rzderiv evaluation does not work as expected for array inputs'
#Check the PotentialError for z=/=0 evaluation of R2deriv of RazorThinDiskPotential
rp= potential.RazorThinExponentialDiskPotential(normalize=1.)
try: rp.R2deriv(1.,0.1)
except potential.PotentialError: pass
else: raise AssertionError("RazorThinExponentialDiskPotential's R2deriv did not raise AttributeError for z=/= 0 input")
return None
def test_DehnenBar_special():
#Test some special cases for the DehnenBar potentials
#Test that array input works
dp= potential.DehnenBarPotential()
#Test from rs < rb through to rs > rb
rs= numpy.linspace(0.1*dp._rb,2.11*dp._rb)
zs= numpy.ones_like(rs)*0.1
phis=numpy.ones_like(rs)*0.1
#Potential itself
dpevals= numpy.array([dp(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential evaluation does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential evaluation does not work as expected for array inputs'
#Rforce
dpevals= numpy.array([dp.Rforce(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.Rforce(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rforce evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.Rforce(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.Rforce(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rforce does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.Rforce(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.Rforce(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rforce does not work as expected for array inputs'
#zforce
dpevals= numpy.array([dp.zforce(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.zforce(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential zforce evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.zforce(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.zforce(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential zforce does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.zforce(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.zforce(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential zforce does not work as expected for array inputs'
#phitorque
dpevals= numpy.array([dp.phitorque(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.phitorque(rs,zs,phis)-dpevals) < 10.**-10.), \
        'DehnenBarPotential phitorque evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.phitorque(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.phitorque(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential phitorque does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.phitorque(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.phitorque(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential phitorque does not work as expected for array inputs'
#R2deriv
dpevals= numpy.array([dp.R2deriv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.R2deriv(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential R2deriv evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.R2deriv(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.R2deriv(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential R2deriv does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.R2deriv(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.R2deriv(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential R2deriv does not work as expected for array inputs'
#z2deriv
dpevals= numpy.array([dp.z2deriv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.z2deriv(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential z2deriv evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.z2deriv(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.z2deriv(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential z2deriv does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.z2deriv(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.z2deriv(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential z2deriv does not work as expected for array inputs'
#phi2deriv
dpevals= numpy.array([dp.phi2deriv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.phi2deriv(rs,zs,phis)-dpevals) < 10.**-10.), \
        'DehnenBarPotential phi2deriv evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.phi2deriv(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.phi2deriv(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential phi2deriv does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.phi2deriv(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.phi2deriv(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential phi2deriv does not work as expected for array inputs'
#Rzderiv
dpevals= numpy.array([dp.Rzderiv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.Rzderiv(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rzderiv evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.Rzderiv(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.Rzderiv(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rzderiv does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.Rzderiv(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.Rzderiv(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rzderiv does not work as expected for array inputs'
#Rphideriv
dpevals= numpy.array([dp.Rphideriv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.Rphideriv(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rphideriv evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.Rphideriv(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.Rphideriv(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rphideriv does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.Rphideriv(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.Rphideriv(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential Rphideriv does not work as expected for array inputs'
#phizderiv
dpevals= numpy.array([dp.phizderiv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.phizderiv(rs,zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential phizderiv evaluation does not work as expected for array inputs'
# R array, z not an array
dpevals= numpy.array([dp.phizderiv(r,zs[0],phi) for (r,phi) in zip(rs,phis)])
assert numpy.all(numpy.fabs(dp.phizderiv(rs,zs[0],phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential phizderiv does not work as expected for array inputs'
# z array, R not an array
dpevals= numpy.array([dp.phizderiv(rs[0],z,phi) for (z,phi) in zip(zs,phis)])
assert numpy.all(numpy.fabs(dp.phizderiv(rs[0],zs,phis)-dpevals) < 10.**-10.), \
'DehnenBarPotential phizderiv does not work as expected for array inputs'
return None
def test_SpiralArm_special():
    #Test some special cases for the SpiralArms potential
#Test that array input works
dp= potential.SpiralArmsPotential()
rs= numpy.linspace(0.1,2.,11)
zs= numpy.ones_like(rs)*0.1
phis=numpy.ones_like(rs)*0.1
#Potential itself
dpevals= numpy.array([dp(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp(rs,zs,phis)-dpevals) < 10.**-10.), \
'SpiralArmsPotential evaluation does not work as expected for array inputs'
#Rforce
dpevals= numpy.array([dp.Rforce(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.Rforce(rs,zs,phis)-dpevals) < 10.**-10.), \
'SpiralArmsPotential Rforce evaluation does not work as expected for array inputs'
#zforce
dpevals= numpy.array([dp.zforce(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.zforce(rs,zs,phis)-dpevals) < 10.**-10.), \
'SpiralArmsPotential zforce evaluation does not work as expected for array inputs'
#phitorque
dpevals= numpy.array([dp.phitorque(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.phitorque(rs,zs,phis)-dpevals) < 10.**-10.), \
        'SpiralArmsPotential phitorque evaluation does not work as expected for array inputs'
#R2deriv
dpevals= numpy.array([dp.R2deriv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.R2deriv(rs,zs,phis)-dpevals) < 10.**-10.), \
'SpiralArmsPotential R2deriv evaluation does not work as expected for array inputs'
#z2deriv
dpevals= numpy.array([dp.z2deriv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.z2deriv(rs,zs,phis)-dpevals) < 10.**-10.), \
'SpiralArmsPotential z2deriv evaluation does not work as expected for array inputs'
#phi2deriv
dpevals= numpy.array([dp.phi2deriv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.phi2deriv(rs,zs,phis)-dpevals) < 10.**-10.), \
        'SpiralArmsPotential phi2deriv evaluation does not work as expected for array inputs'
#Rzderiv
dpevals= numpy.array([dp.Rzderiv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.Rzderiv(rs,zs,phis)-dpevals) < 10.**-10.), \
'SpiralArmsPotential Rzderiv evaluation does not work as expected for array inputs'
#Rphideriv
dpevals= numpy.array([dp.Rphideriv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.Rphideriv(rs,zs,phis)-dpevals) < 10.**-10.), \
        'SpiralArmsPotential Rphideriv evaluation does not work as expected for array inputs'
#phizderiv
dpevals= numpy.array([dp.phizderiv(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.phizderiv(rs,zs,phis)-dpevals) < 10.**-10.), \
        'SpiralArmsPotential phizderiv evaluation does not work as expected for array inputs'
#dens
dpevals= numpy.array([dp.dens(r,z,phi) for (r,z,phi) in zip(rs,zs,phis)])
assert numpy.all(numpy.fabs(dp.dens(rs,zs,phis)-dpevals) < 10.**-10.), \
        'SpiralArmsPotential dens evaluation does not work as expected for array inputs'
return None
def test_MovingObject_density():
mp= mockMovingObjectPotential()
#Just test that the density far away from the object is close to zero
assert numpy.fabs(mp.dens(5.,0.)) < 10.**-8., 'Density far away from MovingObject is not close to zero'
return None
# test specialSelf for TwoPowerSphericalPotential
def test_TwoPowerSphericalPotentialSpecialSelf():
# TODO replace manual additions with an automatic method
    # that checks the signatures of all methods in all potentials
kw = dict(amp=1.,a=1.,normalize=False,ro=None,vo=None)
Rs= numpy.array([0.5,1.,2.])
Zs= numpy.array([0.,.125,-.125])
pot = potential.TwoPowerSphericalPotential(alpha=0, beta=4,**kw)
comp = potential.DehnenCoreSphericalPotential(**kw)
assert all(pot._evaluate(Rs, Zs) == comp._evaluate(Rs, Zs))
assert all(pot._Rforce(Rs, Zs) == comp._Rforce(Rs, Zs))
assert all(pot._zforce(Rs, Zs) == comp._zforce(Rs, Zs))
pot = potential.TwoPowerSphericalPotential(alpha=1, beta=4,**kw)
comp = potential.HernquistPotential(**kw)
assert all(pot._evaluate(Rs, Zs) == comp._evaluate(Rs, Zs))
assert all(pot._Rforce(Rs, Zs) == comp._Rforce(Rs, Zs))
assert all(pot._zforce(Rs, Zs) == comp._zforce(Rs, Zs))
pot = potential.TwoPowerSphericalPotential(alpha=2, beta=4,**kw)
comp = potential.JaffePotential(**kw)
assert all(pot._evaluate(Rs, Zs) == comp._evaluate(Rs, Zs))
assert all(pot._Rforce(Rs, Zs) == comp._Rforce(Rs, Zs))
assert all(pot._zforce(Rs, Zs) == comp._zforce(Rs, Zs))
pot = potential.TwoPowerSphericalPotential(alpha=1, beta=3,**kw)
comp = potential.NFWPotential(**kw)
assert all(pot._evaluate(Rs, Zs) == comp._evaluate(Rs, Zs))
assert all(pot._Rforce(Rs, Zs) == comp._Rforce(Rs, Zs))
assert all(pot._zforce(Rs, Zs) == comp._zforce(Rs, Zs))
return None
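# Summary of the special-case mapping exercised above, with (alpha,beta) the
# inner and outer power-law exponents of TwoPowerSphericalPotential:
#   (0,4) -> DehnenCoreSphericalPotential
#   (1,4) -> HernquistPotential
#   (2,4) -> JaffePotential
#   (1,3) -> NFWPotential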
def test_DehnenSphericalPotentialSpecialSelf():
# TODO replace manual additions with an automatic method
    # that checks the signatures of all methods in all potentials
kw = dict(amp=1.,a=1.,normalize=False,ro=None,vo=None)
Rs= numpy.array([0.5,1.,2.])
Zs= numpy.array([0.,.125,-.125])
pot = potential.DehnenSphericalPotential(alpha=0,**kw)
comp = potential.DehnenCoreSphericalPotential(**kw)
assert all(pot._evaluate(Rs, Zs) == comp._evaluate(Rs, Zs))
assert all(pot._Rforce(Rs, Zs) == comp._Rforce(Rs, Zs))
assert all(pot._zforce(Rs, Zs) == comp._zforce(Rs, Zs))
assert all(pot._R2deriv(Rs, Zs) == comp._R2deriv(Rs, Zs))
assert all(pot._Rzderiv(Rs, Zs) == comp._Rzderiv(Rs, Zs))
pot = potential.DehnenSphericalPotential(alpha=1,**kw)
comp = potential.HernquistPotential(**kw)
assert all(pot._evaluate(Rs, Zs) == comp._evaluate(Rs, Zs))
assert all(pot._Rforce(Rs, Zs) == comp._Rforce(Rs, Zs))
assert all(pot._zforce(Rs, Zs) == comp._zforce(Rs, Zs))
pot = potential.DehnenSphericalPotential(alpha=2,**kw)
comp = potential.JaffePotential(**kw)
assert all(pot._evaluate(Rs, Zs) == comp._evaluate(Rs, Zs))
assert all(pot._Rforce(Rs, Zs) == comp._Rforce(Rs, Zs))
assert all(pot._zforce(Rs, Zs) == comp._zforce(Rs, Zs))
return None
# Test that MWPotential2014 is what it's supposed to be
def test_MWPotential2014():
pot= potential.MWPotential2014
V0, R0= 220., 8.
#Check the parameters of the bulge
assert pot[0].rc == 1.9/R0, "MWPotential2014's bulge cut-off radius is incorrect"
assert pot[0].alpha == 1.8, "MWPotential2014's bulge power-law exponent is incorrect"
assert numpy.fabs(pot[0].Rforce(1.,0.)+0.05) < 10.**-14., "MWPotential2014's bulge amplitude is incorrect"
#Check the parameters of the disk
assert numpy.fabs(pot[1]._a-3./R0) < 10.**-14., "MWPotential2014's disk scale length is incorrect"
assert numpy.fabs(pot[1]._b-0.28/R0) < 10.**-14., "MWPotential2014's disk scale height is incorrect"
assert numpy.fabs(pot[1].Rforce(1.,0.)+0.60) < 10.**-14., "MWPotential2014's disk amplitude is incorrect"
#Check the parameters of the halo
assert numpy.fabs(pot[2].a-16./R0) < 10.**-14., "MWPotential2014's halo scale radius is incorrect"
assert numpy.fabs(pot[2].Rforce(1.,0.)+0.35) < 10.**-14., "MWPotential2014's halo amplitude is incorrect"
return None
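# Side note, grounded in the asserts above: the bulge, disk, and halo Rforce
# contributions at (R,z) = (1,0) are -0.05, -0.60, and -0.35, summing to -1,
# so vcirc(R0) = 1 in natural units; i.e., the components contribute 5%, 60%,
# and 35% of the radial force at the solar circle.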
# Test that the McMillan17 potential is what it's supposed to be
def test_McMillan17():
from galpy.potential.mwpotentials import McMillan17
from galpy.util import conversion
ro,vo= McMillan17[0]._ro, McMillan17[0]._vo
# Check some numbers from Table 3 of McMillan17: vertical force at the Sun
assert numpy.fabs(-potential.evaluatezforces(McMillan17,1.,1.1/8.21,
use_physical=False)
*conversion.force_in_2piGmsolpc2(vo,ro)-73.9) < 0.2, 'Vertical force at the Sun in McMillan17 does not agree with what it should be'
# Halo density at the Sun
assert numpy.fabs(potential.evaluateDensities(McMillan17[1],1.,0.,
use_physical=False)
*conversion.dens_in_msolpc3(vo,ro)-0.0101) < 1e-4, 'Halo density at the Sun in McMillan17 does not agree with what it should be'
# Halo concentration
assert numpy.fabs(McMillan17[1].conc(overdens=94.,wrtcrit=True,H=70.4)-15.4) < 1e-1, 'Halo concentration in McMillan17 does not agree with what it is supposed to be'
    # Let's compute the mass of the NFWPotential and add the paper's numbers for the mass in stars and gas. The following is the total mass in units of 10^11 Msun:
assert numpy.fabs((McMillan17[1].mass(50./8.21,quantity=False))/10.**11.+0.543+0.122-5.1) < 1e-1, 'Mass within 50 kpc in McMillan17 does not agree with what it is supposed to be'
# Mass of the bulge is slightly off
assert numpy.fabs((McMillan17[2].mass(50./8.21,quantity=False))/10.**9.-9.23) < 4e-1, 'Bulge mass in McMillan17 does not agree with what it is supposed to be'
# Mass in stars, compute bulge+disk and subtract what's supposed to be gas
    assert numpy.fabs((McMillan17[0].mass(50./8.21,quantity=False)+McMillan17[2].mass(50./8.21,quantity=False))/10.**10.-1.22-5.43) < 1e-1, 'Stellar mass in McMillan17 does not agree with what it is supposed to be'
return None
# Test that the Cautun20 potential is what it's supposed to be
def test_Cautun20():
from galpy.potential.mwpotentials import Cautun20
from galpy.util import conversion
ro,vo= Cautun20[0]._ro, Cautun20[0]._vo
# Check the rotation velocity at a few distances
# at the Sun
assert numpy.fabs(potential.vcirc(Cautun20,1.,quantity=False)-230.1) < 1e-1, 'Total circular velocity at the Sun in Cautun20 does not agree with what it should be'
assert numpy.fabs(potential.vcirc(Cautun20[0],1.,quantity=False)-157.6) < 1e-1, 'Halo circular velocity at the Sun in Cautun20 does not agree with what it should be'
assert numpy.fabs(potential.vcirc(Cautun20[1],1.,quantity=False)-151.2) < 1e-1, 'Disc circular velocity at the Sun in Cautun20 does not agree with what it should be'
assert numpy.fabs(potential.vcirc(Cautun20[2],1.,quantity=False)-70.8) < 1e-1, 'Bulge circular velocity at the Sun in Cautun20 does not agree with what it should be'
# at 50 kpc
assert numpy.fabs(potential.vcirc(Cautun20,50./ro,quantity=False)-184.3) < 1e-1, 'Total circular velocity at 50 kpc in Cautun20 does not agree with what it should be'
assert numpy.fabs(potential.vcirc(Cautun20[0],50./ro,quantity=False)-166.9) < 1e-1, 'Halo circular velocity at 50 kpc in Cautun20 does not agree with what it should be'
assert numpy.fabs(potential.vcirc(Cautun20[1],50./ro,quantity=False)-68.9) < 1e-1, 'Disc circular velocity at 50 kpc in Cautun20 does not agree with what it should be'
assert numpy.fabs(potential.vcirc(Cautun20[2],50./ro,quantity=False)-28.3) < 1e-1, 'Bulge circular velocity at 50 kpc in Cautun20 does not agree with what it should be'
# check the enclosed halo mass
assert numpy.fabs((Cautun20[0].mass(50./ro,quantity=False))/10.**11-3.23) < 1e-2, 'DM halo mass within 50 kpc in Cautun20 does not agree with what it is supposed to be'
    assert numpy.fabs((Cautun20[0].mass(200./ro,quantity=False))/10.**11-9.03) < 1e-2, 'DM halo mass within 200 kpc in Cautun20 does not agree with what it is supposed to be'
# check the CGM density
assert numpy.fabs(potential.evaluateDensities(Cautun20[3],1.,0.,use_physical=False)
*conversion.dens_in_msolpc3(vo,ro)*1.e5-9.34) < 1e-2, 'CGM density at the Sun in Cautun20 does not agree with what it should be'
assert numpy.fabs(potential.evaluateDensities(Cautun20[3],50./ro,0.,use_physical=False)
*conversion.dens_in_msolpc3(vo,ro)*1.e6-6.49) < 1e-2, 'CGM density at 50 kpc in Cautun20 does not agree with what it should be'
# Halo density at the Sun
assert numpy.fabs(potential.evaluateDensities(Cautun20[0],1.,0.,use_physical=False)
*conversion.dens_in_msolpc3(vo,ro)*1.e3-8.8) < 5e-2, 'Halo density at the Sun in Cautun20 does not agree with what it should be'
return None
# Test that the Irrgang13 potentials are what they are supposed to be
def test_Irrgang13():
from galpy.potential.mwpotentials import (Irrgang13I, Irrgang13II,
Irrgang13III)
# Model I
ro,vo= Irrgang13I[0]._ro, Irrgang13I[0]._vo
# Check some numbers from Table 1 of Irrgang13: circular velocity at the Sun
assert numpy.fabs(potential.vcirc(Irrgang13I,1.,quantity=False)-242.) < 1e-2, 'Circular velocity at the Sun in Irrgang13I does not agree with what it should be'
# Mass of the bulge
assert numpy.fabs(Irrgang13I[0].mass(100.,quantity=False)/1e9-9.5) < 1e-2, 'Mass of the bulge in Irrgang13I does not agree with what it should be'
# Mass of the disk
assert numpy.fabs(Irrgang13I[1].mass(100.,10.,quantity=False)/1e10-6.6) < 1e-2, 'Mass of the disk in Irrgang13I does not agree with what it should be'
# Mass of the halo (go to edge in Irrgang13I)
assert numpy.fabs(Irrgang13I[2].mass(200./ro,quantity=False)/1e12-1.8) < 1e-1, 'Mass of the halo in Irrgang13I does not agree with what it should be'
# Escape velocity at the Sun
assert numpy.fabs(potential.vesc(Irrgang13I,1.,quantity=False)-616.4) < 1e0, 'Escape velocity at the Sun in Irrgang13I does not agree with what it should be'
# Oort A
assert numpy.fabs(0.5*(potential.vcirc(Irrgang13I,1.,use_physical=False)-potential.dvcircdR(Irrgang13I,1.,use_physical=False))*vo/ro-15.06) < 1e-1, 'Oort A in Irrgang13I does not agree with what it should be'
# Oort B
assert numpy.fabs(-0.5*(potential.vcirc(Irrgang13I,1.,use_physical=False)+potential.dvcircdR(Irrgang13I,1.,use_physical=False))*vo/ro+13.74) < 1e-1, 'Oort B in Irrgang13I does not agree with what it should be'
# Model II
ro,vo= Irrgang13II[0]._ro, Irrgang13II[0]._vo
# Check some numbers from Table 2 of Irrgang13: circular velocity at the Sun
assert numpy.fabs(potential.vcirc(Irrgang13II,1.,quantity=False)-240.4) < 3e-2, 'Circular velocity at the Sun in Irrgang13II does not agree with what it should be'
# Mass of the bulge
assert numpy.fabs(Irrgang13II[0].mass(100.,quantity=False)/1e9-4.1) < 1e-1, 'Mass of the bulge in Irrgang13II does not agree with what it should be'
# Mass of the disk
assert numpy.fabs(Irrgang13II[1].mass(100.,10.,quantity=False)/1e10-6.6) < 1e-1, 'Mass of the disk in Irrgang13II does not agree with what it should be'
# Mass of the halo (go to edge in Irrgang13II)
assert numpy.fabs(Irrgang13II[2].mass(100.,quantity=False)/1e12-1.6) < 1e-1, 'Mass of the halo in Irrgang13II does not agree with what it should be'
# Escape velocity at the Sun
assert numpy.fabs(potential.vesc(Irrgang13II,1.,quantity=False)-575.9) < 1e0, 'Escape velocity at the Sun in Irrgang13II does not agree with what it should be'
# Oort A
assert numpy.fabs(0.5*(potential.vcirc(Irrgang13II,1.,use_physical=False)-potential.dvcircdR(Irrgang13II,1.,use_physical=False))*vo/ro-15.11) < 1e-1, 'Oort A in Irrgang13II does not agree with what it should be'
# Oort B
assert numpy.fabs(-0.5*(potential.vcirc(Irrgang13II,1.,use_physical=False)+potential.dvcircdR(Irrgang13II,1.,use_physical=False))*vo/ro+13.68) < 1e-1, 'Oort B in Irrgang13II does not agree with what it should be'
# Model III
ro,vo= Irrgang13III[0]._ro, Irrgang13III[0]._vo
# Check some numbers from Table 3 of Irrgang13: circular velocity at the Sun
assert numpy.fabs(potential.vcirc(Irrgang13III,1.,quantity=False)-239.7) < 3e-2, 'Circular velocity at the Sun in Irrgang13III does not agree with what it should be'
# Mass of the bulge
assert numpy.fabs(Irrgang13III[0].mass(100.,quantity=False)/1e9-10.2) < 1e-1, 'Mass of the bulge in Irrgang13III does not agree with what it should be'
# Mass of the disk
assert numpy.fabs(Irrgang13III[1].mass(100.,10.,quantity=False)/1e10-7.2) < 1e-1, 'Mass of the disk in Irrgang13III does not agree with what it should be'
# Escape velocity at the Sun
assert numpy.fabs(potential.vesc(Irrgang13III,1.,quantity=False)-811.5) < 1e0, 'Escape velocity at the Sun in Irrgang13III does not agree with what it should be'
# Oort A
assert numpy.fabs(0.5*(potential.vcirc(Irrgang13III,1.,use_physical=False)-potential.dvcircdR(Irrgang13III,1.,use_physical=False))*vo/ro-14.70) < 1e-1, 'Oort A in Irrgang13III does not agree with what it should be'
# Oort B
assert numpy.fabs(-0.5*(potential.vcirc(Irrgang13III,1.,use_physical=False)+potential.dvcircdR(Irrgang13III,1.,use_physical=False))*vo/ro+14.08) < 1e-1, 'Oort B in Irrgang13III does not agree with what it should be'
return None
# Test that the Dehnen & Binney (1998) models are what they are supposed to be
def test_DehnenBinney98():
from galpy.potential.mwpotentials import (DehnenBinney98I,
DehnenBinney98II,
DehnenBinney98III,
DehnenBinney98IV)
check_DehnenBinney98_model(DehnenBinney98I,model='model 1')
check_DehnenBinney98_model(DehnenBinney98II,model='model 2')
check_DehnenBinney98_model(DehnenBinney98III,model='model 3')
check_DehnenBinney98_model(DehnenBinney98IV,model='model 4')
return None
def check_DehnenBinney98_model(pot,model='model 1'):
from galpy.util import conversion
truth= {'model 1':
{'SigmaR0':43.3,
'vc':222.,
'Fz':68.,
'A':14.4,
'B':-13.3},
'model 2':
{'SigmaR0':52.1,
'vc':217.,
'Fz':72.2,
'A':14.3,
'B':-12.9},
'model 3':
{'SigmaR0':52.7,
'vc':217.,
'Fz':72.5,
'A':14.1,
'B':-13.1},
'model 4':
{'SigmaR0':50.7,
'vc':220.,
'Fz':72.1,
'A':13.8,
'B':-13.6}
}
phys_kwargs= conversion.get_physical(pot)
ro= phys_kwargs.get('ro')
vo= phys_kwargs.get('vo')
assert numpy.fabs(pot[1].surfdens(1.,10./ro)-truth[model]['SigmaR0']) < 0.2, f'Surface density at R0 in Dehnen & Binney (1998) {model} does not agree with paper value'
assert numpy.fabs(potential.vcirc(pot,1.)-truth[model]['vc']) < 0.5, f'Circular velocity at R0 in Dehnen & Binney (1998) {model} does not agree with paper value'
assert numpy.fabs(-potential.evaluatezforces(pot,1.,1.1/ro,use_physical=False)*conversion.force_in_2piGmsolpc2(vo,ro)-truth[model]['Fz']) < 0.2, f'Vertical force at R0 in Dehnen & Binney (1998) {model} does not agree with paper value'
assert numpy.fabs(0.5*(potential.vcirc(pot,1.,use_physical=False)-potential.dvcircdR(pot,1.,use_physical=False))*vo/ro-truth[model]['A']) < 0.05, f'Oort A in Dehnen & Binney (1998) {model} does not agree with paper value'
    assert numpy.fabs(-0.5*(potential.vcirc(pot,1.,use_physical=False)+potential.dvcircdR(pot,1.,use_physical=False))*vo/ro-truth[model]['B']) < 0.05, f'Oort B in Dehnen & Binney (1998) {model} does not agree with paper value'
return None
# Test that the virial setup of NFW works
def test_NFW_virialsetup_wrtmeanmatter():
H, Om, overdens, wrtcrit= 71., 0.32, 201., False
    ro, vo= 8., 220.
conc, mvir= 12., 1.1
np= potential.NFWPotential(conc=conc,mvir=mvir,vo=vo,ro=ro,
H=H,Om=Om,overdens=overdens,
wrtcrit=wrtcrit)
assert numpy.fabs(conc-np.conc(H=H,Om=Om,overdens=overdens,
wrtcrit=wrtcrit)) < 10.**-6., "NFWPotential virial setup's concentration does not work"
assert numpy.fabs(mvir-np.mvir(H=H,Om=Om,overdens=overdens,
wrtcrit=wrtcrit)/10.**12.) < 10.**-6., "NFWPotential virial setup's virial mass does not work"
return None
def test_NFW_virialsetup_wrtcrit():
H, Om, overdens, wrtcrit= 71., 0.32, 201., True
    ro, vo= 8., 220.
conc, mvir= 12., 1.1
np= potential.NFWPotential(conc=conc,mvir=mvir,vo=vo,ro=ro,
H=H,Om=Om,overdens=overdens,
wrtcrit=wrtcrit)
assert numpy.fabs(conc-np.conc(H=H,Om=Om,overdens=overdens,
wrtcrit=wrtcrit)) < 10.**-6., "NFWPotential virial setup's concentration does not work"
assert numpy.fabs(mvir-np.mvir(H=H,Om=Om,overdens=overdens,
wrtcrit=wrtcrit)/10.**12.) < 10.**-6., "NFWPotential virial setup's virial mass does not work"
return None
def test_TriaxialNFW_virialsetup_wrtmeanmatter():
H, Om, overdens, wrtcrit= 71., 0.32, 201., False
    ro, vo= 8., 220.
conc, mvir= 12., 1.1
np= potential.NFWPotential(conc=conc,mvir=mvir,vo=vo,ro=ro,
H=H,Om=Om,overdens=overdens,
wrtcrit=wrtcrit)
tnp= potential.TriaxialNFWPotential(b=0.3,c=0.7,
conc=conc,mvir=mvir,vo=vo,ro=ro,
H=H,Om=Om,overdens=overdens,
wrtcrit=wrtcrit)
assert numpy.fabs(np.a-tnp.a) < 10.**-10., "TriaxialNFWPotential virial setup's concentration does not work"
assert numpy.fabs(np._amp-tnp._amp*4.*numpy.pi*tnp.a**3) < 10.**-6., "TriaxialNFWPotential virial setup's virial mass does not work"
return None
def test_TriaxialNFW_virialsetup_wrtcrit():
H, Om, overdens, wrtcrit= 71., 0.32, 201., True
    ro, vo= 8., 220.
conc, mvir= 12., 1.1
np= potential.NFWPotential(conc=conc,mvir=mvir,vo=vo,ro=ro,
H=H,Om=Om,overdens=overdens,
wrtcrit=wrtcrit)
tnp= potential.TriaxialNFWPotential(b=0.3,c=0.7,
conc=conc,mvir=mvir,vo=vo,ro=ro,
H=H,Om=Om,overdens=overdens,
wrtcrit=wrtcrit)
assert numpy.fabs(np.a-tnp.a) < 10.**-10., "TriaxialNFWPotential virial setup's concentration does not work"
assert numpy.fabs(np._amp-tnp._amp*4.*numpy.pi*tnp.a**3) < 10.**-6., "TriaxialNFWPotential virial setup's virial mass does not work"
return None
# Test that setting up an NFW potential with rmax,vmax works as expected
def test_NFW_rmaxvmaxsetup():
rmax, vmax= 1.2, 3.23
np= potential.NFWPotential(rmax=rmax,vmax=vmax)
assert numpy.fabs(np.rmax()-rmax) < 10.**-10., 'NFWPotential setup with rmax,vmax does not work as expected'
assert numpy.fabs(np.vmax()-vmax) < 10.**-10., 'NFWPotential setup with rmax,vmax does not work as expected'
return None
def test_conc_attributeerror():
pp= potential.PowerSphericalPotential(normalize=1.)
#This potential doesn't have a scale, so we cannot calculate the concentration
try: pp.conc(220.,8.)
except AttributeError: pass
else: raise AssertionError('conc function for potential w/o scale did not raise AttributeError')
return None
def test_mvir_attributeerror():
mp= potential.MiyamotoNagaiPotential(normalize=1.)
#Don't think I will ever implement the virial radius for this
try: mp.mvir(220.,8.)
except AttributeError: pass
else: raise AssertionError('mvir function for potential w/o rvir did not raise AttributeError')
return None
# Test that virial quantities are correctly computed when specifying a different (ro,vo) pair from Potential setup (see issue #290)
def test_NFW_virialquantities_diffrovo():
from galpy.util import conversion
H, Om, overdens, wrtcrit= 71., 0.32, 201., False
ro_setup, vo_setup= 220., 8.
ros= [7.,8.,9.]
vos= [220.,230.,240.]
for ro,vo in zip(ros,vos):
np= potential.NFWPotential(amp=2.,a=3.,
ro=ro_setup,vo=vo_setup)
# Computing the overdensity in physical units
od= (np.mvir(ro=ro,vo=vo,H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit)\
/4./numpy.pi*3.\
/np.rvir(ro=ro,vo=vo,H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit)**3.)\
*(10.**6./H**2.*8.*numpy.pi/3./Om*(4.302*10.**-6.))
assert numpy.fabs(od-overdens) < 0.1, "NFWPotential's virial quantities computed in physical units with different (ro,vo) from setup are incorrect"
od= (np.mvir(ro=ro,vo=vo,H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit,use_physical=False)\
/4./numpy.pi*3.\
/np.rvir(ro=ro,vo=vo,H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit,use_physical=False)**3.)\
*conversion.dens_in_meanmatterdens(vo,ro,H=H,Om=Om)
assert numpy.fabs(od-overdens) < 0.01, "NFWPotential's virial quantities computed in internal units with different (ro,vo) from setup are incorrect"
# Also test concentration
assert numpy.fabs(np.conc(ro=ro,vo=vo,H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit)\
-np.rvir(ro=ro,vo=vo,H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit)/np._scale/ro) < 0.01, "NFWPotential's concentration computed for different (ro,vo) from setup is incorrect"
return None
# Test that rmax and vmax are correctly determined for an NFW potential
def test_NFW_rmaxvmax():
# Setup with rmax,vmax
rmax, vmax= 1.2, 3.23
np= potential.NFWPotential(rmax=rmax,vmax=vmax)
# Now determine rmax and vmax numerically
rmax_opt= optimize.minimize_scalar(lambda r: -np.vcirc(r),
bracket=[0.01,100.])['x']
assert numpy.fabs(rmax_opt-rmax) < 10.**-7., \
'NFW rmax() function does not behave as expected'
assert numpy.fabs(np.vcirc(rmax_opt)-vmax) < 10.**-8., \
'NFW rmax() function does not behave as expected'
assert numpy.fabs(np.vcirc(rmax_opt)-np.vmax()) < 10.**-8., \
'NFW vmax() function does not behave as expected'
return None
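# A minimal sketch (not part of the test suite) of the analytic relation the
# tests above rely on: the NFW rotation curve peaks at rmax ~= 2.1626*a, where
# a is the scale radius and the constant solves ln(1+x) = x(1+2x)/(1+x)^2
def _example_nfw_rmax_relation():
    a= 2.
    nfw= potential.NFWPotential(normalize=1.,a=a)
    assert numpy.fabs(nfw.rmax()/a-2.16258) < 1e-4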
def test_LinShuReductionFactor():
#Test that the LinShuReductionFactor is implemented correctly, by comparing to figure 1 in Lin & Shu (1966)
from galpy.potential import (LinShuReductionFactor,
LogarithmicHaloPotential, epifreq, omegac)
lp= LogarithmicHaloPotential(normalize=1.) #work in flat rotation curve
#nu^2 = 0.2, x=4 for m=2,sigmar=0.1
# w/ nu = m(OmegaP-omegac)/epifreq, x=sr^2*k^2/epifreq^2
R,m,sr = 0.9,2.,0.1
tepi, tomegac= epifreq(lp,R), omegac(lp,R)
OmegaP= tepi*numpy.sqrt(0.2)/m+tomegac #leads to nu^2 = 0.2
k= numpy.sqrt(4.)*tepi/sr
assert numpy.fabs(LinShuReductionFactor(lp,R,sr,m=m,k=k,OmegaP=OmegaP)-0.18) < 0.01, 'LinShuReductionFactor does not agree w/ Figure 1 from Lin & Shu (1966)'
#nu^2 = 0.8, x=10
OmegaP= tepi*numpy.sqrt(0.8)/m+tomegac #leads to nu^2 = 0.8
k= numpy.sqrt(10.)*tepi/sr
assert numpy.fabs(LinShuReductionFactor(lp,R,sr,m=m,k=k,OmegaP=OmegaP)-0.04) < 0.01, 'LinShuReductionFactor does not agree w/ Figure 1 from Lin & Shu (1966)'
#Similar test, but using a nonaxiPot= input
from galpy.potential import SteadyLogSpiralPotential
sp= SteadyLogSpiralPotential(m=2.,omegas=OmegaP,alpha=k*R)
assert numpy.fabs(LinShuReductionFactor(lp,R,sr,nonaxiPot=sp)-0.04) < 0.01, 'LinShuReductionFactor does not agree w/ Figure 1 from Lin & Shu (1966)'
#Test exception
try:
LinShuReductionFactor(lp,R,sr)
except OSError: pass
else: raise AssertionError("LinShuReductionFactor w/o nonaxiPot set or k=,m=,OmegaP= set did not raise IOError")
return None
def test_nemoaccname():
    #There is no real good way to test this (I think), so I'm just testing
    #against what I think is the correct output now, to make sure it isn't
    #accidentally changed
# Log
lp= potential.LogarithmicHaloPotential(normalize=1.)
assert lp.nemo_accname() == 'LogPot', "Logarithmic potential's NEMO name incorrect"
# NFW
np= potential.NFWPotential(normalize=1.)
assert np.nemo_accname() == 'NFW', "NFW's NEMO name incorrect"
# Miyamoto-Nagai
mp= potential.MiyamotoNagaiPotential(normalize=1.)
assert mp.nemo_accname() == 'MiyamotoNagai', "MiyamotoNagai's NEMO name incorrect"
# Power-spherical w/ cut-off
pp= potential.PowerSphericalPotentialwCutoff(normalize=1.)
    assert pp.nemo_accname() == 'PowSphwCut', "Power-spherical potential w/ cut-off's NEMO name incorrect"
# MN3ExponentialDiskPotential
mp= potential.MN3ExponentialDiskPotential(normalize=1.)
assert mp.nemo_accname() == 'MiyamotoNagai+MiyamotoNagai+MiyamotoNagai', "MN3ExponentialDiskPotential's NEMO name incorrect"
# Plummer
pp= potential.PlummerPotential(normalize=1.)
assert pp.nemo_accname() == 'Plummer', "PlummerPotential's NEMO name incorrect"
# Hernquist
hp= potential.HernquistPotential(normalize=1.)
assert hp.nemo_accname() == 'Dehnen', "HernquistPotential's NEMO name incorrect"
return None
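# For context (usage sketch): galpy's nemo_accname/nemo_accpars output is
# meant to be fed to NEMO's gyrfalcON as, e.g.,
#     gyrfalcON ... accname=LogPot accpars=<output of nemo_accpars>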
def test_nemoaccnamepars_attributeerror():
# Use BurkertPotential (unlikely that I would implement that one in NEMO soon)
bp= potential.BurkertPotential(normalize=1.)
try: bp.nemo_accname()
except AttributeError: pass
else:
raise AssertionError('nemo_accname for potential w/o accname does not raise AttributeError')
try: bp.nemo_accpars(220.,8.)
except AttributeError: pass
else:
raise AssertionError('nemo_accpars for potential w/o accname does not raise AttributeError')
return None
def test_nemoaccnames():
# Just test MWPotential2014 and a single potential
# MWPotential2014
assert potential.nemo_accname(potential.MWPotential2014) == 'PowSphwCut+MiyamotoNagai+NFW', "MWPotential2014's NEMO name is incorrect"
# Power-spherical w/ cut-off
pp= potential.PowerSphericalPotentialwCutoff(normalize=1.)
assert potential.nemo_accname(pp) == 'PowSphwCut', "Power-spherical potential w/ cut-off's NEMO name incorrect"
return None
def test_nemoaccpars():
# Log
lp= potential.LogarithmicHaloPotential(amp=2.,core=3.,q=27.) #completely ridiculous, but tests scalings
vo, ro= 2., 3.
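    # NEMO's gyrfalcON works in units with G=1, lengths in kpc, and
    # velocities in kpc/Gyr (1 km/s = 1.0227121655... kpc/Gyr); vo is divided
    # by this factor here so the expected accpars below come out round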
vo/= 1.0227121655399913
ap= lp.nemo_accpars(vo,ro).split(',')
assert numpy.fabs(float(ap[0])-0) < 10.**-8., "Logarithmic potential's NEMO accpars incorrect"
assert numpy.fabs(float(ap[1])-8.0) < 10.**-8., "Logarithmic potential's NEMO accpars incorrect"
assert numpy.fabs(float(ap[2])-729.0) < 10.**-8., "Logarithmic potential's NEMO accpars incorrect"
assert numpy.fabs(float(ap[3])-1.0) < 10.**-8., "Logarithmic potential's NEMO accpars incorrect"
assert numpy.fabs(float(ap[4])-27.0) < 10.**-8., "Logarithmic potential's NEMO accpars incorrect"
# Miyamoto-Nagai
mp= potential.MiyamotoNagaiPotential(amp=3.,a=2.,b=5.)
vo, ro= 7., 9.
vo/= 1.0227121655399913
ap= mp.nemo_accpars(vo,ro).split(',')
assert numpy.fabs(float(ap[0])-0) < 10.**-8., "MiyamotoNagai's NEMO accpars incorrect"
assert numpy.fabs(float(ap[1])-1323.0) < 10.**-5., "MiyamotoNagai's NEMO accpars incorrect"
assert numpy.fabs(float(ap[2])-18.0) < 10.**-8., "MiyamotoNagai's NEMO accpars incorrect"
assert numpy.fabs(float(ap[3])-45.0) < 10.**-8., "MiyamotoNagai's NEMO accpars incorrect"
# Power-spherical w/ cut-off
pp= potential.PowerSphericalPotentialwCutoff(amp=3.,alpha=4.,rc=5.)
vo, ro= 7., 9.
vo/= 1.0227121655399913
ap= pp.nemo_accpars(vo,ro).split(',')
assert numpy.fabs(float(ap[0])-0) < 10.**-8., "Power-spherical potential w/ cut-off's NEMO accpars incorrect"
assert numpy.fabs(float(ap[1])-11907.0) < 10.**-4., "Power-spherical potential w/ cut-off's NEMO accpars incorrect"
assert numpy.fabs(float(ap[2])-4.0) < 10.**-8., "Power-spherical potential w/ cut-off's NEMO accpars incorrect"
assert numpy.fabs(float(ap[3])-45.0) < 10.**-8., "Power-spherical potential w/ cut-off's NEMO accpars incorrect"
# NFW
np= potential.NFWPotential(amp=1./0.2162165954,a=1./16)
vo, ro= 3., 4.
vo/= 1.0227121655399913
ap= np.nemo_accpars(vo,ro).split(',')
assert numpy.fabs(float(ap[0])-0) < 10.**-8., "NFW's NEMO accpars incorrect"
assert numpy.fabs(float(ap[1])-0.25) < 10.**-8., "NFW's NEMO accpars incorrect"
assert numpy.fabs(float(ap[2])-12.0) < 10.**-8., "NFW's NEMO accpars incorrect"
# MN3ExponentialDiskPotential
mn= potential.MN3ExponentialDiskPotential(normalize=1.,hr=2.,hz=0.5)
vo, ro= 3., 4.
ap= mn.nemo_accpars(vo,ro).replace('#',',').split(',')
    assert numpy.fabs(float(ap[0])-0) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[4])-0) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[8])-0) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    # Test ratios
    assert numpy.fabs(float(ap[1])/float(ap[5])-mn._mn3[0]._amp/mn._mn3[1]._amp) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[1])/float(ap[9])-mn._mn3[0]._amp/mn._mn3[2]._amp) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[2])/float(ap[6])-mn._mn3[0]._a/mn._mn3[1]._a) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[2])/float(ap[10])-mn._mn3[0]._a/mn._mn3[2]._a) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[3])/float(ap[7])-1.) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
    assert numpy.fabs(float(ap[3])/float(ap[11])-1.) < 10.**-8., "MN3ExponentialDiskPotential's NEMO accpars incorrect"
# Plummer
pp= potential.PlummerPotential(amp=3.,b=5.)
vo, ro= 7., 9.
vo/= 1.0227121655399913
ap= pp.nemo_accpars(vo,ro).split(',')
assert numpy.fabs(float(ap[0])-0) < 10.**-8., "Plummer's NEMO accpars incorrect"
assert numpy.fabs(float(ap[1])-1323.0) < 10.**-5., "Plummer's NEMO accpars incorrect"
assert numpy.fabs(float(ap[2])-45.0) < 10.**-8., "Plummer's NEMO accpars incorrect"
# Hernquist
hp= potential.HernquistPotential(amp=2.,a=1./4.)
vo, ro= 3., 4.
vo/= 1.0227121655399913
ap= hp.nemo_accpars(vo,ro).split(',')
assert numpy.fabs(float(ap[0])-0) < 10.**-8., "Hernquist's NEMO accpars incorrect"
assert numpy.fabs(float(ap[1])-1.) < 10.**-8., "Hernquist's NEMO accpars incorrect"
assert numpy.fabs(float(ap[2])-9.*4) < 10.**-7., "Hernquist's NEMO accpars incorrect"
assert numpy.fabs(float(ap[3])-1.0) < 10.**-8., "Hernquist's NEMO accpars incorrect"
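    # (Hernquist maps onto NEMO's Dehnen potential; the last accpar is the
    # inner slope gamma, which is 1 for a Hernquist profile)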
return None
def test_nemoaccparss():
# Just combine a few of the above ones
# Miyamoto + PowerSpherwCut
mp= potential.MiyamotoNagaiPotential(amp=3.,a=2.,b=5.)
pp= potential.PowerSphericalPotentialwCutoff(amp=3.,alpha=4.,rc=5.)
vo, ro= 7., 9.
vo/= 1.0227121655399913
ap= potential.nemo_accpars(mp,vo,ro).split(',')
assert numpy.fabs(float(ap[0])-0) < 10.**-8., "MiyamotoNagai's NEMO accpars incorrect"
assert numpy.fabs(float(ap[1])-1323.0) < 10.**-5., "MiyamotoNagai's NEMO accpars incorrect"
assert numpy.fabs(float(ap[2])-18.0) < 10.**-8., "MiyamotoNagai's NEMO accpars incorrect"
assert numpy.fabs(float(ap[3])-45.0) < 10.**-8., "MiyamotoNagai's NEMO accpars incorrect"
# PowSpherwCut
ap= potential.nemo_accpars(pp,vo,ro).split(',')
assert numpy.fabs(float(ap[0])-0) < 10.**-8., "Power-spherical potential w/ cut-off's NEMO accpars incorrect"
assert numpy.fabs(float(ap[1])-11907.0) < 10.**-4., "Power-spherical potential w/ cut-off's NEMO accpars incorrect"
assert numpy.fabs(float(ap[2])-4.0) < 10.**-8., "Power-spherical potential w/ cut-off's NEMO accpars incorrect"
assert numpy.fabs(float(ap[3])-45.0) < 10.**-8., "Power-spherical potential w/ cut-off's NEMO accpars incorrect"
# Combined
apc= potential.nemo_accpars([mp,pp],vo,ro).split('#')
ap= apc[0].split(',') # should be MN
assert numpy.fabs(float(ap[0])-0) < 10.**-8., "Miyamoto+Power-spherical potential w/ cut-off's NEMO accpars incorrect"
assert numpy.fabs(float(ap[1])-1323.0) < 10.**-5., "Miyamoto+Power-spherical potential w/ cut-off's NEMO accpars incorrect"
assert numpy.fabs(float(ap[2])-18.0) < 10.**-8., "Miyamoto+Power-spherical potential w/ cut-off's NEMO accpars incorrect"
assert numpy.fabs(float(ap[3])-45.0) < 10.**-8., "Miyamoto+Power-spherical potential w/ cut-off's NEMO accpars incorrect"
ap= apc[1].split(',') # should be PP
assert numpy.fabs(float(ap[0])-0) < 10.**-8., "Miyamoto+Power-spherical potential w/ cut-off's NEMO accpars incorrect"
assert numpy.fabs(float(ap[1])-11907.0) < 10.**-4., "Miyamoto+Power-spherical potential w/ cut-off's NEMO accpars incorrect"
assert numpy.fabs(float(ap[2])-4.0) < 10.**-8., "Miyamoto+Power-spherical potential w/ cut-off's NEMO accpars incorrect"
assert numpy.fabs(float(ap[3])-45.0) < 10.**-8., "Miyamoto+Power-spherical potential w/ cut-off's NEMO accpars incorrect"
return None
def test_MN3ExponentialDiskPotential_inputs():
#Test the inputs of the MN3ExponentialDiskPotential
    # OSError for hz so large that b is negative
    try:
        mn= potential.MN3ExponentialDiskPotential(amp=1.,hz=50.)
    except OSError: pass
    else:
        raise AssertionError("MN3ExponentialDiskPotential with ridiculous hz should have given OSError, but didn't")
# Warning when b/Rd > 3 or (b/Rd > 1.35 and posdens)
#Turn warnings into errors to test for them
import warnings
from galpy.util import galpyWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always",galpyWarning)
        mn= potential.MN3ExponentialDiskPotential(normalize=1.,hz=1.438,hr=1.)
# Should raise warning bc of MN3ExponentialDiskPotential,
# might raise others
raisedWarning= False
for wa in w:
raisedWarning= ('MN3ExponentialDiskPotential' in str(wa.message))
if raisedWarning: break
assert raisedWarning, "MN3ExponentialDiskPotential w/o posdens, but with b/Rd > 3 did not raise galpyWarning"
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always",galpyWarning)
        mn= potential.MN3ExponentialDiskPotential(normalize=1.,hr=1.,hz=0.7727,
                                                  posdens=True)
raisedWarning= False
for wa in w:
raisedWarning= ('MN3ExponentialDiskPotential' in str(wa.message))
if raisedWarning: break
assert raisedWarning, "MN3ExponentialDiskPotential w/o posdens, but with b/Rd > 1.35 did not raise galpyWarning"
return None
def test_MN3ExponentialDiskPotential_hz():
#Test that we correctly convert from hz/Rd to b/Rd
# exp
mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=1.,sech=False)
assert numpy.fabs(mn._brd-1.875) < 0.05, "b/Rd not computed correctly for exponential profile"
mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=2.,hz=1.,sech=False)
assert numpy.fabs(mn._brd-0.75) < 0.05, "b/Rd not computed correctly for exponential profile"
# sech
mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=2.,sech=True)
assert numpy.fabs(mn._brd-2.1) < 0.05, "b/Rd not computed correctly for sech^2 profile"
mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=2.,hz=2.,sech=True)
assert numpy.fabs(mn._brd-0.9) < 0.05, "b/Rd not computed correctly for sech^2 profile"
return None
def test_MN3ExponentialDiskPotential_approx():
# Test that the 3MN approximation works to the advertised level
# Zero thickness
mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=0.001,sech=False)
dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.001)
dpmass= dp.mass(4.,5.*.001)
assert numpy.fabs(mn.mass(4.,5.*.001)-dpmass)/dpmass < 0.005, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
# Finite thickness
mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=0.62,sech=False)
dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.62)
dpmass= dp.mass(4.,5.*0.6)
assert numpy.fabs(mn.mass(4.,10.*0.6)-dpmass)/dpmass < 0.01, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
# Finite thickness w/ sech
mn= potential.MN3ExponentialDiskPotential(amp=.5,hr=1.,hz=1.24,sech=True)
dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.62)
dpmass= dp.mass(4.,5.*0.6)
assert numpy.fabs(mn.mass(4.,20.*0.6)-dpmass)/dpmass < 0.01, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
# At 10 Rd
# Zero thickness
mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=0.001,sech=False)
dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.001)
dpmass= dp.mass(10.,5.*.001)
assert numpy.fabs(mn.mass(10.,5.*.001)-dpmass)/dpmass < 0.04, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
# Finite thickness
mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=0.62,sech=False)
dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.62)
dpmass= dp.mass(10.,5.*0.6)
assert numpy.fabs(mn.mass(10.,10.*0.6)-dpmass)/dpmass < 0.04, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
# Finite thickness w/ sech
mn= potential.MN3ExponentialDiskPotential(amp=0.5,hr=1.,hz=1.24,sech=True)
dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.62)
dpmass= dp.mass(10.,5.*0.6)
assert numpy.fabs(mn.mass(10.,20.*0.6)-dpmass)/dpmass < 0.04, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
# For posdens the deviations are larger
# Zero thickness
mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=0.001,sech=False,
posdens=True)
dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.001)
dpmass= dp.mass(4.,5.*.001)
assert numpy.fabs(mn.mass(4.,5.*.001)-dpmass)/dpmass < 0.015, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
# Finite thickness
mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=0.62,sech=False,
posdens=True)
dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.62)
dpmass= dp.mass(4.,5.*0.6)
assert numpy.fabs(mn.mass(4.,10.*0.6)-dpmass)/dpmass < 0.015, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
# At 10 Rd
# Zero thickness
mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=0.001,sech=False,
posdens=True)
dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.001)
dpmass= dp.mass(10.,5.*.001)
assert numpy.fabs(mn.mass(10.,5.*.001)-dpmass)/dpmass > 0.04, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
assert numpy.fabs(mn.mass(10.,5.*.001)-dpmass)/dpmass < 0.07, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
# Finite thickness
mn= potential.MN3ExponentialDiskPotential(amp=1.,hr=1.,hz=0.62,sech=False,
posdens=True)
dp= potential.DoubleExponentialDiskPotential(amp=1.,hr=1.,hz=0.62)
dpmass= dp.mass(10.,5.*0.6)
assert numpy.fabs(mn.mass(10.,10.*0.6)-dpmass)/dpmass < 0.08, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
assert numpy.fabs(mn.mass(10.,10.*0.6)-dpmass)/dpmass > 0.03, "MN3ExponentialDiskPotential does not approximate the enclosed mass as advertised"
return None
def test_TwoPowerTriaxialPotential_vs_TwoPowerSphericalPotential():
# Test that TwoPowerTriaxialPotential with spherical parameters is the same
# as TwoPowerSphericalPotential
tol= -4. # tough general case
rs= numpy.linspace(0.001,25.,1001)
tnp= potential.TwoPowerTriaxialPotential(normalize=1.,b=1.,c=1.,a=1.5,
alpha=1.5,beta=3.5)
np= potential.TwoPowerSphericalPotential(normalize=1.,a=1.5,
alpha=1.5,beta=3.5)
assert numpy.all(numpy.fabs(numpy.array(\
[numpy.sqrt(tnp.Rforce(r,0.)/np.Rforce(r,0.)) for r in rs])-1.) < 10.**tol), 'Vcirc not the same for TwoPowerSphericalPotential and spherical version of TwoPowerTriaxialPotential'
# Also do specific cases
tol= -8. # much better
# Hernquist
tnp= potential.TriaxialHernquistPotential(normalize=1.,b=1.,c=1.,a=1.5)
np= potential.HernquistPotential(normalize=1.,a=1.5)
assert numpy.all(numpy.fabs(numpy.array(\
[numpy.sqrt(tnp.Rforce(r,0.)/np.Rforce(r,0.)) for r in rs])-1.) < 10.**tol), 'Vcirc not the same for Hernquist and spherical version of TriaxialHernquist'
# NFW
tnp= potential.TriaxialNFWPotential(normalize=1.,b=1.,c=1.,a=1.5)
np= potential.NFWPotential(normalize=1.,a=1.5)
assert numpy.all(numpy.fabs(numpy.array(\
[numpy.sqrt(tnp.Rforce(r,0.)/np.Rforce(r,0.)) for r in rs])-1.) < 10.**tol), 'Vcirc not the same for NFW and spherical version of TriaxialNFW'
# Jaffe
tnp= potential.TriaxialJaffePotential(normalize=1.,b=1.,c=1.,a=1.5)
np= potential.JaffePotential(normalize=1.,a=1.5)
assert numpy.all(numpy.fabs(numpy.array(\
[numpy.sqrt(tnp.Rforce(r,0.)/np.Rforce(r,0.)) for r in rs])-1.) < 10.**tol), 'Vcirc not the same for Jaffe and spherical version of TriaxialJaffe'
return None
# Test that TwoPowerTriaxial setup raises an error for bad values of alpha
# and beta
def test_TwoPowerTriaxialPotential_alphahigherror():
with pytest.raises(IOError) as excinfo:
dummy= potential.TwoPowerTriaxialPotential(alpha=3.5)
return None
def test_TwoPowerTriaxialPotential_betalowerror():
with pytest.raises(IOError) as excinfo:
dummy= potential.TwoPowerTriaxialPotential(beta=1.)
return None
# Test that DehnenSphericalPotential setup raises an error for bad values of alpha
def test_DehnenSphericalPotential_alphalowhigherror():
with pytest.raises(IOError) as excinfo:
dummy= potential.DehnenSphericalPotential(alpha=-.5)
with pytest.raises(IOError) as excinfo:
dummy= potential.DehnenSphericalPotential(alpha=3.5)
return None
# Test that FerrersPotential raises a value error for n < 0
def test_FerrersPotential_nNegative():
with pytest.raises(ValueError) as excinfo:
dummy= potential.FerrersPotential(n=-1.)
return None
# Test that SphericalShellPotential raises a value error for normalize=True and a > 1
def test_SphericalShellPotential_normalizer0():
with pytest.raises(ValueError) as excinfo:
dummy= potential.SphericalShellPotential(normalize=1.,a=2.)
return None
# Test that RingPotential raises a value error for normalize=True and a > 1
def test_RingPotential_normalizer0():
with pytest.raises(ValueError) as excinfo:
dummy= potential.RingPotential(normalize=1.,a=2.)
return None
def test_planeRotatedNFWPotential():
# Test that the rotation according to pa works as expected
tnp= potential.TriaxialNFWPotential(normalize=1.,a=1.5,b=0.5,
pa=30./180.*numpy.pi)
# Compute the potential at a fixed radius, minimum should be at pa!
Rs= 0.8
phis= numpy.linspace(0.,numpy.pi,1001)
pot= numpy.array([tnp(Rs,0.,phi=phi) for phi in phis])
minphi= numpy.argmin(pot)
minphi_pred= numpy.argmin(numpy.fabs(phis-30./180.*numpy.pi))
assert minphi == minphi_pred, 'Flattened NFW potential rotated around the z axis does not behave as expected'
# Same for density, but max instead
dens= numpy.array([tnp.dens(Rs,0.,phi=phi) for phi in phis])
minphi= numpy.argmax(dens)
minphi_pred= numpy.argmin(numpy.fabs(phis-30./180.*numpy.pi))
assert minphi == minphi_pred, 'Flattened NFW potential rotated around the z axis does not behave as expected'
# Also do a negative angle
tnp= potential.TriaxialNFWPotential(normalize=1.,a=1.5,b=0.5,
pa=-60./180.*numpy.pi)
# Compute the potential at a fixed radius, minimum should be at pa!
Rs= 0.8
phis= numpy.linspace(0.,numpy.pi,1001)
pot= numpy.array([tnp(Rs,0.,phi=phi) for phi in phis])
minphi= numpy.argmin(pot)
minphi_pred= numpy.argmin(numpy.fabs(phis-120./180.*numpy.pi))
assert minphi == minphi_pred, 'Flattened NFW potential rotated around the z axis does not behave as expected'
# Same for density, but max instead
dens= numpy.array([tnp.dens(Rs,0.,phi=phi) for phi in phis])
minphi= numpy.argmax(dens)
minphi_pred= numpy.argmin(numpy.fabs(phis-120./180.*numpy.pi))
assert minphi == minphi_pred, 'Flattened NFW potential rotated around the z axis does not behave as expected'
return None
def test_zaxisRotatedNFWPotential():
from galpy.util import coords
# Test that the rotation according to zvec works as expected
pa= 30./180.*numpy.pi
tnp= potential.TriaxialNFWPotential(normalize=1.,a=1.5,c=0.5,
zvec=[0.,-numpy.sin(pa),numpy.cos(pa)])
# Compute the potential at a fixed radius in the y/z plane,
# minimum should be at pa!
Rs= 0.8
phis= numpy.linspace(0.,numpy.pi,1001)
xs= numpy.zeros_like(phis)
ys= Rs*numpy.cos(phis)
zs= Rs*numpy.sin(phis)
tR,tphi,tz= coords.rect_to_cyl(xs,ys,zs)
pot= numpy.array([tnp(r,z,phi=phi) for r,z,phi in zip(tR,tz,tphi)])
minphi= numpy.argmin(pot)
minphi_pred= numpy.argmin(numpy.fabs(phis-30./180.*numpy.pi))
assert minphi == minphi_pred, 'Flattened NFW potential with rotated z axis does not behave as expected'
# Same for density, but max instead
dens= numpy.array([tnp.dens(r,z,phi=phi) for r,z,phi in zip(tR,tz,tphi)])
minphi= numpy.argmax(dens)
minphi_pred= numpy.argmin(numpy.fabs(phis-30./180.*numpy.pi))
assert minphi == minphi_pred, 'Flattened NFW potential with rotated z axis does not behave as expected'
# Another one
pa= -60./180.*numpy.pi
tnp= potential.TriaxialNFWPotential(normalize=1.,a=1.5,c=0.5,
zvec=[-numpy.sin(pa),0.,numpy.cos(pa)])
    # Compute the potential at a fixed radius in the x/z plane,
# minimum should be at pa!
Rs= 0.8
phis= numpy.linspace(0.,numpy.pi,1001)
xs= Rs*numpy.cos(phis)
ys= numpy.zeros_like(phis)
zs= Rs*numpy.sin(phis)
tR,tphi,tz= coords.rect_to_cyl(xs,ys,zs)
pot= numpy.array([tnp(r,z,phi=phi) for r,z,phi in zip(tR,tz,tphi)])
minphi= numpy.argmin(pot)
minphi_pred= numpy.argmin(numpy.fabs(phis-120./180.*numpy.pi))
assert minphi == minphi_pred, 'Flattened NFW potential with rotated z axis does not behave as expected'
# Same for density, but max instead
dens= numpy.array([tnp.dens(r,z,phi=phi) for r,z,phi in zip(tR,tz,tphi)])
minphi= numpy.argmax(dens)
minphi_pred= numpy.argmin(numpy.fabs(phis-120./180.*numpy.pi))
assert minphi == minphi_pred, 'Flattened NFW potential with rotated z axis does not behave as expected'
return None
def test_nonaxierror_function():
# Test that the code throws an exception when calling a non-axisymmetric
# potential without phi
tnp= potential.TriaxialNFWPotential(amp=1.,b=0.7,c=0.9)
with pytest.raises(potential.PotentialError) as excinfo:
potential.evaluatePotentials(tnp,1.,0.)
with pytest.raises(potential.PotentialError) as excinfo:
potential.evaluateDensities(tnp,1.,0.)
with pytest.raises(potential.PotentialError) as excinfo:
potential.evaluateRforces(tnp,1.,0.)
with pytest.raises(potential.PotentialError) as excinfo:
potential.evaluatezforces(tnp,1.,0.)
with pytest.raises(potential.PotentialError) as excinfo:
potential.evaluatephitorques(tnp,1.,0.)
with pytest.raises(potential.PotentialError) as excinfo:
potential.evaluateR2derivs(tnp,1.,0.)
with pytest.raises(potential.PotentialError) as excinfo:
potential.evaluatez2derivs(tnp,1.,0.)
with pytest.raises(potential.PotentialError) as excinfo:
potential.evaluateRzderivs(tnp,1.,0.)
with pytest.raises(potential.PotentialError) as excinfo:
potential.evaluatephi2derivs(tnp,1.,0.)
with pytest.raises(potential.PotentialError) as excinfo:
potential.evaluateRphiderivs(tnp,1.,0.)
with pytest.raises(potential.PotentialError) as excinfo:
potential.evaluatephizderivs(tnp,1.,0.)
with pytest.raises(potential.PotentialError) as excinfo:
potential.evaluaterforces(tnp,1.,0.)
with pytest.raises(potential.PotentialError) as excinfo:
potential.evaluater2derivs(tnp,1.,0.)
with pytest.raises(potential.PotentialError) as excinfo:
potential.evaluateSurfaceDensities(tnp,1.,0.1)
return None
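# For reference, the same calls succeed once phi= is supplied, e.g.
#     potential.evaluatePotentials(tnp,1.,0.,phi=0.5)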
def test_SoftenedNeedleBarPotential_density():
# Some simple tests of the density of the SoftenedNeedleBarPotential
# For a spherical softening kernel, density should be symmetric to y/z
sbp= potential.SoftenedNeedleBarPotential(normalize=1.,a=1.,c=.1,b=0.,
pa=0.)
assert numpy.fabs(sbp.dens(2.,0.,phi=numpy.pi/4.)-sbp.dens(numpy.sqrt(2.),numpy.sqrt(2.),phi=0.)) < 10.**-13., 'SoftenedNeedleBarPotential with spherical softening kernel does not appear to have a spherically symmetric density'
# Another one
assert numpy.fabs(sbp.dens(4.,0.,phi=numpy.pi/4.)-sbp.dens(2.*numpy.sqrt(2.),2.*numpy.sqrt(2.),phi=0.)) < 10.**-13., 'SoftenedNeedleBarPotential with spherical softening kernel does not appear to have a spherically symmetric density'
# For a flattened softening kernel, the density at (y,z) should be higher than at (z,y)
sbp= potential.SoftenedNeedleBarPotential(normalize=1.,a=1.,c=.1,b=0.3,
pa=0.)
    assert sbp.dens(2.,0.,phi=numpy.pi/4.) > sbp.dens(numpy.sqrt(2.),numpy.sqrt(2.),phi=0.), 'SoftenedNeedleBarPotential with flattened softening kernel does not have a higher density at (y,z) than at (z,y) as expected'
    # Another one
    assert sbp.dens(4.,0.,phi=numpy.pi/4.) > sbp.dens(2.*numpy.sqrt(2.),2.*numpy.sqrt(2.),phi=0.), 'SoftenedNeedleBarPotential with flattened softening kernel does not have a higher density at (y,z) than at (z,y) as expected'
return None
def test_DiskSCFPotential_SigmaDerivs():
# Test that the derivatives of Sigma are correctly implemented in DiskSCF
# Very rough finite difference checks
dscfp= potential.DiskSCFPotential(dens=lambda R,z: 1.,# doesn't matter
Sigma=[{'type':'exp','h':1./3.,'amp':1.},
{'type':'expwhole','h':1./3.,
'amp':1.,'Rhole':0.5}],
hz=[{'type':'exp','h':1./27.},
{'type':'sech2','h':1./27.}],
a=1.,N=2,L=2)
# Sigma exp
testRs= numpy.linspace(0.3,1.5,101)
dR= 10.**-8.
assert numpy.all(numpy.fabs(((dscfp._Sigma[0](testRs+dR)-dscfp._Sigma[0](testRs))/dR-dscfp._dSigmadR[0](testRs))/dscfp._dSigmadR[0](testRs)) < 10.**-7.), "Derivative dSigmadR does not agree with finite-difference derivative of Sigma for exponential profile in DiskSCFPotential"
assert numpy.all(numpy.fabs(((dscfp._dSigmadR[0](testRs+dR)-dscfp._dSigmadR[0](testRs))/dR-dscfp._d2SigmadR2[0](testRs))/dscfp._d2SigmadR2[0](testRs)) < 10.**-7.), "Derivative d2SigmadR2 does not agree with finite-difference derivative of dSigmadR for exponential profile in DiskSCFPotential"
# Sigma expwhole
dR= 10.**-8.
assert numpy.all(numpy.fabs(((dscfp._Sigma[1](testRs+dR)-dscfp._Sigma[1](testRs))/dR-dscfp._dSigmadR[1](testRs))/dscfp._dSigmadR[1](testRs)) < 10.**-4.), "Derivative dSigmadR does not agree with finite-difference derivative of Sigma for exponential-with-hole profile in DiskSCFPotential"
assert numpy.all(numpy.fabs(((dscfp._dSigmadR[1](testRs+dR)-dscfp._dSigmadR[1](testRs))/dR-dscfp._d2SigmadR2[1](testRs))/dscfp._d2SigmadR2[1](testRs)) < 10.**-4.), "Derivative d2SigmadR2 does not agree with finite-difference derivative of dSigmadR for exponential-with-hole profile in DiskSCFPotential"
return None
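# A small sketch of the forward-difference pattern used in the two tests
# around here (illustrative helper, not used by the suite): df is compared
# against (f(x+dx)-f(x))/dx in a relative sense
def _example_finite_difference_check(f,df,xs,dx=1e-8,tol=1e-6):
    return numpy.all(numpy.fabs(((f(xs+dx)-f(xs))/dx-df(xs))/df(xs)) < tol)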
def test_DiskSCFPotential_verticalDerivs():
    # Test that the derivatives of the vertical profile are correctly implemented in DiskSCF
# Very rough finite difference checks
dscfp= potential.DiskSCFPotential(dens=lambda R,z: 1.,# doesn't matter
Sigma=[{'type':'exp','h':1./3.,'amp':1.},
{'type':'expwhole','h':1./3.,
'amp':1.,'Rhole':0.5}],
hz=[{'type':'exp','h':1./27.},
{'type':'sech2','h':1./27.}],
a=1.,N=2,L=2)
# Vertical exp
testzs= numpy.linspace(0.1/27.,3./27,101)
dz= 10.**-8.
assert numpy.all(numpy.fabs(((dscfp._Hz[0](testzs+dz)-dscfp._Hz[0](testzs))/dz-dscfp._dHzdz[0](testzs))/dscfp._dHzdz[0](testzs)) < 10.**-5.5), "Derivative dHzdz does not agree with finite-difference derivative of Hz for exponential profile in DiskSCFPotential"
assert numpy.all(numpy.fabs(((dscfp._dHzdz[0](testzs+dz)-dscfp._dHzdz[0](testzs))/dz-dscfp._hz[0](testzs))/dscfp._hz[0](testzs)) < 10.**-6.), "Derivative hz does not agree with finite-difference derivative of dHzdz for exponential profile in DiskSCFPotential"
# Vertical sech^2
dz= 10.**-8.
    assert numpy.all(numpy.fabs(((dscfp._Hz[1](testzs+dz)-dscfp._Hz[1](testzs))/dz-dscfp._dHzdz[1](testzs))/dscfp._dHzdz[1](testzs)) < 10.**-5.5), "Derivative dHzdz does not agree with finite-difference derivative of Hz for sech2 profile in DiskSCFPotential"
assert numpy.all(numpy.fabs(((dscfp._dHzdz[1](testzs+dz)-dscfp._dHzdz[1](testzs))/dz-dscfp._hz[1](testzs))/dscfp._hz[1](testzs)) < 10.**-6.), "Derivative hz does not agree with finite-difference derivative of dHzdz for sech2 profile in DiskSCFPotential"
return None
def test_DiskSCFPotential_nhzNeqnsigmaError():
with pytest.raises(ValueError) as excinfo:
dummy= potential.DiskSCFPotential(\
dens=lambda R,z: numpy.exp(-3.*R)\
*1./numpy.cosh(z/2.*27.)**2./4.*27.,
Sigma={'h': 1./3.,
'type': 'exp', 'amp': 1.0},
hz=[{'type':'sech2','h':1./27.},{'type':'sech2','h':1./27.}],
a=1.,N=5,L=5)
return None
def test_DiskSCFPotential_againstDoubleExp():
# Test that the DiskSCFPotential approx. of a dbl-exp disk agrees with
# DoubleExponentialDiskPotential
dp= potential.DoubleExponentialDiskPotential(amp=13.5,hr=1./3.,hz=1./27.)
dscfp= potential.DiskSCFPotential(dens=lambda R,z: dp.dens(R,z),
Sigma_amp=1.,
Sigma=lambda R: numpy.exp(-3.*R),
dSigmadR=lambda R: -3.*numpy.exp(-3.*R),
d2SigmadR2=lambda R: 9.*numpy.exp(-3.*R),
hz={'type':'exp','h':1./27.},
a=1.,N=10,L=10)
testRs= numpy.linspace(0.3,1.5,101)
testzs= numpy.linspace(0.1/27.,3./27,101)
testR= 0.9*numpy.ones_like(testzs)
testz= 1.5/27.*numpy.ones_like(testRs)
# Test potential
assert numpy.all(numpy.fabs((dp(testRs,testz)-dscfp(testRs,testz))/dscfp(testRs,testz)) < 10.**-2.5), "DiskSCFPotential for double-exponential disk does not agree with DoubleExponentialDiskPotential"
assert numpy.all(numpy.fabs((dp(testR,testzs)-dscfp(testR,testzs))/dscfp(testRs,testz)) < 10.**-2.5), "DiskSCFPotential for double-exponential disk does not agree with DoubleExponentialDiskPotential"
# Rforce
assert numpy.all(numpy.fabs((numpy.array([dp.Rforce(r,z) for (r,z) in zip(testRs,testz)])-dscfp.Rforce(testRs,testz))/dscfp.Rforce(testRs,testz)) < 10.**-2.), "DiskSCFPotential for double-exponential disk does not agree with DoubleExponentialDiskPotential"
assert numpy.all(numpy.fabs((numpy.array([dp.Rforce(r,z) for (r,z) in zip(testR,testzs)])-dscfp.Rforce(testR,testzs))/dscfp.Rforce(testRs,testz)) < 10.**-2.), "DiskSCFPotential for double-exponential disk does not agree with DoubleExponentialDiskPotential"
# zforce
assert numpy.all(numpy.fabs((numpy.array([dp.zforce(r,z) for (r,z) in zip(testRs,testz)])-dscfp.zforce(testRs,testz))/dscfp.zforce(testRs,testz)) < 10.**-1.5), "DiskSCFPotential for double-exponential disk does not agree with DoubleExponentialDiskPotential"
# Following has rel. large difference at high z
assert numpy.all(numpy.fabs((numpy.array([dp.zforce(r,z) for (r,z) in zip(testR,testzs)])-dscfp.zforce(testR,testzs))/dscfp.zforce(testRs,testz)) < 10.**-1.), "DiskSCFPotential for double-exponential disk does not agree with DoubleExponentialDiskPotential"
return None
def test_DiskSCFPotential_againstDoubleExp_dens():
# Test that the DiskSCFPotential approx. of a dbl-exp disk agrees with
# DoubleExponentialDiskPotential
dp= potential.DoubleExponentialDiskPotential(amp=13.5,hr=1./3.,hz=1./27.)
dscfp= potential.DiskSCFPotential(dens=lambda R,z: dp.dens(R,z),
Sigma={'type':'exp','h':1./3.,'amp':1.},
hz={'type':'exp','h':1./27.},
a=1.,N=10,L=10)
testRs= numpy.linspace(0.3,1.5,101)
testzs= numpy.linspace(0.1/27.,3./27,101)
testR= 0.9*numpy.ones_like(testzs)
testz= 1.5/27.*numpy.ones_like(testRs)
# Test density
assert numpy.all(numpy.fabs((dp.dens(testRs,testz)-dscfp.dens(testRs,testz))/dscfp.dens(testRs,testz)) < 10.**-1.25), "DiskSCFPotential for double-exponential disk does not agree with DoubleExponentialDiskPotential"
# difficult at high z
assert numpy.all(numpy.fabs((dp.dens(testR,testzs)-dscfp.dens(testR,testzs))/dscfp.dens(testRs,testz)) < 10.**-1.), "DiskSCFPotential for double-exponential disk does not agree with DoubleExponentialDiskPotential"
return None
def test_WrapperPotential_dims():
# Test that WrapperPotentials get assigned to Potential/planarPotential
# correctly, based on input pot=
from galpy.potential.WrapperPotential import (WrapperPotential,
parentWrapperPotential,
planarWrapperPotential)
dp= potential.DehnenBarPotential()
# 3D pot should be Potential, Wrapper, parentWrapper, not planarX
dwp= potential.DehnenSmoothWrapperPotential(pot=dp)
assert isinstance(dwp,potential.Potential), 'WrapperPotential for 3D pot= is not an instance of Potential'
assert not isinstance(dwp,potential.planarPotential), 'WrapperPotential for 3D pot= is an instance of planarPotential'
assert isinstance(dwp,parentWrapperPotential), 'WrapperPotential for 3D pot= is not an instance of parentWrapperPotential'
assert isinstance(dwp,WrapperPotential), 'WrapperPotential for 3D pot= is not an instance of WrapperPotential'
assert not isinstance(dwp,planarWrapperPotential), 'WrapperPotential for 3D pot= is an instance of planarWrapperPotential'
# 2D pot should be Potential, Wrapper, parentWrapper, not planarX
dwp= potential.DehnenSmoothWrapperPotential(pot=dp.toPlanar())
    assert isinstance(dwp,potential.planarPotential), 'WrapperPotential for 2D pot= is not an instance of planarPotential'
    assert not isinstance(dwp,potential.Potential), 'WrapperPotential for 2D pot= is an instance of Potential'
    assert isinstance(dwp,parentWrapperPotential), 'WrapperPotential for 2D pot= is not an instance of parentWrapperPotential'
    assert isinstance(dwp,planarWrapperPotential), 'WrapperPotential for 2D pot= is not an instance of planarWrapperPotential'
    assert not isinstance(dwp,WrapperPotential), 'WrapperPotential for 2D pot= is an instance of WrapperPotential'
return None
def test_Wrapper_potinputerror():
# Test that setting up a WrapperPotential with anything other than a
# (list of) planar/Potentials raises an error
with pytest.raises(ValueError) as excinfo:
potential.DehnenSmoothWrapperPotential(pot=1)
return None
def test_Wrapper_incompatibleunitserror():
# Test that setting up a WrapperPotential with a potential with
# incompatible units to the wrapper itself raises an error
# 3D
ro,vo= 8., 220.
hp= potential.HernquistPotential(amp=0.55,a=1.3,ro=ro,vo=vo)
with pytest.raises(AssertionError) as excinfo:
potential.DehnenSmoothWrapperPotential(pot=hp,ro=1.1*ro,vo=vo)
with pytest.raises(AssertionError) as excinfo:
potential.DehnenSmoothWrapperPotential(pot=hp,ro=ro,vo=vo*1.1)
with pytest.raises(AssertionError) as excinfo:
potential.DehnenSmoothWrapperPotential(pot=hp,ro=1.1*ro,vo=vo*1.1)
# 2D
hp= potential.HernquistPotential(amp=0.55,a=1.3,ro=ro,vo=vo).toPlanar()
with pytest.raises(AssertionError) as excinfo:
potential.DehnenSmoothWrapperPotential(pot=hp,ro=1.1*ro,vo=vo)
with pytest.raises(AssertionError) as excinfo:
potential.DehnenSmoothWrapperPotential(pot=hp,ro=ro,vo=vo*1.1)
with pytest.raises(AssertionError) as excinfo:
potential.DehnenSmoothWrapperPotential(pot=hp,ro=1.1*ro,vo=vo*1.1)
return None
def test_WrapperPotential_unittransfer_3d():
# Test that units are properly transferred between a potential and its
# wrapper
from galpy.util import conversion
ro,vo= 9., 230.
hp= potential.HernquistPotential(amp=0.55,a=1.3,ro=ro,vo=vo)
hpw= potential.DehnenSmoothWrapperPotential(pot=hp)
hpw_phys= conversion.get_physical(hpw,include_set=True)
assert hpw_phys['roSet'], "ro not set when wrapping a potential with ro set"
assert hpw_phys['voSet'], "vo not set when wrapping a potential with vo set"
assert numpy.fabs(hpw_phys['ro']-ro) < 1e-10, "ro not properly transferred to wrapper when wrapping a potential with ro set"
assert numpy.fabs(hpw_phys['vo']-vo) < 1e-10, "vo not properly transferred to wrapper when wrapping a potential with vo set"
# Just set ro
hp= potential.HernquistPotential(amp=0.55,a=1.3,ro=ro)
hpw= potential.DehnenSmoothWrapperPotential(pot=hp)
hpw_phys= conversion.get_physical(hpw,include_set=True)
assert hpw_phys['roSet'], "ro not set when wrapping a potential with ro set"
assert not hpw_phys['voSet'], "vo not set when wrapping a potential with vo set"
assert numpy.fabs(hpw_phys['ro']-ro) < 1e-10, "ro not properly transferred to wrapper when wrapping a potential with ro set"
# Just set vo
hp= potential.HernquistPotential(amp=0.55,a=1.3,vo=vo)
hpw= potential.DehnenSmoothWrapperPotential(pot=hp)
hpw_phys= conversion.get_physical(hpw,include_set=True)
assert not hpw_phys['roSet'], "ro not set when wrapping a potential with ro set"
assert hpw_phys['voSet'], "vo not set when wrapping a potential with vo set"
assert numpy.fabs(hpw_phys['vo']-vo) < 1e-10, "vo not properly transferred to wrapper when wrapping a potential with vo set"
return None
def test_WrapperPotential_unittransfer_2d():
# Test that units are properly transferred between a potential and its
# wrapper
from galpy.util import conversion
ro,vo= 9., 230.
hp= potential.HernquistPotential(amp=0.55,a=1.3,ro=ro,vo=vo).toPlanar()
hpw= potential.DehnenSmoothWrapperPotential(pot=hp)
hpw_phys= conversion.get_physical(hpw,include_set=True)
assert hpw_phys['roSet'], "ro not set when wrapping a potential with ro set"
assert hpw_phys['voSet'], "vo not set when wrapping a potential with vo set"
assert numpy.fabs(hpw_phys['ro']-ro) < 1e-10, "ro not properly transferred to wrapper when wrapping a potential with ro set"
assert numpy.fabs(hpw_phys['vo']-vo) < 1e-10, "vo not properly transferred to wrapper when wrapping a potential with vo set"
# Just set ro
hp= potential.HernquistPotential(amp=0.55,a=1.3,ro=ro).toPlanar()
hpw= potential.DehnenSmoothWrapperPotential(pot=hp)
hpw_phys= conversion.get_physical(hpw,include_set=True)
assert hpw_phys['roSet'], "ro not set when wrapping a potential with ro set"
assert not hpw_phys['voSet'], "vo not set when wrapping a potential with vo set"
assert numpy.fabs(hpw_phys['ro']-ro) < 1e-10, "ro not properly transferred to wrapper when wrapping a potential with ro set"
# Just set vo
hp= potential.HernquistPotential(amp=0.55,a=1.3,vo=vo).toPlanar()
hpw= potential.DehnenSmoothWrapperPotential(pot=hp)
hpw_phys= conversion.get_physical(hpw,include_set=True)
assert not hpw_phys['roSet'], "ro not set when wrapping a potential with ro set"
assert hpw_phys['voSet'], "vo not set when wrapping a potential with vo set"
assert numpy.fabs(hpw_phys['vo']-vo) < 1e-10, "vo not properly transferred to wrapper when wrapping a potential with vo set"
return None
def test_WrapperPotential_serialization():
import pickle
from galpy.potential.WrapperPotential import WrapperPotential
dp= potential.DehnenBarPotential()
dwp= potential.DehnenSmoothWrapperPotential(pot=dp)
pickled_dwp= pickle.dumps(dwp)
unpickled_dwp= pickle.loads(pickled_dwp)
assert isinstance(unpickled_dwp,WrapperPotential), 'Deserialized WrapperPotential is not an instance of WrapperPotential'
testRs= numpy.linspace(0.1,1,100)
testzs= numpy.linspace(-1,1,100)
testphis= numpy.linspace(0,2*numpy.pi,100)
testts= numpy.linspace(0,1,100)
for R,z,phi,t in zip(testRs,testzs,testphis,testts):
        assert dwp(R,z,phi,t) == unpickled_dwp(R,z,phi,t), 'Deserialized WrapperPotential does not agree with original WrapperPotential'
    return None
def test_WrapperPotential_print():
dp= potential.DehnenBarPotential()
dwp= potential.DehnenSmoothWrapperPotential(pot=dp)
assert print(dwp) is None, 'Printing a 3D wrapper potential fails'
dp= potential.DehnenBarPotential().toPlanar()
dwp= potential.DehnenSmoothWrapperPotential(pot=dp)
assert print(dwp) is None, 'Printing a 2D wrapper potential fails'
return None
def test_dissipative_ignoreInPotentialDensity2ndDerivs():
# Test that dissipative forces are ignored when they are included in lists
# given to evaluatePotentials, evaluateDensities, and evaluate2ndDerivs
lp= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,b=0.8)
cdfc= potential.ChandrasekharDynamicalFrictionForce(\
GMs=0.01,const_lnLambda=8.,
dens=lp,sigmar=lambda r: 1./numpy.sqrt(2.))
R,z= 2.,0.4
    assert numpy.fabs(potential.evaluatePotentials([lp,cdfc],R,z,phi=1.)-potential.evaluatePotentials(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluatePotentials'
    assert numpy.fabs(potential.evaluateDensities([lp,cdfc],R,z,phi=1.)-potential.evaluateDensities(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluateDensities'
    assert numpy.fabs(potential.evaluateR2derivs([lp,cdfc],R,z,phi=1.)-potential.evaluateR2derivs(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluateR2derivs'
    assert numpy.fabs(potential.evaluatez2derivs([lp,cdfc],R,z,phi=1.)-potential.evaluatez2derivs(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluatez2derivs'
    assert numpy.fabs(potential.evaluateRzderivs([lp,cdfc],R,z,phi=1.)-potential.evaluateRzderivs(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluateRzderivs'
    assert numpy.fabs(potential.evaluatephi2derivs([lp,cdfc],R,z,phi=1.)-potential.evaluatephi2derivs(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluatephi2derivs'
    assert numpy.fabs(potential.evaluateRphiderivs([lp,cdfc],R,z,phi=1.)-potential.evaluateRphiderivs(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluateRphiderivs'
    assert numpy.fabs(potential.evaluatephizderivs([lp,cdfc],R,z,phi=1.)-potential.evaluatephizderivs(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluatephizderivs'
    assert numpy.fabs(potential.evaluater2derivs([lp,cdfc],R,z,phi=1.)-potential.evaluater2derivs(lp,R,z,phi=1.)) < 1e-10, 'Dissipative forces not ignored in evaluater2derivs'
return None
def test_dissipative_noVelocityError():
# Test that calling evaluateXforces for a dissipative potential
# without including velocity produces an error
lp= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,b=0.8)
cdfc= potential.ChandrasekharDynamicalFrictionForce(\
GMs=0.01,const_lnLambda=8.,
dens=lp,sigmar=lambda r: 1./numpy.sqrt(2.))
R,z,phi= 2.,0.4,1.1
with pytest.raises(potential.PotentialError) as excinfo:
dummy= potential.evaluateRforces([lp,cdfc],R,z,phi=phi)
with pytest.raises(potential.PotentialError) as excinfo:
dummy= potential.evaluatephitorques([lp,cdfc],R,z,phi=phi)
with pytest.raises(potential.PotentialError) as excinfo:
dummy= potential.evaluatezforces([lp,cdfc],R,z,phi=phi)
with pytest.raises(potential.PotentialError) as excinfo:
dummy= potential.evaluaterforces([lp,cdfc],R,z,phi=phi)
return None
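# A minimal sketch of the intended call pattern (assuming galpy's v= keyword
# for dissipative forces, a 3-vector [vR,vT,vz]): with the velocity supplied,
# the same evaluation succeeds
def _example_dissipative_with_velocity():
    lp= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,b=0.8)
    cdfc= potential.ChandrasekharDynamicalFrictionForce(\
        GMs=0.01,const_lnLambda=8.,
        dens=lp,sigmar=lambda r: 1./numpy.sqrt(2.))
    return potential.evaluateRforces([lp,cdfc],2.,0.4,phi=1.1,v=[0.1,1.,0.1])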
def test_RingPotential_correctPotentialIntegral():
# Test that the RingPotential's potential is correct, by comparing it to a
# direct integral solution of the Poisson equation
from scipy import integrate, special
# Direct solution
def pot(R,z,amp=1.,a=0.75):
return -amp\
            *integrate.quad(lambda k: special.jv(0,k*R)*special.jv(0,k*a)*numpy.exp(-k*numpy.fabs(z)),0.,numpy.inf)[0]
rp= potential.RingPotential(amp=3.,a=0.75)
    # Just check a bunch of (R,z)s; at z=0 the direct integration doesn't converge well, so we don't check that
Rs, zs= [1.2,1.2,0.2,0.2], [0.1,-1.1,-0.1,1.1]
for R,z in zip(Rs,zs):
assert numpy.fabs(pot(R,z,amp=3.)-rp(R,z)) < 1e-8, f'RingPotential potential evaluation does not agree with direct integration at (R,z) = ({R},{z})'
return None
def test_DehnenSmoothWrapper_decay():
# Test that DehnenSmoothWrapperPotential with decay=True is the opposite
# of decay=False
lp= potential.LogarithmicHaloPotential(normalize=1.)
pot_grow= potential.DehnenSmoothWrapperPotential(pot=lp,tform=4.,
tsteady=3.)
pot_decay= potential.DehnenSmoothWrapperPotential(pot=lp,tform=4.,
tsteady=3.,decay=True)
ts= numpy.linspace(0.,10.,1001)
assert numpy.amax(numpy.fabs(lp(2.,0.,ts)-[pot_grow(2.,0.,t=t)+pot_decay(2.,0.,t=t) for t in ts])) < 1e-10, 'DehnenSmoothWrapper with decay=True is not the opposite of the same with decay=False'
assert numpy.amax(numpy.fabs(lp.Rforce(2.,0.,ts)-[pot_grow.Rforce(2.,0.,t=t)+pot_decay.Rforce(2.,0.,t=t) for t in ts])) < 1e-10, 'DehnenSmoothWrapper with decay=True is not the opposite of the same with decay=False'
return None
def test_AdiabaticContractionWrapper():
# Some basic tests of adiabatic contraction
dm1= AdiabaticContractionWrapperPotential(\
pot=potential.MWPotential2014[2],
baryonpot=potential.MWPotential2014[:2],
f_bar=None,method='cautun')
dm2= AdiabaticContractionWrapperPotential(\
pot=potential.MWPotential2014[2],
baryonpot=potential.MWPotential2014[:2],
f_bar=0.157,method='cautun')
dm3= AdiabaticContractionWrapperPotential(\
pot=potential.MWPotential2014[2],
baryonpot=potential.MWPotential2014[:2],
f_bar=0.157,method='blumenthal')
dm4= AdiabaticContractionWrapperPotential(\
pot=potential.MWPotential2014[2],
baryonpot=potential.MWPotential2014[:2],
f_bar=0.157,method='gnedin')
# at large r, the contraction should be almost negligible (1% for Cautun)
r = 50.
    assert numpy.fabs(dm1.vcirc(r)/potential.MWPotential2014[2].vcirc(r)-1.02) < 1e-2, '"cautun" adiabatic contraction does not agree at large distances'
    assert numpy.fabs(dm2.vcirc(r)/potential.MWPotential2014[2].vcirc(r)-0.97) < 1e-2, '"cautun" adiabatic contraction does not agree at large distances'
    assert numpy.fabs(dm3.vcirc(r)/potential.MWPotential2014[2].vcirc(r)-0.98) < 1e-2, '"blumenthal" adiabatic contraction does not agree at large distances'
    assert numpy.fabs(dm4.vcirc(r)/potential.MWPotential2014[2].vcirc(r)-0.98) < 1e-2, '"gnedin" adiabatic contraction does not agree at large distances'
# For MWPotential2014, contraction at 1 kpc should be about 4 in mass for
# Cautun (their Fig. 2; Mstar ~ 7e10 Msun)
r= 1./dm1._ro
assert numpy.fabs(dm1.mass(r)/potential.MWPotential2014[2].mass(r)-3.40) < 1e-2, '"cautun" adiabatic contraction does not agree at R ~ 1 kpc'
assert numpy.fabs(dm2.mass(r)/potential.MWPotential2014[2].mass(r)-3.18) < 1e-2, '"cautun" adiabatic contraction does not agree at R ~ 1 kpc'
assert numpy.fabs(dm3.mass(r)/potential.MWPotential2014[2].mass(r)-4.22) < 1e-2, '"blumenthal" adiabatic contraction does not agree at R ~ 1 kpc'
assert numpy.fabs(dm4.mass(r)/potential.MWPotential2014[2].mass(r)-4.04) < 1e-2, '"gnedin" adiabatic contraction does not agree at R ~ 1 kpc'
# At 10 kpc, it should be more like 2
r= 10./dm1._ro
assert numpy.fabs(dm1.mass(r)/potential.MWPotential2014[2].mass(r)-1.78) < 1e-2, '"cautun" adiabatic contraction does not agree at R ~ 10 kpc'
assert numpy.fabs(dm2.mass(r)/potential.MWPotential2014[2].mass(r)-1.64) < 1e-2, '"cautun" adiabatic contraction does not agree at R ~ 10 kpc'
assert numpy.fabs(dm3.mass(r)/potential.MWPotential2014[2].mass(r)-1.67) < 1e-2, '"blumenthal" adiabatic contraction does not agree at R ~ 10 kpc'
assert numpy.fabs(dm4.mass(r)/potential.MWPotential2014[2].mass(r)-1.43) < 1e-2, '"gnedin" adiabatic contraction does not agree at R ~ 10 kpc'
return None
def test_RotateAndTiltWrapper():
# some tests of the rotate and tilt wrapper
zvec= numpy.array([numpy.sqrt(1/3.),numpy.sqrt(1/3.),numpy.sqrt(1/3.)])
zvec/= numpy.sqrt(numpy.sum(zvec**2))
rot= _rotate_to_arbitrary_vector(numpy.array([[0.,0.,1.]]), zvec, inv=True)[0]
galaxy_pa= 0.3
pa_rot= numpy.array([[numpy.cos(galaxy_pa),numpy.sin(galaxy_pa),0.],
[-numpy.sin(galaxy_pa),numpy.cos(galaxy_pa),0.],
[0.,0.,1.]])
rot= numpy.dot(pa_rot, rot)
xyz_test= numpy.array([0.5,0.5,0.5])
Rphiz_test= coords.rect_to_cyl(xyz_test[0], xyz_test[1], xyz_test[2])
txyz_test= numpy.dot(rot, xyz_test)
tRphiz_test= coords.rect_to_cyl(txyz_test[0], txyz_test[1], txyz_test[2])
testpot= potential.RotateAndTiltWrapperPotential(zvec=zvec,galaxy_pa=galaxy_pa,pot=potential.MWPotential2014)
#test against the transformed potential and a MWPotential evaluated at the transformed coords
    assert numpy.fabs(evaluatePotentials(testpot, Rphiz_test[0], Rphiz_test[2], phi=Rphiz_test[1])-evaluatePotentials(potential.MWPotential2014, tRphiz_test[0], tRphiz_test[2], phi=tRphiz_test[1])) < 1e-6, 'Evaluating the potential at the same relative position in a rotated-and-tilted MWPotential2014 and a non-rotated one does not give the same result'
# Also a triaxial NFW
NFW_wrapped= potential.RotateAndTiltWrapperPotential(zvec=zvec, galaxy_pa=galaxy_pa, pot=potential.TriaxialNFWPotential(amp=1.,b=0.7,c=0.5))
NFW_rot= potential.TriaxialNFWPotential(amp=1., zvec=zvec, pa=galaxy_pa,b=0.7,c=0.5)
    assert numpy.fabs(evaluatePotentials(NFW_wrapped, Rphiz_test[0], Rphiz_test[2], phi=Rphiz_test[1])-evaluatePotentials(NFW_rot, Rphiz_test[0], Rphiz_test[2], phi=Rphiz_test[1])) < 1e-6, 'Wrapped and internally rotated NFW potentials do not match when evaluated at the same point'
    # Try not specifying galaxy_pa, should default to 0
    NFW_wrapped= potential.RotateAndTiltWrapperPotential(zvec=zvec,pot=potential.TriaxialNFWPotential(amp=1.,b=0.7,c=0.5))
    NFW_rot= potential.TriaxialNFWPotential(amp=1., zvec=zvec,pa=0.,b=0.7,c=0.5)
    assert numpy.fabs(evaluatePotentials(NFW_wrapped, Rphiz_test[0], Rphiz_test[2], phi=Rphiz_test[1])-evaluatePotentials(NFW_rot, Rphiz_test[0], Rphiz_test[2], phi=Rphiz_test[1])) < 1e-6, 'Wrapped and internally rotated NFW potentials do not match when evaluated at the same point'
# Try not specifying zvec, should be =[0,0,1]
NFW_wrapped= potential.RotateAndTiltWrapperPotential(galaxy_pa=galaxy_pa, pot=potential.TriaxialNFWPotential(amp=1.,b=0.7,c=0.5))
NFW_rot= potential.TriaxialNFWPotential(amp=1., zvec=[0.,0.,1.],pa=galaxy_pa,b=0.7,c=0.5)
    assert numpy.fabs(evaluatePotentials(NFW_wrapped, Rphiz_test[0], Rphiz_test[2], phi=Rphiz_test[1])-evaluatePotentials(NFW_rot, Rphiz_test[0], Rphiz_test[2], phi=Rphiz_test[1])) < 1e-6, 'Wrapped and internally rotated NFW potentials do not match when evaluated at the same point'
#make sure the offset works as intended
# triaxial NFW at x,y,z = [20.,0.,3.]
NFW_wrapped= potential.RotateAndTiltWrapperPotential(zvec=zvec, galaxy_pa=galaxy_pa, offset=[20.,0.,3.], pot=potential.TriaxialNFWPotential(amp=1.,b=0.7,c=0.5))
NFW_rot= potential.TriaxialNFWPotential(amp=1., zvec=zvec, pa=galaxy_pa,b=0.7,c=0.5)
    assert numpy.fabs(evaluatePotentials(NFW_wrapped, 0., 0., phi=0.)-evaluatePotentials(NFW_rot, 20., -3., phi=numpy.pi)) < 1e-6, 'Wrapped + offset and internally rotated NFW potentials do not match when evaluated at the same point'
    return None
def test_integration_RotateAndTiltWrapper():
## test a quick orbit integration to hit the C code (also test pure python)
#two potentials, one offset
offset = [3.,2.,1.]
mwpot = potential.MWPotential2014
mwpot_wrapped = potential.RotateAndTiltWrapperPotential(pot=potential.MWPotential2014, offset=offset)
#initialise orbit
ro = 8.
orb = orbit.Orbit(ro=ro)
#another, offset by the same as the potential
init = orb.vxvv[0]
R, vR, vT, z, vz, phi = init
x, y, z = coords.cyl_to_rect(R, phi, z)
vx, vy, vz = coords.cyl_to_rect_vec(vR, vT, vz, phi)
tx, ty, tz = x-offset[0], y-offset[1], z-offset[2]
tR, tphi, tz = coords.rect_to_cyl(tx, ty, tz)
tvR, tvT, tvz = coords.rect_to_cyl_vec(vx, vy, vz, tR, tphi, tz, cyl=True)
orb_t = orbit.Orbit([tR, tvR, tvT, tz, tvz, tphi], ro=ro)
#integrate
ts = numpy.linspace(0.,1.,1000)
orb.integrate(ts, pot=mwpot, method='dop853')
orb_t.integrate(ts, pot=mwpot_wrapped, method='dop853')
#translate other orbit to match first one:
orb_vxvv = orb_t.getOrbit()
R, vR, vT, z, vz, phi = orb_vxvv[:,0], orb_vxvv[:,1], orb_vxvv[:,2], orb_vxvv[:,3], orb_vxvv[:,4], orb_vxvv[:,5]
x, y, z = coords.cyl_to_rect(R, phi, z)
vx, vy, vz = coords.cyl_to_rect_vec(vR, vT, vz, phi)
tx, ty, tz = x+offset[0], y+offset[1], z+offset[2]
tR, tphi, tz = coords.rect_to_cyl(tx, ty, tz)
#check equal
    Rz = numpy.dstack([orb.R(ts), orb.z(ts)])[0]
    Rz_t = numpy.dstack([tR*ro,tz*ro])[0]
    assert numpy.all(numpy.fabs(Rz-Rz_t) < 10.**-10), 'Pure python orbit integration in an offset potential does not work as expected'
#reinitialise orbits, just to be sure
orb = orbit.Orbit(ro=ro)
init = orb.vxvv[0]
R, vR, vT, z, vz, phi = init
offset = [3.,2.,1.]
x, y, z = coords.cyl_to_rect(R, phi, z)
vx, vy, vz = coords.cyl_to_rect_vec(vR, vT, vz, phi)
tx, ty, tz = x-offset[0], y-offset[1], z-offset[2]
tR, tphi, tz = coords.rect_to_cyl(tx, ty, tz)
tvR, tvT, tvz = coords.rect_to_cyl_vec(vx, vy, vz, tR, tphi, tz, cyl=True)
orb_t = orbit.Orbit([tR, tvR, tvT, tz, tvz, tphi], ro=ro)
#integrate, use C
orb.integrate(ts, pot=mwpot, method='dop853_c')
orb_t.integrate(ts, pot=mwpot_wrapped, method='dop853_c')
orb_vxvv = orb_t.getOrbit()
R, vR, vT, z, vz, phi = orb_vxvv[:,0], orb_vxvv[:,1], orb_vxvv[:,2], orb_vxvv[:,3], orb_vxvv[:,4], orb_vxvv[:,5]
x, y, z = coords.cyl_to_rect(R, phi, z)
vx, vy, vz = coords.cyl_to_rect_vec(vR, vT, vz, phi)
tx, ty, tz = x+offset[0], y+offset[1], z+offset[2]
tR, tphi, tz = coords.rect_to_cyl(tx, ty, tz)
#check equal
    Rz = numpy.dstack([orb.R(ts), orb.z(ts)])[0]
    Rz_t = numpy.dstack([tR*ro,tz*ro])[0]
    assert numpy.all(numpy.fabs(Rz-Rz_t) < 10.**-10), 'C orbit integration in an offset potential does not work as expected'
return None
def test_vtermnegl_issue314():
# Test related to issue 314: vterm for negative l
rp= potential.RazorThinExponentialDiskPotential(normalize=1.,hr=3./8.)
assert numpy.fabs(rp.vterm(0.5)+rp.vterm(-0.5)) < 10.**-8., 'vterm for negative l does not behave as expected'
return None
def test_Ferrers_Rzderiv_issue319():
# Test that the Rz derivative works for the FerrersPotential (issue 319)
fp= potential.FerrersPotential(normalize=1.)
    # scipy.misc.derivative was removed in SciPy >= 1.12, so use a simple
    # central finite difference instead
    dx= 10.**-8.
    rzderiv= fp.Rzderiv(0.5,0.2,phi=1.)
    rzderiv_finitediff= (-fp.zforce(0.5+dx,0.2,phi=1.)
                         +fp.zforce(0.5-dx,0.2,phi=1.))/2./dx
assert numpy.fabs(rzderiv-rzderiv_finitediff) < 10.**-7., 'Rzderiv for FerrersPotential does not agree with finite-difference calculation'
return None
def test_rtide():
#Test that rtide is being calculated properly in select potentials
lp=potential.LogarithmicHaloPotential()
assert abs(1.0-lp.rtide(1.,0.,M=1.0)/0.793700525984) < 10.**-12.,"Calculation of rtide in logarithmic potential fails"
pmass=potential.PlummerPotential(b=0.0)
assert abs(1.0-pmass.rtide(1.,0.,M=1.0)/0.693361274351) < 10.**-12., "Calculation of rtide in point-mass potential fails"
# Also test function interface
assert abs(1.0-potential.rtide([lp],1.,0.,M=1.0)/0.793700525984) < 10.**-12.,"Calculation of rtide in logarithmic potential fails"
pmass=potential.PlummerPotential(b=0.0)
assert abs(1.0-potential.rtide([pmass],1.,0.,M=1.0)/0.693361274351) < 10.**-12., "Calculation of rtide in point-mass potential fails"
return None
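# The reference values above are analytic: the tidal radius follows
# rtide = [GM/(Omega^2-d^2Phi/dr^2)]^(1/3), which gives (1/2)^(1/3) ~= 0.79370
# for a flat rotation curve and (1/3)^(1/3) ~= 0.69336 for a point mass
# (with r=1, vc(1)=1, M=1)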
def test_rtide_noMError():
# Test the running rtide without M= input raises error
lp=potential.LogarithmicHaloPotential()
with pytest.raises(potential.PotentialError) as excinfo:
dummy= lp.rtide(1.,0.)
with pytest.raises(potential.PotentialError) as excinfo:
dummy= potential.rtide([lp],1.,0.)
return None
def test_ttensor():
pmass= potential.KeplerPotential(normalize=1.)
tij=pmass.ttensor(1.0,0.0,0.0)
# Full tidal tensor here should be diag(2,-1,-1)
assert numpy.all(numpy.fabs(tij-numpy.diag([2,-1,-1])) < 1e-10), "Calculation of tidal tensor in point-mass potential fails"
# Also test eigenvalues
tij=pmass.ttensor(1.0,0.0,0.0,eigenval=True)
assert numpy.all(numpy.fabs(tij-numpy.array([2,-1,-1])) < 1e-10), "Calculation of tidal tensor in point-mass potential fails"
# Also test function interface
tij= potential.ttensor([pmass],1.0,0.0,0.0)
# Full tidal tensor here should be diag(2,-1,-1)
assert numpy.all(numpy.fabs(tij-numpy.diag([2,-1,-1])) < 1e-10), "Calculation of tidal tensor in point-mass potential fails"
# Also test eigenvalues
tij= potential.ttensor([pmass],1.0,0.0,0.0,eigenval=True)
assert numpy.all(numpy.fabs(tij-numpy.array([2,-1,-1])) < 1e-10), "Calculation of tidal tensor in point-mass potential fails"
# Also Test symmetry when y!=0 and z!=0
tij= potential.ttensor([pmass],1.0,1.0,1.0)
assert numpy.all(numpy.fabs(tij[0][1]-tij[1][0]) < 1e-10), "Calculation of tidal tensor in point-mass potential fails"
assert numpy.all(numpy.fabs(tij[0][2]-tij[2][0]) < 1e-10), "Calculation of tidal tensor in point-mass potential fails"
assert numpy.all(numpy.fabs(tij[1][2]-tij[2][1]) < 1e-10), "Calculation of tidal tensor in point-mass potential fails"
return None
def test_ttensor_trace():
# Test that the trace of the tidal tensor == -4piG density for a bunch of
# potentials
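    # (Poisson's equation: trace(T) = -del^2 Phi = -4 pi G rho)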
pots= [potential.KeplerPotential(normalize=1.),
potential.LogarithmicHaloPotential(normalize=3.,q=0.8),
potential.MiyamotoNagaiPotential(normalize=0.5,a=3.,b=0.5)]
R,z,phi= 1.3,-0.2,2.
for pot in pots:
        assert numpy.fabs(numpy.trace(pot.ttensor(R,z,phi=phi))+4.*numpy.pi*pot.dens(R,z,phi=phi)) < 1e-10, 'Trace of the tidal tensor does not equal -4piG times the density'
# Also test a list
    assert numpy.fabs(numpy.trace(potential.ttensor(potential.MWPotential2014,R,z,phi=phi))+4.*numpy.pi*potential.evaluateDensities(potential.MWPotential2014,R,z,phi=phi)) < 1e-10, 'Trace of the tidal tensor does not equal -4piG times the density'
return None
def test_ttensor_nonaxi():
# Test that computing the tidal tensor for a non-axi potential raises error
lp= potential.LogarithmicHaloPotential(normalize=1.,b=0.8,q=0.7)
with pytest.raises(potential.PotentialError) as excinfo:
dummy= lp.ttensor(1.,0.,0.)
with pytest.raises(potential.PotentialError) as excinfo:
dummy= potential.ttensor(lp,1.,0.,0.)
return None
# Test that zvc_range returns the range over which the zvc is defined for a
# given E,Lz
def test_zvc_range():
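    # zvc_range returns [Rmin,Rmax], the radii at which
    # Phi_eff(R,0) = Phi(R,0) + Lz^2/(2 R^2) = E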
E, Lz= -1.25, 0.6
Rmin, Rmax= potential.zvc_range(potential.MWPotential2014,E,Lz)
assert numpy.fabs(potential.evaluatePotentials(potential.MWPotential2014,Rmin,0.)+Lz**2./2./Rmin**2.-E) < 1e-8, 'zvc_range does not return radius at which Phi_eff(R,0) = E'
assert numpy.fabs(potential.evaluatePotentials(potential.MWPotential2014,Rmax,0.)+Lz**2./2./Rmax**2.-E) < 1e-8, 'zvc_range does not return radius at which Phi_eff(R,0) = E'
R_a_little_less= Rmin-1e-4
assert potential.evaluatePotentials(potential.MWPotential2014,R_a_little_less,0.)+Lz**2./2./R_a_little_less**2. > E, 'zvc_range does not give the minimum R for which Phi_eff(R,0) < E'
R_a_little_more= Rmax+1e-4
assert potential.evaluatePotentials(potential.MWPotential2014,R_a_little_more,0.)+Lz**2./2./R_a_little_more**2. > E, 'zvc_range does not give the maximum R for which Phi_eff(R,0) < E'
# Another one for good measure
E, Lz= -2.25, 0.2
Rmin, Rmax= potential.zvc_range(potential.MWPotential2014,E,Lz)
assert numpy.fabs(potential.evaluatePotentials(potential.MWPotential2014,Rmin,0.)+Lz**2./2./Rmin**2.-E) < 1e-8, 'zvc_range does not return radius at which Phi_eff(R,0) = E'
assert numpy.fabs(potential.evaluatePotentials(potential.MWPotential2014,Rmax,0.)+Lz**2./2./Rmax**2.-E) < 1e-8, 'zvc_range does not return radius at which Phi_eff(R,0) = E'
R_a_little_less= Rmin-1e-4
assert potential.evaluatePotentials(potential.MWPotential2014,R_a_little_less,0.)+Lz**2./2./R_a_little_less**2. > E, 'zvc_range does not give the minimum R for which Phi_eff(R,0) < E'
R_a_little_more= Rmax+1e-4
assert potential.evaluatePotentials(potential.MWPotential2014,R_a_little_more,0.)+Lz**2./2./R_a_little_more**2. > E, 'zvc_range does not give the maximum R for which Phi_eff(R,0) < E'
# Also one for a single potential
pot= potential.PlummerPotential(normalize=True)
E, Lz= -1.9, 0.2
Rmin, Rmax= pot.zvc_range(E,Lz)
assert numpy.fabs(potential.evaluatePotentials(pot,Rmin,0.)+Lz**2./2./Rmin**2.-E) < 1e-8, 'zvc_range does not return radius at which Phi_eff(R,0) = E'
assert numpy.fabs(potential.evaluatePotentials(pot,Rmax,0.)+Lz**2./2./Rmax**2.-E) < 1e-8, 'zvc_range does not return radius at which Phi_eff(R,0) = E'
R_a_little_less= Rmin-1e-4
assert potential.evaluatePotentials(pot,R_a_little_less,0.)+Lz**2./2./R_a_little_less**2. > E, 'zvc_range does not give the minimum R for which Phi_eff(R,0) < E'
R_a_little_more= Rmax+1e-4
assert potential.evaluatePotentials(pot,R_a_little_more,0.)+Lz**2./2./R_a_little_more**2. > E, 'zvc_range does not give the maximum R for which Phi_eff(R,0) < E'
return None
# Test that we get [NaN,NaN] when there are no orbits for this combination of E and Lz
def test_zvc_range_undefined():
# Set up circular orbit at Rc, then ask for Lz > Lzmax(E)
Rc= 0.6653
E= potential.evaluatePotentials(potential.MWPotential2014,Rc,0.)\
+potential.vcirc(potential.MWPotential2014,Rc)**2./2.
Lzmax= Rc*potential.vcirc(potential.MWPotential2014,Rc)
assert numpy.all(numpy.isnan(potential.zvc_range(potential.MWPotential2014,E,Lzmax+1e-4))), 'zvc_range does not return [NaN,NaN] when no orbits exist at this combination of (E,Lz)'
return None
def test_zvc_at_rminmax():
E, Lz= -1.25, 0.6
Rmin, Rmax= potential.zvc_range(potential.MWPotential2014,E,Lz)
assert numpy.fabs(potential.zvc(potential.MWPotential2014,Rmin,E,Lz)) < 1e-8, 'zvc at minimum from zvc_range is not at zero height'
assert numpy.fabs(potential.zvc(potential.MWPotential2014,Rmax,E,Lz)) < 1e-8, 'zvc at maximum from zvc_range is not at zero height'
# Another one for good measure
E, Lz= -2.25, 0.2
Rmin, Rmax= potential.zvc_range(potential.MWPotential2014,E,Lz)
assert numpy.fabs(potential.zvc(potential.MWPotential2014,Rmin,E,Lz)) < 1e-8, 'zvc at minimum from zvc_range is not at zero height'
assert numpy.fabs(potential.zvc(potential.MWPotential2014,Rmax,E,Lz)) < 1e-8, 'zvc at maximum from zvc_range is not at zero height'
# Also for a single potential
pot= potential.PlummerPotential(normalize=True)
E, Lz= -1.9, 0.2
Rmin, Rmax= pot.zvc_range(E,Lz)
assert numpy.fabs(pot.zvc(Rmin,E,Lz)) < 1e-8, 'zvc at minimum from zvc_range is not at zero height'
assert numpy.fabs(pot.zvc(Rmax,E,Lz)) < 1e-8, 'zvc at maximum from zvc_range is not at zero height'
return None
def test_zvc():
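    # For Rmin < R < Rmax, zvc(R,E,Lz) returns the height z at which
    # Phi_eff(R,z) = E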
E, Lz= -1.25, 0.6
Rmin, Rmax= potential.zvc_range(potential.MWPotential2014,E,Lz)
Rtrial= 0.5*(Rmin+Rmax)
ztrial= potential.zvc(potential.MWPotential2014,Rtrial,E,Lz)
assert numpy.fabs(potential.evaluatePotentials(potential.MWPotential2014,Rtrial,ztrial)+Lz**2./2./Rtrial**2.-E) < 1e-8, 'zvc does not return the height at which Phi_eff(R,z) = E'
Rtrial= Rmin+0.25*(Rmax-Rmin)
ztrial= potential.zvc(potential.MWPotential2014,Rtrial,E,Lz)
assert numpy.fabs(potential.evaluatePotentials(potential.MWPotential2014,Rtrial,ztrial)+Lz**2./2./Rtrial**2.-E) < 1e-8, 'zvc does not return the height at which Phi_eff(R,z) = E'
Rtrial= Rmin+0.75*(Rmax-Rmin)
ztrial= potential.zvc(potential.MWPotential2014,Rtrial,E,Lz)
assert numpy.fabs(potential.evaluatePotentials(potential.MWPotential2014,Rtrial,ztrial)+Lz**2./2./Rtrial**2.-E) < 1e-8, 'zvc does not return the height at which Phi_eff(R,z) = E'
# Another one for good measure
E, Lz= -2.25, 0.2
Rmin, Rmax= potential.zvc_range(potential.MWPotential2014,E,Lz)
Rtrial= 0.5*(Rmin+Rmax)
ztrial= potential.zvc(potential.MWPotential2014,Rtrial,E,Lz)
assert numpy.fabs(potential.evaluatePotentials(potential.MWPotential2014,Rtrial,ztrial)+Lz**2./2./Rtrial**2.-E) < 1e-8, 'zvc does not return the height at which Phi_eff(R,z) = E'
Rtrial= Rmin+0.25*(Rmax-Rmin)
ztrial= potential.zvc(potential.MWPotential2014,Rtrial,E,Lz)
assert numpy.fabs(potential.evaluatePotentials(potential.MWPotential2014,Rtrial,ztrial)+Lz**2./2./Rtrial**2.-E) < 1e-8, 'zvc does not return the height at which Phi_eff(R,z) = E'
Rtrial= Rmin+0.75*(Rmax-Rmin)
ztrial= potential.zvc(potential.MWPotential2014,Rtrial,E,Lz)
assert numpy.fabs(potential.evaluatePotentials(potential.MWPotential2014,Rtrial,ztrial)+Lz**2./2./Rtrial**2.-E) < 1e-8, 'zvc does not return the height at which Phi_eff(R,z) = E'
# Also for a single potential
pot= potential.PlummerPotential(normalize=True)
E, Lz= -1.9, 0.2
Rmin, Rmax= pot.zvc_range(E,Lz)
Rtrial= 0.5*(Rmin+Rmax)
ztrial= pot.zvc(Rtrial,E,Lz)
assert numpy.fabs(potential.evaluatePotentials(pot,Rtrial,ztrial)+Lz**2./2./Rtrial**2.-E) < 1e-8, 'zvc does not return the height at which Phi_eff(R,z) = E'
Rtrial= Rmin+0.25*(Rmax-Rmin)
ztrial= pot.zvc(Rtrial,E,Lz)
assert numpy.fabs(potential.evaluatePotentials(pot,Rtrial,ztrial)+Lz**2./2./Rtrial**2.-E) < 1e-8, 'zvc does not return the height at which Phi_eff(R,z) = E'
Rtrial= Rmin+0.75*(Rmax-Rmin)
ztrial= pot.zvc(Rtrial,E,Lz)
assert numpy.fabs(potential.evaluatePotentials(pot,Rtrial,ztrial)+Lz**2./2./Rtrial**2.-E) < 1e-8, 'zvc does not return the height at which Phi_eff(R,z) = E'
return None
# Test that zvc outside of zvc_range is NaN
def test_zvc_undefined():
E, Lz= -1.25, 0.6
Rmin, Rmax= potential.zvc_range(potential.MWPotential2014,E,Lz)
assert numpy.isnan(potential.zvc(potential.MWPotential2014,Rmin-1e-4,E,Lz)), 'zvc at R < Rmin is not NaN'
assert numpy.isnan(potential.zvc(potential.MWPotential2014,Rmax+1e-4,E,Lz)), 'zvc at R > Rmax is not NaN'
# Another one for good measure
E, Lz= -2.25, 0.2
Rmin, Rmax= potential.zvc_range(potential.MWPotential2014,E,Lz)
assert numpy.isnan(potential.zvc(potential.MWPotential2014,Rmin-1e-4,E,Lz)), 'zvc at R < Rmin is not NaN'
assert numpy.isnan(potential.zvc(potential.MWPotential2014,Rmax+1e-4,E,Lz)), 'zvc at R > Rmax is not NaN'
return None
# Check that we get the correct ValueError if no solution can be found
def test_zvc_valueerror():
    E, Lz= -1.25, 0.6
with pytest.raises(ValueError) as excinfo:
potential.zvc(potential.MWPotential2014,0.7,E+100,Lz)
return None
def test_rhalf():
# Test some known cases
a= numpy.pi
# Hernquist, r12= (1+sqrt(2))a
hp= potential.HernquistPotential(amp=1.,a=a)
assert numpy.fabs(hp.rhalf()-(1.+numpy.sqrt(2.))*a) < 1e-10, 'Half-mass radius of the Hernquist potential incorrect'
    # DehnenSpherical, r12= a/(2^(1/(3-alpha))-1)
alpha= 1.34
hp= potential.DehnenSphericalPotential(amp=1.,a=a,alpha=alpha)
assert numpy.fabs(hp.rhalf()-a/(2**(1./(3.-alpha))-1.)) < 1e-10, 'Half-mass radius of the DehnenSpherical potential incorrect'
# Plummer, r12= b/sqrt(1/0.5^(2/3)-1)
pp= potential.PlummerPotential(amp=1.,b=a)
assert numpy.fabs(potential.rhalf(pp)-a/numpy.sqrt(0.5**(-2./3.)-1.)) < 1e-10, 'Half-mass radius of the Plummer potential incorrect'
return None
def test_tdyn():
# Spherical: tdyn = 2piR/vc
a= numpy.pi
# Hernquist
hp= potential.HernquistPotential(amp=1.,a=a)
R= 1.4
assert numpy.fabs(hp.tdyn(R)-2.*numpy.pi*R/hp.vcirc(R)) < 1e-10, 'Dynamical time of the Hernquist potential incorrect'
# DehnenSpherical
alpha= 1.34
hp= potential.DehnenSphericalPotential(amp=1.,a=a,alpha=alpha)
assert numpy.fabs(potential.tdyn(hp,R)-2.*numpy.pi*R/hp.vcirc(R)) < 1e-10, 'Dynamical time of the DehnenSpherical potential incorrect'
    # Axisymmetric disk: the spherical relation tdyn = 2piR/vc only holds approximately
hp= potential.MiyamotoNagaiPotential(amp=1.,a=a,b=a/5.)
R= 3.4
assert numpy.fabs(hp.tdyn(R)/(2.*numpy.pi*R/hp.vcirc(R))-1.) < 0.03, 'Dynamical time of the Miyamoto-Nagai potential incorrect'
return None
def test_NumericalPotentialDerivativesMixin():
# Test that the NumericalPotentialDerivativesMixin works as expected
def get_mixin_first_instance(cls,*args,**kwargs):
# Function to return instance of a class for Potential cls where
# the NumericalPotentialDerivativesMixin comes first, so all derivs
# are numerical (should otherwise always be used second!)
class NumericalPot(potential.NumericalPotentialDerivativesMixin,cls):
def __init__(self,*args,**kwargs):
potential.NumericalPotentialDerivativesMixin.__init__(self,
kwargs)
cls.__init__(self,*args,**kwargs)
return NumericalPot(*args,**kwargs)
# Function to check all numerical derivatives
def check_numerical_derivs(Pot,NumPot,tol=1e-6,tol2=1e-5):
# tol: tolerance for forces, tol2: tolerance for 2nd derivatives
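        # Denominators below are raised to a boolean power: True (1) gives a
        # relative comparison, False (0) divides by 1, i.e., an absolute
        # comparison wherever the exact value can vanish (e.g., zforce at z=0,
        # phitorque and phi-derivatives for axisymmetric potentials)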
# Check wide range of R,z,phi
Rs= numpy.array([0.5,1.,2.])
Zs= numpy.array([0.,.125,-.125,0.25,-0.25])
phis= numpy.array([0.,0.5,-0.5,1.,-1.,
numpy.pi,0.5+numpy.pi,
1.+numpy.pi])
for ii in range(len(Rs)):
for jj in range(len(Zs)):
for kk in range(len(phis)):
# Forces
assert numpy.fabs((Pot.Rforce(Rs[ii],Zs[jj],phi=phis[kk])
-NumPot.Rforce(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.Rforce(Rs[ii],Zs[jj],phi=phis[kk])) < tol, f'NumericalPotentialDerivativesMixin applied to {Pot.__class__.__name__} Potential does not give the correct Rforce'
assert numpy.fabs((Pot.zforce(Rs[ii],Zs[jj],phi=phis[kk])
-NumPot.zforce(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.zforce(Rs[ii],Zs[jj],phi=phis[kk])**(Zs[jj] > 0.)) < tol, f'NumericalPotentialDerivativesMixin applied to {Pot.__class__.__name__} Potential does not give the correct zforce'
assert numpy.fabs((Pot.phitorque(Rs[ii],Zs[jj],phi=phis[kk])
-NumPot.phitorque(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.phitorque(Rs[ii],Zs[jj],phi=phis[kk])**Pot.isNonAxi) < tol, f'NumericalPotentialDerivativesMixin applied to {Pot.__class__.__name__} Potential does not give the correct phitorque'
# Second derivatives
assert numpy.fabs((Pot.R2deriv(Rs[ii],Zs[jj],phi=phis[kk])
-NumPot.R2deriv(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.R2deriv(Rs[ii],Zs[jj],phi=phis[kk])) < tol2, f'NumericalPotentialDerivativesMixin applied to {Pot.__class__.__name__} Potential does not give the correct R2deriv'
assert numpy.fabs((Pot.z2deriv(Rs[ii],Zs[jj],phi=phis[kk])
-NumPot.z2deriv(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.z2deriv(Rs[ii],Zs[jj],phi=phis[kk])) < tol2, f'NumericalPotentialDerivativesMixin applied to {Pot.__class__.__name__} Potential does not give the correct z2deriv'
assert numpy.fabs((Pot.phi2deriv(Rs[ii],Zs[jj],phi=phis[kk])
-NumPot.phi2deriv(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.phi2deriv(Rs[ii],Zs[jj],phi=phis[kk])**Pot.isNonAxi) < tol2, f'NumericalPotentialDerivativesMixin applied to {Pot.__class__.__name__} Potential does not give the correct phi2deriv'
assert numpy.fabs((Pot.Rzderiv(Rs[ii],Zs[jj],phi=phis[kk])
-NumPot.Rzderiv(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.Rzderiv(Rs[ii],Zs[jj],phi=phis[kk])**(Zs[jj] > 0.)) < tol2, f'NumericalPotentialDerivativesMixin applied to {Pot.__class__.__name__} Potential does not give the correct Rzderiv'
assert numpy.fabs((Pot.Rphideriv(Rs[ii],Zs[jj],phi=phis[kk])
-NumPot.Rphideriv(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.Rphideriv(Rs[ii],Zs[jj],phi=phis[kk])**Pot.isNonAxi) < tol2, f'NumericalPotentialDerivativesMixin applied to {Pot.__class__.__name__} Potential does not give the correct Rphideriv'
assert numpy.fabs((Pot.phizderiv(Rs[ii],Zs[jj],phi=phis[kk])
-NumPot.phizderiv(Rs[ii],Zs[jj],phi=phis[kk]))/Pot.phizderiv(Rs[ii],Zs[jj],phi=phis[kk])**(Pot.isNonAxi*(Zs[jj] != 0.))) < tol2, f'NumericalPotentialDerivativesMixin applied to {Pot.__class__.__name__} Potential does not give the correct phizderiv'
return None
# Now check some potentials
# potential.MiyamotoNagaiPotential
mp= potential.MiyamotoNagaiPotential(amp=1.,a=0.5,b=0.05)
num_mp= get_mixin_first_instance(potential.MiyamotoNagaiPotential,
amp=1.,a=0.5,b=0.05)
check_numerical_derivs(mp,num_mp)
# potential.DehnenBarPotential
dp= potential.DehnenBarPotential()
num_dp= get_mixin_first_instance(potential.DehnenBarPotential)
check_numerical_derivs(dp,num_dp)
return None
# Test that we don't get the "FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated" numpy warning for the SCF potential; issue #347
def test_scf_tupleindexwarning():
import warnings
with warnings.catch_warnings(record=True):
warnings.simplefilter("error",FutureWarning)
p= mockSCFZeeuwPotential()
p.Rforce(1.,0.)
    # another one reported by Nil; here the problem arises with array input
with warnings.catch_warnings(record=True):
warnings.simplefilter("error",FutureWarning)
p= mockSCFZeeuwPotential()
p.Rforce(numpy.atleast_1d(1.),numpy.atleast_1d(0.))
return None
# Test that conversion between xi and R works as expected
def test_scf_xiToR():
from galpy.potential.SCFPotential import _RToxi, _xiToR
a= numpy.pi
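    # The SCF expansion uses xi = (r-a)/(r+a), mapping r=0 -> xi=-1 and
    # r=inf -> xi=+1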
r= 1.4
assert numpy.fabs(_xiToR(_RToxi(r,a=a),a=a)-r) < 1e-10, "_RToxi and _xiToR aren't each other's inverse in r <-> xi conversion used in SCF potential"
xi= 1.3
assert numpy.fabs(_RToxi(_xiToR(xi,a=a),a=a)-xi) < 1e-10, "_RToxi and _xiToR aren't each other's inverse in r <-> xi conversion used in SCF potential"
# Also for arrays
r= numpy.linspace(0.1,5.3,21)
assert numpy.all(numpy.fabs(_xiToR(_RToxi(r,a=a),a=a)-r) < 1e-10), "_RToxi and _xiToR aren't each other's inverse in r <-> xi conversion used in SCF potential"
xi= numpy.linspace(-0.9,0.9,21)
assert numpy.all(numpy.fabs(_RToxi(_xiToR(xi,a=a),a=a)-xi) < 1e-10), "_RToxi and _xiToR aren't each other's inverse in r <-> xi conversion used in SCF potential"
# Check 0 and inf
r= 0
assert numpy.fabs(_RToxi(r,a=a)+1) < 1e-10, "_RToxi and _xiToR aren't each other's inverse in r <-> xi conversion used in SCF potential"
xi= -1.
assert numpy.fabs(_xiToR(xi,a=a)) < 1e-10, "_RToxi and _xiToR aren't each other's inverse in r <-> xi conversion used in SCF potential"
r= numpy.inf
assert numpy.fabs(_RToxi(r,a=a)-1) < 1e-10, "_RToxi and _xiToR aren't each other's inverse in r <-> xi conversion used in SCF potential"
xi= 1.
assert numpy.isinf(_xiToR(xi,a=a)), "_RToxi and _xiToR aren't each other's inverse in r <-> xi conversion used in SCF potential"
# Also for arrays with zero and inf
r= numpy.concatenate((numpy.linspace(0.,5.3,21),[numpy.inf]))
assert numpy.all(numpy.fabs(_xiToR(_RToxi(r,a=a),a=a)[:-1]-r[:-1]) < 1e-10), "_RToxi and _xiToR aren't each other's inverse in r <-> xi conversion used in SCF potential"
assert numpy.fabs(_RToxi(r,a=a)[-1]-1.) < 1e-10, "_RToxi and _xiToR aren't each other's inverse in r <-> xi conversion used in SCF potential"
xi= numpy.linspace(-1,1,21)
assert numpy.all(numpy.fabs(_RToxi(_xiToR(xi,a=a),a=a)-xi) < 1e-10), "_RToxi and _xiToR aren't each other's inverse in r <-> xi conversion used in SCF potential"
return None
# Test that attempting to multiply or divide a potential by something other than a number raises an error
def test_mult_divide_error():
# 3D
pot= potential.LogarithmicHaloPotential(normalize=1.,q=0.9)
with pytest.raises(TypeError) as excinfo:
pot*[1.,2.]
with pytest.raises(TypeError) as excinfo:
[1.,2.]*pot
with pytest.raises(TypeError) as excinfo:
pot/[1.,2.]
# 2D
pot= potential.LogarithmicHaloPotential(normalize=1.,q=0.9).toPlanar()
with pytest.raises(TypeError) as excinfo:
pot*[1.,2.]
with pytest.raises(TypeError) as excinfo:
[1.,2.]*pot
with pytest.raises(TypeError) as excinfo:
pot/[1.,2.]
# 1D
pot= potential.LogarithmicHaloPotential(normalize=1.,q=0.9).toVertical(1.1)
with pytest.raises(TypeError) as excinfo:
pot*[1.,2.]
with pytest.raises(TypeError) as excinfo:
[1.,2.]*pot
with pytest.raises(TypeError) as excinfo:
pot/[1.,2.]
return None
# Test that arithmetically adding potentials returns lists of potentials
def test_add_potentials():
assert potential.MWPotential2014 == potential.MWPotential2014[0]+potential.MWPotential2014[1]+potential.MWPotential2014[2], 'Potential addition of components of MWPotential2014 does not give MWPotential2014'
# 3D
pot1= potential.LogarithmicHaloPotential(normalize=1.,q=0.9)
pot2= potential.MiyamotoNagaiPotential(normalize=0.2,a=0.4,b=0.1)
pot3= potential.HernquistPotential(normalize=0.4,a=0.1)
assert pot1+pot2 == [pot1,pot2]
assert pot1+pot2+pot3 == [pot1,pot2,pot3]
assert (pot1+pot2)+pot3 == [pot1,pot2,pot3]
assert pot1+(pot2+pot3) == [pot1,pot2,pot3]
# 2D
pot1= potential.LogarithmicHaloPotential(normalize=1.,q=0.9).toPlanar()
pot2= potential.MiyamotoNagaiPotential(normalize=0.2,a=0.4,b=0.1).toPlanar()
pot3= potential.HernquistPotential(normalize=0.4,a=0.1).toPlanar()
assert pot1+pot2 == [pot1,pot2]
assert pot1+pot2+pot3 == [pot1,pot2,pot3]
assert (pot1+pot2)+pot3 == [pot1,pot2,pot3]
assert pot1+(pot2+pot3) == [pot1,pot2,pot3]
# 1D
pot1= potential.LogarithmicHaloPotential(normalize=1.,q=0.9).toVertical(1.1)
pot2= potential.MiyamotoNagaiPotential(normalize=0.2,a=0.4,b=0.1).toVertical(1.1)
pot3= potential.HernquistPotential(normalize=0.4,a=0.1).toVertical(1.1)
assert pot1+pot2 == [pot1,pot2]
assert pot1+pot2+pot3 == [pot1,pot2,pot3]
assert (pot1+pot2)+pot3 == [pot1,pot2,pot3]
assert pot1+(pot2+pot3) == [pot1,pot2,pot3]
return None
# Test that attempting to multiply or divide a potential by something other
# than a number raises a TypeError (test both left and right)
def test_add_potentials_error():
# 3D
pot= potential.LogarithmicHaloPotential(normalize=1.,q=0.9)
with pytest.raises(TypeError) as excinfo:
3+pot
with pytest.raises(TypeError) as excinfo:
pot+3
# 2D
pot= potential.LogarithmicHaloPotential(normalize=1.,q=0.9).toPlanar()
with pytest.raises(TypeError) as excinfo:
3+pot
with pytest.raises(TypeError) as excinfo:
pot+3
# 1D
pot= potential.LogarithmicHaloPotential(normalize=1.,q=0.9).toVertical(1.1)
with pytest.raises(TypeError) as excinfo:
3+pot
with pytest.raises(TypeError) as excinfo:
pot+3
return None
# Test that adding potentials with incompatible unit systems raises an error
def test_add_potentials_unitserror():
# 3D
ro, vo= 8., 220.
pot= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,
ro=ro,vo=vo)
potro= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,
ro=ro*1.1,vo=vo)
potvo= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,
ro=ro,vo=vo*1.1)
potrovo= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,
ro=ro*1.1,vo=vo*1.1)
with pytest.raises(AssertionError) as excinfo: pot+potro
with pytest.raises(AssertionError) as excinfo: pot+potvo
with pytest.raises(AssertionError) as excinfo: pot+potrovo
with pytest.raises(AssertionError) as excinfo: potro+pot
with pytest.raises(AssertionError) as excinfo: potvo+pot
with pytest.raises(AssertionError) as excinfo: potrovo+pot
# 2D
pot= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,
ro=ro,vo=vo).toPlanar()
potro= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,
ro=ro*1.1,vo=vo).toPlanar()
potvo= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,
ro=ro,vo=vo*1.1).toPlanar()
potrovo= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,
ro=ro*1.1,vo=vo*1.1).toPlanar()
with pytest.raises(AssertionError) as excinfo: pot+potro
with pytest.raises(AssertionError) as excinfo: pot+potvo
with pytest.raises(AssertionError) as excinfo: pot+potrovo
with pytest.raises(AssertionError) as excinfo: potro+pot
with pytest.raises(AssertionError) as excinfo: potvo+pot
with pytest.raises(AssertionError) as excinfo: potrovo+pot
# 1D
pot= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,
ro=ro,vo=vo).toVertical(1.1)
potro= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,
ro=ro*1.1,vo=vo).toVertical(1.1)
potvo= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,
ro=ro,vo=vo*1.1).toVertical(1.1)
potrovo= potential.LogarithmicHaloPotential(normalize=1.,q=0.9,
ro=ro*1.1,vo=vo*1.1).toVertical(1.1)
with pytest.raises(AssertionError) as excinfo: pot+potro
with pytest.raises(AssertionError) as excinfo: pot+potvo
with pytest.raises(AssertionError) as excinfo: pot+potrovo
with pytest.raises(AssertionError) as excinfo: potro+pot
with pytest.raises(AssertionError) as excinfo: potvo+pot
with pytest.raises(AssertionError) as excinfo: potrovo+pot
return None
# Test unit handling of interpolated Spherical potentials
def test_interSphericalPotential_unithandling():
pot= potential.HernquistPotential(amp=1.,a=2.,ro=8.3,vo=230.)
# Test that setting up the interpolated potential with inconsistent units
# raises a RuntimeError
with pytest.raises(RuntimeError):
ipot= potential.interpSphericalPotential(rforce=pot,rgrid=numpy.geomspace(0.01,5.,201),ro=7.5)
with pytest.raises(RuntimeError):
ipot= potential.interpSphericalPotential(rforce=pot,rgrid=numpy.geomspace(0.01,5.,201),vo=210.)
# Check that units are properly transferred
ipot= potential.interpSphericalPotential(rforce=pot,rgrid=numpy.geomspace(0.01,5.,201))
assert ipot._roSet, 'ro of interpSphericalPotential not set, even though that of parent was set'
assert ipot._ro == pot._ro, 'ro of interpSphericalPotential does not agree with that of the parent potential'
assert ipot._voSet, 'vo of interpSphericalPotential not set, even though that of parent was set'
assert ipot._vo == pot._vo, 'vo of interpSphericalPotential does not agree with that of the parent potential'
return None
# Test that the amplitude of the isothermal disk potential is set correctly (issue #400)
def test_isodisk_amplitude_issue400():
# Morgan's example
z= numpy.linspace(-0.1,0.1,10001)
pot= potential.IsothermalDiskPotential(amp=0.1,sigma=20.5/220.)
    # Density at z=0 should be 0.1; linearPotentials implement neither a
    # density nor a second derivative, so estimate it from the 1D Poisson
    # equation rho(z) = Phi''(z)/(4 pi G) with a double numerical gradient
z= numpy.linspace(-2e-4,2e-4,5)
dens_at_0= 1./(numpy.pi*4)*numpy.gradient(numpy.gradient(pot(z),z),z)[2]
assert numpy.fabs(dens_at_0-0.1) < 1e-7, 'Density at z=0 for IsothermalDiskPotential is not correct'
return None
def test_TimeDependentAmplitudeWrapperPotential_against_DehnenSmooth():
# Test that TimeDependentAmplitudeWrapperPotential acts the same as DehnenSmooth
# Test = LogPot + DehnenBar grown smoothly
# Both using the DehnenSmoothWrapper and the new TimeDependentAmplitudeWrapperPotential
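    # DehnenSmoothWrapper multiplies its wrapped potential by the smooth growth
    # function _smooth(t), so passing that same function as A= to
    # TimeDependentAmplitudeWrapperPotential must produce identical orbits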
from galpy.orbit import Orbit
lp= potential.LogarithmicHaloPotential()
dbp= potential.DehnenBarPotential(tform=-100000.,tsteady=1.)
dp= potential.DehnenSmoothWrapperPotential(pot=dbp)
tp= potential.TimeDependentAmplitudeWrapperPotential(pot=dbp,A=dp._smooth)
# Orbit of the Sun
o= Orbit()
ts= numpy.linspace(0.,-20.,1001)
o.integrate(ts,lp+dp)
ott= o()
ott.integrate(ts,lp+tp)
tol= 1e-10
assert numpy.amax(numpy.fabs(o.x(ts)-ott.x(ts))) <tol, 'Integrating an orbit in a growing DehnenSmoothWrapper does not agree between DehnenSmooth and TimeDependentWrapper'
assert numpy.amax(numpy.fabs(o.y(ts)-ott.y(ts))) <tol, 'Integrating an orbit in a growing DehnenSmoothWrapper does not agree between DehnenSmooth and TimeDependentWrapper'
assert numpy.amax(numpy.fabs(o.z(ts)-ott.z(ts))) <tol, 'Integrating an orbit in a growing DehnenSmoothWrapper does not agree between DehnenSmooth and TimeDependentWrapper'
assert numpy.amax(numpy.fabs(o.vx(ts)-ott.vx(ts))) <tol, 'Integrating an orbit in a growing DehnenSmoothWrapper does not agree between DehnenSmooth and TimeDependentWrapper'
assert numpy.amax(numpy.fabs(o.vy(ts)-ott.vy(ts))) <tol, 'Integrating an orbit in a growing DehnenSmoothWrapper does not agree between DehnenSmooth and TimeDependentWrapper'
assert numpy.amax(numpy.fabs(o.vz(ts)-ott.vz(ts))) <tol, 'Integrating an orbit in a growing DehnenSmoothWrapper does not agree between DehnenSmooth and TimeDependentWrapper'
return None
def test_TimeDependentAmplitudeWrapperPotential_against_DehnenSmooth_2d():
# Test that TimeDependentAmplitudeWrapperPotential acts the same as DehnenSmooth
# Test = LogPot + DehnenBar grown smoothly
# Both using the DehnenSmoothWrapper and the new TimeDependentAmplitudeWrapperPotential
from galpy.orbit import Orbit
lp= potential.LogarithmicHaloPotential()
dbp= potential.DehnenBarPotential(tform=-100000.,tsteady=1.)
dp= potential.DehnenSmoothWrapperPotential(pot=dbp)
tp= potential.TimeDependentAmplitudeWrapperPotential(pot=dbp,A=dp._smooth)
# Orbit of the Sun
o= Orbit().toPlanar()
ts= numpy.linspace(0.,-20.,1001)
o.integrate(ts,lp+dp)
ott= o()
ott.integrate(ts,lp+tp)
tol= 1e-10
assert numpy.amax(numpy.fabs(o.x(ts)-ott.x(ts))) <tol, 'Integrating an orbit in a growing DehnenSmoothWrapper does not agree between DehnenSmooth and TimeDependentWrapper'
assert numpy.amax(numpy.fabs(o.y(ts)-ott.y(ts))) <tol, 'Integrating an orbit in a growing DehnenSmoothWrapper does not agree between DehnenSmooth and TimeDependentWrapper'
assert numpy.amax(numpy.fabs(o.vx(ts)-ott.vx(ts))) <tol, 'Integrating an orbit in a growing DehnenSmoothWrapper does not agree between DehnenSmooth and TimeDependentWrapper'
assert numpy.amax(numpy.fabs(o.vy(ts)-ott.vy(ts))) <tol, 'Integrating an orbit in a growing DehnenSmoothWrapper does not agree between DehnenSmooth and TimeDependentWrapper'
return None
def test_TimeDependentAmplitudeWrapperPotential_against_DehnenSmooth_2d_dxdv():
# Test that TimeDependentAmplitudeWrapperPotential acts the same as DehnenSmooth
# Test = LogPot + DehnenBar grown smoothly
# Both using the DehnenSmoothWrapper and the new TimeDependentAmplitudeWrapperPotential
from galpy.orbit import Orbit
lp= potential.LogarithmicHaloPotential()
dbp= potential.DehnenBarPotential(tform=-100000.,tsteady=1.)
dp= potential.DehnenSmoothWrapperPotential(pot=dbp)
tp= potential.TimeDependentAmplitudeWrapperPotential(pot=dbp,A=dp._smooth)
# Orbit of the Sun
o= Orbit().toPlanar()
ts= numpy.linspace(0.,-20.,1001)
o.integrate_dxdv([1.,0.,0.,0.],ts,lp+dp,rectIn=True,rectOut=True)
ott= o()
ott.integrate_dxdv([1.,0.,0.,0.],ts,lp+tp,rectIn=True,rectOut=True)
tol= 1e-10
assert numpy.amax(numpy.fabs(o.getOrbit_dxdv()-ott.getOrbit_dxdv())) <tol, 'Integrating an orbit with dxdv in a growing DehnenSmoothWrapper does not agree between DehnenSmooth and TimeDependentWrapper'
return None
def test_TimeDependentAmplitudeWrapperPotential_inputerrors():
    from galpy.potential import TimeDependentAmplitudeWrapperPotential
    # TypeError when A not supplied
    lp= potential.LogarithmicHaloPotential()
with pytest.raises(TypeError,match="A= input to TimeDependentAmplitudeWrapperPotential should be a function"):
tp= TimeDependentAmplitudeWrapperPotential(pot=lp)
    # TypeError when supplying a function with no arguments
with pytest.raises(TypeError,match="A= input to TimeDependentAmplitudeWrapperPotential should be a function that can be called with a single parameter"):
tp= TimeDependentAmplitudeWrapperPotential(pot=lp,A=lambda: 1.)
    # TypeError when supplying a function with more than 1 argument
with pytest.raises(TypeError,match="A= input to TimeDependentAmplitudeWrapperPotential should be a function that can be called with a single parameter"):
tp= TimeDependentAmplitudeWrapperPotential(pot=lp,A=lambda x,y: x+y)
    # But additional arguments with defaults should be allowed
tp= TimeDependentAmplitudeWrapperPotential(pot=lp,A=lambda x,y=1.: x+y)
# Return value should be a number
with pytest.raises(TypeError,match=r"A= function needs to return a number \(specifically, a numbers.Number\)"):
tp= TimeDependentAmplitudeWrapperPotential(pot=lp,A=lambda t: (t,t+1))
with pytest.raises(TypeError,match=r"A= function needs to return a number \(specifically, a numbers.Number\)"):
tp= TimeDependentAmplitudeWrapperPotential(pot=lp,A=lambda t: numpy.array([t]))
return None
def test_phiforce_deprecation():
# Test that phiforce is being deprecated correctly for phitorque
import warnings
# Check that we've removed phiforce in the correct version
from packaging.version import parse as parse_version
deprecation_version= parse_version('1.9')
from galpy import __version__ as galpy_version
galpy_version= parse_version(galpy_version)
should_be_removed= galpy_version >= deprecation_version
# Now test
lp= potential.LogarithmicHaloPotential()
# Method
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always",FutureWarning)
try:
lp.phiforce(1.,0.1)
except AttributeError:
if not should_be_removed:
raise AssertionError('phiforce stopped working before it is supposed to have been removed')
else:
if should_be_removed:
raise AssertionError('phiforce not removed when it was supposed to be removed')
raisedWarning= False
for wa in w:
raisedWarning= (str(wa.message) == 'phiforce has been renamed phitorque, because it has always really been a torque (per unit mass); please switch to the new method name, because the old name will be removed in v1.9 and may be re-used for the actual phi force component')
if raisedWarning: break
assert raisedWarning, "phiforce deprecation did not raise the expected warning"
# Function
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always",FutureWarning)
try:
potential.evaluatephiforces(lp,1.,0.1)
except AttributeError:
if not should_be_removed:
raise AssertionError('phiforce stopped working before it is supposed to have been removed')
else:
if should_be_removed:
raise AssertionError('phiforce not removed when it was supposed to be removed')
raisedWarning= False
for wa in w:
raisedWarning= (str(wa.message) == 'evaluatephiforces has been renamed evaluatephitorques, because it has always really been a torque (per unit mass); please switch to the new method name, because the old name will be removed in v1.9 and may be re-used for the actual phi force component')
if raisedWarning: break
    assert raisedWarning, "phiforce deprecation did not raise the expected warning"
    return None
def test_phiforce_deprecation_2d():
# Test that phiforce is being deprecated correctly for phitorque
import warnings
# Check that we've removed phiforce in the correct version
from packaging.version import parse as parse_version
deprecation_version= parse_version('1.9')
from galpy import __version__ as galpy_version
galpy_version= parse_version(galpy_version)
should_be_removed= galpy_version >= deprecation_version
# Now test
lp= potential.LogarithmicHaloPotential().toPlanar()
# Method
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always",FutureWarning)
try:
lp.phiforce(1.,0.1)
except AttributeError:
if not should_be_removed:
raise AssertionError('phiforce stopped working before it is supposed to have been removed')
else:
if should_be_removed:
raise AssertionError('phiforce not removed when it was supposed to be removed')
raisedWarning= False
for wa in w:
raisedWarning= (str(wa.message) == 'phiforce has been renamed phitorque, because it has always really been a torque (per unit mass); please switch to the new method name, because the old name will be removed in v1.9 and may be re-used for the actual phi force component')
if raisedWarning: break
assert raisedWarning, "phiforce deprecation did not raise the expected warning"
# Function
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always",FutureWarning)
try:
potential.evaluateplanarphiforces(lp,1.,0.1)
except AttributeError:
if not should_be_removed:
raise AssertionError('phiforce stopped working before it is supposed to have been removed')
else:
if should_be_removed:
raise AssertionError('phiforce not removed when it was supposed to be removed')
raisedWarning= False
for wa in w:
raisedWarning= (str(wa.message) == 'evaluateplanarphiforces has been renamed evaluateplanarphitorques, because it has always really been a torque (per unit mass); please switch to the new method name, because the old name will be removed in v1.9 and may be re-used for the actual phi force component')
if raisedWarning: break
    assert raisedWarning, "phiforce deprecation did not raise the expected warning"
    return None
# Test that Pot is required to be a positional argument for Potential functions
def test_potential_Pot_is_positional():
from galpy import potential
from galpy.potential import MWPotential2014
for func in [potential.evaluatePotentials,
potential.evaluateRforces,
potential.evaluatezforces,
potential.evaluateR2derivs,
potential.evaluatez2derivs,
potential.evaluateRzderivs,
potential.evaluaterforces,
potential.evaluatephitorques,
potential.evaluateDensities,
potential.evaluateSurfaceDensities,
potential.flattening,
potential.rtide,
potential.ttensor]:
with pytest.raises(TypeError) as excinfo:
func(Pot=MWPotential2014,R=1.,z=0.)
assert "required positional argument: 'Pot'" in excinfo.value.args[0]
for func in [potential.omegac,
potential.epifreq,
potential.verticalfreq,
potential.rhalf,
potential.tdyn]:
with pytest.raises(TypeError) as excinfo:
func(Pot=MWPotential2014,R=1.)
assert "required positional argument: 'Pot'" in excinfo.value.args[0]
# Special cases
with pytest.raises(TypeError) as excinfo:
potential.evaluatephiforces(Pot=MWPotential2014,R=1.,z=0.)
assert "required positional argument: 'Pot'" in excinfo.value.args[0]
with pytest.raises(TypeError) as excinfo:
potential.lindbladR(Pot=MWPotential2014,OmegaP=1.)
assert "required positional argument: 'Pot'" in excinfo.value.args[0]
with pytest.raises(TypeError) as excinfo:
potential.rl(Pot=MWPotential2014,lz=1.)
assert "required positional argument: 'Pot'" in excinfo.value.args[0]
with pytest.raises(TypeError) as excinfo:
potential.rE(Pot=MWPotential2014,E=1.)
assert "required positional argument: 'Pot'" in excinfo.value.args[0]
with pytest.raises(TypeError) as excinfo:
potential.LcE(Pot=MWPotential2014,E=1.)
assert "required positional argument: 'Pot'" in excinfo.value.args[0]
with pytest.raises(TypeError) as excinfo:
potential.vterm(Pot=MWPotential2014,l=1.)
assert "required positional argument: 'Pot'" in excinfo.value.args[0]
with pytest.raises(TypeError) as excinfo:
potential.zvc_range(Pot=MWPotential2014,E=1.,Lz=1.)
assert "required positional argument: 'Pot'" in excinfo.value.args[0]
with pytest.raises(TypeError) as excinfo:
potential.zvc(Pot=MWPotential2014,R=1.,E=1.,Lz=1.)
assert "required positional argument: 'Pot'" in excinfo.value.args[0]
with pytest.raises(TypeError) as excinfo:
potential.rhalf(Pot=MWPotential2014)
assert "required positional argument: 'Pot'" in excinfo.value.args[0]
return None
# Test that Pot is required to be a positional argument for Potential functions
def test_potential_Pot_is_positional_planar():
from galpy import potential
from galpy.potential import MWPotential2014
for func in [potential.evaluateplanarPotentials,
potential.evaluateplanarRforces,
potential.evaluateplanarR2derivs,
potential.evaluateplanarphitorques]:
with pytest.raises(TypeError) as excinfo:
func(Pot=potential.toPlanarPotential(MWPotential2014),R=1.)
assert "required positional argument: 'Pot'" in excinfo.value.args[0]
return None
# Test that Pot is required to be a positional argument for Potential functions
def test_potential_Pot_is_positional_linear():
from galpy import potential
from galpy.potential import MWPotential2014
for func in [potential.evaluatelinearPotentials,
potential.evaluatelinearForces]:
with pytest.raises(TypeError) as excinfo:
func(Pot=potential.toVerticalPotential(MWPotential2014,1.),x=1.)
assert "required positional argument: 'Pot'" in excinfo.value.args[0]
return None
# Issue #495
def test_diskscf_overflow():
from galpy.actionAngle import estimateDeltaStaeckel
from galpy.orbit import Orbit
from galpy.potential.mwpotentials import McMillan17
from galpy.util import conversion
ro17= conversion.get_physical(McMillan17)['ro']
vo17= conversion.get_physical(McMillan17)['vo']
o17= Orbit([209.3, 26.8, 46.5, -1.16, -0.88, 189.11], radec=True, ro = ro17, vo=vo17)
delta= estimateDeltaStaeckel(McMillan17,o17.R(use_physical=False),o17.z(use_physical=False))
assert not numpy.isnan(delta), 'estimateDeltaStaeckel returns NaN due to overflow in DiskSCFPotential'
def test_InterpSnapshotRZPotential_pickling():
# Test that InterpSnapshotRZPotential can be pickled (see #507, #509)
if not _PYNBODY_LOADED:
pytest.skip()
import pickle
import pynbody
from galpy.potential import InterpSnapshotRZPotential
# Set up simple snapshot: 1 star!
s= pynbody.new(star=1)
s['mass']= 1.
s['eps']= 0.
spi= InterpSnapshotRZPotential(s)
test= pickle.dumps(spi)
newspi= pickle.loads(test)
# Inside the grid
assert numpy.fabs(newspi(1.,0.)-spi(1.,0.)) < 1e-10, "Unpickled InterpSnapshotRZPotential does not return the same potential as original instance"
# Outside the grid, needs _origPot
assert numpy.fabs(newspi(1.,10.)-spi(1.,10.)) < 1e-10, "Unpickled InterpSnapshotRZPotential does not return the same potential as original instance"
return None
def test_plotting():
import tempfile
#Some tests of the plotting routines, to make sure they don't fail
kp= potential.KeplerPotential(normalize=1.)
#Plot the rotation curve
kp.plotRotcurve()
kp.toPlanar().plotRotcurve() #through planar interface
kp.plotRotcurve(Rrange=[0.01,10.],
grid=101,
savefilename=None)
potential.plotRotcurve([kp])
potential.plotRotcurve([kp],Rrange=[0.01,10.],
grid=101,
savefilename=None)
#Also while saving the result
savefile, tmp_savefilename= tempfile.mkstemp()
try:
os.close(savefile) #Easier this way
os.remove(tmp_savefilename)
#First save
kp.plotRotcurve(Rrange=[0.01,10.],
grid=101,
savefilename=tmp_savefilename)
#Then plot using the saved file
kp.plotRotcurve(Rrange=[0.01,10.],
grid=101,
savefilename=tmp_savefilename)
finally:
os.remove(tmp_savefilename)
#Plot the escape-velocity curve
kp.plotEscapecurve()
kp.toPlanar().plotEscapecurve() #Through planar interface
kp.plotEscapecurve(Rrange=[0.01,10.],
grid=101,
savefilename=None)
potential.plotEscapecurve([kp])
potential.plotEscapecurve([kp],Rrange=[0.01,10.],
grid=101,
savefilename=None)
#Also while saving the result
savefile, tmp_savefilename= tempfile.mkstemp()
try:
os.close(savefile) #Easier this way
os.remove(tmp_savefilename)
#First save
kp.plotEscapecurve(Rrange=[0.01,10.],
grid=101,
savefilename=tmp_savefilename)
#Then plot using the saved file
kp.plotEscapecurve(Rrange=[0.01,10.],
grid=101,
savefilename=tmp_savefilename)
finally:
os.remove(tmp_savefilename)
#Plot the potential itself
kp.plot()
kp.plot(t=1.,rmin=0.01,rmax=1.8,nrs=11,zmin=-0.55,zmax=0.55,nzs=11,
effective=False,Lz=None,xy=True,
xrange=[0.01,1.8],yrange=[-0.55,0.55],justcontours=True,
ncontours=11,savefilename=None)
#Also while saving the result
savefile, tmp_savefilename= tempfile.mkstemp()
try:
os.close(savefile) #Easier this way
os.remove(tmp_savefilename)
#First save
kp.plot(t=1.,rmin=0.01,rmax=1.8,nrs=11,zmin=-0.55,zmax=0.55,nzs=11,
effective=False,Lz=None,
xrange=[0.01,1.8],yrange=[-0.55,0.55],
ncontours=11,savefilename=tmp_savefilename)
#Then plot using the saved file
kp.plot(t=1.,rmin=0.01,rmax=1.8,nrs=11,zmin=-0.55,zmax=0.55,nzs=11,
effective=False,Lz=None,
xrange=[0.01,1.8],yrange=[-0.55,0.55],
ncontours=11,savefilename=tmp_savefilename)
finally:
os.remove(tmp_savefilename)
potential.plotPotentials([kp])
#Also while saving the result
savefile, tmp_savefilename= tempfile.mkstemp()
try:
os.close(savefile) #Easier this way
os.remove(tmp_savefilename)
#First save
potential.plotPotentials([kp],
rmin=0.01,rmax=1.8,nrs=11,
zmin=-0.55,zmax=0.55,nzs=11,
justcontours=True,xy=True,
ncontours=11,savefilename=tmp_savefilename)
#Then plot using the saved file
potential.plotPotentials([kp],t=1.,
rmin=0.01,rmax=1.8,nrs=11,
zmin=-0.55,zmax=0.55,nzs=11,
ncontours=11,savefilename=tmp_savefilename)
finally:
os.remove(tmp_savefilename)
#Plot the effective potential
kp.plot()
kp.plot(effective=True,Lz=1.)
try:
kp.plot(effective=True,Lz=None)
except RuntimeError:
pass
else:
raise AssertionError("Potential.plot with effective=True, but Lz=None did not return a RuntimeError")
potential.plotPotentials([kp],effective=True,Lz=1.)
try:
potential.plotPotentials([kp],effective=True,Lz=None)
except RuntimeError:
pass
else:
raise AssertionError("Potential.plot with effective=True, but Lz=None did not return a RuntimeError")
#Plot the density of a LogarithmicHaloPotential
lp= potential.LogarithmicHaloPotential(normalize=1.)
lp.plotDensity()
lp.plotDensity(t=1.,rmin=0.05,rmax=1.8,nrs=11,zmin=-0.55,zmax=0.55,nzs=11,
aspect=1.,log=True,justcontours=True,xy=True,
ncontours=11,savefilename=None)
#Also while saving the result
savefile, tmp_savefilename= tempfile.mkstemp()
try:
os.close(savefile) #Easier this way
os.remove(tmp_savefilename)
#First save
lp.plotDensity(savefilename=tmp_savefilename)
#Then plot using the saved file
lp.plotDensity(savefilename=tmp_savefilename)
finally:
os.remove(tmp_savefilename)
potential.plotDensities([lp])
potential.plotDensities([lp],t=1.,
rmin=0.05,rmax=1.8,nrs=11,
zmin=-0.55,zmax=0.55,nzs=11,
aspect=1.,log=True,xy=True,
justcontours=True,
ncontours=11,savefilename=None)
#Plot the surface density of a LogarithmicHaloPotential
lp= potential.LogarithmicHaloPotential(normalize=1.)
lp.plotSurfaceDensity()
lp.plotSurfaceDensity(t=1.,z=2.,xmin=0.05,xmax=1.8,nxs=11,
ymin=-0.55,ymax=0.55,nys=11,
aspect=1.,log=True,justcontours=True,
ncontours=11,savefilename=None)
#Also while saving the result
savefile, tmp_savefilename= tempfile.mkstemp()
try:
os.close(savefile) #Easier this way
os.remove(tmp_savefilename)
#First save
lp.plotSurfaceDensity(savefilename=tmp_savefilename)
#Then plot using the saved file
lp.plotSurfaceDensity(savefilename=tmp_savefilename)
finally:
os.remove(tmp_savefilename)
potential.plotSurfaceDensities([lp])
potential.plotSurfaceDensities([lp],t=1.,z=2.,
xmin=0.05,xmax=1.8,nxs=11,
ymin=-0.55,ymax=0.55,nys=11,
aspect=1.,log=True,
justcontours=True,
ncontours=11,savefilename=None)
#Plot the potential itself for a 2D potential
kp.toPlanar().plot()
savefile, tmp_savefilename= tempfile.mkstemp()
try:
os.close(savefile) #Easier this way
os.remove(tmp_savefilename)
#First save
kp.toPlanar().plot(Rrange=[0.01,1.8],grid=11,
savefilename=tmp_savefilename)
#Then plot using the saved file
kp.toPlanar().plot(Rrange=[0.01,1.8],grid=11,
savefilename=tmp_savefilename)
finally:
os.remove(tmp_savefilename)
dp= potential.EllipticalDiskPotential()
savefile, tmp_savefilename= tempfile.mkstemp()
try:
os.close(savefile) #Easier this way
os.remove(tmp_savefilename)
#First save
dp.plot(xrange=[0.01,1.8],yrange=[0.01,1.8],gridx=11,gridy=11,
ncontours=11,savefilename=tmp_savefilename)
#Then plot using the saved file
dp.plot(xrange=[0.01,1.8],yrange=[0.01,1.8],gridx=11,gridy=11,
ncontours=11,savefilename=tmp_savefilename)
finally:
os.remove(tmp_savefilename)
potential.plotplanarPotentials([dp],gridx=11,gridy=11)
#Tests of linearPotential plotting
lip= potential.RZToverticalPotential(potential.MiyamotoNagaiPotential(normalize=1.),1.)
lip.plot()
savefile, tmp_savefilename= tempfile.mkstemp()
try:
os.close(savefile) #Easier this way
os.remove(tmp_savefilename)
#First save
lip.plot(t=0.,min=-15.,max=15,ns=21,savefilename=tmp_savefilename)
#Then plot using the saved file
lip.plot(t=0.,min=-15.,max=15,ns=21,savefilename=tmp_savefilename)
finally:
os.remove(tmp_savefilename)
savefile, tmp_savefilename= tempfile.mkstemp()
try:
os.close(savefile) #Easier this way
os.remove(tmp_savefilename)
#First save
potential.plotlinearPotentials(lip,t=0.,min=-15.,max=15,ns=21,
savefilename=tmp_savefilename)
#Then plot using the saved file
potential.plotlinearPotentials(lip,t=0.,min=-15.,max=15,ns=21,
savefilename=tmp_savefilename)
finally:
os.remove(tmp_savefilename)
return None
#Classes for testing integer-exponent TwoPowerSphericalPotential and for
# testing special cases of some other potentials
from galpy.potential import (BurkertPotential, DiskSCFPotential,
FerrersPotential, FlattenedPowerPotential,
LogarithmicHaloPotential, MiyamotoNagaiPotential,
MN3ExponentialDiskPotential, MWPotential,
NullPotential, PowerSphericalPotential,
SoftenedNeedleBarPotential, SpiralArmsPotential,
TriaxialHernquistPotential,
TriaxialJaffePotential, TriaxialNFWPotential,
TwoPowerSphericalPotential,
TwoPowerTriaxialPotential, interpRZPotential)
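# Nearly spherical limit (a -> 0, b = 0) of the softened needle bar; isNonAxi
# is set to False so it is treated as axisymmetric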
class mockSphericalSoftenedNeedleBarPotential(SoftenedNeedleBarPotential):
def __init__(self):
SoftenedNeedleBarPotential.__init__(self,amp=1.,a=0.000001,b=0.,
c=10.,omegab=0.,pa=0.)
self.normalize(1.)
self.isNonAxi= False
return None
def _evaluate(self,R,z,phi=0.,t=0.):
if phi is None: phi= 0.
x,y,z= self._compute_xyz(R,phi,z,t)
Tp, Tm= self._compute_TpTm(x,y,z)
return numpy.log((x-self._a+Tm)/(x+self._a+Tp))/2./self._a
class specialTwoPowerSphericalPotential(TwoPowerSphericalPotential):
def __init__(self):
TwoPowerSphericalPotential.__init__(self,amp=1.,a=5.,alpha=1.5,beta=3.)
return None
class DehnenTwoPowerSphericalPotential(TwoPowerSphericalPotential):
def __init__(self):
TwoPowerSphericalPotential.__init__(self,amp=1.,a=5.,alpha=1.5,beta=4.)
return None
class DehnenCoreTwoPowerSphericalPotential(TwoPowerSphericalPotential):
def __init__(self):
TwoPowerSphericalPotential.__init__(self,amp=1.,a=5.,alpha=0,beta=4.)
return None
class HernquistTwoPowerSphericalPotential(TwoPowerSphericalPotential):
def __init__(self):
TwoPowerSphericalPotential.__init__(self,amp=1.,a=5.,alpha=1.,beta=4.)
return None
class JaffeTwoPowerSphericalPotential(TwoPowerSphericalPotential):
def __init__(self):
TwoPowerSphericalPotential.__init__(self,amp=1.,a=5.,alpha=2.,beta=4.)
return None
class NFWTwoPowerSphericalPotential(TwoPowerSphericalPotential):
def __init__(self):
TwoPowerSphericalPotential.__init__(self,amp=1.,a=5.,alpha=1.,beta=3.)
return None
class specialPowerSphericalPotential(PowerSphericalPotential):
def __init__(self):
PowerSphericalPotential.__init__(self,amp=1.,alpha=2.)
return None
class specialMiyamotoNagaiPotential(MiyamotoNagaiPotential):
def __init__(self):
MiyamotoNagaiPotential.__init__(self,amp=1.,a=0.,b=0.1)
return None
class specialFlattenedPowerPotential(FlattenedPowerPotential):
def __init__(self):
FlattenedPowerPotential.__init__(self,alpha=0.)
return None
class specialMN3ExponentialDiskPotentialPD(MN3ExponentialDiskPotential):
def __init__(self):
MN3ExponentialDiskPotential.__init__(self,normalize=1.,posdens=True)
return None
class specialMN3ExponentialDiskPotentialSECH(MN3ExponentialDiskPotential):
def __init__(self):
MN3ExponentialDiskPotential.__init__(self,normalize=1.,sech=True)
return None
class BurkertPotentialNoC(BurkertPotential):
def __init__(self):
# Just to force not using C
BurkertPotential.__init__(self)
self.hasC= False
self.hasC_dxdv= False
return None
class oblateHernquistPotential(TriaxialHernquistPotential):
def __init__(self):
TriaxialHernquistPotential.__init__(self,normalize=1.,b=1.,c=.2)
return None
class oblateNFWPotential(TriaxialNFWPotential):
def __init__(self):
TriaxialNFWPotential.__init__(self,normalize=1.,b=1.,c=.2)
return None
class oblatenoGLNFWPotential(TriaxialNFWPotential):
def __init__(self):
TriaxialNFWPotential.__init__(self,normalize=1.,b=1.,c=.2,glorder=None)
return None
class oblateJaffePotential(TriaxialJaffePotential):
def __init__(self):
TriaxialJaffePotential.__init__(self,normalize=1.,b=1.,c=.2)
return None
class prolateHernquistPotential(TriaxialHernquistPotential):
def __init__(self):
TriaxialHernquistPotential.__init__(self,normalize=1.,b=1.,c=1.8)
return None
class prolateNFWPotential(TriaxialNFWPotential):
def __init__(self):
TriaxialNFWPotential.__init__(self,normalize=1.,b=1.,c=1.8)
return None
class prolateJaffePotential(TriaxialJaffePotential):
def __init__(self):
TriaxialJaffePotential.__init__(self,normalize=1.,b=1.,c=1.8)
return None
class rotatingSpiralArmsPotential(SpiralArmsPotential):
def __init__(self):
SpiralArmsPotential.__init__(self, omega=1.1)
class specialSpiralArmsPotential(SpiralArmsPotential):
def __init__(self):
SpiralArmsPotential.__init__(self, omega=1.3, N=4., Cs=[8./3./numpy.pi, 1./2., 8./15./numpy.pi])
class triaxialHernquistPotential(TriaxialHernquistPotential):
def __init__(self):
TriaxialHernquistPotential.__init__(self,normalize=1.,b=1.4,c=0.6)
return None
class triaxialNFWPotential(TriaxialNFWPotential):
def __init__(self):
TriaxialNFWPotential.__init__(self,normalize=1.,b=.2,c=1.8)
return None
class triaxialJaffePotential(TriaxialJaffePotential):
def __init__(self):
TriaxialJaffePotential.__init__(self,normalize=1.,b=0.4,c=0.7)
return None
class zRotatedTriaxialNFWPotential(TriaxialNFWPotential):
def __init__(self):
TriaxialNFWPotential.__init__(self,normalize=1.,b=1.5,c=.2,
zvec=[numpy.sin(0.5),0.,numpy.cos(0.5)])
return None
class yRotatedTriaxialNFWPotential(TriaxialNFWPotential):
def __init__(self):
TriaxialNFWPotential.__init__(self,normalize=1.,b=1.5,c=.2,
pa=0.2)
return None
class fullyRotatedTriaxialNFWPotential(TriaxialNFWPotential):
def __init__(self):
TriaxialNFWPotential.__init__(self,normalize=1.,b=1.5,c=.2,
zvec=[numpy.sin(0.5),0.,numpy.cos(0.5)],
pa=0.2)
return None
class fullyRotatednoGLTriaxialNFWPotential(TriaxialNFWPotential):
def __init__(self):
TriaxialNFWPotential.__init__(self,normalize=1.,b=1.5,c=.2,
zvec=[numpy.sin(0.5),0.,numpy.cos(0.5)],
pa=0.2,glorder=None)
return None
class triaxialLogarithmicHaloPotential(LogarithmicHaloPotential):
def __init__(self):
LogarithmicHaloPotential.__init__(self,normalize=1.,b=0.7,q=0.9,
core=0.5)
return None
def OmegaP(self):
return 0.
# Implementations through TwoPowerTriaxialPotential
class HernquistTwoPowerTriaxialPotential(TwoPowerTriaxialPotential):
def __init__(self):
TwoPowerTriaxialPotential.__init__(self,amp=1.,a=5.,alpha=1.,beta=4.,
b=0.3,c=1.8)
return None
class NFWTwoPowerTriaxialPotential(TwoPowerTriaxialPotential):
def __init__(self):
TwoPowerTriaxialPotential.__init__(self,amp=1.,a=2.,alpha=1.,beta=3.,
b=1.3,c=0.8)
self.isNonAxi= True # to test planar-from-full
return None
class JaffeTwoPowerTriaxialPotential(TwoPowerTriaxialPotential):
def __init__(self):
TwoPowerTriaxialPotential.__init__(self,amp=1.,a=5.,alpha=2.,beta=4.,
b=1.3,c=1.8)
return None
class testNullPotential(NullPotential):
def normalize(self,norm):
pass
# Other DiskSCFPotentials
class sech2DiskSCFPotential(DiskSCFPotential):
def __init__(self):
DiskSCFPotential.__init__(self,
dens=lambda R,z: numpy.exp(-3.*R)\
*1./numpy.cosh(z/2.*27.)**2./4.*27.,
Sigma={'h': 1./3.,
'type': 'exp', 'amp': 1.0},
hz={'type':'sech2','h':1./27.},
a=1.,N=5,L=5)
return None
class expwholeDiskSCFPotential(DiskSCFPotential):
def __init__(self):
# Add a Hernquist potential because otherwise the density near the
# center is zero
from galpy.potential import HernquistPotential
hp= HernquistPotential(normalize=0.5)
DiskSCFPotential.__init__(self,\
dens=lambda R,z: 13.5*numpy.exp(-0.5/(R+10.**-10.)
-3.*R-numpy.fabs(z)*27.)
+hp.dens(R,z),
Sigma={'h': 1./3.,
'type': 'expwhole','amp': 1.0,
'Rhole':0.5},
hz={'type':'exp','h':1./27.},
a=1.,N=5,L=5)
return None
# Same as above, but specify type as 'exp' and give Rhole, to make sure that
# case is handled correctly
class altExpwholeDiskSCFPotential(DiskSCFPotential):
def __init__(self):
# Add a Hernquist potential because otherwise the density near the
# center is zero
from galpy.potential import HernquistPotential
hp= HernquistPotential(normalize=0.5)
DiskSCFPotential.__init__(self,\
dens=lambda R,z: 13.5*numpy.exp(-0.5/(R+10.**-10.)
-3.*R-numpy.fabs(z)*27.)
+hp.dens(R,z),
Sigma={'h': 1./3.,
'type': 'exp','amp': 1.0,
'Rhole':0.5},
hz={'type':'exp','h':1./27.},
a=1.,N=5,L=5)
return None
class nonaxiDiskSCFPotential(DiskSCFPotential):
def __init__(self):
thp= triaxialHernquistPotential()
DiskSCFPotential.__init__(self,\
dens= lambda R,z,phi: 13.5*numpy.exp(-3.*R)\
*numpy.exp(-27.*numpy.fabs(z))
+thp.dens(R,z,phi=phi),
Sigma_amp=[0.5,0.5],
Sigma=[lambda R: numpy.exp(-3.*R),
lambda R: numpy.exp(-3.*R)],
dSigmadR=[lambda R: -3.*numpy.exp(-3.*R),
lambda R: -3.*numpy.exp(-3.*R)],
d2SigmadR2=[lambda R: 9.*numpy.exp(-3.*R),
lambda R: 9.*numpy.exp(-3.*R)],
hz=lambda z: 13.5*numpy.exp(-27.
*numpy.fabs(z)),
Hz=lambda z: (numpy.exp(-27.*numpy.fabs(z))-1.
+27.*numpy.fabs(z))/54.,
dHzdz=lambda z: 0.5*numpy.sign(z)*\
(1.-numpy.exp(-27.*numpy.fabs(z))),
N=5,L=5)
return None
# An axisymmetric FerrersPotential
class mockAxisymmetricFerrersPotential(FerrersPotential):
def __init__(self):
FerrersPotential.__init__(self,normalize=1.,b=1.,c=.2)
return None
class mockInterpRZPotential(interpRZPotential):
def __init__(self):
interpRZPotential.__init__(self,RZPot=MWPotential,
rgrid=(0.01,2.1,101),zgrid=(0.,0.26,101),
logR=True,
interpPot=True,interpRforce=True,
interpzforce=True,interpDens=True)
class mockSnapshotRZPotential(potential.SnapshotRZPotential):
def __init__(self):
# Test w/ equivalent of KeplerPotential: one mass
kp= potential.KeplerPotential(amp=1.)
s= pynbody.new(star=1)
s['mass']= 1./numpy.fabs(kp.Rforce(1.,0.)) #forces vc(1,0)=1
s['eps']= 0.
potential.SnapshotRZPotential.__init__(self,s)
class mockInterpSnapshotRZPotential(potential.InterpSnapshotRZPotential):
def __init__(self):
# Test w/ equivalent of KeplerPotential: one mass
kp= potential.KeplerPotential(amp=1.)
s= pynbody.new(star=1)
s['mass']= 1./numpy.fabs(kp.Rforce(1.,0.)) #forces vc(1,0)=1
s['eps']= 0.
potential.InterpSnapshotRZPotential.__init__(self,s,
rgrid=(0.01,2.,101),
zgrid=(0.,0.3,101),
logR=False,
interpPot=True,
zsym=True)
# Some special cases of 2D, non-axisymmetric potentials, to make sure they
# are covered; need 3 to capture all of the transient behavior
from galpy.potential import (CosmphiDiskPotential, DehnenBarPotential,
EllipticalDiskPotential, HenonHeilesPotential,
SteadyLogSpiralPotential,
TransientLogSpiralPotential)
class mockDehnenBarPotentialT1(DehnenBarPotential):
def __init__(self):
DehnenBarPotential.__init__(self,omegab=1.9,rb=0.4,
barphi=25.*numpy.pi/180.,beta=0.,
tform=0.5,tsteady=0.5,
alpha=0.01,Af=0.04)
class mockDehnenBarPotentialTm1(DehnenBarPotential):
def __init__(self):
DehnenBarPotential.__init__(self,omegab=1.9,rb=0.6,
barphi=25.*numpy.pi/180.,beta=0.,
tform=-1.,tsteady=1.01,
alpha=0.01,Af=0.04)
class mockDehnenBarPotentialTm5(DehnenBarPotential):
def __init__(self):
DehnenBarPotential.__init__(self,omegab=1.9,rb=0.4,
barphi=25.*numpy.pi/180.,beta=0.,
tform=-5.,tsteady=2.,
alpha=0.01,Af=0.04)
class mockCosmphiDiskPotentialnegcp(CosmphiDiskPotential):
def __init__(self):
CosmphiDiskPotential.__init__(self,amp=1.,phib=25.*numpy.pi/180.,
p=1.,phio=0.01,m=1.,rb=0.9,
cp=-0.05,sp=0.05)
class mockCosmphiDiskPotentialnegp(CosmphiDiskPotential):
def __init__(self):
CosmphiDiskPotential.__init__(self,amp=1.,phib=25.*numpy.pi/180.,
p=-1.,phio=0.01,m=1.,
cp=-0.05,sp=0.05)
class mockEllipticalDiskPotentialT1(EllipticalDiskPotential):
def __init__(self):
EllipticalDiskPotential.__init__(self,amp=1.,phib=25.*numpy.pi/180.,
p=1.,twophio=0.02,
tform=0.5,tsteady=1.,
cp=0.05,sp=0.05)
class mockEllipticalDiskPotentialTm1(EllipticalDiskPotential):
def __init__(self):
EllipticalDiskPotential.__init__(self,amp=1.,phib=25.*numpy.pi/180.,
p=1.,twophio=0.02,
tform=-1.,tsteady=None,
cp=-0.05,sp=0.05)
class mockEllipticalDiskPotentialTm5(EllipticalDiskPotential):
def __init__(self):
EllipticalDiskPotential.__init__(self,amp=1.,phib=25.*numpy.pi/180.,
p=1.,twophio=0.02,
tform=-5.,tsteady=-1.,
cp=-0.05,sp=0.05)
class mockSteadyLogSpiralPotentialT1(SteadyLogSpiralPotential):
def __init__(self):
SteadyLogSpiralPotential.__init__(self,amp=1.,omegas=0.65,A=-0.035,
m=2,gamma=numpy.pi/4.,
p=-0.3,
tform=0.5,tsteady=1.)
class mockSteadyLogSpiralPotentialTm1(SteadyLogSpiralPotential):
def __init__(self):
SteadyLogSpiralPotential.__init__(self,amp=1.,omegas=0.65,A=-0.035,
m=2,gamma=numpy.pi/4.,
p=-0.3,
tform=-1.,tsteady=None)
class mockSteadyLogSpiralPotentialTm5(SteadyLogSpiralPotential):
def __init__(self):
SteadyLogSpiralPotential.__init__(self,amp=1.,omegas=0.65,A=-0.035,
m=2,gamma=numpy.pi/4.,
p=-0.3,
tform=-1.,tsteady=-5.)
class mockTransientLogSpiralPotential(TransientLogSpiralPotential):
def __init__(self):
TransientLogSpiralPotential.__init__(self,amp=1.,omegas=0.65,A=-0.035,
m=2,gamma=numpy.pi/4.,
p=-0.3)
##Potentials used for mock SCF
def rho_Zeeuw(R, z=0., phi=0., a=1.):
r, theta, phi = coords.cyl_to_spher(R,z, phi)
return 3./(4*numpy.pi) * numpy.power((a + r),-4.) * a
def axi_density1(R, z=0, phi=0.):
r, theta, phi = coords.cyl_to_spher(R,z, phi)
h = potential.HernquistPotential()
return h.dens(R, z, phi)*(1 + numpy.cos(theta) + numpy.cos(theta)**2.)
def axi_density2(R, z=0, phi=0.):
r, theta, phi = coords.cyl_to_spher(R,z, phi)
return rho_Zeeuw(R,z,phi)*(1 +numpy.cos(theta) + numpy.cos(theta)**2)
def scf_density(R, z=0, phi=0.):
eps = .1
return axi_density2(R,z,phi)*(1 + eps*(numpy.cos(phi) + numpy.sin(phi)))
##Mock SCF class
class mockSCFZeeuwPotential(potential.SCFPotential):
def __init__(self):
Acos, Asin = potential.scf_compute_coeffs_spherical(rho_Zeeuw,2)
potential.SCFPotential.__init__(self,amp=1.,Acos=Acos, Asin=Asin)
class mockSCFNFWPotential(potential.SCFPotential):
def __init__(self):
nfw = potential.NFWPotential()
Acos, Asin = potential.scf_compute_coeffs_spherical(nfw.dens,10)
potential.SCFPotential.__init__(self,amp=1.,Acos=Acos, Asin=Asin)
class mockSCFAxiDensity1Potential(potential.SCFPotential):
def __init__(self):
Acos, Asin = potential.scf_compute_coeffs_axi(axi_density1,10,2)
potential.SCFPotential.__init__(self,amp=1.,Acos=Acos, Asin=Asin)
class mockSCFAxiDensity2Potential(potential.SCFPotential):
def __init__(self):
Acos, Asin = potential.scf_compute_coeffs_axi(axi_density2,10,2)
potential.SCFPotential.__init__(self,amp=1.,Acos=Acos, Asin=Asin)
class mockSCFDensityPotential(potential.SCFPotential):
def __init__(self):
Acos, Asin = potential.scf_compute_coeffs(scf_density,10,10,phi_order=30)
potential.SCFPotential.__init__(self,amp=1.,Acos=Acos, Asin=Asin)
# Test interpSphericalPotential
class mockInterpSphericalPotential(potential.interpSphericalPotential):
def __init__(self):
hp= potential.HomogeneousSpherePotential(normalize=1.,R=1.1)
potential.interpSphericalPotential.__init__(self,rforce=hp,
rgrid=numpy.linspace(0.,1.1,201))
class mockInterpSphericalPotentialwForce(potential.interpSphericalPotential):
def __init__(self):
hp= potential.HomogeneousSpherePotential(normalize=1.,R=1.1)
potential.interpSphericalPotential.__init__(self,
rforce=lambda r: hp.Rforce(r,0.),
Phi0=hp(0.,0.),
rgrid=numpy.linspace(0.,1.1,201))
#Class to test potentials given as lists, so that we can use their methods as a class.
from galpy.potential import (Potential, _isNonAxi, evaluateDensities,
evaluatephitorques, evaluatephizderivs,
evaluateplanarphitorques,
evaluateplanarPotentials, evaluateplanarR2derivs,
evaluateplanarRforces, evaluatePotentials,
evaluateR2derivs, evaluateRforces,
evaluateRzderivs, evaluateSurfaceDensities,
evaluatez2derivs, evaluatezforces,
planarPotential)
class testMWPotential(Potential):
"""Initialize with potential in natural units"""
def __init__(self,potlist=MWPotential):
self._potlist= potlist
Potential.__init__(self,amp=1.)
self.isNonAxi= _isNonAxi(self._potlist)
return None
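    # Each _force/_deriv method below simply forwards to the corresponding
    # evaluate* function applied to the stored list, so a list of
    # potentials can be exercised through the single-Potential test
    # machinery.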
def _evaluate(self,R,z,phi=0,t=0,dR=0,dphi=0):
return evaluatePotentials(self._potlist,R,z,phi=phi,t=t,
dR=dR,dphi=dphi)
def _Rforce(self,R,z,phi=0.,t=0.):
return evaluateRforces(self._potlist,R,z,phi=phi,t=t)
def _phitorque(self,R,z,phi=0.,t=0.):
return evaluatephitorques(self._potlist,R,z,phi=phi,t=t)
def _zforce(self,R,z,phi=0.,t=0.):
return evaluatezforces(self._potlist,R,z,phi=phi,t=t)
def _R2deriv(self,R,z,phi=0.,t=0.):
return evaluateR2derivs(self._potlist,R,z,phi=phi,t=t)
def _z2deriv(self,R,z,phi=0.,t=0.):
return evaluatez2derivs(self._potlist,R,z,phi=phi,t=t)
def _Rzderiv(self,R,z,phi=0.,t=0.):
return evaluateRzderivs(self._potlist,R,z,phi=phi,t=t)
def _phi2deriv(self,R,z,phi=0.,t=0.):
return evaluatePotentials(self._potlist,R,z,phi=phi,t=t,dphi=2)
def _Rphideriv(self,R,z,phi=0.,t=0.):
return evaluatePotentials(self._potlist,R,z,phi=phi,t=t,dR=1,
dphi=1)
def _phizderiv(self,R,z,phi=0.,t=0.):
return evaluatephizderivs(self._potlist,R,z,phi=phi,t=t)
def _dens(self,R,z,phi=0.,t=0.,forcepoisson=False):
return evaluateDensities(self._potlist,R,z,phi=phi,t=t,
forcepoisson=forcepoisson)
def _surfdens(self,R,z,phi=0.,t=0.,forcepoisson=False):
return evaluateSurfaceDensities(self._potlist,R,z,phi=phi,t=t,
forcepoisson=forcepoisson)
def vcirc(self,R):
return potential.vcirc(self._potlist,R)
def normalize(self,norm,t=0.):
self._amp= norm
def OmegaP(self):
return 1.
#Class to test lists of planarPotentials
class testplanarMWPotential(planarPotential):
"""Initialize with potential in natural units"""
def __init__(self,potlist=MWPotential):
self._potlist= [p.toPlanar() for p in potlist if isinstance(p,Potential)]
self._potlist.extend([p for p in potlist if isinstance(p,planarPotential)])
planarPotential.__init__(self,amp=1.)
self.isNonAxi= _isNonAxi(self._potlist)
return None
def _evaluate(self,R,phi=0,t=0,dR=0,dphi=0):
return evaluateplanarPotentials(self._potlist,R,phi=phi,t=t)
def _Rforce(self,R,phi=0.,t=0.):
return evaluateplanarRforces(self._potlist,R,phi=phi,t=t)
def _phitorque(self,R,phi=0.,t=0.):
return evaluateplanarphitorques(self._potlist,R,phi=phi,t=t)
def _R2deriv(self,R,phi=0.,t=0.):
return evaluateplanarR2derivs(self._potlist,R,phi=phi,t=t)
def _phi2deriv(self,R,phi=0.,t=0.):
return evaluateplanarPotentials(self._potlist,R,phi=phi,t=t,dphi=2)
def _Rphideriv(self,R,phi=0.,t=0.):
return evaluateplanarPotentials(self._potlist,R,phi=phi,t=t,dR=1,
dphi=1)
def vcirc(self,R):
return potential.vcirc(self._potlist,R)
def normalize(self,norm,t=0.):
self._amp= norm
def OmegaP(self):
return 1.
class mockFlatEllipticalDiskPotential(testplanarMWPotential):
def __init__(self):
testplanarMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
potential.EllipticalDiskPotential(phib=numpy.pi/2.,p=0.,tform=None,tsteady=None,twophio=14./220.)])
def OmegaP(self):
return 0.
class mockSlowFlatEllipticalDiskPotential(testplanarMWPotential):
def __init__(self):
testplanarMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
potential.EllipticalDiskPotential(phib=numpy.pi/2.,p=0.,twophio=14./220.,tform=1.,tsteady=250.)])
def OmegaP(self):
return 0.
class mockFlatLopsidedDiskPotential(testplanarMWPotential):
def __init__(self):
testplanarMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
potential.LopsidedDiskPotential(phib=numpy.pi/2.,p=0.,phio=10./220.)])
def OmegaP(self):
return 0.
class mockFlatCosmphiDiskPotential(testplanarMWPotential):
def __init__(self):
testplanarMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
potential.CosmphiDiskPotential(phib=numpy.pi/2.,p=0.,phio=10./220.)])
def OmegaP(self):
return 0.
class mockFlatCosmphiDiskwBreakPotential(testplanarMWPotential):
def __init__(self):
testplanarMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
potential.CosmphiDiskPotential(phib=numpy.pi/2.,p=0.,phio=10./220.,rb=0.99,m=6)])
def OmegaP(self):
return 0.
class mockFlatDehnenBarPotential(testMWPotential):
def __init__(self):
testMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
potential.DehnenBarPotential()])
def OmegaP(self):
return self._potlist[1].OmegaP()
class mockSlowFlatDehnenBarPotential(testMWPotential):
def __init__(self):
testMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
potential.DehnenBarPotential(tform=1.,tsteady=250.,rolr=2.5)])
def OmegaP(self):
return self._potlist[1].OmegaP()
class mockFlatSteadyLogSpiralPotential(testplanarMWPotential):
def __init__(self):
testplanarMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
potential.SteadyLogSpiralPotential()])
def OmegaP(self):
return self._potlist[1].OmegaP()
class mockSlowFlatSteadyLogSpiralPotential(testplanarMWPotential):
def __init__(self):
testplanarMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
potential.SteadyLogSpiralPotential(tform=.1,tsteady=25.)])
def OmegaP(self):
return self._potlist[1].OmegaP()
class mockFlatTransientLogSpiralPotential(testplanarMWPotential):
def __init__(self):
testplanarMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
potential.TransientLogSpiralPotential(to=-10.)]) #this way, it's basically a steady spiral
def OmegaP(self):
return self._potlist[1].OmegaP()
class mockFlatSpiralArmsPotential(testMWPotential):
def __init__(self):
testMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
potential.SpiralArmsPotential()])
def OmegaP(self):
return self._potlist[1].OmegaP()
class mockRotatingFlatSpiralArmsPotential(testMWPotential):
def __init__(self):
testMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
potential.SpiralArmsPotential(omega=1.3)])
def OmegaP(self):
return self._potlist[1].OmegaP()
class mockSpecialRotatingFlatSpiralArmsPotential(testMWPotential):
def __init__(self):
testMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
potential.SpiralArmsPotential(omega=1.3, N=4, Cs=[8/3/numpy.pi, 1/2, 8/15/numpy.pi])])
def OmegaP(self):
return self._potlist[1].OmegaP()
#Class to test lists of linearPotentials
from galpy.potential import (RZToverticalPotential, evaluatelinearForces,
evaluatelinearPotentials, linearPotential)
class testlinearMWPotential(linearPotential):
"""Initialize with potential in natural units"""
def __init__(self,potlist=MWPotential):
self._potlist= RZToverticalPotential(potlist,1.)
linearPotential.__init__(self,amp=1.)
return None
def _evaluate(self,R,phi=0,t=0,dR=0,dphi=0):
return evaluatelinearPotentials(self._potlist,R,t=t)
def _force(self,R,t=0.):
return evaluatelinearForces(self._potlist,R,t=t)
def normalize(self,norm,t=0.):
self._amp= norm
class mockCombLinearPotential(testlinearMWPotential):
def __init__(self):
testlinearMWPotential.__init__(self,
potlist=[potential.MWPotential[0],
potential.MWPotential[1].toVertical(1.),
potential.MWPotential[2].toVertical(1.)])
class mockSimpleLinearPotential(testlinearMWPotential):
def __init__(self):
testlinearMWPotential.__init__(self,
potlist=potential.MiyamotoNagaiPotential(normalize=1.).toVertical(1.))
from galpy.potential import PlummerPotential
class mockMovingObjectPotential(testMWPotential):
def __init__(self,rc=0.75,maxt=1.,nt=50):
from galpy.orbit import Orbit
self._rc= rc
o1= Orbit([self._rc,0.,1.,0.,0.,0.])
o2= Orbit([self._rc,0.,1.,0.,0.,numpy.pi])
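        # Two equal point masses on the same circular orbit (flat rotation
        # curve, vc=1), half a turn apart in phase, so the pair rotates
        # rigidly with pattern speed OmegaP = vc/rc = 1/rc (cf. OmegaP
        # below).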
lp= potential.LogarithmicHaloPotential(normalize=1.)
times= numpy.linspace(0.,maxt,nt)
o1.integrate(times,lp,method='dopr54_c')
o2.integrate(times,lp,method='dopr54_c')
self._o1p= potential.MovingObjectPotential(o1)
self._o2p= potential.MovingObjectPotential(o2)
testMWPotential.__init__(self,[self._o1p,self._o2p])
self.isNonAxi= True
return None
def phi2deriv(self,R,z,phi=0.,t=0.):
raise AttributeError
def OmegaP(self):
return 1./self._rc
class mockMovingObjectPotentialExplPlummer(testMWPotential):
def __init__(self,rc=0.75,maxt=1.,nt=50):
from galpy.orbit import Orbit
self._rc= rc
o1= Orbit([self._rc,0.,1.,0.,0.,0.])
o2= Orbit([self._rc,0.,1.,0.,0.,numpy.pi])
lp= potential.LogarithmicHaloPotential(normalize=1.)
times= numpy.linspace(0.,maxt,nt)
o1.integrate(times,lp,method='dopr54_c')
o2.integrate(times,lp,method='dopr54_c')
oplum = potential.PlummerPotential(amp=0.06, b=0.01)
self._o1p= potential.MovingObjectPotential(o1, pot=oplum)
self._o2p= potential.MovingObjectPotential(o2, pot=oplum)
testMWPotential.__init__(self,[self._o1p,self._o2p])
self.isNonAxi= True
return None
def phi2deriv(self,R,z,phi=0.,t=0.):
raise AttributeError
def OmegaP(self):
return 1./self._rc
class mockMovingObjectLongIntPotential(mockMovingObjectPotential):
def __init__(self,rc=0.75):
mockMovingObjectPotential.__init__(self,rc=rc,maxt=15.,nt=3001)
return None
# Classes to test wrappers
from galpy.potential import (AdiabaticContractionWrapperPotential,
CorotatingRotationWrapperPotential,
DehnenSmoothWrapperPotential,
GaussianAmplitudeWrapperPotential,
RotateAndTiltWrapperPotential,
SolidBodyRotationWrapperPotential,
TimeDependentAmplitudeWrapperPotential)
from galpy.potential.WrapperPotential import parentWrapperPotential
class DehnenSmoothDehnenBarPotential(DehnenSmoothWrapperPotential):
# This wrapped potential should be the same as the default DehnenBar
# for t > -99
#
# Need to use __new__ because new Wrappers are created using __new__
def __new__(cls,*args,**kwargs):
if kwargs.get('_init',False):
return parentWrapperPotential.__new__(cls,*args,**kwargs)
dpn= DehnenBarPotential(tform=-100.,tsteady=1.) #on after t=-99
return DehnenSmoothWrapperPotential.__new__(cls,amp=1.,pot=dpn,\
tform=-4.*2.*numpy.pi/dpn.OmegaP())
# Additional DehnenSmooth instances to catch all smoothing cases
class mockDehnenSmoothBarPotentialT1(DehnenSmoothWrapperPotential):
def __new__(cls,*args,**kwargs):
if kwargs.get('_init',False):
return parentWrapperPotential.__new__(cls,*args,**kwargs)
dpn= DehnenBarPotential(omegab=1.9,rb=0.4,
barphi=25.*numpy.pi/180.,beta=0.,
alpha=0.01,Af=0.04,
tform=-99.,tsteady=1.)
return DehnenSmoothWrapperPotential.__new__(cls,amp=1.,pot=dpn,\
# tform=-4.*2.*numpy.pi/dpn.OmegaP())
tform=0.5,tsteady=0.5)
class mockDehnenSmoothBarPotentialTm1(DehnenSmoothWrapperPotential):
def __new__(cls,*args,**kwargs):
if kwargs.get('_init',False):
return parentWrapperPotential.__new__(cls,*args,**kwargs)
dpn= DehnenBarPotential(omegab=1.9,rb=0.4,
barphi=25.*numpy.pi/180.,beta=0.,
alpha=0.01,Af=0.04,
tform=-99.,tsteady=1.)
return DehnenSmoothWrapperPotential.__new__(cls,amp=1.,pot=dpn,\
tform=-1.,tsteady=1.01)
class mockDehnenSmoothBarPotentialTm5(DehnenSmoothWrapperPotential):
def __new__(cls,*args,**kwargs):
if kwargs.get('_init',False):
return parentWrapperPotential.__new__(cls,*args,**kwargs)
dpn= DehnenBarPotential(omegab=1.9,rb=0.4,
barphi=25.*numpy.pi/180.,beta=0.,
alpha=0.01,Af=0.04,
tform=-99.,tsteady=1.)
return DehnenSmoothWrapperPotential.__new__(cls,amp=1.,pot=dpn,\
tform=-5.,tsteady=2.)
class mockDehnenSmoothBarPotentialDecay(DehnenSmoothWrapperPotential):
def __new__(cls,*args,**kwargs):
if kwargs.get('_init',False):
return parentWrapperPotential.__new__(cls,*args,**kwargs)
dpn= DehnenBarPotential(omegab=1.9,rb=0.4,
barphi=25.*numpy.pi/180.,beta=0.,
alpha=0.01,Af=0.04,
tform=-99.,tsteady=1.)
return DehnenSmoothWrapperPotential.__new__(cls,amp=1.,pot=dpn,\
# tform=-4.*2.*numpy.pi/dpn.OmegaP())
tform=-0.5,tsteady=1.,decay=True)
class mockFlatDehnenSmoothBarPotential(testMWPotential):
def __init__(self):
dpn= DehnenBarPotential(omegab=1.9,rb=0.4,
barphi=25.*numpy.pi/180.,beta=0.,
alpha=0.01,Af=0.04,
tform=-99.,tsteady=1.)
testMWPotential.__init__(self,\
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
DehnenSmoothWrapperPotential(\
amp=1.,pot=dpn,tform=-4.*2.*numpy.pi/dpn.OmegaP(),
tsteady=2.*2*numpy.pi/dpn.OmegaP())])
def OmegaP(self):
return self._potlist[1]._pot.OmegaP()
class mockSlowFlatDehnenSmoothBarPotential(testMWPotential):
def __init__(self):
dpn= DehnenBarPotential(omegab=1.9,rb=0.4,
barphi=25.*numpy.pi/180.,beta=0.,
alpha=0.01,Af=0.04,
tform=-99.,tsteady=1.)
testMWPotential.__init__(self,\
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
DehnenSmoothWrapperPotential(\
amp=1.,pot=dpn,tform=0.1,tsteady=500.)])
def OmegaP(self):
return self._potlist[1]._pot.OmegaP()
class mockSlowFlatDecayingDehnenSmoothBarPotential(testMWPotential):
def __init__(self):
dpn= DehnenBarPotential(omegab=1.9,rb=0.4,
barphi=25.*numpy.pi/180.,beta=0.,
alpha=0.01,Af=0.04,
tform=-99.,tsteady=1.)
testMWPotential.__init__(self,\
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
DehnenSmoothWrapperPotential(\
amp=1.,pot=dpn,tform=-250.,tsteady=500.,decay=True)])
def OmegaP(self):
return self._potlist[1]._pot.OmegaP()
# A DehnenSmoothWrapper-ed version of LogarithmicHaloPotential for simple actionAngle tests
class mockSmoothedLogarithmicHaloPotential(DehnenSmoothWrapperPotential):
def __new__(cls,*args,**kwargs):
if kwargs.get('_init',False):
return parentWrapperPotential.__new__(cls,*args,**kwargs)
return DehnenSmoothWrapperPotential.__new__(cls,amp=1.,
pot=potential.LogarithmicHaloPotential(normalize=1.),
tform=-1.,tsteady=0.5)
#SolidBodyWrapperPotential
class SolidBodyRotationSpiralArmsPotential(SolidBodyRotationWrapperPotential):
def __new__(cls,*args,**kwargs):
if kwargs.get('_init',False):
return parentWrapperPotential.__new__(cls,*args,**kwargs)
spn= potential.SpiralArmsPotential(omega=0.,phi_ref=0.)
return SolidBodyRotationWrapperPotential.__new__(cls,amp=1.,
pot=spn.toPlanar(),
omega=1.1,pa=0.4)
class mockFlatSolidBodyRotationSpiralArmsPotential(testMWPotential):
def __init__(self):
testMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
SolidBodyRotationWrapperPotential(amp=1.,pot=potential.SpiralArmsPotential(),omega=1.3)])
def OmegaP(self):
return self._potlist[1].OmegaP()
# Special case to test handling of pure planarWrapper, not necessary for new wrappers
class mockFlatSolidBodyRotationPlanarSpiralArmsPotential(testplanarMWPotential):
def __init__(self):
testplanarMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.).toPlanar(),
SolidBodyRotationWrapperPotential(amp=1.,pot=potential.SpiralArmsPotential().toPlanar(),omega=1.3)])
def OmegaP(self):
return self._potlist[1].OmegaP()
class testorbitHenonHeilesPotential(testplanarMWPotential):
    # Need this class, because orbit tests skip potentials that do not
    # have .normalize, and HenonHeilesPotential, as a non-axisymmetric
    # planarPotential instance, does not
def __init__(self):
testplanarMWPotential.__init__(self,
potlist=[HenonHeilesPotential(amp=1.)])
def OmegaP(self):
# Non-axi, so need to set this to zero for Jacobi
return 0.
#CorotatingWrapperPotential
class CorotatingRotationSpiralArmsPotential(CorotatingRotationWrapperPotential):
def __new__(cls,*args,**kwargs):
if kwargs.get('_init',False):
return parentWrapperPotential.__new__(cls,*args,**kwargs)
spn= potential.SpiralArmsPotential(omega=0.,phi_ref=0.)
return CorotatingRotationWrapperPotential.__new__(cls,amp=1.,
pot=spn.toPlanar(),
vpo=1.1,beta=-0.2,
pa=0.4,to=3.)
class mockFlatCorotatingRotationSpiralArmsPotential(testMWPotential):
# With beta=1 this has a fixed pattern speed --> Jacobi conserved
def __init__(self):
testMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
CorotatingRotationWrapperPotential(amp=1.,pot=potential.SpiralArmsPotential(),vpo=1.3,beta=1.,pa=0.3,to=3.)])
def OmegaP(self):
return 1.3
# beta =/= 1 --> Liouville should still hold!
class mockFlatTrulyCorotatingRotationSpiralArmsPotential(testMWPotential):
    # With beta != 1 this does not have a fixed pattern speed --> only Liouville holds
def __init__(self):
testMWPotential.__init__(self,
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
CorotatingRotationWrapperPotential(amp=1.,pot=potential.SpiralArmsPotential(),vpo=1.3,beta=0.1,pa=-0.3,to=-3.)])
def OmegaP(self):
return 1.3
#GaussianAmplitudeWrapperPotential
class GaussianAmplitudeDehnenBarPotential(GaussianAmplitudeWrapperPotential):
# Need to use __new__ because new Wrappers are created using __new__
def __new__(cls,*args,**kwargs):
if kwargs.get('_init',False):
return parentWrapperPotential.__new__(cls,*args,**kwargs)
dpn= DehnenBarPotential(tform=-100.,tsteady=1.) #on after t=-99
return GaussianAmplitudeWrapperPotential.__new__(cls,amp=1.,pot=dpn,\
to=0.,sigma=1.)
# Basically constant
class mockFlatGaussianAmplitudeBarPotential(testMWPotential):
def __init__(self):
dpn= DehnenBarPotential(omegab=1.9,rb=0.4,
barphi=25.*numpy.pi/180.,beta=0.,
alpha=0.01,Af=0.04,
tform=-99.,tsteady=1.)
testMWPotential.__init__(self,\
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
GaussianAmplitudeWrapperPotential(\
amp=1.,pot=dpn,to=10,sigma=1000000.)])
def OmegaP(self):
return self._potlist[1]._pot.OmegaP()
#For Liouville
class mockFlatTrulyGaussianAmplitudeBarPotential(testMWPotential):
def __init__(self):
dpn= DehnenBarPotential(omegab=1.9,rb=0.4,
barphi=25.*numpy.pi/180.,beta=0.,
alpha=0.01,Af=0.04,
tform=-99.,tsteady=1.)
testMWPotential.__init__(self,\
potlist=[potential.LogarithmicHaloPotential(normalize=1.),
GaussianAmplitudeWrapperPotential(\
amp=1.,pot=dpn,to=10,sigma=1.)])
def OmegaP(self):
return self._potlist[1]._pot.OmegaP()
# A GaussianAmplitudeWrapper-ed version of LogarithmicHaloPotential for simple actionAngle tests
class mockGaussianAmplitudeSmoothedLogarithmicHaloPotential(GaussianAmplitudeWrapperPotential):
def __new__(cls,*args,**kwargs):
if kwargs.get('_init',False):
return parentWrapperPotential.__new__(cls,*args,**kwargs)
return GaussianAmplitudeWrapperPotential.__new__(cls,amp=1.,
pot=potential.LogarithmicHaloPotential(normalize=1.),
to=0.,sigma=100000000000000.)
class nestedListPotential(testMWPotential):
def __init__(self):
testMWPotential.__init__(self,
potlist=[potential.MWPotential2014,potential.SpiralArmsPotential()])
def OmegaP(self):
return self._potlist[1].OmegaP()
class mockAdiabaticContractionMWP14WrapperPotential(AdiabaticContractionWrapperPotential):
def __init__(self):
AdiabaticContractionWrapperPotential.__init__(self,\
pot=potential.MWPotential2014[2],
baryonpot=potential.MWPotential2014[:2],
f_bar=None)
class mockAdiabaticContractionMWP14ExplicitfbarWrapperPotential(AdiabaticContractionWrapperPotential):
def __init__(self):
AdiabaticContractionWrapperPotential.__init__(self,\
pot=potential.MWPotential2014[2],
baryonpot=potential.MWPotential2014[:2],
f_bar=0.1)
def normalize(self,norm):
self._amp*= norm/numpy.fabs(self.Rforce(1.,0.,use_physical=False))
class mockRotatedAndTiltedMWP14WrapperPotential(testMWPotential):
def __init__(self):
testMWPotential.__init__(self,potlist=[\
RotateAndTiltWrapperPotential(pot=potential.MWPotential2014,
zvec=[numpy.sqrt(1/3.),
numpy.sqrt(1/3.),
numpy.sqrt(1/3.)],
galaxy_pa=0.4)])
def OmegaP(self):
return 0.
class mockRotatedAndTiltedMWP14WrapperPotentialwInclination(testMWPotential):
def __init__(self):
testMWPotential.__init__(self,potlist=[\
RotateAndTiltWrapperPotential(pot=potential.MWPotential2014,
inclination=2.,
galaxy_pa=0.3,
sky_pa=None)])
def OmegaP(self):
return 0.
class mockRotatedAndTiltedTriaxialLogHaloPotentialwInclination(testMWPotential):
def __init__(self):
testMWPotential.__init__(self,potlist=[\
RotateAndTiltWrapperPotential(\
pot=potential.LogarithmicHaloPotential(normalize=1.,b=0.7,q=0.5),
inclination=2.,
galaxy_pa=0.3,
sky_pa=None)])
def OmegaP(self):
return 0.
class mockRotatedTiltedOffsetMWP14WrapperPotential(testMWPotential):
def __init__(self):
testMWPotential.__init__(self,potlist=[\
RotateAndTiltWrapperPotential(pot=potential.MWPotential2014,
zvec=[numpy.sqrt(1/3.),
numpy.sqrt(1/3.),
numpy.sqrt(1/3.)],
galaxy_pa=0.4,
offset=[1.,1.,1.]),])
def OmegaP(self):
return 0.
class mockOffsetMWP14WrapperPotential(testMWPotential):
def __init__(self):
testMWPotential.__init__(self,potlist=[\
RotateAndTiltWrapperPotential(pot=potential.MWPotential2014,
zvec=None,
galaxy_pa=None,
offset=[1.,1.,1.]),])
def OmegaP(self):
return 0.
#TimeDependentAmplitudeWrapperPotential
class mockTimeDependentAmplitudeWrapperPotential(TimeDependentAmplitudeWrapperPotential):
# Need to use __new__ because new Wrappers are created using __new__
def __new__(cls,*args,**kwargs):
if kwargs.get('_init',False):
return parentWrapperPotential.__new__(cls,*args,**kwargs)
dpn= DehnenBarPotential(tform=-100.,tsteady=1.) #on after t=-99
dps= DehnenSmoothWrapperPotential(pot=dpn,\
tform=-4.*2.*numpy.pi/dpn.OmegaP())
        return TimeDependentAmplitudeWrapperPotential.__new__(cls,amp=1.,pot=dpn,\
                                                              A=dps._smooth)
# A TimeDependentAmplitudeWrapper-ed version of LogarithmicHaloPotential for simple actionAngle tests
class mockSmoothedLogarithmicHaloPotentialwTimeDependentAmplitudeWrapperPotential(TimeDependentAmplitudeWrapperPotential):
def __new__(cls,*args,**kwargs):
if kwargs.get('_init',False):
return parentWrapperPotential.__new__(cls,*args,**kwargs)
dps= DehnenSmoothWrapperPotential(pot=potential.LogarithmicHaloPotential(normalize=1.),
tform=-1.,tsteady=0.5)
return TimeDependentAmplitudeWrapperPotential.__new__(cls,amp=1.,
pot=potential.LogarithmicHaloPotential(normalize=1.),
A=dps._smooth)
|
{
"content_hash": "9c2b551df8f9803b17716e35a2848a4a",
"timestamp": "",
"source": "github",
"line_count": 5789,
"max_line_length": 355,
"avg_line_length": 57.731041630678874,
"alnum_prop": 0.6570368486408042,
"repo_name": "jobovy/galpy",
"id": "21d0fe4521db1aed78b9354e732c3777f59186ef",
"size": "334285",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_potential.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "479"
},
{
"name": "C",
"bytes": "527986"
},
{
"name": "C++",
"bytes": "16627"
},
{
"name": "Makefile",
"bytes": "423"
},
{
"name": "Python",
"bytes": "4970864"
},
{
"name": "Shell",
"bytes": "1873"
}
],
"symlink_target": ""
}
|
"""Tests for the Google Chrome History database plugin."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import chrome as chrome_formatter
from plaso.lib import eventdata
from plaso.lib import timelib_test
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import chrome
from plaso.parsers.sqlite_plugins import test_lib
class ChromeHistoryPluginTest(test_lib.SQLitePluginTestCase):
"""Tests for the Google Chrome History database plugin."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = chrome.ChromeHistoryPlugin()
def testProcess(self):
"""Tests the Process function on a Chrome History database file."""
test_file = self._GetTestFilePath(['History'])
cache = sqlite.SQLiteCache()
event_queue_consumer = self._ParseDatabaseFileWithPlugin(
self._plugin, test_file, cache)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
    # The History file contains 71 events (69 page visits, 2 file downloads).
self.assertEquals(len(event_objects), 71)
# Check the first page visited entry.
event_object = event_objects[0]
self.assertEquals(
event_object.timestamp_desc, eventdata.EventTimestamp.PAGE_VISITED)
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2011-04-07 12:03:11')
self.assertEquals(event_object.timestamp, expected_timestamp)
expected_url = u'http://start.ubuntu.com/10.04/Google/'
self.assertEquals(event_object.url, expected_url)
expected_title = u'Ubuntu Start Page'
self.assertEquals(event_object.title, expected_title)
expected_msg = (
u'{0:s} ({1:s}) [count: 0] Host: start.ubuntu.com '
u'Visit Source: [SOURCE_FIREFOX_IMPORTED] Type: [LINK - User clicked '
u'a link] (URL not typed directly - no typed count)').format(
expected_url, expected_title)
expected_short = u'{0:s} ({1:s})'.format(expected_url, expected_title)
self._TestGetMessageStrings(event_object, expected_msg, expected_short)
# Check the first file downloaded entry.
event_object = event_objects[69]
self.assertEquals(
event_object.timestamp_desc, eventdata.EventTimestamp.FILE_DOWNLOADED)
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2011-05-23 08:35:30')
self.assertEquals(event_object.timestamp, expected_timestamp)
expected_url = (
u'http://fatloss4idiotsx.com/download/funcats/'
u'funcats_scr.exe')
self.assertEquals(event_object.url, expected_url)
expected_full_path = u'/home/john/Downloads/funcats_scr.exe'
self.assertEquals(event_object.full_path, expected_full_path)
expected_msg = (
u'{0:s} ({1:s}). Received: 1132155 bytes out of: '
u'1132155 bytes.').format(
expected_url, expected_full_path)
expected_short = u'{0:s} downloaded (1132155 bytes)'.format(
expected_full_path)
self._TestGetMessageStrings(event_object, expected_msg, expected_short)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "59ad05669d5d7bae1a775a04605f4f37",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 78,
"avg_line_length": 36.411764705882355,
"alnum_prop": 0.7033925686591276,
"repo_name": "cvandeplas/plaso",
"id": "a47388972fc48cf8b5ed50c8f1025eddbb12ccd1",
"size": "3793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaso/parsers/sqlite_plugins/chrome_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2812257"
},
{
"name": "Shell",
"bytes": "22724"
}
],
"symlink_target": ""
}
|
import os
import subprocess
from Fabricate import util
# TODO: reduce this to a wrapper around os.environ[name].
class Configuration(dict):
def __init__(self, config_dict=None):
if config_dict is None:
config_dict = {}
dict.__init__(self, config_dict)
def __setattr__(self, name, value):
# Raise AttributeError if setting an attribute not already defined.
try:
current_value = dict.__getitem__(self, name)
        except KeyError:
raise AttributeError(name)
dict.__setitem__(self, name, value)
def __getattr__(self, name):
# Raise AttributeError if getting an attribute not already defined.
try:
return dict.__getitem__(self, name)
        except KeyError:
raise AttributeError(name)
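# Usage sketch (hypothetical values, for illustration only): attribute
# access is backed by the underlying dict, and unknown names raise
# AttributeError rather than KeyError:
#   cfg = Configuration({'OSName': 'linux'})
#   cfg.OSName              # -> 'linux'
#   cfg.OSName = 'darwin'   # allowed: the key already exists
#   cfg.NewKey = 1          # raises AttributeError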
_CONFIG = {
# Version information.
'Zed5MajorVersion': int(os.environ['ZED5_MAJOR']),
'Zed5MinorVersion': int(os.environ['ZED5_MINOR']),
'Zed5Revision': int(os.environ['ZED5_REV']),
'Zed5Version': os.environ['ZED5_VERSION'],
# This platform.
'OSName': os.environ['OS_NAME'],
'OSVersion': os.environ['OS_VERS'],
'Processor': os.environ['OS_ARCH'],
'Platform': os.environ['PLATFORM'],
'TopDir': os.environ['TOPDIR'],
'SourceDir': os.environ['CONFIG_SRCDIR'],
'OutputDir': os.environ['CONFIG_OUTPUTDIR'],
'PlatformDir': os.environ['CONFIG_PLATFORMDIR'],
'ObjBaseDir': os.environ['CONFIG_OBJBASEDIR'],
'PreInstallDir': os.environ['CONFIG_PREINSTALLDIR'],
'EtcDir': os.environ['CONFIG_ETCDIR'],
'BinDir': os.environ['CONFIG_BINDIR'],
'LibDir': os.environ['CONFIG_LIBDIR'],
'IncludeDir': os.environ['CONFIG_INCLUDEDIR'],
'IncludeZed5Dir': os.environ['CONFIG_INCLUDEZED5DIR'],
# Build settings -- defaults to working on Linux and MacOS X.
'Cxx': os.environ['CXX'],
'CxxFlagsCommon': os.environ['CXX_FLAGS'],
    'CxxFlagsObject': '-c -DMOD_EXT="z5m"',
'ExtraLibsCommon': '-lpthread',
'LinkBinary': os.environ['LINK_EXE'],
'LinkBinaryFlags': os.environ['LINK_EXE_FLAGS'],
'LLP': 'LD_LIBRARY_PATH',
'LinkLibraryShared': os.environ['LINK_SHARED'],
'LinkLibrarySharedFlags': os.environ['LINK_SHARED_FLAGS'],
'LibrarySharedExtension': os.environ['LINK_SHARED_EXT'],
'LinkLibraryStatic': os.environ['LINK_STATIC'],
'LinkLibraryStaticFlags': os.environ['LINK_STATIC_FLAGS'],
'LibraryStaticExtension': os.environ['LINK_STATIC_EXT'],
}
_config = None
def GetConfig():
global _config
if _config is None:
config_dict = {}
config_dict.update(_CONFIG)
config_dict.update(PlatformAndExternalOverrides(config_dict))
_config = Configuration(config_dict)
return _config
###########################################################################
# Configuration functions.
###########################################################################
def PlatformAndExternalOverrides(config, args=None):
# FIXME(ek): allow external changes via 'args'
# Fixup "LD_LIBRARY_PATH" variable name, if necessary.
if 'aix' == config['OSName']:
config['LLP'] = 'LIBPATH'
elif 'darwin' == config['OSName']:
config['LLP'] = 'DYLD_LIBRARY_PATH'
elif 'hpux' == config['OSName']:
config['LLP'] = 'SHLIB_PATH'
if config['OSName'] not in ['darwin', 'freebsd', 'openbsd']:
config['ExtraLibsCommon'] += ' -ldl -lrt'
if 'sunos' == config['OSName']:
config['ExtraLibsCommon'] += ' -lsocket'
config['ExtraLibsCommon'] += ' -lnsl'
return config
|
{
"content_hash": "daf70b16faa294c8c2e8bf8e77186388",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 75,
"avg_line_length": 30.45689655172414,
"alnum_prop": 0.6159071610529295,
"repo_name": "ekline/zed5",
"id": "7cc393ffae45c1c6bae82e55f84c5b9ec5174bce",
"size": "3701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Build/Fabricate/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1137"
},
{
"name": "C++",
"bytes": "1172917"
},
{
"name": "Makefile",
"bytes": "13144"
},
{
"name": "Python",
"bytes": "71694"
}
],
"symlink_target": ""
}
|
import unittest
from pyparsing import ParseException
from cwr.grammar.field import basic
"""
Tests for Time (T) fields.
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestTimeName(unittest.TestCase):
def test_name_default(self):
"""
Tests that the default field name is correct for optional fields.
"""
field = basic.time()
self.assertEqual('Time Field', field.name)
def test_name_set(self):
"""
Tests that the given field name is set correctly for optional fields.
"""
name = "Field Name"
field = basic.time(name=name)
self.assertEqual(name, field.name)
def test_name_set_no_changes(self):
"""
Tests that the field name does not change for creating a new one
"""
field1 = basic.time(name='field1')
field2 = basic.time(name='field2')
self.assertEqual('field1', field1.name)
self.assertEqual('field2', field2.name)
class TestTimeValid(unittest.TestCase):
"""
Tests that the date field accepts and parse valid values.
"""
def setUp(self):
self.time = basic.time()
def test_common(self):
"""
Tests that the time field accepts a valid time.
"""
result = self.time.parseString('201510')[0]
self.assertEqual(20, result.hour)
self.assertEqual(15, result.minute)
self.assertEqual(10, result.second)
def test_max(self):
"""
Tests that the time field accepts the highest possible time.
"""
result = self.time.parseString('235959')[0]
self.assertEqual(23, result.hour)
self.assertEqual(59, result.minute)
self.assertEqual(59, result.second)
def test_min(self):
"""
Tests that the time field accepts the lowest possible time.
"""
result = self.time.parseString('000000')[0]
self.assertEqual(0, result.hour)
self.assertEqual(0, result.minute)
self.assertEqual(0, result.second)
class TestTimeException(unittest.TestCase):
"""
Tests that exceptions are thrown when using invalid values
"""
def setUp(self):
self.time = basic.time()
def test_wrong_hour(self):
"""
Tests that an exception is thrown when the hour is invalid.
"""
self.assertRaises(ParseException, self.time.parseString, '241122')
def test_wrong_minute(self):
"""
Tests that an exception is thrown when the minute is invalid.
"""
self.assertRaises(ParseException, self.time.parseString, '206022')
def test_wrong_second(self):
"""
Tests that an exception is thrown when the second is invalid.
"""
self.assertRaises(ParseException, self.time.parseString, '203060')
def test_empty(self):
"""
Tests that an exception is thrown when the string is empty.
"""
self.assertRaises(ParseException, self.time.parseString, '')
def test_spaces_head(self):
"""
Tests that an exception is thrown when the string is headed by spaces.
"""
self.assertRaises(ParseException, self.time.parseString, ' 203020')
def test_too_short(self):
"""
Tests that an exception is thrown when the string is too short.
"""
self.assertRaises(ParseException, self.time.parseString, '03020')
|
{
"content_hash": "0918a31a3bae3802682b7d94a4d6c2b2",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 78,
"avg_line_length": 27.634920634920636,
"alnum_prop": 0.6148765077541642,
"repo_name": "weso/CWR-DataApi",
"id": "01f4491d8695c12c72461f641ffb617c0a67ce61",
"size": "3507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/grammar/field/test_time.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3830"
},
{
"name": "Makefile",
"bytes": "2366"
},
{
"name": "Python",
"bytes": "997385"
}
],
"symlink_target": ""
}
|
import comm_client
import model
from kivy.app import App
import gui
from threading import Thread
import time
DEBUG = 1
class Controller(App):
title = 'Router Performance Client'
def __init__(self):
super(Controller,self).__init__()
self.SERVERIP = '0'
self.communication = comm_client.CommClient(self)
self.model = model.Model()
self.sToDo = []
self.ACTIVE = True
self.STATE = 'DISCONNECTED'
def runTask(self, message):
self.taskToDo = self.parseMessage(message)
        if self.taskToDo is not None:
task = self.model.getTask(self.taskToDo[0])
self.taskToDo.pop(0)
self.gui.updateExecutionStatus(task)
task.execute(self, self.taskToDo)
def getResults(self):
resultdict = {}
j = 0
for i in self.model.taskList:
if i.RESULT != None:
resultdict[j] = i.RESULT
i.RESULT = None
j+=1
return resultdict
def setServerDisabledTasks(self, mlist):
if mlist != "None":
tlist = mlist.split(',')
for i in tlist:
self.model.removeTask(int(i))
def receiveMessageFromServer(self):
try:
message = self.communication.receiveMessage()
return message
except:
raise
def receiveImageFromServer(self):
try:
img_data = self.communication.receiveImage()
return img_data
except:
raise
def sendMessageToServer(self, msg):
try:
self.communication.sendMessage(msg)
except:
raise
def sendTasksToServer(self, mylist):
try:
id_list = ''
for i in mylist:
id_list+=str(self.model.getTaskIDByName(i))+','
id_list = id_list[:-1]
print id_list
if self.communication.ISCONNECTED:
self.sendMessageToServer(id_list)
except:
print "[Send Tasks to Server Error] "
self.STATE="DISCONNECTED"
def parseMessage(self, message):
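        # Messages are semicolon-separated fields, e.g. "3;arg1;arg2",
        # where the first field is the numeric task id (format inferred
        # from how runTask consumes the parsed list).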
orders = None
try:
orders = message.split(';')
orders[0] = int(orders[0])
        except ValueError, e:
            print "[Unable to parse message] "+str(e)
            # Return None on a malformed message rather than a
            # half-parsed list of strings.
            orders = None
        finally:
return orders
def build(self):
self.gui = gui.GUI()
return self.gui
def main_loop():
while cr.ACTIVE:
print "STATE: "+cr.STATE
if cr.communication.ISCONNECTED == False:
            cr.STATE = 'DISCONNECTED'
if cr.STATE == 'DISCONNECTED':
if not cr.communication.ISCONNECTED:
time.sleep(2)
else:
cr.STATE = 'CONNECTED'
elif cr.STATE == 'CONNECTED':
msg = ''
cr.gui.connectpopup.title='Getting information from server'
try:
print "Getting disabled tasks"
msg=cr.receiveMessageFromServer()
print "Finished"
cr.setServerDisabledTasks(msg)
except Exception, e:
print "[Main Loop Running State Error] "+str(e)
cr.STATE="DISCONNECTED"
cr.gui.connectpopup.dismiss()
continue
cr.gui.switchToTab("Tasks")
cr.gui.connectpopup.dismiss()
cr.STATE = 'IDLE'
elif cr.STATE == 'IDLE':
time.sleep(2)
elif cr.STATE == 'RUNNING':
msg = ''
counter = 0
cr.gui.updateProgressBar(counter)
while msg != "end":
try:
msg = cr.receiveMessageFromServer()
except Exception, e:
print "[Main Loop Running State Error] "+str(e)
cr.STATE="DISCONNECTED"
continue
if DEBUG:
print "Received message from Server: "+msg
if msg != "end":
counter += 1
cr.runTask(msg)
cr.gui.updateProgressBar(counter)
if msg == "end":
cr.gui.getResults()
cr.gui.switchToTab("Results")
cr.STATE = 'RESULT'
elif cr.STATE == 'RESULT':
time.sleep(2)
cr.gui.setConnectionStatus(cr.communication.ISCONNECTED)
if __name__ == '__main__':
try:
cr = Controller()
thread = Thread(target=main_loop, args=())
thread.start()
cr.run()
except Exception, e:
print str(e)
finally:
cr.ACTIVE = False
if cr.communication.ISCONNECTED:
cr.sendMessageToServer("disconnect")
cr.communication.disconnectFromServer()
|
{
"content_hash": "a4ccfee6c0e8c9f212a861f5daac57a6",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 71,
"avg_line_length": 26.977777777777778,
"alnum_prop": 0.5137973640856672,
"repo_name": "Spe3do/benchmark",
"id": "6dff5f30a1018e65e41791e6302ec665dd4c04df",
"size": "4927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/client/client.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3749"
},
{
"name": "Python",
"bytes": "112058"
},
{
"name": "Shell",
"bytes": "9334"
}
],
"symlink_target": ""
}
|
import logging; log = logging.getLogger(__name__)
try:
import simplejson as json
except ImportError:
import json
import base64
import hashlib
import hmac
import inspect
import string
import time
import urllib
# 3rd party libraries that might not be present during initial install
# but we need to import for the version #
try:
import httplib2
import poster
except ImportError:
pass
__version__ = '20120430'
__author__ = u'Mike Lewis'
API_ENDPOINT = 'http://api.singleplatform.co'
# Number of times to retry http requests
NUM_REQUEST_RETRIES = 3
# Generic SinglePlatform exception
class SinglePlatformException(Exception): pass
error_types = {
}
def b64_key_to_binary(key):
"""Convert a base64 encoded key to binary"""
padding_factor = (4 - len(key) % 4) % 4
key += "=" * padding_factor
return base64.b64decode(unicode(key).translate(dict(zip(map(ord, u'-_'), u'+/'))))
class SinglePlatform(object):
"""SinglePlatform API wrapper"""
def __init__(self, client_id, signing_key, api_key=None):
"""Sets up the api object"""
binary_key = b64_key_to_binary(signing_key)
# Set up endpoints
self.base_requester = self.Requester(client_id, binary_key, api_key)
# Dynamically enable endpoints
self._attach_endpoints()
def _attach_endpoints(self):
"""Dynamically attach endpoint callables to this client"""
for name, endpoint in inspect.getmembers(self):
if inspect.isclass(endpoint) and issubclass(endpoint, self._Endpoint) and (endpoint is not self._Endpoint):
endpoint_instance = endpoint(self.base_requester)
setattr(self, endpoint_instance.endpoint, endpoint_instance)
class Requester(object):
"""Api requesting object"""
def __init__(self, client_id, binary_key, api_key=None):
"""Sets up the api object"""
self.api_key = api_key
self.client_id = client_id
self.binary_key = binary_key
def GET(self, path, params=None):
"""GET request that returns processed data"""
if not params: params = {}
# Attach the client id
params['client'] = self.client_id
# Get the uri and it's corresponding signature
relative_uri = self.build_uri(path, params)
params['sig'] = self.sign_uri(relative_uri)
# Include the API key if provided
if self.api_key:
params['apiKey'] = self.api_key
# Make the request, including the sig
final_uri = u'{API_ENDPOINT}{signed_uri}'.format(
API_ENDPOINT=API_ENDPOINT,
signed_uri=self.build_uri(path, params)
)
log.debug(u'GET url: {0}'.format(final_uri))
return _request_with_retry(final_uri)
def build_uri(self, path, params=None):
"""Construct a url to use"""
_params = {}
if params:
_params.update(params)
return '{path}?{params}'.format(
path=path,
params=urllib.urlencode(_params)
)
def sign_uri(self, uri):
"""Sign this uri"""
digest = hmac.new(self.binary_key, uri, hashlib.sha1).digest()
digest = base64.b64encode(digest)
digest = digest.translate(string.maketrans('+/', '-_'))
return digest.rstrip('=')
class _Endpoint(object):
"""Generic endpoint class"""
def __init__(self, requester):
"""Stores the request function for retrieving data"""
self.requester = requester
def _expanded_path(self, path=None):
"""Gets the expanded path, given this endpoint"""
return '/{expanded_path}'.format(
expanded_path='/'.join(p for p in (self.endpoint, path) if p)
)
def GET(self, path=None, *args, **kwargs):
"""Use the requester to get the data"""
return self.requester.GET(self._expanded_path(path), *args, **kwargs)
class Restaurants(_Endpoint):
"""Restaurant specific endpoint"""
endpoint = 'restaurants'
def search(self, params):
"""https://singleplatform.jira.com/wiki/display/PubDocs/SinglePlatform+Publisher+Integration#SinglePlatformPublisherIntegration-URIrestaurantssearch"""
return self.GET('search', params)
def location(self, LOCATION):
"""https://singleplatform.jira.com/wiki/display/PubDocs/SinglePlatform+Publisher+Integration#SinglePlatformPublisherIntegration-URIrestaurantsLOCATION"""
return self.GET('{LOCATION}'.format(LOCATION=LOCATION))
def menu(self, LOCATION):
"""https://singleplatform.jira.com/wiki/display/PubDocs/SinglePlatform+Publisher+Integration#SinglePlatformPublisherIntegration-URIrestaurantsLOCATIONmenu"""
return self.GET('{LOCATION}/menu'.format(LOCATION=LOCATION))
def shortmenu(self, LOCATION):
"""https://singleplatform.jira.com/wiki/display/PubDocs/SinglePlatform+Publisher+Integration#SinglePlatformPublisherIntegration-URIrestaurantsLOCATIONshortmenu"""
return self.GET('{LOCATION}/shortmenu'.format(LOCATION=LOCATION))
"""
Network helper functions
"""
def _request_with_retry(url, data=None):
"""Tries to load data from an endpoint using retries"""
for i in xrange(NUM_REQUEST_RETRIES):
try:
return _process_request_with_httplib2(url, data)
except SinglePlatformException, e:
# Some errors don't bear repeating
if e.__class__ in []: raise
if ((i + 1) == NUM_REQUEST_RETRIES): raise
time.sleep(1)
def _process_request_with_httplib2(url, data=None):
"""Make the request and handle exception processing"""
try:
h = httplib2.Http()
if data:
datagen, headers = poster.encode.multipart_encode(data)
data = ''.join(datagen)
method = 'POST'
else:
headers = {}
method = 'GET'
headers['Accept'] = u'application/json'
response, body = h.request(url, method, headers=headers, body=data)
data = _json_to_data(body)
# Default case, Got proper response
if response.status == 200:
return data
return _check_response(data)
except httplib2.HttpLib2Error, e:
log.error(e)
raise SinglePlatformException(u'Error connecting with SinglePlatform API')
def _json_to_data(s):
"""Convert a response string to data"""
try:
return json.loads(s)
except ValueError, e:
log.error('Invalid response: {0}'.format(e))
raise SinglePlatformException(e)
def _check_response(data):
"""Processes the response data"""
if data.get('ok') == u'true': return data
exc = error_types.get(data.get('status'))
if exc:
raise exc(data.get('status'))
else:
log.error(u'Unknown error type: {0}'.format(data.get('status')))
raise SinglePlatformException(data.get('status'))
|
{
"content_hash": "50ba5be2eccb0ce4dba6b2a107708d4f",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 174,
"avg_line_length": 34.12380952380953,
"alnum_prop": 0.6148478928272397,
"repo_name": "mLewisLogic/singleplatform",
"id": "8e357c0d2de209660712454ac0f6fa8cc63adef1",
"size": "7234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "singleplatform/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9921"
}
],
"symlink_target": ""
}
|
import sys
import numpy as np
import pytest
from taichi.lang.util import has_pytorch
import taichi as ti
from tests import test_utils
if has_pytorch():
import torch
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test(arch=ti.cuda)
def test_torch_cuda_context():
device = torch.device("cuda:0")
x = torch.tensor([2.], requires_grad=True, device=device)
assert torch._C._cuda_hasPrimaryContext(0)
loss = x**2
loss.backward()
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test()
def test_torch_ad():
n = 32
x = ti.field(ti.f32, shape=n, needs_grad=True)
y = ti.field(ti.f32, shape=n, needs_grad=True)
@ti.kernel
def torch_kernel():
for i in range(n):
# Do whatever complex operations here
y[n - i - 1] = x[i] * x[i]
class Sqr(torch.autograd.Function):
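        # Bridges the Taichi kernel into PyTorch autograd: forward copies
        # the torch tensor into the Taichi field, runs the kernel, and
        # returns the result as a torch tensor; backward replays the
        # kernel's reverse-mode gradient pass into the input's grad.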
@staticmethod
def forward(ctx, inp):
x.from_torch(inp)
torch_kernel()
outp = y.to_torch()
return outp
@staticmethod
def backward(ctx, outp_grad):
ti.clear_all_gradients()
y.grad.from_torch(outp_grad)
torch_kernel.grad()
inp_grad = x.grad.to_torch()
return inp_grad
sqr = Sqr.apply
for i in range(10):
X = torch.tensor(2 * np.ones((n, ), dtype=np.float32),
requires_grad=True)
sqr(X).sum().backward()
ret = X.grad.cpu().numpy()
for j in range(n):
assert ret[j] == 4
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@pytest.mark.skipif(sys.platform == 'win32', reason='not working on Windows.')
# FIXME: crashes at glCreateShader when arch=ti.opengl
@test_utils.test(exclude=ti.opengl)
def test_torch_ad_gpu():
if not torch.cuda.is_available():
return
device = torch.device('cuda:0')
n = 32
x = ti.field(ti.f32, shape=n, needs_grad=True)
y = ti.field(ti.f32, shape=n, needs_grad=True)
@ti.kernel
def torch_kernel():
for i in range(n):
# Do whatever complex operations here
y[n - i - 1] = x[i] * x[i]
class Sqr(torch.autograd.Function):
@staticmethod
def forward(ctx, inp):
x.from_torch(inp)
torch_kernel()
outp = y.to_torch(device=device)
return outp
@staticmethod
def backward(ctx, outp_grad):
ti.clear_all_gradients()
y.grad.from_torch(outp_grad)
torch_kernel.grad()
inp_grad = x.grad.to_torch(device=device)
return inp_grad
sqr = Sqr.apply
for i in range(10):
X = torch.tensor(2 * np.ones((n, ), dtype=np.float32),
requires_grad=True,
device=device)
sqr(X).sum().backward()
ret = X.grad.cpu().numpy()
for j in range(n):
assert ret[j] == 4
|
{
"content_hash": "4e09887c2ddff9290bf9b512fc2fc3ee",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 78,
"avg_line_length": 27.944444444444443,
"alnum_prop": 0.5646123260437376,
"repo_name": "yuanming-hu/taichi",
"id": "b7a6e5cf7c8bb10874087323455cc462bd58c971",
"size": "3018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/test_torch_ad.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "66677"
},
{
"name": "C++",
"bytes": "3713898"
},
{
"name": "CMake",
"bytes": "69354"
},
{
"name": "Cuda",
"bytes": "20566"
},
{
"name": "GLSL",
"bytes": "10756"
},
{
"name": "Makefile",
"bytes": "994"
},
{
"name": "PowerShell",
"bytes": "9227"
},
{
"name": "Python",
"bytes": "2209929"
},
{
"name": "Shell",
"bytes": "12216"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scripts.assemblies import shielded_true_image
from scripts.utils import display_estimate
def display_transmission_sinogram():
import numpy as np
from pytracer import geometry as geo
radians = np.linspace(0, np.pi, 100)
arc_radians = np.linspace(-np.pi / 8, np.pi / 8, 100)
source, detector_points, extent = geo.fan_beam_paths(60, arc_radians, radians, extent=True)
measurement = transmission.scan(assembly_flat, source, detector_points)
plt.figure(figsize=(9, 6))
ax = plt.gca()
im = ax.imshow(np.exp(-measurement), interpolation='none', extent=extent, cmap='viridis')
plt.title('Transmission Sinogram', size=20)
plt.xlabel(r'Detector Angle $\theta$ (rad)', size=18)
plt.ylabel(r'Neutron Angle $\phi$ (rad)', size=18)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cb = plt.colorbar(im, cax=cax)
cb.set_label(r'$P_{transmission}$', size=18, labelpad=15)
plt.tight_layout()
plt.show()
if __name__ == '__main__':
true_image = shielded_true_image()
display_estimate(true_image)
plt.show()
display_transmission_sinogram()
|
{
"content_hash": "a1eaa5422c09bca0f2c70a4753488af6",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 95,
"avg_line_length": 29.88095238095238,
"alnum_prop": 0.6796812749003984,
"repo_name": "abnowack/PyTracer",
"id": "259c83a3581ea656bdbcd9081ba86861c357994a",
"size": "1255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/tests/refactor_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38620"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import textwrap
import pkgutil
import copy
import os
import json
from functools import reduce
try:
from math import gcd
except ImportError:
# Python 2
from fractions import gcd
# Create Lazy sentinal object to indicate that a template should be loaded
# on-demand from package_data
Lazy = object()
# Templates configuration class
# -----------------------------
class TemplatesConfig(object):
"""
Singleton object containing the current figure templates (aka themes)
"""
def __init__(self):
# Initialize properties dict
self._templates = {}
# Initialize built-in templates
default_templates = [
"ggplot2",
"seaborn",
"simple_white",
"plotly",
"plotly_white",
"plotly_dark",
"presentation",
"xgridoff",
"ygridoff",
"gridon",
"none",
]
for template_name in default_templates:
self._templates[template_name] = Lazy
self._validator = None
self._default = None
# ### Magic methods ###
# Make this act as a dict of templates
def __len__(self):
return len(self._templates)
def __contains__(self, item):
return item in self._templates
def __iter__(self):
return iter(self._templates)
def __getitem__(self, item):
if isinstance(item, str):
template_names = item.split("+")
else:
template_names = [item]
templates = []
for template_name in template_names:
template = self._templates[template_name]
if template is Lazy:
from plotly.graph_objs.layout import Template
if template_name == "none":
# "none" is a special built-in named template that applied no defaults
template = Template(data_scatter=[{}])
self._templates[template_name] = template
else:
# Load template from package data
path = os.path.join(
"package_data", "templates", template_name + ".json"
)
template_str = pkgutil.get_data("plotly", path).decode("utf-8")
template_dict = json.loads(template_str)
template = Template(template_dict, _validate=False)
self._templates[template_name] = template
templates.append(self._templates[template_name])
return self.merge_templates(*templates)
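    # Lookup sketch: templates["plotly+xgridoff"] lazily loads both named
    # templates and returns their merge, with the right-most template
    # taking precedence where they overlap.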
def __setitem__(self, key, value):
self._templates[key] = self._validate(value)
def __delitem__(self, key):
# Remove template
del self._templates[key]
# Check if we need to remove it as the default
if self._default == key:
self._default = None
def _validate(self, value):
if not self._validator:
from plotly.validators.layout import TemplateValidator
self._validator = TemplateValidator()
return self._validator.validate_coerce(value)
def keys(self):
return self._templates.keys()
def items(self):
return self._templates.items()
def update(self, d={}, **kwargs):
"""
Update one or more templates from a dict or from input keyword
arguments.
Parameters
----------
d: dict
Dictionary from template names to new template values.
kwargs
Named argument value pairs where the name is a template name
and the value is a new template value.
"""
for k, v in dict(d, **kwargs).items():
self[k] = v
# ### Properties ###
@property
def default(self):
"""
        The name of the default template, or None if there is no default.
If not None, the default template is automatically applied to all
figures during figure construction if no explicit template is
specified.
The names of available templates may be retrieved with:
>>> import plotly.io as pio
>>> list(pio.templates)
Returns
-------
str
"""
return self._default
@default.setter
def default(self, value):
# Validate value
# Could be a Template object, the key of a registered template,
# Or a string containing the names of multiple templates joined on
# '+' characters
self._validate(value)
self._default = value
def __repr__(self):
return """\
Templates configuration
-----------------------
Default template: {default}
Available templates:
{available}
""".format(
default=repr(self.default), available=self._available_templates_str()
)
def _available_templates_str(self):
"""
Return nicely wrapped string representation of all
available template names
"""
available = "\n".join(
textwrap.wrap(
repr(list(self)),
width=79 - 8,
initial_indent=" " * 8,
subsequent_indent=" " * 9,
)
)
return available
def merge_templates(self, *args):
"""
Merge a collection of templates into a single combined template.
        Templates are processed from left to right, so if multiple
        templates specify the same property, the right-most template
        takes precedence.
Parameters
----------
args: list of Template
Zero or more template objects (or dicts with compatible properties)
Returns
-------
template:
A combined template object
Examples
--------
>>> pio.templates.merge_templates(
... go.layout.Template(layout={'font': {'size': 20}}),
... go.layout.Template(data={'scatter': [{'mode': 'markers'}]}),
... go.layout.Template(layout={'font': {'family': 'Courier'}}))
layout.Template({
'data': {'scatter': [{'mode': 'markers', 'type': 'scatter'}]},
'layout': {'font': {'family': 'Courier', 'size': 20}}
})
"""
if args:
return reduce(self._merge_2_templates, args)
else:
from plotly.graph_objs.layout import Template
return Template()
def _merge_2_templates(self, template1, template2):
"""
Helper function for merge_templates that merges exactly two templates
Parameters
----------
template1: Template
template2: Template
Returns
-------
Template:
merged template
"""
# Validate/copy input templates
result = self._validate(template1)
other = self._validate(template2)
# Cycle traces
for trace_type in result.data:
result_traces = result.data[trace_type]
other_traces = other.data[trace_type]
if result_traces and other_traces:
lcm = (
len(result_traces)
* len(other_traces)
// gcd(len(result_traces), len(other_traces))
)
# Cycle result traces
result.data[trace_type] = result_traces * (lcm // len(result_traces))
# Cycle other traces
other.data[trace_type] = other_traces * (lcm // len(other_traces))
# Perform update
result.update(other)
return result
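    # Illustrative note on the trace cycling above: merging a template with two
    # scatter traces into one with three repeats both lists out to
    # lcm(2, 3) == 6 entries, so result.update(other) pairs them one-to-one.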
# Make config a singleton object
# ------------------------------
templates = TemplatesConfig()
del TemplatesConfig
# Template utilities
# ------------------
def walk_push_to_template(fig_obj, template_obj, skip):
"""
Move style properties from fig_obj to template_obj.
Parameters
----------
fig_obj: plotly.basedatatypes.BasePlotlyType
template_obj: plotly.basedatatypes.BasePlotlyType
skip: set of str
Set of names of properties to skip
"""
from _plotly_utils.basevalidators import (
CompoundValidator,
CompoundArrayValidator,
is_array,
)
for prop in list(fig_obj._props):
if prop == "template" or prop in skip:
# Avoid infinite recursion
continue
fig_val = fig_obj[prop]
template_val = template_obj[prop]
validator = fig_obj._get_validator(prop)
if isinstance(validator, CompoundValidator):
walk_push_to_template(fig_val, template_val, skip)
if not fig_val._props:
# Check if we can remove prop itself
fig_obj[prop] = None
elif isinstance(validator, CompoundArrayValidator) and fig_val:
template_elements = list(template_val)
template_element_names = [el.name for el in template_elements]
template_propdefaults = template_obj[prop[:-1] + "defaults"]
for fig_el in fig_val:
element_name = fig_el.name
if element_name:
# No properties are skipped inside a named array element
skip = set()
if fig_el.name in template_element_names:
item_index = template_element_names.index(fig_el.name)
template_el = template_elements[item_index]
walk_push_to_template(fig_el, template_el, skip)
else:
template_el = fig_el.__class__()
walk_push_to_template(fig_el, template_el, skip)
template_elements.append(template_el)
template_element_names.append(fig_el.name)
# Restore element name
# since it was pushed to template above
fig_el.name = element_name
else:
walk_push_to_template(fig_el, template_propdefaults, skip)
template_obj[prop] = template_elements
elif not validator.array_ok or not is_array(fig_val):
# Move property value from figure to template
template_obj[prop] = fig_val
try:
fig_obj[prop] = None
except ValueError:
# Property cannot be set to None, move on.
pass
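# Illustrative sketch of the walk above (not part of the original module):
# walk_push_to_template(fig.layout, fig.layout.template.layout, skip=set())
# moves e.g. fig.layout.font.size into the template and resets the figure
# value to None, so the template default shows through afterwards.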
def to_templated(fig, skip=("title", "text")):
"""
Return a copy of a figure where all styling properties have been moved
into the figure's template. The template property of the resulting figure
may then be used to set the default styling of other figures.
Parameters
----------
fig
Figure object or dict representing a figure
skip
A collection of names of properties to skip when moving properties to
the template. Defaults to ('title', 'text') so that the text
of figure titles, axis titles, and annotations does not become part of
the template
Examples
--------
Imports
>>> import plotly.graph_objs as go
>>> import plotly.io as pio
Construct a figure with large courier text
>>> fig = go.Figure(layout={'title': 'Figure Title',
... 'font': {'size': 20, 'family': 'Courier'},
... 'template':"none"})
>>> fig # doctest: +NORMALIZE_WHITESPACE
Figure({
'data': [],
'layout': {'font': {'family': 'Courier', 'size': 20},
'template': '...', 'title': {'text': 'Figure Title'}}
})
Convert to a figure with a template. Note how the 'font' properties have
been moved into the template property.
>>> templated_fig = pio.to_templated(fig)
>>> templated_fig.layout.template
layout.Template({
'layout': {'font': {'family': 'Courier', 'size': 20}}
})
>>> templated_fig
Figure({
'data': [], 'layout': {'template': '...', 'title': {'text': 'Figure Title'}}
})
Next create a new figure with this template
>>> fig2 = go.Figure(layout={
... 'title': 'Figure 2 Title',
... 'template': templated_fig.layout.template})
>>> fig2.layout.template
layout.Template({
'layout': {'font': {'family': 'Courier', 'size': 20}}
})
The default font in fig2 will now be size 20 Courier.
Next, register as a named template...
>>> pio.templates['large_courier'] = templated_fig.layout.template
and specify this template by name when constructing a figure.
>>> go.Figure(layout={
... 'title': 'Figure 3 Title',
... 'template': 'large_courier'}) # doctest: +ELLIPSIS
Figure(...)
Finally, set this as the default template to be applied to all new figures
>>> pio.templates.default = 'large_courier'
>>> fig = go.Figure(layout={'title': 'Figure 4 Title'})
>>> fig.layout.template
layout.Template({
'layout': {'font': {'family': 'Courier', 'size': 20}}
})
Returns
-------
go.Figure
"""
# process fig
from plotly.basedatatypes import BaseFigure
from plotly.graph_objs import Figure
if not isinstance(fig, BaseFigure):
fig = Figure(fig)
# Process skip
if not skip:
skip = set()
else:
skip = set(skip)
# Always skip uids
skip.add("uid")
# Initialize templated figure with deep copy of input figure
templated_fig = copy.deepcopy(fig)
# Handle layout
walk_push_to_template(
templated_fig.layout, templated_fig.layout.template.layout, skip=skip
)
# Handle traces
trace_type_indexes = {}
for trace in list(templated_fig.data):
template_index = trace_type_indexes.get(trace.type, 0)
# Extend template traces if necessary
template_traces = list(templated_fig.layout.template.data[trace.type])
while len(template_traces) <= template_index:
# Append empty trace
template_traces.append(trace.__class__())
# Get corresponding template trace
template_trace = template_traces[template_index]
# Perform push properties to template
walk_push_to_template(trace, template_trace, skip=skip)
# Update template traces in templated_fig
templated_fig.layout.template.data[trace.type] = template_traces
# Update trace_type_indexes
trace_type_indexes[trace.type] = template_index + 1
# Remove useless trace arrays
any_non_empty = False
for trace_type in templated_fig.layout.template.data:
traces = templated_fig.layout.template.data[trace_type]
is_empty = [trace.to_plotly_json() == {"type": trace_type} for trace in traces]
if all(is_empty):
templated_fig.layout.template.data[trace_type] = None
else:
any_non_empty = True
    # Check if we can remove the data key altogether
if not any_non_empty:
templated_fig.layout.template.data = None
return templated_fig
|
{
"content_hash": "6483e4737e029867e23bbe1b178385fb",
"timestamp": "",
"source": "github",
"line_count": 497,
"max_line_length": 90,
"avg_line_length": 30.47082494969819,
"alnum_prop": 0.563391442155309,
"repo_name": "plotly/plotly.py",
"id": "800525b95086fdf3ecd69fa89feb1055c6f70e80",
"size": "15144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/io/_templates.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/theme_park/shared_invisible_object.iff"
result.attribute_template_id = -1
result.stfName("item_n","base_poi")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "80b972284bc76b8caf05e2ca67355e73",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 75,
"avg_line_length": 23.153846153846153,
"alnum_prop": 0.6910299003322259,
"repo_name": "obi-two/Rebelion",
"id": "2261c8aeec2299bfab27b00bb0726780096c9d17",
"size": "446",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/theme_park/shared_invisible_object.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
from ABE_ADCPi import ADCPi
from ABE_helpers import ABEHelpers
from datetime import datetime, timedelta
import sys
import logging
import time
from numpy import interp, clip
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
class StraightLineSpeed:
def __init__(self, drive):
""" Standard Constructor """
logging.info("Straight Line Speed constructor")
# set up ADC
self.i2c_helper = ABEHelpers()
self.bus = self.i2c_helper.get_smbus()
self.adc = ADCPi(self.bus, 0x6a, 0x6b, 12)
# define fixed values
self.stopped = 0
self.full_forward = 1
self.slow_forward = 0.1
self.full_reverse = -0.5
self.slow_reverse = -0.1
self.left_steering = -0.1
self.right_steering = 0.1
self.straight = 0.05
self.distance_sensor = 1
# Voltage value we are aiming for (2 was close, 0.5 was further away)
self.nominal_distance = 45.0
self.distance_range_min = -20.0
self.distance_range_max = 20.0
# Drivetrain is passed in
self.drive = drive
self.killed = False
def stop(self):
"""Simple method to stop the challenge"""
self.killed = True
def run(self):
""" Main call to run the three point turn script """
# Drive forward for a set number of seconds keeping distance equal
logging.info("forward to turning point")
self.move_segment(total_timeout=2.0)
# Final set motors to neutral to stop
self.drive.set_neutral()
self.stop()
def move_segment(self, total_timeout=0):
logging.info("move_segment called with arguments: {0}".format(locals()))
        # Note: pass line_sensor=0 if no line-sensor exit is required
# calculate timeout times
now = datetime.now()
end_timeout = now + timedelta(seconds=total_timeout)
# Throttle is static and does not change
throttle = self.full_forward
# Steering starts at zero (straight forward)
steering = self.straight
while not self.killed and (datetime.now() < end_timeout):
            # If we have a line sensor, check it here. Bail if necessary
#if self.distance_sensor:
# voltage = self.adc.read_voltage(self.distance_sensor)
# # Distance calculation (units = cm)
# distance = 27.0/voltage
# distance_dif = distance - self.nominal_distance
#
# steering = clip(
# interp(
# distance_dif,
            #            [self.distance_range_min, self.distance_range_max],
# [self.left_steering, self.right_steering],
# ),
# self.left_steering,
# self.right_steering
# )
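            # Illustrative mapping for the disabled block above: a distance_dif
            # of 0 interpolates to the midpoint steering value, and differences
            # beyond the [-20, 20] cm range are clipped to the left/right
            # steering limits.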
time.sleep(0.05)
# Had to invert throttle and steering channels to match RC mode
logging.info("mixing channels: {0} : {1}".format(throttle, steering))
self.drive.mix_channels_and_assign(steering, throttle)
logging.info("Finished manoeuvre")
|
{
"content_hash": "66657ea70b3562c10521f5c5b2594daf",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 81,
"avg_line_length": 34.79120879120879,
"alnum_prop": 0.5837018319646241,
"repo_name": "hackhitchin/piwars",
"id": "1ad79bf0afdf49bd639ddf2f856c7562b87594fb",
"size": "3240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "straight_line_speed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76918"
}
],
"symlink_target": ""
}
|
"""
Totally untested thread pool class.
Tries not to exceed "maximum" workers (but this is not a hard limit).
Kills off up to around half of its workers when more than half are idle.
"""
from __future__ import print_function
from __future__ import with_statement
from threading import Thread, RLock
from queue import Queue
CYCLE_TIME = 3
class WorkerThread (Thread):
def __init__ (self, pool):
Thread.__init__(self)
self._pool = pool
self.daemon = True
self.start()
def run (self):
with self._pool._lock:
self._pool._total += 1
while self._pool.running:
with self._pool._lock:
self._pool._available += 1
try:
func, args, kw = self._pool._tasks.get(True, CYCLE_TIME)
                if func is None:
                    # Poison pill: mark it done so join() doesn't hang, then exit
                    self._pool._tasks.task_done()
                    break
except:
continue
finally:
with self._pool._lock:
self._pool._available -= 1
assert self._pool._available >= 0
try:
func(*args, **kw)
except Exception as e:
print("Worker thread exception", e)
self._pool._tasks.task_done()
with self._pool._lock:
self._pool._total -= 1
assert self._pool._total >= 0
class ThreadPool (object):
#NOTE: Assumes only one thread manipulates the pool
# (Add some locks to fix)
    def __init__ (self, initial = 0, maximum = None):
        self.running = True
        self._available = 0
        self._total = 0
        self._tasks = Queue()
        self.maximum = maximum
        self._lock = RLock()
        for i in range(initial):
            self._new_worker()
def _new_worker (self):
with self._lock:
if self.maximum is not None:
if self._total >= self.maximum:
# Too many!
return False
WorkerThread(self)
return True
def add (_self, _func, *_args, **_kwargs):
        _self.add_task(_func, args=_args, kwargs=_kwargs)
def add_task (self, func, args=(), kwargs={}):
while True:
self._lock.acquire()
if self._available == 0:
self._lock.release()
self._new_worker()
else:
break
self._tasks.put((func, args, kwargs))
        if self._available > self._total // 2 and self._total > 8:
for i in range(self._total // 2 - 1):
self._tasks.put((None,None,None))
self._lock.release()
def join (self):
self._tasks.join()
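# A minimal usage sketch (illustrative; assumes the fixes applied above):
#   pool = ThreadPool(initial=2, maximum=8)
#   pool.add_task(print, args=("hello from a worker",))
#   pool.join()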
|
{
"content_hash": "b73d6e2c4a046f97c8ddb7d9dc2c1fb7",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 72,
"avg_line_length": 24.516129032258064,
"alnum_prop": 0.5890350877192982,
"repo_name": "MurphyMc/pox",
"id": "01f1c9c943283c8e9015d8504fddccdbd651af68",
"size": "2860",
"binary": false,
"copies": "2",
"ref": "refs/heads/halosaur",
"path": "pox/lib/threadpool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "382"
},
{
"name": "C++",
"bytes": "19091"
},
{
"name": "HTML",
"bytes": "999"
},
{
"name": "JavaScript",
"bytes": "9048"
},
{
"name": "Python",
"bytes": "1580617"
},
{
"name": "Shell",
"bytes": "22540"
}
],
"symlink_target": ""
}
|
import sys
import os.path
import pickle
import numpy as np
# Import the relevant PTS classes and modules
import pts.core.tools.archive as arch
from pts.eagle import config
from pts.eagle.database import Database
from pts.eagle.skirtrun import SkirtRun
# -----------------------------------------------------------------
# collects the statistics for a given list of skirt-runs into a single dictionary with a numpy array for each entry
# and dumps the dictionary in pickle format to the specified file.
def collect_info(skirtruns, outfilepath):
# collect the info in single dict
collection = { }
numruns = len(skirtruns)
for runindex in range(numruns):
vispath = skirtruns[runindex].vispath()
infofile = arch.listdir(vispath, "_info.txt")[0]
for line in arch.opentext(os.path.join(vispath,infofile)):
if not line.startswith("#"):
key,dummy,value = line.split(None, 2)
if not key in collection:
collection[key] = np.zeros(numruns)
collection[key][runindex] = float(value)
# dump it into file
    outfile = open(outfilepath, 'wb')
    pickle.dump(collection, outfile)
    outfile.close()
    print("Created info collection " + outfilepath)
# -----------------------------------------------------------------
# chain the command-line arguments into a query list
if len(sys.argv) <= 1: raise ValueError("This script expects one or more command-line arguments")
querylist = "('{}')".format("','".join(sys.argv[1:]))
namelist = "_".join(sys.argv[1:])
# get a list of SkirtRun objects for which to collect statistics, in order of run-id
db = Database()
query = "runstatus in ('completed','archived') and label in {0} and eaglesim in {0}".format(querylist)
runids = sorted([ row['runid'] for row in db.select(query) ])
skirtruns = [ SkirtRun(runid) for runid in runids ]
db.close()
# perform the collection
if len(skirtruns) > 0:
print "Collecting statistics from {} SKIRT-runs with label and eaglesim fields in {}...".format(len(skirtruns),querylist)
collect_info(skirtruns, os.path.join(config.collections_path,"{}_info_collection.dat".format(namelist)))
else:
print "There are no SKIRT-runs with label and eaglesim fields in {}.".format(querylist)
# -----------------------------------------------------------------
|
{
"content_hash": "146fe24ef0938a7c977de2e03f6a72aa",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 125,
"avg_line_length": 40.60344827586207,
"alnum_prop": 0.6348195329087049,
"repo_name": "Stargrazer82301/CAAPR",
"id": "ba8ce840e68810529cf74510f75588d902524cd8",
"size": "3406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CAAPR/CAAPR_AstroMagic/PTS/pts/do/eagle/collect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "CSS",
"bytes": "21972"
},
{
"name": "HTML",
"bytes": "2408"
},
{
"name": "Prolog",
"bytes": "16433"
},
{
"name": "Python",
"bytes": "4465217"
},
{
"name": "Shell",
"bytes": "3793"
}
],
"symlink_target": ""
}
|
import functools
from promise import Promise, is_thenable, promise_for_dict
from ...pyutils.cached_property import cached_property
from ...pyutils.default_ordered_dict import DefaultOrderedDict
from ...type import (GraphQLInterfaceType, GraphQLList, GraphQLNonNull,
GraphQLObjectType, GraphQLUnionType)
from ..base import ResolveInfo, Undefined, collect_fields, get_field_def
from ..values import get_argument_values
from ...error import GraphQLError
try:
from itertools import izip as zip
except:
pass
def get_base_type(type):
if isinstance(type, (GraphQLList, GraphQLNonNull)):
return get_base_type(type.of_type)
return type
def get_subfield_asts(context, return_type, field_asts):
subfield_asts = DefaultOrderedDict(list)
visited_fragment_names = set()
for field_ast in field_asts:
selection_set = field_ast.selection_set
if selection_set:
subfield_asts = collect_fields(
context, return_type, selection_set,
subfield_asts, visited_fragment_names
)
return subfield_asts
def get_resolvers(context, type, field_asts):
from .resolver import field_resolver
subfield_asts = get_subfield_asts(context, type, field_asts)
for response_name, field_asts in subfield_asts.items():
field_ast = field_asts[0]
field_name = field_ast.name.value
field_def = get_field_def(context and context.schema, type, field_name)
if not field_def:
continue
field_base_type = get_base_type(field_def.type)
field_fragment = None
info = ResolveInfo(
field_name,
field_asts,
field_base_type,
parent_type=type,
schema=context and context.schema,
fragments=context and context.fragments,
root_value=context and context.root_value,
operation=context and context.operation,
variable_values=context and context.variable_values,
)
if isinstance(field_base_type, GraphQLObjectType):
field_fragment = Fragment(
type=field_base_type,
field_asts=field_asts,
info=info,
context=context
)
elif isinstance(field_base_type, (GraphQLInterfaceType, GraphQLUnionType)):
field_fragment = AbstractFragment(
abstract_type=field_base_type,
field_asts=field_asts,
info=info,
context=context
)
resolver = field_resolver(field_def, exe_context=context, info=info, fragment=field_fragment)
args = get_argument_values(
field_def.args,
field_ast.arguments,
context and context.variable_values
)
yield (response_name, Field(resolver, args, context and context.context_value, info))
class Field(object):
__slots__ = ('fn', 'args', 'context', 'info')
def __init__(self, fn, args, context, info):
self.fn = fn
self.args = args
self.context = context
self.info = info
def execute(self, root):
return self.fn(root, self.args, self.context, self.info)
class Fragment(object):
def __init__(self, type, field_asts, context=None, info=None):
self.type = type
self.field_asts = field_asts
self.context = context
self.info = info
@cached_property
def partial_resolvers(self):
return list(get_resolvers(
self.context,
self.type,
self.field_asts
))
@cached_property
def fragment_container(self):
try:
fields = next(zip(*self.partial_resolvers))
except StopIteration:
fields = tuple()
class FragmentInstance(dict):
# def __init__(self):
# self.fields = fields
# _fields = ('c','b','a')
set = dict.__setitem__
# def set(self, name, value):
# self[name] = value
def __iter__(self):
return iter(fields)
return FragmentInstance
def have_type(self, root):
return not self.type.is_type_of or self.type.is_type_of(root, self.context.context_value, self.info)
def resolve(self, root):
if root and not self.have_type(root):
raise GraphQLError(
u'Expected value of type "{}" but got: {}.'.format(self.type, type(root).__name__),
self.info.field_asts
)
contains_promise = False
final_results = self.fragment_container()
# return OrderedDict(
# ((field_name, field_resolver(root, field_args, context, info))
# for field_name, field_resolver, field_args, context, info in self.partial_resolvers)
# )
for response_name, field_resolver in self.partial_resolvers:
result = field_resolver.execute(root)
if result is Undefined:
continue
if not contains_promise and is_thenable(result):
contains_promise = True
final_results[response_name] = result
if not contains_promise:
return final_results
return promise_for_dict(final_results)
# return {
# field_name: field_resolver(root, field_args, context, info)
# for field_name, field_resolver, field_args, context, info in self.partial_resolvers
# }
def resolve_serially(self, root):
def execute_field_callback(results, resolver):
response_name, field_resolver = resolver
result = field_resolver.execute(root)
if result is Undefined:
return results
if is_thenable(result):
def collect_result(resolved_result):
results[response_name] = resolved_result
return results
return result.then(collect_result)
results[response_name] = result
return results
def execute_field(prev_promise, resolver):
return prev_promise.then(lambda results: execute_field_callback(results, resolver))
return functools.reduce(execute_field, self.partial_resolvers, Promise.resolve(self.fragment_container()))
def __eq__(self, other):
return isinstance(other, Fragment) and (
other.type == self.type and
other.field_asts == self.field_asts and
other.context == self.context and
other.info == self.info
)
class AbstractFragment(object):
def __init__(self, abstract_type, field_asts, context=None, info=None):
self.abstract_type = abstract_type
self.field_asts = field_asts
self.context = context
self.info = info
self._fragments = {}
@cached_property
def possible_types(self):
return self.context.schema.get_possible_types(self.abstract_type)
@cached_property
def possible_types_with_is_type_of(self):
return [
(type, type.is_type_of) for type in self.possible_types if callable(type.is_type_of)
]
def get_fragment(self, type):
if isinstance(type, str):
type = self.context.schema.get_type(type)
if type not in self._fragments:
assert type in self.possible_types, (
'Runtime Object type "{}" is not a possible type for "{}".'
).format(type, self.abstract_type)
self._fragments[type] = Fragment(
type,
self.field_asts,
self.context,
self.info
)
return self._fragments[type]
def resolve_type(self, result):
return_type = self.abstract_type
context = self.context.context_value
if return_type.resolve_type:
return return_type.resolve_type(result, context, self.info)
for type, is_type_of in self.possible_types_with_is_type_of:
if is_type_of(result, context, self.info):
return type
def resolve(self, root):
_type = self.resolve_type(root)
fragment = self.get_fragment(_type)
return fragment.resolve(root)
|
{
"content_hash": "1f1872e037bb13968fbf3bb7c94c8f35",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 114,
"avg_line_length": 33.02777777777778,
"alnum_prop": 0.5923344947735192,
"repo_name": "wandb/client",
"id": "427acbaf2233e9adf221ba609cd0642d34b9ff3a",
"size": "8323",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wandb/vendor/graphql-core-1.1/wandb_graphql/execution/experimental/fragment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "Dockerfile",
"bytes": "3491"
},
{
"name": "Jupyter Notebook",
"bytes": "7751"
},
{
"name": "Makefile",
"bytes": "1863"
},
{
"name": "Objective-C",
"bytes": "80764"
},
{
"name": "Python",
"bytes": "3634228"
},
{
"name": "Shell",
"bytes": "4662"
}
],
"symlink_target": ""
}
|
r"""
===============================================================================
pore_misc -- miscellaneous and generic functions to apply to pores
===============================================================================
"""
import scipy as _sp
def constant(geometry, value, **kwargs):
r"""
Assign specified constant value. This function is redundant and could be
accomplished with geometry['pore.prop'] = value.
"""
value = _sp.ones(geometry.num_pores(),)*value
return value
def random(geometry, seed=None, num_range=[0, 1], **kwargs):
r"""
    Assign random numbers to pore bodies
note: should this be called 'poisson'?
"""
range_size = num_range[1]-num_range[0]
range_min = num_range[0]
_sp.random.seed(seed=seed)
value = _sp.random.rand(geometry.num_pores(),)
value = value*range_size + range_min
return value
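# Example (illustrative): uniformly distributed seeds in [0.1, 0.9)
#   seeds = random(geometry, seed=0, num_range=[0.1, 0.9])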
def neighbor(network, geometry, throat_prop='', mode='min', **kwargs):
r"""
    Adopt the min, max, or mean value from the neighboring throats, as
    selected by the 'mode' argument
"""
Ps = geometry.pores()
data = geometry[throat_prop]
neighborTs = network.find_neighbor_throats(pores=Ps,
flatten=False,
mode='intersection')
values = _sp.ones((_sp.shape(Ps)[0],))*_sp.nan
if mode == 'min':
for pore in Ps:
values[pore] = _sp.amin(data[neighborTs[pore]])
if mode == 'max':
for pore in Ps:
values[pore] = _sp.amax(data[neighborTs[pore]])
if mode == 'mean':
for pore in Ps:
values[pore] = _sp.mean(data[neighborTs[pore]])
return values
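# Example (illustrative): adopt the largest neighboring throat seed per pore
#   vals = neighbor(network, geometry, throat_prop='throat.seed', mode='max')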
|
{
"content_hash": "20941f10a002e30f7ece8b9061ac0d36",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 32.86274509803921,
"alnum_prop": 0.529236276849642,
"repo_name": "amdouglas/OpenPNM",
"id": "df22a28d6976371b919ee2ad45aa7d9a92b1e1f9",
"size": "1676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OpenPNM/Geometry/models/pore_misc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "525"
},
{
"name": "Python",
"bytes": "802968"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from .constants import Constants
class Configuration:
suffix = Constants.EMPTY_STRING
_validated = False
def __init__(self):
super().__init__()
def validate(self):
""" The most class members are asserted """
if not self._validated:
raise Exception('Configuration invalid')
@staticmethod
def getFormattedMonthYear():
return datetime.now().strftime("%Y-%m")
@staticmethod
def getFormattedDate():
return datetime.now().strftime("%Y-%m-%d-%H%M%S")
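    # Illustrative output (assuming a clock time of 08:30:00 on 2016-09-01):
    #   getFormattedMonthYear() -> "2016-09"
    #   getFormattedDate()      -> "2016-09-01-083000"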
|
{
"content_hash": "14d6e8e30417321dfb171d59a074eab8",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 57,
"avg_line_length": 23.541666666666668,
"alnum_prop": 0.6265486725663717,
"repo_name": "PublicHealthEngland/animal-welfare-assessment-grid",
"id": "6d273a6096bc82b7f2a6ad6d586556700635efcb",
"size": "565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prepare-build/uk_gov_phe_erdst_sc/configuration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "72513"
},
{
"name": "HTML",
"bytes": "107458"
},
{
"name": "Java",
"bytes": "1033236"
},
{
"name": "JavaScript",
"bytes": "480036"
}
],
"symlink_target": ""
}
|
STORAGE_ACCOUNT_NAME = ''
STORAGE_ACCOUNT_KEY = ''
SAS = ''
IS_EMULATED = False
|
{
"content_hash": "07a8557a415fc5d1de0f4b2447706c4a",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 25,
"avg_line_length": 20,
"alnum_prop": 0.6625,
"repo_name": "Azure/azure-storage-python",
"id": "882c5a0673b83b6db4e293bf2eb4c30892ff2b33",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "779"
},
{
"name": "Python",
"bytes": "1674801"
},
{
"name": "Shell",
"bytes": "168"
}
],
"symlink_target": ""
}
|
from twisted.internet.defer import inlineCallbacks, returnValue
from twext.python.log import Logger
from txdav.caldav.icalendarstore import ComponentRemoveState
from uuid import UUID
log = Logger()
@inlineCallbacks
def getCalendarObjectForRecord(txn, record, uid):
"""
Get a copy of the event for a calendar user identified by a directory record.
NOTE: if more than one resource with the same UID is found, we will delete all but
one of them to avoid scheduling problems.
"""
if record and record.thisServer():
# Get record's calendar-home
calendar_home = yield txn.calendarHomeWithUID(record.uid)
if calendar_home is None:
returnValue(None)
# Get matching newstore objects
objectResources = (yield calendar_home.getCalendarResourcesForUID(uid))
if len(objectResources) > 1:
# Delete all but the first one
log.debug("Should only have zero or one scheduling object resource with UID '{uid}' in calendar home: {home!r}", uid=uid, home=calendar_home)
for resource in objectResources[1:]:
yield resource._removeInternal(internal_state=ComponentRemoveState.INTERNAL, useTrash=False)
objectResources = objectResources[:1]
returnValue(objectResources[0] if len(objectResources) == 1 else None)
else:
returnValue(None)
def normalizeCUAddr(addr):
"""
Normalize a cuaddr string by lower()ing it if it's a mailto:, or
removing trailing slash if it's a URL.
@param addr: a cuaddr string to normalize
@return: normalized string
"""
lower = addr.lower()
if lower.startswith("mailto:"):
addr = lower
if (
addr.startswith("/") or
addr.startswith("http:") or
addr.startswith("https:")
):
return addr.rstrip("/")
else:
return addr
def uidFromCalendarUserAddress(address):
"""
Try to extract a record UID from a calendar user address of the appropriate format.
Allowed formats are urn:x-uid, urn:uuid, or /principals/(__uids__).
@param address: calendar user address to operate on
@type address: L{str}
@return: the extracted uid or L{None}
@rtype: L{str} or L{None}
"""
address = normalizeCUAddr(address)
if address.startswith("urn:x-uid:"):
return address[10:]
elif address.startswith("urn:uuid:"):
try:
UUID(address[9:])
except ValueError:
log.info("Invalid GUID: {guid}", guid=address[9:])
return address[9:]
else:
return address[9:]
elif address.startswith("/principals/__uids__"):
parts = address.split("/")
if len(parts) == 4:
return parts[3]
return None
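# Illustrative examples:
#   uidFromCalendarUserAddress("urn:x-uid:ABC")              -> "ABC"
#   uidFromCalendarUserAddress("/principals/__uids__/ABC/")  -> "ABC"
#   uidFromCalendarUserAddress("mailto:user@example.com")    -> None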
def extractEmailDomain(mailtoURI):
try:
addr = mailtoURI[7:].split("?")[0]
_ignore_account, addrDomain = addr.split("@")
except ValueError:
addrDomain = ""
return addrDomain
|
{
"content_hash": "9f9344deb3a7648ae9101ad9fbf1b107",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 153,
"avg_line_length": 30.242424242424242,
"alnum_prop": 0.6399465597862392,
"repo_name": "macosforge/ccs-calendarserver",
"id": "1f2e3c44255be3e702596b9082c31a9dd87b66f0",
"size": "3600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txdav/caldav/datastore/scheduling/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20160901_0830'),
]
operations = [
migrations.DeleteModel(
name='Profesion',
),
]
|
{
"content_hash": "2977ac887d44d5112972c58adf3cba43",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 44,
"avg_line_length": 17.9375,
"alnum_prop": 0.5993031358885017,
"repo_name": "mava-ar/sgk",
"id": "309c93171bf7c00a0aef3051ca89e4d39068cf6d",
"size": "359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/core/migrations/0005_delete_profesion.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "20411"
},
{
"name": "HTML",
"bytes": "81338"
},
{
"name": "JavaScript",
"bytes": "34107"
},
{
"name": "Python",
"bytes": "197385"
},
{
"name": "Shell",
"bytes": "1349"
}
],
"symlink_target": ""
}
|
class FinisherTemplate(object):
def __init__(self, name, description, message, body_message):
self.name = name
self.description = description
self.message = message
self.body_message = body_message
|
{
"content_hash": "ad4c010321eb40a7ff7f9f7735bc4ff8",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 65,
"avg_line_length": 39,
"alnum_prop": 0.6538461538461539,
"repo_name": "ChrisLR/Python-Roguelike-Template",
"id": "c15ab848523eafb28f804dcdc923b12cee398b1f",
"size": "234",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "combat/finisher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "267082"
}
],
"symlink_target": ""
}
|
"""gcloud dns record-sets transaction abort command."""
import os
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
class Abort(base.Command):
"""Abort transaction.
This command aborts the transaction and deletes the transaction file.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To abort the transaction, run:
$ {command} -z MANAGED_ZONE
""",
}
def Run(self, args):
if not os.path.isfile(args.transaction_file):
raise exceptions.ToolException(
'transaction not found at [{0}]'.format(args.transaction_file))
os.remove(args.transaction_file)
log.status.Print('Aborted transaction [{0}].'.format(args.transaction_file))
|
{
"content_hash": "69c23eaa25c3e767f4246e4a8d5a3d39",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 80,
"avg_line_length": 25.40625,
"alnum_prop": 0.6765067650676507,
"repo_name": "wemanuel/smry",
"id": "c202952a18329ea1a098c42f6b1c893d006ce31a",
"size": "864",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/dns/commands/record_sets/transaction/abort.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3990"
},
{
"name": "Groff",
"bytes": "1221174"
},
{
"name": "HTML",
"bytes": "1873470"
},
{
"name": "JavaScript",
"bytes": "2192"
},
{
"name": "Makefile",
"bytes": "6032"
},
{
"name": "PHP",
"bytes": "16660"
},
{
"name": "Python",
"bytes": "47139164"
},
{
"name": "Shell",
"bytes": "37102"
},
{
"name": "SourcePawn",
"bytes": "1160"
}
],
"symlink_target": ""
}
|
from django.contrib.contenttypes.admin import GenericTabularInline
from django.contrib.contenttypes.models import ContentType
from ordered_model.admin import OrderedModelAdmin
from polymorphic.admin import PolymorphicInlineSupportMixin, StackedPolymorphicInline, PolymorphicParentModelAdmin, \
PolymorphicChildModelAdmin
from django.db.models import Subquery
from django.template import Context, Template
from django.contrib import admin
from django.contrib.humanize.templatetags.humanize import intcomma
from django.forms import ModelForm
from django.urls import path, reverse, resolve
from django.utils.functional import cached_property
from django.utils.html import mark_safe
from mailing.admin import BaseEmailTemplateAdmin
from sponsors.models import *
from sponsors.models.benefits import RequiredAssetMixin
from sponsors import views_admin
from sponsors.forms import SponsorshipReviewAdminForm, SponsorBenefitAdminInlineForm, RequiredImgAssetConfigurationForm, \
SponsorshipBenefitAdminForm
from cms.admin import ContentManageableModelAdmin
class AssetsInline(GenericTabularInline):
model = GenericAsset
extra = 0
max_num = 0
    has_delete_permission = lambda self, request, obj=None: False
readonly_fields = ["internal_name", "user_submitted_info", "value"]
def value(self, obj=None):
if not obj or not obj.value:
return ""
return obj.value
value.short_description = "Submitted information"
def user_submitted_info(self, obj=None):
return bool(self.value(obj))
    user_submitted_info.short_description = "Fulfilled data?"
user_submitted_info.boolean = True
@admin.register(SponsorshipProgram)
class SponsorshipProgramAdmin(OrderedModelAdmin):
ordering = ("order",)
list_display = [
"name",
"move_up_down_links",
]
class MultiPartForceForm(ModelForm):
def is_multipart(self):
return True
class BenefitFeatureConfigurationInline(StackedPolymorphicInline):
form = MultiPartForceForm
class LogoPlacementConfigurationInline(StackedPolymorphicInline.Child):
model = LogoPlacementConfiguration
class TieredQuantityConfigurationInline(StackedPolymorphicInline.Child):
model = TieredQuantityConfiguration
class EmailTargetableConfigurationInline(StackedPolymorphicInline.Child):
model = EmailTargetableConfiguration
readonly_fields = ["display"]
def display(self, obj):
return "Enabled"
class RequiredImgAssetConfigurationInline(StackedPolymorphicInline.Child):
model = RequiredImgAssetConfiguration
form = RequiredImgAssetConfigurationForm
class RequiredTextAssetConfigurationInline(StackedPolymorphicInline.Child):
model = RequiredTextAssetConfiguration
class RequiredResponseAssetConfigurationInline(StackedPolymorphicInline.Child):
model = RequiredResponseAssetConfiguration
class ProvidedTextAssetConfigurationInline(StackedPolymorphicInline.Child):
model = ProvidedTextAssetConfiguration
class ProvidedFileAssetConfigurationInline(StackedPolymorphicInline.Child):
model = ProvidedFileAssetConfiguration
model = BenefitFeatureConfiguration
child_inlines = [
LogoPlacementConfigurationInline,
TieredQuantityConfigurationInline,
EmailTargetableConfigurationInline,
RequiredImgAssetConfigurationInline,
RequiredTextAssetConfigurationInline,
RequiredResponseAssetConfigurationInline,
ProvidedTextAssetConfigurationInline,
ProvidedFileAssetConfigurationInline,
]
@admin.register(SponsorshipBenefit)
class SponsorshipBenefitAdmin(PolymorphicInlineSupportMixin, OrderedModelAdmin):
change_form_template = "sponsors/admin/sponsorshipbenefit_change_form.html"
inlines = [BenefitFeatureConfigurationInline]
ordering = ("program", "order")
list_display = [
"program",
"short_name",
"package_only",
"internal_value",
"move_up_down_links",
]
list_filter = ["program", "package_only", "packages", "new", "a_la_carte", "unavailable"]
search_fields = ["name"]
form = SponsorshipBenefitAdminForm
fieldsets = [
(
"Public",
{
"fields": (
"name",
"description",
"program",
"packages",
"package_only",
"new",
"unavailable",
"a_la_carte",
),
},
),
(
"Internal",
{
"fields": (
"internal_description",
"internal_value",
"capacity",
"soft_capacity",
"legal_clauses",
"conflicts",
)
},
),
]
def get_urls(self):
urls = super().get_urls()
my_urls = [
path(
"<int:pk>/update-related-sponsorships",
self.admin_site.admin_view(self.update_related_sponsorships),
name="sponsors_sponsorshipbenefit_update_related",
),
]
return my_urls + urls
def update_related_sponsorships(self, *args, **kwargs):
return views_admin.update_related_sponsorships(self, *args, **kwargs)
@admin.register(SponsorshipPackage)
class SponsorshipPackageAdmin(OrderedModelAdmin):
ordering = ("order",)
list_display = ["name", "advertise", "move_up_down_links"]
list_filter = ["advertise"]
search_fields = ["name"]
def get_readonly_fields(self, request, obj=None):
readonly = []
if obj:
readonly.append("slug")
if not request.user.is_superuser:
readonly.append("logo_dimension")
return readonly
def get_prepopulated_fields(self, request, obj=None):
if not obj:
return {'slug': ['name']}
return {}
class SponsorContactInline(admin.TabularInline):
model = SponsorContact
raw_id_fields = ["user"]
extra = 0
class SponsorshipsInline(admin.TabularInline):
model = Sponsorship
fields = ["link", "status", "applied_on", "start_date", "end_date"]
readonly_fields = ["link", "status", "applied_on", "start_date", "end_date"]
can_delete = False
extra = 0
def link(self, obj):
url = reverse("admin:sponsors_sponsorship_change", args=[obj.id])
return mark_safe(f"<a href={url}>{obj.id}</a>")
link.short_description = "ID"
@admin.register(Sponsor)
class SponsorAdmin(ContentManageableModelAdmin):
inlines = [SponsorContactInline, SponsorshipsInline, AssetsInline]
search_fields = ["name"]
class SponsorBenefitInline(admin.TabularInline):
model = SponsorBenefit
form = SponsorBenefitAdminInlineForm
fields = ["sponsorship_benefit", "benefit_internal_value"]
extra = 0
def has_add_permission(self, request, obj=None):
has_add_permission = super().has_add_permission(request, obj=obj)
match = request.resolver_match
if match.url_name == "sponsors_sponsorship_change":
sponsorship = self.parent_model.objects.get(pk=match.kwargs["object_id"])
has_add_permission = has_add_permission and sponsorship.open_for_editing
return has_add_permission
def get_readonly_fields(self, request, obj=None):
if obj and not obj.open_for_editing:
return ["sponsorship_benefit", "benefit_internal_value"]
return []
def has_delete_permission(self, request, obj=None):
if not obj:
return True
return obj.open_for_editing
def get_queryset(self, *args, **kwargs):
qs = super().get_queryset(*args, **kwargs)
return qs.select_related("sponsorship_benefit__program", "program")
class TargetableEmailBenefitsFilter(admin.SimpleListFilter):
title = "targetable email benefits"
parameter_name = 'email_benefit'
@cached_property
def benefits(self):
qs = EmailTargetableConfiguration.objects.all().values_list("benefit_id", flat=True)
benefits = SponsorshipBenefit.objects.filter(id__in=Subquery(qs))
return {str(b.id): b for b in benefits}
def lookups(self, request, model_admin):
return [
(k, b.name) for k, b in self.benefits.items()
]
def queryset(self, request, queryset):
benefit = self.benefits.get(self.value())
if not benefit:
return queryset
        # all sponsor benefits related to this sponsorship benefit
qs = SponsorBenefit.objects.filter(
sponsorship_benefit_id=benefit.id).values_list("sponsorship_id", flat=True)
return queryset.filter(id__in=Subquery(qs))
@admin.register(Sponsorship)
class SponsorshipAdmin(admin.ModelAdmin):
change_form_template = "sponsors/admin/sponsorship_change_form.html"
form = SponsorshipReviewAdminForm
inlines = [SponsorBenefitInline, AssetsInline]
search_fields = ["sponsor__name"]
list_display = [
"sponsor",
"status",
"package",
"applied_on",
"approved_on",
"start_date",
"end_date",
]
list_filter = ["status", "package", TargetableEmailBenefitsFilter]
actions = ["send_notifications"]
fieldsets = [
(
"Sponsorship Data",
{
"fields": (
"for_modified_package",
"sponsor_link",
"status",
"package",
"sponsorship_fee",
"get_estimated_cost",
"start_date",
"end_date",
"get_contract",
"level_name",
"overlapped_by",
),
},
),
(
"Sponsor Detailed Information",
{
"fields": (
"get_sponsor_name",
"get_sponsor_description",
"get_sponsor_landing_page_url",
"get_sponsor_web_logo",
"get_sponsor_print_logo",
"get_sponsor_primary_phone",
"get_sponsor_mailing_address",
"get_sponsor_contacts",
),
},
),
(
"User Customizations",
{
"fields": (
"get_custom_benefits_added_by_user",
"get_custom_benefits_removed_by_user",
),
"classes": ["collapse"],
},
),
(
"Events dates",
{
"fields": (
"applied_on",
"approved_on",
"rejected_on",
"finalized_on",
),
"classes": ["collapse"],
},
),
]
def get_fieldsets(self, request, obj=None):
fieldsets = []
for title, cfg in super().get_fieldsets(request, obj):
# disable collapse option in case of sponsorships with customizations
if title == "User Customizations" and obj:
if obj.user_customizations["added_by_user"] or obj.user_customizations["removed_by_user"]:
cfg["classes"] = []
fieldsets.append((title, cfg))
return fieldsets
def get_queryset(self, *args, **kwargs):
qs = super().get_queryset(*args, **kwargs)
return qs.select_related("sponsor", "package", "submited_by")
def send_notifications(self, request, queryset):
return views_admin.send_sponsorship_notifications_action(self, request, queryset)
send_notifications.short_description = 'Send notifications to selected'
def get_readonly_fields(self, request, obj):
readonly_fields = [
"for_modified_package",
"sponsor_link",
"status",
"applied_on",
"rejected_on",
"approved_on",
"finalized_on",
"level_name",
"get_estimated_cost",
"get_sponsor_name",
"get_sponsor_description",
"get_sponsor_landing_page_url",
"get_sponsor_web_logo",
"get_sponsor_print_logo",
"get_sponsor_primary_phone",
"get_sponsor_mailing_address",
"get_sponsor_contacts",
"get_contract",
"get_added_by_user",
"get_custom_benefits_added_by_user",
"get_custom_benefits_removed_by_user",
]
if obj and obj.status != Sponsorship.APPLIED:
extra = ["start_date", "end_date", "package", "level_name", "sponsorship_fee"]
readonly_fields.extend(extra)
return readonly_fields
def sponsor_link(self, obj):
url = reverse("admin:sponsors_sponsor_change", args=[obj.sponsor.id])
return mark_safe(f"<a href={url}>{obj.sponsor.name}</a>")
sponsor_link.short_description = "Sponsor"
def get_estimated_cost(self, obj):
cost = None
html = "This sponsorship has not customizations so there's no estimated cost"
if obj.for_modified_package:
msg = "This sponsorship has customizations and this cost is a sum of all benefit's internal values from when this sponsorship was created"
cost = intcomma(obj.estimated_cost)
html = f"{cost} USD <br/><b>Important: </b> {msg}"
return mark_safe(html)
get_estimated_cost.short_description = "Estimated cost"
def get_contract(self, obj):
if not obj.contract:
return "---"
url = reverse("admin:sponsors_contract_change", args=[obj.contract.pk])
html = f"<a href='{url}' target='_blank'>{obj.contract}</a>"
return mark_safe(html)
get_contract.short_description = "Contract"
def get_urls(self):
urls = super().get_urls()
my_urls = [
path(
"<int:pk>/reject",
# TODO: maybe it would be better to create a specific
# group or permission to review sponsorship applications
self.admin_site.admin_view(self.reject_sponsorship_view),
name="sponsors_sponsorship_reject",
),
path(
"<int:pk>/approve-existing",
self.admin_site.admin_view(self.approve_signed_sponsorship_view),
name="sponsors_sponsorship_approve_existing_contract",
),
path(
"<int:pk>/approve",
self.admin_site.admin_view(self.approve_sponsorship_view),
name="sponsors_sponsorship_approve",
),
path(
"<int:pk>/enable-edit",
self.admin_site.admin_view(self.rollback_to_editing_view),
name="sponsors_sponsorship_rollback_to_edit",
),
path(
"<int:pk>/list-assets",
self.admin_site.admin_view(self.list_uploaded_assets_view),
name="sponsors_sponsorship_list_uploaded_assets",
),
]
return my_urls + urls
def get_sponsor_name(self, obj):
return obj.sponsor.name
get_sponsor_name.short_description = "Name"
def get_sponsor_description(self, obj):
return obj.sponsor.description
get_sponsor_description.short_description = "Description"
def get_sponsor_landing_page_url(self, obj):
return obj.sponsor.landing_page_url
get_sponsor_landing_page_url.short_description = "Landing Page URL"
def get_sponsor_web_logo(self, obj):
html = "{% load thumbnail %}{% thumbnail sponsor.web_logo '150x150' format='PNG' quality=100 as im %}<img src='{{ im.url}}'/>{% endthumbnail %}"
template = Template(html)
context = Context({'sponsor': obj.sponsor})
html = template.render(context)
return mark_safe(html)
get_sponsor_web_logo.short_description = "Web Logo"
def get_sponsor_print_logo(self, obj):
img = obj.sponsor.print_logo
html = ""
if img:
html = "{% load thumbnail %}{% thumbnail img '150x150' format='PNG' quality=100 as im %}<img src='{{ im.url}}'/>{% endthumbnail %}"
template = Template(html)
context = Context({'img': img})
html = template.render(context)
return mark_safe(html) if html else "---"
get_sponsor_print_logo.short_description = "Print Logo"
def get_sponsor_primary_phone(self, obj):
return obj.sponsor.primary_phone
get_sponsor_primary_phone.short_description = "Primary Phone"
def get_sponsor_mailing_address(self, obj):
sponsor = obj.sponsor
city_row = (
f"{sponsor.city} - {sponsor.get_country_display()} ({sponsor.country})"
)
if sponsor.state:
city_row = f"{sponsor.city} - {sponsor.state} - {sponsor.get_country_display()} ({sponsor.country})"
mail_row = sponsor.mailing_address_line_1
if sponsor.mailing_address_line_2:
mail_row += f" - {sponsor.mailing_address_line_2}"
html = f"<p>{city_row}</p>"
html += f"<p>{mail_row}</p>"
html += f"<p>{sponsor.postal_code}</p>"
return mark_safe(html)
get_sponsor_mailing_address.short_description = "Mailing/Billing Address"
def get_sponsor_contacts(self, obj):
html = ""
contacts = obj.sponsor.contacts.all()
primary = [c for c in contacts if c.primary]
not_primary = [c for c in contacts if not c.primary]
if primary:
html = "<b>Primary contacts</b><ul>"
html += "".join(
[f"<li>{c.name}: {c.email} / {c.phone}</li>" for c in primary]
)
html += "</ul>"
if not_primary:
html += "<b>Other contacts</b><ul>"
html += "".join(
[f"<li>{c.name}: {c.email} / {c.phone}</li>" for c in not_primary]
)
html += "</ul>"
return mark_safe(html)
get_sponsor_contacts.short_description = "Contacts"
def get_custom_benefits_added_by_user(self, obj):
benefits = obj.user_customizations["added_by_user"]
if not benefits:
return "---"
html = "".join(
[f"<p>{b}</p>" for b in benefits]
)
return mark_safe(html)
get_custom_benefits_added_by_user.short_description = "Added by User"
def get_custom_benefits_removed_by_user(self, obj):
benefits = obj.user_customizations["removed_by_user"]
if not benefits:
return "---"
html = "".join(
[f"<p>{b}</p>" for b in benefits]
)
return mark_safe(html)
get_custom_benefits_removed_by_user.short_description = "Removed by User"
def rollback_to_editing_view(self, request, pk):
return views_admin.rollback_to_editing_view(self, request, pk)
def reject_sponsorship_view(self, request, pk):
return views_admin.reject_sponsorship_view(self, request, pk)
def approve_sponsorship_view(self, request, pk):
return views_admin.approve_sponsorship_view(self, request, pk)
def approve_signed_sponsorship_view(self, request, pk):
return views_admin.approve_signed_sponsorship_view(self, request, pk)
def list_uploaded_assets_view(self, request, pk):
return views_admin.list_uploaded_assets(self, request, pk)
@admin.register(LegalClause)
class LegalClauseModelAdmin(OrderedModelAdmin):
list_display = ["internal_name"]
@admin.register(Contract)
class ContractModelAdmin(admin.ModelAdmin):
change_form_template = "sponsors/admin/contract_change_form.html"
list_display = [
"id",
"sponsorship",
"created_on",
"last_update",
"status",
"get_revision",
"document_link",
]
def get_queryset(self, *args, **kwargs):
qs = super().get_queryset(*args, **kwargs)
return qs.select_related("sponsorship__sponsor")
def get_revision(self, obj):
return obj.revision if obj.is_draft else "Final"
get_revision.short_description = "Revision"
fieldsets = [
(
"Info",
{
"fields": ("get_sponsorship_url", "status", "revision"),
},
),
(
"Editable",
{
"fields": (
"sponsor_info",
"sponsor_contact",
"benefits_list",
"legal_clauses",
),
},
),
(
"Files",
{
"fields": (
"document",
"document_docx",
"signed_document",
)
},
),
(
"Activities log",
{
"fields": (
"created_on",
"last_update",
"sent_on",
),
"classes": ["collapse"],
},
),
]
def get_readonly_fields(self, request, obj):
readonly_fields = [
"status",
"created_on",
"last_update",
"sent_on",
"sponsorship",
"revision",
"document",
"document_docx",
"signed_document",
"get_sponsorship_url",
]
if obj and not obj.is_draft:
extra = [
"sponsor_info",
"sponsor_contact",
"benefits_list",
"legal_clauses",
]
readonly_fields.extend(extra)
return readonly_fields
def document_link(self, obj):
html, url, msg = "---", "", ""
if obj.is_draft:
url = obj.preview_url
msg = "Preview document"
elif obj.document:
url = obj.document.url
msg = "Download Contract"
elif obj.signed_document:
url = obj.signed_document.url
msg = "Download Signed Contract"
if url and msg:
html = f'<a href="{url}" target="_blank">{msg}</a>'
return mark_safe(html)
document_link.short_description = "Contract document"
def get_sponsorship_url(self, obj):
if not obj.sponsorship:
return "---"
url = reverse("admin:sponsors_sponsorship_change", args=[obj.sponsorship.pk])
html = f"<a href='{url}' target='_blank'>{obj.sponsorship}</a>"
return mark_safe(html)
get_sponsorship_url.short_description = "Sponsorship"
def get_urls(self):
urls = super().get_urls()
my_urls = [
path(
"<int:pk>/preview",
self.admin_site.admin_view(self.preview_contract_view),
name="sponsors_contract_preview",
),
path(
"<int:pk>/send",
self.admin_site.admin_view(self.send_contract_view),
name="sponsors_contract_send",
),
path(
"<int:pk>/execute",
self.admin_site.admin_view(self.execute_contract_view),
name="sponsors_contract_execute",
),
path(
"<int:pk>/nullify",
self.admin_site.admin_view(self.nullify_contract_view),
name="sponsors_contract_nullify",
),
]
return my_urls + urls
def preview_contract_view(self, request, pk):
return views_admin.preview_contract_view(self, request, pk)
def send_contract_view(self, request, pk):
return views_admin.send_contract_view(self, request, pk)
def execute_contract_view(self, request, pk):
return views_admin.execute_contract_view(self, request, pk)
def nullify_contract_view(self, request, pk):
return views_admin.nullify_contract_view(self, request, pk)
@admin.register(SponsorEmailNotificationTemplate)
class SponsorEmailNotificationTemplateAdmin(BaseEmailTemplateAdmin):
def get_form(self, request, obj=None, **kwargs):
help_texts = {
"content": SPONSOR_TEMPLATE_HELP_TEXT,
}
kwargs.update({"help_texts": help_texts})
return super().get_form(request, obj, **kwargs)
class AssetTypeListFilter(admin.SimpleListFilter):
title = "Asset Type"
parameter_name = 'type'
@property
def assets_types_mapping(self):
return {asset_type.__name__: asset_type for asset_type in GenericAsset.all_asset_types()}
def lookups(self, request, model_admin):
return [(k, v._meta.verbose_name_plural) for k, v in self.assets_types_mapping.items()]
def queryset(self, request, queryset):
asset_type = self.assets_types_mapping.get(self.value())
if not asset_type:
return queryset
return queryset.instance_of(asset_type)
class AssociatedBenefitListFilter(admin.SimpleListFilter):
title = "From Benefit Which Requires Asset"
parameter_name = 'from_benefit'
@property
def benefits_with_assets(self):
qs = BenefitFeature.objects.required_assets().values_list("sponsor_benefit__sponsorship_benefit",
flat=True).distinct()
benefits = SponsorshipBenefit.objects.filter(id__in=Subquery(qs))
return {str(b.id): b for b in benefits}
def lookups(self, request, model_admin):
return [(k, b.name) for k, b in self.benefits_with_assets.items()]
def queryset(self, request, queryset):
benefit = self.benefits_with_assets.get(self.value())
if not benefit:
return queryset
internal_names = [
cfg.internal_name
for cfg in benefit.features_config.all()
if hasattr(cfg, "internal_name")
]
return queryset.filter(internal_name__in=internal_names)
class AssetContentTypeFilter(admin.SimpleListFilter):
title = "Related Object"
parameter_name = 'content_type'
def lookups(self, request, model_admin):
qs = ContentType.objects.filter(model__in=["sponsorship", "sponsor"])
return [(c_type.pk, c_type.model.title()) for c_type in qs]
def queryset(self, request, queryset):
value = self.value()
if not value:
return queryset
return queryset.filter(content_type=value)
class AssetWithOrWithoutValueFilter(admin.SimpleListFilter):
title = "Value"
parameter_name = "value"
def lookups(self, request, model_admin):
return [
("with-value", "With value"),
("no-value", "Without value"),
]
def queryset(self, request, queryset):
value = self.value()
if not value:
return queryset
with_value_id = [asset.pk for asset in queryset if asset.value]
if value == "with-value":
return queryset.filter(pk__in=with_value_id)
else:
return queryset.exclude(pk__in=with_value_id)
@admin.register(GenericAsset)
class GenericAssetModelAdmin(PolymorphicParentModelAdmin):
list_display = ["id", "internal_name", "get_value", "content_type", "get_related_object"]
list_filter = [AssetContentTypeFilter, AssetTypeListFilter, AssetWithOrWithoutValueFilter,
AssociatedBenefitListFilter]
actions = ["export_assets_as_zipfile"]
def get_child_models(self, *args, **kwargs):
return GenericAsset.all_asset_types()
def get_queryset(self, *args, **kwargs):
return GenericAsset.objects.all_assets()
def get_actions(self, request):
actions = super().get_actions(request)
if 'delete_selected' in actions:
del actions['delete_selected']
return actions
def has_add_permission(self, *args, **kwargs):
return False
@cached_property
def all_sponsors(self):
qs = Sponsor.objects.all()
return {sp.id: sp for sp in qs}
@cached_property
def all_sponsorships(self):
qs = Sponsorship.objects.all().select_related("package", "sponsor")
return {sp.id: sp for sp in qs}
def get_value(self, obj):
html = obj.value
if obj.value and getattr(obj.value, "url", None):
html = f"<a href='{obj.value.url}' target='_blank'>{obj.value}</a>"
return mark_safe(html)
get_value.short_description = "Value"
def get_related_object(self, obj):
"""
        Return the content_object rendered as a link; using the cached
        sponsor and sponsorship mappings avoids one query per row
"""
content_object = None
if obj.from_sponsorship:
content_object = self.all_sponsorships[obj.object_id]
elif obj.from_sponsor:
content_object = self.all_sponsors[obj.object_id]
if not content_object: # safety belt
return obj.content_object
html = f"<a href='{content_object.admin_url}' target='_blank'>{content_object}</a>"
return mark_safe(html)
get_related_object.short_description = "Associated with"
def export_assets_as_zipfile(self, request, queryset):
return views_admin.export_assets_as_zipfile(self, request, queryset)
export_assets_as_zipfile.short_description = "Export selected"
class GenericAssetChildModelAdmin(PolymorphicChildModelAdmin):
""" Base admin class for all GenericAsset child models """
base_model = GenericAsset
readonly_fields = ["uuid", "content_type", "object_id", "content_object", "internal_name"]
@admin.register(TextAsset)
class TextAssetModelAdmin(GenericAssetChildModelAdmin):
base_model = TextAsset
@admin.register(ImgAsset)
class ImgAssetModelAdmin(GenericAssetChildModelAdmin):
base_model = ImgAsset
@admin.register(FileAsset)
class FileAssetModelAdmin(GenericAssetChildModelAdmin):
base_model = FileAsset
@admin.register(ResponseAsset)
class ResponseAssetModelAdmin(GenericAssetChildModelAdmin):
base_model = ResponseAsset
|
{
"content_hash": "fc1a16a4a092651f130138eb87774585",
"timestamp": "",
"source": "github",
"line_count": 908,
"max_line_length": 152,
"avg_line_length": 33.40418502202643,
"alnum_prop": 0.5899244996867891,
"repo_name": "proevo/pythondotorg",
"id": "9af126d39affbb0f6c8ec6467fc85774c78fa6da",
"size": "30331",
"binary": false,
"copies": "1",
"ref": "refs/heads/dependabot/pip/django-allauth-0.51.0",
"path": "sponsors/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "711916"
},
{
"name": "JavaScript",
"bytes": "314514"
},
{
"name": "Makefile",
"bytes": "6811"
},
{
"name": "Python",
"bytes": "1448691"
},
{
"name": "Ruby",
"bytes": "218314"
},
{
"name": "Shell",
"bytes": "6730"
}
],
"symlink_target": ""
}
|
"""distutils.command.build_clib
Implements the Distutils 'build_clib' command, to build a C/C++ library
that is included in the module distribution and needed by an extension
module."""
__revision__ = "$Id: build_clib.py 58495 2007-10-16 18:12:55Z guido.van.rossum $"
# XXX this module has *lots* of code ripped-off quite transparently from
# build_ext.py -- not surprisingly really, as the work required to build
# a static library from a collection of C source files is not really all
# that different from what's required to build a shared object file from
# a collection of C source files. Nevertheless, I haven't done the
# necessary refactoring to account for the overlap in code between the
# two modules, mainly because a number of subtle details changed in the
# cut 'n paste. Sigh.
import os
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler
from distutils import log
def show_compilers():
from distutils.ccompiler import show_compilers
show_compilers()
class build_clib(Command):
description = "build C/C++ libraries used by Python extensions"
user_options = [
('build-clib', 'b',
"directory to build C/C++ libraries to"),
('build-temp', 't',
"directory to put temporary build by-products"),
('debug', 'g',
"compile with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options(self):
self.build_clib = None
self.build_temp = None
# List of libraries to build
self.libraries = None
# Compilation options for all libraries
self.include_dirs = None
self.define = None
self.undef = None
self.debug = None
self.force = 0
self.compiler = None
def finalize_options(self):
# This might be confusing: both build-clib and build-temp default
# to build-temp as defined by the "build" command. This is because
# I think that C libraries are really just temporary build
# by-products, at least from the point of view of building Python
# extensions -- but I want to keep my options open.
self.set_undefined_options('build',
('build_temp', 'build_clib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'))
self.libraries = self.distribution.libraries
if self.libraries:
self.check_library_list(self.libraries)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
# XXX same as for build_ext -- what about 'self.define' and
# 'self.undef' ?
def run(self):
if not self.libraries:
return
# Yech -- this is cut 'n pasted from build_ext.py!
from distutils.ccompiler import new_compiler
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run,
force=self.force)
customize_compiler(self.compiler)
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
self.build_libraries(self.libraries)
def check_library_list(self, libraries):
"""Ensure that the list of libraries (presumably provided as a
command option 'libraries') is valid, i.e. it is a list of
2-tuples, where the tuples are (library_name, build_info_dict).
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise."""
# Yechh, blecch, ackk: this is ripped straight out of build_ext.py,
# with only names changed to protect the innocent!
if not isinstance(libraries, list):
raise DistutilsSetupError(
"'libraries' option must be a list of tuples")
for lib in libraries:
            if not isinstance(lib, tuple) or len(lib) != 2:
                raise DistutilsSetupError(
                    "each element of 'libraries' must be a 2-tuple")
            if not isinstance(lib[0], str):
raise DistutilsSetupError(
"first element of each tuple in 'libraries' "
"must be a string (the library name)")
if '/' in lib[0] or (os.sep != '/' and os.sep in lib[0]):
raise DistutilsSetupError("bad library name '%s': "
"may not contain directory separators" % lib[0])
if not isinstance(lib[1], dict):
raise DistutilsSetupError(
"second element of each tuple in 'libraries' "
"must be a dictionary (build info)")
def get_library_names(self):
# Assume the library list is valid -- 'check_library_list()' is
# called from 'finalize_options()', so it should be!
if not self.libraries:
return None
lib_names = []
for (lib_name, build_info) in self.libraries:
lib_names.append(lib_name)
return lib_names
def get_source_files(self):
self.check_library_list(self.libraries)
filenames = []
for (lib_name, build_info) in self.libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % lib_name)
filenames.extend(sources)
return filenames
def build_libraries(self, libraries):
for (lib_name, build_info) in libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % lib_name)
sources = list(sources)
log.info("building '%s' library", lib_name)
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
self.compiler.create_static_lib(objects, lib_name,
output_dir=self.build_clib,
debug=self.debug)
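# Minimal usage sketch (hypothetical project): this command is normally
# driven through setup() rather than instantiated directly, e.g.
#
#   from distutils.core import setup
#   setup(name='sample', version='0.1',
#         libraries=[('sample', {'sources': ['src/sample.c']})])
#
# Running "python setup.py build_clib" then compiles 'sample' as a
# static library for later use by extension modules.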
|
{
"content_hash": "8507cc9b7eef039fcd6e3d15caa1e1e6",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 81,
"avg_line_length": 39.616504854368934,
"alnum_prop": 0.5696605808111751,
"repo_name": "MalloyPower/parsing-python",
"id": "b60237c6854b0d0a83a7b795c3afddfbe7334676",
"size": "8161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.0/Lib/distutils/command/build_clib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
"""Feature learning based on sparse filtering"""
# Author: Jan Hendrik Metzen
# License: BSD 3 clause
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator
class SparseFiltering(BaseEstimator):
"""Sparse filtering
Unsupervised learning of features using the sparse filtering algorithm.
Features are linear in the inputs, i.e., f_j(x) = \sum_i w_{ij}x_i
This algorithm does not try to model the
data's distribution but rather to learn features which are sparsely
activated in the sense of
* Population Sparsity: for each image, only a small subset of features
is activated
* Lifetime Sparsity: each feature is only activated on a small
subset of the examples
* High Dispersal: Uniform activity distribution of features.
    This is encoded as an objective function which maps the weight vector w
    onto a scalar value which is smaller the sparser the features are.
L-BFGS is used to minimize this objective function.
Parameters
----------
n_features : int,
Number of features to be learned.
maxfun : int,
Maximum number of evaluations of the objective function in L-BFGS-B.
Defaults to 500.
iprint : int,
Verbosity of the L-BFGS-B. Prints information regarding the objective
function every iprint iterations. Does not print any information if set
to -1. Defaults to -1.
Attributes
----------
`w_` : array, [n_features, n_inputs]
Sparse components extracted from the data.
Notes
-----
This implements the method described in `Jiquan Ngiam, Pang Wei Koh,
Zhenghao Chen, Sonia Bhaskar, Andrew Y. Ng:
Sparse Filtering. NIPS 2011: 1125-1133`
and is based on the Matlab code provided in the supplementary material
"""
def __init__(self, n_features, maxfun=500, iprint=-1):
self.n_features = n_features
self.iprint = iprint
self.maxfun = maxfun
def fit(self, X, y=None, **params):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self.w_ = self._fit(X, **params)
return self
def transform(self, X):
return self._transform(X)
def fit_transform(self, X, y=None, **params):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
self.w_ = self._fit(X, **params)
return self._transform(X)
def _fit(self, X):
# transpose data in order to be consistent with the Matlab code
X = np.array(X.T)
        # subtract the mean from each image patch
X -= X.mean(0)
def objective_fct(w):
# View 1d weight vector as a 2d matrix
W = w.reshape(self.n_features, X.shape[0])
# Determine features resulting from weight vector
F, Fs, L2Fs, NFs, L2Fn, Fhat = self._determine_features(X, W)
            # Compute sparsity of each feature over all examples, i.e., compute
# its l1-norm; the objective function is the sum over these
# sparsities
obj = np.apply_along_axis(np.linalg.norm, 1, Fhat, 1).sum()
# Backprop through each feedforward step
deltaW = l2grad(NFs.T, Fhat, L2Fn, np.ones_like(Fhat))
deltaW = l2grad(Fs, NFs, L2Fs, deltaW.T)
deltaW = (deltaW * (F / Fs)).dot(X.T)
# Return objective value and gradient
return obj, deltaW.flatten()
def l2grad(X, Y, N, D):
# Backpropagate through normalization
return D / N[:, None] - Y \
* (D * X).sum(1)[:, None] / (N ** 2)[:, None]
# Choose initial weights randomly
w0 = np.random.random(X.shape[0] * self.n_features) * 2 - 1
# Use L-BFGS to find weights which correspond to a (local) minimum of
# the objective function
w, s, d = fmin_l_bfgs_b(objective_fct, w0, iprint=self.iprint,
maxfun=self.maxfun)
return w.reshape(self.n_features, X.shape[0])
def _transform(self, X):
# transpose data in order to be consistent with the Matlab code
X = np.array(X.T)
        # subtract the mean from each image patch
X -= X.mean(0)
W = self.w_.reshape(self.n_features, X.shape[0])
# Determine features resulting from weight vector
# (ignore internals required for gradient)
_, _, _, _, _, Fhat = self._determine_features(X, W)
return Fhat
def _determine_features(self, X, W):
# Compute unnormalized features by multiplying weight matrix with
# data
F = W.dot(X) # Linear Activation
Fs = np.sqrt(F ** 2 + 1e-8) # Soft-Absolute Activation
# Normalize each feature to be equally active by dividing each
# feature by its l2-norm across all examples
L2Fs = np.apply_along_axis(np.linalg.norm, 1, Fs)
NFs = Fs / L2Fs[:, None]
# Normalize features per example, so that they lie on the unit
# l2-ball
L2Fn = np.apply_along_axis(np.linalg.norm, 1, NFs.T)
Fhat = NFs.T / L2Fn[:, None]
return F, Fs, L2Fs, NFs, L2Fn, Fhat
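if __name__ == "__main__":
    # Minimal usage sketch with made-up data (illustrative values only):
    # learn 10 sparsely activated features from 100 random
    # 20-dimensional samples.
    rng = np.random.RandomState(0)
    X_demo = rng.randn(100, 20)
    estimator = SparseFiltering(n_features=10, maxfun=100)
    F_demo = estimator.fit_transform(X_demo)
    print(F_demo.shape)  # one row of learned features per sample: (100, 10)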
|
{
"content_hash": "6d950a8fab84e40f1368904b73939adb",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 79,
"avg_line_length": 35.53333333333333,
"alnum_prop": 0.5974756950366706,
"repo_name": "codeaudit/sparse-filtering",
"id": "be8a0cb86f7391ff16577ccff15dd8cbcdbb465f",
"size": "5863",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sparse_filtering.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10154"
}
],
"symlink_target": ""
}
|
"""Contains signals"""
# Python imports
from functools import wraps
# Django imports
from django.db.models import signals
# app imports
from oweb.models import Account, Planet, Moon
from oweb.models.research import *
from oweb.models.ship import *
from oweb.models.building import *
from oweb.models.defense import *
def disable_for_loaddata(signal_handler):
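    """Skip the wrapped signal handler while fixtures are being loaded.

    Django passes ``raw=True`` to post_save handlers during ``loaddata``;
    creating the related objects below in that situation could collide
    with objects that are part of the fixture itself.
    """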
@wraps(signal_handler)
def wrapper(*args, **kwargs):
if kwargs['raw']:
return
signal_handler(*args, **kwargs)
return wrapper
@disable_for_loaddata
def callback_create_account(sender, instance, created, **kwargs):
"""Creates :py:class:`Research` and :py:class:`Ship` objects after account creation"""
if created:
Research106.objects.create(account=instance)
Research108.objects.create(account=instance)
Research109.objects.create(account=instance)
Research110.objects.create(account=instance)
Research111.objects.create(account=instance)
Research113.objects.create(account=instance)
Research114.objects.create(account=instance)
Research115.objects.create(account=instance)
Research117.objects.create(account=instance)
Research118.objects.create(account=instance)
Research120.objects.create(account=instance)
Research121.objects.create(account=instance)
Research122.objects.create(account=instance)
Research123.objects.create(account=instance)
Research124.objects.create(account=instance)
Research199.objects.create(account=instance)
Military204.objects.create(account=instance)
Military205.objects.create(account=instance)
Military206.objects.create(account=instance)
Military207.objects.create(account=instance)
Military215.objects.create(account=instance)
Military211.objects.create(account=instance)
Military213.objects.create(account=instance)
Military214.objects.create(account=instance)
Civil202.objects.create(account=instance)
Civil203.objects.create(account=instance)
Civil208.objects.create(account=instance)
Civil209.objects.create(account=instance)
Civil210.objects.create(account=instance)
Planet.objects.create(account=instance, name='Homeworld')
@disable_for_loaddata
def callback_create_planet(sender, instance, created, **kwargs):
"""Creates :py:class:`Building`, :py:class:`Defense` and
:py:class:`Civil212` objects after planet creation"""
if created:
Supply1.objects.create(astro_object=instance)
Supply2.objects.create(astro_object=instance)
Supply3.objects.create(astro_object=instance)
Supply4.objects.create(astro_object=instance)
Supply12.objects.create(astro_object=instance)
Supply22.objects.create(astro_object=instance)
Supply23.objects.create(astro_object=instance)
Supply24.objects.create(astro_object=instance)
Supply25.objects.create(astro_object=instance)
Supply26.objects.create(astro_object=instance)
Supply27.objects.create(astro_object=instance)
Station14.objects.create(astro_object=instance)
Station15.objects.create(astro_object=instance)
Station21.objects.create(astro_object=instance)
Station31.objects.create(astro_object=instance)
Station33.objects.create(astro_object=instance)
Station34.objects.create(astro_object=instance)
Station44.objects.create(astro_object=instance)
Civil212.objects.create(account=instance.account, astro_object=instance)
Defense401.objects.create(astro_object=instance)
Defense402.objects.create(astro_object=instance)
Defense403.objects.create(astro_object=instance)
Defense404.objects.create(astro_object=instance)
Defense405.objects.create(astro_object=instance)
Defense406.objects.create(astro_object=instance)
Defense407.objects.create(astro_object=instance)
Defense408.objects.create(astro_object=instance)
Defense502.objects.create(astro_object=instance)
Defense503.objects.create(astro_object=instance)
@disable_for_loaddata
def callback_create_moon(sender, instance, created, **kwargs):
if created:
Supply22.objects.create(astro_object=instance)
Supply23.objects.create(astro_object=instance)
Supply24.objects.create(astro_object=instance)
Supply25.objects.create(astro_object=instance)
Supply26.objects.create(astro_object=instance)
Supply27.objects.create(astro_object=instance)
Station14.objects.create(astro_object=instance)
Station21.objects.create(astro_object=instance)
Station41.objects.create(astro_object=instance)
Station42.objects.create(astro_object=instance)
Station43.objects.create(astro_object=instance)
Civil212.objects.create(account=instance.planet.account, astro_object=instance)
Defense401.objects.create(astro_object=instance)
Defense402.objects.create(astro_object=instance)
Defense403.objects.create(astro_object=instance)
Defense404.objects.create(astro_object=instance)
Defense405.objects.create(astro_object=instance)
Defense406.objects.create(astro_object=instance)
Defense407.objects.create(astro_object=instance)
Defense408.objects.create(astro_object=instance)
@disable_for_loaddata
def callback_update_moon_coord(sender, instance, **kwargs):
try:
moon = Moon.objects.get(planet=instance)
moon.coord = instance.coord
moon.save()
except Moon.DoesNotExist:
pass
# Register the callbacks
signals.post_save.connect(callback_create_account,
sender=Account,
weak=False,
dispatch_uid='models.callback_create_account')
signals.post_save.connect(callback_create_planet,
sender=Planet,
weak=False,
dispatch_uid='models.callback_create_planet')
signals.post_save.connect(callback_update_moon_coord,
sender=Planet,
weak=False,
dispatch_uid='models.callback_update_moon_coord')
signals.post_save.connect(callback_create_moon,
sender=Moon,
weak=False,
dispatch_uid='models.callback_create_moon')
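# The explicit dispatch_uid values above keep each handler from being
# connected more than once if this module happens to be imported twice.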
|
{
"content_hash": "f5a947c975bbda0869069e1cf83cc81b",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 90,
"avg_line_length": 42.07741935483871,
"alnum_prop": 0.6965654707145048,
"repo_name": "Mischback/django-oweb",
"id": "17277316bd5de736b11776c8866c7dfecc13cb4a",
"size": "6522",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "oweb/models/signals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6779"
},
{
"name": "Python",
"bytes": "170213"
},
{
"name": "Shell",
"bytes": "6706"
}
],
"symlink_target": ""
}
|
from neo.Utils.BlockchainFixtureTestCase import BlockchainFixtureTestCase
from neo.VM.InteropService import StackItem
from neo.VM.ExecutionEngine import ExecutionEngine
from neo.VM.ExecutionContext import ExecutionContext
from neo.SmartContract.StateReader import StateReader
from neo.Core.Block import Block
from neo.Core.TX.Transaction import Transaction
from neo.Settings import settings
from neo.Core.UInt256 import UInt256
from neo.VM.Script import Script
import os
from neo.Blockchain import GetBlockchain
from neo.SmartContract import TriggerType
class StringIn(str):
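    """String subclass whose equality test checks containment, e.g.
    ``StringIn('foo') == 'xfoox'`` is True because ``'foo' in 'xfoox'``."""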
def __eq__(self, other):
return self in other
class BlockchainInteropTest(BlockchainFixtureTestCase):
engine = None
econtext = None
state_reader = None
@classmethod
def leveldb_testpath(cls):
return os.path.join(settings.DATA_DIR_PATH, 'fixtures/test_chain')
@classmethod
def setUpClass(cls):
super(BlockchainInteropTest, cls).setUpClass()
def setUp(self):
self.engine = ExecutionEngine()
self.econtext = ExecutionContext(Script(self.engine.Crypto, b''), 0)
self.engine.InvocationStack.PushT(self.econtext)
snapshot = GetBlockchain()._db.createSnapshot()
self.state_reader = StateReader(TriggerType.Application, snapshot)
def test_interop_getblock(self):
height = StackItem.New(9369)
self.econtext.EvaluationStack.PushT(height)
self.engine.InvocationStack.PushT(self.econtext)
self.state_reader.Blockchain_GetBlock(self.engine)
block = self.econtext.EvaluationStack.Pop().GetInterface()
self.assertIsInstance(block, Block)
def test_interop_get_transaction(self):
u256 = UInt256.ParseString('8be9660512991d36e016b8ced6fda5d611d26a0f6e2faaaf1f379496edb3395f')
hash = StackItem.New(u256.Data)
self.econtext.EvaluationStack.PushT(hash)
self.engine.InvocationStack.PushT(self.econtext)
self.state_reader.Blockchain_GetTransaction(self.engine)
tx = self.econtext.EvaluationStack.Pop().GetInterface()
self.assertIsInstance(tx, Transaction)
def test_interop_get_bad_transaction(self):
u256 = UInt256.ParseString('8be9660512991d36e016b8ced6fda5d611d26a0f6e2faaaf1f379496edb33956')
hash = StackItem.New(u256.Data)
self.econtext.EvaluationStack.PushT(hash)
self.engine.InvocationStack.PushT(self.econtext)
self.state_reader.Blockchain_GetTransaction(self.engine)
tx = self.econtext.EvaluationStack.Pop().GetInterface()
self.assertIsNone(tx)
def test_interop_get_transaction_height(self):
u256 = UInt256.ParseString('8be9660512991d36e016b8ced6fda5d611d26a0f6e2faaaf1f379496edb3395f')
hash = StackItem.New(u256.Data)
self.econtext.EvaluationStack.PushT(hash)
self.engine.InvocationStack.PushT(self.econtext)
self.state_reader.Blockchain_GetTransactionHeight(self.engine)
height = self.econtext.EvaluationStack.Pop().GetBigInteger()
self.assertEqual(height, 9369)
def test_interop_get_bad_transaction_height(self):
u256 = UInt256.ParseString('8be9660512991d36e016b8ced6fda5d611d26a0f6e2faaaf1f379496edb33956')
hash = StackItem.New(u256.Data)
self.econtext.EvaluationStack.PushT(hash)
self.engine.InvocationStack.PushT(self.econtext)
self.state_reader.Blockchain_GetTransactionHeight(self.engine)
height = self.econtext.EvaluationStack.Pop().GetBigInteger()
self.assertEqual(height, -1)
|
{
"content_hash": "072d3ed76bd400f6006b430db6bd0eaf",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 102,
"avg_line_length": 35,
"alnum_prop": 0.7364145658263306,
"repo_name": "hal0x2328/neo-python",
"id": "19595cfaa0524372dff27cd665cb2f7fc1890354",
"size": "3570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neo/VM/tests/test_interop_blockchain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2059"
},
{
"name": "Makefile",
"bytes": "1898"
},
{
"name": "Python",
"bytes": "1758220"
},
{
"name": "Shell",
"bytes": "531"
}
],
"symlink_target": ""
}
|
from ....testing import assert_equal
from ..utils import RelabelHypointensities
def test_RelabelHypointensities_inputs():
input_map = dict(args=dict(argstr='%s',
),
aseg=dict(argstr='%s',
mandatory=True,
position=-3,
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
lh_white=dict(mandatory=True,
),
out_file=dict(argstr='%s',
genfile=True,
mandatory=False,
position=-1,
),
rh_white=dict(mandatory=True,
),
subjects_dir=dict(),
surf_directory=dict(argstr='%s',
genfile=True,
mandatory=False,
position=-2,
),
terminal_output=dict(nohash=True,
),
)
inputs = RelabelHypointensities.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_RelabelHypointensities_outputs():
output_map = dict(out_file=dict(argstr='%s',
mandatory=False,
),
)
outputs = RelabelHypointensities.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
{
"content_hash": "4c3a68339da2713f568134a6755c6f13",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 78,
"avg_line_length": 25.365384615384617,
"alnum_prop": 0.6338134950720242,
"repo_name": "dgellis90/nipype",
"id": "e08d447d1ee68f23459a0319e8e33d80ff7f94ca",
"size": "1373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2106"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "4857096"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
"""Here is defined the Index class."""
from __future__ import print_function
from __future__ import absolute_import
import math
import operator
import os
import os.path
import sys
import tempfile
import warnings
from time import time, clock
import numpy
from .idxutils import (calc_chunksize, calcoptlevels,
get_reduction_level, nextafter, inftype)
from . import indexesextension
from .node import NotLoggedMixin
from .atom import UIntAtom, Atom
from .earray import EArray
from .carray import CArray
from .leaf import Filters
from .indexes import CacheArray, LastRowArray, IndexArray
from .group import Group
from .path import join_path
from .exceptions import PerformanceWarning
from .utils import is_idx, idx2long, lazyattr
from .utilsextension import (nan_aware_gt, nan_aware_ge,
nan_aware_lt, nan_aware_le,
bisect_left, bisect_right)
from .lrucacheextension import ObjectCache
from six.moves import range
# default version for INDEX objects
# obversion = "1.0" # Version of indexes in PyTables 1.x series
# obversion = "2.0" # Version of indexes in PyTables Pro 2.0 series
obversion = "2.1" # Version of indexes in PyTables Pro 2.1 and up series,
# including the join 2.3 Std + Pro version
debug = False
# debug = True # Uncomment this for printing sizes purposes
profile = False
# profile = True # Uncomment for profiling
if profile:
from .utils import show_stats
# The default method for sorting
# defsort = "quicksort"
# Changing to mergesort to fix #441
defsort = "mergesort"
# Default policy for automatically updating indexes after a table
# append operation, or automatically reindexing after an
# index-invalidating operation like removing or modifying table rows.
default_auto_index = True
# Keep in sync with ``Table.autoindex`` docstring.
# Default filters used to compress indexes. This is quite fast and
# compression is pretty good.
# Remember to keep these defaults in sync with the docstrings and UG.
default_index_filters = Filters(complevel=1, complib='zlib',
shuffle=True, fletcher32=False)
# Deprecated API
defaultAutoIndex = default_auto_index
defaultIndexFilters = default_index_filters
# The list of types for which an optimised search in cython and C has
# been implemented. Always add here the name of a new optimised type.
opt_search_types = ("int8", "int16", "int32", "int64",
"uint8", "uint16", "uint32", "uint64",
"float32", "float64")
# The upper limit for uint32 ints
max32 = 2**32
def _table_column_pathname_of_index(indexpathname):
names = indexpathname.split("/")
for i, name in enumerate(names):
if name.startswith('_i_'):
break
tablepathname = "/".join(names[:i]) + "/" + name[3:]
colpathname = "/".join(names[i + 1:])
return (tablepathname, colpathname)
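# Illustrative example: an index pathname such as
# '/group/_i_mytable/col1' is resolved by the function above into the
# pair ('/group/mytable', 'col1').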
class Index(NotLoggedMixin, indexesextension.Index, Group):
"""Represents the index of a column in a table.
This class is used to keep the indexing information for columns in a Table
dataset (see :ref:`TableClassDescr`). It is actually a descendant of the
Group class (see :ref:`GroupClassDescr`), with some added functionality. An
Index is always associated with one and only one column in the table.
.. note::
This class is mainly intended for internal use, but some of its
documented attributes and methods may be interesting for the
programmer.
Parameters
----------
parentnode
The parent :class:`Group` object.
.. versionchanged:: 3.0
Renamed from *parentNode* to *parentnode*.
name : str
The name of this node in its parent group.
atom : Atom
An Atom object representing the shape and type of the atomic objects to
be saved. Only scalar atoms are supported.
title
Sets a TITLE attribute of the Index entity.
kind
The desired kind for this index. The 'full' kind specifies a complete
track of the row position (64-bit), while the 'medium', 'light' or
'ultralight' kinds only specify in which chunk the row is (using
32-bit, 16-bit and 8-bit respectively).
optlevel
The desired optimization level for this index.
filters : Filters
An instance of the Filters class that provides information about the
desired I/O filters to be applied during the life of this object.
tmp_dir
The directory for the temporary files.
expectedrows
        Represents a user estimate about the number of row slices that will be
added to the growable dimension in the IndexArray object.
byteorder
The byteorder of the index datasets *on-disk*.
blocksizes
The four main sizes of the compound blocks in index datasets (a low
level parameter).
"""
_c_classid = 'INDEX'
@property
def kind(self):
"The kind of this index."
return {1: 'ultralight', 2: 'light',
4: 'medium', 8: 'full'}[self.indsize]
@property
def filters(self):
"""Filter properties for this index - see Filters in
:ref:`FiltersClassDescr`."""
return self._v_filters
@property
def dirty(self):
"""Whether the index is dirty or not.
Dirty indexes are out of sync with column data, so they exist but they
are not usable.
"""
# If there is no ``DIRTY`` attribute, index should be clean.
return getattr(self._v_attrs, 'DIRTY', False)
@dirty.setter
def dirty(self, dirty):
wasdirty, isdirty = self.dirty, bool(dirty)
self._v_attrs.DIRTY = dirty
# If an *actual* change in dirtiness happens,
# notify the condition cache by setting or removing a nail.
conditioncache = self.table._condition_cache
if not wasdirty and isdirty:
conditioncache.nail()
if wasdirty and not isdirty:
conditioncache.unnail()
@property
def column(self):
"""The Column (see :ref:`ColumnClassDescr`) instance for the indexed
column."""
tablepath, columnpath = _table_column_pathname_of_index(
self._v_pathname)
table = self._v_file._get_node(tablepath)
column = table.cols._g_col(columnpath)
return column
@property
def table(self):
"""Accessor for the `Table` object of this index."""
tablepath, columnpath = _table_column_pathname_of_index(self._v_pathname)
table = self._v_file._get_node(tablepath)
return table
@property
def nblockssuperblock(self):
"The number of blocks in a superblock."
return self.superblocksize // self.blocksize
@property
def nslicesblock(self):
"The number of slices in a block."
return self.blocksize // self.slicesize
@property
def nchunkslice(self):
"The number of chunks in a slice."
return self.slicesize // self.chunksize
@property
def nsuperblocks(self):
"The total number of superblocks in index."
# Last row should not be considered as a superblock
nelements = self.nelements - self.nelementsILR
nblocks = nelements // self.superblocksize
        if nelements % self.superblocksize > 0:
nblocks += 1
return nblocks
@property
def nblocks(self):
"The total number of blocks in index."
# Last row should not be considered as a block
nelements = self.nelements - self.nelementsILR
nblocks = nelements // self.blocksize
if nelements % self.blocksize > 0:
nblocks += 1
return nblocks
@property
def nslices(self):
"The number of complete slices in index."
return self.nelements // self.slicesize
@property
def nchunks(self):
"The number of complete chunks in index."
return self.nelements // self.chunksize
@property
def shape(self):
"The shape of this index (in slices and elements)."
return (self.nrows, self.slicesize)
@property
def temp_required(self):
"Whether a temporary file for indexes is required or not."
return self.indsize > 1 and self.optlevel > 0 and self.table.nrows > self.slicesize
@property
def want_complete_sort(self):
"Whether we should try to build a completely sorted index or not."
return self.indsize == 8 and self.optlevel == 9
@property
def is_csi(self):
"""Whether the index is completely sorted or not.
.. versionchanged:: 3.0
The *is_CSI* property has been renamed into *is_csi*.
"""
if self.nelements == 0:
# An index with 0 indexed elements is not a CSI one (by definition)
return False
if self.indsize < 8:
# An index that is not full cannot be completely sorted
return False
# Try with the 'is_csi' attribute
if 'is_csi' in self._v_attrs:
return self._v_attrs.is_csi
# If not, then compute the overlaps manually
# (the attribute 'is_csi' will be set there)
self.compute_overlaps(self, None, False)
return self.noverlaps == 0
@lazyattr
def nrowsinchunk(self):
"""The number of rows that fits in a *table* chunk."""
return self.table.chunkshape[0]
@lazyattr
def lbucket(self):
"""Return the length of a bucket based index type."""
# Avoid to set a too large lbucket size (mainly useful for tests)
lbucket = min(self.nrowsinchunk, self.chunksize)
if self.indsize == 1:
# For ultra-light, we will never have to keep track of a
# bucket outside of a slice.
maxnb = 2**8
if self.slicesize > maxnb * lbucket:
lbucket = int(math.ceil(float(self.slicesize) / maxnb))
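            # Illustrative example: with slicesize == 131072 and an
            # initial lbucket of 256, 256 * 256 < 131072 triggers the
            # enlargement and lbucket becomes ceil(131072 / 256) == 512.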
elif self.indsize == 2:
# For light, we will never have to keep track of a
# bucket outside of a block.
maxnb = 2**16
if self.blocksize > maxnb * lbucket:
lbucket = int(math.ceil(float(self.blocksize) / maxnb))
else:
# For medium and full indexes there should not be a need to
# increase lbucket
pass
return lbucket
def __init__(self, parentnode, name,
atom=None, title="",
kind=None,
optlevel=None,
filters=None,
tmp_dir=None,
expectedrows=0,
byteorder=None,
blocksizes=None,
new=True):
self._v_version = None
"""The object version of this index."""
self.optlevel = optlevel
"""The optimization level for this index."""
self.tmp_dir = tmp_dir
"""The directory for the temporary files."""
self.expectedrows = expectedrows
"""The expected number of items of index arrays."""
if byteorder in ["little", "big"]:
self.byteorder = byteorder
else:
self.byteorder = sys.byteorder
"""The byteorder of the index datasets."""
if atom is not None:
self.dtype = atom.dtype.base
self.type = atom.type
"""The datatypes to be stored by the sorted index array."""
############### Important note ###########################
# The datatypes saved as index values are NumPy native
# types, so we get rid of type metainfo like Time* or Enum*
# that belongs to HDF5 types (actually, this metainfo is
# not needed for sorting and looking-up purposes).
##########################################################
indsize = {
'ultralight': 1, 'light': 2, 'medium': 4, 'full': 8}[kind]
assert indsize in (1, 2, 4, 8), "indsize should be 1, 2, 4 or 8!"
self.indsize = indsize
"""The itemsize for the indices part of the index."""
self.nrows = None
"""The total number of slices in the index."""
self.nelements = None
"""The number of currently indexed rows for this column."""
self.blocksizes = blocksizes
"""The four main sizes of the compound blocks (if specified)."""
self.dirtycache = True
"""Dirty cache (for ranges, bounds & sorted) flag."""
self.superblocksize = None
"""Size of the superblock for this index."""
self.blocksize = None
"""Size of the block for this index."""
self.slicesize = None
"""Size of the slice for this index."""
self.chunksize = None
"""Size of the chunk for this index."""
self.tmpfilename = None
"""Filename for temporary bounds."""
self.opt_search_types = opt_search_types
"""The types for which and optimized search has been implemented."""
self.noverlaps = -1
"""The number of overlaps in an index. 0 means a completely
sorted index. -1 means that this number is not computed yet."""
self.tprof = 0
"""Time counter for benchmarking purposes."""
from .file import open_file
self._openFile = open_file
"""The `open_file()` function, to avoid a circular import."""
super(Index, self).__init__(parentnode, name, title, new, filters)
def _g_post_init_hook(self):
if self._v_new:
# The version for newly created indexes
self._v_version = obversion
super(Index, self)._g_post_init_hook()
# Index arrays must only be created for new indexes
if not self._v_new:
idxversion = self._v_version
# Set-up some variables from info on disk and return
attrs = self._v_attrs
# Coerce NumPy scalars to Python scalars in order
# to avoid undesired upcasting operations.
self.superblocksize = int(attrs.superblocksize)
self.blocksize = int(attrs.blocksize)
self.slicesize = int(attrs.slicesize)
self.chunksize = int(attrs.chunksize)
self.blocksizes = (self.superblocksize, self.blocksize,
self.slicesize, self.chunksize)
self.optlevel = int(attrs.optlevel)
sorted = self.sorted
indices = self.indices
self.dtype = sorted.atom.dtype
self.type = sorted.atom.type
self.indsize = indices.atom.itemsize
# Some sanity checks for slicesize, chunksize and indsize
assert self.slicesize == indices.shape[1], "Wrong slicesize"
assert self.chunksize == indices._v_chunkshape[
1], "Wrong chunksize"
assert self.indsize in (1, 2, 4, 8), "Wrong indices itemsize"
if idxversion > "2.0":
self.reduction = int(attrs.reduction)
nelementsSLR = int(self.sortedLR.attrs.nelements)
nelementsILR = int(self.indicesLR.attrs.nelements)
else:
self.reduction = 1
nelementsILR = self.indicesLR[-1]
nelementsSLR = nelementsILR
self.nrows = sorted.nrows
self.nelements = self.nrows * self.slicesize + nelementsILR
self.nelementsSLR = nelementsSLR
self.nelementsILR = nelementsILR
if nelementsILR > 0:
self.nrows += 1
# Get the bounds as a cache (this has to remain here!)
rchunksize = self.chunksize // self.reduction
nboundsLR = (nelementsSLR - 1) // rchunksize
if nboundsLR < 0:
nboundsLR = 0 # correction for -1 bounds
nboundsLR += 2 # bounds + begin + end
# All bounds values (+begin + end) are at the end of sortedLR
self.bebounds = self.sortedLR[
nelementsSLR:nelementsSLR + nboundsLR]
return
# The index is new. Initialize the values
self.nrows = 0
self.nelements = 0
self.nelementsSLR = 0
self.nelementsILR = 0
# The atom
atom = Atom.from_dtype(self.dtype)
# The filters
filters = self.filters
# Compute the superblocksize, blocksize, slicesize and chunksize values
# (in case these parameters haven't been passed to the constructor)
if self.blocksizes is None:
self.blocksizes = calc_chunksize(
self.expectedrows, self.optlevel, self.indsize)
(self.superblocksize, self.blocksize,
self.slicesize, self.chunksize) = self.blocksizes
if debug:
print("blocksizes:", self.blocksizes)
# Compute the reduction level
self.reduction = get_reduction_level(
self.indsize, self.optlevel, self.slicesize, self.chunksize)
rchunksize = self.chunksize // self.reduction
rslicesize = self.slicesize // self.reduction
# Save them on disk as attributes
self._v_attrs.superblocksize = numpy.uint64(self.superblocksize)
self._v_attrs.blocksize = numpy.uint64(self.blocksize)
self._v_attrs.slicesize = numpy.uint32(self.slicesize)
self._v_attrs.chunksize = numpy.uint32(self.chunksize)
# Save the optlevel as well
self._v_attrs.optlevel = self.optlevel
# Save the reduction level
self._v_attrs.reduction = self.reduction
# Create the IndexArray for sorted values
sorted = IndexArray(self, 'sorted', atom, "Sorted Values",
filters, self.byteorder)
# Create the IndexArray for index values
IndexArray(self, 'indices', UIntAtom(itemsize=self.indsize),
"Number of chunk in table", filters, self.byteorder)
# Create the cache for range values (1st order cache)
CacheArray(self, 'ranges', atom, (0, 2), "Range Values", filters,
self.expectedrows // self.slicesize,
byteorder=self.byteorder)
# median ranges
EArray(self, 'mranges', atom, (0,), "Median ranges", filters,
byteorder=self.byteorder, _log=False)
# Create the cache for boundary values (2nd order cache)
nbounds_inslice = (rslicesize - 1) // rchunksize
CacheArray(self, 'bounds', atom, (0, nbounds_inslice),
"Boundary Values", filters, self.nchunks,
(1, nbounds_inslice), byteorder=self.byteorder)
# begin, end & median bounds (only for numerical types)
EArray(self, 'abounds', atom, (0,), "Start bounds", filters,
byteorder=self.byteorder, _log=False)
EArray(self, 'zbounds', atom, (0,), "End bounds", filters,
byteorder=self.byteorder, _log=False)
EArray(self, 'mbounds', atom, (0,), "Median bounds", filters,
byteorder=self.byteorder, _log=False)
# Create the Array for last (sorted) row values + bounds
shape = (rslicesize + 2 + nbounds_inslice,)
sortedLR = LastRowArray(self, 'sortedLR', atom, shape,
"Last Row sorted values + bounds",
filters, (rchunksize,),
byteorder=self.byteorder)
# Create the Array for the number of chunk in last row
shape = (self.slicesize,) # enough for indexes and length
indicesLR = LastRowArray(self, 'indicesLR',
UIntAtom(itemsize=self.indsize),
shape, "Last Row indices",
filters, (self.chunksize,),
byteorder=self.byteorder)
# The number of elements in LR will be initialized here
sortedLR.attrs.nelements = 0
indicesLR.attrs.nelements = 0
# All bounds values (+begin + end) are uninitialized in creation time
self.bebounds = None
# The starts and lengths initialization
self.starts = numpy.empty(shape=self.nrows, dtype=numpy.int32)
"""Where the values fulfiling conditions starts for every slice."""
self.lengths = numpy.empty(shape=self.nrows, dtype=numpy.int32)
"""Lengths of the values fulfilling conditions for every slice."""
# Finally, create a temporary file for indexes if needed
if self.temp_required:
self.create_temp()
def initial_append(self, xarr, nrow, reduction):
"""Compute an initial indices arrays for data to be indexed."""
if profile:
tref = time()
if profile:
show_stats("Entering initial_append", tref)
arr = xarr.pop()
indsize = self.indsize
slicesize = self.slicesize
nelementsILR = self.nelementsILR
if profile:
show_stats("Before creating idx", tref)
if indsize == 8:
idx = numpy.arange(0, len(arr), dtype="uint64") + nrow * slicesize
elif indsize == 4:
# For medium (32-bit) all the rows in tables should be
# directly reachable. But as len(arr) < 2**31, we can
# choose uint32 for representing indices. In this way, we
# consume far less memory during the keysort process. The
# offset will be added in self.final_idx32() later on.
#
# This optimization also prevents the values in LR to
# participate in the ``swap_chunks`` process, and this is
# the main reason to not allow the medium indexes to create
# completely sorted indexes. However, I don't find this to
# be a big limitation, as probably fully indexes are much
# more suitable for producing completely sorted indexes
# because in this case the indices part is usable for
            # getting the reverse indices of the index, and I foresee
# this to be a common requirement in many operations (for
# example, in table sorts).
#
# F. Alted 2008-09-15
idx = numpy.arange(0, len(arr), dtype="uint32")
else:
idx = numpy.empty(len(arr), "uint%d" % (indsize * 8))
lbucket = self.lbucket
# Fill the idx with the bucket indices
offset = lbucket - ((nrow * (slicesize % lbucket)) % lbucket)
idx[0:offset] = 0
for i in range(offset, slicesize, lbucket):
idx[i:i + lbucket] = (i + lbucket - 1) // lbucket
if indsize == 2:
# Add a second offset in this case
# First normalize the number of rows
offset2 = (nrow % self.nslicesblock) * slicesize // lbucket
idx += offset2
# Add the last row at the beginning of arr & idx (if needed)
if (indsize == 8 and nelementsILR > 0):
# It is possible that the values in LR are already sorted.
# Fetch them and override existing values in arr and idx.
assert len(arr) > nelementsILR
self.read_slice_lr(self.sortedLR, arr[:nelementsILR])
self.read_slice_lr(self.indicesLR, idx[:nelementsILR])
# In-place sorting
if profile:
show_stats("Before keysort", tref)
indexesextension.keysort(arr, idx)
larr = arr[-1]
if reduction > 1:
# It's important to do a copy() here in order to ensure that
# sorted._append() will receive a contiguous array.
if profile:
show_stats("Before reduction", tref)
reduc = arr[::reduction].copy()
if profile:
show_stats("After reduction", tref)
arr = reduc
if profile:
show_stats("After arr <-- reduc", tref)
        # A completely sorted index is no longer possible after
        # appending to an index that already has one slice.
if nrow > 0:
self._v_attrs.is_csi = False
if profile:
show_stats("Exiting initial_append", tref)
return larr, arr, idx
def final_idx32(self, idx, offset):
"""Perform final operations in 32-bit indices."""
if profile:
tref = time()
if profile:
show_stats("Entering final_idx32", tref)
# Do an upcast first in order to add the offset.
idx = idx.astype('uint64')
idx += offset
# The next partition is valid up to table sizes of
# 2**30 * 2**18 = 2**48 bytes, that is, 256 Tera-elements,
# which should be a safe figure, at least for a while.
idx //= self.lbucket
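        # Illustrative example: with offset == 0 and lbucket == 256, raw
        # row number 1000 maps to bucket 1000 // 256 == 3, which fits
        # back into a uint32.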
# After the division, we can downsize the indexes to 'uint32'
idx = idx.astype('uint32')
if profile:
show_stats("Exiting final_idx32", tref)
return idx
def append(self, xarr, update=False):
"""Append the array to the index objects."""
if profile:
tref = time()
if profile:
show_stats("Entering append", tref)
if not update and self.temp_required:
where = self.tmp
# The reduction will take place *after* the optimization process
reduction = 1
else:
where = self
reduction = self.reduction
sorted = where.sorted
indices = where.indices
ranges = where.ranges
mranges = where.mranges
bounds = where.bounds
mbounds = where.mbounds
abounds = where.abounds
zbounds = where.zbounds
sortedLR = where.sortedLR
indicesLR = where.indicesLR
nrows = sorted.nrows # before sorted.append()
larr, arr, idx = self.initial_append(xarr, nrows, reduction)
# Save the sorted array
sorted.append(arr.reshape(1, arr.size))
cs = self.chunksize // reduction
ncs = self.nchunkslice
# Save ranges & bounds
ranges.append([[arr[0], larr]])
bounds.append([arr[cs::cs]])
abounds.append(arr[0::cs])
zbounds.append(arr[cs - 1::cs])
# Compute the medians
smedian = arr[cs // 2::cs]
mbounds.append(smedian)
mranges.append([smedian[ncs // 2]])
if profile:
show_stats("Before deleting arr & smedian", tref)
del arr, smedian # delete references
if profile:
show_stats("After deleting arr & smedian", tref)
# Now that arr is gone, we can upcast the indices and add the offset
if self.indsize == 4:
idx = self.final_idx32(idx, nrows * self.slicesize)
indices.append(idx.reshape(1, idx.size))
if profile:
show_stats("Before deleting idx", tref)
del idx
# Update counters after a successful append
self.nrows = nrows + 1
self.nelements = self.nrows * self.slicesize
self.nelementsSLR = 0 # reset the counter of the last row index to 0
self.nelementsILR = 0 # reset the counter of the last row index to 0
# The number of elements will be saved as an attribute.
        # This is necessary so that the LR arrays can remember their
        # values after a possible node preemption/reload.
sortedLR.attrs.nelements = self.nelementsSLR
indicesLR.attrs.nelements = self.nelementsILR
self.dirtycache = True # the cache is dirty now
if profile:
show_stats("Exiting append", tref)
def append_last_row(self, xarr, update=False):
"""Append the array to the last row index objects."""
if profile:
tref = time()
if profile:
show_stats("Entering appendLR", tref)
# compute the elements in the last row sorted & bounds array
nrows = self.nslices
if not update and self.temp_required:
where = self.tmp
# The reduction will take place *after* the optimization process
reduction = 1
else:
where = self
reduction = self.reduction
indicesLR = where.indicesLR
sortedLR = where.sortedLR
larr, arr, idx = self.initial_append(xarr, nrows, reduction)
nelementsSLR = len(arr)
nelementsILR = len(idx)
# Build the cache of bounds
rchunksize = self.chunksize // reduction
self.bebounds = numpy.concatenate((arr[::rchunksize], [larr]))
# The number of elements will be saved as an attribute
sortedLR.attrs.nelements = nelementsSLR
indicesLR.attrs.nelements = nelementsILR
# Save the number of elements, bounds and sorted values
# at the end of the sorted array
offset2 = len(self.bebounds)
sortedLR[nelementsSLR:nelementsSLR + offset2] = self.bebounds
sortedLR[:nelementsSLR] = arr
del arr
# Now that arr is gone, we can upcast the indices and add the offset
if self.indsize == 4:
idx = self.final_idx32(idx, nrows * self.slicesize)
# Save the reverse index array
indicesLR[:len(idx)] = idx
del idx
# Update counters after a successful append
self.nrows = nrows + 1
self.nelements = nrows * self.slicesize + nelementsILR
self.nelementsILR = nelementsILR
self.nelementsSLR = nelementsSLR
self.dirtycache = True # the cache is dirty now
if profile:
show_stats("Exiting appendLR", tref)
def optimize(self, verbose=False):
"""Optimize an index so as to allow faster searches.
verbose
If True, messages about the progress of the
optimization process are printed out.
"""
if not self.temp_required:
return
if verbose:
self.verbose = True
else:
self.verbose = debug
# Initialize last_tover and last_nover
self.last_tover = 0
self.last_nover = 0
# Compute the correct optimizations for current optim level
opts = calcoptlevels(self.nblocks, self.optlevel, self.indsize)
optmedian, optstarts, optstops, optfull = opts
if debug:
print("optvalues:", opts)
self.create_temp2()
# Start the optimization process
while True:
if optfull:
for niter in range(optfull):
if self.swap('chunks', 'median'):
break
if self.nblocks > 1:
# Swap slices only in the case that we have
# several blocks
if self.swap('slices', 'median'):
break
if self.swap('chunks', 'median'):
break
if self.swap('chunks', 'start'):
break
if self.swap('chunks', 'stop'):
break
else:
if optmedian:
if self.swap('chunks', 'median'):
break
if optstarts:
if self.swap('chunks', 'start'):
break
if optstops:
if self.swap('chunks', 'stop'):
break
break # If we reach this, exit the loop
# Check if we require a complete sort. Important: this step
# should be carried out *after* the optimization process has
# been completed (this is to guarantee that the complete sort
# does not take too much memory).
if self.want_complete_sort:
if self.noverlaps > 0:
self.do_complete_sort()
# Check that we have effectively achieved the complete sort
if self.noverlaps > 0:
warnings.warn(
"OPSI was not able to achieve a completely sorted index."
" Please report this to the authors.", UserWarning)
# Close and delete the temporal optimization index file
self.cleanup_temp()
return
def do_complete_sort(self):
"""Bring an already optimized index into a complete sorted state."""
if self.verbose:
t1 = time()
c1 = clock()
ss = self.slicesize
tmp = self.tmp
ranges = tmp.ranges[:]
nslices = self.nslices
nelementsLR = self.nelementsILR
if nelementsLR > 0:
# Add the ranges corresponding to the last row
rangeslr = numpy.array([self.bebounds[0], self.bebounds[-1]])
ranges = numpy.concatenate((ranges, [rangeslr]))
nslices += 1
sorted = tmp.sorted
indices = tmp.indices
sortedLR = tmp.sortedLR
indicesLR = tmp.indicesLR
sremain = numpy.array([], dtype=self.dtype)
iremain = numpy.array([], dtype='u%d' % self.indsize)
starts = numpy.zeros(shape=nslices, dtype=numpy.int_)
for i in range(nslices):
# Find the overlapping elements for slice i
sover = numpy.array([], dtype=self.dtype)
iover = numpy.array([], dtype='u%d' % self.indsize)
prev_end = ranges[i, 1]
for j in range(i + 1, nslices):
stj = starts[j]
if ((j < self.nslices and stj == ss) or
(j == self.nslices and stj == nelementsLR)):
                    # This slice has already been dealt with
continue
if j < self.nslices:
assert stj < ss, \
"Two slices cannot overlap completely at this stage!"
next_beg = sorted[j, stj]
else:
assert stj < nelementsLR, \
"Two slices cannot overlap completely at this stage!"
next_beg = sortedLR[stj]
next_end = ranges[j, 1]
if prev_end > next_end:
# Complete overlapping case
if j < self.nslices:
sover = numpy.concatenate((sover, sorted[j, stj:]))
iover = numpy.concatenate((iover, indices[j, stj:]))
starts[j] = ss
else:
n = nelementsLR
sover = numpy.concatenate((sover, sortedLR[stj:n]))
iover = numpy.concatenate((iover, indicesLR[stj:n]))
starts[j] = nelementsLR
elif prev_end > next_beg:
idx = self.search_item_lt(tmp, prev_end, j, ranges[j], stj)
if j < self.nslices:
sover = numpy.concatenate((sover, sorted[j, stj:idx]))
iover = numpy.concatenate((iover, indices[j, stj:idx]))
else:
sover = numpy.concatenate((sover, sortedLR[stj:idx]))
iover = numpy.concatenate((iover, indicesLR[stj:idx]))
starts[j] = idx
# Build the extended slices to sort out
if i < self.nslices:
ssorted = numpy.concatenate(
(sremain, sorted[i, starts[i]:], sover))
sindices = numpy.concatenate(
(iremain, indices[i, starts[i]:], iover))
else:
ssorted = numpy.concatenate(
(sremain, sortedLR[starts[i]:nelementsLR], sover))
sindices = numpy.concatenate(
(iremain, indicesLR[starts[i]:nelementsLR], iover))
# Sort the extended slices
indexesextension.keysort(ssorted, sindices)
# Save the first elements of extended slices in the slice i
if i < self.nslices:
sorted[i] = ssorted[:ss]
indices[i] = sindices[:ss]
# Update caches for this slice
self.update_caches(i, ssorted[:ss])
# Save the remaining values in a separate array
send = len(sover) + len(sremain)
sremain = ssorted[ss:ss + send]
iremain = sindices[ss:ss + send]
else:
# Still some elements remain for the last row
n = len(ssorted)
assert n == nelementsLR
send = 0
sortedLR[:n] = ssorted
indicesLR[:n] = sindices
# Update the caches for last row
sortedlr = sortedLR[:nelementsLR]
bebounds = numpy.concatenate(
(sortedlr[::self.chunksize], [sortedlr[-1]]))
sortedLR[nelementsLR:nelementsLR + len(bebounds)] = bebounds
self.bebounds = bebounds
# Verify that we have dealt with all the remaining values
assert send == 0
# Compute the overlaps in order to verify that we have achieved
# a complete sort. This has to be executed always (and not only
# in verbose mode!).
self.compute_overlaps(self.tmp, "do_complete_sort()", self.verbose)
if self.verbose:
t = round(time() - t1, 4)
c = round(clock() - c1, 4)
print("time: %s. clock: %s" % (t, c))
def swap(self, what, mode=None):
"""Swap chunks or slices using a certain bounds reference."""
# Thresholds for avoiding continuing the optimization
# thnover = 4 * self.slicesize # minimum number of overlapping
# # elements
thnover = 40
thmult = 0.1 # minimum ratio of multiplicity (a 10%)
        thtover = 0.01  # minimum overlapping index for slices (a 1%)
if self.verbose:
t1 = time()
c1 = clock()
if what == "chunks":
self.swap_chunks(mode)
elif what == "slices":
self.swap_slices(mode)
if mode:
message = "swap_%s(%s)" % (what, mode)
else:
message = "swap_%s" % (what,)
(nover, mult, tover) = self.compute_overlaps(
self.tmp, message, self.verbose)
rmult = len(mult.nonzero()[0]) / float(len(mult))
if self.verbose:
t = round(time() - t1, 4)
c = round(clock() - c1, 4)
print("time: %s. clock: %s" % (t, c))
# Check that entropy is actually decreasing
if what == "chunks" and self.last_tover > 0. and self.last_nover > 0:
tover_var = (self.last_tover - tover) / self.last_tover
nover_var = (self.last_nover - nover) / self.last_nover
if tover_var < 0.05 and nover_var < 0.05:
# Less than a 5% of improvement is too few
return True
self.last_tover = tover
self.last_nover = nover
        # Check if some threshold has been met
if nover < thnover:
return True
if rmult < thmult:
return True
# Additional check for the overlap ratio
if tover >= 0. and tover < thtover:
return True
return False
def create_temp(self):
"""Create some temporary objects for slice sorting purposes."""
# The index will be dirty during the index optimization process
self.dirty = True
# Build the name of the temporary file
fd, self.tmpfilename = tempfile.mkstemp(
".tmp", "pytables-", self.tmp_dir)
# Close the file descriptor so as to avoid leaks
os.close(fd)
# Create the proper PyTables file
self.tmpfile = self._openFile(self.tmpfilename, "w")
self.tmp = tmp = self.tmpfile.root
cs = self.chunksize
ss = self.slicesize
filters = self.filters
# temporary sorted & indices arrays
shape = (0, ss)
atom = Atom.from_dtype(self.dtype)
EArray(tmp, 'sorted', atom, shape,
"Temporary sorted", filters, chunkshape=(1, cs))
EArray(tmp, 'indices', UIntAtom(itemsize=self.indsize), shape,
"Temporary indices", filters, chunkshape=(1, cs))
# temporary bounds
nbounds_inslice = (ss - 1) // cs
shape = (0, nbounds_inslice)
EArray(tmp, 'bounds', atom, shape, "Temp chunk bounds",
filters, chunkshape=(cs, nbounds_inslice))
shape = (0,)
EArray(tmp, 'abounds', atom, shape, "Temp start bounds",
filters, chunkshape=(cs,))
EArray(tmp, 'zbounds', atom, shape, "Temp end bounds",
filters, chunkshape=(cs,))
EArray(tmp, 'mbounds', atom, shape, "Median bounds",
filters, chunkshape=(cs,))
# temporary ranges
EArray(tmp, 'ranges', atom, (0, 2),
"Temporary range values", filters, chunkshape=(cs, 2))
EArray(tmp, 'mranges', atom, (0,),
"Median ranges", filters, chunkshape=(cs,))
# temporary last row (sorted)
shape = (ss + 2 + nbounds_inslice,)
CArray(tmp, 'sortedLR', atom, shape,
"Temp Last Row sorted values + bounds",
filters, chunkshape=(cs,))
# temporary last row (indices)
shape = (ss,)
CArray(tmp, 'indicesLR',
UIntAtom(itemsize=self.indsize),
shape, "Temp Last Row indices",
filters, chunkshape=(cs,))
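    # Worked example for the temporary layout above (hypothetical sizes):
    # with ss = 6 and cs = 2 there are nbounds_inslice = (6 - 1) // 2 = 2
    # inner chunk boundaries per slice, and the sortedLR buffer holds
    # ss + 2 + nbounds_inslice = 10 items (values plus begin/end bounds).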
def create_temp2(self):
"""Create some temporary objects for slice sorting purposes."""
        # The algorithms for doing the swap can be optimized so that it
        # is only necessary to create temporaries for keeping the
        # contents of a single superblock.
# F. Alted 2007-01-03
cs = self.chunksize
ss = self.slicesize
filters = self.filters
# temporary sorted & indices arrays
shape = (self.nslices, ss)
atom = Atom.from_dtype(self.dtype)
tmp = self.tmp
CArray(tmp, 'sorted2', atom, shape,
"Temporary sorted 2", filters, chunkshape=(1, cs))
CArray(tmp, 'indices2', UIntAtom(itemsize=self.indsize), shape,
"Temporary indices 2", filters, chunkshape=(1, cs))
# temporary bounds
nbounds_inslice = (ss - 1) // cs
shape = (self.nslices, nbounds_inslice)
CArray(tmp, 'bounds2', atom, shape, "Temp chunk bounds 2",
filters, chunkshape=(cs, nbounds_inslice))
shape = (self.nchunks,)
CArray(tmp, 'abounds2', atom, shape, "Temp start bounds 2",
filters, chunkshape=(cs,))
CArray(tmp, 'zbounds2', atom, shape, "Temp end bounds 2",
filters, chunkshape=(cs,))
CArray(tmp, 'mbounds2', atom, shape, "Median bounds 2",
filters, chunkshape=(cs,))
# temporary ranges
CArray(tmp, 'ranges2', atom, (self.nslices, 2),
"Temporary range values 2", filters, chunkshape=(cs, 2))
CArray(tmp, 'mranges2', atom, (self.nslices,),
"Median ranges 2", filters, chunkshape=(cs,))
def cleanup_temp(self):
"""Copy the data and delete the temporaries for sorting purposes."""
if self.verbose:
print("Copying temporary data...")
# tmp -> index
reduction = self.reduction
cs = self.chunksize // reduction
ncs = self.nchunkslice
tmp = self.tmp
for i in range(self.nslices):
# Copy sorted & indices slices
sorted = tmp.sorted[i][::reduction].copy()
self.sorted.append(sorted.reshape(1, sorted.size))
# Compute ranges
self.ranges.append([[sorted[0], sorted[-1]]])
# Compute chunk bounds
self.bounds.append([sorted[cs::cs]])
# Compute start, stop & median bounds and ranges
self.abounds.append(sorted[0::cs])
self.zbounds.append(sorted[cs - 1::cs])
smedian = sorted[cs // 2::cs]
self.mbounds.append(smedian)
self.mranges.append([smedian[ncs // 2]])
del sorted, smedian # delete references
# Now that sorted is gone, we can copy the indices
indices = tmp.indices[i]
self.indices.append(indices.reshape(1, indices.size))
# Now it is the last row turn (if needed)
if self.nelementsSLR > 0:
# First, the sorted values
sortedLR = self.sortedLR
indicesLR = self.indicesLR
nelementsLR = self.nelementsILR
sortedlr = tmp.sortedLR[:nelementsLR][::reduction].copy()
nelementsSLR = len(sortedlr)
sortedLR[:nelementsSLR] = sortedlr
# Now, the bounds
self.bebounds = numpy.concatenate((sortedlr[::cs], [sortedlr[-1]]))
offset2 = len(self.bebounds)
sortedLR[nelementsSLR:nelementsSLR + offset2] = self.bebounds
# Finally, the indices
indicesLR[:] = tmp.indicesLR[:]
# Update the number of (reduced) sorted elements
self.nelementsSLR = nelementsSLR
# The number of elements will be saved as an attribute
self.sortedLR.attrs.nelements = self.nelementsSLR
self.indicesLR.attrs.nelements = self.nelementsILR
if self.verbose:
print("Deleting temporaries...")
self.tmp = None
self.tmpfile.close()
os.remove(self.tmpfilename)
self.tmpfilename = None
# The optimization process has finished, and the index is ok now
self.dirty = False
# ...but the memory data cache is dirty now
self.dirtycache = True
def get_neworder(self, neworder, src_disk, tmp_disk,
lastrow, nslices, offset, dtype):
"""Get sorted & indices values in new order."""
cs = self.chunksize
ncs = ncs2 = self.nchunkslice
self_nslices = self.nslices
tmp = numpy.empty(shape=self.slicesize, dtype=dtype)
for i in range(nslices):
ns = offset + i
if ns == self_nslices:
# The number of complete chunks in the last row
ncs2 = self.nelementsILR // cs
# Get slices in new order
for j in range(ncs2):
idx = neworder[i * ncs + j]
ins = idx // ncs
inc = (idx - ins * ncs) * cs
ins += offset
nc = j * cs
if ins == self_nslices:
tmp[nc:nc + cs] = lastrow[inc:inc + cs]
else:
tmp[nc:nc + cs] = src_disk[ins, inc:inc + cs]
if ns == self_nslices:
                # Copy the reordered complete chunks back into the last row
lastrow[:ncs2 * cs] = tmp[:ncs2 * cs]
# The elements in the last chunk of the last row will
# participate in the global reordering later on, during
# the phase of sorting of *two* slices at a time
# (including the last row slice, see
# self.reorder_slices()). The caches for last row will
# be updated in self.reorder_slices() too.
# F. Altet 2008-08-25
else:
tmp_disk[ns] = tmp
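    # Worked example of the chunk index decomposition used above
    # (hypothetical values): with ncs = 4 chunks per slice and cs = 2,
    # a block-wide chunk index idx = 9 yields ins = 9 // 4 = 2 and
    # inc = (9 - 2 * 4) * 2 = 2, i.e. the second chunk of the third
    # slice of the block.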
def swap_chunks(self, mode="median"):
"""Swap & reorder the different chunks in a block."""
boundsnames = {
'start': 'abounds', 'stop': 'zbounds', 'median': 'mbounds'}
tmp = self.tmp
sorted = tmp.sorted
indices = tmp.indices
tmp_sorted = tmp.sorted2
tmp_indices = tmp.indices2
sortedLR = tmp.sortedLR
indicesLR = tmp.indicesLR
cs = self.chunksize
ncs = self.nchunkslice
nsb = self.nslicesblock
ncb = ncs * nsb
ncb2 = ncb
boundsobj = tmp._f_get_child(boundsnames[mode])
can_cross_bbounds = (self.indsize == 8 and self.nelementsILR > 0)
for nblock in range(self.nblocks):
# Protection for last block having less chunks than ncb
remainingchunks = self.nchunks - nblock * ncb
if remainingchunks < ncb:
ncb2 = remainingchunks
if ncb2 <= 1:
                # if only zero or one chunk remains, we are done
break
nslices = ncb2 // ncs
bounds = boundsobj[nblock * ncb:nblock * ncb + ncb2]
# Do this only if lastrow elements can cross block boundaries
if (nblock == self.nblocks - 1 and # last block
can_cross_bbounds):
nslices += 1
ul = self.nelementsILR // cs
bounds = numpy.concatenate((bounds, self.bebounds[:ul]))
sbounds_idx = bounds.argsort(kind=defsort)
offset = nblock * nsb
# Swap sorted and indices following the new order
self.get_neworder(sbounds_idx, sorted, tmp_sorted, sortedLR,
nslices, offset, self.dtype)
self.get_neworder(sbounds_idx, indices, tmp_indices, indicesLR,
nslices, offset, 'u%d' % self.indsize)
# Reorder completely the index at slice level
self.reorder_slices(tmp=True)
def read_slice(self, where, nslice, buffer, start=0):
"""Read a slice from the `where` dataset and put it in `buffer`."""
# Create the buffers for specifying the coordinates
self.startl = numpy.array([nslice, start], numpy.uint64)
self.stopl = numpy.array([nslice + 1, start + buffer.size],
numpy.uint64)
self.stepl = numpy.ones(shape=2, dtype=numpy.uint64)
where._g_read_slice(self.startl, self.stopl, self.stepl, buffer)
def write_slice(self, where, nslice, buffer, start=0):
"""Write a `slice` to the `where` dataset with the `buffer` data."""
self.startl = numpy.array([nslice, start], numpy.uint64)
self.stopl = numpy.array([nslice + 1, start + buffer.size],
numpy.uint64)
self.stepl = numpy.ones(shape=2, dtype=numpy.uint64)
countl = self.stopl - self.startl # (1, self.slicesize)
where._g_write_slice(self.startl, self.stepl, countl, buffer)
# Read version for LastRow
def read_slice_lr(self, where, buffer, start=0):
"""Read a slice from the `where` dataset and put it in `buffer`."""
startl = numpy.array([start], dtype=numpy.uint64)
stopl = numpy.array([start + buffer.size], dtype=numpy.uint64)
stepl = numpy.array([1], dtype=numpy.uint64)
where._g_read_slice(startl, stopl, stepl, buffer)
# Write version for LastRow
def write_sliceLR(self, where, buffer, start=0):
"""Write a slice from the `where` dataset with the `buffer` data."""
startl = numpy.array([start], dtype=numpy.uint64)
countl = numpy.array([start + buffer.size], dtype=numpy.uint64)
stepl = numpy.array([1], dtype=numpy.uint64)
where._g_write_slice(startl, stepl, countl, buffer)
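    # Worked example of the 1-d hyperslab coordinates used by the
    # last-row helpers (hypothetical values): reading with start = 4
    # and a 3-element buffer uses startl = [4], stopl = [7] and
    # stepl = [1], i.e. elements 4, 5 and 6 of the dataset.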
def reorder_slice(self, nslice, sorted, indices, ssorted, sindices,
tmp_sorted, tmp_indices):
"""Copy & reorder the slice in source to final destination."""
ss = self.slicesize
# Load the second part in buffers
self.read_slice(tmp_sorted, nslice, ssorted[ss:])
self.read_slice(tmp_indices, nslice, sindices[ss:])
indexesextension.keysort(ssorted, sindices)
# Write the first part of the buffers to the regular leaves
self.write_slice(sorted, nslice - 1, ssorted[:ss])
self.write_slice(indices, nslice - 1, sindices[:ss])
# Update caches
self.update_caches(nslice - 1, ssorted[:ss])
# Shift the slice in the end to the beginning
ssorted[:ss] = ssorted[ss:]
sindices[:ss] = sindices[ss:]
def update_caches(self, nslice, ssorted):
"""Update the caches for faster lookups."""
cs = self.chunksize
ncs = self.nchunkslice
tmp = self.tmp
# update first & second cache bounds (ranges & bounds)
tmp.ranges[nslice] = ssorted[[0, -1]]
tmp.bounds[nslice] = ssorted[cs::cs]
# update start & stop bounds
tmp.abounds[nslice * ncs:(nslice + 1) * ncs] = ssorted[0::cs]
tmp.zbounds[nslice * ncs:(nslice + 1) * ncs] = ssorted[cs - 1::cs]
# update median bounds
smedian = ssorted[cs // 2::cs]
tmp.mbounds[nslice * ncs:(nslice + 1) * ncs] = smedian
tmp.mranges[nslice] = smedian[ncs // 2]
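    # Worked example for the cache updates above (hypothetical values):
    # with cs = 2, ncs = 3 and ssorted = [1, 2, 3, 4, 5, 6], the slice
    # range becomes [1, 6], the chunk bounds [3, 5], the start bounds
    # [1, 3, 5], the stop bounds [2, 4, 6], the median bounds [2, 4, 6]
    # and the median range smedian[ncs // 2] = 4.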
def reorder_slices(self, tmp):
"""Reorder completely the index at slice level.
This method has to maintain the locality of elements in the
ambit of ``blocks``, i.e. an element of a ``block`` cannot be
sent to another ``block`` during this reordering. This is
*critical* for ``light`` indexes to be able to use this.
This version of reorder_slices is optimized in that *two*
complete slices are taken at a time (including the last row
slice) so as to sort them. Then, each new slice that is read is
put at the end of this two-slice buffer, while the previous one
is moved to the beginning of the buffer. This is in order to
better reduce the entropy of the regular part (i.e. all except
the last row) of the index.
        A secondary effect of this is that it takes at least *twice* the
        memory of a previous version of reorder_slices() that only
        reorders on a slice-by-slice basis. However, as this is more
        efficient than the old version, one can configure the slicesize
        to be smaller, so the memory consumption ends up being similar.
"""
        # ``tmp`` selects whether to read from the secondary (``*2``)
        # temporaries; remember it before rebinding the name to the
        # temporary group itself.
        use_tmp = tmp
        tmp = self.tmp
        sorted = tmp.sorted
        indices = tmp.indices
        if use_tmp:
tmp_sorted = tmp.sorted2
tmp_indices = tmp.indices2
else:
tmp_sorted = tmp.sorted
tmp_indices = tmp.indices
cs = self.chunksize
ss = self.slicesize
nsb = self.blocksize // self.slicesize
nslices = self.nslices
nblocks = self.nblocks
nelementsLR = self.nelementsILR
# Create the buffer for reordering 2 slices at a time
ssorted = numpy.empty(shape=ss * 2, dtype=self.dtype)
sindices = numpy.empty(shape=ss * 2,
dtype=numpy.dtype('u%d' % self.indsize))
if self.indsize == 8:
# Bootstrap the process for reordering
# Read the first slice in buffers
self.read_slice(tmp_sorted, 0, ssorted[:ss])
self.read_slice(tmp_indices, 0, sindices[:ss])
nslice = 0 # Just in case the loop behind executes nothing
            # Loop over the remaining slices in block
for nslice in range(1, sorted.nrows):
self.reorder_slice(nslice, sorted, indices,
ssorted, sindices,
tmp_sorted, tmp_indices)
# End the process (enrolling the lastrow if necessary)
if nelementsLR > 0:
sortedLR = self.tmp.sortedLR
indicesLR = self.tmp.indicesLR
# Shrink the ssorted and sindices arrays to the minimum
ssorted2 = ssorted[:ss + nelementsLR]
sortedlr = ssorted2[ss:]
sindices2 = sindices[:ss + nelementsLR]
indiceslr = sindices2[ss:]
# Read the last row info in the second part of the buffer
self.read_slice_lr(sortedLR, sortedlr)
self.read_slice_lr(indicesLR, indiceslr)
indexesextension.keysort(ssorted2, sindices2)
# Write the second part of the buffers to the lastrow indices
self.write_sliceLR(sortedLR, sortedlr)
self.write_sliceLR(indicesLR, indiceslr)
# Update the caches for last row
bebounds = numpy.concatenate((sortedlr[::cs], [sortedlr[-1]]))
sortedLR[nelementsLR:nelementsLR + len(bebounds)] = bebounds
self.bebounds = bebounds
# Write the first part of the buffers to the regular leaves
self.write_slice(sorted, nslice, ssorted[:ss])
self.write_slice(indices, nslice, sindices[:ss])
# Update caches for this slice
self.update_caches(nslice, ssorted[:ss])
else:
# Iterate over each block. No data should cross block
            # boundaries to avoid addressing problems with short indices.
for nb in range(nblocks):
# Bootstrap the process for reordering
# Read the first slice in buffers
nrow = nb * nsb
self.read_slice(tmp_sorted, nrow, ssorted[:ss])
self.read_slice(tmp_indices, nrow, sindices[:ss])
                # Loop over the remaining slices in block
lrb = nrow + nsb
if lrb > nslices:
lrb = nslices
nslice = nrow # Just in case the loop behind executes nothing
for nslice in range(nrow + 1, lrb):
self.reorder_slice(nslice, sorted, indices,
ssorted, sindices,
tmp_sorted, tmp_indices)
# Write the first part of the buffers to the regular leaves
self.write_slice(sorted, nslice, ssorted[:ss])
self.write_slice(indices, nslice, sindices[:ss])
# Update caches for this slice
self.update_caches(nslice, ssorted[:ss])
def swap_slices(self, mode="median"):
"""Swap slices in a superblock."""
tmp = self.tmp
sorted = tmp.sorted
indices = tmp.indices
tmp_sorted = tmp.sorted2
tmp_indices = tmp.indices2
ncs = self.nchunkslice
nss = self.superblocksize // self.slicesize
nss2 = nss
for sblock in range(self.nsuperblocks):
# Protection for last superblock having less slices than nss
remainingslices = self.nslices - sblock * nss
if remainingslices < nss:
nss2 = remainingslices
if nss2 <= 1:
break
if mode == "start":
ranges = tmp.ranges[sblock * nss:sblock * nss + nss2, 0]
elif mode == "stop":
ranges = tmp.ranges[sblock * nss:sblock * nss + nss2, 1]
elif mode == "median":
ranges = tmp.mranges[sblock * nss:sblock * nss + nss2]
sranges_idx = ranges.argsort(kind=defsort)
# Don't swap the superblock at all if one doesn't need to
ndiff = (sranges_idx != numpy.arange(nss2)).sum() / 2
if ndiff * 50 < nss2:
# The number of slices to rearrange is less than 2.5%,
# so skip the reordering of this superblock
                # (too expensive for such a small improvement)
if self.verbose:
print("skipping reordering of superblock ->", sblock)
continue
ns = sblock * nss2
# Swap sorted and indices slices following the new order
for i in range(nss2):
idx = sranges_idx[i]
# Swap sorted & indices slices
oi = ns + i
oidx = ns + idx
tmp_sorted[oi] = sorted[oidx]
tmp_indices[oi] = indices[oidx]
# Swap start, stop & median ranges
tmp.ranges2[oi] = tmp.ranges[oidx]
tmp.mranges2[oi] = tmp.mranges[oidx]
# Swap chunk bounds
tmp.bounds2[oi] = tmp.bounds[oidx]
# Swap start, stop & median bounds
j = oi * ncs
jn = (oi + 1) * ncs
xj = oidx * ncs
xjn = (oidx + 1) * ncs
tmp.abounds2[j:jn] = tmp.abounds[xj:xjn]
tmp.zbounds2[j:jn] = tmp.zbounds[xj:xjn]
tmp.mbounds2[j:jn] = tmp.mbounds[xj:xjn]
# tmp -> originals
for i in range(nss2):
# Copy sorted & indices slices
oi = ns + i
sorted[oi] = tmp_sorted[oi]
indices[oi] = tmp_indices[oi]
# Copy start, stop & median ranges
tmp.ranges[oi] = tmp.ranges2[oi]
tmp.mranges[oi] = tmp.mranges2[oi]
# Copy chunk bounds
tmp.bounds[oi] = tmp.bounds2[oi]
# Copy start, stop & median bounds
j = oi * ncs
jn = (oi + 1) * ncs
tmp.abounds[j:jn] = tmp.abounds2[j:jn]
tmp.zbounds[j:jn] = tmp.zbounds2[j:jn]
tmp.mbounds[j:jn] = tmp.mbounds2[j:jn]
def search_item_lt(self, where, item, nslice, limits, start=0):
"""Search a single item in a specific sorted slice."""
        # This method only works under the assumption that the item
        # *is to be found* in the nslice.
assert nan_aware_lt(limits[0], item) and nan_aware_le(item, limits[1])
cs = self.chunksize
ss = self.slicesize
nelementsLR = self.nelementsILR
bstart = start // cs
# Find the chunk
if nslice < self.nslices:
nchunk = bisect_left(where.bounds[nslice], item, bstart)
else:
# We need to subtract 1 chunk here because bebounds
# has a leading value
nchunk = bisect_left(self.bebounds, item, bstart) - 1
assert nchunk >= 0
# Find the element in chunk
pos = nchunk * cs
if nslice < self.nslices:
pos += bisect_left(where.sorted[nslice, pos:pos + cs], item)
assert pos <= ss
else:
end = pos + cs
if end > nelementsLR:
end = nelementsLR
pos += bisect_left(self.sortedLR[pos:end], item)
assert pos <= nelementsLR
assert pos > 0
return pos
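    # Worked example of the two-level lookup above (hypothetical values):
    # with cs = 3, bounds = [10, 20] and item = 15, bisect_left picks
    # nchunk = 1, so the element is searched within positions [3, 6) of
    # the sorted slice and pos is the leftmost position where 15 fits.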
def compute_overlaps_finegrain(self, where, message, verbose):
"""Compute some statistics about overlaping of slices in index.
It returns the following info:
noverlaps : int
        The total number of elements that overlap in the index.
multiplicity : array of int
The number of times that a concrete slice overlaps with any other.
toverlap : float
        An overlap index: the sum of the values in segment slices that
        overlap divided by the entire range of values. This index is only
computed for numerical types.
"""
ss = self.slicesize
ranges = where.ranges[:]
sorted = where.sorted
sortedLR = where.sortedLR
nslices = self.nslices
nelementsLR = self.nelementsILR
if nelementsLR > 0:
# Add the ranges corresponding to the last row
rangeslr = numpy.array([self.bebounds[0], self.bebounds[-1]])
ranges = numpy.concatenate((ranges, [rangeslr]))
nslices += 1
soverlap = 0.
toverlap = -1.
multiplicity = numpy.zeros(shape=nslices, dtype="int_")
overlaps = multiplicity.copy()
starts = multiplicity.copy()
for i in range(nslices):
prev_end = ranges[i, 1]
for j in range(i + 1, nslices):
stj = starts[j]
assert stj <= ss
if stj == ss:
# This slice has already been counted
continue
if j < self.nslices:
next_beg = sorted[j, stj]
else:
next_beg = sortedLR[stj]
next_end = ranges[j, 1]
if prev_end > next_end:
# Complete overlapping case
multiplicity[j - i] += 1
if j < self.nslices:
overlaps[i] += ss - stj
starts[j] = ss # a sentinel
else:
overlaps[i] += nelementsLR - stj
starts[j] = nelementsLR # a sentinel
elif prev_end > next_beg:
multiplicity[j - i] += 1
idx = self.search_item_lt(
where, prev_end, j, ranges[j], stj)
nelem = idx - stj
overlaps[i] += nelem
starts[j] = idx
if self.type != "string":
# Convert ranges into floats in order to allow
# doing operations with them without overflows
soverlap += float(ranges[i, 1]) - float(ranges[j, 0])
# Return the overlap as the ratio between overlaps and entire range
if self.type != "string":
erange = float(ranges[-1, 1]) - float(ranges[0, 0])
# Check that there is an effective range of values
# Beware, erange can be negative in situations where
# the values are suffering overflow. This can happen
            # especially on big signed integer values (on overflows,
# the end value will become negative!).
# Also, there is no way to compute overlap ratios for
# non-numerical types. So, be careful and always check
# that toverlap has a positive value (it must have been
# initialized to -1. before) before using it.
# F. Alted 2007-01-19
if erange > 0:
toverlap = soverlap / erange
if verbose and message != "init":
print("toverlap (%s):" % message, toverlap)
print("multiplicity:\n", multiplicity, multiplicity.sum())
print("overlaps:\n", overlaps, overlaps.sum())
noverlaps = overlaps.sum()
# For full indexes, set the 'is_csi' flag
if self.indsize == 8 and self._v_file._iswritable():
self._v_attrs.is_csi = (noverlaps == 0)
# Save the number of overlaps for future references
self.noverlaps = noverlaps
return (noverlaps, multiplicity, toverlap)
def compute_overlaps(self, where, message, verbose):
"""Compute some statistics about overlaping of slices in index.
It returns the following info:
noverlaps : int
        The total number of slices that overlap in the index.
multiplicity : array of int
The number of times that a concrete slice overlaps with any other.
toverlap : float
        An overlap index: the sum of the values in segment slices that
        overlap divided by the entire range of values. This index is only
computed for numerical types.
"""
ranges = where.ranges[:]
nslices = self.nslices
if self.nelementsILR > 0:
# Add the ranges corresponding to the last row
rangeslr = numpy.array([self.bebounds[0], self.bebounds[-1]])
ranges = numpy.concatenate((ranges, [rangeslr]))
nslices += 1
noverlaps = 0
soverlap = 0.
toverlap = -1.
multiplicity = numpy.zeros(shape=nslices, dtype="int_")
for i in range(nslices):
for j in range(i + 1, nslices):
if ranges[i, 1] > ranges[j, 0]:
noverlaps += 1
multiplicity[j - i] += 1
if self.type != "string":
# Convert ranges into floats in order to allow
# doing operations with them without overflows
soverlap += float(ranges[i, 1]) - float(ranges[j, 0])
# Return the overlap as the ratio between overlaps and entire range
if self.type != "string":
erange = float(ranges[-1, 1]) - float(ranges[0, 0])
# Check that there is an effective range of values
# Beware, erange can be negative in situations where
# the values are suffering overflow. This can happen
            # especially on big signed integer values (on overflows,
# the end value will become negative!).
# Also, there is no way to compute overlap ratios for
# non-numerical types. So, be careful and always check
# that toverlap has a positive value (it must have been
# initialized to -1. before) before using it.
# F. Altet 2007-01-19
if erange > 0:
toverlap = soverlap / erange
if verbose:
print("overlaps (%s):" % message, noverlaps, toverlap)
print(multiplicity)
# For full indexes, set the 'is_csi' flag
if self.indsize == 8 and self._v_file._iswritable():
self._v_attrs.is_csi = (noverlaps == 0)
# Save the number of overlaps for future references
self.noverlaps = noverlaps
return (noverlaps, multiplicity, toverlap)
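    # Worked example (hypothetical ranges): for two slices with ranges
    # [0, 10] and [5, 15], ranges[0, 1] = 10 > ranges[1, 0] = 5, so
    # noverlaps = 1, multiplicity[1] = 1 and, for numerical types,
    # toverlap = (10 - 5) / (15 - 0) = 1/3.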
def read_sorted_indices(self, what, start, stop, step):
"""Return the sorted or indices values in the specified range."""
(start, stop, step) = self._process_range(start, stop, step)
if start >= stop:
return numpy.empty(0, self.dtype)
# Correction for negative values of step (reverse indices)
if step < 0:
tmp = start
start = self.nelements - stop
stop = self.nelements - tmp
if what == "sorted":
values = self.sorted
valuesLR = self.sortedLR
buffer_ = numpy.empty(stop - start, dtype=self.dtype)
else:
values = self.indices
valuesLR = self.indicesLR
buffer_ = numpy.empty(stop - start, dtype="u%d" % self.indsize)
ss = self.slicesize
nrow_start = start // ss
istart = start % ss
nrow_stop = stop // ss
tlen = stop - start
bstart = 0
ilen = 0
for nrow in range(nrow_start, nrow_stop + 1):
blen = ss - istart
if ilen + blen > tlen:
blen = tlen - ilen
if blen <= 0:
break
if nrow < self.nslices:
self.read_slice(
values, nrow, buffer_[bstart:bstart + blen], istart)
else:
self.read_slice_lr(
valuesLR, buffer_[bstart:bstart + blen], istart)
istart = 0
bstart += blen
ilen += blen
return buffer_[::step]
def read_sorted(self, start=None, stop=None, step=None):
"""Return the sorted values of index in the specified range.
The meaning of the start, stop and step arguments is the same as in
:meth:`Table.read_sorted`.
"""
return self.read_sorted_indices('sorted', start, stop, step)
def read_indices(self, start=None, stop=None, step=None):
"""Return the indices values of index in the specified range.
The meaning of the start, stop and step arguments is the same as in
:meth:`Table.read_sorted`.
"""
return self.read_sorted_indices('indices', start, stop, step)
def _process_range(self, start, stop, step):
"""Get a range specifc for the index usage."""
if start is not None and stop is None:
# Special case for the behaviour of PyTables iterators
stop = idx2long(start + 1)
if start is None:
start = 0
else:
start = idx2long(start)
if stop is None:
stop = idx2long(self.nelements)
else:
stop = idx2long(stop)
if step is None:
step = 1
else:
step = idx2long(step)
return (start, stop, step)
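    # Worked examples for the range normalization above (hypothetical
    # nelements = 100): _process_range(5, None, None) -> (5, 6, 1), the
    # special case used by the PyTables iterators; (None, None, 2) ->
    # (0, 100, 2); and (3, 7, None) -> (3, 7, 1).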
def __getitem__(self, key):
"""Return the indices values of index in the specified range.
If key argument is an integer, the corresponding index is returned. If
key is a slice, the range of indices determined by it is returned. A
negative value of step in slice is supported, meaning that the results
will be returned in reverse order.
This method is equivalent to :meth:`Index.read_indices`.
"""
if is_idx(key):
key = operator.index(key)
if key < 0:
# To support negative values
key += self.nelements
return self.read_indices(key, key + 1, 1)[0]
elif isinstance(key, slice):
return self.read_indices(key.start, key.stop, key.step)
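    # A minimal usage sketch (hypothetical Index instance ``idx``):
    #
    #   idx[0]       # first index entry, via read_indices(0, 1, 1)
    #   idx[-1]      # negative keys wrap around through nelements
    #   idx[0:10:2]  # every other entry of the first ten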
def __len__(self):
return self.nelements
def restorecache(self):
"Clean the limits cache and resize starts and lengths arrays"
params = self._v_file.params
# The sorted IndexArray is absolutely required to be in memory
# at the same time than the Index instance, so create a strong
# reference to it. We are not introducing leaks because the
# strong reference will disappear when this Index instance is
# to be closed.
self._sorted = self.sorted
        self._sorted.boundscache = ObjectCache(params['BOUNDS_MAX_SLOTS'],
                                               params['BOUNDS_MAX_SIZE'],
                                               'non-opt types bounds')
"""A cache for the bounds (2nd hash) data. Only used for
non-optimized types searches."""
self.limboundscache = ObjectCache(params['LIMBOUNDS_MAX_SLOTS'],
params['LIMBOUNDS_MAX_SIZE'],
'bounding limits')
"""A cache for bounding limits."""
self.sortedLRcache = ObjectCache(params['SORTEDLR_MAX_SLOTS'],
params['SORTEDLR_MAX_SIZE'],
'last row chunks')
"""A cache for the last row chunks. Only used for searches in
the last row, and mainly useful for small indexes."""
self.starts = numpy.empty(shape=self.nrows, dtype=numpy.int32)
self.lengths = numpy.empty(shape=self.nrows, dtype=numpy.int32)
self.sorted._init_sorted_slice(self)
self.dirtycache = False
def search(self, item):
"""Do a binary search in this index for an item."""
if profile:
tref = time()
if profile:
show_stats("Entering search", tref)
if self.dirtycache:
self.restorecache()
        # An empty item, or a left limit larger than the right one,
        # means that the set of matching records is always going to be
        # empty, so we avoid further computation (including looking up
        # the limits cache).
if not item or item[0] > item[1]:
self.starts[:] = 0
self.lengths[:] = 0
return 0
tlen = 0
# Check whether the item tuple is in the limits cache or not
nslot = self.limboundscache.getslot(item)
if nslot >= 0:
startlengths = self.limboundscache.getitem(nslot)
# Reset the lengths array (not necessary for starts)
self.lengths[:] = 0
# Now, set the interesting rows
for nrow in range(len(startlengths)):
nrow2, start, length = startlengths[nrow]
self.starts[nrow2] = start
self.lengths[nrow2] = length
tlen = tlen + length
return tlen
# The item is not in cache. Do the real lookup.
sorted = self.sorted
if self.nslices > 0:
if self.type in self.opt_search_types:
                # The next are optimizations. However, they hide the
                # CPU function consumption from Python profiles.
                # You may want to de-activate them during profiling.
if self.type == "int32":
tlen = sorted._search_bin_na_i(*item)
elif self.type == "int64":
tlen = sorted._search_bin_na_ll(*item)
elif self.type == "float16":
tlen = sorted._search_bin_na_e(*item)
elif self.type == "float32":
tlen = sorted._search_bin_na_f(*item)
elif self.type == "float64":
tlen = sorted._search_bin_na_d(*item)
elif self.type == "float96":
tlen = sorted._search_bin_na_g(*item)
elif self.type == "float128":
tlen = sorted._search_bin_na_g(*item)
elif self.type == "uint32":
tlen = sorted._search_bin_na_ui(*item)
elif self.type == "uint64":
tlen = sorted._search_bin_na_ull(*item)
elif self.type == "int8":
tlen = sorted._search_bin_na_b(*item)
elif self.type == "int16":
tlen = sorted._search_bin_na_s(*item)
elif self.type == "uint8":
tlen = sorted._search_bin_na_ub(*item)
elif self.type == "uint16":
tlen = sorted._search_bin_na_us(*item)
else:
assert False, "This can't happen!"
else:
tlen = self.search_scalar(item, sorted)
# Get possible remaining values in last row
if self.nelementsSLR > 0:
# Look for more indexes in the last row
(start, stop) = self.search_last_row(item)
self.starts[-1] = start
self.lengths[-1] = stop - start
tlen += stop - start
if self.limboundscache.couldenablecache():
            # Get a startlengths tuple and save it in cache.
            # This is quite slow, but it is a good way to compress
            # the bounds info. Moreover, .couldenablecache() does a
            # good job of avoiding this computation when it is not
            # necessary.
startlengths = []
for nrow, length in enumerate(self.lengths):
if length > 0:
startlengths.append((nrow, self.starts[nrow], length))
            # Compute the size of the recarray (approximately)
# The +1 at the end is important to avoid 0 lengths
# (remember, the object headers take some space)
size = len(startlengths) * 8 * 2 + 1
# Put this startlengths list in cache
self.limboundscache.setitem(item, startlengths, size)
if profile:
show_stats("Exiting search", tref)
return tlen
    # This is a scalar version of search. It works with strings as well.
def search_scalar(self, item, sorted):
"""Do a binary search in this index for an item."""
tlen = 0
        # Do the lookup for values fulfilling the conditions
for i in range(self.nslices):
(start, stop) = sorted._search_bin(i, item)
self.starts[i] = start
self.lengths[i] = stop - start
tlen += stop - start
return tlen
def search_last_row(self, item):
# Variable initialization
item1, item2 = item
bebounds = self.bebounds
b0, b1 = bebounds[0], bebounds[-1]
bounds = bebounds[1:-1]
itemsize = self.dtype.itemsize
sortedLRcache = self.sortedLRcache
hi = self.nelementsSLR # maximum number of elements
rchunksize = self.chunksize // self.reduction
nchunk = -1
# Lookup for item1
if nan_aware_gt(item1, b0):
if nan_aware_le(item1, b1):
# Search the appropriate chunk in bounds cache
nchunk = bisect_left(bounds, item1)
# Lookup for this chunk in cache
nslot = sortedLRcache.getslot(nchunk)
if nslot >= 0:
chunk = sortedLRcache.getitem(nslot)
else:
begin = rchunksize * nchunk
end = rchunksize * (nchunk + 1)
if end > hi:
end = hi
# Read the chunk from disk
chunk = self.sortedLR._read_sorted_slice(
self.sorted, begin, end)
# Put it in cache. It's important to *copy*
# the buffer, as it is reused in future reads!
sortedLRcache.setitem(nchunk, chunk.copy(),
(end - begin) * itemsize)
start = bisect_left(chunk, item1)
start += rchunksize * nchunk
else:
start = hi
else:
start = 0
# Lookup for item2
if nan_aware_ge(item2, b0):
if nan_aware_lt(item2, b1):
# Search the appropriate chunk in bounds cache
nchunk2 = bisect_right(bounds, item2)
if nchunk2 != nchunk:
# Lookup for this chunk in cache
nslot = sortedLRcache.getslot(nchunk2)
if nslot >= 0:
chunk = sortedLRcache.getitem(nslot)
else:
begin = rchunksize * nchunk2
end = rchunksize * (nchunk2 + 1)
if end > hi:
end = hi
# Read the chunk from disk
chunk = self.sortedLR._read_sorted_slice(
self.sorted, begin, end)
# Put it in cache. It's important to *copy*
# the buffer, as it is reused in future reads!
# See bug #60 in xot.carabos.com
sortedLRcache.setitem(nchunk2, chunk.copy(),
(end - begin) * itemsize)
stop = bisect_right(chunk, item2)
stop += rchunksize * nchunk2
else:
stop = hi
else:
stop = 0
return (start, stop)
def get_chunkmap(self):
"""Compute a map with the interesting chunks in index."""
if profile:
tref = time()
if profile:
show_stats("Entering get_chunkmap", tref)
ss = self.slicesize
nsb = self.nslicesblock
nslices = self.nslices
lbucket = self.lbucket
indsize = self.indsize
bucketsinblock = float(self.blocksize) / lbucket
nchunks = int(math.ceil(float(self.nelements) / lbucket))
chunkmap = numpy.zeros(shape=nchunks, dtype="bool")
reduction = self.reduction
starts = (self.starts - 1) * reduction + 1
stops = (self.starts + self.lengths) * reduction
starts[starts < 0] = 0 # All negative values set to zero
indices = self.indices
for nslice in range(self.nrows):
start = starts[nslice]
stop = stops[nslice]
if stop > start:
idx = numpy.empty(shape=stop - start, dtype='u%d' % indsize)
if nslice < nslices:
indices._read_index_slice(nslice, start, stop, idx)
else:
self.indicesLR._read_index_slice(start, stop, idx)
if indsize == 8:
idx //= lbucket
elif indsize == 2:
                # The chunkmap size can never be larger than 'int_'
idx = idx.astype("int_")
offset = int((nslice // nsb) * bucketsinblock)
idx += offset
elif indsize == 1:
                # The chunkmap size can never be larger than 'int_'
idx = idx.astype("int_")
offset = (nslice * ss) // lbucket
idx += offset
chunkmap[idx] = True
# The case lbucket < nrowsinchunk should only happen in tests
nrowsinchunk = self.nrowsinchunk
if lbucket != nrowsinchunk:
# Map the 'coarse grain' chunkmap into the 'true' chunkmap
nelements = self.nelements
tnchunks = int(math.ceil(float(nelements) / nrowsinchunk))
tchunkmap = numpy.zeros(shape=tnchunks, dtype="bool")
ratio = float(lbucket) / nrowsinchunk
idx = chunkmap.nonzero()[0]
starts = (idx * ratio).astype('int_')
stops = numpy.ceil((idx + 1) * ratio).astype('int_')
for i in range(len(idx)):
tchunkmap[starts[i]:stops[i]] = True
chunkmap = tchunkmap
if profile:
show_stats("Exiting get_chunkmap", tref)
return chunkmap
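    # Worked example for the bucket -> chunk remapping above
    # (hypothetical sizes): with lbucket = 4 and nrowsinchunk = 2,
    # ratio = 2.0, so a marked bucket idx = 3 maps to the true chunks
    # starts = 6 .. stops = 8, i.e. tchunkmap[6:8] = True.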
def get_lookup_range(self, ops, limits):
assert len(ops) in [1, 2]
assert len(limits) in [1, 2]
assert len(ops) == len(limits)
column = self.column
coldtype = column.dtype.base
itemsize = coldtype.itemsize
if len(limits) == 1:
assert ops[0] in ['lt', 'le', 'eq', 'ge', 'gt']
limit = limits[0]
op = ops[0]
if op == 'lt':
range_ = (inftype(coldtype, itemsize, sign=-1),
nextafter(limit, -1, coldtype, itemsize))
elif op == 'le':
range_ = (inftype(coldtype, itemsize, sign=-1),
limit)
elif op == 'gt':
range_ = (nextafter(limit, +1, coldtype, itemsize),
inftype(coldtype, itemsize, sign=+1))
elif op == 'ge':
range_ = (limit,
inftype(coldtype, itemsize, sign=+1))
elif op == 'eq':
range_ = (limit, limit)
elif len(limits) == 2:
assert ops[0] in ('gt', 'ge') and ops[1] in ('lt', 'le')
lower, upper = limits
if lower > upper:
# ``a <[=] x <[=] b`` is always false if ``a > b``.
return ()
if ops == ('gt', 'lt'): # lower < col < upper
range_ = (nextafter(lower, +1, coldtype, itemsize),
nextafter(upper, -1, coldtype, itemsize))
elif ops == ('ge', 'lt'): # lower <= col < upper
range_ = (lower, nextafter(upper, -1, coldtype, itemsize))
elif ops == ('gt', 'le'): # lower < col <= upper
range_ = (nextafter(lower, +1, coldtype, itemsize), upper)
elif ops == ('ge', 'le'): # lower <= col <= upper
range_ = (lower, upper)
return range_
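    # Worked example (hypothetical int32 column): for ops = ('ge', 'lt')
    # and limits = (3, 7), the returned search range is (3, 6), since
    # nextafter on an integer type just steps by one; a single 'eq' op
    # with limit 5 collapses the range to (5, 5).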
def _f_remove(self, recursive=False):
"""Remove this Index object."""
# Index removal is always recursive,
# no matter what `recursive` says.
super(Index, self)._f_remove(True)
def __str__(self):
"""This provides a more compact representation than __repr__"""
# The filters
filters = ""
if self.filters.complevel:
if self.filters.shuffle:
filters += ", shuffle"
filters += ", %s(%s)" % (self.filters.complib,
self.filters.complevel)
return "Index(%s, %s%s).is_csi=%s" % \
(self.optlevel, self.kind, filters, self.is_csi)
def __repr__(self):
"""This provides more metainfo than standard __repr__"""
cpathname = self.table._v_pathname + ".cols." + self.column.pathname
retstr = """%s (Index for column %s)
optlevel := %s
kind := %s
filters := %s
is_csi := %s
nelements := %s
chunksize := %s
slicesize := %s
blocksize := %s
superblocksize := %s
dirty := %s
byteorder := %r""" % (self._v_pathname, cpathname,
self.optlevel, self.kind,
self.filters, self.is_csi,
self.nelements,
self.chunksize, self.slicesize,
self.blocksize, self.superblocksize,
self.dirty, self.byteorder)
retstr += "\n sorted := %s" % self.sorted
retstr += "\n indices := %s" % self.indices
retstr += "\n ranges := %s" % self.ranges
retstr += "\n bounds := %s" % self.bounds
retstr += "\n sortedLR := %s" % self.sortedLR
retstr += "\n indicesLR := %s" % self.indicesLR
return retstr
class IndexesDescG(NotLoggedMixin, Group):
_c_classid = 'DINDEX'
def _g_width_warning(self):
warnings.warn(
"the number of indexed columns on a single description group "
"is exceeding the recommended maximum (%d); "
"be ready to see PyTables asking for *lots* of memory "
"and possibly slow I/O" % self._v_max_group_width,
PerformanceWarning)
class IndexesTableG(NotLoggedMixin, Group):
_c_classid = 'TINDEX'
@property
def auto(self):
if 'AUTO_INDEX' not in self._v_attrs:
return default_auto_index
return self._v_attrs.AUTO_INDEX
@auto.setter
def auto(self, auto):
self._v_attrs.AUTO_INDEX = bool(auto)
@auto.deleter
def auto(self):
del self._v_attrs.AUTO_INDEX
def _g_width_warning(self):
warnings.warn(
"the number of indexed columns on a single table "
"is exceeding the recommended maximum (%d); "
"be ready to see PyTables asking for *lots* of memory "
"and possibly slow I/O" % self._v_max_group_width,
PerformanceWarning)
def _g_check_name(self, name):
if not name.startswith('_i_'):
raise ValueError(
"names of index groups must start with ``_i_``: %s" % name)
@property
def table(self):
"Accessor for the `Table` object of this `IndexesTableG` container."
names = self._v_pathname.split("/")
tablename = names.pop()[3:] # "_i_" is at the beginning
parentpathname = "/".join(names)
tablepathname = join_path(parentpathname, tablename)
table = self._v_file._get_node(tablepathname)
return table
class OldIndex(NotLoggedMixin, Group):
"""This is meant to hide indexes of PyTables 1.x files."""
_c_classid = 'CINDEX'
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
|
{
"content_hash": "31bba3c2f17e1fdb0a6120d70c3dffdf",
"timestamp": "",
"source": "github",
"line_count": 2226,
"max_line_length": 91,
"avg_line_length": 40.9622641509434,
"alnum_prop": 0.5528284091158343,
"repo_name": "andreabedini/PyTables",
"id": "e37b7eafeb84ee5a9e20d7dc51a86307aff40951",
"size": "91453",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tables/index.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "896101"
},
{
"name": "C++",
"bytes": "97380"
},
{
"name": "CMake",
"bytes": "21598"
},
{
"name": "Gnuplot",
"bytes": "2104"
},
{
"name": "Makefile",
"bytes": "4159"
},
{
"name": "Objective-C",
"bytes": "1404"
},
{
"name": "Python",
"bytes": "3326507"
},
{
"name": "Shell",
"bytes": "16985"
}
],
"symlink_target": ""
}
|
from telemetry.core.timeline_data import TimelineData
class EmptyTimelineDataImporter(object):
"""Imports empty TimlineData objects."""
def __init__(self, model, timeline_data, import_priority=0):
pass
@staticmethod
def CanImport(timeline_data):
if not isinstance(timeline_data, TimelineData):
return False
event_data = timeline_data.EventData()
    if isinstance(event_data, (list, basestring)):
      return len(event_data) == 0
return False
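  # A minimal sketch of the import protocol (hypothetical TimelineData
  # subclass wrapping an empty event list):
  #
  #   data = SomeTimelineData([])
  #   if EmptyTimelineDataImporter.CanImport(data):
  #     importer = EmptyTimelineDataImporter(model, data)
  #     importer.ImportEvents()
  #     importer.FinalizeImport()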
def ImportEvents(self):
pass
def FinalizeImport(self):
pass
|
{
"content_hash": "398da065095e309589403cf55062ab41",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 62,
"avg_line_length": 26.73913043478261,
"alnum_prop": 0.7008130081300813,
"repo_name": "anirudhSK/chromium",
"id": "a7c5b4d66392c282723c4a79cfb2c0d2edcfa032",
"size": "778",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/core/timeline/empty_timeline_data_importer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "52960"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "42502191"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "201859263"
},
{
"name": "CSS",
"bytes": "946557"
},
{
"name": "DOT",
"bytes": "2984"
},
{
"name": "Java",
"bytes": "5687122"
},
{
"name": "JavaScript",
"bytes": "22163714"
},
{
"name": "M",
"bytes": "2190"
},
{
"name": "Matlab",
"bytes": "2496"
},
{
"name": "Objective-C",
"bytes": "7670589"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "672770"
},
{
"name": "Python",
"bytes": "10873885"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1315894"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "TypeScript",
"bytes": "1560024"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
}
|
"""Config flow for DoorBird integration."""
from ipaddress import ip_address
import logging
import urllib
from doorbirdpy import DoorBird
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_USERNAME,
HTTP_UNAUTHORIZED,
)
from homeassistant.core import callback
from homeassistant.util.network import is_link_local
from .const import CONF_EVENTS, DOORBIRD_OUI
from .const import DOMAIN # pylint:disable=unused-import
from .util import get_mac_address_from_doorstation_info
_LOGGER = logging.getLogger(__name__)
def _schema_with_defaults(host=None, name=None):
return vol.Schema(
{
vol.Required(CONF_HOST, default=host): str,
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
vol.Optional(CONF_NAME, default=name): str,
}
)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
device = DoorBird(data[CONF_HOST], data[CONF_USERNAME], data[CONF_PASSWORD])
try:
status = await hass.async_add_executor_job(device.ready)
info = await hass.async_add_executor_job(device.info)
except urllib.error.HTTPError as err:
if err.code == HTTP_UNAUTHORIZED:
raise InvalidAuth from err
raise CannotConnect from err
except OSError as err:
raise CannotConnect from err
if not status[0]:
raise CannotConnect
mac_addr = get_mac_address_from_doorstation_info(info)
# Return info that you want to store in the config entry.
return {"title": data[CONF_HOST], "mac_addr": mac_addr}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for DoorBird."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def __init__(self):
"""Initialize the DoorBird config flow."""
self.discovery_schema = {}
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
info, errors = await self._async_validate_or_error(user_input)
if not errors:
await self.async_set_unique_id(info["mac_addr"])
self._abort_if_unique_id_configured()
return self.async_create_entry(title=info["title"], data=user_input)
data = self.discovery_schema or _schema_with_defaults()
return self.async_show_form(step_id="user", data_schema=data, errors=errors)
async def async_step_zeroconf(self, discovery_info):
"""Prepare configuration for a discovered doorbird device."""
macaddress = discovery_info["properties"]["macaddress"]
if macaddress[:6] != DOORBIRD_OUI:
return self.async_abort(reason="not_doorbird_device")
if is_link_local(ip_address(discovery_info[CONF_HOST])):
return self.async_abort(reason="link_local_address")
await self.async_set_unique_id(macaddress)
self._abort_if_unique_id_configured(
updates={CONF_HOST: discovery_info[CONF_HOST]}
)
chop_ending = "._axis-video._tcp.local."
friendly_hostname = discovery_info["name"]
if friendly_hostname.endswith(chop_ending):
friendly_hostname = friendly_hostname[: -len(chop_ending)]
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
self.context["title_placeholders"] = {
CONF_NAME: friendly_hostname,
CONF_HOST: discovery_info[CONF_HOST],
}
self.discovery_schema = _schema_with_defaults(
host=discovery_info[CONF_HOST], name=friendly_hostname
)
return await self.async_step_user()
async def async_step_import(self, user_input):
"""Handle import."""
if user_input:
info, errors = await self._async_validate_or_error(user_input)
if not errors:
await self.async_set_unique_id(
info["mac_addr"], raise_on_progress=False
)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=info["title"], data=user_input)
return await self.async_step_user(user_input)
async def _async_validate_or_error(self, user_input):
"""Validate doorbird or error."""
errors = {}
info = {}
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
return info, errors
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for doorbird."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if user_input is not None:
events = [event.strip() for event in user_input[CONF_EVENTS].split(",")]
return self.async_create_entry(title="", data={CONF_EVENTS: events})
current_events = self.config_entry.options.get(CONF_EVENTS, [])
# We convert to a comma separated list for the UI
# since there really isn't anything better
options_schema = vol.Schema(
{vol.Optional(CONF_EVENTS, default=", ".join(current_events)): str}
)
return self.async_show_form(step_id="init", data_schema=options_schema)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
|
{
"content_hash": "39f2698a2c0b380007cc9681529576f3",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 84,
"avg_line_length": 35.17777777777778,
"alnum_prop": 0.6388186986734049,
"repo_name": "tchellomello/home-assistant",
"id": "8e3f661254df1521be8bba15ad4f03136782411e",
"size": "6332",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/doorbird/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "26713364"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
}
|
import datetime
import urllib
from tempest.api.compute import base
from tempest import test
class InstanceUsageAuditLogTestJSON(base.BaseV2ComputeAdminTest):
@classmethod
def resource_setup(cls):
super(InstanceUsageAuditLogTestJSON, cls).resource_setup()
cls.adm_client = cls.os_adm.instance_usages_audit_log_client
@test.attr(type='gate')
def test_list_instance_usage_audit_logs(self):
# list instance usage audit logs
body = self.adm_client.list_instance_usage_audit_logs()
expected_items = ['total_errors', 'total_instances', 'log',
'num_hosts_running', 'num_hosts_done',
'num_hosts', 'hosts_not_run', 'overall_status',
'period_ending', 'period_beginning',
'num_hosts_not_run']
for item in expected_items:
self.assertIn(item, body)
@test.attr(type='gate')
def test_get_instance_usage_audit_log(self):
# Get instance usage audit log before specified time
now = datetime.datetime.now()
body = self.adm_client.get_instance_usage_audit_log(
urllib.quote(now.strftime("%Y-%m-%d %H:%M:%S")))
expected_items = ['total_errors', 'total_instances', 'log',
'num_hosts_running', 'num_hosts_done', 'num_hosts',
'hosts_not_run', 'overall_status', 'period_ending',
'period_beginning', 'num_hosts_not_run']
for item in expected_items:
self.assertIn(item, body)
|
{
"content_hash": "9f3f3ded76a7ece01c693d5a285d5cf5",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 77,
"avg_line_length": 40.64102564102564,
"alnum_prop": 0.592429022082019,
"repo_name": "Vaidyanath/tempest",
"id": "16ce93c6184de4ce01a0a4b9b987eb42b90935c4",
"size": "2216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/compute/admin/test_instance_usage_audit_log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "695"
},
{
"name": "Python",
"bytes": "2788179"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
}
|
from logging import Handler
from logtracker.models import *
import traceback
logrecord_keys = ['msecs', 'args', 'name', 'thread', 'created', 'process', 'threadName', 'module', 'filename', 'levelno', 'processName', 'lineno', 'exc_info', 'exc_text', 'pathname', 'funcName', 'relativeCreated', 'levelname', 'msg']
class TrackingHandler(Handler):
""" Realtime log analysis handling for alerts. """
def __init__(self):
Handler.__init__(self)
def emit(self, record, *args, **kwargs):
""" Append the record to the buffer for the current thread. """
try:
newlog = LogTrack(level=record.levelno,
message=record.msg,
filename=record.filename,
line_no=record.lineno,
pathname=record.pathname,
funcname = record.funcName,
module = record.module
)
#Simple reading of extras
#data_dump=str(record.__dict__)
#slightly more intelligent reading of extras
record_dict = record.__dict__
#dmyung - get the traceback for this log message and set it as a value in the dump
newlog.data_dump = "traceback:" + str(traceback.extract_stack())
if record_dict:
for key in record_dict:
if key in logrecord_keys:
continue
else:
if newlog.data_dump == None:
newlog.data_dump = ''
newlog.data_dump += key + ":=" + str(record_dict[key]) + "\n"
newlog.save()
except Exception:
# TODO: maybe do something more here. Logging shouldn't blow
# up anything else, but at the same time we'd still like to
# know that something went wrong.
# unfortunately we can't really log it, as that could land us in
# an infinite loop.
pass
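# A minimal usage sketch (hypothetical logger name; assumes Django and
# the LogTrack model are already configured):
#
#   import logging
#   logger = logging.getLogger('myapp')
#   logger.addHandler(TrackingHandler())
#   logger.error('payment failed', extra={'order_id': 42})
#
# The extra kwargs end up serialized into LogTrack.data_dump.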
|
{
"content_hash": "8f3e7830ce7d9a4149acfdacab368339",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 233,
"avg_line_length": 46,
"alnum_prop": 0.48867753623188404,
"repo_name": "commtrack/commtrack-core",
"id": "232314125cb8f69955f58ce92c3911c856b07474",
"size": "2367",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "apps/logtracker/handlers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "522879"
},
{
"name": "PHP",
"bytes": "2787"
},
{
"name": "Python",
"bytes": "3628092"
},
{
"name": "Shell",
"bytes": "487"
}
],
"symlink_target": ""
}
|
from collections import defaultdict
from Cookie import SimpleCookie
from cookielib import Cookie
from weakref import ref
from .pool import ConnectionPool
from .request import Request
from .response import Response
MAX_REDIRECTS = 10
class Session(object):
def __init__(self, debuglevel=None):
self._pool = ConnectionPool(debuglevel=debuglevel)
self.cookies = defaultdict(dict)
self.encoding = None
self.user_agent = None
def _set_cookies(self, headers, request):
cookies = [c for domain, cookies in self.cookies.iteritems()
if request.host.endswith(domain) or domain == '_all' for c in cookies.itervalues()]
if cookies:
headers['Cookie'] = '; '.join(c.output([], '').lstrip() for c in cookies)
def set_cookie(self, name, value=None, domain='_all'):
if isinstance(name, Cookie):
name, value = name.name, name.value
domain = name.domain or domain
c = SimpleCookie()
c[name] = value
for cookie in c.itervalues():
self.cookies[domain.rstrip(',').lstrip('.')][cookie.key] = cookie
def _get_cookies(self, response):
c = SimpleCookie()
for h, v in response.getheaders():
if h == 'set-cookie':
c.load(v)
for cookie in c.itervalues():
domain = cookie['domain'].rstrip(',').lstrip('.') or response.host
self.cookies[domain][cookie.key] = cookie
def _get_default_headers(self):
return {
'User-Agent': self.user_agent or \
'Opera/9.80 (X11; Linux i686; U; en) Presto/2.10.229 Version/11.60',
'Accept': 'text/html, application/xml;q=0.9, application/xhtml+xml, image/png,'
' image/webp, image/jpeg, image/gif, image/x-xbitmap, */*;q=0.1',
'Accept-Language': 'en-US,en;q=0.9',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'Keep-Alive'
}
def __call__(self, request):
if isinstance(request, basestring):
request = Request(request)
count = MAX_REDIRECTS
while count:
headers = self._get_default_headers()
if request.referer:
headers['Referer'] = request.referer.url if\
isinstance(request.referer, (Response, Request)) else request.referer
self._set_cookies(headers, request)
response = request.request(self._pool, headers)
response_data = response.read()
self._get_cookies(response)
if 300 <= response.status < 400:
request = Request(response.getheader('location'))
request.referer = response.url
count -= 1
else:
break
return Response(self, response, response_data)
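    # A minimal usage sketch of the redirect-following call above
    # (hypothetical URL):
    #
    #   session = Session()
    #   response = session('http://example.com/')  # follows up to
    #                                              # MAX_REDIRECTS hops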
def request(self, url, baseurl=None):
if isinstance(baseurl, (Request, Response)):
baseurl = baseurl.url
return Request(url, baseurl, ref(self))
def clear(self):
self._pool.clear()
self.cookies.clear()
def get_state(self):
result = {}
for domain, cookies in self.cookies.iteritems():
for cookie in cookies.itervalues():
result.setdefault(domain, []).append(
cookie.output(None, '').lstrip())
return result
def set_state(self, state):
self.cookies.clear()
for domain, values in state.iteritems():
c = SimpleCookie()
for v in values:
c.load(v)
for cookie in c.itervalues():
self.cookies[domain][cookie.key] = cookie
|
{
"content_hash": "f14dc2ad44e435d49da7e0cafb086cb1",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 95,
"avg_line_length": 32.796460176991154,
"alnum_prop": 0.5682676740420939,
"repo_name": "baverman/swoop",
"id": "e540109c0744ee06f4baea53497340b5ffcf1b29",
"size": "3706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swoop/session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17822"
}
],
"symlink_target": ""
}
|
import numbers
from typing import Type
import warnings
import numpy as np
from pandas._libs import lib
from pandas.compat import set_function_name
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
is_bool_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna, notna
from pandas.core import nanops, ops
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
from pandas.core.tools.numeric import to_numeric
class _IntegerDtype(ExtensionDtype):
"""
An ExtensionDtype to hold a single size & kind of integer dtype.
These specific implementations are subclasses of the non-public
_IntegerDtype. For example we have Int8Dtype to represent signed int 8s.
The attributes name & type are set when these subclasses are created.
"""
name = None # type: str
base = None
type = None # type: Type
na_value = np.nan
def __repr__(self):
sign = "U" if self.is_unsigned_integer else ""
return "{sign}Int{size}Dtype()".format(sign=sign, size=8 * self.itemsize)
@cache_readonly
def is_signed_integer(self):
return self.kind == "i"
@cache_readonly
def is_unsigned_integer(self):
return self.kind == "u"
@property
def _is_numeric(self):
return True
@cache_readonly
def numpy_dtype(self):
""" Return an instance of our numpy dtype """
return np.dtype(self.type)
@cache_readonly
def kind(self):
return self.numpy_dtype.kind
@cache_readonly
def itemsize(self):
""" Return the number of bytes in this dtype """
return self.numpy_dtype.itemsize
@classmethod
def construct_array_type(cls):
"""Return the array type associated with this dtype
Returns
-------
type
"""
return IntegerArray
def integer_array(values, dtype=None, copy=False):
"""
Infer and return an integer array of the values.
Parameters
----------
values : 1D list-like
dtype : dtype, optional
dtype to coerce
copy : boolean, default False
Returns
-------
IntegerArray
Raises
------
TypeError if incompatible types
"""
values, mask = coerce_to_array(values, dtype=dtype, copy=copy)
return IntegerArray(values, mask)
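# A minimal usage sketch (illustrative values):
#
#   arr = integer_array([1, None, 3], dtype="Int32")
#   # arr._data -> array([1, 1, 3], dtype=int32)  (masked slot filled)
#   # arr._mask -> array([False, True, False])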
def safe_cast(values, dtype, copy):
"""
Safely cast the values to the dtype if they
are equivalent, meaning floats must be equivalent to the
ints.
"""
try:
return values.astype(dtype, casting="safe", copy=copy)
except TypeError:
casted = values.astype(dtype, copy=copy)
if (casted == values).all():
return casted
raise TypeError(
"cannot safely cast non-equivalent {} to {}".format(
values.dtype, np.dtype(dtype)
)
)
def coerce_to_array(values, dtype, mask=None, copy=False):
"""
Coerce the input values array to numpy arrays with a mask
Parameters
----------
values : 1D list-like
dtype : integer dtype
mask : boolean 1D array, optional
copy : boolean, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
"""
    # if values is an integer numpy array, preserve its dtype
if dtype is None and hasattr(values, "dtype"):
if is_integer_dtype(values.dtype):
dtype = values.dtype
if dtype is not None:
if isinstance(dtype, str) and (
dtype.startswith("Int") or dtype.startswith("UInt")
):
# Avoid DeprecationWarning from NumPy about np.dtype("Int64")
# https://github.com/numpy/numpy/pull/7476
dtype = dtype.lower()
if not issubclass(type(dtype), _IntegerDtype):
try:
dtype = _dtypes[str(np.dtype(dtype))]
except KeyError:
raise ValueError("invalid dtype specified {}".format(dtype))
if isinstance(values, IntegerArray):
values, mask = values._data, values._mask
if dtype is not None:
values = values.astype(dtype.numpy_dtype, copy=False)
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
values = np.array(values, copy=copy)
if is_object_dtype(values):
inferred_type = lib.infer_dtype(values, skipna=True)
if inferred_type == "empty":
values = np.empty(len(values))
values.fill(np.nan)
elif inferred_type not in [
"floating",
"integer",
"mixed-integer",
"mixed-integer-float",
]:
raise TypeError(
"{} cannot be converted to an IntegerDtype".format(values.dtype)
)
elif is_bool_dtype(values) and is_integer_dtype(dtype):
values = np.array(values, dtype=int, copy=copy)
elif not (is_integer_dtype(values) or is_float_dtype(values)):
raise TypeError(
"{} cannot be converted to an IntegerDtype".format(values.dtype)
)
if mask is None:
mask = isna(values)
else:
assert len(mask) == len(values)
if not values.ndim == 1:
raise TypeError("values must be a 1D list-like")
if not mask.ndim == 1:
raise TypeError("mask must be a 1D list-like")
# infer dtype if needed
if dtype is None:
dtype = np.dtype("int64")
else:
dtype = dtype.type
# if we are float, let's make sure that we can
# safely cast
# we copy as need to coerce here
if mask.any():
values = values.copy()
values[mask] = 1
values = safe_cast(values, dtype, copy=False)
else:
values = safe_cast(values, dtype, copy=False)
return values, mask
class IntegerArray(ExtensionArray, ExtensionOpsMixin):
"""
Array of integer (optional missing) values.
.. versionadded:: 0.24.0
.. warning::
IntegerArray is currently experimental, and its API or internal
implementation may change without warning.
We represent an IntegerArray with 2 numpy arrays:
- data: contains a numpy integer array of the appropriate dtype
- mask: a boolean array holding a mask on the data, True is missing
To construct an IntegerArray from generic array-like input, use
:func:`pandas.array` with one of the integer dtypes (see examples).
See :ref:`integer_na` for more.
Parameters
----------
values : numpy.ndarray
A 1-d integer-dtype array.
mask : numpy.ndarray
A 1-d boolean-dtype array indicating missing values.
copy : bool, default False
Whether to copy the `values` and `mask`.
Attributes
----------
None
Methods
-------
None
Returns
-------
IntegerArray
Examples
--------
Create an IntegerArray with :func:`pandas.array`.
>>> int_array = pd.array([1, None, 3], dtype=pd.Int32Dtype())
>>> int_array
<IntegerArray>
[1, NaN, 3]
Length: 3, dtype: Int32
String aliases for the dtypes are also available. They are capitalized.
>>> pd.array([1, None, 3], dtype='Int32')
<IntegerArray>
[1, NaN, 3]
Length: 3, dtype: Int32
>>> pd.array([1, None, 3], dtype='UInt16')
<IntegerArray>
[1, NaN, 3]
Length: 3, dtype: UInt16
"""
@cache_readonly
def dtype(self):
return _dtypes[str(self._data.dtype)]
def __init__(self, values, mask, copy=False):
if not (isinstance(values, np.ndarray) and is_integer_dtype(values.dtype)):
raise TypeError(
"values should be integer numpy array. Use "
"the 'integer_array' function instead"
)
if not (isinstance(mask, np.ndarray) and is_bool_dtype(mask.dtype)):
raise TypeError(
"mask should be boolean numpy array. Use "
"the 'integer_array' function instead"
)
if copy:
values = values.copy()
mask = mask.copy()
self._data = values
self._mask = mask
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return integer_array(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
scalars = to_numeric(strings, errors="raise")
return cls._from_sequence(scalars, dtype, copy)
@classmethod
def _from_factorized(cls, values, original):
return integer_array(values, dtype=original.dtype)
def _formatter(self, boxed=False):
def fmt(x):
if isna(x):
return "NaN"
return str(x)
return fmt
def __getitem__(self, item):
if is_integer(item):
if self._mask[item]:
return self.dtype.na_value
return self._data[item]
return type(self)(self._data[item], self._mask[item])
def _coerce_to_ndarray(self):
"""
        coerce to an ndarray of object dtype
"""
# TODO(jreback) make this better
data = self._data.astype(object)
data[self._mask] = self._na_value
return data
__array_priority__ = 1000 # higher than ndarray so ops dispatch to us
def __array__(self, dtype=None):
"""
the array interface, return my values
We return an object array here to preserve our scalar values
"""
return self._coerce_to_ndarray()
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# For IntegerArray inputs, we apply the ufunc to ._data
# and mask the result.
if method == "reduce":
# Not clear how to handle missing values in reductions. Raise.
raise NotImplementedError("The 'reduce' method is not supported.")
out = kwargs.get("out", ())
for x in inputs + out:
if not isinstance(x, self._HANDLED_TYPES + (IntegerArray,)):
return NotImplemented
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
mask = np.zeros(len(self), dtype=bool)
inputs2 = []
for x in inputs:
if isinstance(x, IntegerArray):
mask |= x._mask
inputs2.append(x._data)
else:
inputs2.append(x)
def reconstruct(x):
# we don't worry about scalar `x` here, since we
# raise for reduce up above.
if is_integer_dtype(x.dtype):
m = mask.copy()
return IntegerArray(x, m)
else:
x[mask] = np.nan
return x
result = getattr(ufunc, method)(*inputs2, **kwargs)
        if isinstance(result, tuple):
            # wrap every element of a multi-output ufunc before returning
            return tuple(reconstruct(x) for x in result)
        else:
            return reconstruct(result)
def __iter__(self):
for i in range(len(self)):
if self._mask[i]:
yield self.dtype.na_value
else:
yield self._data[i]
def take(self, indexer, allow_fill=False, fill_value=None):
from pandas.api.extensions import take
# we always fill with 1 internally
# to avoid upcasting
data_fill_value = 1 if isna(fill_value) else fill_value
result = take(
self._data, indexer, fill_value=data_fill_value, allow_fill=allow_fill
)
mask = take(self._mask, indexer, fill_value=True, allow_fill=allow_fill)
# if we are filling
# we only fill where the indexer is null
# not existing missing values
# TODO(jreback) what if we have a non-na float as a fill value?
if allow_fill and notna(fill_value):
fill_mask = np.asarray(indexer) == -1
result[fill_mask] = fill_value
mask = mask ^ fill_mask
return type(self)(result, mask, copy=False)
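    # Illustrative doctest (not in the original source): filling with the
    # default NA fill_value marks the -1 slot as missing in the mask.
    #
    #   >>> arr = integer_array([10, 20, 30])
    #   >>> arr.take([0, -1], allow_fill=True)._mask
    #   array([False,  True])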
def copy(self):
data, mask = self._data, self._mask
data = data.copy()
mask = mask.copy()
return type(self)(data, mask, copy=False)
def __setitem__(self, key, value):
_is_scalar = is_scalar(value)
if _is_scalar:
value = [value]
value, mask = coerce_to_array(value, dtype=self.dtype)
if _is_scalar:
value = value[0]
mask = mask[0]
self._data[key] = value
self._mask[key] = mask
def __len__(self):
return len(self._data)
@property
def nbytes(self):
return self._data.nbytes + self._mask.nbytes
def isna(self):
return self._mask
@property
def _na_value(self):
return np.nan
@classmethod
def _concat_same_type(cls, to_concat):
data = np.concatenate([x._data for x in to_concat])
mask = np.concatenate([x._mask for x in to_concat])
return cls(data, mask)
def astype(self, dtype, copy=True):
"""
Cast to a NumPy array or IntegerArray with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ndarray or IntegerArray
            NumPy ndarray or IntegerArray with 'dtype' for its dtype.
Raises
------
TypeError
if incompatible type with an IntegerDtype, equivalent of same_kind
casting
"""
# if we are astyping to an existing IntegerDtype we can fastpath
if isinstance(dtype, _IntegerDtype):
result = self._data.astype(dtype.numpy_dtype, copy=False)
return type(self)(result, mask=self._mask, copy=False)
# coerce
data = self._coerce_to_ndarray()
return astype_nansafe(data, dtype, copy=None)
@property
def _ndarray_values(self) -> np.ndarray:
"""Internal pandas method for lossy conversion to a NumPy ndarray.
This method is not part of the pandas interface.
The expectation is that this is cheap to compute, and is primarily
used for interacting with our indexers.
"""
return self._data
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from pandas import Index, Series
# compute counts on the data with no nans
data = self._data[~self._mask]
value_counts = Index(data).value_counts()
array = value_counts.values
# TODO(extension)
# if we have allow Index to hold an ExtensionArray
# this is easier
index = value_counts.index.astype(object)
# if we want nans, count the mask
if not dropna:
# TODO(extension)
# appending to an Index *always* infers
# w/o passing the dtype
array = np.append(array, [self._mask.sum()])
index = Index(
np.concatenate([index.values, np.array([np.nan], dtype=object)]),
dtype=object,
)
return Series(array, index=index)
def _values_for_argsort(self) -> np.ndarray:
"""Return values for sorting.
Returns
-------
ndarray
The transformed values should maintain the ordering between values
within the array.
See Also
--------
ExtensionArray.argsort
"""
data = self._data.copy()
data[self._mask] = data.min() - 1
return data
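    # Illustrative doctest (not in the original source): NA entries are mapped
    # below the minimum, so they sort first under a plain argsort.
    #
    #   >>> integer_array([3, None, 1])._values_for_argsort()
    #   array([3, 0, 1])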
@classmethod
def _create_comparison_method(cls, op):
def cmp_method(self, other):
op_name = op.__name__
mask = None
if isinstance(other, (ABCSeries, ABCIndexClass)):
# Rely on pandas to unbox and dispatch to us.
return NotImplemented
if isinstance(other, IntegerArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 0 and len(self) != len(other):
raise ValueError("Lengths must match to compare")
other = lib.item_from_zerodim(other)
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "elementwise", FutureWarning)
with np.errstate(all="ignore"):
result = op(self._data, other)
# nans propagate
if mask is None:
mask = self._mask
else:
mask = self._mask | mask
result[mask] = op_name == "ne"
return result
name = "__{name}__".format(name=op.__name__)
return set_function_name(cmp_method, name, cls)
def _reduce(self, name, skipna=True, **kwargs):
data = self._data
mask = self._mask
# coerce to a nan-aware float if needed
if mask.any():
data = self._data.astype("float64")
data[mask] = self._na_value
op = getattr(nanops, "nan" + name)
result = op(data, axis=0, skipna=skipna, mask=mask)
# if we have a boolean op, don't coerce
if name in ["any", "all"]:
pass
# if we have a preservable numeric op,
# provide coercion back to an integer type if possible
elif name in ["sum", "min", "max", "prod"] and notna(result):
int_result = int(result)
if int_result == result:
result = int_result
return result
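    # Illustrative doctest (not in the original source): with a mask present
    # the reduction runs on a NaN-filled float buffer, then "sum" is coerced
    # back to a plain int.
    #
    #   >>> integer_array([1, None, 3])._reduce("sum")
    #   4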
def _maybe_mask_result(self, result, mask, other, op_name):
"""
Parameters
----------
result : array-like
mask : array-like bool
other : scalar or array-like
op_name : str
"""
# may need to fill infs
# and mask wraparound
if is_float_dtype(result):
mask |= (result == np.inf) | (result == -np.inf)
# if we have a float operand we are by-definition
# a float result
# or our op is a divide
if (is_float_dtype(other) or is_float(other)) or (
op_name in ["rtruediv", "truediv"]
):
result[mask] = np.nan
return result
return type(self)(result, mask, copy=False)
@classmethod
def _create_arithmetic_method(cls, op):
def integer_arithmetic_method(self, other):
op_name = op.__name__
mask = None
if isinstance(other, (ABCSeries, ABCIndexClass)):
# Rely on pandas to unbox and dispatch to us.
return NotImplemented
if getattr(other, "ndim", 0) > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
if isinstance(other, IntegerArray):
other, mask = other._data, other._mask
elif getattr(other, "ndim", None) == 0:
other = other.item()
elif is_list_like(other):
other = np.asarray(other)
if not other.ndim:
other = other.item()
elif other.ndim == 1:
if not (is_float_dtype(other) or is_integer_dtype(other)):
raise TypeError("can only perform ops with numeric values")
else:
if not (is_float(other) or is_integer(other)):
raise TypeError("can only perform ops with numeric values")
# nans propagate
if mask is None:
mask = self._mask
else:
mask = self._mask | mask
# 1 ** np.nan is 1. So we have to unmask those.
if op_name == "pow":
mask = np.where(self == 1, False, mask)
elif op_name == "rpow":
mask = np.where(other == 1, False, mask)
with np.errstate(all="ignore"):
result = op(self._data, other)
# divmod returns a tuple
if op_name == "divmod":
div, mod = result
return (
self._maybe_mask_result(div, mask, other, "floordiv"),
self._maybe_mask_result(mod, mask, other, "mod"),
)
return self._maybe_mask_result(result, mask, other, op_name)
name = "__{name}__".format(name=op.__name__)
return set_function_name(integer_arithmetic_method, name, cls)
IntegerArray._add_arithmetic_ops()
IntegerArray._add_comparison_ops()
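# Illustrative doctest (not in the original source): once the ops are attached
# above, arithmetic propagates the mask, so NA stays NA.
#
#   >>> arr = integer_array([1, None, 3])
#   >>> (arr + 1)._mask
#   array([False,  True, False])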
_dtype_docstring = """
An ExtensionDtype for {dtype} integer data.
Attributes
----------
None
Methods
-------
None
"""
# create the Dtype
Int8Dtype = register_extension_dtype(
type(
"Int8Dtype",
(_IntegerDtype,),
{
"type": np.int8,
"name": "Int8",
"__doc__": _dtype_docstring.format(dtype="int8"),
},
)
)
Int16Dtype = register_extension_dtype(
type(
"Int16Dtype",
(_IntegerDtype,),
{
"type": np.int16,
"name": "Int16",
"__doc__": _dtype_docstring.format(dtype="int16"),
},
)
)
Int32Dtype = register_extension_dtype(
type(
"Int32Dtype",
(_IntegerDtype,),
{
"type": np.int32,
"name": "Int32",
"__doc__": _dtype_docstring.format(dtype="int32"),
},
)
)
Int64Dtype = register_extension_dtype(
type(
"Int64Dtype",
(_IntegerDtype,),
{
"type": np.int64,
"name": "Int64",
"__doc__": _dtype_docstring.format(dtype="int64"),
},
)
)
UInt8Dtype = register_extension_dtype(
type(
"UInt8Dtype",
(_IntegerDtype,),
{
"type": np.uint8,
"name": "UInt8",
"__doc__": _dtype_docstring.format(dtype="uint8"),
},
)
)
UInt16Dtype = register_extension_dtype(
type(
"UInt16Dtype",
(_IntegerDtype,),
{
"type": np.uint16,
"name": "UInt16",
"__doc__": _dtype_docstring.format(dtype="uint16"),
},
)
)
UInt32Dtype = register_extension_dtype(
type(
"UInt32Dtype",
(_IntegerDtype,),
{
"type": np.uint32,
"name": "UInt32",
"__doc__": _dtype_docstring.format(dtype="uint32"),
},
)
)
UInt64Dtype = register_extension_dtype(
type(
"UInt64Dtype",
(_IntegerDtype,),
{
"type": np.uint64,
"name": "UInt64",
"__doc__": _dtype_docstring.format(dtype="uint64"),
},
)
)
_dtypes = {
"int8": Int8Dtype(),
"int16": Int16Dtype(),
"int32": Int32Dtype(),
"int64": Int64Dtype(),
"uint8": UInt8Dtype(),
"uint16": UInt16Dtype(),
"uint32": UInt32Dtype(),
"uint64": UInt64Dtype(),
}
|
{
"content_hash": "04afa9a20154ab58941d03ee8ce47e15",
"timestamp": "",
"source": "github",
"line_count": 867,
"max_line_length": 85,
"avg_line_length": 27.91118800461361,
"alnum_prop": 0.5566345716765155,
"repo_name": "kushalbhola/MyStuff",
"id": "867122964fe592c0b12243e141b71be4ea62ac63",
"size": "24199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Practice/PythonApplication/env/Lib/site-packages/pandas/core/arrays/integer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1330"
},
{
"name": "C#",
"bytes": "332967"
},
{
"name": "CSS",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "7539"
},
{
"name": "Java",
"bytes": "14860"
},
{
"name": "JavaScript",
"bytes": "9843"
},
{
"name": "Jupyter Notebook",
"bytes": "374013"
},
{
"name": "PowerShell",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "6511820"
},
{
"name": "Tcl",
"bytes": "24289"
},
{
"name": "TypeScript",
"bytes": "15697"
}
],
"symlink_target": ""
}
|
from __future__ import division
from vistrails.db.domain import DBPluginData
import unittest
import copy
import random
from vistrails.db.domain import IdScope
import vistrails.core
class PluginData(DBPluginData):
##########################################################################
# Constructors and copy
def __init__(self, *args, **kwargs):
DBPluginData.__init__(self, *args, **kwargs)
if self.id is None:
self.id = -1
def __copy__(self):
return PluginData.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = DBPluginData.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = PluginData
return cp
##########################################################################
# DB Conversion
@staticmethod
def convert(_plugin_data):
_plugin_data.__class__ = PluginData
##########################################################################
# Properties
id = DBPluginData.db_id
data = DBPluginData.db_data
##########################################################################
# Operators
def __eq__(self, other):
if type(other) != type(self):
return False
return self.data == other.data
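    # Illustrative note (not in the original source): equality ignores ids and
    # compares payloads only.
    #
    #   >>> PluginData(id=1, data="x") == PluginData(id=2, data="x")
    #   True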
################################################################################
# Testing
class TestPluginData(unittest.TestCase):
def create_data(self, id=1, data=""):
return PluginData(id=id, data=data)
def test_create(self):
self.create_data(2, "testing the data field")
def test_serialization(self):
import vistrails.core.db.io
p_data1 = self.create_data()
xml_str = vistrails.core.db.io.serialize(p_data1)
p_data2 = vistrails.core.db.io.unserialize(xml_str, PluginData)
self.assertEquals(p_data1, p_data2)
self.assertEquals(p_data1.id, p_data2.id)
|
{
"content_hash": "ef794bccf2200fd0486f89a6e77c5750",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 80,
"avg_line_length": 28.28985507246377,
"alnum_prop": 0.5005122950819673,
"repo_name": "hjanime/VisTrails",
"id": "2beddcc50b31bc9c67c20adef22cf154d0c8aec8",
"size": "3865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vistrails/core/vistrail/plugin_data.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19550"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19803915"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "Shell",
"bytes": "35024"
},
{
"name": "TeX",
"bytes": "145333"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
}
|
from rest_framework.serializers import (
ModelSerializer,
SerializerMethodField
)
from trialscompendium.trials.api.plot.plotserializers import plot_serializers
from trialscompendium.trials.models import Treatment
from trialscompendium.utils.hyperlinkedidentity import hyperlinked_identity
from trialscompendium.utils.serializersutils import FieldMethodSerializer, get_related_content
plot_serializers = plot_serializers()
def treatment_serializers():
"""
Treatment serializers
:return: All treatment serializers
:rtype: Object
"""
class TreatmentBaseSerializer(ModelSerializer):
"""
Base serializer for DRY implementation.
"""
class Meta:
model = Treatment
fields = [
'id',
'no_replicate',
'nitrogen_treatment',
'phosphate_treatment',
'tillage_practice',
'cropping_system',
'crops_grown',
'farm_yard_manure',
'farm_residue',
]
class TreatmentRelationBaseSerializer(ModelSerializer):
"""
Base serializer for DRY implementation.
"""
plots = SerializerMethodField()
class Meta:
model = Treatment
fields = [
'plots',
]
class TreatmentFieldMethodSerializer:
"""
Serialize an object based on a provided field
"""
def get_plots(self, obj):
"""
:param obj: Current record object
:return: Plot of a treatment
:rtype: Object/record
"""
request = self.context['request']
PlotListSerializer = plot_serializers['PlotListSerializer']
related_content = get_related_content(
obj, PlotListSerializer, obj.plot_relation, request
)
return related_content
class TreatmentListSerializer(
TreatmentBaseSerializer,
TreatmentRelationBaseSerializer,
TreatmentFieldMethodSerializer
):
"""
Serialize all records in given fields into an API
"""
url = hyperlinked_identity('trials_api:treatment_detail', 'pk')
class Meta:
model = Treatment
fields = TreatmentBaseSerializer.Meta.fields + ['url', ] + \
TreatmentRelationBaseSerializer.Meta.fields
class TreatmentDetailSerializer(
TreatmentBaseSerializer, TreatmentRelationBaseSerializer,
FieldMethodSerializer, TreatmentFieldMethodSerializer):
"""
Serialize single record into an API. This is dependent on fields given.
"""
user = SerializerMethodField()
modified_by = SerializerMethodField()
class Meta:
common_fields = [
'user',
'modified_by',
'last_update',
'time_created',
] + TreatmentRelationBaseSerializer.Meta.fields
model = Treatment
fields = TreatmentBaseSerializer.Meta.fields + common_fields
read_only_fields = ['id', ] + common_fields
return {
'TreatmentListSerializer': TreatmentListSerializer,
'TreatmentDetailSerializer': TreatmentDetailSerializer
}
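# Illustrative usage (not in the original source), mirroring how the plot
# serializers are consumed at the top of this module:
#
#   treatment_serializers = treatment_serializers()
#   TreatmentListSerializer = treatment_serializers['TreatmentListSerializer']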
|
{
"content_hash": "2f59756d7badf1c8e7b889abf17398cc",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 94,
"avg_line_length": 31.82857142857143,
"alnum_prop": 0.5957510472770796,
"repo_name": "nkoech/trialscompendium",
"id": "de7852658e88568656ba18c3d783ca37809fa295",
"size": "3342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trialscompendium/trials/api/treatment/treatmentserializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9892"
},
{
"name": "HTML",
"bytes": "23497"
},
{
"name": "JavaScript",
"bytes": "42015"
},
{
"name": "Python",
"bytes": "66024"
}
],
"symlink_target": ""
}
|
from django.db import models  # derive our models from Django's base models module
class Page(models.Model):
id = models.AutoField(primary_key=True)
title = models.CharField(max_length=256)
description = models.CharField(max_length=1024)
href = models.URLField(max_length=2084)
date_published = models.DateField(auto_now=True)
feed = models.ForeignKey('Feed',on_delete=models.CASCADE,default=1)
class Feed(models.Model):
id = models.AutoField(primary_key=True)
title = models.CharField(max_length=256)
description = models.CharField(max_length=1024)
href = models.URLField(max_length=2084)
rss = models.URLField(max_length=2084)
class Scrape(models.Model):
id = models.AutoField(primary_key=True)
title = models.CharField(max_length=256)
href = models.URLField(max_length=2084)
class NurseryArea(models.Model):
id = models.AutoField(primary_key=True)
area = models.CharField(max_length=256,unique=True)
class NurseryList(models.Model):
id = models.AutoField(primary_key=True)
title = models.CharField(max_length=256)
zipcode = models.CharField(max_length=24)
address = models.CharField(max_length=1024)
latitude = models.CharField(max_length=24)
longitude = models.CharField(max_length=24)
tel = models.CharField(max_length=24)
category1 = models.CharField(max_length=256)
category2 = models.CharField(max_length=256)
capacity_all = models.CharField(max_length=24)
capacity_0 = models.CharField(max_length=24)
capacity_1 = models.CharField(max_length=24)
capacity_2 = models.CharField(max_length=24)
capacity_3 = models.CharField(max_length=24)
capacity_4_5 = models.CharField(max_length=24)
area = models.ForeignKey('NurseryArea',on_delete=models.CASCADE,default=1)
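# Illustrative queries (not part of the original models module); the default
# reverse accessors (page_set, nurserylist_set) come from the foreign keys
# declared above, and the lookup values are hypothetical.
#
#   >>> feed = Feed.objects.get(id=1)
#   >>> feed.page_set.all()                 # every Page belonging to this feed
#   >>> area = NurseryArea.objects.get(area="Shibuya")   # hypothetical value
#   >>> area.nurserylist_set.count()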
|
{
"content_hash": "dbe1ee0d46ce2a92df91d94c04f7463d",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 78,
"avg_line_length": 41.25581395348837,
"alnum_prop": 0.7294250281848929,
"repo_name": "mdworks2016/work_development",
"id": "4510e4d55898fc986ca16e9a69523fef6d3d56a1",
"size": "1802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/10_Second_Python3_Django2/05_Django_BootStrap/djangoApp/djangoApp/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "142"
},
{
"name": "Kotlin",
"bytes": "68744"
},
{
"name": "Python",
"bytes": "1080"
}
],
"symlink_target": ""
}
|
"""PostProcessor for serving reveal.js HTML slideshows."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import os
import webbrowser
from tornado import web, ioloop, httpserver
from tornado.httpclient import AsyncHTTPClient
from traitlets import Bool, Unicode, Int
from .base import PostProcessorBase
class ProxyHandler(web.RequestHandler):
"""handler the proxies requests from a local prefix to a CDN"""
@web.asynchronous
def get(self, prefix, url):
"""proxy a request to a CDN"""
proxy_url = "/".join([self.settings['cdn'], url])
client = self.settings['client']
client.fetch(proxy_url, callback=self.finish_get)
def finish_get(self, response):
"""finish the request"""
# rethrow errors
response.rethrow()
for header in ["Content-Type", "Cache-Control", "Date", "Last-Modified", "Expires"]:
if header in response.headers:
self.set_header(header, response.headers[header])
self.finish(response.body)
class ServePostProcessor(PostProcessorBase):
"""Post processor designed to serve files
Proxies reveal.js requests to a CDN if no local reveal.js is present
"""
open_in_browser = Bool(True, config=True,
help="""Should the browser be opened automatically?"""
)
reveal_cdn = Unicode("https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.1.0",
config=True, help="""URL for reveal.js CDN."""
)
reveal_prefix = Unicode("reveal.js", config=True, help="URL prefix for reveal.js")
ip = Unicode("127.0.0.1", config=True, help="The IP address to listen on.")
port = Int(8000, config=True, help="port for the server to listen on.")
def postprocess(self, input):
"""Serve the build directory with a webserver."""
dirname, filename = os.path.split(input)
handlers = [
(r"/(.+)", web.StaticFileHandler, {'path' : dirname}),
(r"/", web.RedirectHandler, {"url": "/%s" % filename})
]
if ('://' in self.reveal_prefix or self.reveal_prefix.startswith("//")):
# reveal specifically from CDN, nothing to do
pass
elif os.path.isdir(os.path.join(dirname, self.reveal_prefix)):
# reveal prefix exists
self.log.info("Serving local %s", self.reveal_prefix)
else:
self.log.info("Redirecting %s requests to %s", self.reveal_prefix, self.reveal_cdn)
handlers.insert(0, (r"/(%s)/(.*)" % self.reveal_prefix, ProxyHandler))
app = web.Application(handlers,
cdn=self.reveal_cdn,
client=AsyncHTTPClient(),
)
# hook up tornado logging to our logger
try:
from tornado import log
log.app_log = self.log
except ImportError:
# old tornado (<= 3), ignore
pass
http_server = httpserver.HTTPServer(app)
http_server.listen(self.port, address=self.ip)
url = "http://%s:%i/%s" % (self.ip, self.port, filename)
print("Serving your slides at %s" % url)
print("Use Control-C to stop this server")
if self.open_in_browser:
webbrowser.open(url, new=2)
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
print("\nInterrupted")
def main(path):
"""allow running this module to serve the slides"""
server = ServePostProcessor()
server(path)
if __name__ == '__main__':
import sys
main(sys.argv[1])
|
{
"content_hash": "57fe33eedaa4789cb9743e262c5d6d70",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 95,
"avg_line_length": 34.923809523809524,
"alnum_prop": 0.6086719389146441,
"repo_name": "ArcherSys/ArcherSys",
"id": "9b561c66121329a06b43eb6396a5a5cf8ce7b51d",
"size": "3667",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Lib/site-packages/nbconvert/postprocessors/serve.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""Utilities related to dates, times, intervals, and timezones."""
from __future__ import absolute_import, print_function, unicode_literals
import numbers
import os
import random
import time as _time
from calendar import monthrange
from datetime import date, datetime, timedelta, tzinfo
from kombu.utils.functional import reprcall
from kombu.utils.objects import cached_property
from pytz import AmbiguousTimeError, FixedOffset
from pytz import timezone as _timezone
from pytz import utc
from celery.five import PY3, python_2_unicode_compatible, string_t
from .functional import dictfilter
from .iso8601 import parse_iso8601
from .text import pluralize
__all__ = (
'LocalTimezone', 'timezone', 'maybe_timedelta',
'delta_resolution', 'remaining', 'rate', 'weekday',
'humanize_seconds', 'maybe_iso8601', 'is_naive',
'make_aware', 'localize', 'to_utc', 'maybe_make_aware',
'ffwd', 'utcoffset', 'adjust_timestamp',
'get_exponential_backoff_interval',
)
C_REMDEBUG = os.environ.get('C_REMDEBUG', False)
DAYNAMES = 'sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat'
WEEKDAYS = dict(zip(DAYNAMES, range(7)))
RATE_MODIFIER_MAP = {
's': lambda n: n,
'm': lambda n: n / 60.0,
'h': lambda n: n / 60.0 / 60.0,
}
TIME_UNITS = (
('day', 60 * 60 * 24.0, lambda n: format(n, '.2f')),
('hour', 60 * 60.0, lambda n: format(n, '.2f')),
('minute', 60.0, lambda n: format(n, '.2f')),
('second', 1.0, lambda n: format(n, '.2f')),
)
ZERO = timedelta(0)
_local_timezone = None
@python_2_unicode_compatible
class LocalTimezone(tzinfo):
"""Local time implementation.
Note:
Used only when the :setting:`enable_utc` setting is disabled.
"""
_offset_cache = {}
def __init__(self):
        # This code lives in __init__ so it executes as late as possible;
        # see get_default_timezone().
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def __repr__(self):
return '<LocalTimezone: UTC{0:+03d}>'.format(
int(self.DSTOFFSET.total_seconds() / 3600),
)
def utcoffset(self, dt):
return self.DSTOFFSET if self._isdst(dt) else self.STDOFFSET
def dst(self, dt):
return self.DSTDIFF if self._isdst(dt) else ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
if PY3: # pragma: no cover
def fromutc(self, dt):
# The base tzinfo class no longer implements a DST
# offset aware .fromutc() in Python 3 (Issue #2306).
# I'd rather rely on pytz to do this, than port
# the C code from cpython's fromutc [asksol]
offset = int(self.utcoffset(dt).seconds / 60.0)
try:
tz = self._offset_cache[offset]
except KeyError:
tz = self._offset_cache[offset] = FixedOffset(offset)
return tz.fromutc(dt.replace(tzinfo=tz))
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
class _Zone(object):
def tz_or_local(self, tzinfo=None):
# pylint: disable=redefined-outer-name
if tzinfo is None:
return self.local
return self.get_timezone(tzinfo)
def to_local(self, dt, local=None, orig=None):
if is_naive(dt):
dt = make_aware(dt, orig or self.utc)
return localize(dt, self.tz_or_local(local))
if PY3: # pragma: no cover
def to_system(self, dt):
# tz=None is a special case since Python 3.3, and will
# convert to the current local timezone (Issue #2306).
return dt.astimezone(tz=None)
else:
def to_system(self, dt): # noqa
return localize(dt, self.local)
def to_local_fallback(self, dt):
if is_naive(dt):
return make_aware(dt, self.local)
return localize(dt, self.local)
def get_timezone(self, zone):
if isinstance(zone, string_t):
return _timezone(zone)
return zone
@cached_property
def local(self):
return LocalTimezone()
@cached_property
def utc(self):
return self.get_timezone('UTC')
timezone = _Zone()
def maybe_timedelta(delta):
"""Convert integer to timedelta, if argument is an integer."""
if isinstance(delta, numbers.Real):
return timedelta(seconds=delta)
return delta
def delta_resolution(dt, delta):
"""Round a :class:`~datetime.datetime` to the resolution of timedelta.
If the :class:`~datetime.timedelta` is in days, the
:class:`~datetime.datetime` will be rounded to the nearest days,
if the :class:`~datetime.timedelta` is in hours the
:class:`~datetime.datetime` will be rounded to the nearest hour,
and so on until seconds, which will just return the original
:class:`~datetime.datetime`.
"""
delta = max(delta.total_seconds(), 0)
resolutions = ((3, lambda x: x / 86400),
(4, lambda x: x / 3600),
(5, lambda x: x / 60))
args = dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second
for res, predicate in resolutions:
if predicate(delta) >= 1.0:
return datetime(*args[:res], tzinfo=dt.tzinfo)
return dt
def remaining(start, ends_in, now=None, relative=False):
"""Calculate the remaining time for a start date and a timedelta.
For example, "how many seconds left for 30 seconds after start?"
Arguments:
start (~datetime.datetime): Starting date.
ends_in (~datetime.timedelta): The end delta.
relative (bool): If enabled the end time will be calculated
using :func:`delta_resolution` (i.e., rounded to the
resolution of `ends_in`).
now (Callable): Function returning the current time and date.
Defaults to :func:`datetime.utcnow`.
Returns:
~datetime.timedelta: Remaining time.
"""
now = now or datetime.utcnow()
if now.utcoffset() != start.utcoffset():
# Timezone has changed, or DST started/ended
start = start.replace(tzinfo=now.tzinfo)
end_date = start + ends_in
if relative:
end_date = delta_resolution(end_date, ends_in)
ret = end_date - now
if C_REMDEBUG: # pragma: no cover
print('rem: NOW:%r START:%r ENDS_IN:%r END_DATE:%s REM:%s' % (
now, start, ends_in, end_date, ret))
return ret
def rate(r):
"""Convert rate string (`"100/m"`, `"2/h"` or `"0.5/s"`) to seconds."""
if r:
if isinstance(r, string_t):
ops, _, modifier = r.partition('/')
return RATE_MODIFIER_MAP[modifier or 's'](float(ops)) or 0
return r or 0
return 0
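# Illustrative doctests (not in the original source) for the rate conversion:
#
#   >>> rate('100/m')
#   1.6666666666666667
#   >>> rate('2/h')
#   0.0005555555555555556
#   >>> rate(0.5)
#   0.5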
def weekday(name):
"""Return the position of a weekday: 0 - 7, where 0 is Sunday.
Example:
>>> weekday('sunday'), weekday('sun'), weekday('mon')
(0, 0, 1)
"""
abbreviation = name[0:3].lower()
try:
return WEEKDAYS[abbreviation]
except KeyError:
# Show original day name in exception, instead of abbr.
raise KeyError(name)
def humanize_seconds(secs, prefix='', sep='', now='now', microseconds=False):
"""Show seconds in human form.
    For example, 60 becomes "1.00 minute", and 7200 becomes "2.00 hours".
Arguments:
prefix (str): can be used to add a preposition to the output
(e.g., 'in' will give 'in 1 second', but add nothing to 'now').
now (str): Literal 'now'.
microseconds (bool): Include microseconds.
"""
secs = float(format(float(secs), '.2f'))
for unit, divider, formatter in TIME_UNITS:
if secs >= divider:
w = secs / float(divider)
return '{0}{1}{2} {3}'.format(prefix, sep, formatter(w),
pluralize(w, unit))
if microseconds and secs > 0.0:
return '{prefix}{sep}{0:.2f} seconds'.format(
secs, sep=sep, prefix=prefix)
return now
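# Illustrative doctests (not in the original source):
#
#   >>> humanize_seconds(60)
#   '1.00 minute'
#   >>> humanize_seconds(7200)
#   '2.00 hours'
#   >>> humanize_seconds(0)
#   'now'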
def maybe_iso8601(dt):
"""Either ``datetime | str -> datetime`` or ``None -> None``."""
if not dt:
return
if isinstance(dt, datetime):
return dt
return parse_iso8601(dt)
def is_naive(dt):
"""Return :const:`True` if :class:`~datetime.datetime` is naive."""
return dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None
def make_aware(dt, tz):
"""Set timezone for a :class:`~datetime.datetime` object."""
try:
_localize = tz.localize
except AttributeError:
return dt.replace(tzinfo=tz)
else:
# works on pytz timezones
try:
return _localize(dt, is_dst=None)
except AmbiguousTimeError:
return min(_localize(dt, is_dst=True),
_localize(dt, is_dst=False))
def localize(dt, tz):
"""Convert aware :class:`~datetime.datetime` to another timezone."""
if is_naive(dt): # Ensure timezone aware datetime
dt = make_aware(dt, tz)
if dt.tzinfo == utc:
dt = dt.astimezone(tz) # Always safe to call astimezone on utc zones
try:
_normalize = tz.normalize
except AttributeError: # non-pytz tz
return dt
else:
try:
return _normalize(dt, is_dst=None)
except TypeError:
return _normalize(dt)
except AmbiguousTimeError:
return min(_normalize(dt, is_dst=True),
_normalize(dt, is_dst=False))
def to_utc(dt):
"""Convert naive :class:`~datetime.datetime` to UTC."""
return make_aware(dt, timezone.utc)
def maybe_make_aware(dt, tz=None):
"""Convert dt to aware datetime, do nothing if dt is already aware."""
if is_naive(dt):
dt = to_utc(dt)
return localize(
dt, timezone.utc if tz is None else timezone.tz_or_local(tz),
)
return dt
@python_2_unicode_compatible
class ffwd(object):
"""Version of ``dateutil.relativedelta`` that only supports addition."""
def __init__(self, year=None, month=None, weeks=0, weekday=None, day=None,
hour=None, minute=None, second=None, microsecond=None,
**kwargs):
# pylint: disable=redefined-outer-name
# weekday is also a function in outer scope.
self.year = year
self.month = month
self.weeks = weeks
self.weekday = weekday
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.microsecond = microsecond
self.days = weeks * 7
self._has_time = self.hour is not None or self.minute is not None
def __repr__(self):
return reprcall('ffwd', (), self._fields(weeks=self.weeks,
weekday=self.weekday))
def __radd__(self, other):
if not isinstance(other, date):
return NotImplemented
year = self.year or other.year
month = self.month or other.month
day = min(monthrange(year, month)[1], self.day or other.day)
ret = other.replace(**dict(dictfilter(self._fields()),
year=year, month=month, day=day))
if self.weekday is not None:
ret += timedelta(days=(7 - ret.weekday() + self.weekday) % 7)
return ret + timedelta(days=self.days)
def _fields(self, **extra):
return dictfilter({
'year': self.year, 'month': self.month, 'day': self.day,
'hour': self.hour, 'minute': self.minute,
'second': self.second, 'microsecond': self.microsecond,
}, **extra)
def utcoffset(time=_time, localtime=_time.localtime):
"""Return the current offset to UTC in hours."""
if localtime().tm_isdst:
return time.altzone // 3600
return time.timezone // 3600
def adjust_timestamp(ts, offset, here=utcoffset):
"""Adjust timestamp based on provided utcoffset."""
return ts - (offset - here()) * 3600
def get_exponential_backoff_interval(
factor,
retries,
maximum,
full_jitter=False
):
"""Calculate the exponential backoff wait time."""
# Will be zero if factor equals 0
countdown = factor * (2 ** retries)
# Full jitter according to
# https://www.awsarchitectureblog.com/2015/03/backoff.html
if full_jitter:
countdown = random.randrange(countdown + 1)
# Adjust according to maximum wait time and account for negative values.
return max(0, min(maximum, countdown))
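# Illustrative doctest (not in the original source): with factor=2 and no
# jitter the countdown doubles per retry, capped at `maximum`.
#
#   >>> [get_exponential_backoff_interval(2, r, 60) for r in range(6)]
#   [2, 4, 8, 16, 32, 60]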
|
{
"content_hash": "b77e17a6f53f4118aeaf6bffa07a3246",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 78,
"avg_line_length": 31.641975308641975,
"alnum_prop": 0.6001560671088568,
"repo_name": "kawamon/hue",
"id": "4783c767942f9868e154192ddfd4d1c75dd58327",
"size": "12839",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/celery-4.2.1/celery/utils/time.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
}
|
"""
Create a demo spectrum with a couple of lines in it, and check they look sensible when we resample them.
"""
from math import pi
import numpy as np
from fourgp_degrade import SpectrumResampler
from fourgp_speclib import Spectrum
# Create a really bonkers raster to sample a spectrum on
raster_original = np.concatenate([np.linspace(0, 3, 30), np.linspace(3, 6, 300), np.linspace(6, 10, 20)])
# An analytic function for a spectral line
def lorentzian(x, x0, fwhm):
return 1 / pi * (0.5 * fwhm) / (np.square(x - x0) + np.square(0.5 * fwhm))
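# Illustrative check (not in the original script): this Lorentzian is
# area-normalised, so its peak height is 2 / (pi * fwhm).
#
#   >>> lorentzian(np.array([3.0]), 3, 0.5)   # 4 / pi at the line centre
#   array([1.27323954])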
# Create a dummy spectrum
x_in = raster_original
absorption = (lorentzian(x_in, 3, 0.5) +
lorentzian(x_in, 4.5, 0.2) +
lorentzian(x_in, 6, 0.01) +
lorentzian(x_in, 8, 0.2) +
lorentzian(x_in, 9, 0.01))
spectrum_original = np.exp(-absorption)
spectrum_original_object = Spectrum(wavelengths=raster_original,
values=spectrum_original,
value_errors=np.zeros_like(spectrum_original)
)
# Create list of the spectra we're going to save to disk
output = [spectrum_original_object]
# Create a more sensible raster to sample the spectrum onto
resampler = SpectrumResampler(input_spectrum=spectrum_original_object)
for raster_new in [np.linspace(0, 12, 240), np.linspace(0, 12, 48), np.linspace(0, 12, 24)]:
spectrum_new_object = resampler.onto_raster(output_raster=raster_new)
output.append(spectrum_new_object)
for counter, item in enumerate(output):
item.to_file(filename="/tmp/resampling_demo_{:d}.dat".format(counter),
binary=False, overwrite=False)
|
{
"content_hash": "72759f3f479de18033ee44850305df77",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 105,
"avg_line_length": 38.5,
"alnum_prop": 0.6523022432113341,
"repo_name": "dcf21/4most-4gp-scripts",
"id": "5491f4dea270a79231e34e298619b7127914a16f",
"size": "2180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/scripts/tests/resample_spectrum.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "660733"
},
{
"name": "Shell",
"bytes": "276964"
}
],
"symlink_target": ""
}
|
from django import test
from django_cradmin.templatetags import cradmin_tags
class TestCradminTestCssClass(test.TestCase):
def test_test_css_classes_disabled(self):
with self.settings(DJANGO_CRADMIN_INCLUDE_TEST_CSS_CLASSES=False):
self.assertEqual('', cradmin_tags.cradmin_test_css_class('my-css-class'))
def test_test_css_classes_enabled(self):
with self.settings(DJANGO_CRADMIN_INCLUDE_TEST_CSS_CLASSES=True):
self.assertEqual(' test-my-css-class ',
cradmin_tags.cradmin_test_css_class('my-css-class'))
|
{
"content_hash": "a314222f2e95b8d91e5259c04700ce12",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 85,
"avg_line_length": 42.07142857142857,
"alnum_prop": 0.6859083191850595,
"repo_name": "appressoas/django_cradmin",
"id": "dd7dc589d4ccdd8d811c80991f006fea0d258e18",
"size": "589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_cradmin/tests/test_templatetags/test_cradmin_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "192105"
},
{
"name": "JavaScript",
"bytes": "1951677"
},
{
"name": "Python",
"bytes": "771868"
},
{
"name": "SCSS",
"bytes": "679114"
}
],
"symlink_target": ""
}
|
__author__ = 'juan'
import remote_database
import glob
db = remote_database.database()
files = glob.glob("*.txt")
print files
for filename in files:
    try:
        source = open(filename, "r")
        try:
            lines = source.readlines()
        finally:
            source.close()
    except IOError:
        # skip unreadable files rather than re-inserting stale lines
        continue
    for line in lines:
        db.insert(line)
    print "Finished", filename
|
{
"content_hash": "00128700d83cb5b24fd62ac60409df75",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 37,
"avg_line_length": 16.666666666666668,
"alnum_prop": 0.5425,
"repo_name": "aitoralmeida/eu-elections",
"id": "6ff6e48997ace4453f3890b2436d84461ed656c0",
"size": "400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analyzer/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "33879"
},
{
"name": "HTML",
"bytes": "101091"
},
{
"name": "JavaScript",
"bytes": "88290"
},
{
"name": "Python",
"bytes": "149634"
}
],
"symlink_target": ""
}
|
from ftp import FtpUpload
|
{
"content_hash": "a4fdf12f6ffa3f1d5eeab55e238f48f2",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 25,
"avg_line_length": 25,
"alnum_prop": 0.88,
"repo_name": "tspycher/python-blogengine",
"id": "aaa47c06e335da2da0221899f3ec47240059145a",
"size": "25",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blogengine/upload/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15664"
}
],
"symlink_target": ""
}
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Converts WAV audio files into input features for neural networks.
The models used in this example take in two-dimensional spectrograms as the
input to their neural network portions. For testing and porting purposes it's
useful to be able to generate these spectrograms outside of the full model, so
that on-device implementations using their own FFT and streaming code can be
tested against the version used in training for example. The output is as a
C source file, so it can be easily linked into an embedded test application.
To use this, run:
bazel run tensorflow/examples/speech_commands:wav_to_features -- \
--input_wav=my.wav --output_c_file=my_wav_data.c
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import tensorflow as tf
import input_data
import models
from tensorflow.python.platform import gfile
FLAGS = None
def wav_to_features(sample_rate, clip_duration_ms, window_size_ms,
window_stride_ms, feature_bin_count, quantize, preprocess,
input_wav, output_c_file):
"""Converts an audio file into its corresponding feature map.
Args:
sample_rate: Expected sample rate of the wavs.
clip_duration_ms: Expected duration in milliseconds of the wavs.
window_size_ms: How long each spectrogram timeslice is.
    window_stride_ms: How far to move in time between spectrogram timeslices.
feature_bin_count: How many bins to use for the feature fingerprint.
quantize: Whether to train the model for eight-bit deployment.
preprocess: Spectrogram processing mode; "mfcc", "average" or "micro".
input_wav: Path to the audio WAV file to read.
output_c_file: Where to save the generated C source file.
"""
# Start a new TensorFlow session.
sess = tf.InteractiveSession()
model_settings = models.prepare_model_settings(
0, sample_rate, clip_duration_ms, window_size_ms, window_stride_ms,
feature_bin_count, preprocess)
audio_processor = input_data.AudioProcessor(None, None, 0, 0, '', 0, 0,
model_settings, None)
results = audio_processor.get_features_for_wav(input_wav, model_settings,
sess)
features = results[0]
variable_base = os.path.splitext(os.path.basename(input_wav).lower())[0]
# Save a C source file containing the feature data as an array.
with gfile.GFile(output_c_file, 'w') as f:
f.write('/* File automatically created by\n')
f.write(' * tensorflow/examples/speech_commands/wav_to_features.py \\\n')
f.write(' * --sample_rate=%d \\\n' % sample_rate)
f.write(' * --clip_duration_ms=%d \\\n' % clip_duration_ms)
f.write(' * --window_size_ms=%d \\\n' % window_size_ms)
f.write(' * --window_stride_ms=%d \\\n' % window_stride_ms)
f.write(' * --feature_bin_count=%d \\\n' % feature_bin_count)
if quantize:
f.write(' * --quantize=1 \\\n')
f.write(' * --preprocess="%s" \\\n' % preprocess)
f.write(' * --input_wav="%s" \\\n' % input_wav)
f.write(' * --output_c_file="%s" \\\n' % output_c_file)
f.write(' */\n\n')
f.write('const int g_%s_width = %d;\n' %
(variable_base, model_settings['fingerprint_width']))
f.write('const int g_%s_height = %d;\n' %
(variable_base, model_settings['spectrogram_length']))
if quantize:
features_min, features_max = input_data.get_features_range(model_settings)
f.write('const unsigned char g_%s_data[] = {' % variable_base)
i = 0
for value in features.flatten():
quantized_value = int(
round(
(255 * (value - features_min)) / (features_max - features_min)))
if quantized_value < 0:
quantized_value = 0
if quantized_value > 255:
quantized_value = 255
if i == 0:
f.write('\n ')
f.write('%d, ' % (quantized_value))
i = (i + 1) % 10
else:
f.write('const float g_%s_data[] = {\n' % variable_base)
i = 0
for value in features.flatten():
if i == 0:
f.write('\n ')
        f.write('%f, ' % value)
i = (i + 1) % 10
f.write('\n};\n')
def main(_):
# We want to see all the logging messages.
tf.logging.set_verbosity(tf.logging.INFO)
wav_to_features(FLAGS.sample_rate, FLAGS.clip_duration_ms,
FLAGS.window_size_ms, FLAGS.window_stride_ms,
FLAGS.feature_bin_count, FLAGS.quantize, FLAGS.preprocess,
FLAGS.input_wav, FLAGS.output_c_file)
tf.logging.info('Wrote to "%s"' % (FLAGS.output_c_file))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--sample_rate',
type=int,
default=16000,
help='Expected sample rate of the wavs',)
parser.add_argument(
'--clip_duration_ms',
type=int,
default=1000,
help='Expected duration in milliseconds of the wavs',)
parser.add_argument(
'--window_size_ms',
type=float,
default=30.0,
help='How long each spectrogram timeslice is.',)
parser.add_argument(
'--window_stride_ms',
type=float,
default=10.0,
      help='How far to move in time between spectrogram timeslices.',)
parser.add_argument(
'--feature_bin_count',
type=int,
default=40,
help='How many bins to use for the MFCC fingerprint',
)
parser.add_argument(
'--quantize',
type=bool,
default=False,
help='Whether to train the model for eight-bit deployment')
parser.add_argument(
'--preprocess',
type=str,
default='mfcc',
help='Spectrogram processing mode. Can be "mfcc", "average", or "micro"')
parser.add_argument(
'--input_wav',
type=str,
default=None,
help='Path to the audio WAV file to read')
parser.add_argument(
'--output_c_file',
type=str,
default=None,
help='Where to save the generated C source file containing the features')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
{
"content_hash": "8a9e62fa9b99afed8263e6d083539617",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 80,
"avg_line_length": 37.16216216216216,
"alnum_prop": 0.6334545454545455,
"repo_name": "jbedorf/tensorflow",
"id": "d7f2446d355dd8ee98c37a6ff8179c19e2e721df",
"size": "6875",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/examples/speech_commands/wav_to_features.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "647467"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59799751"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1508512"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908330"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94633"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15108"
},
{
"name": "Pascal",
"bytes": "770"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46379626"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "480235"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
}
|
import os
from logging.config import fileConfig
from flask import Flask, request, jsonify
from flask_apscheduler import APScheduler
from flask_googlemaps import GoogleMaps
from flask_mongoengine import MongoEngine
from werkzeug.exceptions import HTTPException
# MongoEngine instance used in the app
db = MongoEngine()
def create_app(**config_overrides):
"""Creates and initializes Flask app
Args:
config_overrides: named arguments for overriding app config obtained from config file
Returns:
Flask app
"""
# Create application object
app = Flask(__name__)
# Configure app based on config.py
app.config.from_object('config')
# Configure logging
if os.path.isfile('logging_config.ini'):
fileConfig('logging_config.ini')
# Apply overrides
app.config.update(config_overrides)
# Init MongoEngine
db.init_app(app)
# Initialize Google Maps extension for frontend
GoogleMaps(app)
# Import the blueprints here.
# We need to do it here to avoid circular dependencies
from app.api.v1 import mod_api_v1
from app.frontend import mod_frontend
# Register the blueprints
app.register_blueprint(mod_api_v1)
app.register_blueprint(mod_frontend)
# Set up job scheduler
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()
# Error handler for http errors and exceptions
def handle_error(error):
"""Global error handler"""
code = 500
if isinstance(error, HTTPException):
code = error.code
else:
# Log all internal exceptions
app.logger.error('Exception {} raised for request {}'.format(error, request.url))
# If request was to the api, render error in JSON
if request.path.startswith('/api/'):
return jsonify(error='error', code=code), code
else:
return error
# Register the error handlers
for cls in HTTPException.__subclasses__():
app.register_error_handler(cls, handle_error)
app.register_error_handler(Exception, handle_error)
return app
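# A minimal usage sketch (assumes the config.py module loaded above is on the
# import path; the module and variable names here are illustrative):
#
# from app import create_app
# application = create_app()  # WSGI servers can be pointed at this object
#
# if __name__ == '__main__':
#     application.run(debug=True)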
|
{
"content_hash": "9d33eb1e7913a312b5f296bd76cf57ee",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 93,
"avg_line_length": 27.139240506329113,
"alnum_prop": 0.6735074626865671,
"repo_name": "ksluckow/foodtrucklocator",
"id": "cb97930fa407a83fd20239bf64fc39ca258c8c15",
"size": "2144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1012"
},
{
"name": "HTML",
"bytes": "927"
},
{
"name": "Python",
"bytes": "25091"
}
],
"symlink_target": ""
}
|
"""
InformationMachineAPILib.Models.GetProductAlternativeTypesWrapper
"""
from InformationMachineAPILib.APIHelper import APIHelper
from InformationMachineAPILib.Models.ProductAlternativeTypeInfo import ProductAlternativeTypeInfo
from InformationMachineAPILib.Models.MetaBase import MetaBase
class GetProductAlternativeTypesWrapper(object):
"""Implementation of the 'GetProductAlternativeTypesWrapper' model.
TODO: type model description here.
Attributes:
result (list of ProductAlternativeTypeInfo): TODO: type description
here.
meta (MetaBase): TODO: type description here.
"""
def __init__(self,
**kwargs):
"""Constructor for the GetProductAlternativeTypesWrapper class
Args:
**kwargs: Keyword Arguments in order to initialise the
object. Any of the attributes in this object are able to
be set through the **kwargs of the constructor. The values
that can be supplied and their types are as follows::
result -- list of ProductAlternativeTypeInfo -- Sets the attribute result
meta -- MetaBase -- Sets the attribute meta
"""
# Set all of the parameters to their default values
self.result = None
self.meta = None
# Create a mapping from API property names to Model property names
replace_names = {
"result": "result",
"meta": "meta",
}
# Parse all of the Key-Value arguments
if kwargs is not None:
for key in kwargs:
# Only add arguments that are actually part of this object
if key in replace_names:
setattr(self, replace_names[key], kwargs[key])
# Other objects also need to be initialised properly
if "result" in kwargs:
# Parameter is an array, so we need to iterate through it
self.result = list()
for item in kwargs["result"]:
self.result.append(ProductAlternativeTypeInfo(**item))
# Other objects also need to be initialised properly
if "meta" in kwargs:
self.meta = MetaBase(**kwargs["meta"])
def resolve_names(self):
"""Creates a dictionary representation of this object.
This method converts an object to a dictionary that represents the
format that the model should be in when passed into an API Request.
Because of this, the generated dictionary may have different
property names to that of the model itself.
Returns:
dict: The dictionary representing the object.
"""
# Create a mapping from Model property names to API property names
replace_names = {
"result": "result",
"meta": "meta",
}
retval = dict()
return APIHelper.resolve_names(self, replace_names, retval)
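# Illustrative round trip (the payload keys are hypothetical and only need to
# match the nested models' constructors):
#
# payload = {"result": [{"id": 1, "name": "generic"}], "meta": {"code": "200"}}
# wrapper = GetProductAlternativeTypesWrapper(**payload)
# wrapper.resolve_names()  # -> {"result": ..., "meta": ...} in API naming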
|
{
"content_hash": "9f6c10c8c8d166bce86b654e71366381",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 97,
"avg_line_length": 35.94117647058823,
"alnum_prop": 0.6134206219312602,
"repo_name": "information-machine/information-machine-api-python",
"id": "2580f6d16f18caf0c47f081b6081c0297b626f0e",
"size": "3080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "InformationMachineAPILib/Models/GetProductAlternativeTypesWrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "379009"
}
],
"symlink_target": ""
}
|
""" discover and run doctests in modules and test files."""
from __future__ import absolute_import
import traceback
import pytest, py
from _pytest.python import FixtureRequest
from py._code.code import TerminalRepr, ReprFileLocation
def pytest_addoption(parser):
parser.addini('doctest_optionflags', 'option flags for doctests',
type="args", default=["ELLIPSIS"])
group = parser.getgroup("collect")
group.addoption("--doctest-modules",
action="store_true", default=False,
help="run doctests in all .py modules",
dest="doctestmodules")
group.addoption("--doctest-glob",
action="store", default="test*.txt", metavar="pat",
help="doctests file matching pattern, default: test*.txt",
dest="doctestglob")
group.addoption("--doctest-ignore-import-errors",
action="store_true", default=False,
help="ignore doctest ImportErrors",
dest="doctest_ignore_import_errors")
def pytest_collect_file(path, parent):
config = parent.config
if path.ext == ".py":
if config.option.doctestmodules:
return DoctestModule(path, parent)
elif (path.ext in ('.txt', '.rst') and parent.session.isinitpath(path)) or \
path.check(fnmatch=config.getvalue("doctestglob")):
return DoctestTextfile(path, parent)
class ReprFailDoctest(TerminalRepr):
def __init__(self, reprlocation, lines):
self.reprlocation = reprlocation
self.lines = lines
def toterminal(self, tw):
for line in self.lines:
tw.line(line)
self.reprlocation.toterminal(tw)
class DoctestItem(pytest.Item):
def __init__(self, name, parent, runner=None, dtest=None):
super(DoctestItem, self).__init__(name, parent)
self.runner = runner
self.dtest = dtest
self.obj = None
self.fixture_request = None
def setup(self):
if self.dtest is not None:
self.fixture_request = _setup_fixtures(self)
globs = dict(getfixture=self.fixture_request.getfuncargvalue)
self.dtest.globs.update(globs)
def runtest(self):
_check_all_skipped(self.dtest)
self.runner.run(self.dtest)
def repr_failure(self, excinfo):
import doctest
if excinfo.errisinstance((doctest.DocTestFailure,
doctest.UnexpectedException)):
doctestfailure = excinfo.value
example = doctestfailure.example
test = doctestfailure.test
filename = test.filename
if test.lineno is None:
lineno = None
else:
lineno = test.lineno + example.lineno + 1
message = excinfo.type.__name__
reprlocation = ReprFileLocation(filename, lineno, message)
checker = _get_unicode_checker()
REPORT_UDIFF = doctest.REPORT_UDIFF
filelines = py.path.local(filename).readlines(cr=0)
lines = []
if lineno is not None:
i = max(test.lineno, max(0, lineno - 10)) # XXX?
for line in filelines[i:lineno]:
lines.append("%03d %s" % (i+1, line))
i += 1
else:
lines.append('EXAMPLE LOCATION UNKNOWN, not showing all tests of that example')
indent = '>>>'
for line in example.source.splitlines():
lines.append('??? %s %s' % (indent, line))
indent = '...'
if excinfo.errisinstance(doctest.DocTestFailure):
lines += checker.output_difference(example,
doctestfailure.got, REPORT_UDIFF).split("\n")
else:
inner_excinfo = py.code.ExceptionInfo(excinfo.value.exc_info)
lines += ["UNEXPECTED EXCEPTION: %s" %
repr(inner_excinfo.value)]
lines += traceback.format_exception(*excinfo.value.exc_info)
return ReprFailDoctest(reprlocation, lines)
else:
return super(DoctestItem, self).repr_failure(excinfo)
def reportinfo(self):
return self.fspath, None, "[doctest] %s" % self.name
def _get_flag_lookup():
import doctest
return dict(DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
ELLIPSIS=doctest.ELLIPSIS,
IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
ALLOW_UNICODE=_get_allow_unicode_flag())
def get_optionflags(parent):
optionflags_str = parent.config.getini("doctest_optionflags")
flag_lookup_table = _get_flag_lookup()
flag_acc = 0
for flag in optionflags_str:
flag_acc |= flag_lookup_table[flag]
return flag_acc
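# The ini option consumed above comes from the project's pytest configuration,
# for example in pytest.ini:
#
# [pytest]
# doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE ALLOW_UNICODE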
class DoctestTextfile(DoctestItem, pytest.Module):
def runtest(self):
import doctest
fixture_request = _setup_fixtures(self)
# inspired by doctest.testfile; ideally we would use it directly,
# but it doesn't support passing a custom checker
text = self.fspath.read()
filename = str(self.fspath)
name = self.fspath.basename
globs = dict(getfixture=fixture_request.getfuncargvalue)
if '__name__' not in globs:
globs['__name__'] = '__main__'
optionflags = get_optionflags(self)
runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
checker=_get_unicode_checker())
parser = doctest.DocTestParser()
test = parser.get_doctest(text, globs, name, filename, 0)
_check_all_skipped(test)
runner.run(test)
def _check_all_skipped(test):
"""raises pytest.skip() if all examples in the given DocTest have the SKIP
option set.
"""
import doctest
all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
if all_skipped:
pytest.skip('all tests skipped by +SKIP option')
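# For reference, a doctest whose every example carries the standard SKIP
# directive is what triggers the skip above:
#
# >>> open('/nonexistent')  # doctest: +SKIP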
class DoctestModule(pytest.Module):
def collect(self):
import doctest
if self.fspath.basename == "conftest.py":
module = self.config.pluginmanager._importconftest(self.fspath)
else:
try:
module = self.fspath.pyimport()
except ImportError:
if self.config.getvalue('doctest_ignore_import_errors'):
pytest.skip('unable to import module %r' % self.fspath)
else:
raise
# uses internal doctest module parsing mechanism
finder = doctest.DocTestFinder()
optionflags = get_optionflags(self)
runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
checker=_get_unicode_checker())
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests
yield DoctestItem(test.name, self, runner, test)
def _setup_fixtures(doctest_item):
"""
Used by DoctestTextfile and DoctestItem to setup fixture information.
"""
def func():
pass
doctest_item.funcargs = {}
fm = doctest_item.session._fixturemanager
doctest_item._fixtureinfo = fm.getfixtureinfo(node=doctest_item, func=func,
cls=None, funcargs=False)
fixture_request = FixtureRequest(doctest_item)
fixture_request._fillfixtures()
return fixture_request
def _get_unicode_checker():
"""
Returns a doctest.OutputChecker subclass that takes into account the
ALLOW_UNICODE option to ignore u'' prefixes in strings. Useful
when the same doctest should run in Python 2 and Python 3.
An inner class is used to avoid importing "doctest" at the module
level.
"""
if hasattr(_get_unicode_checker, 'UnicodeOutputChecker'):
return _get_unicode_checker.UnicodeOutputChecker()
import doctest
import re
class UnicodeOutputChecker(doctest.OutputChecker):
"""
Copied from doctest_nose_plugin.py from the nltk project:
https://github.com/nltk/nltk
"""
_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
def check_output(self, want, got, optionflags):
res = doctest.OutputChecker.check_output(self, want, got,
optionflags)
if res:
return True
if not (optionflags & _get_allow_unicode_flag()):
return False
else: # pragma: no cover
# the code below will end up executed only in Python 2 in
# our tests, and our coverage check runs in Python 3 only
def remove_u_prefixes(txt):
return re.sub(self._literal_re, r'\1\2', txt)
want = remove_u_prefixes(want)
got = remove_u_prefixes(got)
res = doctest.OutputChecker.check_output(self, want, got,
optionflags)
return res
_get_unicode_checker.UnicodeOutputChecker = UnicodeOutputChecker
return _get_unicode_checker.UnicodeOutputChecker()
def _get_allow_unicode_flag():
"""
Registers and returns the ALLOW_UNICODE flag.
"""
import doctest
return doctest.register_optionflag('ALLOW_UNICODE')
|
{
"content_hash": "03fc2b85015f531f23a2a4686063e509",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 95,
"avg_line_length": 36.86153846153846,
"alnum_prop": 0.5977671118530885,
"repo_name": "gabrielcnr/pytest",
"id": "fd4a24790b8f7839544c4c274c5356079471f3a3",
"size": "9584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_pytest/doctest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "5987"
},
{
"name": "Python",
"bytes": "1094591"
},
{
"name": "Shell",
"bytes": "282"
}
],
"symlink_target": ""
}
|
"""Generic FAQ Wizard.
This is a CGI program that maintains a user-editable FAQ. It uses RCS
to keep track of changes to individual FAQ entries. It is fully
configurable; everything you might want to change when using this
program to maintain some other FAQ than the Python FAQ is contained in
the configuration module, faqconf.py.
Note that this is not an executable script; it's an importable module.
The actual script to place in cgi-bin is faqw.py.
"""
import sys, time, os, stat, re, cgi, faqconf
from faqconf import * # This imports all uppercase names
now = time.time()
class FileError:
def __init__(self, file):
self.file = file
class InvalidFile(FileError):
pass
class NoSuchSection(FileError):
def __init__(self, section):
FileError.__init__(self, NEWFILENAME %(section, 1))
self.section = section
class NoSuchFile(FileError):
def __init__(self, file, why=None):
FileError.__init__(self, file)
self.why = why
def escape(s):
s = s.replace('&', '&')
s = s.replace('<', '<')
s = s.replace('>', '>')
return s
def escapeq(s):
s = escape(s)
s = s.replace('"', '"')
return s
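# A small worked example of the helpers above:
# escapeq('a<b & "c"') -> 'a&lt;b &amp; &quot;c&quot;'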
def _interpolate(format, args, kw):
try:
quote = kw['_quote']
except KeyError:
quote = 1
d = (kw,) + args + (faqconf.__dict__,)
m = MagicDict(d, quote)
return format % m
def interpolate(format, *args, **kw):
return _interpolate(format, args, kw)
def emit(format, *args, **kw):
try:
f = kw['_file']
except KeyError:
f = sys.stdout
f.write(_interpolate(format, args, kw))
translate_prog = None
def translate(text, pre=0):
global translate_prog
if not translate_prog:
translate_prog = prog = re.compile(
r'\b(http|ftp|https)://\S+(\b|/)|\b[-.\w]+@[-.\w]+')
else:
prog = translate_prog
i = 0
list = []
while 1:
m = prog.search(text, i)
if not m:
break
j = m.start()
list.append(escape(text[i:j]))
i = j
url = m.group(0)
while url[-1] in '();:,.?\'"<>':
url = url[:-1]
i = i + len(url)
url = escape(url)
if not pre or (pre and PROCESS_PREFORMAT):
if ':' in url:
repl = '<A HREF="%s">%s</A>' % (url, url)
else:
repl = '<A HREF="mailto:%s">%s</A>' % (url, url)
else:
repl = url
list.append(repl)
j = len(text)
list.append(escape(text[i:j]))
return ''.join(list)
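# For example, translate('see http://example.com/x.') yields
# 'see <A HREF="http://example.com/x">http://example.com/x</A>.' -- trailing
# punctuation is deliberately kept outside the generated link.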
def emphasize(line):
return re.sub(r'\*([a-zA-Z]+)\*', r'<I>\1</I>', line)
revparse_prog = None
def revparse(rev):
global revparse_prog
if not revparse_prog:
revparse_prog = re.compile(r'^(\d{1,3})\.(\d{1,4})$')
m = revparse_prog.match(rev)
if not m:
return None
[major, minor] = map(int, m.group(1, 2))
return major, minor
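# Quick sanity examples for the parser above:
# revparse('1.17') -> (1, 17); revparse('HEAD') -> None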
logon = 0
def log(text):
if logon:
logfile = open("logfile", "a")
logfile.write(text + "\n")
logfile.close()
def load_cookies():
if not os.environ.has_key('HTTP_COOKIE'):
return {}
raw = os.environ['HTTP_COOKIE']
words = [s.strip() for s in raw.split(';')]
cookies = {}
for word in words:
i = word.find('=')
if i >= 0:
key, value = word[:i], word[i+1:]
cookies[key] = value
return cookies
def load_my_cookie():
cookies = load_cookies()
try:
value = cookies[COOKIE_NAME]
except KeyError:
return {}
import urllib
value = urllib.unquote(value)
words = value.split('/')
while len(words) < 3:
words.append('')
author = '/'.join(words[:-2])
email = words[-2]
password = words[-1]
return {'author': author,
'email': email,
'password': password}
def send_my_cookie(ui):
name = COOKIE_NAME
value = "%s/%s/%s" % (ui.author, ui.email, ui.password)
import urllib
value = urllib.quote(value)
then = now + COOKIE_LIFETIME
gmt = time.gmtime(then)
path = os.environ.get('SCRIPT_NAME', '/cgi-bin/')
print "Set-Cookie: %s=%s; path=%s;" % (name, value, path),
print time.strftime("expires=%a, %d-%b-%y %X GMT", gmt)
class MagicDict:
def __init__(self, d, quote):
self.__d = d
self.__quote = quote
def __getitem__(self, key):
for d in self.__d:
try:
value = d[key]
if value:
value = str(value)
if self.__quote:
value = escapeq(value)
return value
except KeyError:
pass
return ''
class UserInput:
def __init__(self):
self.__form = cgi.FieldStorage()
#log("\n\nbody: " + self.body)
def __getattr__(self, name):
if name[0] == '_':
raise AttributeError
try:
value = self.__form[name].value
except (TypeError, KeyError):
value = ''
else:
value = value.strip()
setattr(self, name, value)
return value
def __getitem__(self, key):
return getattr(self, key)
class FaqEntry:
def __init__(self, fp, file, sec_num):
self.file = file
self.sec, self.num = sec_num
if fp:
import rfc822
self.__headers = rfc822.Message(fp)
self.body = fp.read().strip()
else:
self.__headers = {'title': "%d.%d. " % sec_num}
self.body = ''
def __getattr__(self, name):
if name[0] == '_':
raise AttributeError
key = '-'.join(name.split('_'))
try:
value = self.__headers[key]
except KeyError:
value = ''
setattr(self, name, value)
return value
def __getitem__(self, key):
return getattr(self, key)
def load_version(self):
command = interpolate(SH_RLOG_H, self)
p = os.popen(command)
version = ''
while 1:
line = p.readline()
if not line:
break
if line[:5] == 'head:':
version = line[5:].strip()
p.close()
self.version = version
def getmtime(self):
if not self.last_changed_date:
return 0
try:
return os.stat(self.file)[stat.ST_MTIME]
except os.error:
return 0
def emit_marks(self):
mtime = self.getmtime()
if mtime >= now - DT_VERY_RECENT:
emit(MARK_VERY_RECENT, self)
elif mtime >= now - DT_RECENT:
emit(MARK_RECENT, self)
def show(self, edit=1):
emit(ENTRY_HEADER1, self)
self.emit_marks()
emit(ENTRY_HEADER2, self)
pre = 0
raw = 0
for line in self.body.split('\n'):
# Allow the user to insert raw html into a FAQ answer
# (Skip Montanaro, with changes by Guido)
tag = line.rstrip().lower()
if tag == '<html>':
raw = 1
continue
if tag == '</html>':
raw = 0
continue
if raw:
print line
continue
if not line.strip():
if pre:
print '</PRE>'
pre = 0
else:
print '<P>'
else:
if not line[0].isspace():
if pre:
print '</PRE>'
pre = 0
else:
if not pre:
print '<PRE>'
pre = 1
if '/' in line or '@' in line:
line = translate(line, pre)
elif '<' in line or '&' in line:
line = escape(line)
if not pre and '*' in line:
line = emphasize(line)
print line
if pre:
print '</PRE>'
pre = 0
if edit:
print '<P>'
emit(ENTRY_FOOTER, self)
if self.last_changed_date:
emit(ENTRY_LOGINFO, self)
print '<P>'
class FaqDir:
entryclass = FaqEntry
__okprog = re.compile(OKFILENAME)
def __init__(self, dir=os.curdir):
self.__dir = dir
self.__files = None
def __fill(self):
if self.__files is not None:
return
self.__files = files = []
okprog = self.__okprog
for file in os.listdir(self.__dir):
if self.__okprog.match(file):
files.append(file)
files.sort()
def good(self, file):
return self.__okprog.match(file)
def parse(self, file):
m = self.good(file)
if not m:
return None
sec, num = m.group(1, 2)
return int(sec), int(num)
def list(self):
# XXX Caller shouldn't modify result
self.__fill()
return self.__files
def open(self, file):
sec_num = self.parse(file)
if not sec_num:
raise InvalidFile(file)
try:
fp = open(file)
except IOError, msg:
raise NoSuchFile(file, msg)
try:
return self.entryclass(fp, file, sec_num)
finally:
fp.close()
def show(self, file, edit=1):
self.open(file).show(edit=edit)
def new(self, section):
if not SECTION_TITLES.has_key(section):
raise NoSuchSection(section)
maxnum = 0
for file in self.list():
sec, num = self.parse(file)
if sec == section:
maxnum = max(maxnum, num)
sec_num = (section, maxnum+1)
file = NEWFILENAME % sec_num
return self.entryclass(None, file, sec_num)
class FaqWizard:
def __init__(self):
self.ui = UserInput()
self.dir = FaqDir()
def go(self):
print 'Content-type: text/html'
req = self.ui.req or 'home'
mname = 'do_%s' % req
try:
meth = getattr(self, mname)
except AttributeError:
self.error("Bad request type %r." % (req,))
else:
try:
meth()
except InvalidFile, exc:
self.error("Invalid entry file name %s" % exc.file)
except NoSuchFile, exc:
self.error("No entry with file name %s" % exc.file)
except NoSuchSection, exc:
self.error("No section number %s" % exc.section)
self.epilogue()
def error(self, message, **kw):
self.prologue(T_ERROR)
emit(message, kw)
def prologue(self, title, entry=None, **kw):
emit(PROLOGUE, entry, kwdict=kw, title=escape(title))
def epilogue(self):
emit(EPILOGUE)
def do_home(self):
self.prologue(T_HOME)
emit(HOME)
def do_debug(self):
self.prologue("FAQ Wizard Debugging")
form = cgi.FieldStorage()
cgi.print_form(form)
cgi.print_environ(os.environ)
cgi.print_directory()
cgi.print_arguments()
def do_search(self):
query = self.ui.query
if not query:
self.error("Empty query string!")
return
if self.ui.querytype == 'simple':
query = re.escape(query)
queries = [query]
elif self.ui.querytype in ('anykeywords', 'allkeywords'):
words = filter(None, re.split('\W+', query))
if not words:
self.error("No keywords specified!")
return
words = map(lambda w: r'\b%s\b' % w, words)
if self.ui.querytype[:3] == 'any':
queries = ['|'.join(words)]
else:
# Each of the individual queries must match
queries = words
else:
# Default to regular expression
queries = [query]
self.prologue(T_SEARCH)
progs = []
for query in queries:
if self.ui.casefold == 'no':
p = re.compile(query)
else:
p = re.compile(query, re.IGNORECASE)
progs.append(p)
hits = []
for file in self.dir.list():
try:
entry = self.dir.open(file)
except FileError:
continue
for p in progs:
if not p.search(entry.title) and not p.search(entry.body):
break
else:
hits.append(file)
if not hits:
emit(NO_HITS, self.ui, count=0)
elif len(hits) <= MAXHITS:
if len(hits) == 1:
emit(ONE_HIT, count=1)
else:
emit(FEW_HITS, count=len(hits))
self.format_all(hits, headers=0)
else:
emit(MANY_HITS, count=len(hits))
self.format_index(hits)
def do_all(self):
self.prologue(T_ALL)
files = self.dir.list()
self.last_changed(files)
self.format_index(files, localrefs=1)
self.format_all(files)
def do_compat(self):
files = self.dir.list()
emit(COMPAT)
self.last_changed(files)
self.format_index(files, localrefs=1)
self.format_all(files, edit=0)
sys.exit(0) # XXX Hack to suppress epilogue
def last_changed(self, files):
latest = 0
for file in files:
entry = self.dir.open(file)
if entry:
mtime = entry.getmtime()
if mtime > latest:
latest = mtime
print time.strftime(LAST_CHANGED, time.localtime(latest))
emit(EXPLAIN_MARKS)
def format_all(self, files, edit=1, headers=1):
sec = 0
for file in files:
try:
entry = self.dir.open(file)
except NoSuchFile:
continue
if headers and entry.sec != sec:
sec = entry.sec
try:
title = SECTION_TITLES[sec]
except KeyError:
title = "Untitled"
emit("\n<HR>\n<H1>%(sec)s. %(title)s</H1>\n",
sec=sec, title=title)
entry.show(edit=edit)
def do_index(self):
self.prologue(T_INDEX)
files = self.dir.list()
self.last_changed(files)
self.format_index(files, add=1)
def format_index(self, files, add=0, localrefs=0):
sec = 0
for file in files:
try:
entry = self.dir.open(file)
except NoSuchFile:
continue
if entry.sec != sec:
if sec:
if add:
emit(INDEX_ADDSECTION, sec=sec)
emit(INDEX_ENDSECTION, sec=sec)
sec = entry.sec
try:
title = SECTION_TITLES[sec]
except KeyError:
title = "Untitled"
emit(INDEX_SECTION, sec=sec, title=title)
if localrefs:
emit(LOCAL_ENTRY, entry)
else:
emit(INDEX_ENTRY, entry)
entry.emit_marks()
if sec:
if add:
emit(INDEX_ADDSECTION, sec=sec)
emit(INDEX_ENDSECTION, sec=sec)
def do_recent(self):
if not self.ui.days:
days = 1
else:
days = float(self.ui.days)
try:
cutoff = now - days * 24 * 3600
except OverflowError:
cutoff = 0
list = []
for file in self.dir.list():
entry = self.dir.open(file)
if not entry:
continue
mtime = entry.getmtime()
if mtime >= cutoff:
list.append((mtime, file))
list.sort()
list.reverse()
self.prologue(T_RECENT)
if days <= 1:
period = "%.2g hours" % (days*24)
else:
period = "%.6g days" % days
if not list:
emit(NO_RECENT, period=period)
elif len(list) == 1:
emit(ONE_RECENT, period=period)
else:
emit(SOME_RECENT, period=period, count=len(list))
self.format_all(map(lambda (mtime, file): file, list), headers=0)
emit(TAIL_RECENT)
def do_roulette(self):
import random
files = self.dir.list()
if not files:
self.error("No entries.")
return
file = random.choice(files)
self.prologue(T_ROULETTE)
emit(ROULETTE)
self.dir.show(file)
def do_help(self):
self.prologue(T_HELP)
emit(HELP)
def do_show(self):
entry = self.dir.open(self.ui.file)
self.prologue(T_SHOW)
entry.show()
def do_add(self):
self.prologue(T_ADD)
emit(ADD_HEAD)
sections = SECTION_TITLES.items()
sections.sort()
for section, title in sections:
emit(ADD_SECTION, section=section, title=title)
emit(ADD_TAIL)
def do_delete(self):
self.prologue(T_DELETE)
emit(DELETE)
def do_log(self):
entry = self.dir.open(self.ui.file)
self.prologue(T_LOG, entry)
emit(LOG, entry)
self.rlog(interpolate(SH_RLOG, entry), entry)
def rlog(self, command, entry=None):
output = os.popen(command).read()
sys.stdout.write('<PRE>')
athead = 0
lines = output.split('\n')
while lines and not lines[-1]:
del lines[-1]
if lines:
line = lines[-1]
if line[:1] == '=' and len(line) >= 40 and \
line == line[0]*len(line):
del lines[-1]
headrev = None
for line in lines:
if entry and athead and line[:9] == 'revision ':
rev = line[9:].strip()
mami = revparse(rev)
if not mami:
print line
else:
emit(REVISIONLINK, entry, rev=rev, line=line)
if mami[1] > 1:
prev = "%d.%d" % (mami[0], mami[1]-1)
emit(DIFFLINK, entry, prev=prev, rev=rev)
if headrev:
emit(DIFFLINK, entry, prev=rev, rev=headrev)
else:
headrev = rev
print
athead = 0
else:
athead = 0
if line[:1] == '-' and len(line) >= 20 and \
line == len(line) * line[0]:
athead = 1
sys.stdout.write('<HR>')
else:
print line
print '</PRE>'
def do_revision(self):
entry = self.dir.open(self.ui.file)
rev = self.ui.rev
mami = revparse(rev)
if not mami:
self.error("Invalid revision number: %r." % (rev,))
self.prologue(T_REVISION, entry)
self.shell(interpolate(SH_REVISION, entry, rev=rev))
def do_diff(self):
entry = self.dir.open(self.ui.file)
prev = self.ui.prev
rev = self.ui.rev
mami = revparse(rev)
if not mami:
self.error("Invalid revision number: %r." % (rev,))
if prev:
if not revparse(prev):
self.error("Invalid previous revision number: %r." % (prev,))
else:
prev = '%d.%d' % (mami[0], mami[1]-1)
self.prologue(T_DIFF, entry)
self.shell(interpolate(SH_RDIFF, entry, rev=rev, prev=prev))
def shell(self, command):
output = os.popen(command).read()
sys.stdout.write('<PRE>')
print escape(output)
print '</PRE>'
def do_new(self):
entry = self.dir.new(section=int(self.ui.section))
entry.version = '*new*'
self.prologue(T_EDIT)
emit(EDITHEAD)
emit(EDITFORM1, entry, editversion=entry.version)
emit(EDITFORM2, entry, load_my_cookie())
emit(EDITFORM3)
entry.show(edit=0)
def do_edit(self):
entry = self.dir.open(self.ui.file)
entry.load_version()
self.prologue(T_EDIT)
emit(EDITHEAD)
emit(EDITFORM1, entry, editversion=entry.version)
emit(EDITFORM2, entry, load_my_cookie())
emit(EDITFORM3)
entry.show(edit=0)
def do_review(self):
send_my_cookie(self.ui)
if self.ui.editversion == '*new*':
sec, num = self.dir.parse(self.ui.file)
entry = self.dir.new(section=sec)
entry.version = "*new*"
if entry.file != self.ui.file:
self.error("Commit version conflict!")
emit(NEWCONFLICT, self.ui, sec=sec, num=num)
return
else:
entry = self.dir.open(self.ui.file)
entry.load_version()
# Check that the FAQ entry number didn't change
if self.ui.title.split()[:1] != entry.title.split()[:1]:
self.error("Don't change the entry number please!")
return
# Check that the edited version is the current version
if entry.version != self.ui.editversion:
self.error("Commit version conflict!")
emit(VERSIONCONFLICT, entry, self.ui)
return
commit_ok = ((not PASSWORD
or self.ui.password == PASSWORD)
and self.ui.author
and '@' in self.ui.email
and self.ui.log)
if self.ui.commit:
if not commit_ok:
self.cantcommit()
else:
self.commit(entry)
return
self.prologue(T_REVIEW)
emit(REVIEWHEAD)
entry.body = self.ui.body
entry.title = self.ui.title
entry.show(edit=0)
emit(EDITFORM1, self.ui, entry)
if commit_ok:
emit(COMMIT)
else:
emit(NOCOMMIT_HEAD)
self.errordetail()
emit(NOCOMMIT_TAIL)
emit(EDITFORM2, self.ui, entry, load_my_cookie())
emit(EDITFORM3)
def cantcommit(self):
self.prologue(T_CANTCOMMIT)
print CANTCOMMIT_HEAD
self.errordetail()
print CANTCOMMIT_TAIL
def errordetail(self):
if PASSWORD and self.ui.password != PASSWORD:
emit(NEED_PASSWD)
if not self.ui.log:
emit(NEED_LOG)
if not self.ui.author:
emit(NEED_AUTHOR)
if not self.ui.email:
emit(NEED_EMAIL)
def commit(self, entry):
file = entry.file
# Normalize line endings in body
if '\r' in self.ui.body:
self.ui.body = re.sub('\r\n?', '\n', self.ui.body)
# Normalize whitespace in title
self.ui.title = ' '.join(self.ui.title.split())
# Check that there were any changes
if self.ui.body == entry.body and self.ui.title == entry.title:
self.error("You didn't make any changes!")
return
# need to lock here because otherwise the file exists and is not writable (on NT)
command = interpolate(SH_LOCK, file=file)
p = os.popen(command)
output = p.read()
try:
os.unlink(file)
except os.error:
pass
try:
f = open(file, 'w')
except IOError, why:
self.error(CANTWRITE, file=file, why=why)
return
date = time.ctime(now)
emit(FILEHEADER, self.ui, os.environ, date=date, _file=f, _quote=0)
f.write('\n')
f.write(self.ui.body)
f.write('\n')
f.close()
import tempfile
tf = tempfile.NamedTemporaryFile()
emit(LOGHEADER, self.ui, os.environ, date=date, _file=tf)
tf.flush()
tf.seek(0)
command = interpolate(SH_CHECKIN, file=file, tfn=tf.name)
log("\n\n" + command)
p = os.popen(command)
output = p.read()
sts = p.close()
log("output: " + output)
log("done: " + str(sts))
log("TempFile:\n" + tf.read() + "end")
if not sts:
self.prologue(T_COMMITTED)
emit(COMMITTED)
else:
self.error(T_COMMITFAILED)
emit(COMMITFAILED, sts=sts)
print '<PRE>%s</PRE>' % escape(output)
try:
os.unlink(tf.name)
except os.error:
pass
entry = self.dir.open(file)
entry.show()
wiz = FaqWizard()
wiz.go()
|
{
"content_hash": "3951ddedc38ad4841794f9f427b3e502",
"timestamp": "",
"source": "github",
"line_count": 841,
"max_line_length": 89,
"avg_line_length": 30.491082045184303,
"alnum_prop": 0.4784541590297547,
"repo_name": "google/google-ctf",
"id": "babb42658210d49d626d0b9c1eeb8d0633355f67",
"size": "25643",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Tools/faqwiz/faqwiz.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AIDL",
"bytes": "508"
},
{
"name": "Assembly",
"bytes": "107617"
},
{
"name": "BASIC",
"bytes": "6068"
},
{
"name": "Batchfile",
"bytes": "1032"
},
{
"name": "Blade",
"bytes": "14530"
},
{
"name": "C",
"bytes": "1481904"
},
{
"name": "C++",
"bytes": "2139472"
},
{
"name": "CMake",
"bytes": "11595"
},
{
"name": "CSS",
"bytes": "172375"
},
{
"name": "Dart",
"bytes": "6282"
},
{
"name": "Dockerfile",
"bytes": "232352"
},
{
"name": "EJS",
"bytes": "92308"
},
{
"name": "Emacs Lisp",
"bytes": "2668"
},
{
"name": "GDB",
"bytes": "273"
},
{
"name": "GLSL",
"bytes": "33392"
},
{
"name": "Go",
"bytes": "3031142"
},
{
"name": "HTML",
"bytes": "467647"
},
{
"name": "Java",
"bytes": "174199"
},
{
"name": "JavaScript",
"bytes": "2643200"
},
{
"name": "Lua",
"bytes": "5944"
},
{
"name": "Makefile",
"bytes": "149152"
},
{
"name": "NSIS",
"bytes": "2800"
},
{
"name": "Nix",
"bytes": "139"
},
{
"name": "PHP",
"bytes": "311900"
},
{
"name": "Perl",
"bytes": "32742"
},
{
"name": "Pug",
"bytes": "8752"
},
{
"name": "Python",
"bytes": "1756592"
},
{
"name": "Red",
"bytes": "188"
},
{
"name": "Rust",
"bytes": "541267"
},
{
"name": "Sage",
"bytes": "39814"
},
{
"name": "Shell",
"bytes": "382149"
},
{
"name": "Smali",
"bytes": "2316656"
},
{
"name": "Starlark",
"bytes": "8216"
},
{
"name": "SystemVerilog",
"bytes": "16466"
},
{
"name": "VCL",
"bytes": "895"
},
{
"name": "Verilog",
"bytes": "7230"
},
{
"name": "Vim Script",
"bytes": "890"
},
{
"name": "Vue",
"bytes": "10248"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from sfepy.base.base import Struct
from sfepy.solvers.solvers import Solver, use_first_available
class AutoFallbackSolver(Solver):
"""
Base class for virtual solvers with the automatic fallback.
"""
_ls_solvers = []
def __new__(cls, conf, **kwargs):
"""
Choose an available solver from `self._ls_solvers`.
Parameters
----------
conf : dict or Struct
The solver configuration.
**kwargs : keyword arguments
Additional solver options, see the particular __init__() methods.
"""
if isinstance(conf, Struct):
dconf = conf.to_dict()
else:
dconf = conf
dconf.pop('kind', None)
ls_solvers = [(ls, Struct(**_conf) + Struct(kind=ls) + Struct(**dconf))
for ls, _conf in cls._ls_solvers]
return use_first_available(ls_solvers, **kwargs)
class AutoDirect(AutoFallbackSolver):
"""The automatically selected linear direct solver.
The first available solver from the following list is used:
`ls.mumps <sfepy.solvers.ls.MUMPSSolver>`,
`ls.scipy_umfpack <sfepy.solvers.ls.ScipyUmfpack>` and
`ls.scipy_superlu <sfepy.solvers.ls.ScipySuperLU>`.
"""
name = 'ls.auto_direct'
_ls_solvers = [
('ls.mumps', {}),
('ls.scipy_umfpack', {}),
('ls.scipy_superlu', {})
]
class AutoIterative(AutoFallbackSolver):
"""The automatically selected linear iterative solver.
The first available solver from the following list is used:
`ls.petsc <sfepy.solvers.ls.PETScKrylovSolver>` and
`ls.scipy_iterative <sfepy.solvers.ls.ScipyIterative>`
"""
name = 'ls.auto_iterative'
_ls_solvers = [
('ls.petsc', {'method': 'cg', 'precond': 'icc'}),
('ls.scipy_iterative', {'method': 'cg'}),
]
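# A sketch of selecting one of the virtual solvers above in a declarative
# sfepy problem description (the solver-definition style is standard; treating
# 'ls.auto_direct' as a drop-in kind is the point of this class):
#
# solver_0 = {
#     'name': 'ls',
#     'kind': 'ls.auto_direct',
# }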
|
{
"content_hash": "3a9526af9d4354c2612e436d37b32d7c",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 29.107692307692307,
"alnum_prop": 0.6025369978858351,
"repo_name": "BubuLK/sfepy",
"id": "2e112a3cec9d237d85caead5c97d533414d09cb4",
"size": "1892",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sfepy/solvers/auto_fallback.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "471175"
},
{
"name": "GLSL",
"bytes": "8269"
},
{
"name": "MATLAB",
"bytes": "1918"
},
{
"name": "Makefile",
"bytes": "489"
},
{
"name": "PowerShell",
"bytes": "3121"
},
{
"name": "Python",
"bytes": "3553817"
}
],
"symlink_target": ""
}
|
import web
import sys, logging
from wsgilog import WsgiLog
#import config
# Copied this code from:
# http://stackoverflow.com/questions/7192788/how-do-i-redirrect-the-output-in-web-py
class WebLog(WsgiLog):
def __init__(self, application):
WsgiLog.__init__(
self,
application,
logformat = web.config.log_format,
debug = True,
tofile = web.config.log_tofile,
toprint = False,
logname = "WebLog", # For the name in logformat
file = web.config.log_file,
loglevel = logging.DEBUG,
interval = web.config.log_interval,
backups = web.config.log_backups
)
def __call__(self, environ, start_response):
def hstart_response(status, response_headers, *args):
out = start_response(status, response_headers, *args)
try:
logline = environ["SERVER_PROTOCOL"] + " " + environ["REQUEST_METHOD"] + " " + environ["REQUEST_URI"] + " - " + status
except Exception as err:
logline = "Could not log <%s> due to err <%s>" % (str(environ), err)
self.logger.info(logline)
return out
return super(WebLog, self).__call__(environ, hstart_response)
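# Hypothetical wiring in a web.py application (names are illustrative; the
# web.config.log_* keys read above must be set before the first request):
#
# app = web.application(urls, globals())
# application = app.wsgifunc(WebLog)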
|
{
"content_hash": "8323f01c27428a344be68774b5efb424",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 112,
"avg_line_length": 30.236842105263158,
"alnum_prop": 0.6318537859007833,
"repo_name": "jimboca/ISYHelper",
"id": "23a4e5c97e2d806f082372e54a46133ed9d64a48",
"size": "1150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ISYHelper/weblog.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "62818"
},
{
"name": "Shell",
"bytes": "685"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from collections import defaultdict
from os import listdir
from os.path import abspath, basename, dirname, isdir, isfile, join, realpath, relpath, splitext
import re
from subprocess import Popen, PIPE
import sys
# Runs the tests.
WREN_DIR = dirname(dirname(realpath(__file__)))
WREN_APP = join(WREN_DIR, 'wren', 'bin', 'Release', 'wren')
EXPECT_PATTERN = re.compile(r'// expect: ?(.*)')
EXPECT_ERROR_PATTERN = re.compile(r'// expect error(?! line)')
EXPECT_ERROR_LINE_PATTERN = re.compile(r'// expect error line (\d+)')
EXPECT_RUNTIME_ERROR_PATTERN = re.compile(r'// expect runtime error: (.+)')
ERROR_PATTERN = re.compile(r'\[.* line (\d+)\] Error')
STACK_TRACE_PATTERN = re.compile(r'\[main line (\d+)\] in')
STDIN_PATTERN = re.compile(r'// stdin: (.*)')
SKIP_PATTERN = re.compile(r'// skip: (.*)')
NONTEST_PATTERN = re.compile(r'// nontest')
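# The directives above live in comments inside the .wren test files
# themselves; a minimal illustration (the expectations are made up):
#
# System.print("hello")   // expect: hello
# unknownVariable         // expect error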
passed = 0
failed = 0
skipped = defaultdict(int)
num_skipped = 0
expectations = 0
def color_text(text, color):
"""Converts text to a string and wraps it in the ANSI escape sequence for
color, if supported."""
# No ANSI escapes on Windows.
#if sys.platform == 'win32':
return str(text)
#return color + str(text) + '\033[0m'
def green(text): return color_text(text, '\033[32m')
def pink(text): return color_text(text, '\033[91m')
def red(text): return color_text(text, '\033[31m')
def yellow(text): return color_text(text, '\033[33m')
def walk(dir, callback, ignored=None):
"""
Walks [dir], and executes [callback] on each file unless it is [ignored].
"""
if not ignored:
ignored = []
ignored += [".",".."]
dir = abspath(dir)
for file in [file for file in listdir(dir) if not file in ignored]:
nfile = join(dir, file)
if isdir(nfile):
walk(nfile, callback)
else:
callback(nfile)
def print_line(line=None):
# Erase the line.
print('\033[2K', end='')
# Move the cursor to the beginning.
print('\r', end='')
if line:
print(line, end='')
sys.stdout.flush()
def run_script(app, path, type):
global passed
global failed
global skipped
global num_skipped
global expectations
if (splitext(path)[1] != '.wren'):
return
# Check if we are just running a subset of the tests.
if len(sys.argv) == 2:
this_test = relpath(path, join(WREN_DIR, 'test'))
if not this_test.startswith(sys.argv[1]):
return
# Make a nice short path relative to the working directory.
# Normalize it to use "/"
path = relpath(path).replace("\\", "/")
# Read the test and parse out the expectations.
expect_output = []
expect_error = []
expect_runtime_error_line = 0
expect_runtime_error = None
expect_return = 0
input_lines = []
print_line('Passed: ' + green(passed) +
' Failed: ' + red(failed) +
' Skipped: ' + yellow(num_skipped))
line_num = 1
with open(path, 'r') as file:
for line in file:
match = EXPECT_PATTERN.search(line)
if match:
expect_output.append((match.group(1), line_num))
expectations += 1
match = EXPECT_ERROR_PATTERN.search(line)
if match:
expect_error.append(line_num)
# If we expect compile errors, it should exit with 65.
expect_return = 65
expectations += 1
match = EXPECT_ERROR_LINE_PATTERN.search(line)
if match:
expect_error.append(int(match.group(1)))
# If we expect compile errors, it should exit with 65.
expect_return = 65
expectations += 1
match = EXPECT_RUNTIME_ERROR_PATTERN.search(line)
if match:
expect_runtime_error_line = line_num
expect_runtime_error = match.group(1)
# If we expect a runtime error, it should exit with 70.
expect_return = 70
expectations += 1
match = STDIN_PATTERN.search(line)
if match:
input_lines.append(match.group(1) + '\n')
match = SKIP_PATTERN.search(line)
if match:
num_skipped += 1
skipped[match.group(1)] += 1
return
match = NONTEST_PATTERN.search(line)
if match:
# Not a test file at all, so ignore it.
return
line_num += 1
# If any input is fed to the test in stdin, concatenate it into one string.
input_bytes = None
if len(input_lines) > 0:
input_bytes = "".join(input_lines).encode("utf-8")
# Run the test.
test_arg = path
if type == "api test":
# Just pass the suite name to API tests.
test_arg = basename(splitext(test_arg)[0])
print(test_arg)
proc = Popen([app, test_arg], stdin=PIPE, stdout=PIPE, stderr=PIPE)
(out, err) = proc.communicate(input_bytes)
fails = []
try:
out = out.decode("utf-8").replace('\r\n', '\n')
err = err.decode("utf-8").replace('\r\n', '\n')
except:
fails.append('Error decoding output.')
# Validate that no unexpected errors occurred.
if expect_return != 0 and err != '':
lines = err.split('\n')
if expect_runtime_error:
# Make sure we got the right error.
if lines[0] != expect_runtime_error:
fails.append('Expected runtime error "' + expect_runtime_error +
'" and got:')
fails.append(lines[0])
else:
lines = err.split('\n')
while len(lines) > 0:
line = lines.pop(0)
match = ERROR_PATTERN.search(line)
if match:
if float(match.group(1)) not in expect_error:
fails.append('Unexpected error:')
fails.append(line)
elif line != '':
fails.append('Unexpected output on stderr:')
fails.append(line)
else:
for line in expect_error:
fails.append('Expected error on line ' + str(line) + ' and got none.')
if expect_runtime_error:
fails.append('Expected runtime error "' + expect_runtime_error +
'" and got none.')
# Validate the exit code.
if proc.returncode != expect_return:
fails.append('Expected return code {0} and got {1}. Stderr:'
.format(expect_return, proc.returncode))
fails += err.split('\n')
else:
# Validate the output.
expect_index = 0
# Remove the trailing last empty line.
out_lines = out.split('\n')
if out_lines[-1] == '':
del out_lines[-1]
for line in out_lines:
#if sys.version_info < (3, 0):
#line = line.encode('utf-8')
if type == "example":
# Ignore output from examples.
pass
elif expect_index >= len(expect_output):
fails.append('Got output "{0}" when none was expected.'.format(line))
elif expect_output[expect_index][0] != line:
fails.append('Expected output "{0}" on line {1} and got "{2}".'.
format(expect_output[expect_index][0],
expect_output[expect_index][1], line))
expect_index += 1
while expect_index < len(expect_output):
fails.append('Missing expected output "{0}" on line {1}.'.
format(expect_output[expect_index][0],
expect_output[expect_index][1]))
expect_index += 1
# Display the results.
if len(fails) == 0:
passed += 1
else:
failed += 1
print_line(red('FAIL') + ': ' + path)
print('')
for fail in fails:
print(' ' + pink(fail))
print('')
def run_test(path, example=False):
run_script(WREN_APP, path, "test")
def run_api_test(path):
pass
def run_example(path):
run_script(WREN_APP, path, "example")
walk(join(WREN_DIR, 'test'), run_test, ignored=['api', 'benchmark'])
print_line()
if failed == 0:
print('All ' + green(passed) + ' tests passed (' + str(expectations) +
' expectations).')
else:
print(green(passed) + ' tests passed. ' + red(failed) + ' tests failed.')
for key in sorted(skipped.keys()):
print('Skipped ' + yellow(skipped[key]) + ' tests: ' + key)
if failed != 0:
sys.exit(1)
|
{
"content_hash": "c2496925760c212f05bddf28f754c22c",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 96,
"avg_line_length": 27.89679715302491,
"alnum_prop": 0.6088786835055492,
"repo_name": "robotii/Wren.NET",
"id": "d4e04d6e37422e4ce524ac0eb1a212d4eae31d0f",
"size": "7862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7921"
},
{
"name": "C#",
"bytes": "303606"
},
{
"name": "C++",
"bytes": "176"
},
{
"name": "Lua",
"bytes": "10909"
},
{
"name": "Python",
"bytes": "46864"
},
{
"name": "Ruby",
"bytes": "9694"
}
],
"symlink_target": ""
}
|
"""Tests for AddNodeNotificationTransformer"""
import unittest
from o2a.converter.task import Task
from o2a.converter.task_group import ActionTaskGroup, ControlTaskGroup
from o2a.converter.workflow import Workflow
from o2a.o2a_libs.property_utils import PropertySet
from o2a.transformers.add_node_notificaton_transformer import (
AddNodeNotificationTransformer,
NODE_STATUS_SUFFIX,
NODE_TRANSITION_SUFFIX,
PROP_KEY_NODE_NOTIFICATION_URL,
)
NODE_NOTIFICATION_URL_TPL = "http://example.com/action?job-id=$jobId&node-name=$nodeName&status=$status"
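# The template mirrors Oozie's action-notification convention: $jobId,
# $nodeName and $status are placeholders substituted at runtime by the
# generated notification tasks.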
def new_task(task_id):
return Task(task_id=task_id, template_name="dummy.tpl")
class AddNodeNotificationTransformerTest(unittest.TestCase):
def setUp(self) -> None:
self.transformer = AddNodeNotificationTransformer()
self.workflow = Workflow(input_directory_path="", output_directory_path="", dag_name="DAG_NAME")
self.action_task_group = ActionTaskGroup(name="action_task_group", tasks=[new_task("action_task")])
self.workflow.task_groups[self.action_task_group.name] = self.action_task_group
self.props = PropertySet(job_properties={PROP_KEY_NODE_NOTIFICATION_URL: NODE_NOTIFICATION_URL_TPL})
def test_should_do_nothing_when_notification_url_not_configured(self):
"""
Input:
ACTION
Expected output:
ACTION
"""
# Given
props = PropertySet()
# When
self.transformer.process_workflow_after_convert_nodes(self.workflow, props)
# Then
self.assertEqual({self.action_task_group.name}, self.workflow.task_groups.keys())
def test_should_add_status_notification_to_single_action_task_group(self):
"""
Input:
ACTION
Expected output:
STATUS
|
ACTION
"""
# When
self.transformer.process_workflow_after_convert_nodes(self.workflow, self.props)
# Then
exp_status_notification_name = f"{self.action_task_group.name}{NODE_STATUS_SUFFIX}"
self.assertEqual(2, len(self.workflow.task_groups))
self.assertEqual(
{exp_status_notification_name, self.action_task_group.name}, self.workflow.task_groups.keys()
)
def test_should_add_transition_and_status_between_two_action_task_groups(self):
"""
Input:
ACTION
|
ACTION
Expected output:
STATUS
|
ACTION
|
TRANSITION
|
STATUS
|
ACTION
"""
# Given
second_action_task_group = ActionTaskGroup(
name="second_action_task_group", tasks=[new_task("control_task")]
)
self.workflow.task_groups[second_action_task_group.name] = second_action_task_group
self.action_task_group.downstream_names.append(second_action_task_group.name)
exp_first_action_status_notification_name = f"{self.action_task_group.name}{NODE_STATUS_SUFFIX}"
exp_second_action_status_notification_name = f"{second_action_task_group.name}{NODE_STATUS_SUFFIX}"
exp_second_action_transition_notification_name = (
f"{self.action_task_group.name}{NODE_TRANSITION_SUFFIX}_T_{second_action_task_group.name}"
)
# When
self.transformer.process_workflow_after_convert_nodes(self.workflow, self.props)
# Then
self.assertEqual(5, len(self.workflow.task_groups))
self.assertEqual(
{
exp_first_action_status_notification_name,
self.action_task_group.name,
exp_second_action_transition_notification_name,
exp_second_action_status_notification_name,
second_action_task_group.name,
},
self.workflow.task_groups.keys(),
)
def test_should_add_transition_between_action_and_control_task_groups(self):
"""
Input:
ACTION
|
CONTROL
Expected output:
STATUS
|
ACTION
|
TRANSITION
|
CONTROL
"""
# Given
control_task_group = ControlTaskGroup(name="control_task_group", tasks=[new_task("control_task")])
self.workflow.task_groups[control_task_group.name] = control_task_group
self.action_task_group.downstream_names.append(control_task_group.name)
exp_action_status_notification_name = f"{self.action_task_group.name}{NODE_STATUS_SUFFIX}"
exp_control_transition_notification_name = (
f"{self.action_task_group.name}{NODE_TRANSITION_SUFFIX}_T_{control_task_group.name}"
)
# When
self.transformer.process_workflow_after_convert_nodes(self.workflow, self.props)
# Then
self.assertEqual(4, len(self.workflow.task_groups))
self.assertEqual(
{
exp_action_status_notification_name,
self.action_task_group.name,
exp_control_transition_notification_name,
control_task_group.name,
},
self.workflow.task_groups.keys(),
)
def test_should_add_transition_between_two_control_task_groups(self):
"""
Input:
CONTROL
|
CONTROL
Expected output:
CONTROL
|
TRANSITION
|
CONTROL
"""
# Given
self.workflow.task_groups.clear() # Reset workflow
first_control_task_group = ControlTaskGroup(
name="first_control_task_group", tasks=[new_task("first_control_task")]
)
second_control_task_group = ControlTaskGroup(
name="second_control_task_group", tasks=[new_task("second_control_task")]
)
self.workflow.task_groups[first_control_task_group.name] = first_control_task_group
self.workflow.task_groups[second_control_task_group.name] = second_control_task_group
first_control_task_group.downstream_names = [second_control_task_group.name]
exp_second_control_transition_notification_name = (
f"{first_control_task_group.name}{NODE_TRANSITION_SUFFIX}_T_{second_control_task_group.name}"
)
# When
self.transformer.process_workflow_after_convert_nodes(self.workflow, self.props)
# Then
self.assertEqual(3, len(self.workflow.task_groups))
self.assertEqual(
{
first_control_task_group.name,
exp_second_control_transition_notification_name,
second_control_task_group.name,
},
self.workflow.task_groups.keys(),
)
def test_should_add_transition_and_status_between_control_and_action_task_groups(self):
"""
Input:
CONTROL
|
ACTION
Expected output:
CONTROL
|
TRANSITION
|
STATUS
|
ACTION
"""
# Given
control_task_group = ControlTaskGroup(name="control_task_group", tasks=[new_task("control_task")])
self.workflow.task_groups[control_task_group.name] = control_task_group
control_task_group.downstream_names = [self.action_task_group.name]
exp_transition_notification_name = (
f"{control_task_group.name}{NODE_TRANSITION_SUFFIX}_T_{self.action_task_group.name}"
)
exp_status_notification_name = f"{self.action_task_group.name}{NODE_STATUS_SUFFIX}"
# When
self.transformer.process_workflow_after_convert_nodes(self.workflow, self.props)
# Then
self.assertEqual(4, len(self.workflow.task_groups))
self.assertEqual(
{
control_task_group.name,
exp_transition_notification_name,
exp_status_notification_name,
self.action_task_group.name,
},
self.workflow.task_groups.keys(),
)
def test_should_handle_fork_type_case(self):
"""
Input:
ACTION
| |
CONTROL < > ACTION
Expected output:
STATUS
|
ACTION
| |
TRANSITION TRANSITION
| |
| STATUS
| |
CONTROL < > ACTION
"""
# Given
control_task_group = ControlTaskGroup(
name="control_task_group", tasks=[Task(task_id="control_task", template_name="dummy.tpl")]
)
self.workflow.task_groups[control_task_group.name] = control_task_group
self.action_task_group.downstream_names.append(control_task_group.name)
second_action_task_group = ActionTaskGroup(
name="second_action_task_group", tasks=[new_task("second_action_task")]
)
self.workflow.task_groups[second_action_task_group.name] = second_action_task_group
self.action_task_group.downstream_names.append(second_action_task_group.name)
exp_first_action_status_notification_name = f"{self.action_task_group.name}{NODE_STATUS_SUFFIX}"
exp_second_action_status_notification_name = f"{second_action_task_group.name}{NODE_STATUS_SUFFIX}"
exp_action_control_transition_notification_name = (
f"{self.action_task_group.name}{NODE_TRANSITION_SUFFIX}_T_{control_task_group.name}"
)
exp_action_action_transition_notification_name = (
f"{self.action_task_group.name}{NODE_TRANSITION_SUFFIX}_T_{second_action_task_group.name}"
)
# When
self.transformer.process_workflow_after_convert_nodes(self.workflow, self.props)
# Then
self.assertEqual(7, len(self.workflow.task_groups))
self.assertEqual(
{
exp_first_action_status_notification_name,
self.action_task_group.name,
exp_action_control_transition_notification_name,
control_task_group.name,
exp_action_action_transition_notification_name,
exp_second_action_status_notification_name,
second_action_task_group.name,
},
self.workflow.task_groups.keys(),
)
def test_should_handle_join_type_case(self):
"""
Input:
CONTROL > < ACTION
| |
CONTROL
Expected output:
STATUS
|
CONTROL > < ACTION
| |
TRANSITION TRANSITION
| |
CONTROL
"""
# Given
control_task_group = ControlTaskGroup(
name="control_task_group", tasks=[Task(task_id="control_task", template_name="dummy.tpl")]
)
join_task_group = ControlTaskGroup(
name="join_task_group", tasks=[Task(task_id="join_task", template_name="dummy.tpl")]
)
self.workflow.task_groups[control_task_group.name] = control_task_group
self.workflow.task_groups[join_task_group.name] = join_task_group
self.action_task_group.downstream_names = [join_task_group.name]
control_task_group.downstream_names = [join_task_group.name]
exp_action_status_notification_name = f"{self.action_task_group.name}{NODE_STATUS_SUFFIX}"
exp_action_control_transition_notification_name = (
f"{self.action_task_group.name}{NODE_TRANSITION_SUFFIX}_T_{join_task_group.name}"
)
exp_control_control_transition_notification_name = (
f"{control_task_group.name}{NODE_TRANSITION_SUFFIX}_T_{join_task_group.name}"
)
# When
self.transformer.process_workflow_after_convert_nodes(self.workflow, self.props)
# Then
self.assertEqual(6, len(self.workflow.task_groups))
self.assertEqual(
{
exp_action_status_notification_name,
control_task_group.name,
self.action_task_group.name,
exp_action_control_transition_notification_name,
exp_control_control_transition_notification_name,
join_task_group.name,
},
self.workflow.task_groups.keys(),
)
|
{
"content_hash": "d6871dd3dd8cebfafd4fae60c78eca87",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 108,
"avg_line_length": 34.24033149171271,
"alnum_prop": 0.5916095199677289,
"repo_name": "GoogleCloudPlatform/oozie-to-airflow",
"id": "1bc00f765f71c5f2598d69e1f968d202e62a9322",
"size": "12989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/transformers/test_add_node_notification_transformer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "528273"
},
{
"name": "Shell",
"bytes": "57460"
},
{
"name": "Smarty",
"bytes": "31948"
}
],
"symlink_target": ""
}
|
"""
Manwë resource fields.
.. moduleauthor:: Martijn Vermaat <martijn@vermaat.name>
.. Licensed under the MIT license, see the LICENSE file.
"""
import dateutil.parser
class Field(object):
"""
Base class for resource field definitions.
A field definition can convert field values from their API representation
to their Python representation, and vice versa.
"""
def __init__(self, key=None, mutable=False, hidden=False, default=None,
doc=None):
"""
Create a field instance.
:arg str key: Key by which this field is stored in the API.
:arg bool mutable: If `True`, field values can be modified.
:arg bool hidden: If `True`, field should not be shown.
:arg default: Default field value (as a Python value).
:arg str doc: Documentation string
"""
#: Key by which this field is stored in the API. By default inherited
#: from :attr:`name`.
self.key = key
#: If `True`, field values can be modified.
self.mutable = mutable
#: If `True`, field should not be shown.
self.hidden = hidden
#: Default field value (as an API value).
self.default = self.from_python(default)
#: Documentation string.
self.doc = doc
self._name = None
@property
def name(self):
"""
Name by which this field is available on the resource class.
"""
return self._name
@name.setter
def name(self, value):
self._name = value
if self.key is None:
self.key = self.name
def to_python(self, value, resource):
"""
Convert API value to Python value.
This gets called from field getters, so the user gets a nice Python
value when accessing the field.
Subclasses for structured data (such as lists and dicts) should be
careful to not return mutable structures here, since that would allow
to bypass the field setter. For example, calling `field.append(v)`
will not add `field` to the set of dirty fields and will not go
through :meth:`from_python`. Actually, it might not even modify the
API value on the resource, because :meth:`to_python` probably created
a copy.
One solution for this, as implemented on :class:`Set`, is to return an
immutable field value (a `frozenset` in this case) and thereby force
modifications through the field setter.
Another approach would be something similar to the `MutableDict` type
in SQLAlchemy (see `Mutation Tracking
<http://docs.sqlalchemy.org/en/latest/orm/extensions/mutable.html>`_).
This does not apply to :class:`Link` fields, where the value is itself
a resource which should be modified using its own
:meth:`resources.Resource.save` method.
"""
return value
def from_python(self, value):
"""
Convert Python value to API value.
"""
return value
class Boolean(Field):
pass
class Integer(Field):
pass
class String(Field):
pass
class Link(Field):
"""
Definition for a resource link.
"""
def __init__(self, resource_key, **kwargs):
"""
:arg str resource_key: Key for the linked resource.
"""
self.resource_key = resource_key
super(Link, self).__init__(**kwargs)
def to_python(self, value, resource):
"""
Create a :class:`resources.Resource` instance from the resource URI.
Modifications of the returned resource should be saved by calling
:meth:`resources.Resource.save` on that resource.
"""
if value is None:
return None
# This is a bit ugly. In request data, a resource link is represented
# by its uri (a string). But in response data, it is represented by an
# object with a uri key.
if isinstance(value, dict):
uri = value['uri']
else:
uri = value
return getattr(resource.session, self.resource_key)(uri)
def from_python(self, value):
"""
In request data, a resource link is represented by its URI (a string).
"""
if value is None:
return None
return value.uri
class DateTime(Field):
def to_python(self, value, resource):
if value is None:
return None
return dateutil.parser.parse(value)
def from_python(self, value):
if value is None:
return None
return value.isoformat()
class Blob(Field):
def to_python(self, value, resource):
"""
        Iterate over the data source's data in chunks.
"""
if value is None:
return None
return resource.session.get(value['uri'], stream=True).iter_content(
chunk_size=resource.session.config.DATA_BUFFER_SIZE)
def from_python(self, value):
if value is None:
return None
raise NotImplementedError()
class Set(Field):
def __init__(self, field, **kwargs):
"""
:arg field: Field definition for the set members.
:type field: :class:`Field`
"""
self.field = field
super(Set, self).__init__(**kwargs)
def to_python(self, value, resource):
"""
        Convert the set to an immutable `frozenset`. See the
:meth:`Field.to_python` docstring.
"""
if value is None:
return None
return frozenset(self.field.to_python(x, resource) for x in value)
def from_python(self, value):
if value is None:
return None
return [self.field.from_python(x) for x in value]
class Queries(Field):
"""
Definition for a field containing annotation queries.
In the API, annotation queries are lists of dictionaries with `name` and
`expression` items.
As a Python value, we represent this as a dictionary with keys the query
names and values the query expressions.
"""
def to_python(self, value, resource):
if value is None:
return None
return {q['name']: q['expression'] for q in value}
def from_python(self, value):
if value is None:
return None
return [{'name': k, 'expression': v} for k, v in value.items()]
class Custom(Field):
"""
Custom field definitions are parameterized with conversion functions.
"""
def __init__(self, from_api, to_api, **kwargs):
self._from_api = from_api
self._to_api = to_api
super(Custom, self).__init__(**kwargs)
def to_python(self, value, resource):
return self._from_api(value, resource)
def from_python(self, value):
return self._to_api(value)
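# A minimal usage sketch (added for illustration, not part of the original
# module): DateTime converts between ISO 8601 strings (the API side) and
# datetime objects (the Python side); Queries converts between a list of
# name/expression dicts and a plain mapping. The example values are made up.
#
#     >>> import datetime
#     >>> f = DateTime()
#     >>> f.from_python(datetime.datetime(2020, 1, 2))
#     '2020-01-02T00:00:00'
#     >>> f.to_python('2020-01-02T00:00:00', None)  # `resource` is unused here
#     datetime.datetime(2020, 1, 2, 0, 0)
#     >>> Queries().to_python([{'name': 'q1', 'expression': 'sample:mine'}], None)
#     {'q1': 'sample:mine'}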
|
{
"content_hash": "5d7a639b3b00fa9b6a373314e4e6ca7b",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 78,
"avg_line_length": 29.22222222222222,
"alnum_prop": 0.60412401286926,
"repo_name": "fmin2958/manwe",
"id": "38f4ef1d27511d947002fa1fabbd317826957842",
"size": "6863",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "manwe/fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "125609"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
# Currently we are using the default Django auth views, at some point we will
# want to write our own views here
|
{
"content_hash": "46f90694ea4853303a4ff25633dc6221",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 78,
"avg_line_length": 37.75,
"alnum_prop": 0.7814569536423841,
"repo_name": "mcdermott-scholars/mcdermott",
"id": "5866f44c9593cfe8faa34a50cd2fd2cdd078b1e0",
"size": "151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "login/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7029"
},
{
"name": "HTML",
"bytes": "133938"
},
{
"name": "Python",
"bytes": "134883"
},
{
"name": "Shell",
"bytes": "1354"
}
],
"symlink_target": ""
}
|
VERSION = (0, 3, 5)
__version__ = '.'.join(map(str, VERSION))
class PaymentProviderException(Exception):
def __init__(self, message, *args, **kwargs):
super(PaymentProviderException, self).__init__(message, *args, **kwargs)
payment_providers = {}
def register_provider(name, provider):
payment_providers[name] = provider
|
{
"content_hash": "f1094a4d9640d3c59b44bb438cd8a696",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 80,
"avg_line_length": 26.307692307692307,
"alnum_prop": 0.6695906432748538,
"repo_name": "feinheit/zipfelchappe",
"id": "b1c19441fe40087665caeda0731ef83bf68b466b",
"size": "343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zipfelchappe/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13748"
},
{
"name": "HTML",
"bytes": "54873"
},
{
"name": "JavaScript",
"bytes": "12651"
},
{
"name": "Python",
"bytes": "183855"
},
{
"name": "Shell",
"bytes": "150"
}
],
"symlink_target": ""
}
|
"""Run all of the unit tests for this package."""
from __future__ import print_function
import sys
import pytest
from ._compat import input
try:
import System
except ImportError:
print("Load clr import hook")
import clr
clr.AddReference("Python.Test")
clr.AddReference("System.Collections")
clr.AddReference("System.Data")
clr.AddReference("System.Management")
def main(verbosity=1):
# test_module passes on its own, but not here if
# other test modules that import System.Windows.Forms
# run first. They must not do module level import/AddReference()
# of the System.Windows.Forms namespace.
# FIXME: test_engine has tests that are being skipped.
# FIXME: test_subclass has tests that are being skipped.
pytest.main()
if __name__ == '__main__':
main()
if '--pause' in sys.argv:
print("Press enter to continue")
input()
|
{
"content_hash": "525b9c198bff6ff802953792b2e5d0e7",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 68,
"avg_line_length": 24.594594594594593,
"alnum_prop": 0.676923076923077,
"repo_name": "vmuriart/pythonnet",
"id": "8011d05e62b4a6ea87aebfa52bffe9203f2cb45b",
"size": "957",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/tests/runtests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "126"
},
{
"name": "C",
"bytes": "9291"
},
{
"name": "C#",
"bytes": "658657"
},
{
"name": "PowerShell",
"bytes": "3211"
},
{
"name": "Python",
"bytes": "258972"
}
],
"symlink_target": ""
}
|
from cloud4rpi import utils
class Device(object):
def __init__(self, api):
def on_command(cmd):
self.__on_command(cmd)
self.__api = api
self.__api.on_command = on_command
self.__variables = {}
self.__diag = {}
@staticmethod
def __resolve_binding(binding, current=None, default=None):
if hasattr(binding, 'read'):
return binding.read()
elif callable(binding):
return utils.resolve_callable(binding, current)
else:
return default
def __validate_payload(self, payload):
result = {}
for name, value in payload.items():
variable = self.__variables.get(name, None)
if not variable:
continue
t = variable.get('type', None)
result[name] = utils.validate_variable_value(name, t, value)
return result
def __on_command(self, cmd):
update = self.__apply_commands(cmd)
if bool(update):
self.__api.publish_data(update, data_type='cr')
def __apply_commands(self, cmd):
update = {}
for varName, value in cmd.items():
variable = self.__variables.get(varName, None)
if not variable:
continue
            # consider using __resolve_binding here
new_value = value
handler = variable.get('bind', None)
if callable(handler):
new_value = handler(new_value)
t = variable.get('type', None)
new_value = utils.validate_variable_value(varName, t, new_value)
variable['value'] = new_value
update[varName] = new_value
return update
def declare(self, variables):
for name, value in variables.items():
utils.guard_against_invalid_variable_type(name,
value.get('type', None))
self.__variables = variables
def declare_diag(self, diag):
self.__diag = diag
def read_config(self):
return [{'name': name, 'type': value['type']}
for name, value in self.__variables.items()]
def read_data(self):
for name, varConfig in self.__variables.items():
bind = varConfig.get('bind', None)
if bind:
curr = varConfig.get('value')
result = self.__resolve_binding(bind, curr, curr)
t = varConfig.get('type')
new_val = utils.validate_variable_value(name, t, result)
varConfig['value'] = new_val
readings = {varName: varConfig.get('value')
for varName, varConfig in self.__variables.items()}
return readings
def read_diag(self):
readings = {}
for name, value in self.__diag.items():
readings[name] = self.__resolve_binding(value, None, value)
return readings
def publish_config(self, cfg=None):
if cfg is None:
cfg = self.read_config()
else:
cfg = utils.validate_config(cfg)
return self.__api.publish_config(cfg)
def publish_data(self, data=None):
if data is None:
data = self.read_data()
else:
data = self.__validate_payload(data)
return self.__api.publish_data(data)
def publish_diag(self, diag=None):
if diag is None:
diag = self.read_diag()
return self.__api.publish_diag(diag)
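# Illustrative sketch (added; the variable name, the 'numeric' type string and
# the `api` object are assumptions for demonstration, not taken from the
# original repo): how a variables mapping might be declared and read back.
#
#     def read_temperature():
#         return 21.5
#
#     device = Device(api)  # `api` must provide on_command/publish_* hooks
#     device.declare({'Temperature': {'type': 'numeric',
#                                     'bind': read_temperature}})
#     device.read_data()    # -> {'Temperature': 21.5}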
|
{
"content_hash": "3fcf02a6408341ac818339762e83bfa0",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 78,
"avg_line_length": 31.419642857142858,
"alnum_prop": 0.539357772094345,
"repo_name": "cloud4rpi/cloud4rpi",
"id": "d6f47b1c0c8b8cfcf0394ea8062954e5f226a936",
"size": "3544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloud4rpi/device.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "492"
},
{
"name": "Python",
"bytes": "44644"
}
],
"symlink_target": ""
}
|
import abc
class SnmpSenderBase(object, metaclass=abc.ABCMeta):
"""Abstract Vitrage snmp trap sender"""
@abc.abstractmethod
def send_snmp(self, alarm_data):
pass
|
{
"content_hash": "8510ebdd81078d6c40a10a4adfc13067",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 52,
"avg_line_length": 20.555555555555557,
"alnum_prop": 0.6864864864864865,
"repo_name": "openstack/vitrage",
"id": "3fedf4cf551d7ab83a9aafedaa0351d959af3e4e",
"size": "752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vitrage/notifier/plugins/snmp/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "26541"
},
{
"name": "Mako",
"bytes": "896"
},
{
"name": "Python",
"bytes": "2074427"
},
{
"name": "Shell",
"bytes": "17668"
}
],
"symlink_target": ""
}
|
from Bio import pairwise2, Entrez, SeqIO
from Bio.SubsMat import MatrixInfo as matlist
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast import NCBIXML
import tensorflow as tf
from urllib.request import urlopen
from urllib.parse import urlparse
from subprocess import call, check_output, run
from pyensembl import EnsemblRelease
from bs4 import BeautifulSoup
from collections import OrderedDict, Counter, deque
from operator import itemgetter
from itertools import islice, chain
from threading import Thread
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import argrelextrema
import pandas as pd
import regex
import re
import datetime, math, sys, hashlib, pickle, time, random, string, json, glob, os, signal
from getpass import getpass
import httplib2 as http
from pyliftover import LiftOver
from PIL import Image
class TimeoutError(Exception):
'''
Custom error for Timeout class.
'''
pass
class Timeout:
'''
A timeout handler with context manager.
Based on UNIX signals.
'''
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
raise TimeoutError(self.error_message)
def __enter__(self):
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
signal.alarm(0)
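# Usage sketch (added): abort a slow call after 2 seconds. Relies on UNIX
# signals (SIGALRM), so it will not work on Windows or outside the main thread.
#
#     try:
#         with Timeout(seconds=2):
#             time.sleep(10)  # interrupted after ~2 s
#     except TimeoutError:
#         print('gave up')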
def random_walk(length):
    '''int => np.array
    Return a random walk path of the given length.
    '''
    walk = []
    y = 0
    for _ in range(length):
if random.randint(0,1):
y += 1
else:
y -= 1
walk.append(y)
return np.array(walk)
def find_min_max(array):
'''np.array => dict
Return a dictionary of indexes
where the maxima and minima of the input array are found.
'''
# for local maxima
maxima = argrelextrema(array, np.greater)
# for local minima
minima = argrelextrema(array, np.less)
return {'maxima':maxima,
'minima':minima}
def smooth(array, window_len=10, window='hanning'):
'''np.array, int, str => np.array
Smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
    in the beginning and end part of the output signal.
    input:
        array: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
t = linspace(-2,2,0.1)
x = sin(t)+randn(len(t))*0.1
y = smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
'''
    if array.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if array.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return array
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = np.r_[array[window_len-1:0:-1],array,array[-2:-window_len-1:-1]]
#print(len(s))
if window == 'flat': #moving average
w = np.ones(window_len,'d')
else:
w = eval('np.'+window+'(window_len)')
y = np.convolve(w/w.sum(),s,mode='valid')
y = y[int(window_len/2-1):-int(window_len/2)]
    offset = len(y)-len(array) #in case input and output are not of the same length
assert len(array) == len(y[offset:])
return y[offset:]
def cohen_effect_size(group1, group2):
'''(np.array, np.array) => float
Compute the Cohen Effect Size (d) between two groups
by comparing the difference between groups to the variability within groups.
Return the the difference in standard deviation.
'''
assert type(group1) == np.ndarray
assert type(group2) == np.ndarray
diff = group1.mean() - group2.mean()
var1 = group1.var()
var2 = group2.var()
n1, n2 = len(group1), len(group2)
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / np.sqrt(pooled_var)
return d
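# Worked example (added): two small samples whose means differ by one unit.
#
#     >>> g1 = np.array([1, 2, 3])
#     >>> g2 = np.array([2, 3, 4])
#     >>> round(cohen_effect_size(g1, g2), 4)   # pooled variance is 2/3
#     -1.2247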
def gen_ascii_symbols(input_file, chars):
'''
Return a dict of letters/numbers associated with
the corresponding ascii-art representation.
You can use http://www.network-science.de/ascii/ to generate the ascii-art for each symbol.
The input file looks like:
,adPPYYba,
"" `Y8
,adPPPPP88
88, ,88
`"8bbdP"Y8
88
88
88
88,dPPYba,
88P' "8a
88 d8
88b, ,a8"
8Y"Ybbd8"'
...
Each symbol is separated by at least one empty line ("\n")
'''
#input_file = 'ascii_symbols.txt'
#chars = string.ascii_lowercase+string.ascii_uppercase+'0123456789'
symbols = []
s = ''
with open(input_file, 'r') as f:
for line in f:
if line == '\n':
if len(s):
symbols.append(s)
s = ''
else:
continue
else:
s += line
return dict(zip(chars,symbols))
def gen_ascii_captcha(symbols, length=6, max_h=10, noise_level=0, noise_char='.'):
'''
Return a string of the specified length made by random symbols.
Print the ascii-art representation of it.
Example:
symbols = gen_ascii_symbols(input_file='ascii_symbols.txt',
chars = string.ascii_lowercase+string.ascii_uppercase+'0123456789')
while True:
captcha = gen_ascii_captcha(symbols, noise_level=0.2)
x = input('captcha: ')
if x == captcha:
print('\ncorrect')
break
print('\ninvalid captcha, please retry')
'''
assert noise_level <= 1
#max_h = 10
#noise_level = 0
    captcha = ''.join(random.sample(list(symbols), length))
#print(code)
pool = [symbols[c].split('\n') for c in captcha]
for n in range(max_h, 0, -1):
line = ''
for item in pool:
try:
next_line = item[-n]
except IndexError:
next_line = ''.join([' ' for i in range(max([len(_item) for _item in item]))])
if noise_level:
#if random.random() < noise_level:
# next_line = next_line.replace(' ', noise_char)
next_line = ''.join([c if random.random() > noise_level \
else random.choice(noise_char) for c in next_line])
line += next_line
print(line)
return captcha
def rnd_sample_df(df, n=1, slice_size=1):
'''
Yield dataframes generated by randomly slicing df.
It is different from pandas.DataFrame.sample().
'''
assert n > 0 and slice_size > 0
max_len = len(df)-slice_size
for _ in range(n):
i = random.randint(0,max_len)
yield df.iloc[i:i+slice_size]
def date_to_stamp(d='2012-12-31'):
'''
Return UNIX timestamp of a date.
'''
Y,M,D = d.split('-')
stamp = time.mktime(datetime.date(int(Y),
int(M),
int(D)
).timetuple()
)
return stamp
def rolling_normalize_df(df, method='min-max', size=30, overlap=5):
'''
    Return a new df with datapoints normalized based on a sliding window
    rolling over a pandas.DataFrame.
It is useful to have local (window by window) normalization of the values.
'''
to_merge = []
for item in split_overlap_long(df, size, overlap, is_dataframe=True):
to_merge.append(normalize_df(item, method))
new_df = pd.concat(to_merge)
return new_df.groupby(new_df.index).mean()
def normalize_df(df, method='min-max'):
'''
Return normalized data.
max, min, mean and std are computed considering
    all the values of the df and not by column,
    i.e. mean = df.values.mean() and not df.mean().
    Ideal to normalize a df having multiple columns of non-independent values.
    Methods implemented:
    'raw'         No normalization
    'min-max'     Default
'norm' ...
'z-norm' ...
'sigmoid' ...
'decimal' ...
'softmax' It's a transformation rather than a normalization
'tanh' ...
'''
if type(df) is not pd.core.frame.DataFrame:
df = pd.DataFrame(df)
if method == 'min-max':
return (df-df.values.min())/(df.values.max()-df.values.min())
if method == 'norm':
return (df-df.values.mean())/(df.values.max()-df.values.mean())
if method == 'z-norm':
return (df-df.values.mean())/df.values.std()
if method == 'sigmoid':
_max = df.values.max()
return df.apply(lambda x: 1/(1+np.exp(-x/_max)))
if method == 'decimal':
#j = len(str(int(df.values.max())))
i = 10**len(str(int(df.values.max())))#10**j
return df.apply(lambda x: x/i)
if method == 'tanh':
return 0.5*(np.tanh(0.01*(df-df.values.mean()))/df.values.std() + 1)
if method == 'softmax':
return np.exp(df)/np.sum(np.exp(df))
if method == 'raw':
return df
raise ValueError(f'"method" not found: {method}')
def merge_dict(dictA, dictB):
'''(dict, dict) => dict
Merge two dicts, if they contain the same keys, it sums their values.
Return the merged dict.
Example:
dictA = {'any key':1, 'point':{'x':2, 'y':3}, 'something':'aaaa'}
dictB = {'any key':1, 'point':{'x':2, 'y':3, 'z':0, 'even more nested':{'w':99}}, 'extra':8}
merge_dict(dictA, dictB)
{'any key': 2,
'point': {'x': 4, 'y': 6, 'z': 0, 'even more nested': {'w': 99}},
'something': 'aaaa',
'extra': 8}
'''
r = {}
common_k = [k for k in dictA if k in dictB]
common_k += [k for k in dictB if k in dictA]
common_k = set(common_k)
for k, v in dictA.items():
#add unique k of dictA
if k not in common_k:
r[k] = v
else:
#add inner keys if they are not containing other dicts
if type(v) is not dict:
if k in dictB:
r[k] = v + dictB[k]
else:
#recursively merge the inner dicts
r[k] = merge_dict(dictA[k], dictB[k])
#add unique k of dictB
for k, v in dictB.items():
if k not in common_k:
r[k] = v
return r
def png_to_flat_array(img_file):
img = Image.open(img_file).convert('RGBA')
arr = np.array(img)
# make a 1-dimensional view of arr
return arr.ravel()
def png_to_vector_matrix(img_file):
# convert it to a matrix
return np.matrix(png_to_flat_array(img_file))
def TFKMeansCluster(vectors, noofclusters, datatype="uint8"):
'''
K-Means Clustering using TensorFlow.
'vectors' should be a n*k 2-D NumPy array, where n is the number
of vectors of dimensionality k.
'noofclusters' should be an integer.
'''
noofclusters = int(noofclusters)
assert noofclusters < len(vectors)
#Find out the dimensionality
dim = len(vectors[0])
#Will help select random centroids from among the available vectors
vector_indices = list(range(len(vectors)))
random.shuffle(vector_indices)
#GRAPH OF COMPUTATION
#We initialize a new graph and set it as the default during each run
#of this algorithm. This ensures that as this function is called
#multiple times, the default graph doesn't keep getting crowded with
#unused ops and Variables from previous function calls.
graph = tf.Graph()
with graph.as_default():
#SESSION OF COMPUTATION
sess = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
centroids = [tf.Variable((vectors[vector_indices[i]])) for i in range(noofclusters)]
##These nodes will assign the centroid Variables the appropriate
##values
centroid_value = tf.placeholder(datatype, [dim])
cent_assigns = []
for centroid in centroids:
cent_assigns.append(tf.assign(centroid, centroid_value))
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
assignments = [tf.Variable(0) for i in range(len(vectors))]
##These nodes will assign an assignment Variable the appropriate
##value
assignment_value = tf.placeholder("int32")
cluster_assigns = []
for assignment in assignments:
cluster_assigns.append(tf.assign(assignment,
assignment_value))
##Now lets construct the node that will compute the mean
#The placeholder for the input
mean_input = tf.placeholder("float", [None, dim])
#The Node/op takes the input and computes a mean along the 0th
#dimension, i.e. the list of input vectors
mean_op = tf.reduce_mean(mean_input, 0)
##Node for computing Euclidean distances
#Placeholders for input
v1 = tf.placeholder("float", [dim])
v2 = tf.placeholder("float", [dim])
euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
#Placeholder for input
centroid_distances = tf.placeholder("float", [noofclusters])
cluster_assignment = tf.argmin(centroid_distances, 0)
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
init_op = tf.global_variables_initializer() #deprecated tf.initialize_all_variables()
#Initialize all variables
sess.run(init_op)
##CLUSTERING ITERATIONS
#Now perform the Expectation-Maximization steps of K-Means clustering
#iterations. To keep things simple, we will only do a set number of
#iterations, instead of using a Stopping Criterion.
noofiterations = 100
for iteration_n in range(noofiterations):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
#Iterate over each vector
for vector_n in range(len(vectors)):
vect = vectors[vector_n]
#Compute Euclidean distance between this vector and each
#centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
#cluster assignment node.
distances = [sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
for centroid in centroids]
#Now use the cluster assignment node, with the distances
#as the input
assignment = sess.run(cluster_assignment, feed_dict = {
centroid_distances: distances})
#Now assign the value to the appropriate state variable
sess.run(cluster_assigns[vector_n], feed_dict={
assignment_value: assignment})
##MAXIMIZATION STEP
#Based on the expected state computed from the Expectation Step,
#compute the locations of the centroids so as to maximize the
#overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(noofclusters):
#Collect all the vectors assigned to this cluster
assigned_vects = [vectors[i] for i in range(len(vectors))
if sess.run(assignments[i]) == cluster_n]
#Compute new centroid location
new_location = sess.run(mean_op, feed_dict={mean_input: np.array(assigned_vects)})
#Assign value to appropriate variable
sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
#Return centroids and assignments
centroids = sess.run(centroids)
assignments = sess.run(assignments)
return centroids, assignments
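# Hypothetical invocation (added, an untested sketch): cluster 100 random
# float vectors into 4 groups. Requires TensorFlow 1.x APIs
# (tf.Session/tf.placeholder); pass datatype="float" to match float inputs.
#
#     vecs = np.random.rand(100, 4).astype(np.float32)
#     centroids, assignments = TFKMeansCluster(vecs, 4, datatype="float")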
def xna_calc(sequence, t='dsDNA', p=0):
'''str => dict
BETA version, works only for dsDNA and ssDNA.
Return basic "biomath" calculations based on the input sequence.
Arguments:
t (type) :'ssDNA' or 'dsDNA'
        p (phosphates): 0,1,2
            #number of terminal phosphates per strand to account for; 79.0 Da is added per phosphate on each strand
'''
r = {}
#check inputs
c = Counter(sequence.upper())
for k in c.keys():
if k in 'ACGNT':
pass
else:
raise ValueError(f'Wrong sequence passed: "sequence" contains invalid characters, only "ATCGN" are allowed.')
if t not in ['ssDNA','dsDNA']:
raise ValueError(f'Wrong DNA type passed: "t" can be "ssDNA" or "dsDNA". "{t}" was passed instead.')
    if not 0 <= p <= 2:
        raise ValueError(f'Wrong number of 5\'-phosphates passed: "p" must be an integer from 0 to 2. {p} was passed instead.')
##Calculate:
#length
r['len'] = len(sequence)
#molecular weight
#still unsure about what is the best method to do this
#s = 'ACTGACTGACTATATTCGCGATCGATGCGCTAGCTCGTACGC'
#bioinformatics.org : 25986.8 Da
#Thermo : 25854.8 Da
#Promega : 27720.0 Da
#MolBioTools : 25828.77 Da
#This function : 25828.86 Da #Similar to OligoCalc implementation
#DNA Molecular Weight (typically for synthesized DNA oligonucleotides.
#The OligoCalc DNA MW calculations assume that there is not a 5' monophosphate)
#Anhydrous Molecular Weight = (An x 313.21) + (Tn x 304.2) + (Cn x 289.18) + (Gn x 329.21) - 61.96
#An, Tn, Cn, and Gn are the number of each respective nucleotide within the polynucleotide.
#The subtraction of 61.96 gm/mole from the oligonucleotide molecular weight takes into account the removal
#of HPO2 (63.98) and the addition of two hydrogens (2.02).
#Alternatively, you could think of this of the removal of a phosphate and the addition of a hydroxyl,
#since this formula calculates the molecular weight of 5' and 3' hydroxylated oligonucleotides.
#Please note: this calculation works well for synthesized oligonucleotides.
#If you would like an accurate MW for restriction enzyme cut DNA, please use:
#Molecular Weight = (An x 313.21) + (Tn x 304.2) + (Cn x 289.18) + (Gn x 329.21) - 61.96 + 79.0
#The addition of 79.0 gm/mole to the oligonucleotide molecular weight takes into account the 5' monophosphate
#left by most restriction enzymes.
#No phosphate is present at the 5' end of strands made by primer extension,
#so no adjustment to the OligoCalc DNA MW calculation is necessary for primer extensions.
#That means that for ssDNA, you need to add 79.0 to the value calculated by OligoCalc
#to get the weight with a 5' monophosphate.
#Finally, if you need to calculate the molecular weight of phosphorylated dsDNA,
#don't forget to adjust both strands. You can automatically perform either addition
#by selecting the Phosphorylated option from the 5' modification select list.
#Please note that the chemical modifications are only valid for DNA and may not be valid for RNA
#due to differences in the linkage chemistry, and also due to the lack of the 5' phosphates
#from synthetic RNA molecules. RNA Molecular Weight (for instance from an RNA transcript).
#The OligoCalc RNA MW calculations assume that there is a 5' triphosphate on the molecule)
#Molecular Weight = (An x 329.21) + (Un x 306.17) + (Cn x 305.18) + (Gn x 345.21) + 159.0
#An, Un, Cn, and Gn are the number of each respective nucleotide within the polynucleotide.
#Addition of 159.0 gm/mole to the molecular weight takes into account the 5' triphosphate.
if t == 'ssDNA':
mw = ((c['A']*313.21)+(c['T']*304.2)+(c['C']*289.18)+(c['G']*329.21)+(c['N']*303.7)-61.96)+(p*79.0)
elif t =='dsDNA':
mw_F = ((c['A']*313.21)+(c['T']*304.2)+(c['C']*289.18)+(c['G']*329.21)+(c['N']*303.7)-61.96)+(p*79.0)
d = Counter(complement(sequence.upper())) #complement sequence
mw_R = ((d['A']*313.21)+(d['T']*304.2)+(d['C']*289.18)+(d['G']*329.21)+(d['N']*303.7)-61.96)+(p*79.0)
mw = mw_F + mw_R
    elif t == 'ssRNA':
        pass #not implemented; t is validated above, so this branch is unreachable
    elif t == 'dsRNA':
        pass #not implemented; t is validated above, so this branch is unreachable
    else:
        raise ValueError(f'Nucleic acid type not understood: "{t}"')
r['MW in Daltons'] = mw
#in ng
r['MW in ng'] = mw * 1.6605402e-15
#molecules in 1ng
r['molecules per ng'] = 1/r['MW in ng']
#ng for 10e10 molecules
r['ng per billion molecules'] = (10**9)/r['molecules per ng'] #(1 billions)
    #moles per ng (1 ng in grams divided by the molar mass in g/mol)
    r['moles per ng'] = 1e-9 / mw
return r
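# Tiny worked example (added): a 2-mer ssDNA oligo with no phosphates.
# MW = 313.21 (A) + 304.2 (T) - 61.96 = 555.45 Da.
#
#     >>> round(xna_calc('AT', t='ssDNA')['MW in Daltons'], 2)
#     555.45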
def occur(string, sub):
'''
Counts the occurrences of a sequence in a string considering overlaps.
Example:
>> s = 'ACTGGGACGGGGGG'
>> s.count('GGG')
3
>> occur(s,'GGG')
5
'''
count = start = 0
while True:
start = string.find(sub, start) + 1
if start > 0:
count+=1
else:
return count
def get_prime(n):
    '''Yield the prime numbers below n.'''
    if n > 2:
        yield 2
    for num in range(3, n, 2):
        if all(num % i != 0 for i in range(2, int(math.sqrt(num)) + 1)):
            yield num
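# Quick check (added):
#     >>> list(get_prime(20))
#     [2, 3, 5, 7, 11, 13, 17, 19]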
def ssl_fencrypt(infile, outfile):
'''(file_path, file_path) => encrypted_file
Uses openssl to encrypt/decrypt files.
'''
pwd = getpass('enter encryption pwd:')
if getpass('repeat pwd:') == pwd:
run(f'openssl enc -aes-256-cbc -a -salt -pass pass:{pwd} -in {infile} -out {outfile}',shell=True)
else:
print("passwords don't match.")
def ssl_fdecrypt(infile, outfile):
'''(file_path, file_path) => decrypted_file
Uses openssl to encrypt/decrypt files.
'''
pwd = getpass('enter decryption pwd:')
run(f'openssl enc -d -aes-256-cbc -a -pass pass:{pwd} -in {infile} -out {outfile}', shell=True)
def loop_zip(strA, strB):
'''(str, str) => zip()
Return a zip object containing each letters of strA, paired with letters of strB.
If strA is longer than strB, then its letters will be paired recursively.
Example:
>>> list(loop_zip('ABCDEF', '123'))
[('A', '1'), ('B', '2'), ('C', '3'), ('D', '1'), ('E', '2'), ('F', '3')]
'''
assert len(strA) >= len(strB)
s = ''
n = 0
for l in strA:
try:
s += strB[n]
except IndexError:
n = 0
s += strB[n]
n += 1
return zip(list(strA),list(s))
def encrypt(msg, pwd):
'''(str, str) => list
    Simple encryption/decryption tool.
WARNING:
This is NOT cryptographically secure!!
'''
if len(msg) < len(pwd):
raise ValueError('The password is longer than the message. This is not allowed.')
return [(string_to_number(a)+string_to_number(b)) for a,b in loop_zip(msg, pwd)]
def decrypt(encr, pwd):
    '''(list, str) => str
    Simple encryption/decryption tool.
WARNING:
This is NOT cryptographically secure!!
'''
return ''.join([number_to_string((a-string_to_number(b))) for a,b in loop_zip(encr, pwd)])
def convert_mw(mw, to='g'):
'''(int_or_float, str) => float
Converts molecular weights (in dalton) to g, mg, ug, ng, pg.
Example:
    >> diploid_human_genome_mw = 6_469.66e6 * 660 #length * average weight of nucleotide
>> convert_mw(diploid_human_genome_mw, to="ng")
0.0070904661368191195
'''
if to == 'g':
return mw * 1.6605402e-24
if to == 'mg':
return mw * 1.6605402e-21
if to == 'ug':
return mw * 1.6605402e-18
if to == 'ng':
return mw * 1.6605402e-15
if to == 'pg':
return mw * 1.6605402e-12
raise ValueError(f"'to' must be one of ['g','mg','ug','ng','pg'] but '{to}' was passed instead.")
def snp237(snp_number):
'''int => list
Return the genomic position of a SNP on the GCRh37 reference genome.
'''
query = f'https://www.snpedia.com/index.php/Rs{snp_number}'
html = urlopen(query).read().decode("utf-8")
for line in html.split('\n'):
if line.startswith('<tr><td width="90">Reference</td>'):
reference = line.split('"')[-2]
elif line.startswith('<tr><td width="90">Chromosome</td>'):
chromosome = line.split('<td>')[1].split('<')[0]
elif line.startswith('<tr><td width="90">Position</td>'):
position = int(line.split('<td>')[1].split('<')[0])
break
if 'GRCh38' in reference:
lo = LiftOver('hg38', 'hg19')
return lo.convert_coordinate(f'chr{chromosome}', position)[0][:2]
else:
return f'chr{chromosome}', position
def is_prime(n):
'''Return True if n is a prime number'''
if n == 1:
return False #1 is not prime
    if n == 2:
        return True
    #if it's even and not 2, then it's not prime
    if n > 2 and n % 2 == 0:
        return False
max_divisor = math.floor(math.sqrt(n))
for d in range(3, 1 + max_divisor, 2):
if n % d == 0:
return False
return True
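# Quick check (added):
#     >>> [n for n in range(1, 20) if is_prime(n)]
#     [2, 3, 5, 7, 11, 13, 17, 19]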
def flatmap(f, items):
    '''Apply f to each item and flatten the results into a single iterable.'''
    return chain.from_iterable(map(f, items))
def parse_fasta(fasta_file):
'''file_path => dict
Return a dict of id:sequences.
'''
d = {}
_id = False
seq = ''
with open(fasta_file,'r') as f:
for line in f:
if line.startswith('\n'):
continue
if line.startswith('>'):
if not _id:
_id = line[1:].strip()
elif _id and seq:
d.update({_id:seq})
_id = line[1:].strip()
seq = ''
else:
seq += line.strip()
d.update({_id:seq})
return d
def get_fasta_stats(fasta_file):
    '''file_path => dict
    Return the length and base counts of each sequence found in the fasta file.
    '''
    def stats(seq):
        return {'length':len(seq),
                'A':seq.count('A'),
                'T':seq.count('T'),
                'C':seq.count('C'),
                'G':seq.count('G'),
                'N':seq.count('N')}
    d = {}
    _id = False
    seq = ''
    with open(fasta_file,'r') as f:
        for line in f:
            if line.startswith('\n'):
                continue
            if line.startswith('>'):
                if _id and seq:
                    d.update({_id:stats(seq)})
                _id = line[1:].strip()
                seq = ''
            else:
                seq += line.strip().upper()
    d.update({_id:stats(seq)})
    return d
def quick_align(reference, sample, matrix=matlist.blosum62, gap_open=-10, gap_extend=-0.5):
'''
Return a binary score matrix for a pairwise alignment.
'''
alns = pairwise2.align.globalds(reference, sample, matrix, gap_open, gap_extend)
top_aln = alns[0]
    aln_reference, aln_sample, aln_score, begin, end = top_aln  # renamed to avoid shadowing the binary score list below
score = []
for i, base in enumerate(aln_reference):
if aln_sample[i] == base:
score.append(1)
else:
score.append(0)
return score
def vp(var_name,var_dict=globals(),sep=' : '):
'''(str, dict) => print
Variable Print, a fast way to print out a variable's value.
>>> scale = 0.35
>>> mass = '71 Kg'
>>> vp('scale')
scale : 0.35
>>> vp('mass',sep='=')
mass=71 Kg
'''
    try:
        print(f'{var_name}{sep}{var_dict[var_name]}')
    except KeyError:
        print(f'{var_name} not found!')
def view_matrix(arrays):
'''list_of_arrays => print
Print out the array, row by row.
'''
for a in arrays:
print(a)
print('=========')
for n,r in enumerate(arrays):
print(n,len(r))
print(f'row:{len(arrays)}\ncol:{len(r)}')
def fill_matrix(arrays,z=0):
'''(list_of_arrays, any) => None
Add z to fill-in any array shorter than m=max([len(a) for a in arrays]).
'''
m = max([len(a) for a in arrays])
for i,a in enumerate(arrays):
if len(a) != m:
arrays[i] = np.append(a, [z for n in range(m-len(a))])
def get_size(obj_0):
'''obj => int
Recursively iterate to sum size of object & members (in bytes).
Adapted from http://stackoverflow.com/questions/449560/how-do-i-determine-the-size-of-an-object-in-python
    '''
    # names required by the recipe below (the original snippet omitted them)
    from numbers import Number
    from collections.abc import Mapping, Set
    zero_depth_bases = (str, bytes, Number, range, bytearray)
    iteritems = 'items'
    def inner(obj, _seen_ids=set()):
obj_id = id(obj)
if obj_id in _seen_ids:
return 0
_seen_ids.add(obj_id)
size = sys.getsizeof(obj)
if isinstance(obj, zero_depth_bases):
pass # bypass remaining control flow and return
elif isinstance(obj, (tuple, list, Set, deque)):
size += sum(inner(i) for i in obj)
elif isinstance(obj, Mapping) or hasattr(obj, iteritems):
size += sum(inner(k) + inner(v) for k, v in getattr(obj, iteritems)())
# Check for custom object instances - may subclass above too
if hasattr(obj, '__dict__'):
size += inner(vars(obj))
if hasattr(obj, '__slots__'): # can have __slots__ with __dict__
size += sum(inner(getattr(obj, s)) for s in obj.__slots__ if hasattr(obj, s))
return size
return inner(obj_0)
def total_size(o, handlers={}, verbose=False):
'''(object, dict, bool) => print
    Returns the approximate memory footprint of an object and all of its contents.
Automatically finds the contents of the following builtin containers and
their subclasses: tuple, list, deque, dict, set and frozenset.
To search other containers, add handlers to iterate over their contents:
handlers = {SomeContainerClass: iter,
OtherContainerClass: OtherContainerClass.get_elements}
>>> d = dict(a=1, b=2, c=3, d=[4,5,6,7], e='a string of chars')
>>> print(total_size(d, verbose=True))
796
280 <type 'dict'> {'a': 1, 'c': 3, 'b': 2, 'e': 'a string of chars', 'd': [4, 5, 6, 7]}
38 <type 'str'> 'a'
24 <type 'int'> 1
38 <type 'str'> 'c'
24 <type 'int'> 3
38 <type 'str'> 'b'
24 <type 'int'> 2
38 <type 'str'> 'e'
54 <type 'str'> 'a string of chars'
38 <type 'str'> 'd'
104 <type 'list'> [4, 5, 6, 7]
24 <type 'int'> 4
24 <type 'int'> 5
24 <type 'int'> 6
24 <type 'int'> 7
'''
dict_handler = lambda d: chain.from_iterable(d.items())
all_handlers = {tuple: iter,
list: iter,
deque: iter,
dict: dict_handler,
set: iter,
frozenset: iter,
}
all_handlers.update(handlers) # user handlers take precedence
seen = set() # track which object id's have already been seen
default_size = sys.getsizeof(0) # estimate sizeof object without __sizeof__
def sizeof(o):
if id(o) in seen: # do not double count the same object
return 0
seen.add(id(o))
s = sys.getsizeof(o, default_size)
if verbose:
print(s,type(o),repr(o))
for typ, handler in all_handlers.items():
if isinstance(o, typ):
s += sum(map(sizeof, handler(o)))
break
return s
return sizeof(o)
def center(pattern):
'''np.array => np.array
Return the centered pattern,
which is given by [(value - mean) for value in pattern]
>>> array = np.array([681.7, 682.489, 681.31, 682.001, 682.001, 682.499, 682.001])
>>> center(array)
array([-0.30014286, 0.48885714, -0.69014286, 0.00085714, 0.00085714, 0.49885714, 0.00085714])
'''
#mean = pattern.mean()
#return np.array([(value - mean) for value in pattern])
return (pattern - np.mean(pattern))
def rescale(pattern):
'''np.array => np.array
Rescale each point of the array to be a float between 0 and 1.
>>> a = np.array([1,2,3,4,5,6,5,4,3,2,1])
>>> rescale(a)
array([ 0. , 0.2, 0.4, 0.6, 0.8, 1. , 0.8, 0.6, 0.4, 0.2, 0. ])
'''
#_max = pattern.max()
#_min = pattern.min()
#return np.array([(value - _min)/(_max - _min) for value in pattern])
return (pattern - pattern.min()) / (pattern.max()-pattern.min())
def standardize(pattern):
'''np.array => np.array
Return a standard pattern.
>>> a = np.array([1,2,3,4,5,6,5,4,3,2,1])
>>> standardize(a)
array([-1.41990459, -0.79514657, -0.17038855, 0.45436947, 1.07912749,
1.7038855 , 1.07912749, 0.45436947, -0.17038855, -0.79514657,
-1.41990459])
'''
#mean = pattern.mean()
#std = pattern.std()
#return np.array([(value - mean)/std for value in pattern])
return (pattern - np.mean(pattern)) / np.std(pattern)
def normalize(pattern):
'''np.array => np.array
Return a normalized pattern using np.linalg.norm().
>>> a = np.array([1,2,3,4,5,6,5,4,3,2,1])
>>> normalize(a)
'''
return pattern / np.linalg.norm(pattern)
def gen_patterns(data, length, ptype='all'):
'''(array, int) => dict
    Generate all possible patterns of a given length
    by manipulating consecutive slices of data.
    Return a dict of patterns divided by pattern_type.
>>> data = [1,2,3,4,5,4,3,2,1]
>>> gen_patterns(data,len(data))
{'center': {0: array([-1.77777778, -0.77777778, 0.22222222, 1.22222222, 2.22222222, 1.22222222, 0.22222222, -0.77777778, -1.77777778])},
'norm': {0: array([ 0.10846523, 0.21693046, 0.32539569, 0.43386092, 0.54232614, 0.43386092, 0.32539569, 0.21693046, 0.10846523])},
'scale': {0: array([ 0. , 0.25, 0.5 , 0.75, 1. , 0.75, 0.5 , 0.25, 0. ])},
'std': {0: array([-1.35224681, -0.59160798, 0.16903085, 0.92966968, 1.69030851, 0.92966968, 0.16903085, -0.59160798, -1.35224681])}}
>>> gen_patterns(data,3)
{'center': {0: array([-1., 0., 1.]),
1: array([-1., 0., 1.]),
2: array([-1., 0., 1.])},
'norm': {0: array([ 0.26726124, 0.53452248, 0.80178373]),
1: array([ 0.37139068, 0.55708601, 0.74278135]),
2: array([ 0.42426407, 0.56568542, 0.70710678])},
'scale': {0: array([ 0. , 0.5, 1. ]),
1: array([ 0. , 0.5, 1. ]),
2: array([ 0. , 0.5, 1. ])},
'std': {0: array([-1.22474487, 0. , 1.22474487]),
1: array([-1.22474487, 0. , 1.22474487]),
2: array([-1.22474487, 0. , 1.22474487])}}
'''
results = {}
ptypes = ['std','norm','scale','center']
if ptype == 'all': #to do: select specific ptypes
for t in ptypes:
results.update({t:{}})
for n in range(length):
if n+length > len(data):
break
raw = np.array(data[n:n+length])
partial = {'std' :standardize(raw),
'norm' :normalize(raw),
'scale' :rescale(raw),
'center':center(raw)}
for t in ptypes:
results[t].update({n:partial[t]})
return results
def delta_percent(a, b, warnings=False):
'''(float, float) => float
    Return the difference in percentage between a and b.
If the result is 0.0 return 1e-09 instead.
>>> delta_percent(20,22)
10.0
>>> delta_percent(2,20)
900.0
>>> delta_percent(1,1)
1e-09
>>> delta_percent(10,9)
-10.0
'''
#np.seterr(divide='ignore', invalid='ignore')
try:
x = ((float(b)-a) / abs(a))*100
if x == 0.0:
return 0.000000001 #avoid -inf
else:
return x
except Exception as e:
if warnings:
print(f'Exception raised by delta_percent(): {e}')
return 0.000000001 #avoid -inf
def is_similar(array1,array2,t=0.1):
'''(array, array, float) => bool
Return True if all the points of two arrays are no more than t apart.
'''
if len(array1) != len(array2):
return False
for i,n in enumerate(array1):
if abs(n-array2[i]) <= t:
pass
else:
return False
return True
def cluster_patterns(pattern_list,t):
''' ([array, array, ...], float) => dict
Return a dict having as keys the idx of patterns in pattern_list
and as values the idx of the similar patterns.
"t" is the inverse of a similarity threshold,
i.e. the max discrepancy between the value of array1[i] and array2[i].
    If no similar patterns are found, the value is assigned to an empty list.
>>> a = [1,2,3,4,5,6,5,4,3,2,1]
>>> a1 = [n+1 for n in a]
>>> a2 = [n+5 for n in a]
>>> a3 = [n+6 for n in a]
>>> patterns = [a,a1,a2,a3]
>>> cluster_patterns(patterns,t=2)
{0: [1], 1: [0], 2: [3], 3: [2]}
>>> cluster_patterns(patterns,t=5)
{0: [1, 2], 1: [0, 2, 3], 2: [0, 1, 3], 3: [1, 2]}
>>> cluster_patterns(patterns,t=0.2)
{0: [], 1: [], 2: [], 3: []}
'''
result = {}
for idx, array1 in enumerate(pattern_list):
result.update({idx:[]})
for i,array2 in enumerate(pattern_list):
if i != idx:
if is_similar(array1,array2,t=t):
result[idx].append(i)
#print 'clusters:',len([k for k,v in result.iteritems() if len(v)])
return result
def stamp_to_date(stamp,time='utc'):
    '''(int_or_float, str) => datetime.datetime
Convert UNIX timestamp to UTC or Local Time
>>> stamp = 1477558868.93
    >>> print(stamp_to_date(stamp,time='utc'))
    2016-10-27 09:01:08.930000
    >>> print(stamp_to_date(int(stamp),time='utc'))
2016-10-27 09:01:08
>>> stamp_to_date(stamp,time='local')
datetime.datetime(2016, 10, 27, 11, 1, 8, 930000)
'''
if time.lower() == 'utc':
return datetime.datetime.utcfromtimestamp(stamp)
elif time.lower() == 'local':
return datetime.datetime.fromtimestamp(stamp)
else:
raise ValueError('"time" must be "utc" or "local"')
def future_value(interest,period,cash):
'''(float, int, int_or_float) => float
Return the future value obtained from an amount of cash
    growing with a fixed interest over a period of time.
>>> future_value(0.5,1,1)
1.5
>>> future_value(0.1,10,100)
259.37424601
'''
if not 0 <= interest <= 1:
raise ValueError('"interest" must be a float between 0 and 1')
for d in range(period):
cash += cash * interest
return cash
def entropy(sequence, verbose=False):
'''(string, bool) => float
Return the Shannon Entropy of a string.
Calculated as the minimum average number of
bits per symbol required for encoding the string.
The theoretical limit for data compression:
Shannon Entropy of the string * string length
'''
letters = list(sequence)
alphabet = list(set(letters)) # list of symbols in the string
# calculate the frequency of each symbol in the string
frequencies = []
for symbol in alphabet:
ctr = 0
for sym in letters:
if sym == symbol:
ctr += 1
frequencies.append(float(ctr) / len(letters))
# Shannon entropy
ent = 0.0
for freq in frequencies:
ent = ent + freq * math.log(freq, 2)
ent = -ent
if verbose:
print('Input string:')
print(sequence)
print()
print('Alphabet of symbols in the string:')
print(alphabet)
print()
print('Frequencies of alphabet symbols:')
print(frequencies)
print()
print('Shannon entropy:')
print(ent)
print('Minimum number of bits required to encode each symbol:')
print(int(math.ceil(ent)))
return ent
def quick_entropy(sequence):
    '''string => float
Return the Shannon Entropy of a string.
Compact version of entropy()
Calculated as the minimum average number of bits per symbol
required for encoding the string.
The theoretical limit for data compression:
Shannon Entropy of the string * string length.
'''
alphabet = set(sequence) # list of symbols in the string
# calculate the frequency of each symbol in the string
frequencies = []
for symbol in alphabet:
frequencies.append(sequence.count(symbol) / len(sequence))
# Shannon entropy
ent = 0.0
for freq in frequencies:
ent -= freq * math.log(freq, 2)
return ent
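# Worked example (added): two equiprobable symbols need exactly 1 bit each.
#
#     >>> quick_entropy('AABB')
#     1.0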
def percent_of(total, fraction):
'''(int_or_float,int_or_float) => float
Return the percentage of 'fraction' in 'total'.
Examples:
percent_of(150, 75)
>>> 50.0
percent_of(30, 90)
>>> 300.0
'''
assert total > 0
    if np.isnan(total) or np.isnan(fraction):
        return np.nan
return (100*fraction)/total
def buzz(sequence, noise=0.01):
'''(string,float) => string
Return a sequence with some random noise.
'''
if not noise:
return sequence
    bits = list(set(sequence)) + ['del','dup']
r = ''
for char in sequence:
if random.random() <= noise:
b = random.sample(bits,1)[0]
if b == 'del':
continue
elif b == 'dup':
r += 2*char
else:
r += b
else:
r += char
return r
def simple_consensus(aligned_sequences_file):
'''file => string
Return the consensus of a series of fasta sequences aligned with muscle.
'''
# Generate consensus from Muscle alignment
sequences = []
seq = False
with open(aligned_sequences_file,'r') as f:
for line in f:
if line.startswith('\n'):
continue
if line.startswith('>'):
if seq:
sequences.append(seq)
seq = ''
else:
seq += line.strip()
sequences.append(seq)
    #check if all sequences have the same length
for seq in sequences:
assert len(seq) == len(sequences[0])
#compute consensus by majority vote
consensus = ''
for i in range(len(sequences[0])):
char_count = Counter()
for seq in sequences:
char_count.update(seq[i])
consensus += char_count.most_common()[0][0]
return consensus.replace('-','')
def print_sbar(n,m,s='|#.|',size=30,message=''):
'''(int,int,string,int) => None
    Print a progress bar using the symbols in 's'.
Example:
range_limit = 1000
for n in range(range_limit):
print_sbar(n+1,m=range_limit)
time.sleep(0.1)
'''
#adjust to bar size
if m != size:
n =(n*size)/m
m = size
#calculate ticks
_a = int(n)*s[1]+(int(m)-int(n))*s[2]
_b = round(n/(int(m))*100,1)
#adjust overflow
if _b >= 100:
_b = 100.0
#to stdout
sys.stdout.write(f'\r{message}{s[0]}{_a}{s[3]} {_b}% ')
sys.stdout.flush()
def get_hash(a_string,algorithm='md5'):
'''str => str
Return the hash of a string calculated using various algorithms.
.. code-block:: python
>>> get_hash('prova','md5')
'189bbbb00c5f1fb7fba9ad9285f193d1'
>>> get_hash('prova','sha256')
'6258a5e0eb772911d4f92be5b5db0e14511edbe01d1d0ddd1d5a2cb9db9a56ba'
'''
if algorithm == 'md5':
return hashlib.md5(a_string.encode()).hexdigest()
elif algorithm == 'sha256':
return hashlib.sha256(a_string.encode()).hexdigest()
else:
raise ValueError('algorithm {} not found'.format(algorithm))
def get_first_transcript_by_gene_name(gene_name):
'''str => str
    Return the id of the main transcript for a given gene.
The data is from http://grch37.ensembl.org/
'''
data = EnsemblRelease(75)
gene = data.genes_by_name(gene_name)
gene_id = str(gene[0]).split(',')[0].split('=')[-1]
gene_location = str(gene[0]).split('=')[-1].strip(')')
url = 'http://grch37.ensembl.org/Homo_sapiens/Gene/Summary?db=core;g={};r={}'.format(gene_id,gene_location)
    for line in urlopen(url):
        line = line.decode('utf-8')  # urlopen yields bytes in Python 3
        if '<tbody><tr><td class="bold">' in line:
            return line.split('">')[2].split('</a>')[0]
def get_exons_coord_by_gene_name(gene_name):
'''str => OrderedDict({'exon_id':[coordinates]})
    Return an OrderedDict having as key the exon_id
    and as value a tuple containing the genomic coordinates ('chr',start,stop).
    '''
    data = EnsemblRelease(75)
    gene = data.genes_by_name(gene_name)
gene_id = str(gene[0]).split(',')[0].split('=')[-1]
gene_location = str(gene[0]).split('=')[-1].strip(')')
gene_transcript = get_first_transcript_by_gene_name(gene_name).split('.')[0]
table = OrderedDict()
for exon_id in data.exon_ids_of_gene_id(gene_id):
exon = data.exon_by_id(exon_id)
coordinates = (exon.contig, exon.start, exon.end)
table.update({exon_id:coordinates})
return table
#NOTE: defined twice; this second definition shadows the one above at runtime.
def get_exons_coord_by_gene_name(gene_name):
'''string => OrderedDict
.. code-block:: python
>>> table = get_exons_coord_by_gene_name('TP53')
>>> for k,v in table.items():
... print(k,v)
ENSE00002419584 ['7,579,721', '7,579,700']
'''
data = EnsemblRelease(75)
gene = data.genes_by_name(gene_name)
gene_id = str(gene[0]).split(',')[0].split('=')[-1]
gene_location = str(gene[0]).split('=')[-1].strip(')')
gene_transcript = get_first_transcript_by_gene_name(gene_name).split('.')[0]
url = 'http://grch37.ensembl.org/Homo_sapiens/Transcript/Exons?db=core;g={};r={};t={}'.format(gene_id,gene_location,gene_transcript)
str_html = get_html(url)
html = ''
for line in str_html.split('\n'):
try:
#print line
html += str(line)+'\n'
except UnicodeEncodeError:
pass
blocks = html.split('\n')
table = OrderedDict()
for exon_id in data.exon_ids_of_gene_id(gene_id):
for i,txt in enumerate(blocks):
if exon_id in txt:
if exon_id not in table:
table.update({exon_id:[]})
for item in txt.split('<td style="width:10%;text-align:left">')[1:-1]:
table[exon_id].append(item.split('</td>')[0])
return table
def split_overlap(seq, size, overlap, is_dataframe=False):
'''(seq,int,int) => [[...],[...],...]
Split a sequence into chunks of a specific size and overlap.
Works also on strings!
It is very efficient for short sequences (len(seq()) <= 100).
Set "is_dataframe=True" to split a pandas.DataFrame
Examples:
>>> split_overlap(seq=list(range(10)),size=3,overlap=2)
[[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9]]
>>> split_overlap(seq=range(10),size=3,overlap=2)
[range(0, 3), range(1, 4), range(2, 5), range(3, 6), range(4, 7), range(5, 8), range(6, 9), range(7, 10)]
'''
if size < 1 or overlap < 0:
        raise ValueError('size must be >= 1 and overlap >= 0')
result = []
if is_dataframe:
while True:
if len(seq) <= size:
result.append(seq)
return result
else:
result.append(seq.iloc[:size])
seq = seq.iloc[size-overlap:]
else:
while True:
if len(seq) <= size:
result.append(seq)
return result
else:
result.append(seq[:size])
seq = seq[size-overlap:]
def split_overlap_long(seq, size, overlap, is_dataframe=False):
'''(seq,int,int) => generator
Split a sequence into chunks of a specific size and overlap.
Return a generator. It is very efficient for long sequences (len(seq()) > 100).
https://stackoverflow.com/questions/48381870/a-better-way-to-split-a-sequence-in-chunks-with-overlaps
Set "is_dataframe=True" to split a pandas.DataFrame
Examples:
>>> split_overlap_long(seq=list(range(10)),size=3,overlap=2)
<generator object split_overlap_long at 0x10bc49d58>
>>> list(split_overlap_long(seq=list(range(10)),size=3,overlap=2))
[[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9]]
>>> list(split_overlap_long(seq=range(10),size=3,overlap=2))
[range(0, 3), range(1, 4), range(2, 5), range(3, 6), range(4, 7), range(5, 8), range(6, 9), range(7, 10)]
'''
if size < 1 or overlap < 0:
raise ValueError('size must be >= 1 and overlap >= 0')
if is_dataframe:
for i in range(0, len(seq) - overlap, size - overlap):
yield seq.iloc[i:i + size]
else:
for i in range(0, len(seq) - overlap, size - overlap):
yield seq[i:i + size]
def itr_split_overlap(iterable, size, overlap):
'''(iterable,int,int) => generator
    Similar to split_overlap_long() but it works on any iterable.
    In case of long sequences, split_overlap_long() is more efficient
    but this function can handle potentially infinite iterables using deque().
    https://stackoverflow.com/questions/48381870/a-better-way-to-split-a-sequence-in-chunks-with-overlaps
    Warning: for range() and similar, it behaves differently than split_overlap() and split_overlap_long()
Examples:
>>> list(itr_split_overlap(iterable=range(10),size=3,overlap=2))
[(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6), (5, 6, 7), (6, 7, 8), (7, 8, 9)]
'''
if size < 1 or overlap < 0:
raise ValueError('size must be >= 1 and overlap >= 0')
itr = iter(iterable)
buf = deque(islice(itr, size), maxlen=size)
chunk = None
for chunk in iter(lambda: tuple(islice(itr, size - overlap)), ()):
yield tuple(buf)
buf.extend(chunk)
rest = tuple(buf)
if chunk:
rest = rest[size - overlap - len(chunk):]
yield rest
def reorder_dict(d, keys):
'''(dict,list) => OrderedDict
Change the order of a dictionary's keys
without copying the dictionary (save RAM!).
Return an OrderedDict.
'''
tmp = OrderedDict()
for k in keys:
tmp[k] = d[k]
del d[k] #this saves RAM
return tmp
#test = OrderedDict({'1':1,'2':2,'4':4,'3':3})
#print(test)
#test2 = reorder_dict(test,['1','2','3','4'])
#print(test)
#print(test2)
#>>> OrderedDict([('1', 1), ('2', 2), ('4', 4), ('3', 3)])
#>>> OrderedDict()
#>>> OrderedDict([('1', 1), ('2', 2), ('3', 3), ('4', 4)])
def in_between(one_number, two_numbers):
    '''(int,list) => bool
    Return True if a number lies in between two other numbers,
    False otherwise.
    '''
    low, high = sorted(two_numbers)
    return low <= one_number <= high
def is_overlapping(svA, svB, limit=0.9):
    '''(list,list,float) => bool
    Check if two SV overlap for at least 90% (limit=0.9).
    svX = [chr1,brk1,chr2,brk2]
    '''
    # Step 1.
    # Select the breaks in order to have lower coordinates first
    if int(svA[1]) <= int(svA[3]):
        chr1_A = svA[0]
        brk1_A = int(svA[1])
        chr2_A = svA[2]
        brk2_A = int(svA[3])
    else:
        chr2_A = svA[0]
        brk2_A = int(svA[1])
        chr1_A = svA[2]
        brk1_A = int(svA[3])
    if int(svB[1]) <= int(svB[3]):
        chr1_B = svB[0]
        brk1_B = int(svB[1])
        chr2_B = svB[2]
        brk2_B = int(svB[3])
    else:
        chr2_B = svB[0]
        brk2_B = int(svB[1])
        chr1_B = svB[2]
        brk1_B = int(svB[3])
# Step 2.
# Determine who is the longest
# Return False immediately if the chromosomes are not the same.
    # This computation is reasonable only for sv on the same chromosome.
if chr1_A == chr2_A and chr1_B == chr2_B and chr1_A == chr1_B:
len_A = brk2_A - brk1_A
len_B = brk2_B - brk1_B
if len_A >= len_B:
len_reference = len_A
len_sample = len_B
else:
len_reference = len_B
len_sample = len_A
limit = round(len_reference * limit) # this is the minimum overlap the two sv need to share
# to be considered overlapping
# if the sample is smaller then the limit then there is no need to go further.
# the sample segment will never share enough similarity with the reference.
if len_sample < limit:
return False
else:
return False
# Step 3.
# Determine if there is an overlap
    # >> There is an overlap if at least one of the breaks of an sv is in between the two breaks of the other sv.
overlapping = False
for b in [brk1_A,brk2_A]:
if in_between(b,[brk1_B,brk2_B]):
overlapping = True
for b in [brk1_B,brk2_B]:
if in_between(b,[brk1_A,brk2_A]):
overlapping = True
if not overlapping:
return False
# Step 4.
    # Determine the length of the overlapping part
# easy case: if the points are all different then, if I sort the points,
# the overlap is the region between points[1] and points[2]
# |-----------------| |---------------------|
# |--------------| |-------------|
points = sorted([brk1_A,brk2_A,brk1_B,brk2_B])
if len(set(points)) == 4: # the points are all different
overlap = points[2]-points[1]
elif len(set(points)) == 3: #one point is in common
# |-----------------|
# |--------------|
if points[0] == points[1]:
overlap = points[3]-points[2]
# |---------------------|
# |-------------|
if points[2] == points[3]:
overlap = points[2]-points[1]
# |-----------------|
# |-------------|
if points[1] == points[2]:
return False # there is no overlap
else:
# |-----------------|
# |-----------------|
return True # if two points are in common, then it is the very same sv
if overlap >= limit:
return True
else:
return False
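# Worked check (added): two 1 Mb segments on chr1 sharing 950 kb (95%) of
# their span satisfy the default 90% overlap limit.
#
#     >>> svA = ['1', 1000000, '1', 2000000]
#     >>> svB = ['1', 1050000, '1', 2050000]
#     >>> is_overlapping(svA, svB, limit=0.9)
#     True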
def load_obj(file):
'''
Load a pickled object.
Be aware that pickle is version dependent,
i.e. objects dumped in Py3 cannot be loaded with Py2.
'''
try:
with open(file,'rb') as f:
obj = pickle.load(f)
return obj
    except Exception:
return False
def save_obj(obj, file):
'''
Dump an object with pickle.
Be aware that pickle is version dependent,
i.e. objects dumped in Py3 cannot be loaded with Py2.
'''
try:
with open(file,'wb') as f:
pickle.dump(obj, f)
print('Object saved to {}'.format(file))
return True
    except Exception:
print('Error: Object not saved...')
return False
#save_obj(hotspots_review,'hotspots_review_CIS.txt')
def query_encode(chromosome, start, end):
'''
Queries ENCODE via http://promoter.bx.psu.edu/ENCODE/search_human.php
Parses the output and returns a dictionary of CIS elements found and the relative location.
'''
## Regex setup
re1='(chr{})'.format(chromosome) # The specific chromosome
re2='(:)' # Any Single Character ':'
re3='(\\d+)' # Integer
re4='(-)' # Any Single Character '-'
re5='(\\d+)' # Integer
rg = re.compile(re1+re2+re3+re4+re5,re.IGNORECASE|re.DOTALL)
## Query ENCODE
std_link = 'http://promoter.bx.psu.edu/ENCODE/get_human_cis_region.php?assembly=hg19&'
query = std_link + 'chr=chr{}&start={}&end={}'.format(chromosome,start,end)
print(query)
html_doc = urlopen(query)
html_txt = BeautifulSoup(html_doc, 'html.parser').get_text()
data = html_txt.split('\n')
## Parse the output
parsed = {}
coordinates = [i for i, item_ in enumerate(data) if item_.strip() == 'Coordinate']
elements = [data[i-2].split(' ')[-1].replace(': ','') for i in coordinates]
blocks = [item for item in data if item[:3] == 'chr']
print(elements)
try:
i = 0
for item in elements:
#print(i)
try:
txt = blocks[i]
#print(txt)
m = rg.findall(txt)
bins = [''.join(item) for item in m]
parsed.update({item:bins})
i += 1
print('found {}'.format(item))
except:
print('the field {} was empty'.format(item))
return parsed
except Exception as e:
        print('ENCODE query failed on chr{}:{}-{}'.format(chromosome, start, end))
print(e)
return False
def compare_patterns(pattA, pattB):
'''(np.array, np.array) => float
Compare two arrays point by point.
Return a "raw similarity score".
    You may want to center the two patterns before comparing them.
>>> a = np.array([1,2,3,4,5,6,5,4,3,2,1])
>>> a1 = np.array([n+0.1 for n in a])
>>> a2 = np.array([n+1 for n in a])
>>> a3 = np.array([n+10 for n in a])
>>> compare_patterns(a,a)
99.999999999
>>> compare_patterns(a,a1)
95.69696969696969
>>> compare_patterns(a,a2)
56.96969696969697
>>> compare_patterns(a2,a)
72.33766233766234
>>> compare_patterns(center(a),center(a2))
99.999999999999943
>>> compare_patterns(a,a3)
-330.3030303030303
'''
if len(pattA) == len(pattB):
deltas = []
for i,pA in enumerate(pattA):
deltas.append(100 - abs(delta_percent(pA,pattB[i])))
similarity = sum(deltas)/len(pattA)
return similarity
else:
raise ValueError('"pattA" and "pattB" must have same length.')
def compare_bins(dict_A,dict_B):
'''(dict,dict) => dict, dict, dict
Compares two dicts of bins.
Returns the shared elements, the unique elements of A and the unique elements of B.
    The dicts are expected to be shaped like this:
OrderedDict([('1',
['23280000-23290000',
'24390000-24400000',
...]),
('2',
['15970000-15980000',
'16020000-16030000',
...]),
('3',
['610000-620000',
'3250000-3260000',
'6850000-6860000',
...])}
'''
chrms = [str(x) for x in range(1,23)] + ['X','Y']
shared = OrderedDict()
unique_A = OrderedDict()
unique_B = OrderedDict()
for k in chrms:
shared.update({k:[]})
unique_A.update({k:[]})
unique_B.update({k:[]})
if k in dict_A and k in dict_B:
for bin_ in dict_A[k]:
if bin_ in dict_B[k]:
shared[k].append(bin_)
else:
unique_A[k].append(bin_)
for bin_ in dict_B[k]:
if bin_ not in shared[k]:
unique_B[k].append(bin_)
        elif k in dict_B: # k is in dict_B only
            unique_B[k] = [bin_ for bin_ in dict_B[k]]
        elif k in dict_A: # k is in dict_A only
            unique_A[k] = [bin_ for bin_ in dict_A[k]]
        # chromosomes missing from both dicts keep their empty lists
return shared, unique_A, unique_B
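# Minimal usage sketch for compare_bins (toy dicts, made up for illustration):
# A = OrderedDict({'1': ['100-200', '300-400']})
# B = OrderedDict({'1': ['300-400', '500-600']})
# shared, only_A, only_B = compare_bins(A, B)
# shared['1'] => ['300-400'] ; only_A['1'] => ['100-200'] ; only_B['1'] => ['500-600']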
#To manage heavy files
def yield_file(infile):
with open(infile, 'r') as f:
for line in f:
            if line.strip() != '' and line[0] not in ['#','\n',' ']:
yield line.strip()
#Download a sequence from NCBI Entrez
def sequence_from_coordinates(chromosome, strand, start, end, ref_genome=37):
'''
    Download the nucleotide sequence for the given genomic coordinates.
'''
Entrez.email = "a.marcozzi@umcutrecht.nl" # Always tell NCBI who you are
if int(ref_genome) == 37:
#GRCh37 from http://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.25/#/def_asm_Primary_Assembly
NCBI_IDS = {'1':'NC_000001.10','2':'NC_000002.11','3':'NC_000003.11','4':'NC_000004.11',
'5':'NC_000005.9','6':'NC_000006.11','7':'NC_000007.13','8':'NC_000008.10',
'9':'NC_000009.11','10':'NC_000010.10','11':'NC_000011.9','12':'NC_000012.11',
'13':'NC_000013.10','14':'NC_000014.8','15':'NC_000015.9','16':'NC_000016.9',
'17':'NC_000017.10','18':'NC_000018.9','19':'NC_000019.9','20':'NC_000020.10',
'21':'NC_000021.8','22':'NC_000022.10','X':'NC_000023.10','Y':'NC_000024.9'}
elif int(ref_genome) == 38:
#GRCh38 from https://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.38
NCBI_IDS = {'1':'NC_000001.11','2':'NC_000002.12','3':'NC_000003.12','4':'NC_000004.12',
'5':'NC_000005.10','6':'NC_000006.12','7':'NC_000007.14','8':'NC_000008.11',
'9':'NC_000009.12','10':'NC_000010.11','11':'NC_000011.10','12':'NC_000012.12',
'13':'NC_000013.11','14':'NC_000014.9','15':'NC_000015.10','16':'NC_000016.10',
'17':'NC_000017.11','18':'NC_000018.10','19':'NC_000019.10','20':'NC_000020.11',
'21':'NC_000021.9','22':'NC_000022.11','X':'NC_000023.11','Y':'NC_000024.10'}
try:
handle = Entrez.efetch(db="nucleotide",
id=NCBI_IDS[str(chromosome)],
rettype="fasta",
strand=strand, #"1" for the plus strand and "2" for the minus strand.
seq_start=start,
seq_stop=end)
record = SeqIO.read(handle, "fasta")
handle.close()
sequence = str(record.seq)
return sequence
except ValueError:
print('ValueError: no sequence found in NCBI')
return False
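# Usage sketch (live NCBI call; requires Biopython's Entrez and SeqIO, imported elsewhere in this file):
# seq = sequence_from_coordinates('Y', 1, 2655000, 2655200)  # coordinates are illustrative only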
#GC content calculator
def gc_content(sequence, percent=True):
'''
Return the GC content of a sequence.
'''
sequence = sequence.upper()
g = sequence.count("G")
c = sequence.count("C")
t = sequence.count("T")
a = sequence.count("A")
gc_count = g+c
total_bases_count = g+c+t+a
if total_bases_count == 0:
print('Error in gc_content(sequence): sequence may contain only Ns')
return None
    try:
        gc_fraction = float(gc_count) / total_bases_count
    except Exception as e:
        print(e)
        print(sequence)
        return None
if percent:
return gc_fraction * 100
else:
return gc_fraction
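# Quick sanity check:
# gc_content('GGCCAT')                 # => 66.66666666666666 (percent)
# gc_content('GGCCAT', percent=False)  # => 0.6666666666666666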
##Flexibility calculator##
#requires stabflex3.py
#Endpoint function to calculate the flexibility of a given sequence
def dna_flex(sequence, window_size=500, step_size=100, verbose=False):
'''(str,int,int,bool) => list_of_tuples
Calculate the flexibility index of a sequence.
Return a list of tuples.
Each tuple contains the bin's coordinates
and the calculated flexibility of that bin.
Example:
dna_flex(seq_a,500,100)
>>> [('0-500', 9.7),('100-600', 9.77),...]
'''
if verbose:
print("Algorithm window size : %d" % window_size)
print("Algorithm window step : %d" % step_zize)
print("Sequence has {} bases".format(len(self.seq)))
algorithm = myFlex(sequence,window_size,step_zize)
flexibility_result = algorithm.analyse(flexibility_data)
return flexibility_result.report(verbose)
##Repeats scanner##
#G-quadruplex
def g4_scanner(sequence):
'''
G-quadruplex motif scanner.
Scan a sequence for the presence of the regex motif:
[G]{3,5}[ACGT]{1,7}[G]{3,5}[ACGT]{1,7}[G]{3,5}[ACGT]{1,7}[G]{3,5}
Reference: http://www.ncbi.nlm.nih.gov/pmc/articles/PMC1636468/
Return two callable iterators.
The first one contains G4 found on the + strand.
The second contains the complementary G4 found on the + strand, i.e. a G4 in the - strand.
'''
#forward G4
pattern_f = '[G]{3,5}[ACGT]{1,7}[G]{3,5}[ACGT]{1,7}[G]{3,5}[ACGT]{1,7}[G]{3,5}'
result_f = re.finditer(pattern_f, sequence)
#reverse G4
pattern_r = '[C]{3,5}[ACGT]{1,7}[C]{3,5}[ACGT]{1,7}[C]{3,5}[ACGT]{1,7}[C]{3,5}'
result_r = re.finditer(pattern_r, sequence)
return result_f, result_r
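# Usage sketch on a made-up toy sequence carrying one G4 motif on the + strand:
# f, r = g4_scanner('TTGGGAGGGTGGGAGGGTT')
# [m.group() for m in f]  # => ['GGGAGGGTGGGAGGG']
# [m.group() for m in r]  # => [] (no C-rich complement motif present)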
#Repeat-masker
def parse_RepeatMasker(infile="RepeatMasker.txt", rep_type='class'):
'''
Parse RepeatMasker.txt and return a dict of bins for each chromosome
and a set of repeats found on that bin.
dict = {'chromosome':{'bin':set(repeats)}}
'''
chromosomes = [str(c) for c in range(1,23)]+['X','Y']
result = {}
if rep_type == 'name':
idx = 10 #repName
elif rep_type == 'class':
idx = 11 #repClass
elif rep_type == 'family':
idx = 12 #repFamily
else:
raise NameError('Invalid rep_type "{}". Expected "class","family" or "name"'.format(rep_type))
#RepeatMasker.txt is around 500MB!
for line in yield_file(infile):
data = line.split('\t')
chromosome = data[5].replace('chr','')
start = data[6]
end = data[7]
bin_ = '{}-{}'.format(start,end)
repeat = data[idx].replace('?','')
if chromosome in chromosomes:
if chromosome not in result:
result.update({chromosome:{bin_:set([repeat])}})
else:
if bin_ not in result[chromosome]:
result[chromosome].update({bin_:set([repeat])})
else:
result[chromosome][bin_].add(repeat)
return result
def next_day(d='2012-12-04'):
'''Return the next day in the calendar.'''
Y,M,D = d.split('-')
t = datetime.date(int(Y),int(M),int(D))
_next = t + datetime.timedelta(1)
return str(_next)
# next_day('2012-12-31')
# >>> '2013-01-01'
def previous_day(d='2012-12-04'):
'''Return the previous day in the calendar.'''
Y,M,D = d.split('-')
t = datetime.date(int(Y),int(M),int(D))
_prev = t + datetime.timedelta(-1)
return str(_prev)
# previous_day('2013-01-01')
# >>> '2012-12-31'
def intersect(list1, list2):
'''(list,list) => list
    Return the intersection of two lists, i.e. the items in common.
'''
return [item for item in list2 if item in list1]
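# intersect([1, 2, 3], [2, 3, 4])  # => [2, 3]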
def annotate_fusion_genes(dataset_file):
'''
Uses FusionGenes_Annotation.pl to find fusion genes in the dataset.
Generates a new file containing all the annotations.
'''
start = time.time()
print('annotating', dataset_file, '...')
raw_output = run_perl('FusionGenes_Annotation.pl', dataset_file)
raw_list = str(raw_output)[2:].split('\\n')
outfile = dataset_file[:-4] + '_annotated.txt'
with open(outfile, 'w') as outfile:
line_counter = 0
header = ['##ID', 'ChrA', 'StartA', 'EndA', 'ChrB', 'StartB', 'EndB', 'CnvType', 'Orientation',
'GeneA', 'StrandA', 'LastExonA', 'TotalExonsA', 'PhaseA',
'GeneB', 'StrandB', 'LastExonB', 'TotalExonsB', 'PhaseB',
'InFrame', 'InPhase']
outfile.write(list_to_line(header, '\t') + '\n')
for item in raw_list:
cleaned_item = item.split('\\t')
            if len(cleaned_item) > 10: # FusionGenes_Annotation.pl returns the data twice; we keep the annotated one.
outfile.write(list_to_line(cleaned_item, '\t') + '\n')
line_counter += 1
    print('successfully annotated',line_counter, 'breakpoints from', dataset_file, 'in', time.time()-start, 'seconds')
# track threads
try:
global running_threads
running_threads -= 1
except:
pass
# dataset_file = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/breaks/Decipher-DeletionsOnly.txt'
# annotate_fusion_genes(dataset_file)
def blastn(input_fasta_file, db_path='/Users/amarcozzi/Desktop/BLAST_DB/',db_name='human_genomic',out_file='blastn_out.xml'):
'''
Run blastn on the local machine using a local database.
Requires NCBI BLAST+ to be installed. http://blast.ncbi.nlm.nih.gov/Blast.cgi?CMD=Web&PAGE_TYPE=BlastDocs&DOC_TYPE=Download
Takes a fasta file as input and writes the output in an XML file.
'''
db = db_path + db_name
blastn_cline = NcbiblastnCommandline(query=input_fasta_file, db=db, evalue=0.001, outfmt=5, out=out_file)
print(blastn_cline)
stdout, stderr = blastn_cline()
# to be tested
def check_line(line, unexpected_char=['\n','',' ','#']):
    '''
    Check if the line starts with an unexpected character.
    If so, return False, else True.
    '''
    # Note: every string "starts with" '', so the empty string must be
    # special-cased, otherwise check_line() would always return False.
    for item in unexpected_char:
        if item == '':
            if line == '':
                return False
        elif line.startswith(item):
            return False
    return True
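# check_line('chr1\t100\t200')  # => True
# check_line('# a comment')     # => False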
def dice_coefficient(sequence_a, sequence_b):
'''(str, str) => float
    Return the Dice coefficient of two sequences.
'''
a = sequence_a
b = sequence_b
if not len(a) or not len(b): return 0.0
# quick case for true duplicates
if a == b: return 1.0
# if a != b, and a or b are single chars, then they can't possibly match
if len(a) == 1 or len(b) == 1: return 0.0
    # list comprehension, preferred over list.append()
a_bigram_list = [a[i:i+2] for i in range(len(a)-1)]
b_bigram_list = [b[i:i+2] for i in range(len(b)-1)]
a_bigram_list.sort()
b_bigram_list.sort()
# assignments to save function calls
lena = len(a_bigram_list)
lenb = len(b_bigram_list)
# initialize match counters
matches = i = j = 0
while (i < lena and j < lenb):
if a_bigram_list[i] == b_bigram_list[j]:
matches += 2
i += 1
j += 1
elif a_bigram_list[i] < b_bigram_list[j]:
i += 1
else:
j += 1
score = float(matches)/float(lena + lenb)
return score
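# Classic textbook example:
# dice_coefficient('night', 'nacht')  # => 0.25 (only the 'ht' bigram is shared)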
def find_path(graph, start, end, path=[]):
'''
Find a path between two nodes in a graph.
Works on graphs like this:
graph ={'A': ['B', 'C'],
'B': ['C', 'D'],
'C': ['D'],
'D': ['C'],
'E': ['F'],
'F': ['C']}
'''
path = path + [start]
if start == end:
return path
    if start not in graph:  # dict.has_key() is Python 2 only
return None
for node in graph[start]:
if node not in path:
newpath = find_path(graph, node, end, path)
if newpath: return newpath
return None
def find_all_paths(graph, start, end, path=[]):
'''
Find all paths between two nodes of a graph.
Works on graphs like this:
graph ={'A': ['B', 'C'],
'B': ['C', 'D'],
'C': ['D'],
'D': ['C'],
'E': ['F'],
'F': ['C']}
'''
path = path + [start]
if start == end:
return [path]
    if start not in graph:  # dict.has_key() is Python 2 only
return []
paths = []
for node in graph[start]:
if node not in path:
newpaths = find_all_paths(graph, node, end, path)
for newpath in newpaths:
paths.append(newpath)
return paths
def find_shortest_path(graph, start, end, path=[]):
'''
Find the shortest path between two nodes of a graph.
Works on graphs like this:
graph ={'A': ['B', 'C'],
'B': ['C', 'D'],
'C': ['D'],
'D': ['C'],
'E': ['F'],
'F': ['C']}
'''
path = path + [start]
if start == end:
return path
    if start not in graph:  # dict.has_key() is Python 2 only
return None
shortest = None
for node in graph[start]:
if node not in path:
newpath = find_shortest_path(graph, node, end, path)
if newpath:
if not shortest or len(newpath) < len(shortest):
shortest = newpath
return shortest
# ##
# graph = {'A': ['B', 'C'],
# 'B': ['C', 'D'],
# 'C': ['D'],
# 'D': ['C'],
# 'E': ['F'],
# 'F': ['C']}
# >>> find_path(graph, 'A', 'D')
# ['A', 'B', 'C', 'D']
# >>> find_all_paths(graph, 'A', 'D')
# [['A', 'B', 'C', 'D'], ['A', 'B', 'D'], ['A', 'C', 'D']]
# >>> find_shortest_path(graph, 'A', 'D')
# ['A', 'C', 'D']
def gen_rnd_string(length):
'''
Return a string of uppercase and lowercase ascii letters.
'''
s = [l for l in string.ascii_letters]
random.shuffle(s)
s = ''.join(s[:length])
return s
def gene_synonyms(gene_name):
'''str => list()
Queries http://rest.genenames.org and returns a list of synonyms of gene_name.
Returns None if no synonym was found.
'''
result = []
headers = {'Accept': 'application/json'}
uri = 'http://rest.genenames.org'
path = '/search/{}'.format(gene_name)
target = urlparse(uri+path)
method = 'GET'
body = ''
h = http.Http()
response, content = h.request(
target.geturl(),
method,
body,
headers )
if response['status'] == '200':
# assume that content is a json reply
# parse content with the json module
data = json.loads(content.decode('utf8'))
for item in data['response']['docs']:
result.append(item['symbol'])
return result
else:
print('Error detected: ' + response['status'])
return None
#print(gene_synonyms('MLL3'))
def string_to_number(s):
'''
Convert a bytes string into a single number.
Example:
>>> string_to_number('foo bar baz')
147948829660780569073512294
'''
return int.from_bytes(s.encode(), 'little')
def number_to_string(n):
'''
Convert a number into a bytes string.
Example:
>>> number_to_string(147948829660780569073512294)
'foo bar baz'
'''
return n.to_bytes(math.ceil(n.bit_length() / 8), 'little').decode()
#x = 147948829660780569073512294
#number_to_string(x)
#>>> 'foo bar baz'
def determine_average_breaks_distance(dataset): # tested only for deletion/duplication
'''
Evaluate the average distance among breaks in a dataset.
'''
data = extract_data(dataset, columns=[1,2,4,5], verbose=False)
to_average = []
for item in data:
if item[0] == item[2]:
to_average.append(int(item[3])-int(item[1]))
return sum(to_average)/len(to_average)
#print(determine_average_breaks_distance('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/random/sorted/rnd_dataset_100_annotated_sorted.txt'))
def dict_overview(dictionary, how_many_keys, indent=False):
'''
    Prints out the first how_many_keys items of the target dictionary.
Useful to have a quick look at the structure of a dictionary.
'''
ks = list(islice(dictionary, how_many_keys))
for k in ks:
if indent:
print(f'{k}\n\t{dictionary[k]}')
else:
print(f'{k}\t{dictionary[k]}')
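# dict_overview({'a': 1, 'b': 2, 'c': 3}, 2)
# prints the first two key/value pairs:
# a	1
# b	2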
def download_human_genome(build='GRCh37', entrez_usr_email="A.E.vanvlimmeren@students.uu.nl"): #beta: works properly only for GRCh37
    '''
    Download the human genome from Entrez.
Save each chromosome in a separate txt file.
'''
Entrez.email = entrez_usr_email
#Last available version
NCBI_IDS = {'1':"NC_000001", '2':"NC_000002",'3':"NC_000003",'4':"NC_000004",
'5':"NC_000005",'6':"NC_000006",'7':"NC_000007", '8':"NC_000008",
'9':"NC_000009", '10':"NC_000010", '11':"NC_000011", '12':"NC_000012",
'13':"NC_000013",'14':"NC_000014", '15':"NC_000015", '16':"NC_000016",
'17':"NC_000017", '18':"NC_000018", '19':"NC_000019", '20':"NC_000020",
'21':"NC_000021", '22':"NC_000022", 'X':"NC_000023", 'Y':"NC_000024"}
#GRCh37 from http://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.25/#/def_asm_Primary_Assembly
    # keyed by chromosome so that CHR_LENGTHS can be looked up consistently below
    NCBI_IDS_GRCh37 = {'1':'NC_000001.10','2':'NC_000002.11','3':'NC_000003.11','4':'NC_000004.11',
                        '5':'NC_000005.9','6':'NC_000006.11','7':'NC_000007.13','8':'NC_000008.10',
                        '9':'NC_000009.11','10':'NC_000010.10','11':'NC_000011.9','12':'NC_000012.11',
                        '13':'NC_000013.10','14':'NC_000014.8','15':'NC_000015.9','16':'NC_000016.9',
                        '17':'NC_000017.10','18':'NC_000018.9','19':'NC_000019.9','20':'NC_000020.10',
                        '21':'NC_000021.8','22':'NC_000022.10','X':'NC_000023.10','Y':'NC_000024.9'}
CHR_LENGTHS_GRCh37 = { '1':249250621,'2' :243199373,'3' :198022430,'4' :191154276,
'5' :180915260,'6' :171115067,'7' :159138663,'8' :146364022,
'9' :141213431,'10':135534747,'11':135006516,'12':133851895,
'13':115169878,'14':107349540,'15':102531392,'16':90354753,
'17':81195210,'18':78077248,'19':59128983,'20':63025520,
'21':48129895,'22':51304566,'X' :155270560,'Y' :59373566}
if build == 'GRCh37':
NCBI_IDS = NCBI_IDS_GRCh37
CHR_LENGTHS = CHR_LENGTHS_GRCh37
else:
        print('This function only works with genome build GRCh37 for now...')
return False
    for chromosome, accession in NCBI_IDS.items():
        length = CHR_LENGTHS[chromosome] # CHR_LENGTHS is keyed by chromosome name, not by index
        sequence = False
        try:
            # Always tell NCBI who you are
            handle = Entrez.efetch(db="nucleotide",
                                id=accession,
                                rettype="fasta",
                                strand=1,
                                seq_start=1, #Entrez coordinates are 1-based
                                seq_stop=length) # this is the end of the chromosome
            record = SeqIO.read(handle, "fasta")
            handle.close()
            sequence = str(record.seq)
        except ValueError:
            print('ValueError: no sequence found in NCBI')
        if sequence: # skip writing if the download failed
            with open('sequence_{}.txt'.format(chromosome), 'w') as f:
                f.write(sequence)
def exponential_range(start=0,end=10000,base=10):
'''
    Generates a range of integers that grows exponentially.
    Example: list(exponential_range(0,100000,2))
Output :[0,
2,
4,
8,
16,
32,
64,
128,
256,
512,
1024,
2048,
4096,
8192,
16384,
32768,
65536]
'''
if end/base < base:
raise ValueError('"end" must be at least "base**2"')
result = []
new_start = start
new_end = base**2
new_base = base
while new_start < end:
result.append(range(new_start,new_end,new_base))
new_start = new_end
new_end = new_start*base
new_base = new_base*base
#print(result)
for item in result:
for i in item:
yield i
##list(exponential_range(0,100000,10))
def extract_data(infile, columns=[3,0,1,2,5], header='##', skip_lines_starting_with='#', data_separator='\t', verbose=False ):
'''
Extract data from a file. Returns a list of tuples.
Each tuple contains the data extracted from one line of the file
in the indicated columns and with the indicated order.
'''
extracted_data = []
header_list = []
header_flag = 0
line_counter = 0
with open(infile) as infile:
lines = infile.readlines()
for line in lines: # yield_file(infile) can be used instead
line_counter += 1
if line[:len(header)] == header: # checks the header
header_list = line_to_list(line[len(header):], data_separator)
header_flag += 1
if header_flag > 1:
                    raise ValueError('More than one line seems to contain the header identifier "' + header + '".')
elif line[0] == skip_lines_starting_with or line == '' or line == '\n': # skips comments and blank lines
pass
else:
list_ = line_to_list(line, data_separator)
reduced_list=[]
for item in columns:
reduced_list.append(list_[item])
extracted_data.append(tuple(reduced_list))
if verbose == True: # Prints out a brief summary
print('Data extracted from', infile)
print('Header =', header_list)
print('Total lines =', line_counter)
return extracted_data
# extract_data('tables/clinvarCnv.txt', columns=[3,0,1,2,5], header='##', skip_lines_starting_with='#', data_separator='\t', verbose=True)
def extract_Toronto(infile, outfile):
'''
Ad hoc function to extract deletions and duplications out of the Toronto Genetic Variants Database.
Returns a file ready to be annotated with FusionGenes_Annotation.pl .
'''
# Extract data from infile
# Columns are: ID, Chr, Start, End, CNV_Type
raw_data = extract_data(infile, columns=[0,1,2,3,5], verbose=True )
# Take only deletions and duplications
filtered_data = []
for data in raw_data:
if "deletion" in data or 'duplication' in data:
filtered_data.append(data)
    print('len(raw_data) :',len(raw_data))
print('len(filtered_data) :',len(filtered_data))
# Write filtered_data to a text file
header = ['##ID','ChrA','StartA','EndA','ChrB','StartB','EndB','CnvType','Orientation']
with open(outfile, 'w') as outfile:
outfile.write(list_to_line(header, '\t') + '\n')
for item in filtered_data:
if item[-1] == 'duplication':
orientation = 'HT'
elif item[-1] == 'deletion':
orientation = 'TH'
            else:
                print('ERROR: unable to determine "Orientation"...')
                continue # otherwise a stale/unbound orientation would be reused below
list_ = [item[0],item[1],item[2],item[2],item[1],item[3],item[3],item[-1].upper(),orientation]
outfile.write(list_to_line(list_, '\t') + '\n')
print('Done')
# infile = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/GRCh37_hg19_variants_2014-10-16.txt'
# outfile = infile[:-4]+'_DelDupOnly.txt'
# extract_Toronto(infile, outfile)
def extract_Decipher(infile, outfile):
'''
Ad hoc function to extract deletions and duplications out of the Decipher Database.
Returns a file ready to be annotated with FusionGenes_Annotation.pl .
'''
# Extract data from infile
# Columns are: ID, Chr, Start, End, CNV_Type(here expressed as "mean_ratio")
raw_data = extract_data(infile, columns=[0,3,1,2,4], verbose=True )
header = ['##ID','ChrA','StartA','EndA','ChrB','StartB','EndB','CnvType','Orientation']
with open(outfile, 'w') as outfile:
outfile.write(list_to_line(header, '\t') + '\n')
for item in raw_data:
# Convert mean_ratio to CnvType
if float(item[-1]) > 0:
CnvType = 'DUPLICATION'
orientation = 'HT'
elif float(item[-1]) < 0:
CnvType = 'DELETION'
orientation = 'TH'
            else:
                print('ERROR: unable to determine "Orientation"...')
                continue # otherwise CnvType/orientation would be stale or unbound below
# Write output
list_ = [item[0],item[1],item[2],item[2],item[1],item[3],item[3],CnvType,orientation]
outfile.write(list_to_line(list_, '\t') + '\n')
print('Done')
# infile = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/decipher-hg19_15-01-30.txt'
# outfile = infile[:-4]+'_DelDupOnly.txt'
# extract_Decipher(infile, outfile)
def extract_dgvMerged(infile, outfile):
'''
Ad hoc function to extract deletions and losses out of the dgvMerged database.
Returns a file ready to be annotated with FusionGenes_Annotation.pl .
'''
#original_header = '##bin chrom chromStart chromEnd name score strand thickStart thickEnd itemRgb varType reference pubMedId method platform mergedVariants supportingVariants sampleSize observedGains observedLosses cohortDescription genes samples'
# [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13] [14] [15] [16] [17] [18] [19] [20] [21] [22]
raw_data = extract_data(infile, columns=[4,1,2,3,10], header='##', skip_lines_starting_with='#', data_separator='\t', verbose=False )
# Take only deletions and losses
filtered_data = []
for data in raw_data:
if "Deletion" in data or 'Loss' in data:
filtered_data.append(data)
    print('len(raw_data) :',len(raw_data))
print('len(filtered_data) :',len(filtered_data))
# Write filtered_data to a text file
header = ['##ID','ChrA','StartA','EndA','ChrB','StartB','EndB','CnvType','Orientation']
with open(outfile, 'w') as outfile:
outfile.write(list_to_line(header, '\t') + '\n')
for item in filtered_data:
if item[-1] == 'Deletion' or item[-1] == 'Loss':
cnv_type = 'DELETION'
orientation = 'HT'
# elif item[-1] == 'deletion':
# orientation = 'TH'
            else:
                print('ERROR: unable to determine "Orientation"...')
                continue # otherwise cnv_type/orientation would be stale or unbound below
list_ = [item[0],item[1][3:],item[2],item[2],item[1][3:],item[3],item[3],cnv_type,orientation]
outfile.write(list_to_line(list_, '\t') + '\n')
print('Done')
# ## Extract deletions and Losses from dgvMerged
# folder = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/breaks'
# file_name = 'dgvMerged.txt'
# infile = folder + '/' + file_name
# outfile = folder + '/' + 'dgvMerged-DeletionsOnly.txt'
# extract_dgvMerged(infile, outfile)
# ## annotate
# dataset_file = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/breaks/dgvMerged-DeletionsOnly.txt'
# annotate_fusion_genes(dataset_file)
def fill_and_sort(pandas_chrSeries):
'''incomplete pandas.Series => complete and sorted pandas.Series
Given a pandas.Series in which the first argument is the chromosome name
and the second argument is a count " [('1', 61), ('3', 28), ..., ('X', 29)]"
This function returns a new (sorted by chromosome) series with the missing chromosome included as ('Chr_name',0).
This is useful when creating series out of subsets grouped by Chr.
    If the Chr does not contain any event, then it will be excluded from the subset.
    However, especially for plotting reasons, you may want to have ('Chr',0) in your list instead of a missing Chr.
Example.
> series = [('1', 61), ('3', 28), ..., ('X', 29)] # in this Series Chr_2 and Chr_Y are missing.
> fill_and_sort(series)
>>> [('1', 61), ('2',0), ('3', 28), ..., ('X', 29), ('Y',0)] # this Series have all the chromosomes
'''
# add missing ChrA
CHROMOSOMES = [str(c) for c in range(1,23)]+['X','Y']
chr_list = CHROMOSOMES[:]
complete_series = []
for item in pandas_chrSeries.iteritems():
chr_list.remove(item[0])
complete_series.append(item)
for item in chr_list:
complete_series.append((item,0))
# sort by chromosome
sorted_ = []
for item in CHROMOSOMES:
for _item in complete_series:
if _item[0]==item:
sorted_.append(_item[1])
return pd.Series(sorted_, index=CHROMOSOMES)
# counts = [50,9,45,6]
# pandas_chrSeries = pd.Series(counts, index=['1','4','X','10'])
# print(pandas_chrSeries)
# good_series = fill_and_sort(pandas_chrSeries)
# print(good_series)
def find(string, char):
'''
    Looks for a character in a string and returns all of its indexes.
'''
# Compared to string.find(), it returns ALL the indexes, not only the first one.
return [index for index, letter in enumerate(string) if letter == char]
# print(find('alessio', 's'))
def filter_out(word, infile, outfile):
'''
Reads a file line by line
and writes an output file containing only
    the lines that DO NOT contain 'word'.
'''
print('Filtering out lines containing',word,'...')
with open(infile, 'r') as infile:
lines = infile.readlines()
with open(outfile, 'w') as outfile:
for line in lines: # yield_file(infile) can be used instead
if word not in line:
outfile.write(line)
print('Done')
# infile = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/breaks/Decipher_DelDupOnly.txt'
# outfile = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/breaks/Decipher-DeletionsOnly.txt'
# filter_out('DUPLICATION',infile, outfile)
def flatten2(l):
'''
Flat an irregular iterable to a list.
Python >= 2.6 version.
'''
for item in l:
if isinstance(item, collections.Iterable) and not isinstance(item, basestring):
            for sub in flatten2(item):  # recurse into this Py2-compatible version
yield sub
else:
yield item
def flatten(l):
'''
Flat an irregular iterable to a list.
Python >= 3.3 version.
'''
for item in l:
try:
yield from flatten(item)
except TypeError:
yield item
def gene_synonyms(gene_name):
    '''str => list()
    Queries http://rest.genenames.org and http://www.ncbi.nlm.nih.gov/ to figure out the best synonyms of gene_name.
    Note: this redefines the simpler gene_synonyms() earlier in the file, adding an NCBI cross-check.
    '''
result = []
tmp = []
headers = {'Accept': 'application/json'}
uri = 'http://rest.genenames.org'
path = '/search/{}'.format(gene_name)
html_doc = urlopen('http://www.ncbi.nlm.nih.gov/gene/?term={}[sym]'.format(gene_name))
html_txt = BeautifulSoup(html_doc, 'html.parser').get_text()
target = urlparse(uri+path)
method = 'GET'
body = ''
h = http.Http()
response, content = h.request(
target.geturl(),
method,
body,
headers )
if response['status'] == '200':
# assume that content is a json reply
# parse content with the json module
data = json.loads(content.decode('utf8'))
for item in data['response']['docs']:
tmp.append(item['symbol'])
else:
print('Error detected: ' + response['status'])
return None
if len(tmp) > 1:
for gene in tmp:
if gene in html_txt:
result.append(gene)
return result
else:
return tmp
#print(gene_synonyms('MLL3'))
def gen_controls(how_many, chromosome, GapTable_file,outfile):
global running_threads # in case of multithreading
list_brkps = gen_rnd_single_break(how_many, chromosome, GapTable_file, verbose=False)
with open(outfile,'w') as f:
for item in list_brkps:
f.write(list_to_line(item,'\t')+'\n')
running_threads -= 1 # in case of multithreading
# # Generate controls
# import time
# from threading import Thread
# threads = 0
# running_threads = 0
# max_simultaneous_threads = 20
# how_many=9045
# chromosome='9'
# GapTable_file='/Users/alec/Desktop/UMCU_Backup/Projects/Anne_Project/current_brkps_DB/out_ALL_gap.txt'
# while threads < 100:
# while running_threads >= max_simultaneous_threads:
# time.sleep(1)
# running_threads += 1
# outfile = '/Users/alec/Desktop/UMCU_Backup/Projects/Anne_Project/current_brkps_DB/out_chr9_control_'+str(threads)+'.txt'
# print('thread', threads, '|', 'running threads:',running_threads)
# Thread(target=gen_controls, args=(how_many,chromosome,GapTable_file,outfile)).start()
# threads += 1
def gen_control_dataset(real_dataset, suffix='_control.txt'):# tested only for deletion/duplication
'''
Generates a control dataset ad hoc.
Takes as input an existing dataset and generates breaks
in the same chromosomes and with the same distance (+-1bp),
the position are however randomized.
'''
real_data = extract_data(real_dataset, columns=[1,2,4,5,7,8], verbose=False)
control_data = []
_id_list = []
for item in real_data:
if item[0] == item[2]: # ChrA == ChrB
# generate a unique id
_id = gen_rnd_id(16)
while _id in _id_list:
_id = gen_rnd_id(16)
_id_list.append(_id)
chromosome = item[0]
distance = int(item[3])-int(item[1]) #
cnv_type = item[4]
orientation = item[5]
breaks = gen_rnd_breaks(how_many=1, chromosome=chromosome,
min_distance=distance-1, max_distance=distance+1,
GapTable_file='tables/gap.txt')
print(breaks)
control_data.append([_id,chromosome,breaks[0][1],breaks[0][1],chromosome,breaks[0][2],
breaks[0][2],cnv_type,orientation])
else:
            print(item[0],'is not equal to',item[2],'- skipping these breaks')
header = ['##ID', 'ChrA', 'StartA', 'EndA', 'ChrB', 'StartB', 'EndB', 'CnvType', 'Orientation']
filename = real_dataset[:-4]+ suffix
with open(filename,'w') as outfile:
outfile.write(list_to_line(header, '\t') + '\n')
for item in control_data:
line = list_to_line(item, '\t')
print(line)
outfile.write(line + '\n')
print('Data written in',filename)
# gen_control_dataset('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/raw/clinvarCnv-DeletionsOnly.txt')
def gen_gap_table(infile='/Users/amarcozzi/Desktop/All_breakpoints_HG19_final.txt', outfile='/Users/amarcozzi/Desktop/All_breakpoints_HG19_gap.txt', resolution=10000):
'''
Generates a file containing a list of coordinates
    for which no breakpoints have been found in the input file.
'''
# Global constants
CHROMOSOMES = [str(c) for c in range(1,23)]+['X','Y']
# length of chromosomes based on GRCh37 (Data source: Ensembl genome browser release 68, July 2012)
# http://jul2012.archive.ensembl.org/Homo_sapiens/Location/Chromosome?r=1:1-1000000
# http://grch37.ensembl.org/Homo_sapiens/Location/Chromosome?r=1:24626643-24726643
CHR_LENGTHS = {'1':249250621,'2' :243199373,'3' :198022430,'4' :191154276,
'5' :180915260,'6' :171115067,'7' :159138663,'8' :146364022,
'9' :141213431,'10':135534747,'11':135006516,'12':133851895,
'13':115169878,'14':107349540,'15':102531392,'16':90354753,
'17':81195210,'18':78077248,'19':59128983,'20':63025520,
'21':48129895,'22':51304566,'X' :155270560,'Y' :59373566}
gap_list = []
for Chr in CHROMOSOMES:
print('-----------------------------------------------------')
print('Analyzing breakpoints in chromosome',Chr)
length = CHR_LENGTHS[Chr]
# determine the intervals given the chromosome length and the resolution
x_ax = [] # data holder
        y_ax = [] # stores breakpoint counts per interval
breakpoint_list = []
# # Extract data from infile, chromosome by chromosome
# with open(infile, 'r') as f:
# lines = f.readlines()
# for line in lines: # yield_file(infile) can be used instead
# if line.startswith('chr'+Chr+':'):
# tmp = line.split(':')
# breakpoint = tmp[1].split('-')[0]
# breakpoint_list.append(int(breakpoint))
# print(len(breakpoint_list),'breakpoints found...')
with open(infile, 'r') as f:
#lines = f.readlines()
for line in f:#lines: # yield_file(infile) can be used instead
if line.startswith(Chr+'\t'):
tmp = line_to_list(line,'\t')
breakpoint = tmp[1]
breakpoint_list.append(int(breakpoint))
print(len(breakpoint_list),'breakpoints found...')
for item in range(resolution,length+resolution,resolution):
x_ax.append(item)
print('Interval list:',len(x_ax), 'at',resolution,'bases resolution')
for interval in x_ax:
count = 0
to_remove = []
for breakpoint in breakpoint_list:
if breakpoint <= interval:
count += 1
to_remove.append(breakpoint)
y_ax.append(count)
for item in to_remove:
try:
breakpoint_list.remove(item)
except:
print('Error',item)
counter = 0
for idx,count_ in enumerate(y_ax):
if count_ == 0:
gap = x_ax[idx]
gap_list.append((Chr,gap))
counter += 1
print('Found', counter,'gaps in chromosome',Chr,'\n')
with open(outfile, 'w') as f:
f.write('#Gap table at '+str(resolution)+' bases resolution based on '+infile+'\n')
f.write('##chrom'+'\t'+'chromStart'+'\t'+'chromEnd'+'\n')
for item in gap_list:
line = 'chr'+str(item[0])+'\t'+str(item[1]-resolution)+'\t'+str(item[1])
f.write(line+'\n')
# import time
# start = time.time()
# gen_gap_table()
# print('Done in',time.time()-start,'seconds')
## Generate a gap table file
# import time
# start = time.time()
# gen_gap_table(infile='/Users/amarcozzi/Desktop/current_brkps_DB/out_ALL.txt', outfile='/Users/amarcozzi/Desktop/current_brkps_DB/out_ALL_gap.txt', resolution=10000)
# print('Done in',time.time()-start,'seconds')
def gen_multiple_controls(real_dataset, how_many):
'''
Generates how_many control datasets.
'''
n=0
while n < how_many:
suffix = '_control_'+str(n)+'.txt'
#real_dataset = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/raw/dataset_1b.txt'
gen_control_dataset(real_dataset,suffix)
n+=1
print(n,'datasets have been generated')
# gen_multiple_controls('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/raw/dataset_4.txt',1000)
# ## Generate multiple controls of datasets found in a folder
# folder = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/random'
# for item in list_of_files(folder,'txt'):
# gen_multiple_controls(item,1000)
def gen_deletion_dataset_from_breaks(list_of_breaks, outfile, ID_already=False):
    '''Generates a proper deletion dataset file out of a list of breaks.'''
    # Var names are not pythonic but I think it is better for readability
header = ['##ID', 'ChrA', 'StartA', 'EndA', 'ChrB', 'StartB', 'EndB', 'CnvType', 'Orientation']
ID_list = [] # to check if the ID is already present
print('writing breakpoints to', outfile, '..........')
with open(outfile, 'w') as outfile:
outfile.write(list_to_line(header, '\t') + '\n')
for item in list_of_breaks:
            if ID_already == False: # the breaks do not have an ID
while True: # checks ID
ID = gen_rnd_id(8)
if ID not in ID_list:
ID_list.append(ID)
break
ChrA = ChrB = item[0][3:]
StartA = EndA = item[1]
StartB = EndB = item[2]
            else: # the breaks do have an ID
ID = item[0] # the ID is supposed to be the first entry
ChrA = ChrB = item[1][3:]
StartA = EndA = item[2]
StartB = EndB = item[3]
CnvType = 'DELETION'
Orientation = 'TH'
line = list_to_line([ID, ChrA, StartA, EndA, ChrB, StartB, EndB, CnvType, Orientation], '\t')
outfile.write(line + '\n')
print('OK')
# list_of_breaks = gen_rnd_breaks(how_many=100, chromosome='Y', min_distance=1000, max_distance=15000, GapTable_file='tables/gap.txt')
# gen_deletion_dataset_from_breaks(list_of_breaks, 'test_deletion_dataset.txt')
# ## Generate (m) RANDOM datasets of different length (n)
# for m in range(1000):
# for n in [100,1000,10000,100000,1000000]:
# outfile = 'rnd_dataset_'+ str(n)+'_'+str(m)+'.txt'
# breaks = list()
# for chromosome in CHROMOSOMES:
# breaks.extend(gen_rnd_breaks(how_many=500, chromosome=chromosome, min_distance=0, max_distance=n))
# gen_deletion_dataset_from_breaks(breaks, outfile)
def gen_rnd_breaks(how_many=100, chromosome='Y', min_distance=1000, max_distance=15000, GapTable_file='tables/gap.txt'):
'''Returns tuples containing 1)the chromosome, 2)first breakpoint, 3)second breakpoint
    Keeps only the points that do not appear in the gap table.
gen_rnd_breaks(int, string, int, int, filepath) => [(chrX, int, int), ...]
valid chromosomes inputs are "1" to "22" ; "Y" ; "X"
The chromosome length is based on the build GRCh37/hg19.'''
# CHR_LENGTHS is based on GRCh37
CHR_LENGTHS = {'1':249250621,'2' :243199373,'3' :198022430,'4' :191154276,
'5' :180915260,'6' :171115067,'7' :159138663,'8' :146364022,
'9' :141213431,'10':135534747,'11':135006516,'12':133851895,
'13':115169878,'14':107349540,'15':102531392,'16':90354753,
'17':81195210,'18':78077248,'19':59128983,'20':63025520,
'21':48129895,'22':51304566,'X' :155270560,'Y' :59373566}
    # Generates a chromosome-specific gap list
print('generating', how_many, 'breakpoints in Chr', chromosome, '..........')
with open(GapTable_file,'r') as infile:
lines = infile.readlines()
full_gap_list = []
chr_specific_gap = []
for line in lines:
if '#' not in line: # skip comments
full_gap_list.append(line_to_list(line, '\t'))
for item in full_gap_list:
if 'chr' + chromosome in item:
# Database/browser start coordinates differ by 1 base
chr_specific_gap.append((item[2],item[3]))
# Merge contiguous gaps
merged_gaps = []
n = 0
left_tick = False
while n < len(chr_specific_gap):
if left_tick == False:
left_tick = chr_specific_gap[n][0]
try:
if chr_specific_gap[n][1] == chr_specific_gap[n+1][0]:
n += 1
else:
right_tick = chr_specific_gap[n][1]
merged_gaps.append((left_tick,right_tick))
left_tick = False
n += 1
except:
n += 1
    # Generates a breakpoint list
list_of_breakpoints = []
while len(list_of_breakpoints) < how_many:
try:
start = random.randint(0,CHR_LENGTHS[chromosome])
except KeyError:
if chromosome == '23':
chromosome = 'X'
start = random.randint(0,CHR_LENGTHS[chromosome])
elif chromosome == '24':
chromosome = 'Y'
start = random.randint(0,CHR_LENGTHS[chromosome])
            else:
                raise NameError('Wrong chromosome name: {}'.format(chromosome)) # avoids an unbound start below
end = random.randint(start+min_distance, start+max_distance)
are_points_ok = True # assumes that the points are ok
for item in merged_gaps:
# checks whether the points are ok for real
if start < int(item[0]) or start > int(item[1]):
if end < int(item[0]) or end > int(item[1]):
pass
else: are_points_ok = False
else: are_points_ok = False
if are_points_ok == True:
list_of_breakpoints.append(('chr'+chromosome, start, end))
print('OK')
return list_of_breakpoints
# print(gen_rnd_breaks(how_many=100, chromosome='Y', min_distance=1000, max_distance=15000, GapTable_file='tables/gap.txt'))
def gen_rnd_id(length):
'''Generates a random string made by uppercase ascii chars and digits'''
chars = string.ascii_uppercase + string.digits
return ''.join(random.choice(chars) for char in range(length))
# print(gen_rnd_id(16))
#@profile
def gen_rnd_single_break(how_many=100, chromosome='1', GapTable_file='/Users/amarcozzi/Desktop/All_breakpoints_HG19_gap_10k.txt', verbose=False):
'''Returns tuples containing 1)the chromosome, 2)the breakpoint
    Keeps only the points that do not appear in the gap table.
gen_rnd_breaks(int, string, filepath) => [(chrX, int), ...]
valid chromosomes inputs are "1" to "22" ; "Y" ; "X"
The chromosome length is based on the build GRCh37/hg19.
Prerequisites: The gap_list file is in the form:
##chrom chromStart chromEnd
chr1 0 10000
chr1 30000 40000
chr1 40000 50000
chr1 50000 60000
'''
if verbose == True:
start_time = time.time()
# CHR_LENGTHS is based on GRCh37
CHR_LENGTHS = {'1':249250621,'2' :243199373,'3' :198022430,'4' :191154276,
'5' :180915260,'6' :171115067,'7' :159138663,'8' :146364022,
'9' :141213431,'10':135534747,'11':135006516,'12':133851895,
'13':115169878,'14':107349540,'15':102531392,'16':90354753,
'17':81195210,'18':78077248,'19':59128983,'20':63025520,
'21':48129895,'22':51304566,'X' :155270560,'Y' :59373566}
    # Generates a chromosome-specific gap list
with open(GapTable_file, 'r') as infile:
lines = infile.readlines()
full_gap_list = []
chr_specific_gap = []
for line in lines:
if '#' not in line: # skip comments
full_gap_list.append(line_to_list(line, '\t'))
for item in full_gap_list:
if 'chr' + chromosome in item:
chr_specific_gap.append((item[1],item[2]))
# Merge contiguous gaps
merged_gaps = merge_gaps(chr_specific_gap)
# merged_gaps = []
# while len(chr_specific_gap) > 0:
# try:
# if chr_specific_gap[0][1] == chr_specific_gap[1][0]:
# tmp = (chr_specific_gap[0][0],chr_specific_gap[1][1])
# chr_specific_gap.pop(0)
# chr_specific_gap[0] = tmp
# else:
# merged_gaps.append(chr_specific_gap.pop(0))
# except:
# merged_gaps.append(chr_specific_gap.pop(0))
    # Generates a breakpoint list
if verbose == True: print('generating', how_many, 'breakpoints in Chr', chromosome)
list_of_breakpoints = []
while len(list_of_breakpoints) < how_many:
try:
start = random.randint(0,CHR_LENGTHS[chromosome])
# if verbose == True: print(start)
except KeyError:
if chromosome == '23':
chromosome = 'X'
start = random.randint(0,CHR_LENGTHS[chromosome])
elif chromosome == '24':
chromosome = 'Y'
start = random.randint(0,CHR_LENGTHS[chromosome])
            else:
                raise NameError('Wrong chromosome name: {}'.format(chromosome)) # avoids an unbound start below
#end = random.randint(start+min_distance, start+max_distance)
are_points_ok = True # assumes that the points are ok
for item in merged_gaps:
# checks whether the points are ok for real
if start <= int(item[0]) or start >= int(item[1]):
pass
else:
are_points_ok = False
if verbose == True: print(start,'is in a gap and will be discarded')
if are_points_ok == True:
list_of_breakpoints.append((chromosome, start))
if verbose == True: print(start,'is OK',len(list_of_breakpoints),'good breaks generated out of',how_many)
if verbose == True: print(how_many,'breakpoint have been generated in chromosome',chromosome,'in',time.time()-start_time,'seconds')
return list_of_breakpoints
# gen_rnd_single_break(verbose=True)
# ## Generate single breaks dataset
# import time
# start = time.time()
# breaks_on_1 = gen_rnd_single_break(how_many=19147,verbose=False)
# for item in breaks_on_1:
# print(str(item[0])+'\t'+str(item[1]))
# print('Done in', time.time()-start,'seconds..')
# ## Generate a control file
# list_brkps = gen_rnd_single_break(how_many=20873, chromosome='1', GapTable_file='/Users/amarcozzi/Desktop/current_brkps_DB/out_ALL_gap.txt', verbose=True)
# with open('/Users/amarcozzi/Desktop/current_brkps_DB/out_chr1_control.txt','w') as f:
# for item in list_brkps:
# f.write(list_to_line(item,'\t')+'\n')
# ## Generate multiple controls
# import time
# from threading import Thread
# start_time = time.time()
# threads = 0
# running_threads = 0
# max_simultaneous_threads = 20
# GapTable_file = '/Users/amarcozzi/Desktop/Projects/Anne_Project/current_brkps_DB/out_ALL_gap.txt'
# chromosome = 'Y'
# infile = '/Users/amarcozzi/Desktop/Projects/Anne_Project/current_brkps_DB/out_chr'+chromosome+'.txt'
# how_many = 0
# for line in yield_file(infile):
# if line.startswith(chromosome+'\t'):
# how_many += 1
# print('found',how_many,'breakpoints in chromosome',chromosome)
# while threads < 100:
# while running_threads >= max_simultaneous_threads:
# time.sleep(1)
# running_threads += 1
# outfile = '/Users/amarcozzi/Desktop/Projects/Anne_Project/current_brkps_DB/controls/out_chr'+chromosome+'_control_'+str(threads)+'.txt'
# print('thread', threads, '|', 'running threads:',running_threads)
# Thread(target=gen_controls, args=(how_many,chromosome,GapTable_file,outfile)).start()
# threads += 1
# print('Waiting for threads to finish...')
# while running_threads > 0:
# time.sleep(1)
# end_time = time.time()
# print('\nDone in',(end_time-start_time)/60,'minutes')
def kmers_finder(sequence_dict, motif_length, min_repetition):
'''(dict, int, int) => OrderedDict(sorted(list))
Find all the motifs long 'motif_length' and repeated at least 'min_repetition' times.
Return an OrderedDict having motif:repetition as key:value sorted by value.
'''
motif_dict = {}
for _id, sequence in sequence_dict.items():
#populate a dictionary of motifs (motif_dict)
for i in range(len(sequence) - motif_length +1):
motif = sequence[i:i+motif_length]
if motif not in motif_dict:
motif_dict[motif] = 1
else:
motif_dict[motif] += 1
#remove from motif_dict all the motifs repeated less than 'repetition' times
keys_to_remove = [key for key, value in motif_dict.items() if value < min_repetition]
for key in keys_to_remove:
del motif_dict[key]
#Return a sorted dictionary
return OrderedDict(sorted(motif_dict.items(), key=itemgetter(1), reverse=True))
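# Usage sketch (toy input): find all 3-mers repeated at least twice.
# kmers_finder({'id1': 'ACGACGT'}, 3, 2)  # => OrderedDict([('ACG', 2)])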
def kmers_finder_with_mismatches(sequence, motif_length, max_mismatches, most_common=False):
'''(str, int, int) => sorted(list)
Find the most frequent k-mers with mismatches in a string.
Input: A sequence and a pair of integers: motif_length (<=12) and max_mismatch (<= 3).
Output: An OrderedDict containing all k-mers with up to d mismatches in string.
Sample Input: ACGTTGCATGTCGCATGATGCATGAGAGCT 4 1
Sample Output: OrderedDict([('ATGC', 5), ('ATGT', 5), ('GATG', 5),...])
'''
#check passed variables
    if not 1 <= motif_length <= 12:
        raise ValueError("motif_length must be between 1 and 12. {} was passed.".format(motif_length))
    if not 0 <= max_mismatches <= 3:
        raise ValueError("max_mismatches must be between 0 and 3. {} was passed.".format(max_mismatches))
motif_dict = {}
for i in range(len(sequence) - motif_length +1):
motif = sequence[i:i+motif_length]
if motif not in motif_dict:
motif_dict[motif] = 1
else:
motif_dict[motif] += 1
motif_dict_with_mismatches = {}
for kmer in motif_dict:
motif_dict_with_mismatches.update({kmer:[]})
for other_kmer in motif_dict:
mismatches = 0
for i in range(len(kmer)):
if kmer[i] != other_kmer[i]:
mismatches += 1
if mismatches <= max_mismatches:
motif_dict_with_mismatches[kmer].append([other_kmer,motif_dict[other_kmer]])
tmp = {}
for item in motif_dict_with_mismatches:
count = 0
for motif in motif_dict_with_mismatches[item]:
count += motif[-1]
tmp.update({item:count})
result = OrderedDict(sorted(tmp.items(), key=itemgetter(1), reverse=True))
    if most_common:
        commons = OrderedDict()
        _max = next(iter(result.values())) # result is sorted by count, descending; dict views are not indexable in Py3
        for item in result:
            if result[item] == _max:
                commons.update({item:result[item]})
            else:
                break
        return commons
    return result
def line_to_list(line, char):
'''Makes a list of string out of a line. Splits the word at char.'''
# Allows for more customization compared with string.split()
split_indexes = find(line, char)
list_ = []
n = 0
for index in split_indexes:
item = line[n:index].replace('\n','').replace('\r','') # cleans up the line
if item != '': # skips empty 'cells'
list_.append(item)
n = index + 1
list_.append(line[n:].replace('\n','').replace('\r','')) # append the last item
return list_
# print(line_to_list('Makes a list of string out of a line. Splits the word at char.', ' '))
def list_to_line(list_, char):
'''Makes a string out of a list of items'''
# Allows for more customization compared with string.split()
string = ''
for item in list_:
string += str(item) + char
return string.rstrip(char) # Removes the last char
#print(list_to_line(['prova', '1', '2', '3', 'prova'], '---'))
def list_of_files(path, extension, recursive=False):
'''
Return a list of filepaths for each file into path with the target extension.
If recursive, it will loop over subfolders as well.
'''
if not recursive:
for file_path in glob.iglob(path + '/*.' + extension):
yield file_path
else:
for root, dirs, files in os.walk(path):
for file_path in glob.iglob(root + '/*.' + extension):
yield file_path
def merge_gaps(gap_list):
'''
Merges overlapping gaps in a gap list.
The gap list is in the form: [('3','4'),('5','6'),('6','7'),('8','9'),('10','11'),('15','16'),('17','18'),('18','19')]
Returns a new list containing the merged gaps: [('3','4'),('5','7'),('8','9'),('10','11'),('15','16'),('17','19')]
'''
merged_gaps = []
while len(gap_list) > 0:
try:
if int(gap_list[0][1]) >= int(gap_list[1][0]):
tmp = (gap_list[0][0],gap_list[1][1])
gap_list.pop(0)
gap_list[0] = tmp
else:
merged_gaps.append(gap_list.pop(0))
except:
merged_gaps.append(gap_list.pop(0))
return merged_gaps
# gap_list = [('3','4'),('5','6'),('6','7'),('8','9'),('10','11'),('15','16'),('17','18'),('18','19')]
# expected = [('3','4'),('5','7'),('8','9'),('10','11'),('15','16'),('17','19')]
# prova = merge_gaps(gap_list)
# print(prova)
# print(expected)
def merge_sort(intervals):
'''
Merges and sorts the intervals in a list.
    It's an alternative to merge_gaps() that sorts the list before merging.
    Should be faster, but I haven't compared them yet.
'''
sorted_by_lower_bound = sorted(intervals, key=lambda tup: tup[0])
merged = []
for higher in sorted_by_lower_bound:
if not merged:
merged.append(higher)
else:
lower = merged[-1]
# test for intersection between lower and higher:
# we know via sorting that lower[0] <= higher[0]
if higher[0] <= lower[1]:
upper_bound = max(lower[1], higher[1])
merged[-1] = (lower[0], upper_bound) # replace by merged interval
else:
merged.append(higher)
return merged
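# merge_sort([(5, 7), (1, 3), (2, 4)])  # => [(1, 4), (5, 7)]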
def multi_threads_fusion_genes_annotation(folder_path, extension, max_simultaneous_threads):
''' Executes annotate_fusion_genes() for each dataset file in a folder.
Each execution run on a different thread.'''
global running_threads
dataset_files = list_of_files(folder_path, extension)
threads = 0
running_threads = 0
for file_ in dataset_files:
while running_threads >= max_simultaneous_threads:
time.sleep(1)
threads += 1
running_threads += 1
print('thread', threads, '|', 'running threads:',running_threads)
Thread(target=annotate_fusion_genes, args=(file_,)).start() # with multithreading
# folder = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public'
# multi_threads_fusion_genes_annotation(folder, 'txt',50)
def pandize_dataset(annotated_dataset, verbose=True):
'''
Prepares a dataset to be "pandas ready".
Takes a file path as input.
'''
# Parse
if verbose == True:
message = 'parsing ' + annotated_dataset.split('/')[-1]
spacer = (100-len(message))*'.'
print(message, spacer)
dataset = pd.io.parsers.read_table(annotated_dataset, dtype={'ChrA':'str','ChrB':'str'}, sep='\t', index_col=0)
if verbose == True:
print('OK')
# Clean
if verbose == True:
message = 'cleaning ' + annotated_dataset.split('/')[-1]
spacer = (100-len(message))*'.'
print(message, spacer)
dataset = dataset.replace('In Frame', 1)
dataset = dataset.replace('Not in Frame', 0)
dataset = dataset.replace('In Phase', 1)
dataset = dataset.replace('Not in Phase', 0)
if verbose == True:
print('OK')
return dataset
# pandize_dataset('test_data_annotated.txt')
# pandize_dataset('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/control_dataset_100-1000-150000_annotated.txt')
def parse_blastXML(infile):
'''
Parses a blast outfile (XML).
'''
for blast_record in NCBIXML.parse(open(infile)):
for alignment in blast_record.alignments:
for hsp in alignment.hsps:
print("*****Alignment****")
print("sequence:", alignment.title)
print("length:", alignment.length)
print("e-value:", hsp.expect)
print(hsp.query)
print(hsp.match)
print(hsp.sbjct)
# to be tested
def reverse(sequence):
r = ''
for i in range(len(sequence),0,-1):
r += sequence[i-1]
return r
def complement(sequence):
d = {'A':'T','a':'t',
'T':'A','t':'a',
'C':'G','c':'g',
'G':'C','g':'c'}
r = ''
for b in sequence.upper():
r += d[b]
return r
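# Reverse complement, composing the two helpers above:
# reverse(complement('ATGC'))  # => 'GCAT'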
def get_mismatches(template, primer, maxerr, overlapped=False):
error = 'e<={}'.format(maxerr)
return regex.findall(f'({primer}){{{error}}}', template, overlapped=overlapped)
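# Usage sketch; assumes the third-party `regex` module (not `re`) is imported above.
# get_mismatches('ACGTACGT', 'ACGA', 1)
# => two fuzzy hits, each matching 'ACGT' with one substitution
#    (the exact strings returned can vary with the regex module's fuzzy-match settings)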
def pcr(template,primer_F,primer_R,circular=False):
if circular: ##works only with primers without 5' overhang
i = template.upper().find(primer_F.upper())
template = template[i:]+template[:i]
#Find primer_F, or the largest 3'part of it, in template
for n in range(len(primer_F)):
ix_F = [m.end() for m in re.finditer(primer_F[n:].upper(),
template.upper())]
if len(ix_F) == 1: #it's unique
#print(ix_F)
#print(primer_F[n:])
break
#print(ix_F)
#Find primer_R, or the largest 5'part of it, in template
rc_R = reverse(complement(primer_R))
for n in range(len(primer_R)):
ix = [m.start() for m in re.finditer(rc_R[:n].upper(),
template.upper())]
if len(ix) == 1: #it's unique
ix_R = ix[:]
if len(ix) < 1: #it's the largest possible
#print(ix_R)
#print(rc_R[:n])
break
#Build the product
return primer_F + template[ix_F[0]:ix_R[0]] + rc_R
##template = 'CTAGAGAGGGCCTATTTCCCATGATT--something--GCCAATTCTGCAGACAAATGGGGTACCCG'
##primer_F = 'GACAAATGGCTCTAGAGAGGGCCTATTTCCCATGATT'
##primer_R = 'TTATGTAACGGGTACCCCATTTGTCTGCAGAATTGGC'
##product = pcr(template,primer_F,primer_R)
##expected = 'GACAAATGGCTCTAGAGAGGGCCTATTTCCCATGATT--something--GCCAATTCTGCAGACAAATGGGGTACCCGTTACATAA'
##expected == result
def pip_upgrade_all(executable=False):
'''
Upgrades all pip-installed packages.
Requires a bash shell.
'''
if executable:
print('upgrading pip...')
call(f'{executable} -m pip install --upgrade pip',
shell=True)
call(f"{executable} -m pip freeze --local | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 {executable} -m pip install -U",
shell=True)
print('done')
else:
#pip
print('upgrading pip...')
call('python -m pip install --upgrade pip', shell=True)
call("python -m pip freeze --local | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 python -m pip install -U", shell=True)
#pip2
print('upgrading pip2...')
call('python2 -m pip install --upgrade pip', shell=True)
call("python2 -m pip freeze --local | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 python2 -m pip install -U", shell=True)
#pip3
print('upgrading pip3...')
call('python3 -m pip install --upgrade pip', shell=True)
call("python3 -m pip freeze --local | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 python3 -m pip install -U", shell=True)
#pypy
print('upgrading pypy-pip...')
call('pypy -m pip install --upgrade pip',shell=True)
call("pypy -m pip freeze --local | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 pypy -m pip install -U", shell=True)
#pypy3
print('upgrading pypy3-pip...')
call('pypy3 -m pip install --upgrade pip',shell=True)
call("pypy3 -m pip freeze --local | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 pypy3 -m pip install -U", shell=True)
def probability(p,n,k):
'''
Simple probability calculator.
Calculates what is the probability that k events occur in n trials.
Each event have p probability of occurring once.
Example: What is the probability of having 3 Heads by flipping a coin 10 times?
probability = prob(0.5,10,3)
print(probability) => (15/128) = 0.1171875
'''
    p = float(p)
    n = int(n)
    k = int(k)
    C = math.factorial(n) // (math.factorial(k) * math.factorial(n-k)) # factorial requires ints on Python >= 3.10
    probability = C * (p**k) * (1-p)**(n-k)
return probability
#from math import factorial
#print(probability(0.5,10,3))
#print(probability(0.5,1,1))
def process(real_dataset):
'''
    Generates, annotates and sorts a control dataset for the given real dataset.
'''
gen_control_dataset(real_dataset)
control_filename = real_dataset[:-4]+'_control.txt'
#annotate_fusion_genes(real_dataset)
annotate_fusion_genes(control_filename)
control_filename = control_filename[:-4]+'_annotated.txt'
#dataset_filename = real_dataset[:-4]+'_annotated.txt'
#sort_dataset(dataset_filename)
sort_dataset(control_filename)
print(real_dataset,'processed. All OK.')
#process('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/clinvarCnv-DeletionsOnly.txt')
# folder = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/random'
# for item in list_of_files(folder,'txt'):
# process(item)
def query_encode(chromosome, start, end):
    '''
    Queries ENCODE via http://promoter.bx.psu.edu/ENCODE/search_human.php
    Parses the output and returns a dictionary of CIS elements found and the relative location.
    Note: this redefines the earlier query_encode(), which wraps the parsing step in a try/except.
    '''
## Regex setup
re1='(chr{})'.format(chromosome) # The specific chromosome
re2='(:)' # Any Single Character ':'
re3='(\\d+)' # Integer
re4='(-)' # Any Single Character '-'
re5='(\\d+)' # Integer
rg = re.compile(re1+re2+re3+re4+re5,re.IGNORECASE|re.DOTALL)
## Query ENCODE
std_link = 'http://promoter.bx.psu.edu/ENCODE/get_human_cis_region.php?assembly=hg19&'
query = std_link + 'chr=chr{}&start={}&end={}'.format(chromosome,start,end)
print(query)
html_doc = urlopen(query)
html_txt = BeautifulSoup(html_doc, 'html.parser').get_text()
data = html_txt.split('\n')
## Parse the output
parsed = {}
coordinates = [i for i, item_ in enumerate(data) if item_.strip() == 'Coordinate']
elements = [data[i-2].split(' ')[-1].replace(': ','') for i in coordinates]
blocks = [item for item in data if item[:3] == 'chr']
#if len(elements) == len(blocks):
i = 0
for item in elements:
txt = blocks[i]
m = rg.findall(txt)
bins = [''.join(item) for item in m]
parsed.update({item:bins})
i += 1
return parsed
#cis_elements = query_encode(2,10000,20000)
def run_perl(perl_script_file, input_perl_script):
'''
Run an external perl script and return its output
'''
return check_output(["perl", perl_script_file, input_perl_script])
#print(run_perl('FusionGenes_Annotation.pl', 'test_data.txt'))
def run_py(code, interpr='python3'):
    '''Run a block of python code using the target interpreter.'''
    with open('tmp.py', 'w') as f:
        for line in code.split('\n'):
            f.write(line+'\n')
    return check_output([interpr, 'tmp.py'])
def run_pypy(code, interpr='pypy3'):
    '''Run a block of python code with PyPy.'''
with open('tmp.py', 'w') as f:
for line in code.split('\n'):
f.write(line+'\n')
return check_output([interpr, 'tmp.py'])
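# Usage sketch for run_py/run_pypy (illustrative code strings; assumes the
# target interpreters are available on PATH):
# out = run_py("print(1+1)")
# out = run_pypy("print(2**10)")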
def sequence_from_coordinates(chromosome,strand,start,end): #beta hg19 only
'''
Download the nucleotide sequence from the gene_name.
'''
Entrez.email = "a.marcozzi@umcutrecht.nl" # Always tell NCBI who you are
#GRCh37 from http://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.25/#/def_asm_Primary_Assembly
NCBI_IDS = {'1':'NC_000001.10','2':'NC_000002.11','3':'NC_000003.11','4':'NC_000004.11',
'5':'NC_000005.9','6':'NC_000006.11','7':'NC_000007.13','8':'NC_000008.10',
'9':'NC_000009.11','10':'NC_000010.10','11':'NC_000011.9','12':'NC_000012.11',
'13':'NC_000013.10','14':'NC_000014.8','15':'NC_000015.9','16':'NC_000016.9',
'17':'NC_000017.10','18':'NC_000018.9','19':'NC_000019.9','20':'NC_000020.10',
'21':'NC_000021.8','22':'NC_000022.10','X':'NC_000023.10','Y':'NC_000024.9'}
try:
handle = Entrez.efetch(db="nucleotide",
id=NCBI_IDS[str(chromosome)],
rettype="fasta",
strand=strand, #"1" for the plus strand and "2" for the minus strand.
seq_start=start,
seq_stop=end)
record = SeqIO.read(handle, "fasta")
handle.close()
sequence = str(record.seq)
return sequence
except ValueError:
print('ValueError: no sequence found in NCBI')
return False
#a = sequence_from_coordinates(9,'-',21967751,21994490)
#print(a)
def sequence_from_gene(gene_name): #beta
'''
Download the nucleotide sequence from the gene_name.
'''
data = EnsemblRelease(75)
Entrez.email = "a.marcozzi@umcutrecht.nl" # Always tell NCBI who you are
NCBI_IDS = {'1':"NC_000001", '2':"NC_000002",'3':"NC_000003",'4':"NC_000004",
'5':"NC_000005",'6':"NC_000006",'7':"NC_000007", '8':"NC_000008",
'9':"NC_000009", '10':"NC_000010", '11':"NC_000011", '12':"NC_000012",
'13':"NC_000013",'14':"NC_000014", '15':"NC_000015", '16':"NC_000016",
'17':"NC_000017", '18':"NC_000018", '19':"NC_000019", '20':"NC_000020",
'21':"NC_000021", '22':"NC_000022", 'X':"NC_000023", 'Y':"NC_000024"}
gene_obj = data.genes_by_name(gene_name)
target_chromosome = NCBI_IDS[gene_obj[0].contig]
seq_start = int(gene_obj[0].start)
seq_stop = int(gene_obj[0].end)
strand = 1 if gene_obj[0].strand == '+' else 2
try:
handle = Entrez.efetch(db="nucleotide",
id=target_chromosome,
rettype="fasta",
strand=strand, #"1" for the plus strand and "2" for the minus strand.
seq_start=seq_start,
seq_stop=seq_stop)
record = SeqIO.read(handle, "fasta")
handle.close()
sequence = str(record.seq)
return sequence
except ValueError:
print('ValueError: no sequence found in NCBI')
return False
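# Illustrative call (gene symbol chosen as an example; requires pyensembl
# release-75 data and network access to NCBI):
# seq = sequence_from_gene('TP53')
# print(len(seq))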
def sortby_chr(string):
'''
Helps to sort datasets grouped by ChrA/B.
To use with sorted().
'''
    # Since the ChrA/B value is a string, plain sorting would return ['1','10','11'...'2','20'...'3'...'X','Y'].
    # Instead, sorted() should return ['1','2',...'9','10','11'...'X','Y'].
if string == 'X':
return 23
elif string == 'Y':
return 24
else:
return int(string)
# prova = ['1','10','11','9','2','20','3','X','Y']
# print('sorted()', sorted(prova))
# print('sortby_chr()', sorted(prova, key=sortby_chr))
def sort_dataset(dataset_file, overwrite=False):
'''
Sort a dataset by ChrA. It helps during plotting
'''
text = []
header_counter = 0
header = False
print('Sorting...')
with open(dataset_file, 'r') as infile:
#lines = infile.readlines()
for line in infile:
list_ = line_to_list(line, '\t')
if line[:2] == '##':
header = list_
header_counter += 1
else:
text.append(list_)
#checkpoint
if header == False or header_counter > 1:
print('Something is wrong with the header line...', header_counter, header)
return None
# sort by the second element of the list i.e. 'ChrA'
text.sort(key=lambda x: sortby_chr(itemgetter(1)(x)))
# Write output
if overwrite == False:
outfile = dataset_file[:-4]+'_sorted.txt'
else:
        outfile = dataset_file
with open(outfile, 'w') as outfile:
outfile.write(list_to_line(header, '\t') + '\n')
for list_ in text:
outfile.write(list_to_line(list_, '\t') + '\n')
print('Done!')
# sort_dataset('test_data.txt')
# folder = '/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public'
# for item in list_of_files(folder, 'txt'):
# sort_dataset(item)
# sort_dataset('/home/amarcozz/Documents/Projects/Fusion Genes/Scripts/test_datasets/public/annotated/dgvMerged-DeletionsOnly_annotated.txt')
def split_fasta_file(infile): #beta
    '''
    Split a fasta file containing multiple sequences
    into multiple files, one sequence per file.
    '''
flag = False
length = 0
with open(infile,'r') as f:
for line in f:
if line.startswith('>'):
if flag == False:
flag = True
outfile = '{}.txt'.format(line[1:].strip())
print('writing {}'.format(outfile))
lines = [line]
else:
with open(outfile, 'w') as out:
for _ in lines:
out.write(_)
print('{} bases written'.format(length))
length = 0
outfile = '{}.txt'.format(line[1:].strip())
print('writing {}'.format(outfile))
lines = [line]
else:
lines.append(line)
length += len(line.strip())
#Write last file
with open(outfile, 'w') as out:
for _ in lines:
out.write(_)
print('{} bases written'.format(length))
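# Usage sketch (hypothetical multi-sequence FASTA file; one output file is
# created per '>' header found):
# split_fasta_file('all_sequences.fasta')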
def substract_datasets(infile_1, infile_2, outfile, header=True):
    '''
    Takes two files containing tab-delimited data, compares them and writes a file
    containing the rows that are present in infile_2 but not in infile_1.
    If header is True, the first non-comment line is treated as a header
    and copied to the output file.
    '''
    header2 = False
    comment_line = '# dataset generated by subtracting ' + infile_1 + ' from ' + infile_2 + '\n'
with open(infile_1) as infile_1:
lines_1 = infile_1.readlines()
with open(infile_2) as infile_2:
lines_2 = infile_2.readlines()
row_to_removes = []
for line in lines_1:
if line[0] != '#': # skips comments
if header == True:
header2 = True # to use for the second file
header = False # set back header to false since the first line will be skipped
first_line = line
pass
else:
item = line_to_list(line, '\t')
row_to_removes.append(item)
result_list = []
for line in lines_2:
if line[0] != '#': # skips comments
if header2 == True:
header2 = False # set back header to false since the first line will be skipped
pass
else:
item = line_to_list(line, '\t')
if item not in row_to_removes:
result_list.append(item)
with open(outfile, 'w') as outfile:
outfile.write(comment_line)
outfile.write(first_line)
for item in result_list:
outfile.write(list_to_line(item, '\t') + '\n')
print('substraction of two datasets DONE')
# substract_datasets('dataset_1_b.txt', 'dataset_1.txt', 'dataset_1-1b.txt', header=True)
def yield_file(filepath):
    '''
    A simple generator that yields the lines of a file.
    Good for reading large files without running out of memory.
    '''
with open(filepath, 'r') as f:
for line in f:
yield line
# for line in yield_file('GRCh37_hg19_variants_2014-10-16.txt'):
# print(line[:20])
|
{
"content_hash": "99829f04fde8bf87bcd4899b2b06976e",
"timestamp": "",
"source": "github",
"line_count": 3837,
"max_line_length": 288,
"avg_line_length": 35.60672400312745,
"alnum_prop": 0.5648975648316902,
"repo_name": "alec-djinn/alefuncs",
"id": "8245ec825ff9d6835695e398905a1e1d5686a6e1",
"size": "136740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alefuncs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2699887"
},
{
"name": "Jupyter Notebook",
"bytes": "9789"
},
{
"name": "Makefile",
"bytes": "609"
},
{
"name": "Python",
"bytes": "146576"
}
],
"symlink_target": ""
}
|
"""
.. module: security_monkey.tests.watchers.openstack.test_instance
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Michael Stair <mstair@att.com>
"""
from security_monkey.tests.watchers.openstack import OpenStackWatcherTestCase
class OpenStackInstanceWatcherTestCase(OpenStackWatcherTestCase):
def pre_test_setup(self):
super(OpenStackInstanceWatcherTestCase, self).pre_test_setup()
from security_monkey.watchers.openstack.compute.openstack_instance import OpenStackInstance
self.watcher = OpenStackInstance(accounts=[self.account.name])
|
{
"content_hash": "b4a4220fb292ca0f65ba6e1caa9a483d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 99,
"avg_line_length": 36.75,
"alnum_prop": 0.7687074829931972,
"repo_name": "Netflix/security_monkey",
"id": "5965b98b87f12f9fc9a1d42118ea8c1c88c2b5b8",
"size": "1244",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "security_monkey/tests/not_used/openstack/compute/test_instance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22837"
},
{
"name": "Dart",
"bytes": "130852"
},
{
"name": "Dockerfile",
"bytes": "3841"
},
{
"name": "HTML",
"bytes": "120266"
},
{
"name": "JavaScript",
"bytes": "13728"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1578684"
},
{
"name": "Shell",
"bytes": "30939"
}
],
"symlink_target": ""
}
|
from elixir.statements import Statement
from elixir import Entity, has_field, using_options, Integer, Unicode
from sqlalchemy import and_
import types
class Tag(Entity):
has_field('target_id', Integer)
has_field('target_table', Unicode)
has_field('tagname', Unicode)
using_options(tablename='tags')
class Taggable(object):
taggable_entities = []
def __init__(self, entity, *args, **kwargs):
Taggable.taggable_entities.append(entity)
def get_by_tag(cls, tag):
return entity.select(and_(Tag.c.target_id==entity.c.id,
Tag.c.target_table==entity.table.name,
Tag.c.tagname==tag))
def add_tag(self, tag):
Tag(target_id=self.id, target_table=self.table.name, tagname=tag)
entity.get_by_tag = classmethod(get_by_tag)
entity.add_tag = add_tag
@classmethod
def entity_for_table(cls, table):
if not hasattr(Taggable, 'table_to_entity'):
Taggable.table_to_entity = dict()
for entity in Taggable.taggable_entities:
Taggable.table_to_entity[entity.table.name] = entity
return Taggable.table_to_entity.get(table)
@classmethod
def get(cls, tag, of_kind=None):
if not of_kind: of_kind = Taggable.taggable_entities
if type(of_kind) not in (types.TupleType, types.ListType):
of_kind = [of_kind]
for entity in of_kind:
for instance in entity.get_by_tag(tag):
yield instance
@classmethod
def get_fast(cls, tag, of_kind=None):
if not of_kind: of_kind = Taggable.taggable_entities
if type(of_kind) not in (types.TupleType, types.ListType):
of_kind = [of_kind]
tablenames = [entity.table.name for entity in of_kind]
for tag in Tag.select(Tag.c.tagname==tag):
if tag.target_table not in tablenames: continue
yield tag.target_id, Taggable.entity_for_table(tag.target_table)
acts_as_taggable = Statement(Taggable)
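# Usage sketch for the statement defined above (model and tag names are
# illustrative, not part of the original module):
# class Photo(Entity):
#     has_field('title', Unicode)
#     acts_as_taggable()
#
# photo.add_tag(u'holiday')         # tags an instance
# Photo.get_by_tag(u'holiday')      # entity-specific lookup
# Taggable.get(u'holiday')          # lookup across all taggable entities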
|
{
"content_hash": "6bd99087d97aee87300533289d4c7c34",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 77,
"avg_line_length": 35.01639344262295,
"alnum_prop": 0.5987827715355806,
"repo_name": "gjhiggins/elixir",
"id": "e945299ae3994c80314e6107e8de2cab8c20d64d",
"size": "2136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elixir/ext/taggable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "872"
},
{
"name": "Python",
"bytes": "297582"
}
],
"symlink_target": ""
}
|
"""
Command dispatcher module
"""
import main
import manage
def dispatcher(*args):
lookup = {
'add': main.add,
'update': main.update,
'check': main.check_database,
'manage': manage.entry,
}
if len(args) == 0:
main.main()
else:
lookup[args[0]](*(args[1:]))
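# Usage sketch (argument values are illustrative):
# dispatcher()                  # no args -> main.main()
# dispatcher('add', 'Title')    # -> main.add('Title')
# dispatcher('manage', 'list')  # -> manage.entry('list')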
|
{
"content_hash": "d3815defccea78576d64fcbb38eaa1d2",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 37,
"avg_line_length": 17,
"alnum_prop": 0.5294117647058824,
"repo_name": "Ayase-252/auto-anime-downloader",
"id": "d434bbc1268817d05f1fdc57bf08bf65bdde1858",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dispatcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38797"
}
],
"symlink_target": ""
}
|
from tia.bbg.v3api import *
LocalTerminal = Terminal('localhost', 8194)
from tia.bbg.datamgr import *
|
{
"content_hash": "ee7ae855fc53cb75615337d9fb6bb10a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 43,
"avg_line_length": 20.8,
"alnum_prop": 0.75,
"repo_name": "bpsmith/tia",
"id": "63f20884c11ce07356a0003f24206d33168f5c45",
"size": "104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tia/bbg/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "365897"
}
],
"symlink_target": ""
}
|
import socket, ssl
class PortScanner:
""" Finds open paths between this host and a specified host """
def __init__(self, target):
""" Initializes the port scanner with the specified target host"""
self.target_host = target
def run_scan(self):
""" Runs a scan against a collection of ports.
Returns ports which can be used to connect to the target
Returns a tuple (open, closed)
"""
open_ports = []
closed_ports = []
for port in range(1, 3000):
try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock = ssl.wrap_socket(sock)
                sock.connect((self.target_host, port))
                sock.shutdown(socket.SHUT_RDWR)
                sock.close()
                open_ports.append(port)
except Exception, e:
closed_ports.append(port)
return open_ports, closed_ports
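# Usage sketch (target address is a placeholder):
# scanner = PortScanner('192.0.2.10')
# open_ports, closed_ports = scanner.run_scan()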
|
{
"content_hash": "cefd7ad9311cda42b57f90a23cda4664",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 75,
"avg_line_length": 33.06896551724138,
"alnum_prop": 0.5422314911366006,
"repo_name": "dgarant/weasel",
"id": "e0554a6d2b5b6279fb7c1cc85b28bf836efa3b53",
"size": "959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Weasel-Bot/weasel/bot/portscan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "30073"
},
{
"name": "JavaScript",
"bytes": "377676"
},
{
"name": "Python",
"bytes": "60998"
}
],
"symlink_target": ""
}
|
from selenium.common.exceptions import NoAlertPresentException, NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from tempfile import NamedTemporaryFile
from selenese.locators import create_locator
from selenese.patterns import create_pattern
from time import sleep
class Executor(object):
"""
Executes selenese commands on a selenium WebDriver
"""
timeout = 30
def __init__(self, testcase, webdriver):
self.testcase = testcase
self.webdriver = webdriver
self._storage = {}
self._directory = {}
def _create_file(self, filename):
"""
:return: a file-like object
"""
# replace all variables
for key in self._storage:
filename = filename.replace('${%s}' % key, self._storage[key])
# create the file like object
file_pointer = NamedTemporaryFile()
self._directory[filename] = file_pointer
return file_pointer
    def _andWait(self):
        for _ in range(self.timeout):
            sleep(1)
            ready_state = self.webdriver.execute_script('return document.readyState')
            if ready_state == 'complete':
                return True
        return False
# Commands
def addSelection(self, target, value):
pass
def altKeyDown(self, target, value):
pass
def altKeyUp(self, target, value):
pass
def captureEntirePageScreenshot(self, target, value):
self.webdriver.get_screenshot_as_file(self._create_file(target).name)
return True
def check(self, target, value):
element = create_locator(target).get_element(self.webdriver)
if element.get_attribute('type') == 'checkbox' and not element.get_attribute('checked'):
element.click()
def click(self, target, value):
create_locator(target).get_element(self.webdriver).click()
def close(self, target, value):
self.webdriver.close()
    def controlKeyDown(self, target, value):
        ActionChains(self.webdriver).key_down(Keys.CONTROL).perform()
    def controlKeyUp(self, target, value):
        ActionChains(self.webdriver).key_up(Keys.CONTROL).perform()
    def createCookie(self, target, value):
        # selenese passes the "name=value" pair as target and the
        # options string (path, max_age, domain, secure) as value
        name, cookie_value = target.split('=', 1)
        data = {
            'name': name,
            'value': cookie_value,
        }
        for chunk in (value or '').split(','):
            if chunk.startswith('path='):
                data['path'] = chunk[5:]
            elif chunk.startswith('max_age='):
                data['expiry'] = chunk[8:]
            elif chunk.startswith('domain='):
                data['domain'] = chunk[7:]
            elif chunk.startswith('secure='):
                data['secure'] = bool(chunk[7:])
        self.webdriver.add_cookie(data)
def deleteAllVisibleCookies(self, target, value):
self.webdriver.delete_all_cookies()
def deleteCookie(self, target, value):
self.webdriver.delete_cookie(target)
    def deselectPopUp(self, target, value):
        # switch back to the main (first) window
        self.webdriver.switch_to_window(self.webdriver.window_handles[0])
def doubleClick(self, target, value):
pass
def focus(self, target, value):
locator = create_locator(target)
element = locator.get_element(self.webdriver)
if element.get_attribute('id'):
self.webdriver.execute_script('document.getElementById("%s").focus();' %
element.get_attribute('id'))
# TODO
def goBack(self, target, value):
self.webdriver.back()
def open(self, target, value):
if not target.startswith('http://') and not target.startswith('https://'):
if self.testcase.baseurl.endswith('/') and target.startswith('/'):
url = self.testcase.baseurl + target[1:]
else:
url = self.testcase.baseurl + target
else:
url = target
self.webdriver.get(url)
def refresh(self, target, value):
self.webdriver.refresh()
def removeScript(self, target, value):
pass
def runScript(self, target, value):
pass
def selectPopUp(self, target, value):
# TODO
self.webdriver.switch_to_window(target)
def selectWindow(self, target, value):
# TODO
self.webdriver.switch_to_window(target)
def submit(self, target, value):
pass
def type(self, target, value):
pass
def typeKeys(self, target, value):
create_locator(target).get_element(self.webdriver).send_keys(value)
def uncheck(self, target, value):
element = create_locator(target).get_element(self.webdriver)
if element.get_attribute('type') == 'checkbox' and not element.get_attribute('checked'):
element.click()
def windowMaximize(self, target, value):
self.webdriver.maximize_window()
# Accessors
def _getAllButtons(self):
        # note: only submit buttons will be selected since the reference implementation
        # (Selenium IDE) does not select button elements
id_list = []
for button in self.webdriver.find_elements_by_css_selector('input[type="submit"]'):
id_list.append(button.get_attribute('id'))
return ','.join(id_list)
def _getAllFields(self):
# get all input fields
id_list = []
for field in self.webdriver.find_elements_by_css_selector('input[type="text"]'):
id_list.append(field.get_attribute('id'))
return ','.join(id_list)
def assertAlert(self, target, value):
return create_pattern(target).compare(self.webdriver.switch_to_alert().text)
def assertAlertNotPresent(self, target, value):
try:
self.webdriver.switch_to_alert().text
except NoAlertPresentException:
return True
return False
def assertAlertPresent(self, target, value):
return self.assertAlertNotPresent(target, value) == False
def assertAllButtons(self, target, value):
return create_pattern(target).compare(self._getAllButtons())
def assertAllFields(self, target, value):
return create_pattern(target).compare(self._getAllFields())
def assertAllLinks(self, target, value):
pass
def assertAllWindowIds(self, target, value):
pass
def assertAllWindowTitles(self, target, value):
pass
def assertAllWindowNames(self, target, value):
pass
def assertCookiePresent(self, target, value):
        return self.webdriver.get_cookie(target) is not None
def assertCookieNotPresent(self, target, value):
return self.assertCookiePresent(target, value) == False
def assertCssCount(self, target, value):
elements = self.webdriver.find_elements_by_css_selector(target)
return create_pattern(value).compare(len(elements))
def assertElementNotPresent(self, target, value):
return self.assertElementPresent(target, value) == False
def assertElementPresent(self, target, value):
try:
create_locator(target).get_element(self.webdriver)
return True
except NoSuchElementException:
return False
def assertLocation(self, target, value):
return create_pattern(target).compare(self.webdriver.current_url)
def assertNotCssCount(self, target, value):
return self.assertCssCount(target, value) == False
def assertText(self, target, value):
locator = create_locator(target)
pattern = create_pattern(value)
return pattern.compare(locator.get_element(self.webdriver).text)
def assertTextNotPresent(self, target, value):
return self.assertTextPresent(target, value) == False
def assertTextPresent(self, target, value):
pattern = create_pattern(target)
return pattern.compare(self.webdriver.find_element_by_tag_name('body').text)
def assertTitle(self, target, value):
pattern = create_pattern(target)
return pattern.compare(self.webdriver.title)
def storeAlert(self, target, value):
self._storage[target] = self.webdriver.switch_to_alert().text
def storeAlertPresent(self, target, value):
pass
def storeAllButtons(self, target, value):
self._storage[target] = self._getAllButtons()
def storeAllFields(self, target, value):
self._storage[target] = self._getAllFields()
def storeLocation(self, target, value):
self._storage[target] = self.webdriver.current_url
def storeBodyText(self, target, value):
self._storage[target] = self.webdriver.find_element_by_tag_name('body').text
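# Usage sketch (testcase/webdriver objects come from the surrounding
# framework; locator and pattern strings below are illustrative):
# executor = Executor(testcase, webdriver)
# executor.open('/login', None)
# executor.click('id=submit', None)
# assert executor.assertTitle('Dashboard*', None)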
|
{
"content_hash": "e8809b6133b460cc7bb3932e4862454e",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 97,
"avg_line_length": 33.18250950570342,
"alnum_prop": 0.6343531568694855,
"repo_name": "Lukas-Stuehrk/selenese",
"id": "6ec9e7c7343fd31c6fbf50d46354d8bc07e72583",
"size": "8727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selenese/commands.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "21433"
}
],
"symlink_target": ""
}
|
import zmq
from control4.misc.randomness import random_string
from control4.misc.collection_utils import chunk_slices
from control4.parallel.parallel import PoolBase,get_cpu_count
import os,time
def child_process_loop(addr):
context = zmq.Context() #pylint: disable=E1101
socket = context.socket(zmq.REP) #pylint: disable=E1101
socket.bind(addr)
while True:
tup = socket.recv_pyobj()
if tup[0] == "mapreduce":
func,reducer,li = tup[1:]
result = reducer(map(func, li))
elif tup[0] == "apply":
func,arg = tup[1:]
result = func(arg)
elif tup[0] == "exit":
socket.send_pyobj("")
return
else:
raise RuntimeError("invalid command %s"%tup[0])
# Send reply back to client
socket.send_pyobj(result)
class IPCProcessPool(PoolBase):
def __init__(self,n=-1):
if n==-1: n = get_cpu_count()
elif n==1: print "warning: starting pool with one process"
elif n > 1: pass
else: raise RuntimeError("invalid number of processes: %i"%n)
self.context = zmq.Context() #pylint: disable=E1101
self.sockets = []
self.pids = []
prefix = random_string(12)
pipenames = ["%s-%.4i"%(prefix,i) for i in xrange(n)]
for pipename in pipenames:
addr = "ipc:///tmp/%s"%pipename
pid = os.fork()
if pid == 0:
child_process_loop(addr)
os._exit(0) # skip all exit processing, e.g. exitfuncs, __del__ #pylint: disable=W0212
else:
self.pids.append(pid)
socket = self.context.socket(zmq.REQ) #pylint: disable=E1101
socket.connect(addr)
self.sockets.append(socket)
def size(self):
return len(self.sockets)
def _dispatch(self, tuples):
assert len(tuples)>0
n_results = len(tuples)
assert n_results <= self.size()
# self.log.info("sending %i messages",size)
for (socket,tup) in zip(self.sockets,tuples):
socket.send_pyobj(tup)
results = [None for _ in xrange(n_results)]
n_done = 0
# self.log.info("pulling in %i messages",n_results)
while n_done < n_results:
for i in xrange(n_results):
if results[i] is None:
try:
reply = self.sockets[i].recv_pyobj(zmq.DONTWAIT) #pylint: disable=E1101
results[i] = reply
n_done += 1
except zmq.Again: #pylint: disable=E1101
continue
# self.log.info("received %i/%i",n_done,n_results)
time.sleep(.001)
return results
def mapreduce(self, func, reducer, items):
return reducer(self._dispatch([("mapreduce",func,reducer,items[chunk]) for chunk in chunk_slices(len(items),self.size())]))
def scatter(self, func, items):
return self._dispatch([("apply",func,items[chunk]) for chunk in chunk_slices(len(items),self.size())])
def gather(self, func, reducer, arg):
return reducer(self._dispatch([("apply",func,arg) for _ in xrange(self.size())]))
def __del__(self):
for socket in self.sockets:
socket.send_pyobj(("exit",))
print "waiting for child processes to close..."
for pid in self.pids:
os.waitpid(pid,0)
print "ok"
|
{
"content_hash": "801a26216b99340c7178a0b7fe3a19ac",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 131,
"avg_line_length": 34.02857142857143,
"alnum_prop": 0.5443604813881892,
"repo_name": "SFPD/rlreloaded",
"id": "0098b21df8da1f4f49f706157c279a3706c89721",
"size": "3573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "control4/parallel/ipcprocesspool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "753"
},
{
"name": "C++",
"bytes": "88527"
},
{
"name": "CMake",
"bytes": "33134"
},
{
"name": "Python",
"bytes": "478983"
},
{
"name": "Shell",
"bytes": "953"
}
],
"symlink_target": ""
}
|
import plugins, os, string, shutil, sys, logging, glob
from ConfigParser import ConfigParser, NoOptionError
from copy import copy
from ordereddict import OrderedDict
plugins.addCategory("bug", "known bugs", "had known bugs")
plugins.addCategory("badPredict", "internal errors", "had internal errors")
plugins.addCategory("crash", "CRASHED")
# For backwards compatibility...
class FailedPrediction(plugins.TestState):
def getExitCode(self):
return int(self.category != "bug")
def getTypeBreakdown(self):
status = "failure" if self.getExitCode() else "success"
return status, self.briefText
class Bug:
def __init__(self, priority, rerunCount):
self.priority = priority
self.rerunCount = rerunCount
def findCategory(self, internalError):
if internalError or self.rerunCount:
return "badPredict"
else:
return "bug"
def isCancellation(self):
return False
def getRerunText(self):
if self.rerunCount:
return "\n(NOTE: Test was run " + str(self.rerunCount + 1) + " times in total and each time encountered this issue.\n" + \
"Results of previous runs can be found in framework_tmp/backup.previous.* under the sandbox directory.)\n\n"
else:
return ""
class BugSystemBug(Bug):
def __init__(self, bugSystem, bugId, priorityStr, *args):
self.bugId = bugId
self.bugSystem = bugSystem
prio = int(priorityStr) if priorityStr else 20
Bug.__init__(self, prio, *args)
def __repr__(self):
return self.bugId
def findInfo(self, test):
location = test.getCompositeConfigValue("bug_system_location", self.bugSystem)
username = test.getCompositeConfigValue("bug_system_username", self.bugSystem)
password = test.getCompositeConfigValue("bug_system_password", self.bugSystem)
exec "from " + self.bugSystem + " import findBugInfo as _findBugInfo"
status, bugText, isResolved = _findBugInfo(self.bugId, location, username, password) #@UndefinedVariable
category = self.findCategory(isResolved)
briefText = "bug " + self.bugId + " (" + status + ")"
return category, briefText, self.getRerunText() + bugText
class UnreportedBug(Bug):
def __init__(self, fullText, briefText, internalError, priorityStr, *args):
self.fullText = fullText
self.briefText = briefText
self.internalError = internalError
prio = self.getPriority(priorityStr)
Bug.__init__(self, prio, *args)
def __repr__(self):
return self.briefText
def isCancellation(self):
return not self.briefText and not self.fullText
def getPriority(self, priorityStr):
if priorityStr:
return int(priorityStr)
elif self.internalError:
return 10
else:
return 30
def findInfo(self, *args):
return self.findCategory(self.internalError), self.briefText, self.getRerunText() + self.fullText
class BugTrigger:
def __init__(self, getOption):
useRegexp = int(getOption("use_regexp", "1"))
searchStr = getOption("search_string").replace("\\n", "\n")
self.textTrigger = plugins.MultilineTextTrigger(searchStr, useRegexp)
self.triggerHosts = self.getTriggerHosts(getOption)
self.checkUnchanged = int(getOption("trigger_on_success", "0"))
self.reportInternalError = int(getOption("internal_error", "0"))
self.ignoreOtherErrors = int(getOption("ignore_other_errors", self.reportInternalError))
self.customTrigger = getOption("custom_trigger", "")
self.bugInfo = self.createBugInfo(getOption)
self.diag = logging.getLogger("Check For Bugs")
def __repr__(self):
return repr(self.textTrigger)
def getTriggerHosts(self, getOption):
hostStr = getOption("execution_hosts")
if hostStr:
return hostStr.split(",")
else:
return []
def createBugInfo(self, getOption):
bugSystem = getOption("bug_system")
prioStr = getOption("priority")
rerunCount = int(getOption("rerun_count", "0"))
if bugSystem:
return BugSystemBug(bugSystem, getOption("bug_id"), prioStr, rerunCount)
else:
return UnreportedBug(getOption("full_description"), getOption("brief_description"), self.reportInternalError, prioStr, rerunCount)
def matchesText(self, line):
return self.textTrigger.matches(line)
def exactMatch(self, lines, **kw):
        updatedLines = lines[:-1] if lines[-1] == '' else lines
if len(updatedLines) == len(self.textTrigger.triggers):
for index, line in enumerate(updatedLines, start=1):
# We must check that every line match because MultilineTextTrigger.matches method
# returns True only when the match is complete
if index < len(updatedLines):
if not self.textTrigger._matches(line)[1]:
return False
else:
return self.hasBug(line, **kw)
return False
def customTriggerMatches(self, *args):
module, method = self.customTrigger.split(".", 1)
return plugins.importAndCall(module, method, *args)
def hasBug(self, line, execHosts=[], isChanged=True, multipleDiffs=False, tmpDir=None):
if not self.checkUnchanged and not isChanged:
self.diag.info("File not changed, ignoring")
return False
if multipleDiffs and not self.ignoreOtherErrors:
self.diag.info("Multiple differences present, allowing others through")
return False
if line is not None and not self.textTrigger.matches(line):
return False
if self.customTrigger and not self.customTriggerMatches(execHosts, tmpDir):
return False
if self.hostsMatch(execHosts):
return True
else:
self.diag.info("No match " + repr(execHosts) + " with " + repr(self.triggerHosts))
return False
def findBugInfo(self, test, fileStem, absenceBug):
category, briefText, fullText = self.bugInfo.findInfo(test)
whatText = "FAILING to find text" if absenceBug else "text found"
matchText = repr(self)
if "\n" in matchText:
matchText = "'''\n" + matchText + "\n'''"
else:
matchText = "'" + matchText + "'"
fullText += "\n(This bug was triggered by " + whatText + " in " + self.getFileText(fileStem) + " matching " + matchText + ")"
return category, briefText, fullText
def getFileText(self, fileStem):
if fileStem == "free_text":
return "the full difference report"
elif fileStem == "brief_text":
return "the brief text/details"
else:
return "file " + repr(fileStem)
def hostsMatch(self, execHosts):
if len(self.triggerHosts) == 0:
return True
for host in execHosts:
if not host in self.triggerHosts:
return False
return True
class FileBugData:
def __init__(self):
self.presentList = []
self.absentList = []
self.identicalList = []
self.checkUnchanged = False
self.diag = logging.getLogger("Check For Bugs")
def addBugTrigger(self, getOption):
bugTrigger = BugTrigger(getOption)
if bugTrigger.checkUnchanged:
self.checkUnchanged = True
if getOption("trigger_on_absence", False):
self.absentList.append(bugTrigger)
elif getOption("trigger_on_identical", False):
self.identicalList.append(bugTrigger)
else:
self.presentList.append(bugTrigger)
def findBugs(self, fileName, execHosts, isChanged, multipleDiffs):
if not self.checkUnchanged and not isChanged:
self.diag.info("File not changed, ignoring all bugs")
return []
if not fileName:
self.diag.info("File doesn't exist, checking only for absence bugs")
return self.findAbsenceBugs(self.absentList, execHosts=execHosts, isChanged=isChanged, multipleDiffs=multipleDiffs, tmpDir=None)
if not os.path.exists(fileName):
raise plugins.TextTestError, "The file '"+ fileName + "' does not exist. Maybe it has been removed by an external process. "
self.diag.info("Looking for bugs in " + fileName)
dirname = os.path.dirname(fileName)
return self.findBugsInText(open(fileName).readlines(), execHosts=execHosts, isChanged=isChanged, multipleDiffs=multipleDiffs, tmpDir=dirname)
def findBugsInText(self, lines, **kw):
currAbsent = copy(self.absentList)
bugs = []
for bugTrigger in self.identicalList:
if bugTrigger not in bugs and bugTrigger.exactMatch(lines, **kw):
bugs.append(bugTrigger)
for line in lines:
for bugTrigger in self.presentList:
self.diag.info("Checking for existence of " + repr(bugTrigger))
if bugTrigger not in bugs and bugTrigger.hasBug(line, **kw):
bugs.append(bugTrigger)
for bugTrigger in currAbsent:
if bugTrigger.matchesText(line):
currAbsent.remove(bugTrigger)
return bugs + self.findAbsenceBugs(currAbsent, **kw)
def findAbsenceBugs(self, absentList, **kw):
bugs = []
for bugTrigger in absentList:
if bugTrigger not in bugs and bugTrigger.hasBug(None, **kw):
bugs.append(bugTrigger)
return bugs
class ParseMethod:
def __init__(self, parser, section):
self.parser = parser
self.section = section
def __call__(self, option, default=""):
try:
return self.parser.get(self.section, option)
except NoOptionError:
return default
class ParserSectionDict(OrderedDict):
def __init__(self, fileName, *args, **kw):
OrderedDict.__init__(self, *args, **kw)
self.readingFile = fileName
def __getitem__(self, key):
if self.readingFile:
msg = "Bug file at " + self.readingFile + " has duplicated sections named '" + key + "', the later ones will be ignored"
plugins.printWarning(msg)
return OrderedDict.__getitem__(self, key)
def values(self):
# Fix for python 2.7... which calls __getitem__ internally
origFile = self.readingFile
self.readingFile = None
ret = OrderedDict.values(self)
self.readingFile = origFile
return ret
class BugMap(OrderedDict):
def checkUnchanged(self):
for bugData in self.values():
if bugData.checkUnchanged:
return True
return False
def readFromFile(self, fileName):
parser = self.makeParser(fileName)
if parser:
self.readFromParser(parser)
def readFromFileObject(self, f):
parser = self.makeParserFromFileObject(f)
if parser:
self.readFromParser(parser)
def makeParserFromFileObject(self, f):
parser = ConfigParser()
# Default behaviour transforms to lower case: we want case-sensitive
parser.optionxform = str
parser.readfp(f)
return parser
@staticmethod
def makeParser(fileName):
parser = ConfigParser()
# Default behaviour transforms to lower case: we want case-sensitive
parser.optionxform = str
# There isn't a nice way to change the behaviour on getting a duplicate section
# so we use a nasty way :)
parser._sections = ParserSectionDict(fileName)
try:
parser.read(fileName)
parser._sections.readingFile = None
return parser
except Exception:
plugins.printWarning("Bug file at " + fileName + " not understood, ignoring")
def readFromParser(self, parser):
for section in reversed(sorted(parser.sections())):
getOption = ParseMethod(parser, section)
fileStem = getOption("search_file")
self.setdefault(fileStem, FileBugData()).addBugTrigger(getOption)
class CheckForCrashes(plugins.Action):
def __init__(self):
self.diag = logging.getLogger("check for crashes")
def __call__(self, test):
if test.state.category == "killed":
return
# Hard-coded prediction: check test didn't crash
comparison, _ = test.state.findComparison("stacktrace")
if comparison and comparison.newResult():
stackTraceFile = comparison.tmpFile
self.diag.info("Parsing " + stackTraceFile)
summary, errorInfo = self.parseStackTrace(test, stackTraceFile)
newState = copy(test.state)
newState.removeComparison("stacktrace")
crashState = FailedPrediction("crash", errorInfo, summary)
newState.setFailedPrediction(crashState)
test.changeState(newState)
if not test.app.keepTemporaryDirectories():
os.remove(stackTraceFile)
def parseStackTrace(self, test, stackTraceFile):
lines = open(stackTraceFile).readlines()
if len(lines) > 2:
return lines[0].strip(), string.join(lines[2:], "")
else:
errFile = test.makeTmpFileName("stacktrace.collate_errs", forFramework=1)
script = test.getCompositeConfigValue("collate_script", "stacktrace")[0]
return "core not parsed", "The core file could not be parsed. Errors from '" + script + "' follow :\n" + open(errFile).read()
class CheckForBugs(plugins.Action):
def __init__(self):
self.diag = logging.getLogger("Check For Bugs")
def callDuringAbandon(self, test):
# want to be able to mark UNRUNNABLE tests as known bugs too...
return test.state.lifecycleChange != "complete"
def __repr__(self):
return "Checking known bugs for"
def __call__(self, test):
newState, rerunCount = self.checkTest(test, test.state)
if newState:
test.changeState(newState)
if rerunCount and not test.app.isReconnecting() and not os.path.exists(test.makeBackupFileName(rerunCount)):
self.describe(test, " - found an issue that triggered a rerun")
test.saveState()
# Current thread, must be done immediately or we might exit...
test.performNotify("Rerun")
def checkTest(self, test, state):
activeBugs = self.readBugs(test)
return self.checkTestWithBugs(test, state, activeBugs)
def checkTestWithBugs(self, test, state, activeBugs):
if not activeBugs.checkUnchanged() and not state.hasFailed():
self.diag.info(repr(test) + " succeeded, not looking for bugs")
return None, 0
bugTrigger, bugStem = self.findBug(test, state, activeBugs)
if bugTrigger:
absenceBug = bugTrigger in activeBugs[bugStem].absentList
category, briefText, fullText = bugTrigger.findBugInfo(test, bugStem, absenceBug)
self.diag.info("Changing to " + category + " with text " + briefText)
bugState = FailedPrediction(category, fullText, briefText, completed=1)
return self.getNewState(state, bugState), bugTrigger.bugInfo.rerunCount
else:
return None, 0
def findAllBugs(self, test, state, activeBugs):
multipleDiffs = self.hasMultipleDifferences(test, state)
bugs, bugStems = [], []
for stem, fileBugData in activeBugs.items():
newBugs = self.findBugsInFile(test, state, stem, fileBugData, multipleDiffs)
if newBugs:
bugs += newBugs
bugStems += [ stem ] * len(newBugs)
return bugs, bugStems
def findBug(self, test, state, activeBugs):
bugs, bugStems = self.findAllBugs(test, state, activeBugs)
unblockedBugs = self.findUnblockedBugs(bugs)
if len(unblockedBugs) > 0:
unblockedBugs.sort(key=lambda bug: (bug.bugInfo.priority, bug.bugInfo.rerunCount))
bug = unblockedBugs[0]
return bug, bugStems[bugs.index(bug)]
else:
return None, None
def findUnblockedBugs(self, bugs):
unblockedBugs = []
for bug in bugs:
if bug.bugInfo.isCancellation():
return unblockedBugs
else:
unblockedBugs.append(bug)
return unblockedBugs
def findBugsInFile(self, test, state, stem, fileBugData, multipleDiffs):
self.diag.info("Looking for bugs in file " + stem)
if stem == "free_text":
return fileBugData.findBugsInText(state.freeText.split("\n"), execHosts=state.executionHosts, tmpDir=test.writeDirectory)
elif stem == "brief_text":
briefText = state.getTypeBreakdown()[1]
return fileBugData.findBugsInText(briefText.split("\n"), execHosts=state.executionHosts, tmpDir=test.writeDirectory)
elif state.hasResults():
# bugs are only relevant if the file itself is changed, unless marked to trigger on success also
bugs = []
for comp in state.findComparisonsMatching(stem):
isChanged = not comp.hasSucceeded()
bugs += fileBugData.findBugs(comp.tmpFile, state.executionHosts, isChanged, multipleDiffs)
return bugs
return []
def getNewState(self, oldState, bugState):
if hasattr(oldState, "failedPrediction"):
# if we've already compared, slot our things into the comparison object
newState = copy(oldState)
newState.setFailedPrediction(bugState, usePreviousText=True)
return newState
else:
return bugState
def hasMultipleDifferences(self, test, state):
if not state.hasResults():
# check for unrunnables...
return False
comparisons = state.getComparisons()
diffCount = len(comparisons)
if diffCount <= 1:
return False
perfStems = state.getPerformanceStems(test)
for comp in comparisons:
if comp.stem in perfStems:
diffCount -= 1
return diffCount > 1
def readBugs(self, test):
bugMap = BugMap()
# Mostly for backwards compatibility, reverse the list so that more specific bugs
# get checked first.
for bugFile in reversed(test.getAllPathNames("knownbugs")):
self.diag.info("Reading bugs from file " + bugFile)
bugMap.readFromFile(bugFile)
return bugMap
# For migrating from knownbugs files which are from TextTest 3.7 and older
class MigrateFiles(plugins.Action):
def setUpSuite(self, suite):
self.migrate(suite)
def __call__(self, test):
self.migrate(test)
def __repr__(self):
return "Migrating knownbugs file in"
def migrate(self, test):
for bugFileName in test.findAllStdFiles("knownbugs"):
parser = ConfigParser()
# Default behaviour transforms to lower case: we want case-sensitive
parser.optionxform = str
try:
parser.read(bugFileName)
except Exception:
plugins.printWarning("Bug file at " + bugFileName + " not understood, ignoring")
continue
if not parser.has_section("Migrated section 1"):
self.describe(test, " - " + os.path.basename(bugFileName))
sys.stdout.flush()
self.updateFile(bugFileName, parser)
else:
self.describe(test, " (already migrated)")
def updateFile(self, bugFileName, parser):
newBugFileName = bugFileName + ".new"
newBugFile = open(newBugFileName, "w")
self.writeNew(parser, newBugFile)
newBugFile.close()
print "Old File:\n" + open(bugFileName).read()
print "New File:\n" + open(newBugFileName).read()
shutil.move(newBugFileName, bugFileName)
def writeNew(self, parser, newBugFile):
sectionNo = 0
for fileStem in parser.sections():
for bugText in parser.options(fileStem):
bugId = parser.get(fileStem, bugText)
sectionNo += 1
self.writeSection(newBugFile, sectionNo, fileStem, bugText, bugId)
def writeSection(self, newBugFile, sectionNo, fileStem, bugText, bugId):
newBugFile.write("[Migrated section " + str(sectionNo) + "]\n")
newBugFile.write("search_string:" + bugText + "\n")
newBugFile.write("search_file:" + fileStem + "\n")
bugSystem = self.findBugSystem(bugId)
if bugSystem:
newBugFile.write("bug_system:" + bugSystem + "\n")
newBugFile.write("bug_id:" + bugId + "\n")
else:
newBugFile.write("full_description:" + bugId + "\n")
newBugFile.write("brief_description:unreported bug\n")
newBugFile.write("internal_error:0\n")
newBugFile.write("\n")
def findBugSystem(self, bugId):
for letter in bugId:
if not letter in string.digits:
return None
return "bugzilla"
|
{
"content_hash": "87ab75ffe9b3758072cd8f37b782ee86",
"timestamp": "",
"source": "github",
"line_count": 526,
"max_line_length": 149,
"avg_line_length": 41.2148288973384,
"alnum_prop": 0.6161723326721712,
"repo_name": "emilybache/texttest-runner",
"id": "e65cee7fcf6de87fcb9449c0900c4712dc9533da",
"size": "21702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/lib/default/knownbugs/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1059"
},
{
"name": "Java",
"bytes": "12288"
},
{
"name": "JavaScript",
"bytes": "6698"
},
{
"name": "NSIS",
"bytes": "17203"
},
{
"name": "Python",
"bytes": "2313463"
}
],
"symlink_target": ""
}
|
import re, os, subprocess, select, time, datetime
import tests, debug, siteconfig
from common import TestCommon, TimeoutError, select_timeout
from results import RowResults
# TODO: this test needs a control loop like the httperf test to
# ramp up the load until it saturates. currently it just runs N
# iterations of a pre-configured set of parameters.
IPBENCH_TEST = 'latency'
IPBENCH_TEST_ARGS = ['Mbps=400', 'size=1300'] # per daemon
IPBENCH_NDAEMONS = 3
IPBENCH_ITERATIONS = 4
IPBENCH_TIMEOUT = datetime.timedelta(seconds=60) # for a single ipbench run
IPBENCH_SLEEPTIME = 10 # seconds between iterations
LOGFILENAME = 'testlog.txt'
class EchoTestCommon(TestCommon):
def setup(self, build, machine, testdir):
super(EchoTestCommon, self).setup(build, machine, testdir)
self.testdir = testdir
self.finished = False
def get_modules(self, build, machine):
cardName = "e1000"
modules = super(EchoTestCommon, self).get_modules(build, machine)
modules.add_module("e1000n", ["auto"])
modules.add_module("NGD_mng", ["auto"])
modules.add_module("netd", ["auto"])
modules.add_module("echoserver",["core=%d"%machine.get_coreids()[3],
"cardname=%s"%cardName])
return modules
def process_line(self, line):
m = re.match(r'Interface up! IP address (\d+\.\d+\.\d+\.\d+)', line)
if m:
self.run_test(m.group(1))
self.finished = True
def is_finished(self, line):
return self.finished or super(EchoTestCommon, self).is_finished(line)
def get_ipbench_test(self):
return (IPBENCH_TEST, IPBENCH_TEST_ARGS)
def _run_ipbenchd(self, user, host):
ssh_dest = '%s@%s' % (user, host)
remotecmd = siteconfig.get('IPBENCHD_PATH')
cmd = ['ssh'] + siteconfig.get('SSH_ARGS').split() + [ssh_dest, remotecmd]
debug.verbose('spawning ipbenchd on %s' % host)
return subprocess.Popen(cmd)
def _cleanup_ipbenchd(self, user, host):
# run a remote killall to get rid of ipbenchd
ssh_dest = '%s@%s' % (user, host)
remotecmd = 'killall -q python'
cmd = ['ssh'] + siteconfig.get('SSH_ARGS').split() + [ssh_dest, remotecmd]
debug.verbose('killing ipbenchd on %s' % host)
retcode = subprocess.call(cmd)
if retcode != 0:
debug.warning('failed to killall python on %s!' % host)
def _run_ipbench(self, args, logfile):
cmd = [siteconfig.get('IPBENCH_PATH')] + args
firstrun = True
for _ in range(IPBENCH_ITERATIONS):
if firstrun:
firstrun = False
else:
# sleep a moment to let things settle down between runs
debug.verbose('sleeping between ipbench runs')
time.sleep(IPBENCH_SLEEPTIME)
debug.verbose('running ipbench: %s' % ' '.join(cmd))
child = subprocess.Popen(cmd, stdout=subprocess.PIPE)
timeout = datetime.datetime.now() + IPBENCH_TIMEOUT
while True:
# wait for some output
(rlist, _, _) = select_timeout(timeout, [child.stdout])
if not rlist:
debug.warning('ipbench run timed out')
child.terminate()
child.wait()
raise TimeoutError('waiting for ipbench')
# read one char at a time to avoid blocking
c = child.stdout.read(1)
if c == '':
break # EOF
logfile.write(c)
child.wait()
assert(child.returncode == 0) # check for successful exit
def run_test(self, targetip):
ipbenchds = []
ipbenchd_hosts = []
logfile = open(os.path.join(self.testdir, LOGFILENAME), 'w')
try:
# spawn ipbenchds
for _ in range(IPBENCH_NDAEMONS):
user, host = siteconfig.site.get_load_generator()
# can't run multiple ipbenchds on the same host
assert(host not in [h for (_,h) in ipbenchd_hosts])
ipbenchd_hosts.append((user, host))
ipbenchds.append(self._run_ipbenchd(user, host))
# give them a moment to start
time.sleep(1)
# construct command-line args to ipbench, and run it
test, testargs = self.get_ipbench_test()
args = (['--test=%s' % test, '--test-args=%s' % ','.join(testargs),
'--test-target=%s' % targetip]
+ ['--client=%s' % h for (_, h) in ipbenchd_hosts])
self._run_ipbench(args, logfile)
finally:
logfile.close()
# terminate ipbenchds
for child in ipbenchds:
if child.poll() is None:
child.terminate()
child.wait()
for (user, host) in ipbenchd_hosts:
self._cleanup_ipbenchd(user, host)
def process_data(self, testdir, raw_iter):
cols = ('Requested Throughput,Achieved Throughput,Sent Throughput,'
'Packet Size,Min,Avg,Max,Standard Deviation,Median')
results = RowResults(cols.split(','))
with open(os.path.join(testdir, LOGFILENAME), 'r') as logfile:
for line in logfile:
m = re.match('(\d+),(\d+),(\d+),(\d+),(\d+),(\d+),(\d+),'
'(\d+\.\d+),(\d+)', line)
assert(m) # line must match, otherwise we have junk output
vals = [float(s) if '.' in s else int(s) for s in m.groups()]
results.add_row(vals)
return results
@tests.add_test
class UDPEchoTest(EchoTestCommon):
'''UDP echo throughput'''
name = "udp_echo"
def get_ipbench_test(self):
(test, args) = super(UDPEchoTest, self).get_ipbench_test()
args.append('socktype=udp')
return (test, args)
@tests.add_test
class TCPEchoTest(EchoTestCommon):
'''TCP echo throughput'''
name = "tcp_echo"
def get_ipbench_test(self):
(test, args) = super(TCPEchoTest, self).get_ipbench_test()
args.append('socktype=tcp')
return (test, args)
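# Illustrative ipbench command line assembled by _run_ipbench (target IP and
# client hosts are placeholders; test name and args are the module defaults):
# ipbench --test=latency --test-args=Mbps=400,size=1300,socktype=tcp \
#         --test-target=10.0.0.2 --client=load0 --client=load1 --client=load2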
|
{
"content_hash": "b26c34817f251bd99689aaa55b578a27",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 82,
"avg_line_length": 39.40880503144654,
"alnum_prop": 0.5647941270347909,
"repo_name": "BarrelfishOS/barrelfish",
"id": "56db9071fc1aa004c5e6482d06480afb17dc6541",
"size": "6697",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/harness/tests/echoserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "999910"
},
{
"name": "Batchfile",
"bytes": "48598"
},
{
"name": "C",
"bytes": "85073276"
},
{
"name": "C#",
"bytes": "99843"
},
{
"name": "C++",
"bytes": "7573528"
},
{
"name": "CMake",
"bytes": "60487"
},
{
"name": "CSS",
"bytes": "1905"
},
{
"name": "DIGITAL Command Language",
"bytes": "277412"
},
{
"name": "Emacs Lisp",
"bytes": "11006"
},
{
"name": "GAP",
"bytes": "23161"
},
{
"name": "GDB",
"bytes": "5634"
},
{
"name": "Gnuplot",
"bytes": "3383"
},
{
"name": "HTML",
"bytes": "872763"
},
{
"name": "Haskell",
"bytes": "1487583"
},
{
"name": "Java",
"bytes": "1384783"
},
{
"name": "JavaScript",
"bytes": "1403"
},
{
"name": "Lex",
"bytes": "95310"
},
{
"name": "Lua",
"bytes": "232"
},
{
"name": "M4",
"bytes": "112741"
},
{
"name": "Makefile",
"bytes": "1684009"
},
{
"name": "Nix",
"bytes": "2368"
},
{
"name": "Objective-C",
"bytes": "197139"
},
{
"name": "Perl",
"bytes": "1351095"
},
{
"name": "PostScript",
"bytes": "12428691"
},
{
"name": "Prolog",
"bytes": "9660208"
},
{
"name": "Python",
"bytes": "456604"
},
{
"name": "RPC",
"bytes": "17532"
},
{
"name": "Raku",
"bytes": "3718"
},
{
"name": "Roff",
"bytes": "3319475"
},
{
"name": "Scheme",
"bytes": "4249"
},
{
"name": "Scilab",
"bytes": "5315"
},
{
"name": "Shell",
"bytes": "539982"
},
{
"name": "Tcl",
"bytes": "708289"
},
{
"name": "TeX",
"bytes": "3480734"
},
{
"name": "VBA",
"bytes": "20687"
},
{
"name": "XS",
"bytes": "4319"
},
{
"name": "Yacc",
"bytes": "253508"
},
{
"name": "eC",
"bytes": "5079"
}
],
"symlink_target": ""
}
|
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.messages import error
from django.contrib.comments.signals import comment_was_posted
from django.core.urlresolvers import reverse
from django.db.models import get_model, ObjectDoesNotExist
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.generic.fields import RatingField
from mezzanine.generic.forms import ThreadedCommentForm
from mezzanine.generic.models import Keyword, Rating
from mezzanine.utils.cache import add_cache_bypass
from mezzanine.utils.email import send_mail_template
from mezzanine.utils.views import render, set_cookie, is_spam
@staff_member_required
def admin_keywords_submit(request):
"""
Adds any new given keywords from the custom keywords field in the
admin, and returns their IDs for use when saving a model with a
keywords field.
"""
ids, titles = [], []
for title in request.POST.get("text_keywords", "").split(","):
title = "".join([c for c in title if c.isalnum() or c in "- "])
title = title.strip().lower()
if title:
keyword, created = Keyword.objects.get_or_create(title=title)
id = str(keyword.id)
if id not in ids:
ids.append(id)
titles.append(title)
return HttpResponse("%s|%s" % (",".join(ids), ", ".join(titles)))
def comment(request, template="generic/comments.html"):
"""
Handle a ``ThreadedCommentForm`` submission and redirect back to its
related object.
"""
post_data = request.POST
settings.use_editable()
if settings.COMMENTS_ACCOUNT_REQUIRED:
if not request.user.is_authenticated():
# Account required but user isn't authenticated - store
# their post data in the session and redirect to login.
request.session["unauthenticated_comment"] = post_data
error(request, _("You must log in to comment. Please log in or "
"sign up, and your comment will be posted."))
url = "%s?next=%s" % (settings.LOGIN_URL, reverse("comment"))
return redirect(url)
elif "unauthenticated_comment" in request.session:
# User has logged in after post data being stored in the
# session for an unauthenticated comment post, so use it.
post_data = request.session.pop("unauthenticated_comment")
try:
model = get_model(*post_data["content_type"].split(".", 1))
obj = model.objects.get(id=post_data["object_pk"])
except (KeyError, TypeError, AttributeError, ObjectDoesNotExist):
# Something was missing from the post so abort.
return HttpResponseRedirect("/")
form = ThreadedCommentForm(request, obj, post_data)
if form.is_valid():
url = obj.get_absolute_url()
if is_spam(request, form, url):
return redirect(url)
comment = form.get_comment_object()
if request.user.is_authenticated():
comment.user = request.user
comment.by_author = request.user == getattr(obj, "user", None)
comment.ip_address = request.META.get("HTTP_X_FORWARDED_FOR",
request.META["REMOTE_ADDR"])
comment.replied_to_id = post_data.get("replied_to")
comment.save()
comment_was_posted.send(sender=comment.__class__, comment=comment,
request=request)
# Send notification emails.
comment_url = add_cache_bypass(comment.get_absolute_url())
notify_emails = filter(None, [addr.strip() for addr in
settings.COMMENTS_NOTIFICATION_EMAILS.split(",")])
if notify_emails:
subject = _("New comment for: ") + unicode(obj)
context = {
"comment": comment,
"comment_url": comment_url,
"request": request,
"obj": obj,
}
send_mail_template(subject, "email/comment_notification",
settings.DEFAULT_FROM_EMAIL, notify_emails,
context, fail_silently=settings.DEBUG)
response = HttpResponseRedirect(comment_url)
# Store commenter's details in a cookie for 90 days.
cookie_expires = 60 * 60 * 24 * 90
for field in ThreadedCommentForm.cookie_fields:
cookie_name = ThreadedCommentForm.cookie_prefix + field
cookie_value = post_data.get(field, "")
set_cookie(response, cookie_name, cookie_value, cookie_expires)
return response
else:
# Show errors with stand-alone comment form.
context = {"obj": obj, "posted_comment_form": form}
return render(request, template, context)
def rating(request):
"""
Handle a ``RatingForm`` submission and redirect back to its
related object.
"""
try:
model = get_model(*request.POST["content_type"].split(".", 1))
obj = model.objects.get(id=request.POST["object_pk"])
url = add_cache_bypass(obj.get_absolute_url()) + "#rating-%s" % obj.id
except (KeyError, TypeError, AttributeError, ObjectDoesNotExist):
# Something was missing from the post so abort.
return HttpResponseRedirect("/")
try:
rating_value = int(request.POST["value"])
except (KeyError, ValueError):
return HttpResponseRedirect(url)
# There can only be one ``RatingField``, find its manager.
for field in obj._meta.many_to_many:
if isinstance(field, RatingField):
rating_manager = getattr(obj, field.name)
break
else:
raise TypeError("%s doesn't contain a RatingField." % obj)
ratings = request.COOKIES.get("mezzanine-rating", "").split(",")
rating_string = "%s.%s" % (request.POST["content_type"],
request.POST["object_pk"])
if rating_string in ratings:
# Already rated so abort.
if request.is_ajax():
response = HttpResponse("err")
else:
response = HttpResponseRedirect(url)
return response
rating_manager.add(Rating(value=rating_value))
if request.is_ajax():
# Reload the object and return the new rating.
obj = model.objects.get(id=request.POST["object_pk"])
response = HttpResponse(str(obj.rating_average))
else:
response = HttpResponseRedirect(url)
ratings.append(rating_string)
expiry = 60 * 60 * 24 * 365
set_cookie(response, "mezzanine-rating", ",".join(ratings), expiry)
return response
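# Example of the "mezzanine-rating" cookie value written above (content
# types and primary keys are illustrative):
# "blog.blogpost.42,generic.threadedcomment.7"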
|
{
"content_hash": "2cca3cd6feafe76ba0ef21ced15102d6",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 78,
"avg_line_length": 43.42307692307692,
"alnum_prop": 0.6260702686743431,
"repo_name": "guibernardino/mezzanine",
"id": "f2931c7e6857123d5fcfb9bcd0cfaae0ab71dcd7",
"size": "6775",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mezzanine/generic/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "22201"
},
{
"name": "JavaScript",
"bytes": "61430"
},
{
"name": "Python",
"bytes": "832496"
}
],
"symlink_target": ""
}
|
import argparse
import multiprocessing
import warnings
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_protein
from sklearn.externals import joblib
from sklearn.preprocessing import normalize
from lib.hmmer import hmmstats
from lib.hmmer import hmmscan
def main():
"""
    The go_predict_blast module takes an input blast TSV file containing sequences and an SVC model object, and makes
    predictions about gene ontology based on the domain scores generated from a HMM domain model database.
"""
sh_parse = argparse.ArgumentParser(description="Predict the classification of a tsv file from cp-blast")
sh_parse.add_argument("-f", "--file", dest="infile", help="Input sequence FILE", metavar="FILE", required=True)
sh_parse.add_argument("-c", "--column", dest="column", help="Sequence column NUMBER (0-index)", metavar="NUMBER",
required=True, type=int)
sh_parse.add_argument("-o", "--out", dest="outfile", help="Output matrix FILE", metavar="FILE", required=True)
sh_parse.add_argument("-d", "--db", dest="database", help="Database FILE", metavar="FILE", required=True)
sh_parse.add_argument("-m", "--model", dest="modelfile", help="Model joblib FILE", metavar="FILE", required=True)
sh_parse.add_argument("--cpu", dest="cores", help="Number of processor CORES to use", metavar="COREs", type=int,
default=1)
sh_args = sh_parse.parse_args()
go_predict_blast(sh_args.infile, sh_args.database, sh_args.modelfile, out_file=sh_args.outfile,
seq_column=sh_args.column, cores=sh_args.cores)
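# Illustrative invocation (hypothetical file names):
#   python go_predict_blast.py -f blast_hits.tsv -c 3 -o predictions.tsv \
#       -d domains.hmm -m svc_model.joblib --cpu 4
# Each input row gains two columns: the boolean prediction and the
# positive-class probability from the SVC model.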
def go_predict_blast(infile_name, database_path, modelfile_name, out_file=None, seq_column=0, cores=2):
svc_model_est = joblib.load(modelfile_name)
hmmer_pool = multiprocessing.Pool(processes=cores, maxtasksperchild=1000)
with open(infile_name, mode="rU") as in_fh:
hmmer_imap = hmmer_pool.imap(PredictFromDomains(database_path, svc_model_est).hmmscan_predict,
line_generator(in_fh, column=seq_column))
with open(out_file, mode="w") as out_fh:
for line, prediction, proba in hmmer_imap:
print(line + "\t{}\t{}".format(prediction, proba), file=out_fh)
def line_generator(in_fh, column=0):
for line in in_fh:
line = line.strip()
        if not line or line.startswith("#"):
            continue
line_tabs = line.split("\t")
sequence = SeqRecord(Seq(line_tabs[column].strip(), alphabet=generic_protein),
id=line_tabs[1].strip(),
name=line_tabs[1].strip())
yield (sequence, line)
class PredictFromDomains:
def __init__(self, database, model, alpha=0.98):
self.database = database
self.domain_idx = hmmstats(database)
self.model = model
self.alpha = alpha
print("Protein domain file parsed: {} domains detected".format(len(self.domain_idx)))
def hmmscan_predict(self, data):
sequence, line = data
sparse_data, _ = hmmscan(sequence, self.database, self.domain_idx)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
predict_proba = self.model.predict_proba(normalize(sparse_data))[0]
if predict_proba[1] > self.alpha:
predict = True
else:
predict = False
return line, predict, predict_proba[1]
if __name__ == '__main__':
main()
|
{
"content_hash": "161e026d1613d1ee0ca0cda621d24c2e",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 117,
"avg_line_length": 38.88172043010753,
"alnum_prop": 0.6219579646017699,
"repo_name": "trappedInARibosome/go-model",
"id": "18f1928d089daae1c4dd975acbdaac70d0fda4dd",
"size": "3616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "go_predict_blast.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35719"
}
],
"symlink_target": ""
}
|
"""
Module : symbol_table
Function : Contains class and function definitions used to implement a symbol table
"""
import ir_generation as IR
class SymTabEntry(object):
SCALAR, ARRAY, HASH = range(3)
def __init__(self, variable):
self.scopeNum = -1 # Indicates the fact that it has not
# been entered into the symtable as of yet
self.varName = variable
self.width = 1
if variable[0] == '$' : self.externalType = SymTabEntry.SCALAR
elif variable[0] == '@' : self.externalType = SymTabEntry.ARRAY
elif variable[0] == '%' : self.externalType = SymTabEntry.HASH
else:
raise Exception("Unrecognized variable type")
self.baseVarName = variable[1:]
typeMap = {SymTabEntry.SCALAR : "SCALAR", SymTabEntry.HASH : "HASH", SymTabEntry.ARRAY : "ARRAY"}
self.place = typeMap[self.externalType] + "__" + self.baseVarName # To differentiate between namespaces for arrays, hashes and scalars
self.typePlace = "TYPE__" + self.place
self.code = []
def CheckDeclaration(self):
return self.scopeNum != -1 or self.baseVarName == '_'
def __str__(self):
return repr(self.varName)
def InsertGlobally(self, symTabManager):
if self.scopeNum == -1:
self.scopeNum = 0
symTabManager.symtables[0].Insert(self, self.varName)
def InsertLocally(self, symTabManager):
self.scopeNum = symTabManager.curScope
self.place = self.place + "_scope_" + str(self.scopeNum)
self.typePlace = self.typePlace + "_scope_" + str(self.scopeNum)
IR.CurActivationRecord.AllocateVariable(self.place, self.width*4)
IR.CurActivationRecord.AllocateVariable(self.typePlace, self.width*4)
symTabManager.curSymTab.Insert(self, self.varName)
def Print(self, filePtr):
typeMap = {SymTabEntry.SCALAR : "SCALAR", SymTabEntry.HASH : "HASH", SymTabEntry.ARRAY : "ARRAY"}
writeString = "TYPE : {:6} | SIZE : 4 bytes | NAME : {:100}".format(typeMap[self.externalType], self.baseVarName)
filePtr.write(writeString + "\n")
class SymTable(object):
def __init__(self, scopeNum, parentScope):
self.entries = {} # Map from ID(name) to entry
self.scopeNum = scopeNum
self.parentScope = parentScope
def Insert(self, entry, varName):
self.entries[varName] = entry
def IsPresent(self, varName):
        return varName in self.entries
def Lookup(self, varName):
return self.entries.get(varName, None)
def Print(self, filePtr):
filePtr.write("###### SYMTABLE SCOPE : %d (Parent Scope : %d) ######\n"%(self.scopeNum, self.parentScope))
for entry in self.entries.values():
entry.Print(filePtr)
filePtr.write("#####################################################\n\n")
class SymTabManager(object):
"""
Purpose : Manages all the symbol tables associated with all scopes.
"""
def __init__(self):
self.symtables = {-1 : None} # Indexed by the scope number
self.nextScopeNumber = 0
self.curScope = -1
self.curSymTab = None
self.scopeStack = []
def PushScope(self):
lastScope = self.curScope
self.scopeStack.append(self.nextScopeNumber)
self.curScope = self.nextScopeNumber
self.curSymTab = SymTable(self.curScope, lastScope)
self.symtables[self.curScope] = self.curSymTab
self.nextScopeNumber += 1
def PopScope(self):
self.scopeStack.pop()
self.curScope = self.scopeStack[-1]
self.curSymTab = self.symtables[self.curScope]
def Lookup(self, varName):
itScope = self.curScope
while (itScope != -1):
symTab = self.symtables[itScope]
if symTab.IsPresent(varName):
return symTab.entries[varName]
itScope = symTab.parentScope
# Create new entry to be entered later
return SymTabEntry(varName)
    def PrintAllSymTables(self, outputFile):
        with open(outputFile[:-3] + '.sym', 'w+') as f:
            for scope, table in self.symtables.items():
                if scope != -1:
                    table.Print(f)
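# Usage sketch for the scope chain (hypothetical variable name):
#   mgr = SymTabManager()
#   mgr.PushScope()              # global scope (number 0)
#   mgr.PushScope()              # a nested scope
#   entry = mgr.Lookup('$x')     # walks nested -> global; fresh entry if absent
#   entry.InsertLocally(mgr)     # declare in the nested scope (also reserves
#                                # space in the current IR activation record)
#   mgr.PopScope()               # back to the global scope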
class ActivationRecord(object):
''' Stores some information required for generating the activation record '''
def __init__(self, funcID):
self.funcID = funcID
self.varLocationMap = {} # Stores the position of the variable in the stack/static region
self.tempVarMap = {}
self.varOffset = 0
self.tempOffset = 0
def AllocateVariable(self, varName, width=4):
self.varLocationMap[varName] = self.varOffset
self.varOffset += width
def AllocateTemp(self, tempName, width=4):
self.tempVarMap[tempName] = self.tempOffset
self.tempOffset += width
def AllocateTypeTemp(self, tempName, width=4):
self.tempVarMap[tempName] = self.tempOffset
self.tempOffset += width
|
{
"content_hash": "05f21d755fe88726459a6cdd963f669e",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 142,
"avg_line_length": 31.493827160493826,
"alnum_prop": 0.6103488827910624,
"repo_name": "vaishious/comperler",
"id": "6c768f3b6bc7069e1762cdaa083ada4495eb4d98",
"size": "5102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/src/irgen/symbol_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "10217"
},
{
"name": "C",
"bytes": "19494"
},
{
"name": "Perl",
"bytes": "4973"
},
{
"name": "Perl6",
"bytes": "684"
},
{
"name": "Python",
"bytes": "131496"
}
],
"symlink_target": ""
}
|
"""Manage class and methods for all types of indicators."""
from ast import literal_eval
from typing import List
import logging
import os
import pandas
from data_source import DataSource
from constants import IndicatorType
import utils
# Load logging configuration
log = logging.getLogger(__name__)
class Indicator:
"""Base class used to compute indicators, regardless of their type."""
def verify_indicator_parameters(self, authorization: str, indicator_type_id: int, parameters: List[dict]):
"""Verify if the list of indicator parameters is valid and return them as a dictionary."""
# Build dictionary of parameter types referential
query = 'query{allParameterTypes{nodes{id, name}}}'
payload = {'query': query}
response = utils.execute_graphql_request(authorization, payload)
parameter_types_referential = {}
for parameter_type in response['data']['allParameterTypes']['nodes']:
parameter_types_referential[parameter_type['id']] = parameter_type['name']
# Build dictionary of indicator parameters
indicator_parameters = {}
for parameter in parameters:
indicator_parameters[parameter['parameterTypeId']] = parameter['value']
# Verify mandatory parameters exist
# Alert operator, Alert threshold, Distribution list, Dimensions, Measures, Target, Target request
missing_parameters = []
for parameter_type_id in [1, 2, 3, 4, 5, 8, 9]:
if parameter_type_id not in indicator_parameters:
parameter_type = parameter_types_referential[parameter_type_id]
missing_parameters.append(parameter_type)
# Verify parameters specific to completeness and latency indicator types
# Source, Source request
if indicator_type_id in [IndicatorType.COMPLETENESS, IndicatorType.LATENCY]:
for parameter_type_id in [6, 7]:
if parameter_type_id not in indicator_parameters:
parameter_type = parameter_types_referential[parameter_type_id]
missing_parameters.append(parameter_type)
if missing_parameters:
missing_parameters = ', '.join(missing_parameters)
error_message = f'Missing parameters: {missing_parameters}.'
log.error(error_message)
raise Exception(error_message)
# Convert distribution list, dimensions and measures parameters to python list
indicator_parameters[3] = literal_eval(indicator_parameters[3]) # Distribution list
indicator_parameters[4] = literal_eval(indicator_parameters[4]) # Dimensions
indicator_parameters[5] = literal_eval(indicator_parameters[5]) # Measures
return indicator_parameters
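    # Illustrative shape of the returned dictionary, keyed by parameter
    # type id (hypothetical values):
    # {1: '>=', 2: '0', 3: ['ops@example.org'], 4: ['country'],
    #  5: ['nb_records'], 8: 'my_target_source', 9: 'SELECT ...'}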
    def get_data_frame(self, authorization: str, data_source: str, request: str, dimensions: List[str], measures: List[str]):
"""Get data from data source. Return a formatted data frame according to dimensions and measures parameters."""
# Get data source credentials
query = 'query getDataSource($name: String!){dataSourceByName(name: $name){id, connectionString, login, dataSourceTypeId}}'
variables = {'name': data_source}
payload = {'query': query, 'variables': variables}
response = utils.execute_graphql_request(authorization, payload)
# Get connection object
if response['data']['dataSourceByName']:
data_source_id = response['data']['dataSourceByName']['id']
data_source_type_id = response['data']['dataSourceByName']['dataSourceTypeId']
connection_string = response['data']['dataSourceByName']['connectionString']
login = response['data']['dataSourceByName']['login']
# Get data source password
data_source = DataSource()
password = data_source.get_password(authorization, data_source_id)
# Connect to data source
log.info('Connect to data source.')
connection = data_source.get_connection(data_source_type_id, connection_string, login, password)
# Get data frame
log.info('Execute request on data source.')
data_frame = pandas.read_sql(request, connection)
connection.close()
if data_frame.empty:
error_message = f'Request on data source {data_source} returned no data.'
log.error(error_message)
log.debug('Request: %s.', request)
raise Exception(error_message)
# Format data frame
log.debug('Format data frame.')
column_names = dimensions + measures
data_frame.columns = column_names
for column in dimensions:
data_frame[column] = data_frame[column].astype(str) # Convert dimension values to string
return data_frame
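    # Illustrative call (hypothetical names): with dimensions=['country']
    # and measures=['nb_records'], the two columns returned by the SQL
    # request are relabeled to 'country' and 'nb_records', and 'country'
    # is cast to string.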
def is_alert(self, measure_value: str, alert_operator: str, alert_threshold: str):
"""
Compare measure to alert threshold based on the alert operator.
Return True if an alert must be sent, False otherwise.
Supported alert operators are: ==, >, >=, <, <=, !=
"""
return eval(measure_value + alert_operator + alert_threshold) # pylint: disable=W0123
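    # Illustrative call: is_alert('12', '>=', '10') evaluates
    # eval('12>=10') and returns True, so an alert would be sent.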
def send_alert(self, indicator_id: int, indicator_name: str, session_id: int, distribution_list: List[str], alert_operator: str, alert_threshold: str, nb_records_alert: str, result_data: pandas.DataFrame):
"""Build the alert e-mail to be sent for the session."""
# Create csv file to send in attachment
file_name = f'indicator_{indicator_id}_session_{session_id}.csv'
file_path = os.path.dirname(__file__) + "/" + file_name
result_data.to_csv(file_path, header=True, index=False)
# Prepare e-mail body
body = {}
body['indicator_name'] = indicator_name
body['indicator_url'] = f'/indicators/{indicator_id}'
body['session_log_url'] = f'/indicators/{indicator_id}/sessions/{session_id}/logs'
body['alert_threshold'] = alert_operator + alert_threshold
body['nb_records_alert'] = nb_records_alert
# Send e-mail
log.info('Send e-mail alert.')
utils.send_mail(session_id, distribution_list, 'indicator', file_path, **body)
os.remove(file_path)
return True
|
{
"content_hash": "b7b16e89ffa7ad33b3f8da2e35b5d799",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 209,
"avg_line_length": 46.13868613138686,
"alnum_prop": 0.6568580920740389,
"repo_name": "alexisrolland/data-quality",
"id": "21a02857e9991e9c378b9019d52bbc904642d36c",
"size": "6321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/init/indicator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "63"
},
{
"name": "Dockerfile",
"bytes": "2426"
},
{
"name": "HTML",
"bytes": "3999"
},
{
"name": "JavaScript",
"bytes": "40173"
},
{
"name": "PLpgSQL",
"bytes": "10320"
},
{
"name": "Python",
"bytes": "80510"
},
{
"name": "Shell",
"bytes": "26835"
}
],
"symlink_target": ""
}
|
"""
Tests project generation
"""
import mmap
import os
import re
from distutils.util import strtobool
import typing
from binaryornot.check import is_binary
from pytest_cookies.plugin import Cookies
import pytest
PATTERN = r"{{(\s?cookiecutter)[.](.*?)}}"
RE_OBJ = re.compile(PATTERN)
EXPECTED_BASE_BAKED_FILES = [
'.commitlint.config.js',
'.dockerignore',
'.flake8',
'.gitignore',
'.konchrc',
'.pre-commit-config.yaml',
'.secrets.baseline',
'Dockerfile',
'docker-entrypoint.sh',
'HISTORY.rst',
'LICENSE',
'Makefile',
'README.rst',
'/docs/Makefile',
'/docs/conf.py',
'/docs/index.rst',
'/docs/make.bat',
'logging.yaml',
'pyproject.toml',
'tox.ini',
]
EXPECTED_BAKED_AUTHORS_FILES = [
'AUTHORS.rst',
]
EXPECTED_BAKED_GITHUB_DEPENDABOT_FILES = [
'/.github/dependabot.yml',
]
EXPECTED_BAKED_GITHUB_ACTIONS_FILES = [
'/.github/labeler.yml',
'/.github/release-drafter.yml',
'/.github/workflows/ci.yml',
'/.github/workflows/codeql-analysis.yml',
'/.github/workflows/commitlint.yml',
'/.github/workflows/hadolint.yml',
'/.github/workflows/pr-labeler.yml',
'/.github/workflows/pr-size-labeler.yml',
'/.github/workflows/release-drafter.yml',
]
EXPECTED_BAKED_GITHUB_ACTIONS_PYPI_PUBLISH_FILES = [
'/.github/workflows/publish.yml',
]
def get_expected_baked_files(package_name: str) -> typing.List[str]:
return EXPECTED_BASE_BAKED_FILES + [
f'/{package_name}/__init__.py',
f'/{package_name}/{package_name}.py',
'/tests/__init__.py',
f'/tests/test_{package_name}.py',
]
def get_expected_baked_default_files(package_name: str) -> typing.List[str]:
return (
get_expected_baked_files(package_name)
+ EXPECTED_BAKED_AUTHORS_FILES
+ EXPECTED_BAKED_GITHUB_DEPENDABOT_FILES
+ EXPECTED_BAKED_GITHUB_ACTIONS_FILES
+ EXPECTED_BAKED_GITHUB_ACTIONS_PYPI_PUBLISH_FILES
)
def build_files_list(
root_dir: str, is_absolute: bool = True
) -> typing.List[str]:
"""Build a list containing abs/relative paths to the generated files."""
return [
os.path.join(dirpath, file_path)
if is_absolute
else os.path.join(dirpath[len(root_dir) :], file_path)
for dirpath, subdirs, files in os.walk(root_dir)
for file_path in files
]
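# Illustrative output (hypothetical project): with is_absolute=False,
# dirpath is sliced by len(root_dir), so top-level files come back bare
# ('README.rst') while nested ones keep a leading separator
# ('/docs/index.rst'); this matches the expected-file lists above.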
def check_paths_substitution(paths: typing.List[str]) -> None:
for path in paths:
if is_binary(path):
continue
        with open(path) as path_fh:
            for line in path_fh:
                match = RE_OBJ.search(line)
                assert (
                    match is None
                ), f"cookiecutter variable not replaced in {path}"
def check_paths_exist(
expected_paths: typing.List[str], baked_files: typing.List[str]
) -> None:
baked_files_no_pycache = list(
filter(lambda x: '__pycache__' not in x, baked_files)
)
assert len(expected_paths) == len(baked_files_no_pycache)
    for expected_path in expected_paths:
assert expected_path in baked_files_no_pycache
def test_with_default_configuration(
cookies: Cookies, default_context: typing.Dict[str, str]
) -> None:
baked_project = cookies.bake(extra_context=default_context)
assert baked_project.exit_code == 0
assert baked_project.exception is None
assert baked_project.project_path.is_dir()
abs_baked_files = build_files_list(str(baked_project.project_path))
assert abs_baked_files
check_paths_substitution(abs_baked_files)
rel_baked_files = build_files_list(
str(baked_project.project_path), is_absolute=False
)
assert rel_baked_files
check_paths_exist(
get_expected_baked_default_files(default_context['package_name']),
rel_baked_files,
)
def test_with_parameterized_configuration(
cookies: Cookies, context: typing.Dict[str, str]
) -> None:
baked_project = cookies.bake(extra_context=context)
assert baked_project.exit_code == 0
assert baked_project.exception is None
assert baked_project.project_path.is_dir()
abs_baked_files = build_files_list(str(baked_project.project_path))
assert abs_baked_files
check_paths_substitution(abs_baked_files)
rel_baked_files = build_files_list(
str(baked_project.project_path), is_absolute=False
)
assert rel_baked_files
print(f"author file: {context['should_create_author_files']}")
print(f"dependabot: {context['should_install_github_dependabot']}")
print(f"gh actions: {context['should_install_github_actions']}")
print(f"pypi: {context['should_publish_to_pypi']}")
if (
strtobool(context['should_create_author_files'])
and strtobool(context['should_install_github_dependabot'])
and strtobool(context['should_install_github_actions'])
and strtobool(context['should_publish_to_pypi'])
):
check_paths_exist(
get_expected_baked_default_files(context['package_name']),
rel_baked_files,
)
elif (
strtobool(context['should_create_author_files'])
and strtobool(context['should_install_github_dependabot'])
and strtobool(context['should_install_github_actions'])
and not strtobool(context['should_publish_to_pypi'])
):
check_paths_exist(
get_expected_baked_files(context['package_name'])
+ EXPECTED_BAKED_AUTHORS_FILES
+ EXPECTED_BAKED_GITHUB_DEPENDABOT_FILES
+ EXPECTED_BAKED_GITHUB_ACTIONS_FILES,
rel_baked_files,
)
elif (
strtobool(context['should_create_author_files'])
and strtobool(context['should_install_github_dependabot'])
and not strtobool(context['should_install_github_actions'])
):
check_paths_exist(
get_expected_baked_files(context['package_name'])
+ EXPECTED_BAKED_AUTHORS_FILES
+ EXPECTED_BAKED_GITHUB_DEPENDABOT_FILES,
rel_baked_files,
)
elif (
strtobool(context['should_create_author_files'])
and not strtobool(context['should_install_github_dependabot'])
and strtobool(context['should_install_github_actions'])
and strtobool(context['should_publish_to_pypi'])
):
check_paths_exist(
get_expected_baked_files(context['package_name'])
+ EXPECTED_BAKED_AUTHORS_FILES
+ EXPECTED_BAKED_GITHUB_ACTIONS_FILES
+ EXPECTED_BAKED_GITHUB_ACTIONS_PYPI_PUBLISH_FILES,
rel_baked_files,
)
elif (
strtobool(context['should_create_author_files'])
and not strtobool(context['should_install_github_dependabot'])
and strtobool(context['should_install_github_actions'])
and not strtobool(context['should_publish_to_pypi'])
):
check_paths_exist(
get_expected_baked_files(context['package_name'])
+ EXPECTED_BAKED_AUTHORS_FILES
+ EXPECTED_BAKED_GITHUB_ACTIONS_FILES,
rel_baked_files,
)
elif (
strtobool(context['should_create_author_files'])
and not strtobool(context['should_install_github_dependabot'])
and not strtobool(context['should_install_github_actions'])
):
check_paths_exist(
get_expected_baked_files(context['package_name'])
+ EXPECTED_BAKED_AUTHORS_FILES,
rel_baked_files,
)
elif (
not strtobool(context['should_create_author_files'])
and strtobool(context['should_install_github_dependabot'])
and strtobool(context['should_install_github_actions'])
and strtobool(context['should_publish_to_pypi'])
):
check_paths_exist(
get_expected_baked_files(context['package_name'])
+ EXPECTED_BAKED_GITHUB_DEPENDABOT_FILES
+ EXPECTED_BAKED_GITHUB_ACTIONS_FILES
+ EXPECTED_BAKED_GITHUB_ACTIONS_PYPI_PUBLISH_FILES,
rel_baked_files,
)
elif (
not strtobool(context['should_create_author_files'])
and strtobool(context['should_install_github_dependabot'])
and strtobool(context['should_install_github_actions'])
and not strtobool(context['should_publish_to_pypi'])
):
check_paths_exist(
get_expected_baked_files(context['package_name'])
+ EXPECTED_BAKED_GITHUB_DEPENDABOT_FILES
+ EXPECTED_BAKED_GITHUB_ACTIONS_FILES,
rel_baked_files,
)
elif (
not strtobool(context['should_create_author_files'])
and strtobool(context['should_install_github_dependabot'])
and not strtobool(context['should_install_github_actions'])
):
check_paths_exist(
get_expected_baked_files(context['package_name'])
+ EXPECTED_BAKED_GITHUB_DEPENDABOT_FILES,
rel_baked_files,
)
elif (
not strtobool(context['should_create_author_files'])
and not strtobool(context['should_install_github_dependabot'])
and strtobool(context['should_install_github_actions'])
and strtobool(context['should_publish_to_pypi'])
):
check_paths_exist(
get_expected_baked_files(context['package_name'])
+ EXPECTED_BAKED_GITHUB_ACTIONS_FILES
+ EXPECTED_BAKED_GITHUB_ACTIONS_PYPI_PUBLISH_FILES,
rel_baked_files,
)
elif (
not strtobool(context['should_create_author_files'])
and not strtobool(context['should_install_github_dependabot'])
and strtobool(context['should_install_github_actions'])
and not strtobool(context['should_publish_to_pypi'])
):
check_paths_exist(
get_expected_baked_files(context['package_name'])
+ EXPECTED_BAKED_GITHUB_ACTIONS_FILES,
rel_baked_files,
)
elif (
not strtobool(context['should_create_author_files'])
and not strtobool(context['should_install_github_dependabot'])
and not strtobool(context['should_install_github_actions'])
):
check_paths_exist(
get_expected_baked_files(context['package_name']), rel_baked_files
)
else:
pytest.fail('eeps. missed a case')
@pytest.mark.parametrize('codecov', ['y', 'n'])
def test_with_codecov(
cookies: Cookies, default_context: typing.Dict[str, str], codecov: str
) -> None:
default_context['should_upload_coverage_to_codecov'] = codecov
baked_project = cookies.bake(extra_context=default_context)
assert baked_project.exit_code == 0
assert baked_project.exception is None
assert baked_project.project_path.is_dir()
abs_baked_files = build_files_list(str(baked_project.project_path))
for path in abs_baked_files:
if 'ci.yml' in path:
with open(path, 'rb', 0) as file, mmap.mmap(
file.fileno(), 0, access=mmap.ACCESS_READ
) as s:
if s.find(b'codecov') == -1 and codecov == 'y':
pytest.fail('Should have codecov')
elif s.find(b'codecov') != -1 and codecov == 'n':
pytest.fail('Should not have codecov')
@pytest.mark.parametrize('poetry_version', ['8.0.8', '4.2.0'])
def test_with_poetry_version(
cookies: Cookies,
default_context: typing.Dict[str, str],
poetry_version: str,
) -> None:
default_context['poetry_version'] = poetry_version
baked_project = cookies.bake(extra_context=default_context)
assert baked_project.exit_code == 0
assert baked_project.exception is None
assert baked_project.project_path.is_dir()
abs_baked_files = build_files_list(str(baked_project.project_path))
for path in abs_baked_files:
if 'Dockerfile' in path:
with open(path, 'rb', 0) as file, mmap.mmap(
file.fileno(), 0, access=mmap.ACCESS_READ
) as s:
if s.find(f"POETRY_VERSION={poetry_version}".encode()) == -1:
pytest.fail('Should have appropriate poetry version')
def test_pyproject_with_default_configuration(
cookies: Cookies,
default_context: typing.Dict[str, str],
) -> None:
baked_project = cookies.bake(extra_context=default_context)
assert baked_project.exit_code == 0
assert baked_project.exception is None
assert baked_project.project_path.is_dir()
abs_baked_files = build_files_list(str(baked_project.project_path))
for path in abs_baked_files:
if 'pyproject.toml' in path:
with open(path, 'rb', 0) as file, mmap.mmap(
file.fileno(), 0, access=mmap.ACCESS_READ
) as s:
if s.find(b'[tool.mypy]') == -1:
pytest.fail('Should have mypy configuration section')
# vim: fenc=utf-8
# vim: filetype=python
|
{
"content_hash": "f61f483094198c7da41d4dbe991632e9",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 78,
"avg_line_length": 33.432291666666664,
"alnum_prop": 0.6306278236485434,
"repo_name": "ryankanno/cookiecutter-py",
"id": "af2c7daffea6b5f03b6e766425f0e7c6772ba220",
"size": "12998",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_cookiecutter_generation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7027"
},
{
"name": "Dockerfile",
"bytes": "2071"
},
{
"name": "JavaScript",
"bytes": "384"
},
{
"name": "Makefile",
"bytes": "9487"
},
{
"name": "Python",
"bytes": "31779"
},
{
"name": "Shell",
"bytes": "36"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os, sys
import json
import datetime
import argparse
from github import Github
from bearlib.config import Config
def getUser(o):
result = {}
if o is not None:
result['login'] = o.login
result['name'] = o.name
result['id'] = o.id
return result
def getDate(d):
if d is None:
return ""
else:
return datetime.datetime.strftime(d, "%Y%m%dT%H%M%SZ")
def getMilestone(o):
result = {}
if o is not None:
result["id"] = o.id
result["state"] = o.state
result["number"] = o.number
result["description"] = o.description
result["title"] = o.title
result["due_on"] = getDate(o.due_on)
result["created_at"] = getDate(o.created_at)
result["updated_at"] = getDate(o.updated_at)
return result
def getComment(o):
result = {}
if o is not None:
result['id'] = o.id
result['body'] = o.body
result['user'] = getUser(o.user)
result['created_at'] = getDate(o.created_at)
result['updated_at'] = getDate(o.updated_at)
return result
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', default='./archive.cfg')
parser.add_argument('-i', '--issues', action='store_true')
parser.add_argument('-o', '--org')
parser.add_argument('-r', '--repo')
args = parser.parse_args()
cfg = Config()
cfg.fromJson(args.config)
if cfg.auth_token is None:
        print('Unable to load configuration file %s' % args.config, file=sys.stderr)
        sys.exit(1)
else:
gh = Github(cfg.auth_token)
org = gh.get_organization(args.org)
repo = org.get_repo(args.repo)
if repo is not None:
print('scanning', repo.name)
data = {}
if args.issues:
data['issues'] = []
for issue in repo.get_issues(state="all"):
i = { "id": issue.id,
"state": issue.state,
"body": issue.body,
"number": issue.number,
"repo": repo.name,
"assignee": getUser(issue.assignee),
"user": getUser(issue.user),
"milestone": getMilestone(issue.milestone),
"closed_at": getDate(issue.closed_at),
"closed_by": getUser(issue.closed_by),
"title": issue.title,
"url": issue.url,
"created_at": getDate(issue.created_at),
"labels": [],
"comments": [],
}
for comment in issue.get_comments():
i['comments'].append(getComment(comment))
for label in issue.labels:
i['labels'].append(label.name)
data['issues'].append(i)
print('\t', len(data['issues']), 'issues')
with open('%s.json' % repo.name, 'w+') as h:
h.write(json.dumps(data, indent=2))
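# Illustrative invocation (hypothetical organization and repository),
# which writes <repo>.json in the working directory:
#   python archive.py -c ./archive.cfg -i -o someorg -r somerepo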
|
{
"content_hash": "4426b2e1441195f154d7f2632946df4d",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 70,
"avg_line_length": 33.19,
"alnum_prop": 0.4754444109671588,
"repo_name": "bear/github-archive",
"id": "7c095ddeb90d8d42371e582fbde3e8b442355742",
"size": "3431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "archive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3431"
},
{
"name": "Shell",
"bytes": "558"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import cx_Oracle
import db_config
con = cx_Oracle.connect(db_config.user, db_config.pw, db_config.dsn)
print(cx_Oracle.version)
print("Database version:", con.version)
print("Client version:", cx_Oracle.clientversion())
|
{
"content_hash": "4ebec1ec9b2146fcc18159e07e4f0214",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 68,
"avg_line_length": 26.1,
"alnum_prop": 0.7547892720306514,
"repo_name": "kawamon/hue",
"id": "513ac30cd013acd5e02096a8ef1dd493a0360437",
"size": "686",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/cx_Oracle-6.4.1/samples/tutorial/solutions/versions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
}
|
import pygame
from SystemPanic.Core.draw_util import draw_text
def draw_game_over_screen(game_surface, game_state):
# Add the background
game_surface.blit(
game_state["active_config"]["background"],
[0, 0]
)
draw_text(game_surface, "GAME OVER", (160, 120), game_state["garbled"])
draw_text(game_surface, "FINAL SCORE: %s" % (game_state["score"],), (160, 130), game_state["garbled"])
if game_state["mode_specific"].get("fade_percent") is not None:
fade_mask = pygame.Surface(game_surface.get_size(), flags=pygame.SRCALPHA)
alpha = 255 - game_state["mode_specific"]["fade_percent"] * 255.0
if alpha < 0:
alpha = 0
fade_mask.fill((0, 0, 0, alpha))
game_surface.blit(
fade_mask,
[0, 0]
)
def advance_game_over(paks, game_state, time_since_start, delta_t):
from SystemPanic.Core.game_state import change_mode, GAME_MODES, new_game_state
fade_time = 0.5 # seconds to fade out over
# We want to ensure that the player has released the fire button before advancing to the title screen,
# since it's likely that they'll die with the fire button pressed.
if game_state["pressed_buttons"]["fire"] is False:
game_state["mode_specific"]["fire_released"] = True
elif game_state["mode_specific"].get("fire_released") is True:
game_state["mode_specific"]["fade_timer"] = fade_time
if game_state["mode_specific"].get("fade_timer") is not None:
game_state["mode_specific"]["fade_timer"] -= delta_t
game_state["mode_specific"]["fade_percent"] = game_state["mode_specific"]["fade_timer"] / fade_time
if game_state["mode_specific"]["fade_timer"] <= 0.0:
game_state = new_game_state(paks, 0)
return change_mode(game_state, GAME_MODES.TITLE_SCREEN)
return game_state
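# Fade math sketch: fade_percent runs from 1.0 down to 0.0 as fade_timer
# counts down, so in draw_game_over_screen the mask alpha
# (255 - fade_percent * 255) climbs from 0 to 255 and the screen dims to
# black just before the state resets to the title screen.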
|
{
"content_hash": "87df1e0a6075641ee6d24a1f6a9ce3e7",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 107,
"avg_line_length": 40.76086956521739,
"alnum_prop": 0.6325333333333333,
"repo_name": "xaroth8088/SystemPanic",
"id": "c8c23580df1960d874ca4cda6d8774aee57df846",
"size": "1875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SystemPanic/Core/Screens/game_over.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "245"
},
{
"name": "Python",
"bytes": "147122"
}
],
"symlink_target": ""
}
|
import sys
# NullWriter defines a dummy file object which does nothing with its output
class NullWriter:
def write(self, s):
pass
# BeQuiet allows stderr output to be temporarily suppressed:
#
# with BeQuiet():
# stuff stuff stuff
class BeQuiet:
    def __enter__(self):
        self.old_stderr = sys.stderr
        sys.stderr = NullWriter()
        return self
def __exit__(self, type, value, traceback):
sys.stderr = self.old_stderr
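# Minimal usage sketch (assumes nothing beyond this module): the first
# write is swallowed by NullWriter, the second reaches the real stderr.
if __name__ == '__main__':
    with BeQuiet():
        sys.stderr.write("suppressed\n")
    sys.stderr.write("stderr restored\n")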
|
{
"content_hash": "a5cb0e70de1d329f454ed94f8326dffa",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 24.77777777777778,
"alnum_prop": 0.6614349775784754,
"repo_name": "BinaryMuse/moodle-tools",
"id": "2b2975fe79f4920bc526d4c8f629b988b28ccb6a",
"size": "590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/BeQuiet.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13460"
},
{
"name": "Shell",
"bytes": "1974"
}
],
"symlink_target": ""
}
|
r"""
Check Python source code formatting, according to PEP 8.
For usage and a list of options, try this:
$ python pep8.py -h
This program and its regression test suite live here:
http://github.com/jcrocholl/pep8
Groups of errors and warnings:
E errors
W warnings
100 indentation
200 whitespace
300 blank lines
400 imports
500 line length
600 deprecation
700 statements
900 syntax error
"""
from __future__ import with_statement
__version__ = '1.5.7'
import os
import sys
import re
import time
import inspect
import keyword
import tokenize
from optparse import OptionParser
from fnmatch import fnmatch
try:
from configparser import RawConfigParser
from io import TextIOWrapper
except ImportError:
from ConfigParser import RawConfigParser
DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__'
DEFAULT_IGNORE = 'E123,E226,E24'
if sys.platform == 'win32':
DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
else:
DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
os.path.expanduser('~/.config'), 'pep8')
PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite')
MAX_LINE_LENGTH = 79
REPORT_FORMAT = {
'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s',
'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s',
}
PyCF_ONLY_AST = 1024
SINGLETONS = frozenset(['False', 'None', 'True'])
KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS
UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-'])
ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-'])
WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%'])
WS_NEEDED_OPERATORS = frozenset([
'**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>',
'%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '='])
WHITESPACE = frozenset(' \t')
NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT])
# ERRORTOKEN is triggered by backticks in Python 3
SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN])
BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines']
INDENT_REGEX = re.compile(r'([ \t]*)')
RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,')
RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,.*,\s*\w+\s*$')
ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b')
DOCSTRING_REGEX = re.compile(r'u?r?["\']')
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?: |\t)')
COMPARE_SINGLETON_REGEX = re.compile(r'([=!]=)\s*(None|False|True)')
COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+[^[({ ]+\s+(in|is)\s')
COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s*type(?:s.\w+Type'
r'|\s*\(\s*([^)]*[^ )])\s*\))')
KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS))
OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)')
LAMBDA_REGEX = re.compile(r'\blambda\b')
HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$')
# Work around Python < 2.6 behaviour, which does not generate NL after
# a comment which is on a line by itself.
COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n'
##############################################################################
# Plugins (check functions) for physical lines
##############################################################################
def tabs_or_spaces(physical_line, indent_char):
r"""Never mix tabs and spaces.
The most popular way of indenting Python is with spaces only. The
second-most popular way is with tabs only. Code indented with a mixture
of tabs and spaces should be converted to using spaces exclusively. When
invoking the Python command line interpreter with the -t option, it issues
warnings about code that illegally mixes tabs and spaces. When using -tt
these warnings become errors. These options are highly recommended!
Okay: if a == 0:\n a = 1\n b = 1
E101: if a == 0:\n a = 1\n\tb = 1
"""
indent = INDENT_REGEX.match(physical_line).group(1)
for offset, char in enumerate(indent):
if char != indent_char:
return offset, "E101 indentation contains mixed spaces and tabs"
def tabs_obsolete(physical_line):
r"""For new projects, spaces-only are strongly recommended over tabs.
Okay: if True:\n return
W191: if True:\n\treturn
"""
indent = INDENT_REGEX.match(physical_line).group(1)
if '\t' in indent:
return indent.index('\t'), "W191 indentation contains tabs"
def trailing_whitespace(physical_line):
r"""Trailing whitespace is superfluous.
The warning returned varies on whether the line itself is blank, for easier
filtering for those who want to indent their blank lines.
Okay: spam(1)\n#
W291: spam(1) \n#
W293: class Foo(object):\n \n bang = 12
"""
physical_line = physical_line.rstrip('\n') # chr(10), newline
physical_line = physical_line.rstrip('\r') # chr(13), carriage return
physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L
stripped = physical_line.rstrip(' \t\v')
if physical_line != stripped:
if stripped:
return len(stripped), "W291 trailing whitespace"
else:
return 0, "W293 blank line contains whitespace"
def trailing_blank_lines(physical_line, lines, line_number, total_lines):
r"""Trailing blank lines are superfluous.
Okay: spam(1)
W391: spam(1)\n
However the last line should end with a new line (warning W292).
"""
if line_number == total_lines:
stripped_last_line = physical_line.rstrip()
if not stripped_last_line:
return 0, "W391 blank line at end of file"
if stripped_last_line == physical_line:
return len(physical_line), "W292 no newline at end of file"
def maximum_line_length(physical_line, max_line_length, multiline):
r"""Limit all lines to a maximum of 79 characters.
There are still many devices around that are limited to 80 character
lines; plus, limiting windows to 80 characters makes it possible to have
several windows side-by-side. The default wrapping on such devices looks
ugly. Therefore, please limit all lines to a maximum of 79 characters.
For flowing long blocks of text (docstrings or comments), limiting the
length to 72 characters is recommended.
Reports error E501.
"""
line = physical_line.rstrip()
length = len(line)
if length > max_line_length and not noqa(line):
# Special case for long URLs in multi-line docstrings or comments,
# but still report the error when the 72 first chars are whitespaces.
chunks = line.split()
if ((len(chunks) == 1 and multiline) or
(len(chunks) == 2 and chunks[0] == '#')) and \
len(line) - len(chunks[-1]) < max_line_length - 7:
return
if hasattr(line, 'decode'): # Python 2
# The line could contain multi-byte characters
try:
length = len(line.decode('utf-8'))
except UnicodeError:
pass
if length > max_line_length:
return (max_line_length, "E501 line too long "
"(%d > %d characters)" % (length, max_line_length))
##############################################################################
# Plugins (check functions) for logical lines
##############################################################################
def blank_lines(logical_line, blank_lines, indent_level, line_number,
blank_before, previous_logical, previous_indent_level):
r"""Separate top-level function and class definitions with two blank lines.
Method definitions inside a class are separated by a single blank line.
Extra blank lines may be used (sparingly) to separate groups of related
functions. Blank lines may be omitted between a bunch of related
one-liners (e.g. a set of dummy implementations).
Use blank lines in functions, sparingly, to indicate logical sections.
Okay: def a():\n pass\n\n\ndef b():\n pass
Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass
E301: class Foo:\n b = 0\n def bar():\n pass
E302: def a():\n pass\n\ndef b(n):\n pass
E303: def a():\n pass\n\n\n\ndef b(n):\n pass
E303: def a():\n\n\n\n pass
E304: @decorator\n\ndef a():\n pass
"""
if line_number < 3 and not previous_logical:
return # Don't expect blank lines before the first line
if previous_logical.startswith('@'):
if blank_lines:
yield 0, "E304 blank lines found after function decorator"
elif blank_lines > 2 or (indent_level and blank_lines == 2):
yield 0, "E303 too many blank lines (%d)" % blank_lines
elif logical_line.startswith(('def ', 'class ', '@')):
if indent_level:
if not (blank_before or previous_indent_level < indent_level or
DOCSTRING_REGEX.match(previous_logical)):
yield 0, "E301 expected 1 blank line, found 0"
elif blank_before != 2:
yield 0, "E302 expected 2 blank lines, found %d" % blank_before
def extraneous_whitespace(logical_line):
r"""Avoid extraneous whitespace.
Avoid extraneous whitespace in these situations:
- Immediately inside parentheses, brackets or braces.
- Immediately before a comma, semicolon, or colon.
Okay: spam(ham[1], {eggs: 2})
E201: spam( ham[1], {eggs: 2})
E201: spam(ham[ 1], {eggs: 2})
E201: spam(ham[1], { eggs: 2})
E202: spam(ham[1], {eggs: 2} )
E202: spam(ham[1 ], {eggs: 2})
E202: spam(ham[1], {eggs: 2 })
E203: if x == 4: print x, y; x, y = y , x
E203: if x == 4: print x, y ; x, y = y, x
E203: if x == 4 : print x, y; x, y = y, x
"""
line = logical_line
for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line):
text = match.group()
char = text.strip()
found = match.start()
if text == char + ' ':
# assert char in '([{'
yield found + 1, "E201 whitespace after '%s'" % char
elif line[found - 1] != ',':
code = ('E202' if char in '}])' else 'E203') # if char in ',;:'
yield found, "%s whitespace before '%s'" % (code, char)
def whitespace_around_keywords(logical_line):
r"""Avoid extraneous whitespace around keywords.
Okay: True and False
E271: True and False
E272: True and False
E273: True and\tFalse
E274: True\tand False
"""
for match in KEYWORD_REGEX.finditer(logical_line):
before, after = match.groups()
if '\t' in before:
yield match.start(1), "E274 tab before keyword"
elif len(before) > 1:
yield match.start(1), "E272 multiple spaces before keyword"
if '\t' in after:
yield match.start(2), "E273 tab after keyword"
elif len(after) > 1:
yield match.start(2), "E271 multiple spaces after keyword"
def missing_whitespace(logical_line):
r"""Each comma, semicolon or colon should be followed by whitespace.
Okay: [a, b]
Okay: (3,)
Okay: a[1:4]
Okay: a[:4]
Okay: a[1:]
Okay: a[1:4:2]
E231: ['a','b']
E231: foo(bar,baz)
E231: [{'a':'b'}]
"""
line = logical_line
for index in range(len(line) - 1):
char = line[index]
if char in ',;:' and line[index + 1] not in WHITESPACE:
before = line[:index]
if char == ':' and before.count('[') > before.count(']') and \
before.rfind('{') < before.rfind('['):
continue # Slice syntax, no space required
if char == ',' and line[index + 1] == ')':
continue # Allow tuple with only one element: (3,)
yield index, "E231 missing whitespace after '%s'" % char
def indentation(logical_line, previous_logical, indent_char,
indent_level, previous_indent_level):
r"""Use 4 spaces per indentation level.
For really old code that you don't want to mess up, you can continue to
use 8-space tabs.
Okay: a = 1
Okay: if a == 0:\n a = 1
E111: a = 1
Okay: for item in items:\n pass
E112: for item in items:\npass
Okay: a = 1\nb = 2
E113: a = 1\n b = 2
"""
if indent_char == ' ' and indent_level % 4:
yield 0, "E111 indentation is not a multiple of four"
indent_expect = previous_logical.endswith(':')
if indent_expect and indent_level <= previous_indent_level:
yield 0, "E112 expected an indented block"
if indent_level > previous_indent_level and not indent_expect:
yield 0, "E113 unexpected indentation"
def continued_indentation(logical_line, tokens, indent_level, hang_closing,
indent_char, noqa, verbose):
r"""Continuation lines indentation.
Continuation lines should align wrapped elements either vertically
using Python's implicit line joining inside parentheses, brackets
and braces, or using a hanging indent.
When using a hanging indent these considerations should be applied:
- there should be no arguments on the first line, and
- further indentation should be used to clearly distinguish itself as a
continuation line.
Okay: a = (\n)
E123: a = (\n )
Okay: a = (\n 42)
E121: a = (\n 42)
E122: a = (\n42)
E123: a = (\n 42\n )
E124: a = (24,\n 42\n)
E125: if (\n b):\n pass
E126: a = (\n 42)
E127: a = (24,\n 42)
E128: a = (24,\n 42)
E129: if (a or\n b):\n pass
E131: a = (\n 42\n 24)
"""
first_row = tokens[0][2][0]
nrows = 1 + tokens[-1][2][0] - first_row
if noqa or nrows == 1:
return
# indent_next tells us whether the next block is indented; assuming
# that it is indented by 4 spaces, then we should not allow 4-space
# indents on the final continuation line; in turn, some other
# indents are allowed to have an extra 4 spaces.
indent_next = logical_line.endswith(':')
row = depth = 0
valid_hangs = (4,) if indent_char != '\t' else (4, 8)
# remember how many brackets were opened on each line
parens = [0] * nrows
# relative indents of physical lines
rel_indent = [0] * nrows
# for each depth, collect a list of opening rows
open_rows = [[0]]
# for each depth, memorize the hanging indentation
hangs = [None]
# visual indents
indent_chances = {}
last_indent = tokens[0][2]
visual_indent = None
# for each depth, memorize the visual indent column
indent = [last_indent[1]]
if verbose >= 3:
print(">>> " + tokens[0][4].rstrip())
for token_type, text, start, end, line in tokens:
newline = row < start[0] - first_row
if newline:
row = start[0] - first_row
newline = not last_token_multiline and token_type not in NEWLINE
if newline:
# this is the beginning of a continuation line.
last_indent = start
if verbose >= 3:
print("... " + line.rstrip())
# record the initial indent.
rel_indent[row] = expand_indent(line) - indent_level
# identify closing bracket
close_bracket = (token_type == tokenize.OP and text in ']})')
# is the indent relative to an opening bracket line?
for open_row in reversed(open_rows[depth]):
hang = rel_indent[row] - rel_indent[open_row]
hanging_indent = hang in valid_hangs
if hanging_indent:
break
if hangs[depth]:
hanging_indent = (hang == hangs[depth])
# is there any chance of visual indent?
visual_indent = (not close_bracket and hang > 0 and
indent_chances.get(start[1]))
if close_bracket and indent[depth]:
# closing bracket for visual indent
if start[1] != indent[depth]:
yield (start, "E124 closing bracket does not match "
"visual indentation")
elif close_bracket and not hang:
# closing bracket matches indentation of opening bracket's line
if hang_closing:
yield start, "E133 closing bracket is missing indentation"
elif indent[depth] and start[1] < indent[depth]:
if visual_indent is not True:
# visual indent is broken
yield (start, "E128 continuation line "
"under-indented for visual indent")
elif hanging_indent or (indent_next and rel_indent[row] == 8):
# hanging indent is verified
if close_bracket and not hang_closing:
yield (start, "E123 closing bracket does not match "
"indentation of opening bracket's line")
hangs[depth] = hang
elif visual_indent is True:
# visual indent is verified
indent[depth] = start[1]
elif visual_indent in (text, str):
# ignore token lined up with matching one from a previous line
pass
else:
# indent is broken
if hang <= 0:
error = "E122", "missing indentation or outdented"
elif indent[depth]:
error = "E127", "over-indented for visual indent"
elif not close_bracket and hangs[depth]:
error = "E131", "unaligned for hanging indent"
else:
hangs[depth] = hang
if hang > 4:
error = "E126", "over-indented for hanging indent"
else:
error = "E121", "under-indented for hanging indent"
yield start, "%s continuation line %s" % error
# look for visual indenting
if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT)
and not indent[depth]):
indent[depth] = start[1]
indent_chances[start[1]] = True
if verbose >= 4:
print("bracket depth %s indent to %s" % (depth, start[1]))
# deal with implicit string concatenation
elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
text in ('u', 'ur', 'b', 'br')):
indent_chances[start[1]] = str
# special case for the "if" statement because len("if (") == 4
elif not indent_chances and not row and not depth and text == 'if':
indent_chances[end[1] + 1] = True
elif text == ':' and line[end[1]:].isspace():
open_rows[depth].append(row)
# keep track of bracket depth
if token_type == tokenize.OP:
if text in '([{':
depth += 1
indent.append(0)
hangs.append(None)
if len(open_rows) == depth:
open_rows.append([])
open_rows[depth].append(row)
parens[row] += 1
if verbose >= 4:
print("bracket depth %s seen, col %s, visual min = %s" %
(depth, start[1], indent[depth]))
elif text in ')]}' and depth > 0:
# parent indents should not be more than this one
prev_indent = indent.pop() or last_indent[1]
hangs.pop()
for d in range(depth):
if indent[d] > prev_indent:
indent[d] = 0
for ind in list(indent_chances):
if ind >= prev_indent:
del indent_chances[ind]
del open_rows[depth + 1:]
depth -= 1
if depth:
indent_chances[indent[depth]] = True
for idx in range(row, -1, -1):
if parens[idx]:
parens[idx] -= 1
break
assert len(indent) == depth + 1
if start[1] not in indent_chances:
# allow to line up tokens
indent_chances[start[1]] = text
last_token_multiline = (start[0] != end[0])
if last_token_multiline:
rel_indent[end[0] - first_row] = rel_indent[row]
if indent_next and expand_indent(line) == indent_level + 4:
pos = (start[0], indent[0] + 4)
if visual_indent:
code = "E129 visually indented line"
else:
code = "E125 continuation line"
yield pos, "%s with same indent as next logical line" % code
def whitespace_before_parameters(logical_line, tokens):
r"""Avoid extraneous whitespace.
Avoid extraneous whitespace in the following situations:
- before the open parenthesis that starts the argument list of a
function call.
- before the open parenthesis that starts an indexing or slicing.
Okay: spam(1)
E211: spam (1)
Okay: dict['key'] = list[index]
E211: dict ['key'] = list[index]
E211: dict['key'] = list [index]
"""
prev_type, prev_text, __, prev_end, __ = tokens[0]
for index in range(1, len(tokens)):
token_type, text, start, end, __ = tokens[index]
if (token_type == tokenize.OP and
text in '([' and
start != prev_end and
(prev_type == tokenize.NAME or prev_text in '}])') and
# Syntax "class A (B):" is allowed, but avoid it
(index < 2 or tokens[index - 2][1] != 'class') and
# Allow "return (a.foo for a in range(5))"
not keyword.iskeyword(prev_text)):
yield prev_end, "E211 whitespace before '%s'" % text
prev_type = token_type
prev_text = text
prev_end = end
def whitespace_around_operator(logical_line):
r"""Avoid extraneous whitespace around an operator.
Okay: a = 12 + 3
E221: a = 4 + 5
E222: a = 4 + 5
E223: a = 4\t+ 5
E224: a = 4 +\t5
"""
for match in OPERATOR_REGEX.finditer(logical_line):
before, after = match.groups()
if '\t' in before:
yield match.start(1), "E223 tab before operator"
elif len(before) > 1:
yield match.start(1), "E221 multiple spaces before operator"
if '\t' in after:
yield match.start(2), "E224 tab after operator"
elif len(after) > 1:
yield match.start(2), "E222 multiple spaces after operator"
def missing_whitespace_around_operator(logical_line, tokens):
r"""Surround operators with a single space on either side.
- Always surround these binary operators with a single space on
either side: assignment (=), augmented assignment (+=, -= etc.),
comparisons (==, <, >, !=, <=, >=, in, not in, is, is not),
Booleans (and, or, not).
- If operators with different priorities are used, consider adding
whitespace around the operators with the lowest priorities.
Okay: i = i + 1
Okay: submitted += 1
Okay: x = x * 2 - 1
Okay: hypot2 = x * x + y * y
Okay: c = (a + b) * (a - b)
Okay: foo(bar, key='word', *args, **kwargs)
Okay: alpha[:-i]
E225: i=i+1
E225: submitted +=1
E225: x = x /2 - 1
E225: z = x **y
E226: c = (a+b) * (a-b)
E226: hypot2 = x*x + y*y
E227: c = a|b
E228: msg = fmt%(errno, errmsg)
"""
parens = 0
need_space = False
prev_type = tokenize.OP
prev_text = prev_end = None
for token_type, text, start, end, line in tokens:
if token_type in SKIP_COMMENTS:
continue
if text in ('(', 'lambda'):
parens += 1
elif text == ')':
parens -= 1
if need_space:
if start != prev_end:
# Found a (probably) needed space
if need_space is not True and not need_space[1]:
yield (need_space[0],
"E225 missing whitespace around operator")
need_space = False
elif text == '>' and prev_text in ('<', '-'):
# Tolerate the "<>" operator, even if running Python 3
# Deal with Python 3's annotated return value "->"
pass
else:
if need_space is True or need_space[1]:
# A needed trailing space was not found
yield prev_end, "E225 missing whitespace around operator"
else:
code, optype = 'E226', 'arithmetic'
if prev_text == '%':
code, optype = 'E228', 'modulo'
elif prev_text not in ARITHMETIC_OP:
code, optype = 'E227', 'bitwise or shift'
yield (need_space[0], "%s missing whitespace "
"around %s operator" % (code, optype))
need_space = False
elif token_type == tokenize.OP and prev_end is not None:
if text == '=' and parens:
# Allow keyword args or defaults: foo(bar=None).
pass
elif text in WS_NEEDED_OPERATORS:
need_space = True
elif text in UNARY_OPERATORS:
# Check if the operator is being used as a binary operator
# Allow unary operators: -123, -x, +1.
# Allow argument unpacking: foo(*args, **kwargs).
if (prev_text in '}])' if prev_type == tokenize.OP
else prev_text not in KEYWORDS):
need_space = None
elif text in WS_OPTIONAL_OPERATORS:
need_space = None
if need_space is None:
# Surrounding space is optional, but ensure that
# trailing space matches opening space
need_space = (prev_end, start != prev_end)
elif need_space and start == prev_end:
# A needed opening space was not found
yield prev_end, "E225 missing whitespace around operator"
need_space = False
prev_type = token_type
prev_text = text
prev_end = end
def whitespace_around_comma(logical_line):
r"""Avoid extraneous whitespace after a comma or a colon.
Note: these checks are disabled by default
Okay: a = (1, 2)
E241: a = (1,  2)
E242: a = (1,\t2)
"""
line = logical_line
for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line):
found = m.start() + 1
if '\t' in m.group():
yield found, "E242 tab after '%s'" % m.group()[0]
else:
yield found, "E241 multiple spaces after '%s'" % m.group()[0]
def whitespace_around_named_parameter_equals(logical_line, tokens):
r"""Don't use spaces around the '=' sign in function arguments.
Don't use spaces around the '=' sign when used to indicate a
keyword argument or a default parameter value.
Okay: def complex(real, imag=0.0):
Okay: return magic(r=real, i=imag)
Okay: boolean(a == b)
Okay: boolean(a != b)
Okay: boolean(a <= b)
Okay: boolean(a >= b)
E251: def complex(real, imag = 0.0):
E251: return magic(r = real, i = imag)
"""
parens = 0
no_space = False
prev_end = None
message = "E251 unexpected spaces around keyword / parameter equals"
for token_type, text, start, end, line in tokens:
if token_type == tokenize.NL:
continue
if no_space:
no_space = False
if start != prev_end:
yield (prev_end, message)
elif token_type == tokenize.OP:
if text == '(':
parens += 1
elif text == ')':
parens -= 1
elif parens and text == '=':
no_space = True
if start != prev_end:
yield (prev_end, message)
prev_end = end
def whitespace_before_comment(logical_line, tokens):
r"""Separate inline comments by at least two spaces.
An inline comment is a comment on the same line as a statement. Inline
comments should be separated by at least two spaces from the statement.
They should start with a # and a single space.
Each line of a block comment starts with a # and a single space
(unless it is indented text inside the comment).
Okay: x = x + 1  # Increment x
Okay: x = x + 1    # Increment x
Okay: # Block comment
E261: x = x + 1 # Increment x
E262: x = x + 1  #Increment x
E262: x = x + 1  #  Increment x
E265: #Block comment
"""
prev_end = (0, 0)
for token_type, text, start, end, line in tokens:
if token_type == tokenize.COMMENT:
inline_comment = line[:start[1]].strip()
if inline_comment:
if prev_end[0] == start[0] and start[1] < prev_end[1] + 2:
yield (prev_end,
"E261 at least two spaces before inline comment")
symbol, sp, comment = text.partition(' ')
bad_prefix = symbol not in ('#', '#:')
if inline_comment:
if bad_prefix or comment[:1].isspace():
yield start, "E262 inline comment should start with '# '"
elif bad_prefix:
if text.rstrip('#') and (start[0] > 1 or symbol[1] != '!'):
yield start, "E265 block comment should start with '# '"
elif token_type != tokenize.NL:
prev_end = end
def imports_on_separate_lines(logical_line):
r"""Imports should usually be on separate lines.
Okay: import os\nimport sys
E401: import sys, os
Okay: from subprocess import Popen, PIPE
Okay: from myclass import MyClass
Okay: from foo.bar.yourclass import YourClass
Okay: import myclass
Okay: import foo.bar.yourclass
"""
line = logical_line
if line.startswith('import '):
found = line.find(',')
if -1 < found and ';' not in line[:found]:
yield found, "E401 multiple imports on one line"
def compound_statements(logical_line):
r"""Compound statements (on the same line) are generally discouraged.
While sometimes it's okay to put an if/for/while with a small body
on the same line, never do this for multi-clause statements.
Also avoid folding such long lines!
Okay: if foo == 'blah':\n do_blah_thing()
Okay: do_one()
Okay: do_two()
Okay: do_three()
E701: if foo == 'blah': do_blah_thing()
E701: for x in lst: total += x
E701: while t < 10: t = delay()
E701: if foo == 'blah': do_blah_thing()
E701: else: do_non_blah_thing()
E701: try: something()
E701: finally: cleanup()
E701: if foo == 'blah': one(); two(); three()
E702: do_one(); do_two(); do_three()
E703: do_four(); # useless semicolon
"""
line = logical_line
last_char = len(line) - 1
found = line.find(':')
while -1 < found < last_char:
before = line[:found]
if (before.count('{') <= before.count('}') and # {'a': 1} (dict)
before.count('[') <= before.count(']') and # [1:2] (slice)
before.count('(') <= before.count(')') and # (Python 3 annotation)
not LAMBDA_REGEX.search(before)): # lambda x: x
yield found, "E701 multiple statements on one line (colon)"
found = line.find(':', found + 1)
found = line.find(';')
while -1 < found:
if found < last_char:
yield found, "E702 multiple statements on one line (semicolon)"
else:
yield found, "E703 statement ends with a semicolon"
found = line.find(';', found + 1)
def explicit_line_join(logical_line, tokens):
r"""Avoid explicit line join between brackets.
The preferred way of wrapping long lines is by using Python's implied line
continuation inside parentheses, brackets and braces. Long lines can be
broken over multiple lines by wrapping expressions in parentheses. These
should be used in preference to using a backslash for line continuation.
E502: aaa = [123, \\n 123]
E502: aaa = ("bbb " \\n "ccc")
Okay: aaa = [123,\n 123]
Okay: aaa = ("bbb "\n "ccc")
Okay: aaa = "bbb " \\n "ccc"
"""
prev_start = prev_end = parens = 0
for token_type, text, start, end, line in tokens:
if start[0] != prev_start and parens and backslash:
yield backslash, "E502 the backslash is redundant between brackets"
if end[0] != prev_end:
if line.rstrip('\r\n').endswith('\\'):
backslash = (end[0], len(line.splitlines()[-1]) - 1)
else:
backslash = None
prev_start = prev_end = end[0]
else:
prev_start = start[0]
if token_type == tokenize.OP:
if text in '([{':
parens += 1
elif text in ')]}':
parens -= 1
def comparison_to_singleton(logical_line, noqa):
r"""Comparison to singletons should use "is" or "is not".
Comparisons to singletons like None should always be done
with "is" or "is not", never the equality operators.
Okay: if arg is not None:
E711: if arg != None:
E712: if arg == True:
Also, beware of writing if x when you really mean if x is not None --
e.g. when testing whether a variable or argument that defaults to None was
set to some other value. The other value might have a type (such as a
container) that could be false in a boolean context!
"""
match = not noqa and COMPARE_SINGLETON_REGEX.search(logical_line)
if match:
same = (match.group(1) == '==')
singleton = match.group(2)
msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton)
if singleton in ('None',):
code = 'E711'
else:
code = 'E712'
nonzero = ((singleton == 'True' and same) or
(singleton == 'False' and not same))
msg += " or 'if %scond:'" % ('' if nonzero else 'not ')
yield match.start(1), ("%s comparison to %s should be %s" %
(code, singleton, msg))
def comparison_negative(logical_line):
r"""Negative comparison should be done using "not in" and "is not".
Okay: if x not in y:\n pass
Okay: assert (X in Y or X is Z)
Okay: if not (X in Y):\n pass
Okay: zz = x is not y
E713: Z = not X in Y
E713: if not X.B in Y:\n pass
E714: if not X is Y:\n pass
E714: Z = not X.B is Y
"""
match = COMPARE_NEGATIVE_REGEX.search(logical_line)
if match:
pos = match.start(1)
if match.group(2) == 'in':
yield pos, "E713 test for membership should be 'not in'"
else:
yield pos, "E714 test for object identity should be 'is not'"
def comparison_type(logical_line):
r"""Object type comparisons should always use isinstance().
Do not compare types directly.
Okay: if isinstance(obj, int):
E721: if type(obj) is type(1):
When checking if an object is a string, keep in mind that it might be a
unicode string too! In Python 2.3, str and unicode have a common base
class, basestring, so you can do:
Okay: if isinstance(obj, basestring):
Okay: if type(a1) is type(b1):
"""
match = COMPARE_TYPE_REGEX.search(logical_line)
if match:
inst = match.group(1)
if inst and isidentifier(inst) and inst not in SINGLETONS:
return # Allow comparison for types which are not obvious
yield match.start(), "E721 do not compare types, use 'isinstance()'"
def python_3000_has_key(logical_line, noqa):
r"""The {}.has_key() method is removed in Python 3: use the 'in' operator.
Okay: if "alph" in d:\n print d["alph"]
W601: assert d.has_key('alph')
"""
pos = logical_line.find('.has_key(')
if pos > -1 and not noqa:
yield pos, "W601 .has_key() is deprecated, use 'in'"
def python_3000_raise_comma(logical_line):
r"""When raising an exception, use "raise ValueError('message')".
The older form is removed in Python 3.
Okay: raise DummyError("Message")
W602: raise DummyError, "Message"
"""
match = RAISE_COMMA_REGEX.match(logical_line)
if match and not RERAISE_COMMA_REGEX.match(logical_line):
yield match.end() - 1, "W602 deprecated form of raising exception"
def python_3000_not_equal(logical_line):
r"""New code should always use != instead of <>.
The older syntax is removed in Python 3.
Okay: if a != 'no':
W603: if a <> 'no':
"""
pos = logical_line.find('<>')
if pos > -1:
yield pos, "W603 '<>' is deprecated, use '!='"
def python_3000_backticks(logical_line):
r"""Backticks are removed in Python 3: use repr() instead.
Okay: val = repr(1 + 2)
W604: val = `1 + 2`
"""
pos = logical_line.find('`')
if pos > -1:
yield pos, "W604 backticks are deprecated, use 'repr()'"
##############################################################################
# Helper functions
##############################################################################
if '' == ''.encode():
# Python 2: implicit encoding.
def readlines(filename):
"""Read the source code."""
with open(filename, 'rU') as f:
return f.readlines()
isidentifier = re.compile(r'[a-zA-Z_]\w*').match
stdin_get_value = sys.stdin.read
else:
# Python 3
def readlines(filename):
"""Read the source code."""
try:
with open(filename, 'rb') as f:
(coding, lines) = tokenize.detect_encoding(f.readline)
f = TextIOWrapper(f, coding, line_buffering=True)
return [l.decode(coding) for l in lines] + f.readlines()
except (LookupError, SyntaxError, UnicodeError):
# Fall back if file encoding is improperly declared
with open(filename, encoding='latin-1') as f:
return f.readlines()
isidentifier = str.isidentifier
def stdin_get_value():
return TextIOWrapper(sys.stdin.buffer, errors='ignore').read()
noqa = re.compile(r'# no(?:qa|pep8)\b', re.I).search
def expand_indent(line):
r"""Return the amount of indentation.
Tabs are expanded to the next multiple of 8.
>>> expand_indent(' ')
4
>>> expand_indent('\t')
8
>>> expand_indent(' \t')
8
>>> expand_indent(' \t')
16
"""
if '\t' not in line:
return len(line) - len(line.lstrip())
result = 0
for char in line:
if char == '\t':
result = result // 8 * 8 + 8
elif char == ' ':
result += 1
else:
break
return result
def mute_string(text):
"""Replace contents with 'xxx' to prevent syntax matching.
>>> mute_string('"abc"')
'"xxx"'
>>> mute_string("'''abc'''")
"'''xxx'''"
>>> mute_string("r'abc'")
"r'xxx'"
"""
# String modifiers (e.g. u or r)
start = text.index(text[-1]) + 1
end = len(text) - 1
# Triple quotes
if text[-3:] in ('"""', "'''"):
start += 2
end -= 2
return text[:start] + 'x' * (end - start) + text[end:]
def parse_udiff(diff, patterns=None, parent='.'):
"""Return a dictionary of matching lines."""
# For each file of the diff, the entry key is the filename,
# and the value is a set of row numbers to consider.
rv = {}
path = nrows = None
for line in diff.splitlines():
if nrows:
if line[:1] != '-':
nrows -= 1
continue
if line[:3] == '@@ ':
hunk_match = HUNK_REGEX.match(line)
(row, nrows) = [int(g or '1') for g in hunk_match.groups()]
rv[path].update(range(row, row + nrows))
elif line[:3] == '+++':
path = line[4:].split('\t', 1)[0]
if path[:2] == 'b/':
path = path[2:]
rv[path] = set()
return dict([(os.path.join(parent, path), rows)
for (path, rows) in rv.items()
if rows and filename_match(path, patterns)])
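# Illustrative sketch (file name made up): for a unified diff such as
#
#     --- a/example.py
#     +++ b/example.py
#     @@ -1,2 +1,3 @@
#      import os
#     +import sys
#      print(os.sep)
#
# parse_udiff(diff) returns {'./example.py': set([1, 2, 3])}: the 'b/'
# prefix is stripped, the path is joined with `parent`, and the rows come
# from the '+1,3' side of the hunk header (context lines included).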
def normalize_paths(value, parent=os.curdir):
"""Parse a comma-separated list of paths.
Return a list of absolute paths.
"""
if not value or isinstance(value, list):
return value
paths = []
for path in value.split(','):
if '/' in path:
path = os.path.abspath(os.path.join(parent, path))
paths.append(path.rstrip('/'))
return paths
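# Illustrative behaviour (assuming a POSIX cwd of /home/user/project):
#     normalize_paths('.git,__pycache__')  ->  ['.git', '__pycache__']
#     normalize_paths('./build/,src')      ->  ['/home/user/project/build', 'src']
# Bare names are kept as patterns; anything containing '/' is resolved
# against `parent` and stripped of any trailing slash.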
def filename_match(filename, patterns, default=True):
"""Check if patterns contains a pattern that matches filename.
If patterns is unspecified, this always returns True.
"""
if not patterns:
return default
return any(fnmatch(filename, pattern) for pattern in patterns)
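# e.g. filename_match('pep8.py', ['*.py', '*.txt']) is True, while
# filename_match('pep8.py', None) simply returns `default`.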
if COMMENT_WITH_NL:
def _is_eol_token(token):
return (token[0] in NEWLINE or
(token[0] == tokenize.COMMENT and token[1] == token[4]))
else:
def _is_eol_token(token):
return token[0] in NEWLINE
##############################################################################
# Framework to run all checks
##############################################################################
_checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}}
def register_check(check, codes=None):
"""Register a new check object."""
def _add_check(check, kind, codes, args):
if check in _checks[kind]:
_checks[kind][check][0].extend(codes or [])
else:
_checks[kind][check] = (codes or [''], args)
if inspect.isfunction(check):
args = inspect.getargspec(check)[0]
if args and args[0] in ('physical_line', 'logical_line'):
if codes is None:
codes = ERRORCODE_REGEX.findall(check.__doc__ or '')
_add_check(check, args[0], codes, args)
elif inspect.isclass(check):
if inspect.getargspec(check.__init__)[0][:2] == ['self', 'tree']:
_add_check(check, 'tree', codes, None)
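# A hypothetical plugin is registered by argument-name convention, e.g.:
#
#     def no_dollar_signs(logical_line):
#         r"""Dollar signs are not valid Python. W999: spam($)"""
#         pos = logical_line.find('$')
#         if pos > -1:
#             yield pos, "W999 dollar sign found"
#
# register_check(no_dollar_signs) files it under 'logical_line' and
# scrapes W999 from the docstring; the sketch is kept inside this comment
# because any real module-level function with such a signature would also
# be picked up by init_checks_registry() below.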
def init_checks_registry():
"""Register all globally visible functions.
The first argument name is either 'physical_line' or 'logical_line'.
"""
mod = inspect.getmodule(register_check)
for (name, function) in inspect.getmembers(mod, inspect.isfunction):
register_check(function)
init_checks_registry()
class Checker(object):
"""Load a Python source file, tokenize it, check coding style."""
def __init__(self, filename=None, lines=None,
options=None, report=None, **kwargs):
if options is None:
options = StyleGuide(kwargs).options
else:
assert not kwargs
self._io_error = None
self._physical_checks = options.physical_checks
self._logical_checks = options.logical_checks
self._ast_checks = options.ast_checks
self.max_line_length = options.max_line_length
self.multiline = False # in a multiline string?
self.hang_closing = options.hang_closing
self.verbose = options.verbose
self.filename = filename
if filename is None:
self.filename = 'stdin'
self.lines = lines or []
elif filename == '-':
self.filename = 'stdin'
self.lines = stdin_get_value().splitlines(True)
elif lines is None:
try:
self.lines = readlines(filename)
except IOError:
(exc_type, exc) = sys.exc_info()[:2]
self._io_error = '%s: %s' % (exc_type.__name__, exc)
self.lines = []
else:
self.lines = lines
if self.lines:
ord0 = ord(self.lines[0][0])
if ord0 in (0xef, 0xfeff): # Strip the UTF-8 BOM
if ord0 == 0xfeff:
self.lines[0] = self.lines[0][1:]
elif self.lines[0][:3] == '\xef\xbb\xbf':
self.lines[0] = self.lines[0][3:]
self.report = report or options.report
self.report_error = self.report.error
def report_invalid_syntax(self):
"""Check if the syntax is valid."""
(exc_type, exc) = sys.exc_info()[:2]
if len(exc.args) > 1:
offset = exc.args[1]
if len(offset) > 2:
offset = offset[1:3]
else:
offset = (1, 0)
self.report_error(offset[0], offset[1] or 0,
'E901 %s: %s' % (exc_type.__name__, exc.args[0]),
self.report_invalid_syntax)
def readline(self):
"""Get the next line from the input buffer."""
if self.line_number >= self.total_lines:
return ''
line = self.lines[self.line_number]
self.line_number += 1
if self.indent_char is None and line[:1] in WHITESPACE:
self.indent_char = line[0]
return line
def run_check(self, check, argument_names):
"""Run a check plugin."""
arguments = []
for name in argument_names:
arguments.append(getattr(self, name))
return check(*arguments)
def check_physical(self, line):
"""Run all physical checks on a raw input line."""
self.physical_line = line
for name, check, argument_names in self._physical_checks:
result = self.run_check(check, argument_names)
if result is not None:
(offset, text) = result
self.report_error(self.line_number, offset, text, check)
if text[:4] == 'E101':
self.indent_char = line[0]
def build_tokens_line(self):
"""Build a logical line from tokens."""
logical = []
comments = []
length = 0
prev_row = prev_col = mapping = None
for token_type, text, start, end, line in self.tokens:
if token_type in SKIP_TOKENS:
continue
if not mapping:
mapping = [(0, start)]
if token_type == tokenize.COMMENT:
comments.append(text)
continue
if token_type == tokenize.STRING:
text = mute_string(text)
if prev_row:
(start_row, start_col) = start
if prev_row != start_row: # different row
prev_text = self.lines[prev_row - 1][prev_col - 1]
if prev_text == ',' or (prev_text not in '{[('
and text not in '}])'):
text = ' ' + text
elif prev_col != start_col: # different column
text = line[prev_col:start_col] + text
logical.append(text)
length += len(text)
mapping.append((length, end))
(prev_row, prev_col) = end
self.logical_line = ''.join(logical)
self.noqa = comments and noqa(''.join(comments))
return mapping
def check_logical(self):
"""Build a line from tokens and run all logical checks on it."""
self.report.increment_logical_line()
mapping = self.build_tokens_line()
(start_row, start_col) = mapping[0][1]
start_line = self.lines[start_row - 1]
self.indent_level = expand_indent(start_line[:start_col])
if self.blank_before < self.blank_lines:
self.blank_before = self.blank_lines
if self.verbose >= 2:
print(self.logical_line[:80].rstrip())
for name, check, argument_names in self._logical_checks:
if self.verbose >= 4:
print(' ' + name)
for offset, text in self.run_check(check, argument_names) or ():
if not isinstance(offset, tuple):
for token_offset, pos in mapping:
if offset <= token_offset:
break
offset = (pos[0], pos[1] + offset - token_offset)
self.report_error(offset[0], offset[1], text, check)
if self.logical_line:
self.previous_indent_level = self.indent_level
self.previous_logical = self.logical_line
self.blank_lines = 0
self.tokens = []
def check_ast(self):
"""Build the file's AST and run all AST checks."""
try:
tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
except (SyntaxError, TypeError):
return self.report_invalid_syntax()
for name, cls, __ in self._ast_checks:
checker = cls(tree, self.filename)
for lineno, offset, text, check in checker.run():
if not self.lines or not noqa(self.lines[lineno - 1]):
self.report_error(lineno, offset, text, check)
def generate_tokens(self):
"""Tokenize the file, run physical line checks and yield tokens."""
if self._io_error:
self.report_error(1, 0, 'E902 %s' % self._io_error, readlines)
tokengen = tokenize.generate_tokens(self.readline)
try:
for token in tokengen:
if token[2][0] > self.total_lines:
return
self.maybe_check_physical(token)
yield token
except (SyntaxError, tokenize.TokenError):
self.report_invalid_syntax()
def maybe_check_physical(self, token):
"""If appropriate (based on token), check current physical line(s)."""
# Called after every token, but act only on end of line.
if _is_eol_token(token):
# Obviously, a newline token ends a single physical line.
self.check_physical(token[4])
elif token[0] == tokenize.STRING and '\n' in token[1]:
# Less obviously, a string that contains newlines is a
# multiline string, either triple-quoted or with internal
# newlines backslash-escaped. Check every physical line in the
# string *except* for the last one: its newline is outside of
# the multiline string, so we consider it a regular physical
# line, and will check it like any other physical line.
#
# Subtleties:
# - we don't *completely* ignore the last line; if it contains
# the magical "# noqa" comment, we disable all physical
# checks for the entire multiline string
# - have to wind self.line_number back because initially it
# points to the last line of the string, and we want
# check_physical() to give accurate feedback
if noqa(token[4]):
return
self.multiline = True
self.line_number = token[2][0]
for line in token[1].split('\n')[:-1]:
self.check_physical(line + '\n')
self.line_number += 1
self.multiline = False
def check_all(self, expected=None, line_offset=0):
"""Run all checks on the input file."""
self.report.init_file(self.filename, self.lines, expected, line_offset)
self.total_lines = len(self.lines)
if self._ast_checks:
self.check_ast()
self.line_number = 0
self.indent_char = None
self.indent_level = self.previous_indent_level = 0
self.previous_logical = ''
self.tokens = []
self.blank_lines = self.blank_before = 0
parens = 0
for token in self.generate_tokens():
self.tokens.append(token)
token_type, text = token[0:2]
if self.verbose >= 3:
if token[2][0] == token[3][0]:
pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
else:
pos = 'l.%s' % token[3][0]
print('l.%s\t%s\t%s\t%r' %
(token[2][0], pos, tokenize.tok_name[token[0]], text))
if token_type == tokenize.OP:
if text in '([{':
parens += 1
elif text in '}])':
parens -= 1
elif not parens:
if token_type in NEWLINE:
if token_type == tokenize.NEWLINE:
self.check_logical()
self.blank_before = 0
elif len(self.tokens) == 1:
# The physical line contains only this token.
self.blank_lines += 1
del self.tokens[0]
else:
self.check_logical()
elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
if len(self.tokens) == 1:
# The comment also ends a physical line
token = list(token)
token[1] = text.rstrip('\r\n')
token[3] = (token[2][0], token[2][1] + len(token[1]))
self.tokens = [tuple(token)]
self.check_logical()
if self.tokens:
self.check_physical(self.lines[-1])
self.check_logical()
return self.report.get_file_results()
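# Minimal library-style sketch (assumes an 'example.py' on disk):
#     checker = Checker('example.py', show_source=True)
#     file_errors = checker.check_all()
# Keyword arguments are turned into StyleGuide options when no explicit
# `options` object is passed (see __init__ above).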
class BaseReport(object):
"""Collect the results of the checks."""
print_filename = False
def __init__(self, options):
self._benchmark_keys = options.benchmark_keys
self._ignore_code = options.ignore_code
# Results
self.elapsed = 0
self.total_errors = 0
self.counters = dict.fromkeys(self._benchmark_keys, 0)
self.messages = {}
def start(self):
"""Start the timer."""
self._start_time = time.time()
def stop(self):
"""Stop the timer."""
self.elapsed = time.time() - self._start_time
def init_file(self, filename, lines, expected, line_offset):
"""Signal a new file."""
self.filename = filename
self.lines = lines
self.expected = expected or ()
self.line_offset = line_offset
self.file_errors = 0
self.counters['files'] += 1
self.counters['physical lines'] += len(lines)
def increment_logical_line(self):
"""Signal a new logical line."""
self.counters['logical lines'] += 1
def error(self, line_number, offset, text, check):
"""Report an error, according to options."""
code = text[:4]
if self._ignore_code(code):
return
if code in self.counters:
self.counters[code] += 1
else:
self.counters[code] = 1
self.messages[code] = text[5:]
# Don't care about expected errors or warnings
if code in self.expected:
return
if self.print_filename and not self.file_errors:
print(self.filename)
self.file_errors += 1
self.total_errors += 1
return code
def get_file_results(self):
"""Return the count of errors and warnings for this file."""
return self.file_errors
def get_count(self, prefix=''):
"""Return the total count of errors and warnings."""
return sum([self.counters[key]
for key in self.messages if key.startswith(prefix)])
def get_statistics(self, prefix=''):
"""Get statistics for message codes that start with the prefix.
prefix='' matches all errors and warnings
prefix='E' matches all errors
prefix='W' matches all warnings
prefix='E4' matches all errors that have to do with imports
"""
return ['%-7s %s %s' % (self.counters[key], key, self.messages[key])
for key in sorted(self.messages) if key.startswith(prefix)]
def print_statistics(self, prefix=''):
"""Print overall statistics (number of errors and warnings)."""
for line in self.get_statistics(prefix):
print(line)
def print_benchmark(self):
"""Print benchmark numbers."""
print('%-7.2f %s' % (self.elapsed, 'seconds elapsed'))
if self.elapsed:
for key in self._benchmark_keys:
print('%-7d %s per second (%d total)' %
(self.counters[key] / self.elapsed, key,
self.counters[key]))
class FileReport(BaseReport):
"""Collect the results of the checks and print only the filenames."""
print_filename = True
class StandardReport(BaseReport):
"""Collect and print the results of the checks."""
def __init__(self, options):
super(StandardReport, self).__init__(options)
self._fmt = REPORT_FORMAT.get(options.format.lower(),
options.format)
self._repeat = options.repeat
self._show_source = options.show_source
self._show_pep8 = options.show_pep8
def init_file(self, filename, lines, expected, line_offset):
"""Signal a new file."""
self._deferred_print = []
return super(StandardReport, self).init_file(
filename, lines, expected, line_offset)
def error(self, line_number, offset, text, check):
"""Report an error, according to options."""
code = super(StandardReport, self).error(line_number, offset,
text, check)
if code and (self.counters[code] == 1 or self._repeat):
self._deferred_print.append(
(line_number, offset, code, text[5:], check.__doc__))
return code
def get_file_results(self):
"""Print the result and return the overall count for this file."""
self._deferred_print.sort()
for line_number, offset, code, text, doc in self._deferred_print:
print(self._fmt % {
'path': self.filename,
'row': self.line_offset + line_number, 'col': offset + 1,
'code': code, 'text': text,
})
if self._show_source:
if line_number > len(self.lines):
line = ''
else:
line = self.lines[line_number - 1]
print(line.rstrip())
print(re.sub(r'\S', ' ', line[:offset]) + '^')
if self._show_pep8 and doc:
print(' ' + doc.strip())
return self.file_errors
class DiffReport(StandardReport):
"""Collect and print the results for the changed lines only."""
def __init__(self, options):
super(DiffReport, self).__init__(options)
self._selected = options.selected_lines
def error(self, line_number, offset, text, check):
if line_number not in self._selected[self.filename]:
return
return super(DiffReport, self).error(line_number, offset, text, check)
class StyleGuide(object):
"""Initialize a PEP-8 instance with few options."""
def __init__(self, *args, **kwargs):
# build options from the command line
self.checker_class = kwargs.pop('checker_class', Checker)
parse_argv = kwargs.pop('parse_argv', False)
config_file = kwargs.pop('config_file', None)
parser = kwargs.pop('parser', None)
# build options from dict
options_dict = dict(*args, **kwargs)
arglist = None if parse_argv else options_dict.get('paths', None)
options, self.paths = process_options(
arglist, parse_argv, config_file, parser)
if options_dict:
options.__dict__.update(options_dict)
if 'paths' in options_dict:
self.paths = options_dict['paths']
self.runner = self.input_file
self.options = options
if not options.reporter:
options.reporter = BaseReport if options.quiet else StandardReport
options.select = tuple(options.select or ())
if not (options.select or options.ignore or
options.testsuite or options.doctest) and DEFAULT_IGNORE:
# The default choice: ignore controversial checks
options.ignore = tuple(DEFAULT_IGNORE.split(','))
else:
# Ignore all checks which are not explicitly selected
options.ignore = ('',) if options.select else tuple(options.ignore)
options.benchmark_keys = BENCHMARK_KEYS[:]
options.ignore_code = self.ignore_code
options.physical_checks = self.get_checks('physical_line')
options.logical_checks = self.get_checks('logical_line')
options.ast_checks = self.get_checks('tree')
self.init_report()
def init_report(self, reporter=None):
"""Initialize the report instance."""
self.options.report = (reporter or self.options.reporter)(self.options)
return self.options.report
def check_files(self, paths=None):
"""Run all checks on the paths."""
if paths is None:
paths = self.paths
report = self.options.report
runner = self.runner
report.start()
try:
for path in paths:
if os.path.isdir(path):
self.input_dir(path)
elif not self.excluded(path):
runner(path)
except KeyboardInterrupt:
print('... stopped')
report.stop()
return report
def input_file(self, filename, lines=None, expected=None, line_offset=0):
"""Run all checks on a Python source file."""
if self.options.verbose:
print('checking %s' % filename)
fchecker = self.checker_class(
filename, lines=lines, options=self.options)
return fchecker.check_all(expected=expected, line_offset=line_offset)
def input_dir(self, dirname):
"""Check all files in this directory and all subdirectories."""
dirname = dirname.rstrip('/')
if self.excluded(dirname):
return 0
counters = self.options.report.counters
verbose = self.options.verbose
filepatterns = self.options.filename
runner = self.runner
for root, dirs, files in os.walk(dirname):
if verbose:
print('directory ' + root)
counters['directories'] += 1
for subdir in sorted(dirs):
if self.excluded(subdir, root):
dirs.remove(subdir)
for filename in sorted(files):
# contain a pattern that matches?
if ((filename_match(filename, filepatterns) and
not self.excluded(filename, root))):
runner(os.path.join(root, filename))
def excluded(self, filename, parent=None):
"""Check if the file should be excluded.
Check if 'options.exclude' contains a pattern that matches filename.
"""
if not self.options.exclude:
return False
basename = os.path.basename(filename)
if filename_match(basename, self.options.exclude):
return True
if parent:
filename = os.path.join(parent, filename)
filename = os.path.abspath(filename)
return filename_match(filename, self.options.exclude)
def ignore_code(self, code):
"""Check if the error code should be ignored.
If 'options.select' contains a prefix of the error code,
return False. Else, if 'options.ignore' contains a prefix of
the error code, return True.
"""
if len(code) < 4 and any(s.startswith(code)
for s in self.options.select):
return False
return (code.startswith(self.options.ignore) and
not code.startswith(self.options.select))
def get_checks(self, argument_name):
"""Get all the checks for this category.
Find all globally visible functions where the first argument name
starts with argument_name and which contain selected tests.
"""
checks = []
for check, attrs in _checks[argument_name].items():
(codes, args) = attrs
if any(not (code and self.ignore_code(code)) for code in codes):
checks.append((check.__name__, check, args))
return sorted(checks)
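# Typical in-process usage, with made-up file names:
#     pep8style = StyleGuide(quiet=True)
#     result = pep8style.check_files(['file1.py', 'file2.py'])
#     if result.total_errors:
#         ...  # handle failures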
def get_parser(prog='pep8', version=__version__):
parser = OptionParser(prog=prog, version=version,
usage="%prog [options] input ...")
parser.config_options = [
'exclude', 'filename', 'select', 'ignore', 'max-line-length',
'hang-closing', 'count', 'format', 'quiet', 'show-pep8',
'show-source', 'statistics', 'verbose']
parser.add_option('-v', '--verbose', default=0, action='count',
help="print status messages, or debug with -vv")
parser.add_option('-q', '--quiet', default=0, action='count',
help="report only file names, or nothing with -qq")
parser.add_option('-r', '--repeat', default=True, action='store_true',
help="(obsolete) show all occurrences of the same error")
parser.add_option('--first', action='store_false', dest='repeat',
help="show first occurrence of each error")
parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE,
help="exclude files or directories which match these "
"comma separated patterns (default: %default)")
parser.add_option('--filename', metavar='patterns', default='*.py',
help="when parsing directories, only check filenames "
"matching these comma separated patterns "
"(default: %default)")
parser.add_option('--select', metavar='errors', default='',
help="select errors and warnings (e.g. E,W6)")
parser.add_option('--ignore', metavar='errors', default='',
help="skip errors and warnings (e.g. E4,W)")
parser.add_option('--show-source', action='store_true',
help="show source code for each error")
parser.add_option('--show-pep8', action='store_true',
help="show text of PEP 8 for each error "
"(implies --first)")
parser.add_option('--statistics', action='store_true',
help="count errors and warnings")
parser.add_option('--count', action='store_true',
help="print total number of errors and warnings "
"to standard error and set exit code to 1 if "
"total is not null")
parser.add_option('--max-line-length', type='int', metavar='n',
default=MAX_LINE_LENGTH,
help="set maximum allowed line length "
"(default: %default)")
parser.add_option('--hang-closing', action='store_true',
help="hang closing bracket instead of matching "
"indentation of opening bracket's line")
parser.add_option('--format', metavar='format', default='default',
help="set the error format [default|pylint|<custom>]")
parser.add_option('--diff', action='store_true',
help="report only lines changed according to the "
"unified diff received on STDIN")
group = parser.add_option_group("Testing Options")
if os.path.exists(TESTSUITE_PATH):
group.add_option('--testsuite', metavar='dir',
help="run regression tests from dir")
group.add_option('--doctest', action='store_true',
help="run doctest on myself")
group.add_option('--benchmark', action='store_true',
help="measure processing speed")
return parser
def read_config(options, args, arglist, parser):
"""Read both user configuration and local configuration."""
config = RawConfigParser()
user_conf = options.config
if user_conf and os.path.isfile(user_conf):
if options.verbose:
print('user configuration: %s' % user_conf)
config.read(user_conf)
local_dir = os.curdir
parent = tail = args and os.path.abspath(os.path.commonprefix(args))
while tail:
if config.read([os.path.join(parent, fn) for fn in PROJECT_CONFIG]):
local_dir = parent
if options.verbose:
print('local configuration: in %s' % parent)
break
(parent, tail) = os.path.split(parent)
pep8_section = parser.prog
if config.has_section(pep8_section):
option_list = dict([(o.dest, o.type or o.action)
for o in parser.option_list])
# First, read the default values
(new_options, __) = parser.parse_args([])
# Second, parse the configuration
for opt in config.options(pep8_section):
if opt.replace('_', '-') not in parser.config_options:
print(" unknown option '%s' ignored" % opt)
continue
if options.verbose > 1:
print(" %s = %s" % (opt, config.get(pep8_section, opt)))
normalized_opt = opt.replace('-', '_')
opt_type = option_list[normalized_opt]
if opt_type in ('int', 'count'):
value = config.getint(pep8_section, opt)
elif opt_type == 'string':
value = config.get(pep8_section, opt)
if normalized_opt == 'exclude':
value = normalize_paths(value, local_dir)
else:
assert opt_type in ('store_true', 'store_false')
value = config.getboolean(pep8_section, opt)
setattr(new_options, normalized_opt, value)
# Third, overwrite with the command-line options
(options, __) = parser.parse_args(arglist, values=new_options)
options.doctest = options.testsuite = False
return options
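# A project-local configuration picked up by read_config() might look
# like this, in a tox.ini or setup.cfg next to the checked files (the
# section name comes from parser.prog):
#
#     [pep8]
#     max-line-length = 99
#     ignore = E226,E24
#     exclude = .git,build
#
# Options missing from parser.config_options are reported and skipped.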
def process_options(arglist=None, parse_argv=False, config_file=None,
parser=None):
"""Process options passed either via arglist or via command line args."""
if not parser:
parser = get_parser()
if not parser.has_option('--config'):
if config_file is True:
config_file = DEFAULT_CONFIG
group = parser.add_option_group("Configuration", description=(
"The project options are read from the [%s] section of the "
"tox.ini file or the setup.cfg file located in any parent folder "
"of the path(s) being processed. Allowed options are: %s." %
(parser.prog, ', '.join(parser.config_options))))
group.add_option('--config', metavar='path', default=config_file,
help="user config file location (default: %default)")
# Don't read the command line if the module is used as a library.
if not arglist and not parse_argv:
arglist = []
# If parse_argv is True and arglist is None, arguments are
# parsed from the command line (sys.argv)
(options, args) = parser.parse_args(arglist)
options.reporter = None
if options.ensure_value('testsuite', False):
args.append(options.testsuite)
elif not options.ensure_value('doctest', False):
if parse_argv and not args:
if options.diff or any(os.path.exists(name)
for name in PROJECT_CONFIG):
args = ['.']
else:
parser.error('input not specified')
options = read_config(options, args, arglist, parser)
options.reporter = parse_argv and options.quiet == 1 and FileReport
options.filename = options.filename and options.filename.split(',')
options.exclude = normalize_paths(options.exclude)
options.select = options.select and options.select.split(',')
options.ignore = options.ignore and options.ignore.split(',')
if options.diff:
options.reporter = DiffReport
stdin = stdin_get_value()
options.selected_lines = parse_udiff(stdin, options.filename, args[0])
args = sorted(options.selected_lines)
return options, args
def _main():
"""Parse options and run checks on Python source."""
import signal
# Handle "Broken pipe" gracefully
try:
signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1))
except AttributeError:
pass # not supported on Windows
pep8style = StyleGuide(parse_argv=True, config_file=True)
options = pep8style.options
if options.doctest or options.testsuite:
from testsuite.support import run_tests
report = run_tests(pep8style)
else:
report = pep8style.check_files()
if options.statistics:
report.print_statistics()
if options.benchmark:
report.print_benchmark()
if options.testsuite and not options.quiet:
report.print_results()
if report.total_errors:
if options.count:
sys.stderr.write(str(report.total_errors) + '\n')
sys.exit(1)
if __name__ == '__main__':
_main()
|
{
"content_hash": "179c1f9bbef7e481637f55307c356bb3",
"timestamp": "",
"source": "github",
"line_count": 1917,
"max_line_length": 79,
"avg_line_length": 38.48982785602504,
"alnum_prop": 0.5575930067086806,
"repo_name": "dguo-coursera/arcanist",
"id": "450dcdf4b99f24598654b069ec3b3302d22f2bc5",
"size": "75072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "externals/pep8/pep8.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "410"
},
{
"name": "PHP",
"bytes": "1267770"
},
{
"name": "Python",
"bytes": "81453"
},
{
"name": "Shell",
"bytes": "695"
}
],
"symlink_target": ""
}
|
def shiftList(numbers, n):
    # One insertion-sort shift: copy the value at n-1 into slot n.
    if n - 1 < 0:
        return numbers
    numbers[n] = numbers[n - 1]
    return numbers

def findIndex(numbers, num):
    # Index of the first element greater than num, i.e. where num belongs.
    for i in range(0, len(numbers)):
        if numbers[i] > num:
            return i
    return len(numbers) - 1

def printList(numbers):
    print " ".join(str(number) for number in numbers)

N = int(raw_input())
numbers = map(int, raw_input().strip().split(" "))
# The last element is out of place; remember it and find its target slot.
unsorted = numbers[len(numbers) - 1]
placeIndex = findIndex(numbers, unsorted)
for i in range(0, N):
    index = N - 1 - i
    numbers = shiftList(numbers, index)
    if index == placeIndex:
        numbers[index] = unsorted
        printList(numbers)
        break
    else:
        printList(numbers)
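# Worked example (the HackerRank "Insertion Sort - Part 1" sample):
# for input "5" and "2 4 6 8 3" the loop prints one line per shift:
#     2 4 6 8 8
#     2 4 6 6 8
#     2 4 4 6 8
#     2 3 4 6 8
# with the saved value 3 written back once index reaches placeIndex.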
|
{
"content_hash": "000a56d9c9268c0ad9de846dd8b0f233",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 57,
"avg_line_length": 21.514285714285716,
"alnum_prop": 0.6294820717131474,
"repo_name": "MajidLashgarian/HackerRank",
"id": "c495acba7a7ea527e1d04555bcb090372fe1bafb",
"size": "753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "InsertationSort1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5362"
},
{
"name": "Python",
"bytes": "6234"
}
],
"symlink_target": ""
}
|
from django.utils.translation import gettext as _
import plotly.offline as plotly
import plotly.graph_objs as go
from reports import utils
def bmi_bmi(objects):
    """
    Create a graph showing BMI over time.

    :param objects: a QuerySet of BMI instances.
    :returns: a tuple of the graph's html and javascript.
    """
    objects = objects.order_by("-date")
    trace = go.Scatter(
        name=_("BMI"),
        x=list(objects.values_list("date", flat=True)),
        y=list(objects.values_list("bmi", flat=True)),
        fill="tozeroy",
    )
    layout_args = utils.default_graph_layout_options()
    layout_args["barmode"] = "stack"
    layout_args["title"] = _("<b>BMI</b>")
    layout_args["xaxis"]["title"] = _("Date")
    layout_args["xaxis"]["rangeselector"] = utils.rangeselector_date()
    layout_args["yaxis"]["title"] = _("BMI")
    fig = go.Figure({"data": [trace], "layout": go.Layout(**layout_args)})
    output = plotly.plot(fig, output_type="div", include_plotlyjs=False)
    return utils.split_graph_output(output)
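# Hypothetical call site (assumes a BMI model with `date` and `bmi`
# fields, as the QuerySet above implies):
#     html, js = bmi_bmi(BMI.objects.filter(child=child))
# where `html` is the plotly <div> and `js` its accompanying script, as
# split by utils.split_graph_output().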
|
{
"content_hash": "77d6adae21b9e54ab5d0842c40c3e910",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 74,
"avg_line_length": 31.939393939393938,
"alnum_prop": 0.6375711574952562,
"repo_name": "cdubz/babybuddy",
"id": "355a453a4269dc517ea2f13b652666dfc2778f89",
"size": "1078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reports/graphs/bmi_bmi.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "712618"
},
{
"name": "Dockerfile",
"bytes": "367"
},
{
"name": "HTML",
"bytes": "112489"
},
{
"name": "JavaScript",
"bytes": "7555762"
},
{
"name": "Python",
"bytes": "242367"
}
],
"symlink_target": ""
}
|
from typing import Union
from fastapi import Body, FastAPI
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
    name: str
    description: Union[str, None] = None
    price: float
    tax: Union[float, None] = None


class User(BaseModel):
    username: str
    full_name: Union[str, None] = None


@app.put("/items/{item_id}")
async def update_item(
    *,
    item_id: int,
    item: Item,
    user: User,
    importance: int = Body(gt=0),
    q: Union[str, None] = None
):
    results = {"item_id": item_id, "item": item, "user": user, "importance": importance}
    if q:
        results.update({"q": q})
    return results
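# With multiple body parameters, FastAPI expects each one embedded under
# its own key, so a matching request (illustrative values) looks like:
#
#     PUT /items/5?q=hello
#     {
#         "item": {"name": "Foo", "price": 42.0},
#         "user": {"username": "dave"},
#         "importance": 5
#     }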
|
{
"content_hash": "75bf0c9284b393025fbe6621a5af95cd",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 88,
"avg_line_length": 19.757575757575758,
"alnum_prop": 0.6211656441717791,
"repo_name": "tiangolo/fastapi",
"id": "beea7d1e38ff89b95418697f7fa416ea84930816",
"size": "652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs_src/body_multiple_params/tutorial004.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25"
},
{
"name": "HTML",
"bytes": "187"
},
{
"name": "Python",
"bytes": "1928986"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
}
|
from sheetDB.table import *
from nose.tools import *
from mock import patch, MagicMock
# main class tests
@patch('sheetDB.table.Table.fetchFromParent')
@patch('sheetDB.table.Table.updateHeader')
@patch('sheetDB.table.Table.updateConstraints')
@patch('sheetDB.table.Table.fetchRowLabels')
@patch('sheetDB.table.reverseDict')
def test_table(reverseDict, fetchRowLabels, updateConstraints,
updateHeader, fetchFromParent):
fetchFromParent.return_value = [3,]
worksheet = MagicMock()
table = Table(worksheet, "parent")
assert_equals(table.sheet, worksheet)
assert_equals(table.name, worksheet.ID)
assert_equals(table.parent, "parent")
assert_equals(table.headerRow, fetchFromParent.return_value[0])
assert_equals(table.refRow, fetchFromParent.return_value[0])
assert_true(table.headerRow in table.ignoredRows)
assert_true(table.refRow in table.ignoredRows)
updateHeader.assert_called_once()
updateConstraints.assert_called_once()
assert_equals(fetchFromParent.call_count, 4)
fetchFromParent.assert_called_with("IGNOREDCOLS")
return table
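# Note on the @patch stacking used throughout this module: the decorator
# closest to the function supplies the first injected argument, so above
# the bottom patch (reverseDict) arrives first and the top patch
# (fetchFromParent) arrives last.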
## global table
table = test_table()
def test_parent():
assert_equals(table.parent, table._parent)
def test_sheet():
assert_equals(table.sheet, table._sheet)
@patch('sheetDB.table.Table.parent')
def test_fetchFromParent(parent):
table.name = "name"
assert_equals(table.fetchFromParent("label"),
parent.fetchConstant.return_value)
parent.fetchConstant.assert_called_once_with("name_label")
@patch('sheetDB.table.Table.parent')
def test_removeFromParent(parent):
table.name = "name"
table.removeFromParent("label")
parent.removeConstant.assert_called_once_with("name_label")
@patch('sheetDB.table.Table.fetchRowLabels')
@patch('sheetDB.table.reverseDict')
def test_updateHeader(reverseDict, fetchRowLabels):
table.updateHeader()
fetchRowLabels.assert_called_once_with(table.headerRow, False)
reverseDict.assert_called_once_with(fetchRowLabels.return_value)
@patch('sheetDB.table.Table.fetchRowLabels')
@patch('sheetDB.table.Table.sheet')
def test_updateConstraints(sheet, fetch):
table.refRow = None
sheet.colCount = 4
table.updateConstraints()
assert_equals(type(table.constraints), dict)
assert_equals(table.constraints[4], "")
assert_equals(table.constraints[1], "")
assert_true(0 not in table.constraints)
table.refRow = 5
table.updateConstraints()
assert_equals(table.constraints, fetch.return_value)
fetch.assert_called_once_with(5)
@patch('sheetDB.table.Table.updateConstraints')
@patch('sheetDB.table.Table.parent')
@patch('sheetDB.table.Table.sheet')
def test_setRefRow(sheet, parent, update):
table.name = "name"
sheet.rowCount = 500
table.refRow = None
table.setRefRow(500)
parent.setConstant.assert_called_once_with("name_REFROW", 500)
assert_equals(table.refRow, 500)
update.assert_called_once_with()
assert_true(table.refRow in table.ignoredRows)
table.setRefRow(None)
parent.setConstant.assert_called_with("name_REFROW", None)
assert_equals(table.refRow, None)
update.assert_called_with()
assert_true(500 not in table.ignoredRows)
assert_equals(update.call_count, 2)
assert_raises(SheetError, table.setRefRow, 0)
assert_raises(SheetError, table.setRefRow, 501)
@patch('sheetDB.table.Table.sheet')
@patch('sheetDB.table.Table.setRefRow')
def test_addRefRow(setRow, sheet):
table.addRefRow()
sheet.addRows.assert_called_once_with(1)
setRow.assert_called_once_with(sheet.rowCount)
@patch('sheetDB.table.Table.updateHeader')
@patch('sheetDB.table.Table.parent')
@patch('sheetDB.table.Table.sheet')
def test_setHeader(sheet, parent, updateHeader):
sheet.rowCount = 9485
table.name = "name"
assert_raises(SheetError, table.setHeader, -1)
assert_raises(SheetError, table.setHeader, 0)
assert_raises(SheetError, table.setHeader, 9486)
table.headerRow = 1
table.setHeader(9485)
parent.setConstant.assert_called_once_with("name_HEADER", 9485)
assert_equals(table.headerRow, 9485)
assert_true(table.headerRow in table.ignoredRows)
assert_false(1 in table.ignoredRows)
table.ignoredRows = set()
table.setHeader(9400)
parent.setConstant.assert_called_with("name_HEADER", 9400)
assert_equals(table.headerRow, 9400)
assert_true(table.headerRow in table.ignoredRows)
assert_false(9485 in table.ignoredRows)
@patch('sheetDB.table.Table.parent')
def test_ignoreRows(parent):
table.name = "name"
table.ignoreRows()
parent.addToConstantList.assert_not_called()
table.ignoreRows(40)
parent.addToConstantList.assert_called_once_with("name_IGNOREDROWS",
40)
assert_true(40 in table.ignoredRows)
table.ignoreRows(1, 2, 3, 4, 5)
parent.addToConstantList.assert_called_with("name_IGNOREDROWS", 5)
assert_equals(parent.addToConstantList.call_count, 6)
for x in xrange(1, 6):
assert_true(x in table.ignoredRows)
@patch('sheetDB.table.Table.parent')
def test_unignoreRows(parent):
table.name = "name"
table.unignoreRows()
parent.removeFromConstantList.assert_not_called()
assert_raises(DataError, table.unignoreRows, table.headerRow)
assert_raises(DataError, table.unignoreRows, table.refRow)
assert_raises(DataError, table.unignoreRows,
1, 2, 3, table.headerRow, 4, 5, 6)
parent.removeFromConstantList.assert_not_called()
table.ignoredRows = set([10, 20, 30, 40, 50, 60])
table.unignoreRows(10, 20, 30)
assert_true(50 in table.ignoredRows)
assert_false(20 in table.ignoredRows)
assert_equals(len(table.ignoredRows), 3)
@patch('sheetDB.table.Table.parent')
def test_ignoreCols(parent):
table.name = "name"
table.ignoreCols()
parent.addToConstantList.assert_not_called()
table.ignoreCols(40)
parent.addToConstantList.assert_called_once_with("name_IGNOREDCOLS",
40)
assert_true(40 in table.ignoredCols)
table.ignoreCols(1, 2, 3, 4, 5)
parent.addToConstantList.assert_called_with("name_IGNOREDCOLS", 5)
assert_equals(parent.addToConstantList.call_count, 6)
for x in xrange(1, 6):
assert_true(x in table.ignoredCols)
@patch('sheetDB.table.Table.parent')
def test_unignoreCols(parent):
table.name = "name"
table.unignoreCols()
parent.removeFromConstantList.assert_not_called()
table.ignoredCols = set([10, 20, 30, 40, 50, 60])
table.unignoreCols(10, 20, 30)
assert_true(50 in table.ignoredCols)
assert_false(20 in table.ignoredCols)
assert_equals(len(table.ignoredCols), 3)
@patch('sheetDB.table.Table.addRefRow')
@patch('sheetDB.table.Table.sheet')
@patch('sheetDB.table.Table.updateConstraints')
def test_constrain(update, sheet, addRef):
assert_raises(DataError, table.constrain, 8, "ABACUS")
upCell, addCell = sheet.updateCell, sheet.addToCell
table.refRow = None
table.constrain(8, "INT")
addRef.assert_called_once_with()
addCell.assert_called_once_with((table.refRow, 8), "INT")
update.assert_called_once_with()
table.refRow = 2
table.constrain(9, "=R3C4", True)
assert_equals(addRef.call_count, 1)
upCell.assert_called_once_with((table.refRow, 9), "=R3C4")
assert_equals(update.call_count, 2)
@patch('sheetDB.table.Table.constrain')
def test_updateConstraint(constrain):
table.reverseHeader = {'label': 4}
assert_raises(DataError, table.updateConstraint,
"fakeLabel", "CONSTRAINT")
table.updateConstraint('label', "CONSTRAINT")
constrain.assert_called_once_with(4, "CONSTRAINT", erase=False)
table.updateConstraint('label', "CONSTRAINT2", True)
constrain.assert_called_with(4, "CONSTRAINT2", erase=True)
@patch('sheetDB.table.Table.constrain')
@patch('sheetDB.table.Table.sheet')
@patch('sheetDB.table.Table.updateHeader')
@patch('sheetDB.table.Table.updateConstraints')
def test_expandHeader(upConstrain, header, sheet,
constrain):
table.reverseHeader = {'label': 4}
assert_raises(DataError, table.expandHeader, "label3",
"label2", "label1", "label")
assert_raises(DataError, table.expandHeader, ("label4", "a"),
"label3", "label2", "label1", "label3")
assert_raises(DataError, table.expandHeader, dict())
upConstrain.assert_not_called()
header.assert_not_called()
sheet.addCols.assert_not_called()
sheet.updateCell.assert_not_called()
constrain.assert_not_called()
sheet.colCount = 0
table.expandHeader("label1", ("label2", "constraint"))
upConstrain.assert_called_once_with()
header.assert_called_once_with()
sheet.addCols.assert_called_once_with(2)
assert_equals(sheet.updateCell.call_count, 2)
constrain.assert_called_once_with(2, "constraint")
@patch('sheetDB.table.Table.sheet')
@patch('sheetDB.table.Table.updateHeader')
def test_setHeaderLabels(update, sheet):
table.setHeaderLabels("label", "label2", "label3")
sheet.fillRow.assert_called_once_with(table.headerRow,
("label", "label2", "label3"))
update.assert_called_once_with()
@patch('sheetDB.table.Table.sheet')
@patch('sheetDB.table.Table.updateConstraints')
@patch('sheetDB.table.Table.addRefRow')
def test_setConstraints(add, update, sheet):
table.refRow = None
table.setConstraints("label", "label2", "label3")
add.assert_called_once_with()
sheet.fillRow.assert_called_once_with(table.refRow,
("label", "label2", "label3"))
update.assert_called_once_with()
table.refRow = 1
table.setConstraints("la", "bel")
assert_equals(add.call_count, 1)
sheet.fillRow.assert_called_with(table.refRow,
("la", "bel"))
assert_equals(update.call_count, 2)
update.assert_called_with()
@patch('sheetDB.table.Table.sheet')
def test_fetchRowLabels(sheet):
sheet.getRawRow.return_value = [1, 2, 3, 4, 5]
sheet.getRow.return_value = ["1", "2", "3", "4", "5"]
assert_equals(table.fetchRowLabels(1, False)[3], "3")
sheet.getRow.assert_called_once_with(1)
assert_equals(table.fetchRowLabels(4)[1], 1)
sheet.getRawRow.assert_called_once_with(4)
@patch('sheetDB.table.Table.getConstrained')
def test_getRowAsDict(getConstrained):
getConstrained.return_value = "g"
table.ignoredCols = set([1, 2, 3])
table.header = {1: 'a', 2: 'b', 3: 'c', 4: 'd', 5: 'e'}
table.constraints = {1: '', 2: '', 3: 'a', 4: '', 5: 'a'}
assert_equals(table._getRowAsDict(range(1, 6)),
{'d': 'g', 'e': 'g'})
getConstrained.assert_called_with(5, 'a')
assert_equals(getConstrained.call_count, 2)
assert_equals(table._getRowAsDict(range(1, 6), True),
{'a': 'g', 'b': 'g', 'c': 'g', 'd': 'g', 'e': 'g'})
getConstrained.assert_called_with(5, 'a')
assert_equals(getConstrained.call_count, 7)
@patch('sheetDB.table.Table.sheet')
@patch('sheetDB.table.Table._getRowAsDict')
def test_fetchEntity(getRow, sheet):
assert_equals(table.fetchEntity(4), getRow.return_value)
sheet.getRow.assert_called_once_with(4)
getRow.assert_called_once_with(sheet.getRow.return_value, False)
@patch('sheetDB.table.Table._getRowAsDict')
@patch('sheetDB.table.Table.sheet')
def test_fetchEntities(sheet, getRow):
sheet.rowCount = 40
assert_raises(SheetError, table.fetchEntities, [0,4])
assert_raises(SheetError, table.fetchEntities, range(1, 42))
sheet.getAll.assert_not_called()
getRow.assert_not_called()
sheet.getAll.return_value = range(40)
assert_equals(table.fetchEntities([1,2,3,4,5]),
[getRow.return_value,] * 5)
sheet.getAll.assert_called_once_with()
getRow.assert_called_with(4, False)
assert_equals(getRow.call_count, 5)
@patch('sheetDB.table.Table.sheet')
def test_findEntityRows(sheet):
table.reverseHeader = {"a": 1, "b": 2, "c": 4}
sheet.rowCount = 6
table.ignoredRows = set()
sheet.getCol.return_value = ["a", "b", "a", "c", "u", "s"]
assert_raises(DataError, table.findEntityRows, {"d": {"value": 5,
"type": "positive"}})
sheet.getCol.assert_not_called()
assert_equals(table.findEntityRows(dict()), set(range(1, 7)))
sheet.getCol.assert_not_called()
assert_equals(table.findEntityRows({'a': {'value': 'a',
'type': 'positive'}}),
{1, 3})
sheet.getCol.assert_called_once_with(1)
assert_equals(table.findEntityRows({'b': {'values': ['a', 'k'],
'type': 'negative'}}),
{2, 4, 5, 6})
sheet.getCol.assert_called_with(2)
@patch('sheetDB.table.Table.findEntityRows')
@patch('sheetDB.table.Table.fetchEntities')
@patch('sheetDB.table.convertToKeyed')
def test_findEntities(convert, fetch, find):
find.return_value = {12, 11, 15, 14, 19}
assert_equals(table.findEntities({'a': {'value': 'k',
'type': 'positive'}}),
fetch.return_value)
find.assert_called_once_with({'a': {'value': 'k',
'type': 'positive'}})
fetch.assert_called_once_with([11, 12, 14, 15, 19])
convert.assert_not_called()
assert_equals(table.findEntities({'a': {'value': 'k',
'type': 'negative'}},
keyLabel="label"),
convert.return_value)
find.assert_called_with({'a': {'value': 'k',
'type': 'negative'}})
fetch.assert_called_with([11, 12, 14, 15, 19])
assert_equals(fetch.call_count, 2)
convert.assert_called_once_with(fetch.return_value, "label", False)
@patch('sheetDB.table.Table.findEntities')
def test_getAllEntities(find):
assert_equals(table.getAllEntities(), find.return_value)
find.assert_called_once_with(dict(), None, False)
assert_equals(table.getAllEntities("label", True), find.return_value)
find.assert_called_with(dict(), "label", True)
@patch('sheetDB.table.Table.findEntities')
def test_findValues(find):
find.return_value = [{'a': 1, 'b': 2, 'c': 3},]
assert_raises(DataError, table.findValues, {'d': 4}, ['a', 'd'])
find.assert_called_once_with({'d': 4})
assert_equals(table.findValues(dict(), ['a']), [{'a': 1}])
find.assert_called_with(dict())
assert_equals(find.call_count, 2)
@patch('sheetDB.table.Table.findValues')
def test_findValue(find):
find.return_value = [{'a': 1}, {'a': 2}, {'a': 3}]
assert_equals(table.findValue({'d': 4}, 'a'), range(1, 4))
@patch('sheetDB.table.dropIndices')
def test_hideIgnored(drop):
table.ignoredRows = set([2, 3, 5])
table.ignoredCols = set([1, 4, 6])
data = [1, 2, 3, 4, 5, 6]
assert_equals(table._hideIgnored(data, "row"), drop.return_value)
drop.assert_called_once_with(data, {1, 2, 4})
assert_equals(table._hideIgnored(data, "col"), drop.return_value)
drop.assert_called_with(data, {0, 3, 5})
def test_hideIgnoredWithDrop():
table.ignoredRows = set([2, 3, 5])
table.ignoredCols = set([1, 4, 6])
data = [1, 2, 3, 4, 5, 6]
assert_equals(table._hideIgnored(data, "col"), [2, 3, 5])
assert_equals(table._hideIgnored(data, "row"), [1, 4, 6])
@patch('sheetDB.table.Table._hideIgnored')
def test_checkConstraint(hide):
table.ignoredRows = table.ignoredCols = set()
hide.return_value = ["1","2","3"]
assert_true(table.checkConstraint("", "INT"))
assert_true(table.checkConstraint("val", ""))
assert_true(table.checkConstraint("", ""))
assert_true(table.checkConstraint(4, "UNIQUE", 5))
assert_raises(DataError, table.checkConstraint, 1, "UNIQUE")
assert_false(table.checkConstraint(1, "UNIQUE", 5))
assert_true(table.checkConstraint(1, "POSITIVE"))
assert_false(table.checkConstraint(-1, "POSITIVE"))
assert_false(table.checkConstraint("0", "POSITIVE"))
assert_true(table.checkConstraint("0", "NONNEGATIVE"))
assert_true(table.checkConstraint("19.23", "NONNEG"))
assert_false(table.checkConstraint("-0.41", "NONNEG"))
assert_true(table.checkConstraint("9", "INT"))
assert_true(table.checkConstraint("-1", "INT"))
assert_false(table.checkConstraint("9.4", "INT"))
assert_false(table.checkConstraint("string", "INT"))
assert_true(table.checkConstraint(4, "INT"))
assert_false(table.checkConstraint(float(4), "INT"))
assert_true(table.checkConstraint("4.00", "NUMERIC"))
assert_false(table.checkConstraint("49eerio", "NUMERIC"))
assert_true(table.checkConstraint("cheerios", "STRING"))
assert_false(table.checkConstraint(1, "STRING"))
assert_true(table.checkConstraint("LKJLKJaslkj", "ALPHA"))
assert_false(table.checkConstraint("LKF4444FLJ", "ALPHA"))
assert_true(table.checkConstraint("hunter2", "ALPHANUMERIC"))
assert_true(table.checkConstraint("Hunter33", "ALPHANUM"))
assert_false(table.checkConstraint("[HUN,TER,2]", "ALPHANUM"))
assert_true(table.checkConstraint("1,2,3", "ARRAY"))
assert_true(table.checkConstraint([1,2,3], "LIST"))
assert_false(table.checkConstraint(dict(), "ARRAY"))
assert_true(table.checkConstraint(True, "BOOL"))
assert_true(table.checkConstraint("FALSE", "BOOL"))
assert_false(table.checkConstraint("3", "BOOLEAN"))
assert_true(table.checkConstraint("=IFERROR(R[-1]C4, 3)", "FORMULA"))
assert_false(table.checkConstraint(4, "FORMULA"))
assert_raises(DataError, table.checkConstraint, "val", "???")
def test_checkConstraints():
assert_true(table.checkConstraints("4", "INT NUMERIC POSITIVE"))
assert_true(table.checkConstraints(-9, ""))
assert_false(table.checkConstraints("0", "ALPHA BOOL"))
assert_raises(DataError, table.checkConstraints, "4", "CHEERIOS")
def test_getConstrained():
assert_equals(table.getConstrained("", "CON"), "")
assert_equals(table.getConstrained("VALU", ""), "VALU")
assert_equals(table.getConstrained("4", "int"), 4)
assert_equals(table.getConstrained("4.000", "nUmEric"), 4.000)
assert_equals(table.getConstrained(1, "LIST"), [1,])
assert_equals(table.getConstrained("12, 3, 4", "ARRAY"), ['12', '3', '4'])
assert_equals(table.getConstrained("[12, 3, 4]", "INT ARRAY"), [12, 3, 4])
assert_equals(table.getConstrained("TRUE", "BOOL"), True)
assert_equals(table.getConstrained("FALSE", "bool"), False)
assert_equals(table.getConstrained("hunter2", "ALPHANUMERIC"), "hunter2")
@patch('sheetDB.table.Table.checkConstraints')
@patch('sheetDB.table.Table.sheet')
@patch('sheetDB.table.Table.expandHeader')
def test_addEntity(expand, sheet, check):
check.return_value = True
sheet.colCount = 10
table.ignoredCols = set()
table.constraints = dict()
for i in xrange(1, 11):
table.constraints[i] = ""
table.constraints[4] = "=FORMULA"
table.reverseHeader = {"label": 1}
def expandHead(val):
table.reverseHeader[val] = 11
table.constraints[11] = ""
expand.side_effect = expandHead
table.addEntity({"label": "value", "other": "val"})
expand.assert_called_once_with("other")
sheet.appendRow.assert_called_once_with(["value", "",
"", "=FORMULA", "", "", "", "", "", "", "val"])
check.return_value = False
assert_raises(DataError, table.addEntity, {"label": "value"})
table.ignoredCols.add(1)
check.return_value = True
assert_raises(DataError, table.addEntity, {"label": "value"})
@patch('sheetDB.table.Table.fetchFromParent')
@patch('sheetDB.table.Table.setHeader')
@patch('sheetDB.table.Table.setRefRow')
@patch('sheetDB.table.Table.unignoreRows')
@patch('sheetDB.table.Table.ignoreRows')
@patch('sheetDB.table.Table.sheet')
def test_removeEntity(sheet, ignore, unignore,
ref, head, fetch):
table.ignoredRows = {1, 5, 6, 7}
fetch.return_value = [5]
assert_raises(DataError, table.removeEntity, 5)
table.headerRow = 7
table.refRow = 6
table.removeEntity(4)
sheet.deleteRow.assert_called_once_with(4)
ref.assert_called_once_with(5)
head.assert_called_once_with(6)
unignore.assert_called_once_with(5)
ignore.assert_called_once_with(4)
table.headerRow = 2
table.refRow = 1
table.ignoredRows = {1, 3, 2}
fetch.return_value = [3]
table.removeEntity(4)
assert_equals(unignore.call_count, 1)
assert_equals(ignore.call_count, 1)
@patch('sheetDB.table.Table.checkConstraints')
@patch('sheetDB.table.Table.expandHeader')
@patch('sheetDB.table.Table.sheet')
def test_updateEntity(sheet, expand, check):
table.ignoredRows = {5, 6, 7}
table.ignoredCols = {1}
table.reverseHeader = {'label': 1, 'other': 2}
check.return_value = True
table.constraints = {1: "", 2: ""}
def expandHead(val):
table.reverseHeader[val] = 3
table.constraints[3] = ""
expand.side_effect = expandHead
assert_raises(DataError, table.updateEntity, {'other': "val"},
5)
assert_raises(DataError, table.updateEntity, {'label': 'val'},
4)
check.return_value = False
assert_raises(DataError, table.updateEntity, {'other': 'val'},
4)
check.return_value = True
table.updateEntity({'other': 'val', 'new': 2}, 9)
expand.assert_called_once_with('new')
sheet.updateCell.assert_called_with((9, 2), 'val')
assert_equals(sheet.updateCell.call_count, 2)
@patch('sheetDB.table.Table.addEntity')
@patch('sheetDB.table.Table.updateEntity')
@patch('sheetDB.table.Table.findEntityRows')
def test_updateMatchingEntities(find, update, add):
find.return_value = [1,2,3]
table.updateMatchingEntities(dict(), dict())
update.assert_called_with(dict(), 3)
assert_equals(update.call_count, 3)
find.return_value = list()
table.updateMatchingEntities(dict(), dict())
assert_equals(update.call_count, 3)
add.assert_not_called()
def addAFind(*args):
find.return_value = [5,]
add.side_effect = addAFind
table.updateMatchingEntities(dict(), dict(), True)
add.assert_called_once_with(dict())
find.assert_called_with(dict())
assert_equals(find.call_count, 4)
update.assert_called_with(dict(), 5)
assert_equals(update.call_count, 4)
@patch('sheetDB.table.Table.removeEntity')
@patch('sheetDB.table.Table.findEntityRows')
def test_removeMatchingEntities(find, remove):
find.return_value = [1,2,3]
table.removeMatchingEntities(dict())
remove.assert_called_with(3)
assert_equals(remove.call_count, 3)
@patch('sheetDB.table.Table.removeFromParent')
@patch('sheetDB.table.Table.sheet')
def test_delete(sheet, remove):
table.delete()
assert_equals(remove.call_count, 4)
remove.assert_called_with("IGNOREDCOLS")
sheet.delete.assert_called_once_with()
|
{
"content_hash": "6687f280d1c3df3a1516b90f9bb8b0c5",
"timestamp": "",
"source": "github",
"line_count": 559,
"max_line_length": 78,
"avg_line_length": 41.09660107334526,
"alnum_prop": 0.6683498019414095,
"repo_name": "knyte/sheetDB",
"id": "5fb8251b72af7c44838da7677dcbf7a614d2c28c",
"size": "23005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sheetDB/test/table_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "145240"
}
],
"symlink_target": ""
}
|
from typing import Dict, Union # pylint: disable=unused-import
import time
import apache_beam as beam
import fastavro
from gcp_variant_transforms.beam_io import vcf_header_io # pylint: disable=unused-import
from gcp_variant_transforms.beam_io import vcf_parser
from gcp_variant_transforms.libs import hashing_util
from gcp_variant_transforms.libs import sample_info_table_schema_generator
from gcp_variant_transforms.libs import schema_converter
SampleNameEncoding = vcf_parser.SampleNameEncoding
_SECS_IN_MIN = 60
_MICROS_IN_SEC = 1000000
class ConvertSampleInfoToRow(beam.DoFn):
"""Extracts sample info from `VcfHeader` and converts it to a BigQuery row."""
def __init__(self, sample_name_encoding):
# type: (int) -> None
self._sample_name_encoding = sample_name_encoding
def _get_now_to_minute(self):
return int(time.time()) // _SECS_IN_MIN * _SECS_IN_MIN * _MICROS_IN_SEC
def process(self, vcf_header):
    # type: (vcf_header_io.VcfHeader) -> Dict[str, Union[int, str]]
current_minute = self._get_now_to_minute()
for sample in vcf_header.samples:
if self._sample_name_encoding == SampleNameEncoding.WITH_FILE_PATH:
sample = hashing_util.create_composite_sample_name(sample,
vcf_header.file_path)
sample_id = hashing_util.generate_sample_id(sample)
row = {
sample_info_table_schema_generator.SAMPLE_ID: sample_id,
sample_info_table_schema_generator.SAMPLE_NAME: sample,
sample_info_table_schema_generator.FILE_PATH: vcf_header.file_path,
sample_info_table_schema_generator.INGESTION_DATETIME: current_minute
}
yield row
class SampleInfoToAvro(beam.PTransform):
"""Writes sample info to BigQuery."""
def __init__(self, output_path, sample_name_encoding):
    # type: (str, int) -> None
"""Initializes the transform.
Args:
output_path: The output path of the sample file in the avro directory.
      sample_name_encoding: If SampleNameEncoding.WITHOUT_FILE_PATH is supplied,
        sample_id is hashed from sample_name alone; otherwise both sample_name
        and file_path are used.
"""
self._output_path = output_path
self._sample_name_encoding = sample_name_encoding
bq_schema = sample_info_table_schema_generator.generate_schema()
self._fastavro_schema = fastavro.parse_schema(
schema_converter.convert_schema_to_avro_dict(bq_schema))
def expand(self, pcoll):
return (pcoll
| 'ConvertSampleInfoToAvroTableRow' >> beam.ParDo(
ConvertSampleInfoToRow(self._sample_name_encoding))
| 'WriteToAvroFiles' >> beam.io.WriteToAvro(
self._output_path, self._fastavro_schema))
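# A minimal wiring sketch (hypothetical; the real pipeline constructs this
# transform from parsed VCF headers elsewhere in the project):
#   with beam.Pipeline() as p:
#       _ = (p
#            | beam.Create(vcf_headers)  # assumed iterable of VcfHeader
#            | SampleInfoToAvro('gs://my-bucket/avro/sample_info',
#                               SampleNameEncoding.WITHOUT_FILE_PATH))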
|
{
"content_hash": "d391d14e526908ce5e2d7774e508d893",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 89,
"avg_line_length": 39.61971830985915,
"alnum_prop": 0.6871667259153929,
"repo_name": "googlegenomics/gcp-variant-transforms",
"id": "855db68818679f762922fc0de586b82f3d1a423f",
"size": "3390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gcp_variant_transforms/transforms/sample_info_to_avro.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3534"
},
{
"name": "Python",
"bytes": "1101324"
},
{
"name": "Shell",
"bytes": "17097"
}
],
"symlink_target": ""
}
|
import k3d
import numpy as np
from k3d.colormaps import matplotlib_color_maps
from k3d.helpers import map_colors
from numpy.linalg import norm
from k3d.headless import k3d_remote, get_headless_driver
def generate():
p = np.linspace(-1, 1, 10)
def f(x, y, z):
return y * z, x * z, x * y
vectors = np.array([[[f(x, y, z) for x in p] for y in p]
for z in p]).astype(np.float32)
norms = np.apply_along_axis(norm, 1, vectors.reshape(-1, 3))
plt_vector_field = k3d.vector_field(vectors,
head_size=1.5,
scale=2,
bounds=[-1, 1, -1, 1, -1, 1])
colors = map_colors(norms, matplotlib_color_maps.Turbo, [0, 1]).astype(np.uint32)
plt_vector_field.colors = np.repeat(colors, 2)
plot = k3d.plot(screenshot_scale=1,
grid_visible=False,
axes_helper=0)
plot += plt_vector_field
headless = k3d_remote(plot, get_headless_driver(), width=800, height=800)
headless.sync(hold_until_refreshed=True)
headless.camera_reset(1)
screenshot = headless.get_screenshot()
headless.close()
return screenshot
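# Hedged usage sketch: get_screenshot() is assumed to return raw PNG bytes,
# so a caller could persist the thumbnail like this:
#   if __name__ == '__main__':
#       with open('vector_field_colormap_thumbnail.png', 'wb') as f:
#           f.write(generate())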
|
{
"content_hash": "2aaa8f8bb32daf0f7ad25650f8d20193",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 85,
"avg_line_length": 30.9,
"alnum_prop": 0.5711974110032363,
"repo_name": "K3D-tools/K3D-jupyter",
"id": "35d4c7d2f4f7c3805d067e038089605625f7c32d",
"size": "1236",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "docs/source/gallery/api/thumbnails/vector_field_colormap_thumbnail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "1326"
},
{
"name": "GLSL",
"bytes": "33792"
},
{
"name": "HTML",
"bytes": "8112"
},
{
"name": "JavaScript",
"bytes": "599147"
},
{
"name": "Jupyter Notebook",
"bytes": "5311"
},
{
"name": "Python",
"bytes": "1949685"
},
{
"name": "Shell",
"bytes": "268"
}
],
"symlink_target": ""
}
|
"""
BOSH OpenStack Ironic CPI
"""
# Python 2 and 3 compatibility
from __future__ import unicode_literals
import re
import time
class CPISettings(object):
_instance = None
_string_booleans_true = ['1', 'yes', 'true', 'on']
_re_macaddr = re.compile("^([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2})$")
def __new__(cls, *args, **kwargs):
# Singleton implementation
if not cls._instance:
            # object.__new__() accepts no extra arguments on Python 3
            cls._instance = super(CPISettings, cls).__new__(cls)
return cls._instance
def __init__(self):
self.configdrive_ext = '.cfgd'
self.stemcell_image_ext = '.qcow2'
self.stemcell_metadata_ext = '.meta'
self.stemcell_image = 'root.img'
self.stemcell_id_format = 'stemcell_{os_distro}-{architecture}-{version}'
# To setup the name in set_vm_metadata
self.server_name = '{job}-{index}'
# Sort of timeout for waiting in ironic loops (create_vm, delete_vm).
# 30s x 40 is the limit
self.ironic_sleep_times = 40
self.ironic_sleep_seconds = 30
# Default settings for registry
self.disk_system_device = '/dev/sda'
self.disk_ephemeral_device = '/dev/sdb'
self.disk_persistent_device = '/dev/sdc'
# Ironic
self.ironic_search_state = 'manageable'
def encode_disk(self, mac, device, size):
disk_id = mac.lower().replace(':','')
disk_id = device.replace('/dev/', disk_id + '-', 1)
disk_id = str(int(time.time()*10)) + '-' + disk_id
return disk_id
def decode_disk(self, disk_id):
t, mac, d = disk_id.split('-', 2)
macaddr = ':'.join([mac[i:i+2] for i in range(0, len(mac), 2)])
device = '/dev/' + d
return (macaddr, device)
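# Round-trip sketch for the disk-id helpers (MAC address and device are made
# up; the leading component is a deciseconds timestamp, so decode_disk simply
# discards it):
#   >>> s = CPISettings()
#   >>> disk_id = s.encode_disk('AA:BB:CC:00:11:22', '/dev/sdc', 10)
#   >>> disk_id          # e.g. '14716543210-aabbcc001122-sdc'
#   >>> s.decode_disk(disk_id)
#   ('aa:bb:cc:00:11:22', '/dev/sdc')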
|
{
"content_hash": "c0c6c7924bdd702ae588aa3e9525e0e9",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 81,
"avg_line_length": 33.39622641509434,
"alnum_prop": 0.572316384180791,
"repo_name": "jriguera/bosh-ironic-cpi-release",
"id": "b6f7d2b70bec6765f89b740eec7687547c0bdb66",
"size": "1819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/bosh_ironic_cpi/ironic_cpi/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "13005"
},
{
"name": "Lua",
"bytes": "55086"
},
{
"name": "Python",
"bytes": "111012"
},
{
"name": "Shell",
"bytes": "22086"
}
],
"symlink_target": ""
}
|
from keystone.common import sql
from keystone.common.sql import migration
from keystone import exception
from keystone.openstack.common.db.sqlalchemy import session as db_session
class ProjectEndpoint(sql.ModelBase, sql.DictBase):
"""project-endpoint relationship table."""
__tablename__ = 'project_endpoint'
attributes = ['endpoint_id', 'project_id']
endpoint_id = sql.Column(sql.String(64),
primary_key=True,
nullable=False)
project_id = sql.Column(sql.String(64),
primary_key=True,
nullable=False)
class EndpointFilter(sql.Base):
# Internal interface to manage the database
def db_sync(self, version=None):
migration.db_sync(version=version)
@sql.handle_conflicts(conflict_type='project_endpoint')
def add_endpoint_to_project(self, endpoint_id, project_id):
session = db_session.get_session()
with session.begin():
endpoint_filter_ref = ProjectEndpoint(endpoint_id=endpoint_id,
project_id=project_id)
session.add(endpoint_filter_ref)
def _get_project_endpoint_ref(self, session, endpoint_id, project_id):
endpoint_filter_ref = session.query(ProjectEndpoint).get(
(endpoint_id, project_id))
if endpoint_filter_ref is None:
msg = _('Endpoint %(endpoint_id)s not found in project '
'%(project_id)s') % {'endpoint_id': endpoint_id,
'project_id': project_id}
raise exception.NotFound(msg)
return endpoint_filter_ref
def check_endpoint_in_project(self, endpoint_id, project_id):
session = db_session.get_session()
self._get_project_endpoint_ref(session, endpoint_id, project_id)
def remove_endpoint_from_project(self, endpoint_id, project_id):
session = db_session.get_session()
endpoint_filter_ref = self._get_project_endpoint_ref(
session, endpoint_id, project_id)
with session.begin():
session.delete(endpoint_filter_ref)
def list_endpoints_for_project(self, project_id):
session = db_session.get_session()
query = session.query(ProjectEndpoint)
query = query.filter_by(project_id=project_id)
endpoint_filter_refs = query.all()
return endpoint_filter_refs
def list_projects_for_endpoint(self, endpoint_id):
session = db_session.get_session()
query = session.query(ProjectEndpoint)
query = query.filter_by(endpoint_id=endpoint_id)
endpoint_filter_refs = query.all()
return endpoint_filter_refs
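# Hedged usage sketch (UUIDs are illustrative; in Keystone this backend is
# normally reached through the endpoint_filter manager rather than directly):
#   driver = EndpointFilter()
#   driver.add_endpoint_to_project('endpoint-uuid', 'project-uuid')
#   driver.check_endpoint_in_project('endpoint-uuid', 'project-uuid')
#   assert driver.list_endpoints_for_project('project-uuid')
#   driver.remove_endpoint_from_project('endpoint-uuid', 'project-uuid')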
|
{
"content_hash": "b301e8fb8e892b913d97c7635c23d2b5",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 74,
"avg_line_length": 41.46969696969697,
"alnum_prop": 0.6225794665692364,
"repo_name": "derekchiang/keystone",
"id": "d0d02f1c9fdb9bdff3b0ca300da4a65b7fc49540",
"size": "3368",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/contrib/endpoint_filter/backends/sql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "2833790"
},
{
"name": "Shell",
"bytes": "10512"
}
],
"symlink_target": ""
}
|
import json
import boto3
import requests
import StringIO
from flask import send_file
from botocore.exceptions import ClientError
from threading import Thread
client = boto3.client('s3')
resource = boto3.resource('s3')
region = 'eu-west-1'
def pather(*args):
return "/".join([arg for arg in args])
def init(path, name):
if name == "favicon.ico":
return None
    policy = {
        "Version": "2012-10-17",
        "Statement": [{
            "Sid": "PublicReadGetObject",
            "Effect": "Allow",
            "Principal": "*",
            "Action": ["s3:GetObject"],
            "Resource": ["arn:aws:s3:::{}/*".format(name)]
        }]
    }
try:
client.create_bucket(
ACL='public-read',
Bucket=name,
CreateBucketConfiguration={
'LocationConstraint': region
}
)
client.put_bucket_policy(
Bucket=name,
Policy=json.dumps(policy)
)
client.put_bucket_website(
Bucket=name,
WebsiteConfiguration={
'ErrorDocument': {
'Key': 'error.html'
},
'IndexDocument': {
'Suffix': 'index.html'
}
}
)
except ClientError as e:
return e
def write(key, value, path, name, raw=False):
page = StringIO.StringIO()
if raw:
page.write(value)
extra_args={}
else:
page.write(json.dumps(value))
extra_args={'ContentType': 'application/json'}
page.seek(0)
client.upload_fileobj(page, name, key, ExtraArgs=extra_args)
def _write(*args):
    # Asynchronous fire-and-forget variant of write()
    t = Thread(target=write, args=args)
    t.start()
def read(key, path, name, raw=False):
s3_path = "https://s3-{}.amazonaws.com/{}/{}".format(
region,
name,
key
)
if raw:
contents = StringIO.StringIO()
contents.write(requests.get(s3_path).content)
contents.seek(0)
return send_file(contents, attachment_filename=key)
else:
return requests.get(s3_path).json()
def remove(key, path, name):
client.delete_object(
Bucket=name,
Key=key
)
def _remove(*args):
    # Asynchronous fire-and-forget variant of remove()
    t = Thread(target=remove, args=args)
    t.start()
def drop(path, name):
bucket = resource.Bucket(name)
bucket.object_versions.all().delete()
bucket.delete()
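# Hedged usage sketch (bucket name and keys are made up; init() must have
# created the bucket before write()/read() against it will succeed):
#   init('/', 'example-not-db-bucket')
#   write('users/1', {'name': 'alice'}, '/', 'example-not-db-bucket')
#   print(read('users/1', '/', 'example-not-db-bucket'))
#   remove('users/1', '/', 'example-not-db-bucket')
#   drop('/', 'example-not-db-bucket')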
|
{
"content_hash": "b62420d6950283f6967756077e4221e7",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 70,
"avg_line_length": 26.104761904761904,
"alnum_prop": 0.4914264866836921,
"repo_name": "martyni/not_db",
"id": "a0eec6c6dc8cb561021a22607ed6f49bef313468",
"size": "2741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Not_Db/s3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4740"
},
{
"name": "Python",
"bytes": "21837"
},
{
"name": "Shell",
"bytes": "808"
}
],
"symlink_target": ""
}
|
"""
This module houses the ctypes initialization procedures, as well
as the notice and error handler function callbacks (get called
when an error occurs in GEOS).
This module also houses GEOS Pointer utilities, including
get_pointer_arr(), and GEOM_PTR.
"""
import logging
import os
import re
from ctypes import CDLL, CFUNCTYPE, POINTER, Structure, c_char_p
from ctypes.util import find_library
from django.contrib.gis.geos.error import GEOSException
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import SimpleLazyObject
from django.utils.version import get_version_tuple
logger = logging.getLogger('django.contrib.gis')
def load_geos():
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GEOS_LIBRARY_PATH
except (AttributeError, EnvironmentError,
ImportError, ImproperlyConfigured):
lib_path = None
# Setting the appropriate names for the GEOS-C library.
if lib_path:
lib_names = None
elif os.name == 'nt':
# Windows NT libraries
lib_names = ['geos_c', 'libgeos_c-1']
elif os.name == 'posix':
# *NIX libraries
lib_names = ['geos_c', 'GEOS']
else:
raise ImportError('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the path to the GEOS
# shared library. This is better than manually specifying each library name
# and extension (e.g., libgeos_c.[so|so.1|dylib].).
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if lib_path is not None:
break
# No GEOS library could be found.
if lib_path is None:
raise ImportError(
'Could not find the GEOS library (tried "%s"). '
'Try setting GEOS_LIBRARY_PATH in your settings.' %
'", "'.join(lib_names)
)
# Getting the GEOS C library. The C interface (CDLL) is used for
# both *NIX and Windows.
# See the GEOS C API source code for more details on the library function calls:
# http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html
_lgeos = CDLL(lib_path)
# Here we set up the prototypes for the initGEOS_r and finishGEOS_r
# routines. These functions aren't actually called until they are
# attached to a GEOS context handle -- this actually occurs in
# geos/prototypes/threadsafe.py.
_lgeos.initGEOS_r.restype = CONTEXT_PTR
_lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR]
return _lgeos
# The notice and error handler C function callback definitions.
# Supposed to mimic the GEOS message handler (C below):
# typedef void (*GEOSMessageHandler)(const char *fmt, ...);
NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def notice_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
warn_msg = fmt % lst
except TypeError:
warn_msg = fmt
logger.warning('GEOS_NOTICE: %s\n', warn_msg)
notice_h = NOTICEFUNC(notice_h)
ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def error_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
err_msg = fmt % lst
except TypeError:
err_msg = fmt
logger.error('GEOS_ERROR: %s\n', err_msg)
error_h = ERRORFUNC(error_h)
# #### GEOS Geometry C data structures, and utility functions. ####
# Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR
class GEOSGeom_t(Structure):
pass
class GEOSPrepGeom_t(Structure):
pass
class GEOSCoordSeq_t(Structure):
pass
class GEOSContextHandle_t(Structure):
pass
# Pointers to opaque GEOS geometry structures.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)
# Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection
# GEOS routines
def get_pointer_arr(n):
"Get a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer."
GeomArr = GEOM_PTR * n
return GeomArr()
lgeos = SimpleLazyObject(load_geos)
class GEOSFuncFactory:
"""
Lazy loading of GEOS functions.
"""
argtypes = None
restype = None
errcheck = None
def __init__(self, func_name, *args, restype=None, errcheck=None, argtypes=None, **kwargs):
self.func_name = func_name
if restype is not None:
self.restype = restype
if errcheck is not None:
self.errcheck = errcheck
if argtypes is not None:
self.argtypes = argtypes
self.args = args
self.kwargs = kwargs
self.func = None
def __call__(self, *args, **kwargs):
if self.func is None:
self.func = self.get_func(*self.args, **self.kwargs)
return self.func(*args, **kwargs)
def get_func(self, *args, **kwargs):
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
func = GEOSFunc(self.func_name)
func.argtypes = self.argtypes or []
func.restype = self.restype
if self.errcheck:
func.errcheck = self.errcheck
return func
# Return the string version of the GEOS library. Have to set the restype
# explicitly to c_char_p to ensure compatibility across 32 and 64-bit platforms.
geos_version = GEOSFuncFactory('GEOSversion', restype=c_char_p)
# Regular expression should be able to parse version strings such as
# '3.0.0rc4-CAPI-1.3.3', '3.0.0-CAPI-1.4.1', '3.4.0dev-CAPI-1.8.0' or '3.4.0dev-CAPI-1.8.0 r0'
version_regex = re.compile(
r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))'
r'((rc(?P<release_candidate>\d+))|dev)?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)( r\d+)?$'
)
def geos_version_info():
"""
Return a dictionary containing the various version metadata parsed from
the GEOS version string, including the version number, whether the version
is a release candidate (and what number release candidate), and the C API
version.
"""
ver = geos_version().decode()
m = version_regex.match(ver)
if not m:
raise GEOSException('Could not parse version info string "%s"' % ver)
return {key: m.group(key) for key in (
'version', 'release_candidate', 'capi_version', 'major', 'minor', 'subminor')}
def geos_version_tuple():
"""Return the GEOS version as a tuple (major, minor, subminor)."""
return get_version_tuple(geos_version_info()['version'])
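# Illustrative parses of the version regex (sample strings, not live library
# output):
#   >>> version_regex.match('3.4.0dev-CAPI-1.8.0 r0').group('version')
#   '3.4.0'
#   >>> version_regex.match('3.0.0rc4-CAPI-1.3.3').group('release_candidate')
#   '4'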
|
{
"content_hash": "2c4232e55e0e5bfd57eb74eee1f9094c",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 95,
"avg_line_length": 31.347826086956523,
"alnum_prop": 0.6584989983048235,
"repo_name": "evansd/django",
"id": "810161ffebb757727d892e4314aa5de213432894",
"size": "6489",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/contrib/gis/geos/libgeos.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55975"
},
{
"name": "HTML",
"bytes": "203931"
},
{
"name": "JavaScript",
"bytes": "253392"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12009521"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
'''
====================================================================================
Copyright 2013, 2014 Windy Darian (大地无敌), Studio "Sekai no Kagami"
(世界之镜制作组) of Seven Ocean Game Arts (七海游戏文化社
, 北京航空航天大学学生七海游戏文化社) @ http://sogarts.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
====================================================================================
Created on Apr 16, 2013
Simple Main Menu
@author: Windy Darian (大地无敌)
'''
import direct.gui.DirectGuiGlobals as DGG
from direct.gui.DirectButton import DirectButton
from direct.stdpy.threading import Lock
from sogal_form import SogalForm
from gui.layout import VLayout
BUTTON_SIZE = (-0.4,0.4,-0.04,0.08)
class MainMenu(SogalForm):
'''
main menu class
'''
def __init__(self, entry = 'ifselectionjumptest'):
'''
Constructor
'''
self.entry = entry
self.closed = True
SogalForm.__init__(self, fading = True, fading_duration = 1.0, backgroundImage = None, backgroundColor = (0,0,0,1),
enableMask = True,
hiddenFunc = self.closedFunc, shownFunc = self.openedFunc)
self.addButtonBar()
        self.fadinglock = Lock()  # prevents open/close failures while the main menu is still fading
def addButtonBar(self):
self.bar = VLayout(margin= 0.1)
self.bar.reparentTo(self)
def addButtons(self):
'''
override this if you want custom buttons
'''
        #TODO: Continue button. Note the continue position may be loaded from disk, may come from returning to the title screen from a running game, or may be missing because the game ended normally (no Continue data).
self.addButton(text = 'New Game', state = DGG.NORMAL, command = self._startGame)
self.addButton(text = 'Load', state = DGG.NORMAL, command = self._load)
self.addButton(text = 'Options', state = DGG.NORMAL, command = self._config)
#TODO:Gallery Button
self.addButton(text = 'Exit', state = DGG.NORMAL, command = self._exit)
    def close(self):
        '''Called by SogalBase: hide the menu. Override this for a more complex main menu.'''
        self.fadinglock.acquire()
        if self.closed:
            self.fadinglock.release()  # nothing to hide, so no hiddenFunc will release it
            return
        SogalForm.hide(self)
        self.closed = True
        if self.bar:
            for btn in self.bar:
                btn['state'] = DGG.DISABLED
    def open(self):
        '''Called by SogalBase: show the menu. Override this for a more complex main menu.'''
        self.fadinglock.acquire()
        if not self.closed:
            self.fadinglock.release()  # already open, so no shownFunc will release it
            return
        SogalForm.show(self)
        self.addButtons()
        self.closed = False
def show(self):
#redirect show/hide to open/close
self.open()
def hide(self):
#redirect show/hide to open/close
self.close()
def closedFunc(self):
self.cleanup()
self.fadinglock.release()
def cleanup(self):
for btn in self.bar:
btn.destroy()
self.bar.removeNode()
self.addButtonBar()
def openedFunc(self):
self.fadinglock.release()
def addButton(self,**args):
'''Add a button and return it'''
btn = DirectButton(**dict(base.getStyle()['mainMenuButton'], frameSize = BUTTON_SIZE,**args)) # @UndefinedVariable
self.bar.append(btn)
#self.vbox.pack(btn)
return btn
def _startGame(self, scene = None):
if self.closed:
return
if not scene:
messenger.send('start_game', [self.entry])
else:
messenger.send('start_game', [scene])
def _load(self):
if self.closed:
return
messenger.send('load_game')
def _exit(self):
if self.closed:
return
messenger.send('exit_game')
def _config(self):
messenger.send('config_form')
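# A hedged customization sketch (labels and scene name are made up), using the
# addButtons() override hook documented above:
#   class MyMenu(MainMenu):
#       def addButtons(self):
#           self.addButton(text='Start', state=DGG.NORMAL,
#                          command=self._startGame, extraArgs=['my_scene'])
#           self.addButton(text='Exit', state=DGG.NORMAL, command=self._exit)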
|
{
"content_hash": "e2546ad6e41ed3e0e9a3dc963feec8f7",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 124,
"avg_line_length": 31.523489932885905,
"alnum_prop": 0.5507770917606983,
"repo_name": "WindyDarian/Sogal",
"id": "1da39369c755dd4235499a557ae7dd6d131d1107",
"size": "4906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sogasys/main_menu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "228118"
},
{
"name": "Shell",
"bytes": "258"
}
],
"symlink_target": ""
}
|